grsecurity-2.2.2-2.6.32.41-201106120643.patch
1 diff -urNp linux-2.6.32.41/arch/alpha/include/asm/elf.h linux-2.6.32.41/arch/alpha/include/asm/elf.h
2 --- linux-2.6.32.41/arch/alpha/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3 +++ linux-2.6.32.41/arch/alpha/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
4 @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8 +#ifdef CONFIG_PAX_ASLR
9 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10 +
11 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13 +#endif
14 +
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18 diff -urNp linux-2.6.32.41/arch/alpha/include/asm/pgtable.h linux-2.6.32.41/arch/alpha/include/asm/pgtable.h
19 --- linux-2.6.32.41/arch/alpha/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
20 +++ linux-2.6.32.41/arch/alpha/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
21 @@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25 +
26 +#ifdef CONFIG_PAX_PAGEEXEC
27 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30 +#else
31 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
32 +# define PAGE_COPY_NOEXEC PAGE_COPY
33 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
34 +#endif
35 +
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39 diff -urNp linux-2.6.32.41/arch/alpha/kernel/module.c linux-2.6.32.41/arch/alpha/kernel/module.c
40 --- linux-2.6.32.41/arch/alpha/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
41 +++ linux-2.6.32.41/arch/alpha/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
42 @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46 - gp = (u64)me->module_core + me->core_size - 0x8000;
47 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51 diff -urNp linux-2.6.32.41/arch/alpha/kernel/osf_sys.c linux-2.6.32.41/arch/alpha/kernel/osf_sys.c
52 --- linux-2.6.32.41/arch/alpha/kernel/osf_sys.c 2011-03-27 14:31:47.000000000 -0400
53 +++ linux-2.6.32.41/arch/alpha/kernel/osf_sys.c 2011-04-17 15:56:45.000000000 -0400
54 @@ -1169,7 +1169,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58 - if (!vma || addr + len <= vma->vm_start)
59 + if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63 @@ -1205,6 +1205,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67 +#ifdef CONFIG_PAX_RANDMMAP
68 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69 +#endif
70 +
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74 @@ -1212,8 +1216,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79 - len, limit);
80 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81 +
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
85 diff -urNp linux-2.6.32.41/arch/alpha/mm/fault.c linux-2.6.32.41/arch/alpha/mm/fault.c
86 --- linux-2.6.32.41/arch/alpha/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
87 +++ linux-2.6.32.41/arch/alpha/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
88 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92 +#ifdef CONFIG_PAX_PAGEEXEC
93 +/*
94 + * PaX: decide what to do with offenders (regs->pc = fault address)
95 + *
96 + * returns 1 when task should be killed
97 + * 2 when patched PLT trampoline was detected
98 + * 3 when unpatched PLT trampoline was detected
99 + */
100 +static int pax_handle_fetch_fault(struct pt_regs *regs)
101 +{
102 +
103 +#ifdef CONFIG_PAX_EMUPLT
104 + int err;
105 +
106 + do { /* PaX: patched PLT emulation #1 */
107 + unsigned int ldah, ldq, jmp;
108 +
109 + err = get_user(ldah, (unsigned int *)regs->pc);
110 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112 +
113 + if (err)
114 + break;
115 +
116 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118 + jmp == 0x6BFB0000U)
119 + {
120 + unsigned long r27, addr;
121 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123 +
124 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125 + err = get_user(r27, (unsigned long *)addr);
126 + if (err)
127 + break;
128 +
129 + regs->r27 = r27;
130 + regs->pc = r27;
131 + return 2;
132 + }
133 + } while (0);
134 +
135 + do { /* PaX: patched PLT emulation #2 */
136 + unsigned int ldah, lda, br;
137 +
138 + err = get_user(ldah, (unsigned int *)regs->pc);
139 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
140 + err |= get_user(br, (unsigned int *)(regs->pc+8));
141 +
142 + if (err)
143 + break;
144 +
145 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
147 + (br & 0xFFE00000U) == 0xC3E00000U)
148 + {
149 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152 +
153 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155 + return 2;
156 + }
157 + } while (0);
158 +
159 + do { /* PaX: unpatched PLT emulation */
160 + unsigned int br;
161 +
162 + err = get_user(br, (unsigned int *)regs->pc);
163 +
164 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165 + unsigned int br2, ldq, nop, jmp;
166 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167 +
168 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169 + err = get_user(br2, (unsigned int *)addr);
170 + err |= get_user(ldq, (unsigned int *)(addr+4));
171 + err |= get_user(nop, (unsigned int *)(addr+8));
172 + err |= get_user(jmp, (unsigned int *)(addr+12));
173 + err |= get_user(resolver, (unsigned long *)(addr+16));
174 +
175 + if (err)
176 + break;
177 +
178 + if (br2 == 0xC3600000U &&
179 + ldq == 0xA77B000CU &&
180 + nop == 0x47FF041FU &&
181 + jmp == 0x6B7B0000U)
182 + {
183 + regs->r28 = regs->pc+4;
184 + regs->r27 = addr+16;
185 + regs->pc = resolver;
186 + return 3;
187 + }
188 + }
189 + } while (0);
190 +#endif
191 +
192 + return 1;
193 +}
194 +
195 +void pax_report_insns(void *pc, void *sp)
196 +{
197 + unsigned long i;
198 +
199 + printk(KERN_ERR "PAX: bytes at PC: ");
200 + for (i = 0; i < 5; i++) {
201 + unsigned int c;
202 + if (get_user(c, (unsigned int *)pc+i))
203 + printk(KERN_CONT "???????? ");
204 + else
205 + printk(KERN_CONT "%08x ", c);
206 + }
207 + printk("\n");
208 +}
209 +#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217 - if (!(vma->vm_flags & VM_EXEC))
218 + if (!(vma->vm_flags & VM_EXEC)) {
219 +
220 +#ifdef CONFIG_PAX_PAGEEXEC
221 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222 + goto bad_area;
223 +
224 + up_read(&mm->mmap_sem);
225 + switch (pax_handle_fetch_fault(regs)) {
226 +
227 +#ifdef CONFIG_PAX_EMUPLT
228 + case 2:
229 + case 3:
230 + return;
231 +#endif
232 +
233 + }
234 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235 + do_group_exit(SIGKILL);
236 +#else
237 goto bad_area;
238 +#endif
239 +
240 + }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
244 diff -urNp linux-2.6.32.41/arch/arm/include/asm/elf.h linux-2.6.32.41/arch/arm/include/asm/elf.h
245 --- linux-2.6.32.41/arch/arm/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
246 +++ linux-2.6.32.41/arch/arm/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
247 @@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253 +
254 +#ifdef CONFIG_PAX_ASLR
255 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256 +
257 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259 +#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263 diff -urNp linux-2.6.32.41/arch/arm/include/asm/kmap_types.h linux-2.6.32.41/arch/arm/include/asm/kmap_types.h
264 --- linux-2.6.32.41/arch/arm/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
265 +++ linux-2.6.32.41/arch/arm/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
266 @@ -19,6 +19,7 @@ enum km_type {
267 KM_SOFTIRQ0,
268 KM_SOFTIRQ1,
269 KM_L2_CACHE,
270 + KM_CLEARPAGE,
271 KM_TYPE_NR
272 };
273
274 diff -urNp linux-2.6.32.41/arch/arm/include/asm/uaccess.h linux-2.6.32.41/arch/arm/include/asm/uaccess.h
275 --- linux-2.6.32.41/arch/arm/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
276 +++ linux-2.6.32.41/arch/arm/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
277 @@ -403,6 +403,9 @@ extern unsigned long __must_check __strn
278
279 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
280 {
281 + if ((long)n < 0)
282 + return n;
283 +
284 if (access_ok(VERIFY_READ, from, n))
285 n = __copy_from_user(to, from, n);
286 else /* security hole - plug it */
287 @@ -412,6 +415,9 @@ static inline unsigned long __must_check
288
289 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
290 {
291 + if ((long)n < 0)
292 + return n;
293 +
294 if (access_ok(VERIFY_WRITE, to, n))
295 n = __copy_to_user(to, from, n);
296 return n;
297 diff -urNp linux-2.6.32.41/arch/arm/kernel/kgdb.c linux-2.6.32.41/arch/arm/kernel/kgdb.c
298 --- linux-2.6.32.41/arch/arm/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
299 +++ linux-2.6.32.41/arch/arm/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
300 @@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
301 * and we handle the normal undef case within the do_undefinstr
302 * handler.
303 */
304 -struct kgdb_arch arch_kgdb_ops = {
305 +const struct kgdb_arch arch_kgdb_ops = {
306 #ifndef __ARMEB__
307 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
308 #else /* ! __ARMEB__ */
309 diff -urNp linux-2.6.32.41/arch/arm/mach-at91/pm.c linux-2.6.32.41/arch/arm/mach-at91/pm.c
310 --- linux-2.6.32.41/arch/arm/mach-at91/pm.c 2011-03-27 14:31:47.000000000 -0400
311 +++ linux-2.6.32.41/arch/arm/mach-at91/pm.c 2011-04-17 15:56:45.000000000 -0400
312 @@ -348,7 +348,7 @@ static void at91_pm_end(void)
313 }
314
315
316 -static struct platform_suspend_ops at91_pm_ops ={
317 +static const struct platform_suspend_ops at91_pm_ops ={
318 .valid = at91_pm_valid_state,
319 .begin = at91_pm_begin,
320 .enter = at91_pm_enter,
321 diff -urNp linux-2.6.32.41/arch/arm/mach-omap1/pm.c linux-2.6.32.41/arch/arm/mach-omap1/pm.c
322 --- linux-2.6.32.41/arch/arm/mach-omap1/pm.c 2011-03-27 14:31:47.000000000 -0400
323 +++ linux-2.6.32.41/arch/arm/mach-omap1/pm.c 2011-04-17 15:56:45.000000000 -0400
324 @@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq
325
326
327
328 -static struct platform_suspend_ops omap_pm_ops ={
329 +static const struct platform_suspend_ops omap_pm_ops ={
330 .prepare = omap_pm_prepare,
331 .enter = omap_pm_enter,
332 .finish = omap_pm_finish,
333 diff -urNp linux-2.6.32.41/arch/arm/mach-omap2/pm24xx.c linux-2.6.32.41/arch/arm/mach-omap2/pm24xx.c
334 --- linux-2.6.32.41/arch/arm/mach-omap2/pm24xx.c 2011-03-27 14:31:47.000000000 -0400
335 +++ linux-2.6.32.41/arch/arm/mach-omap2/pm24xx.c 2011-04-17 15:56:45.000000000 -0400
336 @@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
337 enable_hlt();
338 }
339
340 -static struct platform_suspend_ops omap_pm_ops = {
341 +static const struct platform_suspend_ops omap_pm_ops = {
342 .prepare = omap2_pm_prepare,
343 .enter = omap2_pm_enter,
344 .finish = omap2_pm_finish,
345 diff -urNp linux-2.6.32.41/arch/arm/mach-omap2/pm34xx.c linux-2.6.32.41/arch/arm/mach-omap2/pm34xx.c
346 --- linux-2.6.32.41/arch/arm/mach-omap2/pm34xx.c 2011-03-27 14:31:47.000000000 -0400
347 +++ linux-2.6.32.41/arch/arm/mach-omap2/pm34xx.c 2011-04-17 15:56:45.000000000 -0400
348 @@ -401,7 +401,7 @@ static void omap3_pm_end(void)
349 return;
350 }
351
352 -static struct platform_suspend_ops omap_pm_ops = {
353 +static const struct platform_suspend_ops omap_pm_ops = {
354 .begin = omap3_pm_begin,
355 .end = omap3_pm_end,
356 .prepare = omap3_pm_prepare,
357 diff -urNp linux-2.6.32.41/arch/arm/mach-pnx4008/pm.c linux-2.6.32.41/arch/arm/mach-pnx4008/pm.c
358 --- linux-2.6.32.41/arch/arm/mach-pnx4008/pm.c 2011-03-27 14:31:47.000000000 -0400
359 +++ linux-2.6.32.41/arch/arm/mach-pnx4008/pm.c 2011-04-17 15:56:45.000000000 -0400
360 @@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_stat
361 (state == PM_SUSPEND_MEM);
362 }
363
364 -static struct platform_suspend_ops pnx4008_pm_ops = {
365 +static const struct platform_suspend_ops pnx4008_pm_ops = {
366 .enter = pnx4008_pm_enter,
367 .valid = pnx4008_pm_valid,
368 };
369 diff -urNp linux-2.6.32.41/arch/arm/mach-pxa/pm.c linux-2.6.32.41/arch/arm/mach-pxa/pm.c
370 --- linux-2.6.32.41/arch/arm/mach-pxa/pm.c 2011-03-27 14:31:47.000000000 -0400
371 +++ linux-2.6.32.41/arch/arm/mach-pxa/pm.c 2011-04-17 15:56:45.000000000 -0400
372 @@ -95,7 +95,7 @@ void pxa_pm_finish(void)
373 pxa_cpu_pm_fns->finish();
374 }
375
376 -static struct platform_suspend_ops pxa_pm_ops = {
377 +static const struct platform_suspend_ops pxa_pm_ops = {
378 .valid = pxa_pm_valid,
379 .enter = pxa_pm_enter,
380 .prepare = pxa_pm_prepare,
381 diff -urNp linux-2.6.32.41/arch/arm/mach-pxa/sharpsl_pm.c linux-2.6.32.41/arch/arm/mach-pxa/sharpsl_pm.c
382 --- linux-2.6.32.41/arch/arm/mach-pxa/sharpsl_pm.c 2011-03-27 14:31:47.000000000 -0400
383 +++ linux-2.6.32.41/arch/arm/mach-pxa/sharpsl_pm.c 2011-04-17 15:56:45.000000000 -0400
384 @@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status
385 }
386
387 #ifdef CONFIG_PM
388 -static struct platform_suspend_ops sharpsl_pm_ops = {
389 +static const struct platform_suspend_ops sharpsl_pm_ops = {
390 .prepare = pxa_pm_prepare,
391 .finish = pxa_pm_finish,
392 .enter = corgi_pxa_pm_enter,
393 diff -urNp linux-2.6.32.41/arch/arm/mach-sa1100/pm.c linux-2.6.32.41/arch/arm/mach-sa1100/pm.c
394 --- linux-2.6.32.41/arch/arm/mach-sa1100/pm.c 2011-03-27 14:31:47.000000000 -0400
395 +++ linux-2.6.32.41/arch/arm/mach-sa1100/pm.c 2011-04-17 15:56:45.000000000 -0400
396 @@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
397 return virt_to_phys(sp);
398 }
399
400 -static struct platform_suspend_ops sa11x0_pm_ops = {
401 +static const struct platform_suspend_ops sa11x0_pm_ops = {
402 .enter = sa11x0_pm_enter,
403 .valid = suspend_valid_only_mem,
404 };
405 diff -urNp linux-2.6.32.41/arch/arm/mm/fault.c linux-2.6.32.41/arch/arm/mm/fault.c
406 --- linux-2.6.32.41/arch/arm/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
407 +++ linux-2.6.32.41/arch/arm/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
408 @@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk,
409 }
410 #endif
411
412 +#ifdef CONFIG_PAX_PAGEEXEC
413 + if (fsr & FSR_LNX_PF) {
414 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
415 + do_group_exit(SIGKILL);
416 + }
417 +#endif
418 +
419 tsk->thread.address = addr;
420 tsk->thread.error_code = fsr;
421 tsk->thread.trap_no = 14;
422 @@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsign
423 }
424 #endif /* CONFIG_MMU */
425
426 +#ifdef CONFIG_PAX_PAGEEXEC
427 +void pax_report_insns(void *pc, void *sp)
428 +{
429 + long i;
430 +
431 + printk(KERN_ERR "PAX: bytes at PC: ");
432 + for (i = 0; i < 20; i++) {
433 + unsigned char c;
434 + if (get_user(c, (__force unsigned char __user *)pc+i))
435 + printk(KERN_CONT "?? ");
436 + else
437 + printk(KERN_CONT "%02x ", c);
438 + }
439 + printk("\n");
440 +
441 + printk(KERN_ERR "PAX: bytes at SP-4: ");
442 + for (i = -1; i < 20; i++) {
443 + unsigned long c;
444 + if (get_user(c, (__force unsigned long __user *)sp+i))
445 + printk(KERN_CONT "???????? ");
446 + else
447 + printk(KERN_CONT "%08lx ", c);
448 + }
449 + printk("\n");
450 +}
451 +#endif
452 +
453 /*
454 * First Level Translation Fault Handler
455 *
456 diff -urNp linux-2.6.32.41/arch/arm/mm/mmap.c linux-2.6.32.41/arch/arm/mm/mmap.c
457 --- linux-2.6.32.41/arch/arm/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
458 +++ linux-2.6.32.41/arch/arm/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
459 @@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp
460 if (len > TASK_SIZE)
461 return -ENOMEM;
462
463 +#ifdef CONFIG_PAX_RANDMMAP
464 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
465 +#endif
466 +
467 if (addr) {
468 if (do_align)
469 addr = COLOUR_ALIGN(addr, pgoff);
470 @@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp
471 addr = PAGE_ALIGN(addr);
472
473 vma = find_vma(mm, addr);
474 - if (TASK_SIZE - len >= addr &&
475 - (!vma || addr + len <= vma->vm_start))
476 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
477 return addr;
478 }
479 if (len > mm->cached_hole_size) {
480 - start_addr = addr = mm->free_area_cache;
481 + start_addr = addr = mm->free_area_cache;
482 } else {
483 - start_addr = addr = TASK_UNMAPPED_BASE;
484 - mm->cached_hole_size = 0;
485 + start_addr = addr = mm->mmap_base;
486 + mm->cached_hole_size = 0;
487 }
488
489 full_search:
490 @@ -94,14 +97,14 @@ full_search:
491 * Start a new search - just in case we missed
492 * some holes.
493 */
494 - if (start_addr != TASK_UNMAPPED_BASE) {
495 - start_addr = addr = TASK_UNMAPPED_BASE;
496 + if (start_addr != mm->mmap_base) {
497 + start_addr = addr = mm->mmap_base;
498 mm->cached_hole_size = 0;
499 goto full_search;
500 }
501 return -ENOMEM;
502 }
503 - if (!vma || addr + len <= vma->vm_start) {
504 + if (check_heap_stack_gap(vma, addr, len)) {
505 /*
506 * Remember the place where we stopped the search:
507 */
508 diff -urNp linux-2.6.32.41/arch/arm/plat-s3c/pm.c linux-2.6.32.41/arch/arm/plat-s3c/pm.c
509 --- linux-2.6.32.41/arch/arm/plat-s3c/pm.c 2011-03-27 14:31:47.000000000 -0400
510 +++ linux-2.6.32.41/arch/arm/plat-s3c/pm.c 2011-04-17 15:56:45.000000000 -0400
511 @@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
512 s3c_pm_check_cleanup();
513 }
514
515 -static struct platform_suspend_ops s3c_pm_ops = {
516 +static const struct platform_suspend_ops s3c_pm_ops = {
517 .enter = s3c_pm_enter,
518 .prepare = s3c_pm_prepare,
519 .finish = s3c_pm_finish,
520 diff -urNp linux-2.6.32.41/arch/avr32/include/asm/elf.h linux-2.6.32.41/arch/avr32/include/asm/elf.h
521 --- linux-2.6.32.41/arch/avr32/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
522 +++ linux-2.6.32.41/arch/avr32/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
523 @@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpreg
524 the loader. We need to make sure that it is out of the way of the program
525 that it will "exec", and that there is sufficient room for the brk. */
526
527 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
528 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
529
530 +#ifdef CONFIG_PAX_ASLR
531 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
532 +
533 +#define PAX_DELTA_MMAP_LEN 15
534 +#define PAX_DELTA_STACK_LEN 15
535 +#endif
536
537 /* This yields a mask that user programs can use to figure out what
538 instruction set this CPU supports. This could be done in user space,
539 diff -urNp linux-2.6.32.41/arch/avr32/include/asm/kmap_types.h linux-2.6.32.41/arch/avr32/include/asm/kmap_types.h
540 --- linux-2.6.32.41/arch/avr32/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
541 +++ linux-2.6.32.41/arch/avr32/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
542 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
543 D(11) KM_IRQ1,
544 D(12) KM_SOFTIRQ0,
545 D(13) KM_SOFTIRQ1,
546 -D(14) KM_TYPE_NR
547 +D(14) KM_CLEARPAGE,
548 +D(15) KM_TYPE_NR
549 };
550
551 #undef D
552 diff -urNp linux-2.6.32.41/arch/avr32/mach-at32ap/pm.c linux-2.6.32.41/arch/avr32/mach-at32ap/pm.c
553 --- linux-2.6.32.41/arch/avr32/mach-at32ap/pm.c 2011-03-27 14:31:47.000000000 -0400
554 +++ linux-2.6.32.41/arch/avr32/mach-at32ap/pm.c 2011-04-17 15:56:45.000000000 -0400
555 @@ -176,7 +176,7 @@ out:
556 return 0;
557 }
558
559 -static struct platform_suspend_ops avr32_pm_ops = {
560 +static const struct platform_suspend_ops avr32_pm_ops = {
561 .valid = avr32_pm_valid_state,
562 .enter = avr32_pm_enter,
563 };
564 diff -urNp linux-2.6.32.41/arch/avr32/mm/fault.c linux-2.6.32.41/arch/avr32/mm/fault.c
565 --- linux-2.6.32.41/arch/avr32/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
566 +++ linux-2.6.32.41/arch/avr32/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
567 @@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
568
569 int exception_trace = 1;
570
571 +#ifdef CONFIG_PAX_PAGEEXEC
572 +void pax_report_insns(void *pc, void *sp)
573 +{
574 + unsigned long i;
575 +
576 + printk(KERN_ERR "PAX: bytes at PC: ");
577 + for (i = 0; i < 20; i++) {
578 + unsigned char c;
579 + if (get_user(c, (unsigned char *)pc+i))
580 + printk(KERN_CONT "???????? ");
581 + else
582 + printk(KERN_CONT "%02x ", c);
583 + }
584 + printk("\n");
585 +}
586 +#endif
587 +
588 /*
589 * This routine handles page faults. It determines the address and the
590 * problem, and then passes it off to one of the appropriate routines.
591 @@ -157,6 +174,16 @@ bad_area:
592 up_read(&mm->mmap_sem);
593
594 if (user_mode(regs)) {
595 +
596 +#ifdef CONFIG_PAX_PAGEEXEC
597 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
598 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
599 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
600 + do_group_exit(SIGKILL);
601 + }
602 + }
603 +#endif
604 +
605 if (exception_trace && printk_ratelimit())
606 printk("%s%s[%d]: segfault at %08lx pc %08lx "
607 "sp %08lx ecr %lu\n",
608 diff -urNp linux-2.6.32.41/arch/blackfin/kernel/kgdb.c linux-2.6.32.41/arch/blackfin/kernel/kgdb.c
609 --- linux-2.6.32.41/arch/blackfin/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
610 +++ linux-2.6.32.41/arch/blackfin/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
611 @@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vecto
612 return -1; /* this means that we do not want to exit from the handler */
613 }
614
615 -struct kgdb_arch arch_kgdb_ops = {
616 +const struct kgdb_arch arch_kgdb_ops = {
617 .gdb_bpt_instr = {0xa1},
618 #ifdef CONFIG_SMP
619 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
620 diff -urNp linux-2.6.32.41/arch/blackfin/mach-common/pm.c linux-2.6.32.41/arch/blackfin/mach-common/pm.c
621 --- linux-2.6.32.41/arch/blackfin/mach-common/pm.c 2011-03-27 14:31:47.000000000 -0400
622 +++ linux-2.6.32.41/arch/blackfin/mach-common/pm.c 2011-04-17 15:56:45.000000000 -0400
623 @@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t
624 return 0;
625 }
626
627 -struct platform_suspend_ops bfin_pm_ops = {
628 +const struct platform_suspend_ops bfin_pm_ops = {
629 .enter = bfin_pm_enter,
630 .valid = bfin_pm_valid,
631 };
632 diff -urNp linux-2.6.32.41/arch/frv/include/asm/kmap_types.h linux-2.6.32.41/arch/frv/include/asm/kmap_types.h
633 --- linux-2.6.32.41/arch/frv/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
634 +++ linux-2.6.32.41/arch/frv/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
635 @@ -23,6 +23,7 @@ enum km_type {
636 KM_IRQ1,
637 KM_SOFTIRQ0,
638 KM_SOFTIRQ1,
639 + KM_CLEARPAGE,
640 KM_TYPE_NR
641 };
642
643 diff -urNp linux-2.6.32.41/arch/frv/mm/elf-fdpic.c linux-2.6.32.41/arch/frv/mm/elf-fdpic.c
644 --- linux-2.6.32.41/arch/frv/mm/elf-fdpic.c 2011-03-27 14:31:47.000000000 -0400
645 +++ linux-2.6.32.41/arch/frv/mm/elf-fdpic.c 2011-04-17 15:56:45.000000000 -0400
646 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
647 if (addr) {
648 addr = PAGE_ALIGN(addr);
649 vma = find_vma(current->mm, addr);
650 - if (TASK_SIZE - len >= addr &&
651 - (!vma || addr + len <= vma->vm_start))
652 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
653 goto success;
654 }
655
656 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
657 for (; vma; vma = vma->vm_next) {
658 if (addr > limit)
659 break;
660 - if (addr + len <= vma->vm_start)
661 + if (check_heap_stack_gap(vma, addr, len))
662 goto success;
663 addr = vma->vm_end;
664 }
665 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
666 for (; vma; vma = vma->vm_next) {
667 if (addr > limit)
668 break;
669 - if (addr + len <= vma->vm_start)
670 + if (check_heap_stack_gap(vma, addr, len))
671 goto success;
672 addr = vma->vm_end;
673 }
674 diff -urNp linux-2.6.32.41/arch/ia64/hp/common/hwsw_iommu.c linux-2.6.32.41/arch/ia64/hp/common/hwsw_iommu.c
675 --- linux-2.6.32.41/arch/ia64/hp/common/hwsw_iommu.c 2011-03-27 14:31:47.000000000 -0400
676 +++ linux-2.6.32.41/arch/ia64/hp/common/hwsw_iommu.c 2011-04-17 15:56:45.000000000 -0400
677 @@ -17,7 +17,7 @@
678 #include <linux/swiotlb.h>
679 #include <asm/machvec.h>
680
681 -extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
682 +extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
683
684 /* swiotlb declarations & definitions: */
685 extern int swiotlb_late_init_with_default_size (size_t size);
686 @@ -33,7 +33,7 @@ static inline int use_swiotlb(struct dev
687 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
688 }
689
690 -struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
691 +const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
692 {
693 if (use_swiotlb(dev))
694 return &swiotlb_dma_ops;
695 diff -urNp linux-2.6.32.41/arch/ia64/hp/common/sba_iommu.c linux-2.6.32.41/arch/ia64/hp/common/sba_iommu.c
696 --- linux-2.6.32.41/arch/ia64/hp/common/sba_iommu.c 2011-03-27 14:31:47.000000000 -0400
697 +++ linux-2.6.32.41/arch/ia64/hp/common/sba_iommu.c 2011-04-17 15:56:45.000000000 -0400
698 @@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_d
699 },
700 };
701
702 -extern struct dma_map_ops swiotlb_dma_ops;
703 +extern const struct dma_map_ops swiotlb_dma_ops;
704
705 static int __init
706 sba_init(void)
707 @@ -2211,7 +2211,7 @@ sba_page_override(char *str)
708
709 __setup("sbapagesize=",sba_page_override);
710
711 -struct dma_map_ops sba_dma_ops = {
712 +const struct dma_map_ops sba_dma_ops = {
713 .alloc_coherent = sba_alloc_coherent,
714 .free_coherent = sba_free_coherent,
715 .map_page = sba_map_page,
716 diff -urNp linux-2.6.32.41/arch/ia64/ia32/binfmt_elf32.c linux-2.6.32.41/arch/ia64/ia32/binfmt_elf32.c
717 --- linux-2.6.32.41/arch/ia64/ia32/binfmt_elf32.c 2011-03-27 14:31:47.000000000 -0400
718 +++ linux-2.6.32.41/arch/ia64/ia32/binfmt_elf32.c 2011-04-17 15:56:45.000000000 -0400
719 @@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_
720
721 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
722
723 +#ifdef CONFIG_PAX_ASLR
724 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
725 +
726 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
727 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
728 +#endif
729 +
730 /* Ugly but avoids duplication */
731 #include "../../../fs/binfmt_elf.c"
732
733 diff -urNp linux-2.6.32.41/arch/ia64/ia32/ia32priv.h linux-2.6.32.41/arch/ia64/ia32/ia32priv.h
734 --- linux-2.6.32.41/arch/ia64/ia32/ia32priv.h 2011-03-27 14:31:47.000000000 -0400
735 +++ linux-2.6.32.41/arch/ia64/ia32/ia32priv.h 2011-04-17 15:56:45.000000000 -0400
736 @@ -296,7 +296,14 @@ typedef struct compat_siginfo {
737 #define ELF_DATA ELFDATA2LSB
738 #define ELF_ARCH EM_386
739
740 -#define IA32_STACK_TOP IA32_PAGE_OFFSET
741 +#ifdef CONFIG_PAX_RANDUSTACK
742 +#define __IA32_DELTA_STACK (current->mm->delta_stack)
743 +#else
744 +#define __IA32_DELTA_STACK 0UL
745 +#endif
746 +
747 +#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
748 +
749 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
750 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
751
752 diff -urNp linux-2.6.32.41/arch/ia64/include/asm/dma-mapping.h linux-2.6.32.41/arch/ia64/include/asm/dma-mapping.h
753 --- linux-2.6.32.41/arch/ia64/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
754 +++ linux-2.6.32.41/arch/ia64/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
755 @@ -12,7 +12,7 @@
756
757 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
758
759 -extern struct dma_map_ops *dma_ops;
760 +extern const struct dma_map_ops *dma_ops;
761 extern struct ia64_machine_vector ia64_mv;
762 extern void set_iommu_machvec(void);
763
764 @@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct d
765 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
766 dma_addr_t *daddr, gfp_t gfp)
767 {
768 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
769 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
770 void *caddr;
771
772 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
773 @@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(s
774 static inline void dma_free_coherent(struct device *dev, size_t size,
775 void *caddr, dma_addr_t daddr)
776 {
777 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
778 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
779 debug_dma_free_coherent(dev, size, caddr, daddr);
780 ops->free_coherent(dev, size, caddr, daddr);
781 }
782 @@ -49,13 +49,13 @@ static inline void dma_free_coherent(str
783
784 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
785 {
786 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
787 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
788 return ops->mapping_error(dev, daddr);
789 }
790
791 static inline int dma_supported(struct device *dev, u64 mask)
792 {
793 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
794 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
795 return ops->dma_supported(dev, mask);
796 }
797
798 diff -urNp linux-2.6.32.41/arch/ia64/include/asm/elf.h linux-2.6.32.41/arch/ia64/include/asm/elf.h
799 --- linux-2.6.32.41/arch/ia64/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
800 +++ linux-2.6.32.41/arch/ia64/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
801 @@ -43,6 +43,13 @@
802 */
803 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
804
805 +#ifdef CONFIG_PAX_ASLR
806 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
807 +
808 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
809 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
810 +#endif
811 +
812 #define PT_IA_64_UNWIND 0x70000001
813
814 /* IA-64 relocations: */
815 diff -urNp linux-2.6.32.41/arch/ia64/include/asm/machvec.h linux-2.6.32.41/arch/ia64/include/asm/machvec.h
816 --- linux-2.6.32.41/arch/ia64/include/asm/machvec.h 2011-03-27 14:31:47.000000000 -0400
817 +++ linux-2.6.32.41/arch/ia64/include/asm/machvec.h 2011-04-17 15:56:45.000000000 -0400
818 @@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event
819 /* DMA-mapping interface: */
820 typedef void ia64_mv_dma_init (void);
821 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
822 -typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
823 +typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
824
825 /*
826 * WARNING: The legacy I/O space is _architected_. Platforms are
827 @@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(co
828 # endif /* CONFIG_IA64_GENERIC */
829
830 extern void swiotlb_dma_init(void);
831 -extern struct dma_map_ops *dma_get_ops(struct device *);
832 +extern const struct dma_map_ops *dma_get_ops(struct device *);
833
834 /*
835 * Define default versions so we can extend machvec for new platforms without having
836 diff -urNp linux-2.6.32.41/arch/ia64/include/asm/pgtable.h linux-2.6.32.41/arch/ia64/include/asm/pgtable.h
837 --- linux-2.6.32.41/arch/ia64/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
838 +++ linux-2.6.32.41/arch/ia64/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
839 @@ -12,7 +12,7 @@
840 * David Mosberger-Tang <davidm@hpl.hp.com>
841 */
842
843 -
844 +#include <linux/const.h>
845 #include <asm/mman.h>
846 #include <asm/page.h>
847 #include <asm/processor.h>
848 @@ -143,6 +143,17 @@
849 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
850 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
851 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
852 +
853 +#ifdef CONFIG_PAX_PAGEEXEC
854 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
855 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
856 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
857 +#else
858 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
859 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
860 +# define PAGE_COPY_NOEXEC PAGE_COPY
861 +#endif
862 +
863 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
864 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
865 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
866 diff -urNp linux-2.6.32.41/arch/ia64/include/asm/spinlock.h linux-2.6.32.41/arch/ia64/include/asm/spinlock.h
867 --- linux-2.6.32.41/arch/ia64/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
868 +++ linux-2.6.32.41/arch/ia64/include/asm/spinlock.h 2011-04-17 15:56:45.000000000 -0400
869 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
870 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
871
872 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
873 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
874 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
875 }
876
877 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
878 diff -urNp linux-2.6.32.41/arch/ia64/include/asm/uaccess.h linux-2.6.32.41/arch/ia64/include/asm/uaccess.h
879 --- linux-2.6.32.41/arch/ia64/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
880 +++ linux-2.6.32.41/arch/ia64/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
881 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
882 const void *__cu_from = (from); \
883 long __cu_len = (n); \
884 \
885 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
886 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
887 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
888 __cu_len; \
889 })
890 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
891 long __cu_len = (n); \
892 \
893 __chk_user_ptr(__cu_from); \
894 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
895 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
896 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
897 __cu_len; \
898 })
899 diff -urNp linux-2.6.32.41/arch/ia64/kernel/dma-mapping.c linux-2.6.32.41/arch/ia64/kernel/dma-mapping.c
900 --- linux-2.6.32.41/arch/ia64/kernel/dma-mapping.c 2011-03-27 14:31:47.000000000 -0400
901 +++ linux-2.6.32.41/arch/ia64/kernel/dma-mapping.c 2011-04-17 15:56:45.000000000 -0400
902 @@ -3,7 +3,7 @@
903 /* Set this to 1 if there is a HW IOMMU in the system */
904 int iommu_detected __read_mostly;
905
906 -struct dma_map_ops *dma_ops;
907 +const struct dma_map_ops *dma_ops;
908 EXPORT_SYMBOL(dma_ops);
909
910 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
911 @@ -16,7 +16,7 @@ static int __init dma_init(void)
912 }
913 fs_initcall(dma_init);
914
915 -struct dma_map_ops *dma_get_ops(struct device *dev)
916 +const struct dma_map_ops *dma_get_ops(struct device *dev)
917 {
918 return dma_ops;
919 }
920 diff -urNp linux-2.6.32.41/arch/ia64/kernel/module.c linux-2.6.32.41/arch/ia64/kernel/module.c
921 --- linux-2.6.32.41/arch/ia64/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
922 +++ linux-2.6.32.41/arch/ia64/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
923 @@ -315,8 +315,7 @@ module_alloc (unsigned long size)
924 void
925 module_free (struct module *mod, void *module_region)
926 {
927 - if (mod && mod->arch.init_unw_table &&
928 - module_region == mod->module_init) {
929 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
930 unw_remove_unwind_table(mod->arch.init_unw_table);
931 mod->arch.init_unw_table = NULL;
932 }
933 @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
934 }
935
936 static inline int
937 +in_init_rx (const struct module *mod, uint64_t addr)
938 +{
939 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
940 +}
941 +
942 +static inline int
943 +in_init_rw (const struct module *mod, uint64_t addr)
944 +{
945 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
946 +}
947 +
948 +static inline int
949 in_init (const struct module *mod, uint64_t addr)
950 {
951 - return addr - (uint64_t) mod->module_init < mod->init_size;
952 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
953 +}
954 +
955 +static inline int
956 +in_core_rx (const struct module *mod, uint64_t addr)
957 +{
958 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
959 +}
960 +
961 +static inline int
962 +in_core_rw (const struct module *mod, uint64_t addr)
963 +{
964 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
965 }
966
967 static inline int
968 in_core (const struct module *mod, uint64_t addr)
969 {
970 - return addr - (uint64_t) mod->module_core < mod->core_size;
971 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
972 }
973
974 static inline int
975 @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
976 break;
977
978 case RV_BDREL:
979 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
980 + if (in_init_rx(mod, val))
981 + val -= (uint64_t) mod->module_init_rx;
982 + else if (in_init_rw(mod, val))
983 + val -= (uint64_t) mod->module_init_rw;
984 + else if (in_core_rx(mod, val))
985 + val -= (uint64_t) mod->module_core_rx;
986 + else if (in_core_rw(mod, val))
987 + val -= (uint64_t) mod->module_core_rw;
988 break;
989
990 case RV_LTV:
991 @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
992 * addresses have been selected...
993 */
994 uint64_t gp;
995 - if (mod->core_size > MAX_LTOFF)
996 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
997 /*
998 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
999 * at the end of the module.
1000 */
1001 - gp = mod->core_size - MAX_LTOFF / 2;
1002 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1003 else
1004 - gp = mod->core_size / 2;
1005 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1006 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1007 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1008 mod->arch.gp = gp;
1009 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1010 }
1011 diff -urNp linux-2.6.32.41/arch/ia64/kernel/pci-dma.c linux-2.6.32.41/arch/ia64/kernel/pci-dma.c
1012 --- linux-2.6.32.41/arch/ia64/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
1013 +++ linux-2.6.32.41/arch/ia64/kernel/pci-dma.c 2011-04-17 15:56:45.000000000 -0400
1014 @@ -43,7 +43,7 @@ struct device fallback_dev = {
1015 .dma_mask = &fallback_dev.coherent_dma_mask,
1016 };
1017
1018 -extern struct dma_map_ops intel_dma_ops;
1019 +extern const struct dma_map_ops intel_dma_ops;
1020
1021 static int __init pci_iommu_init(void)
1022 {
1023 @@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *d
1024 }
1025 EXPORT_SYMBOL(iommu_dma_supported);
1026
1027 +extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
1028 +extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
1029 +extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1030 +extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1031 +extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1032 +extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1033 +extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
1034 +
1035 +static const struct dma_map_ops intel_iommu_dma_ops = {
1036 + /* from drivers/pci/intel-iommu.c:intel_dma_ops */
1037 + .alloc_coherent = intel_alloc_coherent,
1038 + .free_coherent = intel_free_coherent,
1039 + .map_sg = intel_map_sg,
1040 + .unmap_sg = intel_unmap_sg,
1041 + .map_page = intel_map_page,
1042 + .unmap_page = intel_unmap_page,
1043 + .mapping_error = intel_mapping_error,
1044 +
1045 + .sync_single_for_cpu = machvec_dma_sync_single,
1046 + .sync_sg_for_cpu = machvec_dma_sync_sg,
1047 + .sync_single_for_device = machvec_dma_sync_single,
1048 + .sync_sg_for_device = machvec_dma_sync_sg,
1049 + .dma_supported = iommu_dma_supported,
1050 +};
1051 +
1052 void __init pci_iommu_alloc(void)
1053 {
1054 - dma_ops = &intel_dma_ops;
1055 -
1056 - dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
1057 - dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
1058 - dma_ops->sync_single_for_device = machvec_dma_sync_single;
1059 - dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
1060 - dma_ops->dma_supported = iommu_dma_supported;
1061 + dma_ops = &intel_iommu_dma_ops;
1062
1063 /*
1064 * The order of these functions is important for
1065 diff -urNp linux-2.6.32.41/arch/ia64/kernel/pci-swiotlb.c linux-2.6.32.41/arch/ia64/kernel/pci-swiotlb.c
1066 --- linux-2.6.32.41/arch/ia64/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
1067 +++ linux-2.6.32.41/arch/ia64/kernel/pci-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
1068 @@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent
1069 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1070 }
1071
1072 -struct dma_map_ops swiotlb_dma_ops = {
1073 +const struct dma_map_ops swiotlb_dma_ops = {
1074 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1075 .free_coherent = swiotlb_free_coherent,
1076 .map_page = swiotlb_map_page,
1077 diff -urNp linux-2.6.32.41/arch/ia64/kernel/sys_ia64.c linux-2.6.32.41/arch/ia64/kernel/sys_ia64.c
1078 --- linux-2.6.32.41/arch/ia64/kernel/sys_ia64.c 2011-03-27 14:31:47.000000000 -0400
1079 +++ linux-2.6.32.41/arch/ia64/kernel/sys_ia64.c 2011-04-17 15:56:45.000000000 -0400
1080 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
1081 if (REGION_NUMBER(addr) == RGN_HPAGE)
1082 addr = 0;
1083 #endif
1084 +
1085 +#ifdef CONFIG_PAX_RANDMMAP
1086 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1087 + addr = mm->free_area_cache;
1088 + else
1089 +#endif
1090 +
1091 if (!addr)
1092 addr = mm->free_area_cache;
1093
1094 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
1095 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1096 /* At this point: (!vma || addr < vma->vm_end). */
1097 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1098 - if (start_addr != TASK_UNMAPPED_BASE) {
1099 + if (start_addr != mm->mmap_base) {
1100 /* Start a new search --- just in case we missed some holes. */
1101 - addr = TASK_UNMAPPED_BASE;
1102 + addr = mm->mmap_base;
1103 goto full_search;
1104 }
1105 return -ENOMEM;
1106 }
1107 - if (!vma || addr + len <= vma->vm_start) {
1108 + if (check_heap_stack_gap(vma, addr, len)) {
1109 /* Remember the address where we stopped this search: */
1110 mm->free_area_cache = addr + len;
1111 return addr;
1112 diff -urNp linux-2.6.32.41/arch/ia64/kernel/topology.c linux-2.6.32.41/arch/ia64/kernel/topology.c
1113 --- linux-2.6.32.41/arch/ia64/kernel/topology.c 2011-03-27 14:31:47.000000000 -0400
1114 +++ linux-2.6.32.41/arch/ia64/kernel/topology.c 2011-04-17 15:56:45.000000000 -0400
1115 @@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject
1116 return ret;
1117 }
1118
1119 -static struct sysfs_ops cache_sysfs_ops = {
1120 +static const struct sysfs_ops cache_sysfs_ops = {
1121 .show = cache_show
1122 };
1123
1124 diff -urNp linux-2.6.32.41/arch/ia64/kernel/vmlinux.lds.S linux-2.6.32.41/arch/ia64/kernel/vmlinux.lds.S
1125 --- linux-2.6.32.41/arch/ia64/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
1126 +++ linux-2.6.32.41/arch/ia64/kernel/vmlinux.lds.S 2011-04-17 15:56:45.000000000 -0400
1127 @@ -190,7 +190,7 @@ SECTIONS
1128 /* Per-cpu data: */
1129 . = ALIGN(PERCPU_PAGE_SIZE);
1130 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1131 - __phys_per_cpu_start = __per_cpu_load;
1132 + __phys_per_cpu_start = per_cpu_load;
1133 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1134 * into percpu page size
1135 */
1136 diff -urNp linux-2.6.32.41/arch/ia64/mm/fault.c linux-2.6.32.41/arch/ia64/mm/fault.c
1137 --- linux-2.6.32.41/arch/ia64/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1138 +++ linux-2.6.32.41/arch/ia64/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1139 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned
1140 return pte_present(pte);
1141 }
1142
1143 +#ifdef CONFIG_PAX_PAGEEXEC
1144 +void pax_report_insns(void *pc, void *sp)
1145 +{
1146 + unsigned long i;
1147 +
1148 + printk(KERN_ERR "PAX: bytes at PC: ");
1149 + for (i = 0; i < 8; i++) {
1150 + unsigned int c;
1151 + if (get_user(c, (unsigned int *)pc+i))
1152 + printk(KERN_CONT "???????? ");
1153 + else
1154 + printk(KERN_CONT "%08x ", c);
1155 + }
1156 + printk("\n");
1157 +}
1158 +#endif
1159 +
1160 void __kprobes
1161 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1162 {
1163 @@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres
1164 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1165 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1166
1167 - if ((vma->vm_flags & mask) != mask)
1168 + if ((vma->vm_flags & mask) != mask) {
1169 +
1170 +#ifdef CONFIG_PAX_PAGEEXEC
1171 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1172 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1173 + goto bad_area;
1174 +
1175 + up_read(&mm->mmap_sem);
1176 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1177 + do_group_exit(SIGKILL);
1178 + }
1179 +#endif
1180 +
1181 goto bad_area;
1182
1183 + }
1184 +
1185 survive:
1186 /*
1187 * If for any reason at all we couldn't handle the fault, make
1188 diff -urNp linux-2.6.32.41/arch/ia64/mm/hugetlbpage.c linux-2.6.32.41/arch/ia64/mm/hugetlbpage.c
1189 --- linux-2.6.32.41/arch/ia64/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
1190 +++ linux-2.6.32.41/arch/ia64/mm/hugetlbpage.c 2011-04-17 15:56:45.000000000 -0400
1191 @@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(
1192 /* At this point: (!vmm || addr < vmm->vm_end). */
1193 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1194 return -ENOMEM;
1195 - if (!vmm || (addr + len) <= vmm->vm_start)
1196 + if (check_heap_stack_gap(vmm, addr, len))
1197 return addr;
1198 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1199 }
1200 diff -urNp linux-2.6.32.41/arch/ia64/mm/init.c linux-2.6.32.41/arch/ia64/mm/init.c
1201 --- linux-2.6.32.41/arch/ia64/mm/init.c 2011-03-27 14:31:47.000000000 -0400
1202 +++ linux-2.6.32.41/arch/ia64/mm/init.c 2011-04-17 15:56:45.000000000 -0400
1203 @@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1204 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1205 vma->vm_end = vma->vm_start + PAGE_SIZE;
1206 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1207 +
1208 +#ifdef CONFIG_PAX_PAGEEXEC
1209 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1210 + vma->vm_flags &= ~VM_EXEC;
1211 +
1212 +#ifdef CONFIG_PAX_MPROTECT
1213 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1214 + vma->vm_flags &= ~VM_MAYEXEC;
1215 +#endif
1216 +
1217 + }
1218 +#endif
1219 +
1220 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1221 down_write(&current->mm->mmap_sem);
1222 if (insert_vm_struct(current->mm, vma)) {
1223 diff -urNp linux-2.6.32.41/arch/ia64/sn/pci/pci_dma.c linux-2.6.32.41/arch/ia64/sn/pci/pci_dma.c
1224 --- linux-2.6.32.41/arch/ia64/sn/pci/pci_dma.c 2011-03-27 14:31:47.000000000 -0400
1225 +++ linux-2.6.32.41/arch/ia64/sn/pci/pci_dma.c 2011-04-17 15:56:45.000000000 -0400
1226 @@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *
1227 return ret;
1228 }
1229
1230 -static struct dma_map_ops sn_dma_ops = {
1231 +static const struct dma_map_ops sn_dma_ops = {
1232 .alloc_coherent = sn_dma_alloc_coherent,
1233 .free_coherent = sn_dma_free_coherent,
1234 .map_page = sn_dma_map_page,
1235 diff -urNp linux-2.6.32.41/arch/m32r/lib/usercopy.c linux-2.6.32.41/arch/m32r/lib/usercopy.c
1236 --- linux-2.6.32.41/arch/m32r/lib/usercopy.c 2011-03-27 14:31:47.000000000 -0400
1237 +++ linux-2.6.32.41/arch/m32r/lib/usercopy.c 2011-04-17 15:56:45.000000000 -0400
1238 @@ -14,6 +14,9 @@
1239 unsigned long
1240 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1241 {
1242 + if ((long)n < 0)
1243 + return n;
1244 +
1245 prefetch(from);
1246 if (access_ok(VERIFY_WRITE, to, n))
1247 __copy_user(to,from,n);
1248 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1249 unsigned long
1250 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1251 {
1252 + if ((long)n < 0)
1253 + return n;
1254 +
1255 prefetchw(to);
1256 if (access_ok(VERIFY_READ, from, n))
1257 __copy_user_zeroing(to,from,n);
1258 diff -urNp linux-2.6.32.41/arch/mips/alchemy/devboards/pm.c linux-2.6.32.41/arch/mips/alchemy/devboards/pm.c
1259 --- linux-2.6.32.41/arch/mips/alchemy/devboards/pm.c 2011-03-27 14:31:47.000000000 -0400
1260 +++ linux-2.6.32.41/arch/mips/alchemy/devboards/pm.c 2011-04-17 15:56:45.000000000 -0400
1261 @@ -78,7 +78,7 @@ static void db1x_pm_end(void)
1262
1263 }
1264
1265 -static struct platform_suspend_ops db1x_pm_ops = {
1266 +static const struct platform_suspend_ops db1x_pm_ops = {
1267 .valid = suspend_valid_only_mem,
1268 .begin = db1x_pm_begin,
1269 .enter = db1x_pm_enter,
1270 diff -urNp linux-2.6.32.41/arch/mips/include/asm/elf.h linux-2.6.32.41/arch/mips/include/asm/elf.h
1271 --- linux-2.6.32.41/arch/mips/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1272 +++ linux-2.6.32.41/arch/mips/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1273 @@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_str
1274 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1275 #endif
1276
1277 +#ifdef CONFIG_PAX_ASLR
1278 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1279 +
1280 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1281 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1282 +#endif
1283 +
1284 #endif /* _ASM_ELF_H */
1285 diff -urNp linux-2.6.32.41/arch/mips/include/asm/page.h linux-2.6.32.41/arch/mips/include/asm/page.h
1286 --- linux-2.6.32.41/arch/mips/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
1287 +++ linux-2.6.32.41/arch/mips/include/asm/page.h 2011-04-17 15:56:45.000000000 -0400
1288 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1289 #ifdef CONFIG_CPU_MIPS32
1290 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1291 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1292 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1293 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1294 #else
1295 typedef struct { unsigned long long pte; } pte_t;
1296 #define pte_val(x) ((x).pte)
1297 diff -urNp linux-2.6.32.41/arch/mips/include/asm/system.h linux-2.6.32.41/arch/mips/include/asm/system.h
1298 --- linux-2.6.32.41/arch/mips/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
1299 +++ linux-2.6.32.41/arch/mips/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
1300 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1301 */
1302 #define __ARCH_WANT_UNLOCKED_CTXSW
1303
1304 -extern unsigned long arch_align_stack(unsigned long sp);
1305 +#define arch_align_stack(x) ((x) & ~0xfUL)
1306
1307 #endif /* _ASM_SYSTEM_H */
1308 diff -urNp linux-2.6.32.41/arch/mips/kernel/binfmt_elfn32.c linux-2.6.32.41/arch/mips/kernel/binfmt_elfn32.c
1309 --- linux-2.6.32.41/arch/mips/kernel/binfmt_elfn32.c 2011-03-27 14:31:47.000000000 -0400
1310 +++ linux-2.6.32.41/arch/mips/kernel/binfmt_elfn32.c 2011-04-17 15:56:45.000000000 -0400
1311 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1312 #undef ELF_ET_DYN_BASE
1313 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1314
1315 +#ifdef CONFIG_PAX_ASLR
1316 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1317 +
1318 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1319 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1320 +#endif
1321 +
1322 #include <asm/processor.h>
1323 #include <linux/module.h>
1324 #include <linux/elfcore.h>
1325 diff -urNp linux-2.6.32.41/arch/mips/kernel/binfmt_elfo32.c linux-2.6.32.41/arch/mips/kernel/binfmt_elfo32.c
1326 --- linux-2.6.32.41/arch/mips/kernel/binfmt_elfo32.c 2011-03-27 14:31:47.000000000 -0400
1327 +++ linux-2.6.32.41/arch/mips/kernel/binfmt_elfo32.c 2011-04-17 15:56:45.000000000 -0400
1328 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1329 #undef ELF_ET_DYN_BASE
1330 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1331
1332 +#ifdef CONFIG_PAX_ASLR
1333 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1334 +
1335 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1336 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1337 +#endif
1338 +
1339 #include <asm/processor.h>
1340
1341 /*
1342 diff -urNp linux-2.6.32.41/arch/mips/kernel/kgdb.c linux-2.6.32.41/arch/mips/kernel/kgdb.c
1343 --- linux-2.6.32.41/arch/mips/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
1344 +++ linux-2.6.32.41/arch/mips/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
1345 @@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vecto
1346 return -1;
1347 }
1348
1349 +/* cannot be const */
1350 struct kgdb_arch arch_kgdb_ops;
1351
1352 /*
1353 diff -urNp linux-2.6.32.41/arch/mips/kernel/process.c linux-2.6.32.41/arch/mips/kernel/process.c
1354 --- linux-2.6.32.41/arch/mips/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
1355 +++ linux-2.6.32.41/arch/mips/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
1356 @@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_stru
1357 out:
1358 return pc;
1359 }
1360 -
1361 -/*
1362 - * Don't forget that the stack pointer must be aligned on a 8 bytes
1363 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1364 - */
1365 -unsigned long arch_align_stack(unsigned long sp)
1366 -{
1367 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1368 - sp -= get_random_int() & ~PAGE_MASK;
1369 -
1370 - return sp & ALMASK;
1371 -}
1372 diff -urNp linux-2.6.32.41/arch/mips/kernel/syscall.c linux-2.6.32.41/arch/mips/kernel/syscall.c
1373 --- linux-2.6.32.41/arch/mips/kernel/syscall.c 2011-03-27 14:31:47.000000000 -0400
1374 +++ linux-2.6.32.41/arch/mips/kernel/syscall.c 2011-04-17 15:56:45.000000000 -0400
1375 @@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(str
1376 do_color_align = 0;
1377 if (filp || (flags & MAP_SHARED))
1378 do_color_align = 1;
1379 +
1380 +#ifdef CONFIG_PAX_RANDMMAP
1381 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1382 +#endif
1383 +
1384 if (addr) {
1385 if (do_color_align)
1386 addr = COLOUR_ALIGN(addr, pgoff);
1387 else
1388 addr = PAGE_ALIGN(addr);
1389 vmm = find_vma(current->mm, addr);
1390 - if (task_size - len >= addr &&
1391 - (!vmm || addr + len <= vmm->vm_start))
1392 + if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
1393 return addr;
1394 }
1395 - addr = TASK_UNMAPPED_BASE;
1396 + addr = current->mm->mmap_base;
1397 if (do_color_align)
1398 addr = COLOUR_ALIGN(addr, pgoff);
1399 else
1400 @@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(str
1401 /* At this point: (!vmm || addr < vmm->vm_end). */
1402 if (task_size - len < addr)
1403 return -ENOMEM;
1404 - if (!vmm || addr + len <= vmm->vm_start)
1405 + if (check_heap_stack_gap(vmm, addr, len))
1406 return addr;
1407 addr = vmm->vm_end;
1408 if (do_color_align)
1409 diff -urNp linux-2.6.32.41/arch/mips/mm/fault.c linux-2.6.32.41/arch/mips/mm/fault.c
1410 --- linux-2.6.32.41/arch/mips/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1411 +++ linux-2.6.32.41/arch/mips/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1412 @@ -26,6 +26,23 @@
1413 #include <asm/ptrace.h>
1414 #include <asm/highmem.h> /* For VMALLOC_END */
1415
1416 +#ifdef CONFIG_PAX_PAGEEXEC
1417 +void pax_report_insns(void *pc, void *sp)
1418 +{
1419 + unsigned long i;
1420 +
1421 + printk(KERN_ERR "PAX: bytes at PC: ");
1422 + for (i = 0; i < 5; i++) {
1423 + unsigned int c;
1424 + if (get_user(c, (unsigned int *)pc+i))
1425 + printk(KERN_CONT "???????? ");
1426 + else
1427 + printk(KERN_CONT "%08x ", c);
1428 + }
1429 + printk("\n");
1430 +}
1431 +#endif
1432 +
1433 /*
1434 * This routine handles page faults. It determines the address,
1435 * and the problem, and then passes it off to one of the appropriate
1436 diff -urNp linux-2.6.32.41/arch/parisc/include/asm/elf.h linux-2.6.32.41/arch/parisc/include/asm/elf.h
1437 --- linux-2.6.32.41/arch/parisc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1438 +++ linux-2.6.32.41/arch/parisc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1439 @@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration..
1440
1441 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1442
1443 +#ifdef CONFIG_PAX_ASLR
1444 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
1445 +
1446 +#define PAX_DELTA_MMAP_LEN 16
1447 +#define PAX_DELTA_STACK_LEN 16
1448 +#endif
1449 +
1450 /* This yields a mask that user programs can use to figure out what
1451 instruction set this CPU supports. This could be done in user space,
1452 but it's not easy, and we've already done it here. */
1453 diff -urNp linux-2.6.32.41/arch/parisc/include/asm/pgtable.h linux-2.6.32.41/arch/parisc/include/asm/pgtable.h
1454 --- linux-2.6.32.41/arch/parisc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1455 +++ linux-2.6.32.41/arch/parisc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1456 @@ -207,6 +207,17 @@
1457 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1458 #define PAGE_COPY PAGE_EXECREAD
1459 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1460 +
1461 +#ifdef CONFIG_PAX_PAGEEXEC
1462 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1463 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1464 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1465 +#else
1466 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1467 +# define PAGE_COPY_NOEXEC PAGE_COPY
1468 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1469 +#endif
1470 +
1471 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1472 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
1473 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
1474 diff -urNp linux-2.6.32.41/arch/parisc/kernel/module.c linux-2.6.32.41/arch/parisc/kernel/module.c
1475 --- linux-2.6.32.41/arch/parisc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
1476 +++ linux-2.6.32.41/arch/parisc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
1477 @@ -95,16 +95,38 @@
1478
1479 /* three functions to determine where in the module core
1480 * or init pieces the location is */
1481 +static inline int in_init_rx(struct module *me, void *loc)
1482 +{
1483 + return (loc >= me->module_init_rx &&
1484 + loc < (me->module_init_rx + me->init_size_rx));
1485 +}
1486 +
1487 +static inline int in_init_rw(struct module *me, void *loc)
1488 +{
1489 + return (loc >= me->module_init_rw &&
1490 + loc < (me->module_init_rw + me->init_size_rw));
1491 +}
1492 +
1493 static inline int in_init(struct module *me, void *loc)
1494 {
1495 - return (loc >= me->module_init &&
1496 - loc <= (me->module_init + me->init_size));
1497 + return in_init_rx(me, loc) || in_init_rw(me, loc);
1498 +}
1499 +
1500 +static inline int in_core_rx(struct module *me, void *loc)
1501 +{
1502 + return (loc >= me->module_core_rx &&
1503 + loc < (me->module_core_rx + me->core_size_rx));
1504 +}
1505 +
1506 +static inline int in_core_rw(struct module *me, void *loc)
1507 +{
1508 + return (loc >= me->module_core_rw &&
1509 + loc < (me->module_core_rw + me->core_size_rw));
1510 }
1511
1512 static inline int in_core(struct module *me, void *loc)
1513 {
1514 - return (loc >= me->module_core &&
1515 - loc <= (me->module_core + me->core_size));
1516 + return in_core_rx(me, loc) || in_core_rw(me, loc);
1517 }
1518
1519 static inline int in_local(struct module *me, void *loc)
1520 @@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_
1521 }
1522
1523 /* align things a bit */
1524 - me->core_size = ALIGN(me->core_size, 16);
1525 - me->arch.got_offset = me->core_size;
1526 - me->core_size += gots * sizeof(struct got_entry);
1527 -
1528 - me->core_size = ALIGN(me->core_size, 16);
1529 - me->arch.fdesc_offset = me->core_size;
1530 - me->core_size += fdescs * sizeof(Elf_Fdesc);
1531 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1532 + me->arch.got_offset = me->core_size_rw;
1533 + me->core_size_rw += gots * sizeof(struct got_entry);
1534 +
1535 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1536 + me->arch.fdesc_offset = me->core_size_rw;
1537 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1538
1539 me->arch.got_max = gots;
1540 me->arch.fdesc_max = fdescs;
1541 @@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module
1542
1543 BUG_ON(value == 0);
1544
1545 - got = me->module_core + me->arch.got_offset;
1546 + got = me->module_core_rw + me->arch.got_offset;
1547 for (i = 0; got[i].addr; i++)
1548 if (got[i].addr == value)
1549 goto out;
1550 @@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module
1551 #ifdef CONFIG_64BIT
1552 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1553 {
1554 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1555 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1556
1557 if (!value) {
1558 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1559 @@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module
1560
1561 /* Create new one */
1562 fdesc->addr = value;
1563 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1564 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1565 return (Elf_Addr)fdesc;
1566 }
1567 #endif /* CONFIG_64BIT */
1568 @@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
1569
1570 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1571 end = table + sechdrs[me->arch.unwind_section].sh_size;
1572 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1573 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1574
1575 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1576 me->arch.unwind_section, table, end, gp);
1577 diff -urNp linux-2.6.32.41/arch/parisc/kernel/sys_parisc.c linux-2.6.32.41/arch/parisc/kernel/sys_parisc.c
1578 --- linux-2.6.32.41/arch/parisc/kernel/sys_parisc.c 2011-03-27 14:31:47.000000000 -0400
1579 +++ linux-2.6.32.41/arch/parisc/kernel/sys_parisc.c 2011-04-17 15:56:45.000000000 -0400
1580 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1581 /* At this point: (!vma || addr < vma->vm_end). */
1582 if (TASK_SIZE - len < addr)
1583 return -ENOMEM;
1584 - if (!vma || addr + len <= vma->vm_start)
1585 + if (check_heap_stack_gap(vma, addr, len))
1586 return addr;
1587 addr = vma->vm_end;
1588 }
1589 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1590 /* At this point: (!vma || addr < vma->vm_end). */
1591 if (TASK_SIZE - len < addr)
1592 return -ENOMEM;
1593 - if (!vma || addr + len <= vma->vm_start)
1594 + if (check_heap_stack_gap(vma, addr, len))
1595 return addr;
1596 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1597 if (addr < vma->vm_end) /* handle wraparound */
1598 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1599 if (flags & MAP_FIXED)
1600 return addr;
1601 if (!addr)
1602 - addr = TASK_UNMAPPED_BASE;
1603 + addr = current->mm->mmap_base;
1604
1605 if (filp) {
1606 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1607 diff -urNp linux-2.6.32.41/arch/parisc/kernel/traps.c linux-2.6.32.41/arch/parisc/kernel/traps.c
1608 --- linux-2.6.32.41/arch/parisc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
1609 +++ linux-2.6.32.41/arch/parisc/kernel/traps.c 2011-04-17 15:56:45.000000000 -0400
1610 @@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1611
1612 down_read(&current->mm->mmap_sem);
1613 vma = find_vma(current->mm,regs->iaoq[0]);
1614 - if (vma && (regs->iaoq[0] >= vma->vm_start)
1615 - && (vma->vm_flags & VM_EXEC)) {
1616 -
1617 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1618 fault_address = regs->iaoq[0];
1619 fault_space = regs->iasq[0];
1620
1621 diff -urNp linux-2.6.32.41/arch/parisc/mm/fault.c linux-2.6.32.41/arch/parisc/mm/fault.c
1622 --- linux-2.6.32.41/arch/parisc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1623 +++ linux-2.6.32.41/arch/parisc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1624 @@ -15,6 +15,7 @@
1625 #include <linux/sched.h>
1626 #include <linux/interrupt.h>
1627 #include <linux/module.h>
1628 +#include <linux/unistd.h>
1629
1630 #include <asm/uaccess.h>
1631 #include <asm/traps.h>
1632 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1633 static unsigned long
1634 parisc_acctyp(unsigned long code, unsigned int inst)
1635 {
1636 - if (code == 6 || code == 16)
1637 + if (code == 6 || code == 7 || code == 16)
1638 return VM_EXEC;
1639
1640 switch (inst & 0xf0000000) {
1641 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1642 }
1643 #endif
1644
1645 +#ifdef CONFIG_PAX_PAGEEXEC
1646 +/*
1647 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1648 + *
1649 + * returns 1 when task should be killed
1650 + * 2 when rt_sigreturn trampoline was detected
1651 + * 3 when unpatched PLT trampoline was detected
1652 + */
1653 +static int pax_handle_fetch_fault(struct pt_regs *regs)
1654 +{
1655 +
1656 +#ifdef CONFIG_PAX_EMUPLT
1657 + int err;
1658 +
1659 + do { /* PaX: unpatched PLT emulation */
1660 + unsigned int bl, depwi;
1661 +
1662 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1663 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1664 +
1665 + if (err)
1666 + break;
1667 +
1668 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1669 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1670 +
1671 + err = get_user(ldw, (unsigned int *)addr);
1672 + err |= get_user(bv, (unsigned int *)(addr+4));
1673 + err |= get_user(ldw2, (unsigned int *)(addr+8));
1674 +
1675 + if (err)
1676 + break;
1677 +
1678 + if (ldw == 0x0E801096U &&
1679 + bv == 0xEAC0C000U &&
1680 + ldw2 == 0x0E881095U)
1681 + {
1682 + unsigned int resolver, map;
1683 +
1684 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1685 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1686 + if (err)
1687 + break;
1688 +
1689 + regs->gr[20] = instruction_pointer(regs)+8;
1690 + regs->gr[21] = map;
1691 + regs->gr[22] = resolver;
1692 + regs->iaoq[0] = resolver | 3UL;
1693 + regs->iaoq[1] = regs->iaoq[0] + 4;
1694 + return 3;
1695 + }
1696 + }
1697 + } while (0);
1698 +#endif
1699 +
1700 +#ifdef CONFIG_PAX_EMUTRAMP
1701 +
1702 +#ifndef CONFIG_PAX_EMUSIGRT
1703 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1704 + return 1;
1705 +#endif
1706 +
1707 + do { /* PaX: rt_sigreturn emulation */
1708 + unsigned int ldi1, ldi2, bel, nop;
1709 +
1710 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1711 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1712 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1713 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1714 +
1715 + if (err)
1716 + break;
1717 +
1718 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1719 + ldi2 == 0x3414015AU &&
1720 + bel == 0xE4008200U &&
1721 + nop == 0x08000240U)
1722 + {
1723 + regs->gr[25] = (ldi1 & 2) >> 1;
1724 + regs->gr[20] = __NR_rt_sigreturn;
1725 + regs->gr[31] = regs->iaoq[1] + 16;
1726 + regs->sr[0] = regs->iasq[1];
1727 + regs->iaoq[0] = 0x100UL;
1728 + regs->iaoq[1] = regs->iaoq[0] + 4;
1729 + regs->iasq[0] = regs->sr[2];
1730 + regs->iasq[1] = regs->sr[2];
1731 + return 2;
1732 + }
1733 + } while (0);
1734 +#endif
1735 +
1736 + return 1;
1737 +}
1738 +
1739 +void pax_report_insns(void *pc, void *sp)
1740 +{
1741 + unsigned long i;
1742 +
1743 + printk(KERN_ERR "PAX: bytes at PC: ");
1744 + for (i = 0; i < 5; i++) {
1745 + unsigned int c;
1746 + if (get_user(c, (unsigned int *)pc+i))
1747 + printk(KERN_CONT "???????? ");
1748 + else
1749 + printk(KERN_CONT "%08x ", c);
1750 + }
1751 + printk("\n");
1752 +}
1753 +#endif
1754 +
1755 int fixup_exception(struct pt_regs *regs)
1756 {
1757 const struct exception_table_entry *fix;
1758 @@ -192,8 +303,33 @@ good_area:
1759
1760 acc_type = parisc_acctyp(code,regs->iir);
1761
1762 - if ((vma->vm_flags & acc_type) != acc_type)
1763 + if ((vma->vm_flags & acc_type) != acc_type) {
1764 +
1765 +#ifdef CONFIG_PAX_PAGEEXEC
1766 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1767 + (address & ~3UL) == instruction_pointer(regs))
1768 + {
1769 + up_read(&mm->mmap_sem);
1770 + switch (pax_handle_fetch_fault(regs)) {
1771 +
1772 +#ifdef CONFIG_PAX_EMUPLT
1773 + case 3:
1774 + return;
1775 +#endif
1776 +
1777 +#ifdef CONFIG_PAX_EMUTRAMP
1778 + case 2:
1779 + return;
1780 +#endif
1781 +
1782 + }
1783 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1784 + do_group_exit(SIGKILL);
1785 + }
1786 +#endif
1787 +
1788 goto bad_area;
1789 + }
1790
1791 /*
1792 * If for any reason at all we couldn't handle the fault, make
1793 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/device.h linux-2.6.32.41/arch/powerpc/include/asm/device.h
1794 --- linux-2.6.32.41/arch/powerpc/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
1795 +++ linux-2.6.32.41/arch/powerpc/include/asm/device.h 2011-04-17 15:56:45.000000000 -0400
1796 @@ -14,7 +14,7 @@ struct dev_archdata {
1797 struct device_node *of_node;
1798
1799 /* DMA operations on that device */
1800 - struct dma_map_ops *dma_ops;
1801 + const struct dma_map_ops *dma_ops;
1802
1803 /*
1804 * When an iommu is in use, dma_data is used as a ptr to the base of the
1805 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/dma-mapping.h linux-2.6.32.41/arch/powerpc/include/asm/dma-mapping.h
1806 --- linux-2.6.32.41/arch/powerpc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
1807 +++ linux-2.6.32.41/arch/powerpc/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
1808 @@ -69,9 +69,9 @@ static inline unsigned long device_to_ma
1809 #ifdef CONFIG_PPC64
1810 extern struct dma_map_ops dma_iommu_ops;
1811 #endif
1812 -extern struct dma_map_ops dma_direct_ops;
1813 +extern const struct dma_map_ops dma_direct_ops;
1814
1815 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
1816 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
1817 {
1818 /* We don't handle the NULL dev case for ISA for now. We could
1819 * do it via an out of line call but it is not needed for now. The
1820 @@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dm
1821 return dev->archdata.dma_ops;
1822 }
1823
1824 -static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
1825 +static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
1826 {
1827 dev->archdata.dma_ops = ops;
1828 }
1829 @@ -118,7 +118,7 @@ static inline void set_dma_offset(struct
1830
1831 static inline int dma_supported(struct device *dev, u64 mask)
1832 {
1833 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
1834 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
1835
1836 if (unlikely(dma_ops == NULL))
1837 return 0;
1838 @@ -132,7 +132,7 @@ static inline int dma_supported(struct d
1839
1840 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
1841 {
1842 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
1843 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
1844
1845 if (unlikely(dma_ops == NULL))
1846 return -EIO;
1847 @@ -147,7 +147,7 @@ static inline int dma_set_mask(struct de
1848 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1849 dma_addr_t *dma_handle, gfp_t flag)
1850 {
1851 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
1852 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
1853 void *cpu_addr;
1854
1855 BUG_ON(!dma_ops);
1856 @@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(s
1857 static inline void dma_free_coherent(struct device *dev, size_t size,
1858 void *cpu_addr, dma_addr_t dma_handle)
1859 {
1860 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
1861 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
1862
1863 BUG_ON(!dma_ops);
1864
1865 @@ -173,7 +173,7 @@ static inline void dma_free_coherent(str
1866
1867 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
1868 {
1869 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
1870 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
1871
1872 if (dma_ops->mapping_error)
1873 return dma_ops->mapping_error(dev, dma_addr);
1874 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/elf.h linux-2.6.32.41/arch/powerpc/include/asm/elf.h
1875 --- linux-2.6.32.41/arch/powerpc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1876 +++ linux-2.6.32.41/arch/powerpc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1877 @@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
1878 the loader. We need to make sure that it is out of the way of the program
1879 that it will "exec", and that there is sufficient room for the brk. */
1880
1881 -extern unsigned long randomize_et_dyn(unsigned long base);
1882 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
1883 +#define ELF_ET_DYN_BASE (0x20000000)
1884 +
1885 +#ifdef CONFIG_PAX_ASLR
1886 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
1887 +
1888 +#ifdef __powerpc64__
1889 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
1890 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
1891 +#else
1892 +#define PAX_DELTA_MMAP_LEN 15
1893 +#define PAX_DELTA_STACK_LEN 15
1894 +#endif
1895 +#endif
1896
1897 /*
1898 * Our registers are always unsigned longs, whether we're a 32 bit
1899 @@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(s
1900 (0x7ff >> (PAGE_SHIFT - 12)) : \
1901 (0x3ffff >> (PAGE_SHIFT - 12)))
1902
1903 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1904 -#define arch_randomize_brk arch_randomize_brk
1905 -
1906 #endif /* __KERNEL__ */
1907
1908 /*
1909 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/iommu.h linux-2.6.32.41/arch/powerpc/include/asm/iommu.h
1910 --- linux-2.6.32.41/arch/powerpc/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
1911 +++ linux-2.6.32.41/arch/powerpc/include/asm/iommu.h 2011-04-17 15:56:45.000000000 -0400
1912 @@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(voi
1913 extern void iommu_init_early_dart(void);
1914 extern void iommu_init_early_pasemi(void);
1915
1916 +/* dma-iommu.c */
1917 +extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
1918 +
1919 #ifdef CONFIG_PCI
1920 extern void pci_iommu_init(void);
1921 extern void pci_direct_iommu_init(void);
1922 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/kmap_types.h linux-2.6.32.41/arch/powerpc/include/asm/kmap_types.h
1923 --- linux-2.6.32.41/arch/powerpc/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
1924 +++ linux-2.6.32.41/arch/powerpc/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
1925 @@ -26,6 +26,7 @@ enum km_type {
1926 KM_SOFTIRQ1,
1927 KM_PPC_SYNC_PAGE,
1928 KM_PPC_SYNC_ICACHE,
1929 + KM_CLEARPAGE,
1930 KM_TYPE_NR
1931 };
1932
1933 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/page_64.h linux-2.6.32.41/arch/powerpc/include/asm/page_64.h
1934 --- linux-2.6.32.41/arch/powerpc/include/asm/page_64.h 2011-03-27 14:31:47.000000000 -0400
1935 +++ linux-2.6.32.41/arch/powerpc/include/asm/page_64.h 2011-04-17 15:56:45.000000000 -0400
1936 @@ -180,15 +180,18 @@ do { \
1937 * stack by default, so in the absense of a PT_GNU_STACK program header
1938 * we turn execute permission off.
1939 */
1940 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1941 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1942 +#define VM_STACK_DEFAULT_FLAGS32 \
1943 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1944 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1945
1946 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1947 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1948
1949 +#ifndef CONFIG_PAX_PAGEEXEC
1950 #define VM_STACK_DEFAULT_FLAGS \
1951 (test_thread_flag(TIF_32BIT) ? \
1952 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
1953 +#endif
1954
1955 #include <asm-generic/getorder.h>
1956
1957 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/page.h linux-2.6.32.41/arch/powerpc/include/asm/page.h
1958 --- linux-2.6.32.41/arch/powerpc/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
1959 +++ linux-2.6.32.41/arch/powerpc/include/asm/page.h 2011-04-17 15:56:45.000000000 -0400
1960 @@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
1961 * and needs to be executable. This means the whole heap ends
1962 * up being executable.
1963 */
1964 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1965 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1966 +#define VM_DATA_DEFAULT_FLAGS32 \
1967 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1968 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1969
1970 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1971 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1972 @@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
1973 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
1974 #endif
1975
1976 +#define ktla_ktva(addr) (addr)
1977 +#define ktva_ktla(addr) (addr)
1978 +
1979 #ifndef __ASSEMBLY__
1980
1981 #undef STRICT_MM_TYPECHECKS
1982 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/pci.h linux-2.6.32.41/arch/powerpc/include/asm/pci.h
1983 --- linux-2.6.32.41/arch/powerpc/include/asm/pci.h 2011-03-27 14:31:47.000000000 -0400
1984 +++ linux-2.6.32.41/arch/powerpc/include/asm/pci.h 2011-04-17 15:56:45.000000000 -0400
1985 @@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq
1986 }
1987
1988 #ifdef CONFIG_PCI
1989 -extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
1990 -extern struct dma_map_ops *get_pci_dma_ops(void);
1991 +extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
1992 +extern const struct dma_map_ops *get_pci_dma_ops(void);
1993 #else /* CONFIG_PCI */
1994 #define set_pci_dma_ops(d)
1995 #define get_pci_dma_ops() NULL
1996 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/pgtable.h linux-2.6.32.41/arch/powerpc/include/asm/pgtable.h
1997 --- linux-2.6.32.41/arch/powerpc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1998 +++ linux-2.6.32.41/arch/powerpc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1999 @@ -2,6 +2,7 @@
2000 #define _ASM_POWERPC_PGTABLE_H
2001 #ifdef __KERNEL__
2002
2003 +#include <linux/const.h>
2004 #ifndef __ASSEMBLY__
2005 #include <asm/processor.h> /* For TASK_SIZE */
2006 #include <asm/mmu.h>
2007 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/pte-hash32.h linux-2.6.32.41/arch/powerpc/include/asm/pte-hash32.h
2008 --- linux-2.6.32.41/arch/powerpc/include/asm/pte-hash32.h 2011-03-27 14:31:47.000000000 -0400
2009 +++ linux-2.6.32.41/arch/powerpc/include/asm/pte-hash32.h 2011-04-17 15:56:45.000000000 -0400
2010 @@ -21,6 +21,7 @@
2011 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2012 #define _PAGE_USER 0x004 /* usermode access allowed */
2013 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2014 +#define _PAGE_EXEC _PAGE_GUARDED
2015 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2016 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2017 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2018 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/reg.h linux-2.6.32.41/arch/powerpc/include/asm/reg.h
2019 --- linux-2.6.32.41/arch/powerpc/include/asm/reg.h 2011-03-27 14:31:47.000000000 -0400
2020 +++ linux-2.6.32.41/arch/powerpc/include/asm/reg.h 2011-04-17 15:56:45.000000000 -0400
2021 @@ -191,6 +191,7 @@
2022 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2023 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2024 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2025 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2026 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2027 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2028 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2029 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/swiotlb.h linux-2.6.32.41/arch/powerpc/include/asm/swiotlb.h
2030 --- linux-2.6.32.41/arch/powerpc/include/asm/swiotlb.h 2011-03-27 14:31:47.000000000 -0400
2031 +++ linux-2.6.32.41/arch/powerpc/include/asm/swiotlb.h 2011-04-17 15:56:45.000000000 -0400
2032 @@ -13,7 +13,7 @@
2033
2034 #include <linux/swiotlb.h>
2035
2036 -extern struct dma_map_ops swiotlb_dma_ops;
2037 +extern const struct dma_map_ops swiotlb_dma_ops;
2038
2039 static inline void dma_mark_clean(void *addr, size_t size) {}
2040
2041 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/system.h linux-2.6.32.41/arch/powerpc/include/asm/system.h
2042 --- linux-2.6.32.41/arch/powerpc/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
2043 +++ linux-2.6.32.41/arch/powerpc/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
2044 @@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
2045 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2046 #endif
2047
2048 -extern unsigned long arch_align_stack(unsigned long sp);
2049 +#define arch_align_stack(x) ((x) & ~0xfUL)
2050
2051 /* Used in very early kernel initialization. */
2052 extern unsigned long reloc_offset(void);
2053 diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/uaccess.h linux-2.6.32.41/arch/powerpc/include/asm/uaccess.h
2054 --- linux-2.6.32.41/arch/powerpc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
2055 +++ linux-2.6.32.41/arch/powerpc/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
2056 @@ -13,6 +13,8 @@
2057 #define VERIFY_READ 0
2058 #define VERIFY_WRITE 1
2059
2060 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
2061 +
2062 /*
2063 * The fs value determines whether argument validity checking should be
2064 * performed or not. If get_fs() == USER_DS, checking is performed, with
2065 @@ -327,52 +329,6 @@ do { \
2066 extern unsigned long __copy_tofrom_user(void __user *to,
2067 const void __user *from, unsigned long size);
2068
2069 -#ifndef __powerpc64__
2070 -
2071 -static inline unsigned long copy_from_user(void *to,
2072 - const void __user *from, unsigned long n)
2073 -{
2074 - unsigned long over;
2075 -
2076 - if (access_ok(VERIFY_READ, from, n))
2077 - return __copy_tofrom_user((__force void __user *)to, from, n);
2078 - if ((unsigned long)from < TASK_SIZE) {
2079 - over = (unsigned long)from + n - TASK_SIZE;
2080 - return __copy_tofrom_user((__force void __user *)to, from,
2081 - n - over) + over;
2082 - }
2083 - return n;
2084 -}
2085 -
2086 -static inline unsigned long copy_to_user(void __user *to,
2087 - const void *from, unsigned long n)
2088 -{
2089 - unsigned long over;
2090 -
2091 - if (access_ok(VERIFY_WRITE, to, n))
2092 - return __copy_tofrom_user(to, (__force void __user *)from, n);
2093 - if ((unsigned long)to < TASK_SIZE) {
2094 - over = (unsigned long)to + n - TASK_SIZE;
2095 - return __copy_tofrom_user(to, (__force void __user *)from,
2096 - n - over) + over;
2097 - }
2098 - return n;
2099 -}
2100 -
2101 -#else /* __powerpc64__ */
2102 -
2103 -#define __copy_in_user(to, from, size) \
2104 - __copy_tofrom_user((to), (from), (size))
2105 -
2106 -extern unsigned long copy_from_user(void *to, const void __user *from,
2107 - unsigned long n);
2108 -extern unsigned long copy_to_user(void __user *to, const void *from,
2109 - unsigned long n);
2110 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
2111 - unsigned long n);
2112 -
2113 -#endif /* __powerpc64__ */
2114 -
2115 static inline unsigned long __copy_from_user_inatomic(void *to,
2116 const void __user *from, unsigned long n)
2117 {
2118 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
2119 if (ret == 0)
2120 return 0;
2121 }
2122 +
2123 + if (!__builtin_constant_p(n))
2124 + check_object_size(to, n, false);
2125 +
2126 return __copy_tofrom_user((__force void __user *)to, from, n);
2127 }
2128
2129 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
2130 if (ret == 0)
2131 return 0;
2132 }
2133 +
2134 + if (!__builtin_constant_p(n))
2135 + check_object_size(from, n, true);
2136 +
2137 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2138 }
2139
2140 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
2141 return __copy_to_user_inatomic(to, from, size);
2142 }
2143
2144 +#ifndef __powerpc64__
2145 +
2146 +static inline unsigned long __must_check copy_from_user(void *to,
2147 + const void __user *from, unsigned long n)
2148 +{
2149 + unsigned long over;
2150 +
2151 + if ((long)n < 0)
2152 + return n;
2153 +
2154 + if (access_ok(VERIFY_READ, from, n)) {
2155 + if (!__builtin_constant_p(n))
2156 + check_object_size(to, n, false);
2157 + return __copy_tofrom_user((__force void __user *)to, from, n);
2158 + }
2159 + if ((unsigned long)from < TASK_SIZE) {
2160 + over = (unsigned long)from + n - TASK_SIZE;
2161 + if (!__builtin_constant_p(n - over))
2162 + check_object_size(to, n - over, false);
2163 + return __copy_tofrom_user((__force void __user *)to, from,
2164 + n - over) + over;
2165 + }
2166 + return n;
2167 +}
2168 +
2169 +static inline unsigned long __must_check copy_to_user(void __user *to,
2170 + const void *from, unsigned long n)
2171 +{
2172 + unsigned long over;
2173 +
2174 + if ((long)n < 0)
2175 + return n;
2176 +
2177 + if (access_ok(VERIFY_WRITE, to, n)) {
2178 + if (!__builtin_constant_p(n))
2179 + check_object_size(from, n, true);
2180 + return __copy_tofrom_user(to, (__force void __user *)from, n);
2181 + }
2182 + if ((unsigned long)to < TASK_SIZE) {
2183 + over = (unsigned long)to + n - TASK_SIZE;
2184 + if (!__builtin_constant_p(n))
2185 + check_object_size(from, n - over, true);
2186 + return __copy_tofrom_user(to, (__force void __user *)from,
2187 + n - over) + over;
2188 + }
2189 + return n;
2190 +}
2191 +
2192 +#else /* __powerpc64__ */
2193 +
2194 +#define __copy_in_user(to, from, size) \
2195 + __copy_tofrom_user((to), (from), (size))
2196 +
2197 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2198 +{
2199 + if ((long)n < 0 || n > INT_MAX)
2200 + return n;
2201 +
2202 + if (!__builtin_constant_p(n))
2203 + check_object_size(to, n, false);
2204 +
2205 + if (likely(access_ok(VERIFY_READ, from, n)))
2206 + n = __copy_from_user(to, from, n);
2207 + else
2208 + memset(to, 0, n);
2209 + return n;
2210 +}
2211 +
2212 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2213 +{
2214 + if ((long)n < 0 || n > INT_MAX)
2215 + return n;
2216 +
2217 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
2218 + if (!__builtin_constant_p(n))
2219 + check_object_size(from, n, true);
2220 + n = __copy_to_user(to, from, n);
2221 + }
2222 + return n;
2223 +}
2224 +
2225 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
2226 + unsigned long n);
2227 +
2228 +#endif /* __powerpc64__ */
2229 +
2230 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2231
2232 static inline unsigned long clear_user(void __user *addr, unsigned long size)
2233 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/cacheinfo.c linux-2.6.32.41/arch/powerpc/kernel/cacheinfo.c
2234 --- linux-2.6.32.41/arch/powerpc/kernel/cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
2235 +++ linux-2.6.32.41/arch/powerpc/kernel/cacheinfo.c 2011-04-17 15:56:45.000000000 -0400
2236 @@ -642,7 +642,7 @@ static struct kobj_attribute *cache_inde
2237 &cache_assoc_attr,
2238 };
2239
2240 -static struct sysfs_ops cache_index_ops = {
2241 +static const struct sysfs_ops cache_index_ops = {
2242 .show = cache_index_show,
2243 };
2244
2245 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/dma.c linux-2.6.32.41/arch/powerpc/kernel/dma.c
2246 --- linux-2.6.32.41/arch/powerpc/kernel/dma.c 2011-03-27 14:31:47.000000000 -0400
2247 +++ linux-2.6.32.41/arch/powerpc/kernel/dma.c 2011-04-17 15:56:45.000000000 -0400
2248 @@ -134,7 +134,7 @@ static inline void dma_direct_sync_singl
2249 }
2250 #endif
2251
2252 -struct dma_map_ops dma_direct_ops = {
2253 +const struct dma_map_ops dma_direct_ops = {
2254 .alloc_coherent = dma_direct_alloc_coherent,
2255 .free_coherent = dma_direct_free_coherent,
2256 .map_sg = dma_direct_map_sg,
2257 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/dma-iommu.c linux-2.6.32.41/arch/powerpc/kernel/dma-iommu.c
2258 --- linux-2.6.32.41/arch/powerpc/kernel/dma-iommu.c 2011-03-27 14:31:47.000000000 -0400
2259 +++ linux-2.6.32.41/arch/powerpc/kernel/dma-iommu.c 2011-04-17 15:56:45.000000000 -0400
2260 @@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct de
2261 }
2262
2263 /* We support DMA to/from any memory page via the iommu */
2264 -static int dma_iommu_dma_supported(struct device *dev, u64 mask)
2265 +int dma_iommu_dma_supported(struct device *dev, u64 mask)
2266 {
2267 struct iommu_table *tbl = get_iommu_table_base(dev);
2268
2269 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/dma-swiotlb.c linux-2.6.32.41/arch/powerpc/kernel/dma-swiotlb.c
2270 --- linux-2.6.32.41/arch/powerpc/kernel/dma-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
2271 +++ linux-2.6.32.41/arch/powerpc/kernel/dma-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
2272 @@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
2273 * map_page, and unmap_page on highmem, use normal dma_ops
2274 * for everything else.
2275 */
2276 -struct dma_map_ops swiotlb_dma_ops = {
2277 +const struct dma_map_ops swiotlb_dma_ops = {
2278 .alloc_coherent = dma_direct_alloc_coherent,
2279 .free_coherent = dma_direct_free_coherent,
2280 .map_sg = swiotlb_map_sg_attrs,
2281 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/exceptions-64e.S linux-2.6.32.41/arch/powerpc/kernel/exceptions-64e.S
2282 --- linux-2.6.32.41/arch/powerpc/kernel/exceptions-64e.S 2011-03-27 14:31:47.000000000 -0400
2283 +++ linux-2.6.32.41/arch/powerpc/kernel/exceptions-64e.S 2011-04-17 15:56:45.000000000 -0400
2284 @@ -455,6 +455,7 @@ storage_fault_common:
2285 std r14,_DAR(r1)
2286 std r15,_DSISR(r1)
2287 addi r3,r1,STACK_FRAME_OVERHEAD
2288 + bl .save_nvgprs
2289 mr r4,r14
2290 mr r5,r15
2291 ld r14,PACA_EXGEN+EX_R14(r13)
2292 @@ -464,8 +465,7 @@ storage_fault_common:
2293 cmpdi r3,0
2294 bne- 1f
2295 b .ret_from_except_lite
2296 -1: bl .save_nvgprs
2297 - mr r5,r3
2298 +1: mr r5,r3
2299 addi r3,r1,STACK_FRAME_OVERHEAD
2300 ld r4,_DAR(r1)
2301 bl .bad_page_fault
2302 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/exceptions-64s.S linux-2.6.32.41/arch/powerpc/kernel/exceptions-64s.S
2303 --- linux-2.6.32.41/arch/powerpc/kernel/exceptions-64s.S 2011-03-27 14:31:47.000000000 -0400
2304 +++ linux-2.6.32.41/arch/powerpc/kernel/exceptions-64s.S 2011-04-17 15:56:45.000000000 -0400
2305 @@ -818,10 +818,10 @@ handle_page_fault:
2306 11: ld r4,_DAR(r1)
2307 ld r5,_DSISR(r1)
2308 addi r3,r1,STACK_FRAME_OVERHEAD
2309 + bl .save_nvgprs
2310 bl .do_page_fault
2311 cmpdi r3,0
2312 beq+ 13f
2313 - bl .save_nvgprs
2314 mr r5,r3
2315 addi r3,r1,STACK_FRAME_OVERHEAD
2316 lwz r4,_DAR(r1)
2317 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/ibmebus.c linux-2.6.32.41/arch/powerpc/kernel/ibmebus.c
2318 --- linux-2.6.32.41/arch/powerpc/kernel/ibmebus.c 2011-03-27 14:31:47.000000000 -0400
2319 +++ linux-2.6.32.41/arch/powerpc/kernel/ibmebus.c 2011-04-17 15:56:45.000000000 -0400
2320 @@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct
2321 return 1;
2322 }
2323
2324 -static struct dma_map_ops ibmebus_dma_ops = {
2325 +static const struct dma_map_ops ibmebus_dma_ops = {
2326 .alloc_coherent = ibmebus_alloc_coherent,
2327 .free_coherent = ibmebus_free_coherent,
2328 .map_sg = ibmebus_map_sg,
2329 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/kgdb.c linux-2.6.32.41/arch/powerpc/kernel/kgdb.c
2330 --- linux-2.6.32.41/arch/powerpc/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
2331 +++ linux-2.6.32.41/arch/powerpc/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
2332 @@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct
2333 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
2334 return 0;
2335
2336 - if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2337 + if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2338 regs->nip += 4;
2339
2340 return 1;
2341 @@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vecto
2342 /*
2343 * Global data
2344 */
2345 -struct kgdb_arch arch_kgdb_ops = {
2346 +const struct kgdb_arch arch_kgdb_ops = {
2347 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
2348 };
2349
2350 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/module_32.c linux-2.6.32.41/arch/powerpc/kernel/module_32.c
2351 --- linux-2.6.32.41/arch/powerpc/kernel/module_32.c 2011-03-27 14:31:47.000000000 -0400
2352 +++ linux-2.6.32.41/arch/powerpc/kernel/module_32.c 2011-04-17 15:56:45.000000000 -0400
2353 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2354 me->arch.core_plt_section = i;
2355 }
2356 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2357 - printk("Module doesn't contain .plt or .init.plt sections.\n");
2358 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2359 return -ENOEXEC;
2360 }
2361
2362 @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2363
2364 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2365 /* Init, or core PLT? */
2366 - if (location >= mod->module_core
2367 - && location < mod->module_core + mod->core_size)
2368 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2369 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2370 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2371 - else
2372 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2373 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2374 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2375 + else {
2376 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2377 + return ~0UL;
2378 + }
2379
2380 /* Find this entry, or if that fails, the next avail. entry */
2381 while (entry->jump[0]) {
2382 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/module.c linux-2.6.32.41/arch/powerpc/kernel/module.c
2383 --- linux-2.6.32.41/arch/powerpc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
2384 +++ linux-2.6.32.41/arch/powerpc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
2385 @@ -31,11 +31,24 @@
2386
2387 LIST_HEAD(module_bug_list);
2388
2389 +#ifdef CONFIG_PAX_KERNEXEC
2390 void *module_alloc(unsigned long size)
2391 {
2392 if (size == 0)
2393 return NULL;
2394
2395 + return vmalloc(size);
2396 +}
2397 +
2398 +void *module_alloc_exec(unsigned long size)
2399 +#else
2400 +void *module_alloc(unsigned long size)
2401 +#endif
2402 +
2403 +{
2404 + if (size == 0)
2405 + return NULL;
2406 +
2407 return vmalloc_exec(size);
2408 }
2409
2410 @@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2411 vfree(module_region);
2412 }
2413
2414 +#ifdef CONFIG_PAX_KERNEXEC
2415 +void module_free_exec(struct module *mod, void *module_region)
2416 +{
2417 + module_free(mod, module_region);
2418 +}
2419 +#endif
2420 +
2421 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2422 const Elf_Shdr *sechdrs,
2423 const char *name)
2424 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/pci-common.c linux-2.6.32.41/arch/powerpc/kernel/pci-common.c
2425 --- linux-2.6.32.41/arch/powerpc/kernel/pci-common.c 2011-03-27 14:31:47.000000000 -0400
2426 +++ linux-2.6.32.41/arch/powerpc/kernel/pci-common.c 2011-04-17 15:56:45.000000000 -0400
2427 @@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
2428 unsigned int ppc_pci_flags = 0;
2429
2430
2431 -static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2432 +static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2433
2434 -void set_pci_dma_ops(struct dma_map_ops *dma_ops)
2435 +void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
2436 {
2437 pci_dma_ops = dma_ops;
2438 }
2439
2440 -struct dma_map_ops *get_pci_dma_ops(void)
2441 +const struct dma_map_ops *get_pci_dma_ops(void)
2442 {
2443 return pci_dma_ops;
2444 }
2445 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/process.c linux-2.6.32.41/arch/powerpc/kernel/process.c
2446 --- linux-2.6.32.41/arch/powerpc/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
2447 +++ linux-2.6.32.41/arch/powerpc/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
2448 @@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
2449 * Lookup NIP late so we have the best change of getting the
2450 * above info out without failing
2451 */
2452 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2453 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2454 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2455 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2456 #endif
2457 show_stack(current, (unsigned long *) regs->gpr[1]);
2458 if (!user_mode(regs))
2459 @@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk,
2460 newsp = stack[0];
2461 ip = stack[STACK_FRAME_LR_SAVE];
2462 if (!firstframe || ip != lr) {
2463 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2464 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2465 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2466 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2467 - printk(" (%pS)",
2468 + printk(" (%pA)",
2469 (void *)current->ret_stack[curr_frame].ret);
2470 curr_frame--;
2471 }
2472 @@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk,
2473 struct pt_regs *regs = (struct pt_regs *)
2474 (sp + STACK_FRAME_OVERHEAD);
2475 lr = regs->link;
2476 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
2477 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
2478 regs->trap, (void *)regs->nip, (void *)lr);
2479 firstframe = 1;
2480 }
2481 @@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
2482 }
2483
2484 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2485 -
2486 -unsigned long arch_align_stack(unsigned long sp)
2487 -{
2488 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2489 - sp -= get_random_int() & ~PAGE_MASK;
2490 - return sp & ~0xf;
2491 -}
2492 -
2493 -static inline unsigned long brk_rnd(void)
2494 -{
2495 - unsigned long rnd = 0;
2496 -
2497 - /* 8MB for 32bit, 1GB for 64bit */
2498 - if (is_32bit_task())
2499 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2500 - else
2501 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2502 -
2503 - return rnd << PAGE_SHIFT;
2504 -}
2505 -
2506 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2507 -{
2508 - unsigned long base = mm->brk;
2509 - unsigned long ret;
2510 -
2511 -#ifdef CONFIG_PPC_STD_MMU_64
2512 - /*
2513 - * If we are using 1TB segments and we are allowed to randomise
2514 - * the heap, we can put it above 1TB so it is backed by a 1TB
2515 - * segment. Otherwise the heap will be in the bottom 1TB
2516 - * which always uses 256MB segments and this may result in a
2517 - * performance penalty.
2518 - */
2519 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2520 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2521 -#endif
2522 -
2523 - ret = PAGE_ALIGN(base + brk_rnd());
2524 -
2525 - if (ret < mm->brk)
2526 - return mm->brk;
2527 -
2528 - return ret;
2529 -}
2530 -
2531 -unsigned long randomize_et_dyn(unsigned long base)
2532 -{
2533 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2534 -
2535 - if (ret < base)
2536 - return base;
2537 -
2538 - return ret;
2539 -}
2540 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/signal_32.c linux-2.6.32.41/arch/powerpc/kernel/signal_32.c
2541 --- linux-2.6.32.41/arch/powerpc/kernel/signal_32.c 2011-03-27 14:31:47.000000000 -0400
2542 +++ linux-2.6.32.41/arch/powerpc/kernel/signal_32.c 2011-04-17 15:56:45.000000000 -0400
2543 @@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig
2544 /* Save user registers on the stack */
2545 frame = &rt_sf->uc.uc_mcontext;
2546 addr = frame;
2547 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2548 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2549 if (save_user_regs(regs, frame, 0, 1))
2550 goto badframe;
2551 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2552 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/signal_64.c linux-2.6.32.41/arch/powerpc/kernel/signal_64.c
2553 --- linux-2.6.32.41/arch/powerpc/kernel/signal_64.c 2011-03-27 14:31:47.000000000 -0400
2554 +++ linux-2.6.32.41/arch/powerpc/kernel/signal_64.c 2011-04-17 15:56:45.000000000 -0400
2555 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct
2556 current->thread.fpscr.val = 0;
2557
2558 /* Set up to return from userspace. */
2559 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2560 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2561 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2562 } else {
2563 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2564 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/sys_ppc32.c linux-2.6.32.41/arch/powerpc/kernel/sys_ppc32.c
2565 --- linux-2.6.32.41/arch/powerpc/kernel/sys_ppc32.c 2011-03-27 14:31:47.000000000 -0400
2566 +++ linux-2.6.32.41/arch/powerpc/kernel/sys_ppc32.c 2011-04-17 15:56:45.000000000 -0400
2567 @@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct
2568 if (oldlenp) {
2569 if (!error) {
2570 if (get_user(oldlen, oldlenp) ||
2571 - put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
2572 + put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
2573 + copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
2574 error = -EFAULT;
2575 }
2576 - copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
2577 }
2578 return error;
2579 }
2580 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/vdso.c linux-2.6.32.41/arch/powerpc/kernel/vdso.c
2581 --- linux-2.6.32.41/arch/powerpc/kernel/vdso.c 2011-03-27 14:31:47.000000000 -0400
2582 +++ linux-2.6.32.41/arch/powerpc/kernel/vdso.c 2011-04-17 15:56:45.000000000 -0400
2583 @@ -36,6 +36,7 @@
2584 #include <asm/firmware.h>
2585 #include <asm/vdso.h>
2586 #include <asm/vdso_datapage.h>
2587 +#include <asm/mman.h>
2588
2589 #include "setup.h"
2590
2591 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2592 vdso_base = VDSO32_MBASE;
2593 #endif
2594
2595 - current->mm->context.vdso_base = 0;
2596 + current->mm->context.vdso_base = ~0UL;
2597
2598 /* vDSO has a problem and was disabled, just don't "enable" it for the
2599 * process
2600 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2601 vdso_base = get_unmapped_area(NULL, vdso_base,
2602 (vdso_pages << PAGE_SHIFT) +
2603 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2604 - 0, 0);
2605 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
2606 if (IS_ERR_VALUE(vdso_base)) {
2607 rc = vdso_base;
2608 goto fail_mmapsem;
2609 diff -urNp linux-2.6.32.41/arch/powerpc/kernel/vio.c linux-2.6.32.41/arch/powerpc/kernel/vio.c
2610 --- linux-2.6.32.41/arch/powerpc/kernel/vio.c 2011-03-27 14:31:47.000000000 -0400
2611 +++ linux-2.6.32.41/arch/powerpc/kernel/vio.c 2011-04-17 15:56:45.000000000 -0400
2612 @@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struc
2613 vio_cmo_dealloc(viodev, alloc_size);
2614 }
2615
2616 -struct dma_map_ops vio_dma_mapping_ops = {
2617 +static const struct dma_map_ops vio_dma_mapping_ops = {
2618 .alloc_coherent = vio_dma_iommu_alloc_coherent,
2619 .free_coherent = vio_dma_iommu_free_coherent,
2620 .map_sg = vio_dma_iommu_map_sg,
2621 .unmap_sg = vio_dma_iommu_unmap_sg,
2622 + .dma_supported = dma_iommu_dma_supported,
2623 .map_page = vio_dma_iommu_map_page,
2624 .unmap_page = vio_dma_iommu_unmap_page,
2625
2626 @@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vi
2627
2628 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
2629 {
2630 - vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
2631 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
2632 }
2633
2634 diff -urNp linux-2.6.32.41/arch/powerpc/lib/usercopy_64.c linux-2.6.32.41/arch/powerpc/lib/usercopy_64.c
2635 --- linux-2.6.32.41/arch/powerpc/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
2636 +++ linux-2.6.32.41/arch/powerpc/lib/usercopy_64.c 2011-04-17 15:56:45.000000000 -0400
2637 @@ -9,22 +9,6 @@
2638 #include <linux/module.h>
2639 #include <asm/uaccess.h>
2640
2641 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2642 -{
2643 - if (likely(access_ok(VERIFY_READ, from, n)))
2644 - n = __copy_from_user(to, from, n);
2645 - else
2646 - memset(to, 0, n);
2647 - return n;
2648 -}
2649 -
2650 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2651 -{
2652 - if (likely(access_ok(VERIFY_WRITE, to, n)))
2653 - n = __copy_to_user(to, from, n);
2654 - return n;
2655 -}
2656 -
2657 unsigned long copy_in_user(void __user *to, const void __user *from,
2658 unsigned long n)
2659 {
2660 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2661 return n;
2662 }
2663
2664 -EXPORT_SYMBOL(copy_from_user);
2665 -EXPORT_SYMBOL(copy_to_user);
2666 EXPORT_SYMBOL(copy_in_user);
2667
2668 diff -urNp linux-2.6.32.41/arch/powerpc/mm/fault.c linux-2.6.32.41/arch/powerpc/mm/fault.c
2669 --- linux-2.6.32.41/arch/powerpc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
2670 +++ linux-2.6.32.41/arch/powerpc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
2671 @@ -30,6 +30,10 @@
2672 #include <linux/kprobes.h>
2673 #include <linux/kdebug.h>
2674 #include <linux/perf_event.h>
2675 +#include <linux/slab.h>
2676 +#include <linux/pagemap.h>
2677 +#include <linux/compiler.h>
2678 +#include <linux/unistd.h>
2679
2680 #include <asm/firmware.h>
2681 #include <asm/page.h>
2682 @@ -40,6 +44,7 @@
2683 #include <asm/uaccess.h>
2684 #include <asm/tlbflush.h>
2685 #include <asm/siginfo.h>
2686 +#include <asm/ptrace.h>
2687
2688
2689 #ifdef CONFIG_KPROBES
2690 @@ -64,6 +69,33 @@ static inline int notify_page_fault(stru
2691 }
2692 #endif
2693
2694 +#ifdef CONFIG_PAX_PAGEEXEC
2695 +/*
2696 + * PaX: decide what to do with offenders (regs->nip = fault address)
2697 + *
2698 + * returns 1 when task should be killed
2699 + */
2700 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2701 +{
2702 + return 1;
2703 +}
2704 +
2705 +void pax_report_insns(void *pc, void *sp)
2706 +{
2707 + unsigned long i;
2708 +
2709 + printk(KERN_ERR "PAX: bytes at PC: ");
2710 + for (i = 0; i < 5; i++) {
2711 + unsigned int c;
2712 + if (get_user(c, (unsigned int __user *)pc+i))
2713 + printk(KERN_CONT "???????? ");
2714 + else
2715 + printk(KERN_CONT "%08x ", c);
2716 + }
2717 + printk("\n");
2718 +}
2719 +#endif
2720 +
2721 /*
2722 * Check whether the instruction at regs->nip is a store using
2723 * an update addressing form which will update r1.
2724 @@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_re
2725 * indicate errors in DSISR but can validly be set in SRR1.
2726 */
2727 if (trap == 0x400)
2728 - error_code &= 0x48200000;
2729 + error_code &= 0x58200000;
2730 else
2731 is_write = error_code & DSISR_ISSTORE;
2732 #else
2733 @@ -250,7 +282,7 @@ good_area:
2734 * "undefined". Of those that can be set, this is the only
2735 * one which seems bad.
2736 */
2737 - if (error_code & 0x10000000)
2738 + if (error_code & DSISR_GUARDED)
2739 /* Guarded storage error. */
2740 goto bad_area;
2741 #endif /* CONFIG_8xx */
2742 @@ -265,7 +297,7 @@ good_area:
2743 * processors use the same I/D cache coherency mechanism
2744 * as embedded.
2745 */
2746 - if (error_code & DSISR_PROTFAULT)
2747 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2748 goto bad_area;
2749 #endif /* CONFIG_PPC_STD_MMU */
2750
2751 @@ -335,6 +367,23 @@ bad_area:
2752 bad_area_nosemaphore:
2753 /* User mode accesses cause a SIGSEGV */
2754 if (user_mode(regs)) {
2755 +
2756 +#ifdef CONFIG_PAX_PAGEEXEC
2757 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2758 +#ifdef CONFIG_PPC_STD_MMU
2759 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2760 +#else
2761 + if (is_exec && regs->nip == address) {
2762 +#endif
2763 + switch (pax_handle_fetch_fault(regs)) {
2764 + }
2765 +
2766 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2767 + do_group_exit(SIGKILL);
2768 + }
2769 + }
2770 +#endif
2771 +
2772 _exception(SIGSEGV, regs, code, address);
2773 return 0;
2774 }
2775 diff -urNp linux-2.6.32.41/arch/powerpc/mm/mmap_64.c linux-2.6.32.41/arch/powerpc/mm/mmap_64.c
2776 --- linux-2.6.32.41/arch/powerpc/mm/mmap_64.c 2011-03-27 14:31:47.000000000 -0400
2777 +++ linux-2.6.32.41/arch/powerpc/mm/mmap_64.c 2011-04-17 15:56:45.000000000 -0400
2778 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2779 */
2780 if (mmap_is_legacy()) {
2781 mm->mmap_base = TASK_UNMAPPED_BASE;
2782 +
2783 +#ifdef CONFIG_PAX_RANDMMAP
2784 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2785 + mm->mmap_base += mm->delta_mmap;
2786 +#endif
2787 +
2788 mm->get_unmapped_area = arch_get_unmapped_area;
2789 mm->unmap_area = arch_unmap_area;
2790 } else {
2791 mm->mmap_base = mmap_base();
2792 +
2793 +#ifdef CONFIG_PAX_RANDMMAP
2794 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2795 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2796 +#endif
2797 +
2798 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2799 mm->unmap_area = arch_unmap_area_topdown;
2800 }
2801 diff -urNp linux-2.6.32.41/arch/powerpc/mm/slice.c linux-2.6.32.41/arch/powerpc/mm/slice.c
2802 --- linux-2.6.32.41/arch/powerpc/mm/slice.c 2011-03-27 14:31:47.000000000 -0400
2803 +++ linux-2.6.32.41/arch/powerpc/mm/slice.c 2011-04-17 15:56:45.000000000 -0400
2804 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
2805 if ((mm->task_size - len) < addr)
2806 return 0;
2807 vma = find_vma(mm, addr);
2808 - return (!vma || (addr + len) <= vma->vm_start);
2809 + return check_heap_stack_gap(vma, addr, len);
2810 }
2811
2812 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2813 @@ -256,7 +256,7 @@ full_search:
2814 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2815 continue;
2816 }
2817 - if (!vma || addr + len <= vma->vm_start) {
2818 + if (check_heap_stack_gap(vma, addr, len)) {
2819 /*
2820 * Remember the place where we stopped the search:
2821 */
2822 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
2823 }
2824 }
2825
2826 - addr = mm->mmap_base;
2827 - while (addr > len) {
2828 + if (mm->mmap_base < len)
2829 + addr = -ENOMEM;
2830 + else
2831 + addr = mm->mmap_base - len;
2832 +
2833 + while (!IS_ERR_VALUE(addr)) {
2834 /* Go down by chunk size */
2835 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2836 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
2837
2838 /* Check for hit with different page size */
2839 mask = slice_range_to_mask(addr, len);
2840 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
2841 * return with success:
2842 */
2843 vma = find_vma(mm, addr);
2844 - if (!vma || (addr + len) <= vma->vm_start) {
2845 + if (check_heap_stack_gap(vma, addr, len)) {
2846 /* remember the address as a hint for next time */
2847 if (use_cache)
2848 mm->free_area_cache = addr;
2849 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
2850 mm->cached_hole_size = vma->vm_start - addr;
2851
2852 /* try just below the current vma->vm_start */
2853 - addr = vma->vm_start;
2854 + addr = skip_heap_stack_gap(vma, len);
2855 }
2856
2857 /*
2858 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
2859 if (fixed && addr > (mm->task_size - len))
2860 return -EINVAL;
2861
2862 +#ifdef CONFIG_PAX_RANDMMAP
2863 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2864 + addr = 0;
2865 +#endif
2866 +
2867 /* If hint, make sure it matches our alignment restrictions */
2868 if (!fixed && addr) {
2869 addr = _ALIGN_UP(addr, 1ul << pshift);
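
Throughout these address-space search routines the patch replaces the bare "!vma || addr + len <= vma->vm_start" test with check_heap_stack_gap(vma, addr, len). The helper itself is not part of this excerpt, so the sketch below only captures its assumed intent: reject a candidate range that would end flush against a downward-growing stack VMA, requiring a guard gap in between. The VM_GROWSDOWN value is the usual one; the gap size is a placeholder for what is really a tunable.

#include <stdbool.h>
#include <stdio.h>

#define VM_GROWSDOWN 0x0100UL
#define GUARD_GAP    (64UL * 1024)   /* placeholder; the real gap is configurable */

struct vma {
        unsigned long vm_start;
        unsigned long vm_end;
        unsigned long vm_flags;
};

/* Old test: the new mapping only has to end at or before the next VMA. */
static bool old_fits(const struct vma *vma, unsigned long addr, unsigned long len)
{
        return !vma || addr + len <= vma->vm_start;
}

/*
 * Assumed semantics of check_heap_stack_gap(): when the next VMA grows
 * downwards (a stack), additionally demand a guard gap between the end of
 * the candidate mapping and the stack, so heap and stack never become
 * adjacent.
 */
static bool check_heap_stack_gap(const struct vma *vma, unsigned long addr, unsigned long len)
{
        unsigned long gap = 0;

        if (!vma)
                return true;
        if (vma->vm_flags & VM_GROWSDOWN)
                gap = GUARD_GAP;
        return addr + len + gap <= vma->vm_start;
}

int main(void)
{
        struct vma stack = { 0x7ffff0000UL, 0x800000000UL, VM_GROWSDOWN };
        unsigned long addr = 0x7fffe0000UL, len = 0x10000;

        printf("old test:  %s\n", old_fits(&stack, addr, len) ? "fits" : "rejected");
        printf("gap-aware: %s\n", check_heap_stack_gap(&stack, addr, len) ? "fits" : "rejected");
        return 0;
}

Built as a normal C program, the old test accepts a mapping that ends exactly at the stack's vm_start, while the gap-aware test rejects it.
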
2870 diff -urNp linux-2.6.32.41/arch/powerpc/platforms/52xx/lite5200_pm.c linux-2.6.32.41/arch/powerpc/platforms/52xx/lite5200_pm.c
2871 --- linux-2.6.32.41/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-03-27 14:31:47.000000000 -0400
2872 +++ linux-2.6.32.41/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-04-17 15:56:45.000000000 -0400
2873 @@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
2874 lite5200_pm_target_state = PM_SUSPEND_ON;
2875 }
2876
2877 -static struct platform_suspend_ops lite5200_pm_ops = {
2878 +static const struct platform_suspend_ops lite5200_pm_ops = {
2879 .valid = lite5200_pm_valid,
2880 .begin = lite5200_pm_begin,
2881 .prepare = lite5200_pm_prepare,
2882 diff -urNp linux-2.6.32.41/arch/powerpc/platforms/52xx/mpc52xx_pm.c linux-2.6.32.41/arch/powerpc/platforms/52xx/mpc52xx_pm.c
2883 --- linux-2.6.32.41/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-03-27 14:31:47.000000000 -0400
2884 +++ linux-2.6.32.41/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-04-17 15:56:45.000000000 -0400
2885 @@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
2886 iounmap(mbar);
2887 }
2888
2889 -static struct platform_suspend_ops mpc52xx_pm_ops = {
2890 +static const struct platform_suspend_ops mpc52xx_pm_ops = {
2891 .valid = mpc52xx_pm_valid,
2892 .prepare = mpc52xx_pm_prepare,
2893 .enter = mpc52xx_pm_enter,
2894 diff -urNp linux-2.6.32.41/arch/powerpc/platforms/83xx/suspend.c linux-2.6.32.41/arch/powerpc/platforms/83xx/suspend.c
2895 --- linux-2.6.32.41/arch/powerpc/platforms/83xx/suspend.c 2011-03-27 14:31:47.000000000 -0400
2896 +++ linux-2.6.32.41/arch/powerpc/platforms/83xx/suspend.c 2011-04-17 15:56:45.000000000 -0400
2897 @@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
2898 return ret;
2899 }
2900
2901 -static struct platform_suspend_ops mpc83xx_suspend_ops = {
2902 +static const struct platform_suspend_ops mpc83xx_suspend_ops = {
2903 .valid = mpc83xx_suspend_valid,
2904 .begin = mpc83xx_suspend_begin,
2905 .enter = mpc83xx_suspend_enter,
2906 diff -urNp linux-2.6.32.41/arch/powerpc/platforms/cell/iommu.c linux-2.6.32.41/arch/powerpc/platforms/cell/iommu.c
2907 --- linux-2.6.32.41/arch/powerpc/platforms/cell/iommu.c 2011-03-27 14:31:47.000000000 -0400
2908 +++ linux-2.6.32.41/arch/powerpc/platforms/cell/iommu.c 2011-04-17 15:56:45.000000000 -0400
2909 @@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struc
2910
2911 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
2912
2913 -struct dma_map_ops dma_iommu_fixed_ops = {
2914 +const struct dma_map_ops dma_iommu_fixed_ops = {
2915 .alloc_coherent = dma_fixed_alloc_coherent,
2916 .free_coherent = dma_fixed_free_coherent,
2917 .map_sg = dma_fixed_map_sg,
2918 diff -urNp linux-2.6.32.41/arch/powerpc/platforms/ps3/system-bus.c linux-2.6.32.41/arch/powerpc/platforms/ps3/system-bus.c
2919 --- linux-2.6.32.41/arch/powerpc/platforms/ps3/system-bus.c 2011-03-27 14:31:47.000000000 -0400
2920 +++ linux-2.6.32.41/arch/powerpc/platforms/ps3/system-bus.c 2011-04-17 15:56:45.000000000 -0400
2921 @@ -694,7 +694,7 @@ static int ps3_dma_supported(struct devi
2922 return mask >= DMA_BIT_MASK(32);
2923 }
2924
2925 -static struct dma_map_ops ps3_sb_dma_ops = {
2926 +static const struct dma_map_ops ps3_sb_dma_ops = {
2927 .alloc_coherent = ps3_alloc_coherent,
2928 .free_coherent = ps3_free_coherent,
2929 .map_sg = ps3_sb_map_sg,
2930 @@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops
2931 .unmap_page = ps3_unmap_page,
2932 };
2933
2934 -static struct dma_map_ops ps3_ioc0_dma_ops = {
2935 +static const struct dma_map_ops ps3_ioc0_dma_ops = {
2936 .alloc_coherent = ps3_alloc_coherent,
2937 .free_coherent = ps3_free_coherent,
2938 .map_sg = ps3_ioc0_map_sg,
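
A recurring, mechanical part of the patch is visible in the platform_suspend_ops and dma_map_ops hunks above: tables of function pointers gain a const qualifier. The point is that a writable ops table is an easy target once an attacker has a kernel write primitive; const lets the table live in read-only memory. A minimal userspace illustration of the same idea follows (where exactly the object lands is up to the toolchain):

#include <stdio.h>

struct suspend_ops {
        int (*valid)(int state);
        int (*enter)(int state);
};

static int my_valid(int state) { return state == 3; }
static int my_enter(int state) { printf("entering state %d\n", state); return 0; }

/* Writable table: can be retargeted at run time, e.g. by a stray write. */
static struct suspend_ops writable_ops = { my_valid, my_enter };

/* Read-only table: the compiler places it in .rodata, so writes fault. */
static const struct suspend_ops readonly_ops = { my_valid, my_enter };

int main(void)
{
        writable_ops.enter = my_enter;          /* legal, the table is writable */
        /* readonly_ops.enter = my_enter; */    /* rejected at compile time     */

        if (readonly_ops.valid(3))
                readonly_ops.enter(3);
        return 0;
}
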
2939 diff -urNp linux-2.6.32.41/arch/powerpc/platforms/pseries/Kconfig linux-2.6.32.41/arch/powerpc/platforms/pseries/Kconfig
2940 --- linux-2.6.32.41/arch/powerpc/platforms/pseries/Kconfig 2011-03-27 14:31:47.000000000 -0400
2941 +++ linux-2.6.32.41/arch/powerpc/platforms/pseries/Kconfig 2011-04-17 15:56:45.000000000 -0400
2942 @@ -2,6 +2,8 @@ config PPC_PSERIES
2943 depends on PPC64 && PPC_BOOK3S
2944 bool "IBM pSeries & new (POWER5-based) iSeries"
2945 select MPIC
2946 + select PCI_MSI
2947 + select XICS
2948 select PPC_I8259
2949 select PPC_RTAS
2950 select RTAS_ERROR_LOGGING
2951 diff -urNp linux-2.6.32.41/arch/s390/include/asm/elf.h linux-2.6.32.41/arch/s390/include/asm/elf.h
2952 --- linux-2.6.32.41/arch/s390/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
2953 +++ linux-2.6.32.41/arch/s390/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
2954 @@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
2955 that it will "exec", and that there is sufficient room for the brk. */
2956 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2957
2958 +#ifdef CONFIG_PAX_ASLR
2959 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2960 +
2961 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2962 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2963 +#endif
2964 +
2965 /* This yields a mask that user programs can use to figure out what
2966 instruction set this CPU supports. */
2967
2968 diff -urNp linux-2.6.32.41/arch/s390/include/asm/setup.h linux-2.6.32.41/arch/s390/include/asm/setup.h
2969 --- linux-2.6.32.41/arch/s390/include/asm/setup.h 2011-03-27 14:31:47.000000000 -0400
2970 +++ linux-2.6.32.41/arch/s390/include/asm/setup.h 2011-04-17 15:56:45.000000000 -0400
2971 @@ -50,13 +50,13 @@ extern unsigned long memory_end;
2972 void detect_memory_layout(struct mem_chunk chunk[]);
2973
2974 #ifdef CONFIG_S390_SWITCH_AMODE
2975 -extern unsigned int switch_amode;
2976 +#define switch_amode (1)
2977 #else
2978 #define switch_amode (0)
2979 #endif
2980
2981 #ifdef CONFIG_S390_EXEC_PROTECT
2982 -extern unsigned int s390_noexec;
2983 +#define s390_noexec (1)
2984 #else
2985 #define s390_noexec (0)
2986 #endif
2987 diff -urNp linux-2.6.32.41/arch/s390/include/asm/uaccess.h linux-2.6.32.41/arch/s390/include/asm/uaccess.h
2988 --- linux-2.6.32.41/arch/s390/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
2989 +++ linux-2.6.32.41/arch/s390/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
2990 @@ -232,6 +232,10 @@ static inline unsigned long __must_check
2991 copy_to_user(void __user *to, const void *from, unsigned long n)
2992 {
2993 might_fault();
2994 +
2995 + if ((long)n < 0)
2996 + return n;
2997 +
2998 if (access_ok(VERIFY_WRITE, to, n))
2999 n = __copy_to_user(to, from, n);
3000 return n;
3001 @@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void
3002 static inline unsigned long __must_check
3003 __copy_from_user(void *to, const void __user *from, unsigned long n)
3004 {
3005 + if ((long)n < 0)
3006 + return n;
3007 +
3008 if (__builtin_constant_p(n) && (n <= 256))
3009 return uaccess.copy_from_user_small(n, from, to);
3010 else
3011 @@ -283,6 +290,10 @@ static inline unsigned long __must_check
3012 copy_from_user(void *to, const void __user *from, unsigned long n)
3013 {
3014 might_fault();
3015 +
3016 + if ((long)n < 0)
3017 + return n;
3018 +
3019 if (access_ok(VERIFY_READ, from, n))
3020 n = __copy_from_user(to, from, n);
3021 else
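
The added "(long)n < 0" checks in copy_to_user()/copy_from_user() above (the same guard reappears later in the sparc uaccess hunks) bail out before access_ok() whenever the requested size has its sign bit set, which in practice means a length computation underflowed. A small self-contained sketch of that failure mode, with memcpy() standing in for the real copy routine:

#include <stdio.h>
#include <string.h>

/* memcpy() stands in for the copy primitive; the return value is "bytes not copied". */
static unsigned long guarded_copy(void *to, const void *from, unsigned long n)
{
        /* Sign bit set: almost certainly an underflowed length, refuse it. */
        if ((long)n < 0)
                return n;

        memcpy(to, from, n);
        return 0;
}

int main(void)
{
        char dst[16], src[16] = "example";
        unsigned long want = 4, have = 8;

        /* Buggy caller: want - have underflows to a huge unsigned value. */
        unsigned long n = want - have;

        if (guarded_copy(dst, src, n))
                printf("rejected bogus length %#lx\n", n);

        if (guarded_copy(dst, src, want) == 0)
                printf("copied %lu bytes: %.4s\n", want, dst);
        return 0;
}
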
3022 diff -urNp linux-2.6.32.41/arch/s390/Kconfig linux-2.6.32.41/arch/s390/Kconfig
3023 --- linux-2.6.32.41/arch/s390/Kconfig 2011-03-27 14:31:47.000000000 -0400
3024 +++ linux-2.6.32.41/arch/s390/Kconfig 2011-04-17 15:56:45.000000000 -0400
3025 @@ -194,28 +194,26 @@ config AUDIT_ARCH
3026
3027 config S390_SWITCH_AMODE
3028 bool "Switch kernel/user addressing modes"
3029 + default y
3030 help
3031 This option allows to switch the addressing modes of kernel and user
3032 - space. The kernel parameter switch_amode=on will enable this feature,
3033 - default is disabled. Enabling this (via kernel parameter) on machines
3034 - earlier than IBM System z9-109 EC/BC will reduce system performance.
3035 + space. Enabling this on machines earlier than IBM System z9-109 EC/BC
3036 + will reduce system performance.
3037
3038 Note that this option will also be selected by selecting the execute
3039 - protection option below. Enabling the execute protection via the
3040 - noexec kernel parameter will also switch the addressing modes,
3041 - independent of the switch_amode kernel parameter.
3042 + protection option below. Enabling the execute protection will also
3043 + switch the addressing modes, independent of this option.
3044
3045
3046 config S390_EXEC_PROTECT
3047 bool "Data execute protection"
3048 + default y
3049 select S390_SWITCH_AMODE
3050 help
3051 This option allows to enable a buffer overflow protection for user
3052 space programs and it also selects the addressing mode option above.
3053 - The kernel parameter noexec=on will enable this feature and also
3054 - switch the addressing modes, default is disabled. Enabling this (via
3055 - kernel parameter) on machines earlier than IBM System z9-109 EC/BC
3056 - will reduce system performance.
3057 + Enabling this on machines earlier than IBM System z9-109 EC/BC will
3058 + reduce system performance.
3059
3060 comment "Code generation options"
3061
3062 diff -urNp linux-2.6.32.41/arch/s390/kernel/module.c linux-2.6.32.41/arch/s390/kernel/module.c
3063 --- linux-2.6.32.41/arch/s390/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
3064 +++ linux-2.6.32.41/arch/s390/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
3065 @@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
3066
3067 /* Increase core size by size of got & plt and set start
3068 offsets for got and plt. */
3069 - me->core_size = ALIGN(me->core_size, 4);
3070 - me->arch.got_offset = me->core_size;
3071 - me->core_size += me->arch.got_size;
3072 - me->arch.plt_offset = me->core_size;
3073 - me->core_size += me->arch.plt_size;
3074 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
3075 + me->arch.got_offset = me->core_size_rw;
3076 + me->core_size_rw += me->arch.got_size;
3077 + me->arch.plt_offset = me->core_size_rx;
3078 + me->core_size_rx += me->arch.plt_size;
3079 return 0;
3080 }
3081
3082 @@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3083 if (info->got_initialized == 0) {
3084 Elf_Addr *gotent;
3085
3086 - gotent = me->module_core + me->arch.got_offset +
3087 + gotent = me->module_core_rw + me->arch.got_offset +
3088 info->got_offset;
3089 *gotent = val;
3090 info->got_initialized = 1;
3091 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3092 else if (r_type == R_390_GOTENT ||
3093 r_type == R_390_GOTPLTENT)
3094 *(unsigned int *) loc =
3095 - (val + (Elf_Addr) me->module_core - loc) >> 1;
3096 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3097 else if (r_type == R_390_GOT64 ||
3098 r_type == R_390_GOTPLT64)
3099 *(unsigned long *) loc = val;
3100 @@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3101 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3102 if (info->plt_initialized == 0) {
3103 unsigned int *ip;
3104 - ip = me->module_core + me->arch.plt_offset +
3105 + ip = me->module_core_rx + me->arch.plt_offset +
3106 info->plt_offset;
3107 #ifndef CONFIG_64BIT
3108 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3109 @@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3110 val - loc + 0xffffUL < 0x1ffffeUL) ||
3111 (r_type == R_390_PLT32DBL &&
3112 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3113 - val = (Elf_Addr) me->module_core +
3114 + val = (Elf_Addr) me->module_core_rx +
3115 me->arch.plt_offset +
3116 info->plt_offset;
3117 val += rela->r_addend - loc;
3118 @@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3119 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3120 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3121 val = val + rela->r_addend -
3122 - ((Elf_Addr) me->module_core + me->arch.got_offset);
3123 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3124 if (r_type == R_390_GOTOFF16)
3125 *(unsigned short *) loc = val;
3126 else if (r_type == R_390_GOTOFF32)
3127 @@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3128 break;
3129 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3130 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3131 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
3132 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3133 rela->r_addend - loc;
3134 if (r_type == R_390_GOTPC)
3135 *(unsigned int *) loc = val;
3136 diff -urNp linux-2.6.32.41/arch/s390/kernel/setup.c linux-2.6.32.41/arch/s390/kernel/setup.c
3137 --- linux-2.6.32.41/arch/s390/kernel/setup.c 2011-03-27 14:31:47.000000000 -0400
3138 +++ linux-2.6.32.41/arch/s390/kernel/setup.c 2011-04-17 15:56:45.000000000 -0400
3139 @@ -306,9 +306,6 @@ static int __init early_parse_mem(char *
3140 early_param("mem", early_parse_mem);
3141
3142 #ifdef CONFIG_S390_SWITCH_AMODE
3143 -unsigned int switch_amode = 0;
3144 -EXPORT_SYMBOL_GPL(switch_amode);
3145 -
3146 static int set_amode_and_uaccess(unsigned long user_amode,
3147 unsigned long user32_amode)
3148 {
3149 @@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigne
3150 return 0;
3151 }
3152 }
3153 -
3154 -/*
3155 - * Switch kernel/user addressing modes?
3156 - */
3157 -static int __init early_parse_switch_amode(char *p)
3158 -{
3159 - switch_amode = 1;
3160 - return 0;
3161 -}
3162 -early_param("switch_amode", early_parse_switch_amode);
3163 -
3164 #else /* CONFIG_S390_SWITCH_AMODE */
3165 static inline int set_amode_and_uaccess(unsigned long user_amode,
3166 unsigned long user32_amode)
3167 @@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(
3168 }
3169 #endif /* CONFIG_S390_SWITCH_AMODE */
3170
3171 -#ifdef CONFIG_S390_EXEC_PROTECT
3172 -unsigned int s390_noexec = 0;
3173 -EXPORT_SYMBOL_GPL(s390_noexec);
3174 -
3175 -/*
3176 - * Enable execute protection?
3177 - */
3178 -static int __init early_parse_noexec(char *p)
3179 -{
3180 - if (!strncmp(p, "off", 3))
3181 - return 0;
3182 - switch_amode = 1;
3183 - s390_noexec = 1;
3184 - return 0;
3185 -}
3186 -early_param("noexec", early_parse_noexec);
3187 -#endif /* CONFIG_S390_EXEC_PROTECT */
3188 -
3189 static void setup_addressing_mode(void)
3190 {
3191 if (s390_noexec) {
3192 diff -urNp linux-2.6.32.41/arch/s390/mm/mmap.c linux-2.6.32.41/arch/s390/mm/mmap.c
3193 --- linux-2.6.32.41/arch/s390/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3194 +++ linux-2.6.32.41/arch/s390/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
3195 @@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_str
3196 */
3197 if (mmap_is_legacy()) {
3198 mm->mmap_base = TASK_UNMAPPED_BASE;
3199 +
3200 +#ifdef CONFIG_PAX_RANDMMAP
3201 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3202 + mm->mmap_base += mm->delta_mmap;
3203 +#endif
3204 +
3205 mm->get_unmapped_area = arch_get_unmapped_area;
3206 mm->unmap_area = arch_unmap_area;
3207 } else {
3208 mm->mmap_base = mmap_base();
3209 +
3210 +#ifdef CONFIG_PAX_RANDMMAP
3211 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3212 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3213 +#endif
3214 +
3215 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3216 mm->unmap_area = arch_unmap_area_topdown;
3217 }
3218 @@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_str
3219 */
3220 if (mmap_is_legacy()) {
3221 mm->mmap_base = TASK_UNMAPPED_BASE;
3222 +
3223 +#ifdef CONFIG_PAX_RANDMMAP
3224 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3225 + mm->mmap_base += mm->delta_mmap;
3226 +#endif
3227 +
3228 mm->get_unmapped_area = s390_get_unmapped_area;
3229 mm->unmap_area = arch_unmap_area;
3230 } else {
3231 mm->mmap_base = mmap_base();
3232 +
3233 +#ifdef CONFIG_PAX_RANDMMAP
3234 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3235 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3236 +#endif
3237 +
3238 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3239 mm->unmap_area = arch_unmap_area_topdown;
3240 }
3241 diff -urNp linux-2.6.32.41/arch/score/include/asm/system.h linux-2.6.32.41/arch/score/include/asm/system.h
3242 --- linux-2.6.32.41/arch/score/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
3243 +++ linux-2.6.32.41/arch/score/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
3244 @@ -17,7 +17,7 @@ do { \
3245 #define finish_arch_switch(prev) do {} while (0)
3246
3247 typedef void (*vi_handler_t)(void);
3248 -extern unsigned long arch_align_stack(unsigned long sp);
3249 +#define arch_align_stack(x) (x)
3250
3251 #define mb() barrier()
3252 #define rmb() barrier()
3253 diff -urNp linux-2.6.32.41/arch/score/kernel/process.c linux-2.6.32.41/arch/score/kernel/process.c
3254 --- linux-2.6.32.41/arch/score/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
3255 +++ linux-2.6.32.41/arch/score/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
3256 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
3257
3258 return task_pt_regs(task)->cp0_epc;
3259 }
3260 -
3261 -unsigned long arch_align_stack(unsigned long sp)
3262 -{
3263 - return sp;
3264 -}
3265 diff -urNp linux-2.6.32.41/arch/sh/boards/mach-hp6xx/pm.c linux-2.6.32.41/arch/sh/boards/mach-hp6xx/pm.c
3266 --- linux-2.6.32.41/arch/sh/boards/mach-hp6xx/pm.c 2011-03-27 14:31:47.000000000 -0400
3267 +++ linux-2.6.32.41/arch/sh/boards/mach-hp6xx/pm.c 2011-04-17 15:56:45.000000000 -0400
3268 @@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_
3269 return 0;
3270 }
3271
3272 -static struct platform_suspend_ops hp6x0_pm_ops = {
3273 +static const struct platform_suspend_ops hp6x0_pm_ops = {
3274 .enter = hp6x0_pm_enter,
3275 .valid = suspend_valid_only_mem,
3276 };
3277 diff -urNp linux-2.6.32.41/arch/sh/kernel/cpu/sh4/sq.c linux-2.6.32.41/arch/sh/kernel/cpu/sh4/sq.c
3278 --- linux-2.6.32.41/arch/sh/kernel/cpu/sh4/sq.c 2011-03-27 14:31:47.000000000 -0400
3279 +++ linux-2.6.32.41/arch/sh/kernel/cpu/sh4/sq.c 2011-04-17 15:56:46.000000000 -0400
3280 @@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[
3281 NULL,
3282 };
3283
3284 -static struct sysfs_ops sq_sysfs_ops = {
3285 +static const struct sysfs_ops sq_sysfs_ops = {
3286 .show = sq_sysfs_show,
3287 .store = sq_sysfs_store,
3288 };
3289 diff -urNp linux-2.6.32.41/arch/sh/kernel/cpu/shmobile/pm.c linux-2.6.32.41/arch/sh/kernel/cpu/shmobile/pm.c
3290 --- linux-2.6.32.41/arch/sh/kernel/cpu/shmobile/pm.c 2011-03-27 14:31:47.000000000 -0400
3291 +++ linux-2.6.32.41/arch/sh/kernel/cpu/shmobile/pm.c 2011-04-17 15:56:46.000000000 -0400
3292 @@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t s
3293 return 0;
3294 }
3295
3296 -static struct platform_suspend_ops sh_pm_ops = {
3297 +static const struct platform_suspend_ops sh_pm_ops = {
3298 .enter = sh_pm_enter,
3299 .valid = suspend_valid_only_mem,
3300 };
3301 diff -urNp linux-2.6.32.41/arch/sh/kernel/kgdb.c linux-2.6.32.41/arch/sh/kernel/kgdb.c
3302 --- linux-2.6.32.41/arch/sh/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
3303 +++ linux-2.6.32.41/arch/sh/kernel/kgdb.c 2011-04-17 15:56:46.000000000 -0400
3304 @@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
3305 {
3306 }
3307
3308 -struct kgdb_arch arch_kgdb_ops = {
3309 +const struct kgdb_arch arch_kgdb_ops = {
3310 /* Breakpoint instruction: trapa #0x3c */
3311 #ifdef CONFIG_CPU_LITTLE_ENDIAN
3312 .gdb_bpt_instr = { 0x3c, 0xc3 },
3313 diff -urNp linux-2.6.32.41/arch/sh/mm/mmap.c linux-2.6.32.41/arch/sh/mm/mmap.c
3314 --- linux-2.6.32.41/arch/sh/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3315 +++ linux-2.6.32.41/arch/sh/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
3316 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
3317 addr = PAGE_ALIGN(addr);
3318
3319 vma = find_vma(mm, addr);
3320 - if (TASK_SIZE - len >= addr &&
3321 - (!vma || addr + len <= vma->vm_start))
3322 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3323 return addr;
3324 }
3325
3326 @@ -106,7 +105,7 @@ full_search:
3327 }
3328 return -ENOMEM;
3329 }
3330 - if (likely(!vma || addr + len <= vma->vm_start)) {
3331 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3332 /*
3333 * Remember the place where we stopped the search:
3334 */
3335 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
3336 addr = PAGE_ALIGN(addr);
3337
3338 vma = find_vma(mm, addr);
3339 - if (TASK_SIZE - len >= addr &&
3340 - (!vma || addr + len <= vma->vm_start))
3341 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3342 return addr;
3343 }
3344
3345 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
3346 /* make sure it can fit in the remaining address space */
3347 if (likely(addr > len)) {
3348 vma = find_vma(mm, addr-len);
3349 - if (!vma || addr <= vma->vm_start) {
3350 + if (check_heap_stack_gap(vma, addr - len, len)) {
3351 /* remember the address as a hint for next time */
3352 return (mm->free_area_cache = addr-len);
3353 }
3354 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
3355 if (unlikely(mm->mmap_base < len))
3356 goto bottomup;
3357
3358 - addr = mm->mmap_base-len;
3359 - if (do_colour_align)
3360 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3361 + addr = mm->mmap_base - len;
3362
3363 do {
3364 + if (do_colour_align)
3365 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3366 /*
3367 * Lookup failure means no vma is above this address,
3368 * else if new region fits below vma->vm_start,
3369 * return with success:
3370 */
3371 vma = find_vma(mm, addr);
3372 - if (likely(!vma || addr+len <= vma->vm_start)) {
3373 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3374 /* remember the address as a hint for next time */
3375 return (mm->free_area_cache = addr);
3376 }
3377 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
3378 mm->cached_hole_size = vma->vm_start - addr;
3379
3380 /* try just below the current vma->vm_start */
3381 - addr = vma->vm_start-len;
3382 - if (do_colour_align)
3383 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3384 - } while (likely(len < vma->vm_start));
3385 + addr = skip_heap_stack_gap(vma, len);
3386 + } while (!IS_ERR_VALUE(addr));
3387
3388 bottomup:
3389 /*
3390 diff -urNp linux-2.6.32.41/arch/sparc/include/asm/atomic_64.h linux-2.6.32.41/arch/sparc/include/asm/atomic_64.h
3391 --- linux-2.6.32.41/arch/sparc/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
3392 +++ linux-2.6.32.41/arch/sparc/include/asm/atomic_64.h 2011-05-04 17:56:20.000000000 -0400
3393 @@ -14,18 +14,40 @@
3394 #define ATOMIC64_INIT(i) { (i) }
3395
3396 #define atomic_read(v) ((v)->counter)
3397 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3398 +{
3399 + return v->counter;
3400 +}
3401 #define atomic64_read(v) ((v)->counter)
3402 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3403 +{
3404 + return v->counter;
3405 +}
3406
3407 #define atomic_set(v, i) (((v)->counter) = i)
3408 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3409 +{
3410 + v->counter = i;
3411 +}
3412 #define atomic64_set(v, i) (((v)->counter) = i)
3413 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3414 +{
3415 + v->counter = i;
3416 +}
3417
3418 extern void atomic_add(int, atomic_t *);
3419 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3420 extern void atomic64_add(long, atomic64_t *);
3421 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3422 extern void atomic_sub(int, atomic_t *);
3423 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3424 extern void atomic64_sub(long, atomic64_t *);
3425 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3426
3427 extern int atomic_add_ret(int, atomic_t *);
3428 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3429 extern long atomic64_add_ret(long, atomic64_t *);
3430 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3431 extern int atomic_sub_ret(int, atomic_t *);
3432 extern long atomic64_sub_ret(long, atomic64_t *);
3433
3434 @@ -33,7 +55,15 @@ extern long atomic64_sub_ret(long, atomi
3435 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3436
3437 #define atomic_inc_return(v) atomic_add_ret(1, v)
3438 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3439 +{
3440 + return atomic_add_ret_unchecked(1, v);
3441 +}
3442 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3443 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3444 +{
3445 + return atomic64_add_ret_unchecked(1, v);
3446 +}
3447
3448 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3449 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3450 @@ -50,6 +80,7 @@ extern long atomic64_sub_ret(long, atomi
3451 * other cases.
3452 */
3453 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3454 +#define atomic_inc_and_test_unchecked(v) (atomic_inc_return_unchecked(v) == 0)
3455 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3456
3457 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3458 @@ -59,30 +90,59 @@ extern long atomic64_sub_ret(long, atomi
3459 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3460
3461 #define atomic_inc(v) atomic_add(1, v)
3462 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3463 +{
3464 + atomic_add_unchecked(1, v);
3465 +}
3466 #define atomic64_inc(v) atomic64_add(1, v)
3467 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3468 +{
3469 + atomic64_add_unchecked(1, v);
3470 +}
3471
3472 #define atomic_dec(v) atomic_sub(1, v)
3473 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3474 +{
3475 + atomic_sub_unchecked(1, v);
3476 +}
3477 #define atomic64_dec(v) atomic64_sub(1, v)
3478 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3479 +{
3480 + atomic64_sub_unchecked(1, v);
3481 +}
3482
3483 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3484 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3485
3486 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3487 +#define atomic_cmpxchg_unchecked(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3488 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3489 +#define atomic_xchg_unchecked(v, new) (xchg(&((v)->counter), new))
3490
3491 static inline int atomic_add_unless(atomic_t *v, int a, int u)
3492 {
3493 - int c, old;
3494 + int c, old, new;
3495 c = atomic_read(v);
3496 for (;;) {
3497 - if (unlikely(c == (u)))
3498 + if (unlikely(c == u))
3499 break;
3500 - old = atomic_cmpxchg((v), c, c + (a));
3501 +
3502 + asm volatile("addcc %2, %0, %0\n"
3503 +
3504 +#ifdef CONFIG_PAX_REFCOUNT
3505 + "tvs %%icc, 6\n"
3506 +#endif
3507 +
3508 + : "=r" (new)
3509 + : "0" (c), "ir" (a)
3510 + : "cc");
3511 +
3512 + old = atomic_cmpxchg(v, c, new);
3513 if (likely(old == c))
3514 break;
3515 c = old;
3516 }
3517 - return c != (u);
3518 + return c != u;
3519 }
3520
3521 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
3522 @@ -93,17 +153,28 @@ static inline int atomic_add_unless(atom
3523
3524 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3525 {
3526 - long c, old;
3527 + long c, old, new;
3528 c = atomic64_read(v);
3529 for (;;) {
3530 - if (unlikely(c == (u)))
3531 + if (unlikely(c == u))
3532 break;
3533 - old = atomic64_cmpxchg((v), c, c + (a));
3534 +
3535 + asm volatile("addcc %2, %0, %0\n"
3536 +
3537 +#ifdef CONFIG_PAX_REFCOUNT
3538 + "tvs %%xcc, 6\n"
3539 +#endif
3540 +
3541 + : "=r" (new)
3542 + : "0" (c), "ir" (a)
3543 + : "cc");
3544 +
3545 + old = atomic64_cmpxchg(v, c, new);
3546 if (likely(old == c))
3547 break;
3548 c = old;
3549 }
3550 - return c != (u);
3551 + return c != u;
3552 }
3553
3554 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
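
The atomic_64.h hunk splits the sparc atomics into checked and *_unchecked flavours and, under CONFIG_PAX_REFCOUNT, follows each add with "tvs %icc, 6", a conditional trap taken on signed overflow; the traps_64.c hunk further down routes trap level 6 to pax_report_refcount_overflow(). The sketch below expresses the same idea in portable C, using __builtin_add_overflow() and abort() in place of the trap instruction, and ignores actual atomicity; it is not how the kernel implements it.

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile int counter; } atomic_unchecked_t;

/* Checked flavour: refuse to let a signed reference count wrap around. */
static void atomic_add(int i, atomic_t *v)
{
        int old = v->counter, new_val;

        if (__builtin_add_overflow(old, i, &new_val)) {
                fprintf(stderr, "refcount overflow detected\n");
                abort();        /* the patch raises trap type 6 here instead */
        }
        v->counter = new_val;
}

/* Unchecked flavour: wraparound is deliberate (statistics counters etc.). */
static void atomic_add_unchecked(int i, atomic_unchecked_t *v)
{
        v->counter = (int)((unsigned int)v->counter + (unsigned int)i);
}

int main(void)
{
        atomic_unchecked_t stats = { INT_MAX };
        atomic_t refs = { INT_MAX };

        atomic_add_unchecked(1, &stats);        /* silently wraps */
        printf("stats counter wrapped to %d\n", stats.counter);

        atomic_add(1, &refs);                   /* detected before wrapping */
        return 0;
}
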
3555 diff -urNp linux-2.6.32.41/arch/sparc/include/asm/cache.h linux-2.6.32.41/arch/sparc/include/asm/cache.h
3556 --- linux-2.6.32.41/arch/sparc/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
3557 +++ linux-2.6.32.41/arch/sparc/include/asm/cache.h 2011-05-17 19:26:34.000000000 -0400
3558 @@ -8,7 +8,7 @@
3559 #define _SPARC_CACHE_H
3560
3561 #define L1_CACHE_SHIFT 5
3562 -#define L1_CACHE_BYTES 32
3563 +#define L1_CACHE_BYTES 32U
3564 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
3565
3566 #ifdef CONFIG_SPARC32
3567 diff -urNp linux-2.6.32.41/arch/sparc/include/asm/dma-mapping.h linux-2.6.32.41/arch/sparc/include/asm/dma-mapping.h
3568 --- linux-2.6.32.41/arch/sparc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
3569 +++ linux-2.6.32.41/arch/sparc/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
3570 @@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *d
3571 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
3572 #define dma_is_consistent(d, h) (1)
3573
3574 -extern struct dma_map_ops *dma_ops, pci32_dma_ops;
3575 +extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
3576 extern struct bus_type pci_bus_type;
3577
3578 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
3579 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
3580 {
3581 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
3582 if (dev->bus == &pci_bus_type)
3583 @@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dm
3584 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
3585 dma_addr_t *dma_handle, gfp_t flag)
3586 {
3587 - struct dma_map_ops *ops = get_dma_ops(dev);
3588 + const struct dma_map_ops *ops = get_dma_ops(dev);
3589 void *cpu_addr;
3590
3591 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
3592 @@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(s
3593 static inline void dma_free_coherent(struct device *dev, size_t size,
3594 void *cpu_addr, dma_addr_t dma_handle)
3595 {
3596 - struct dma_map_ops *ops = get_dma_ops(dev);
3597 + const struct dma_map_ops *ops = get_dma_ops(dev);
3598
3599 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
3600 ops->free_coherent(dev, size, cpu_addr, dma_handle);
3601 diff -urNp linux-2.6.32.41/arch/sparc/include/asm/elf_32.h linux-2.6.32.41/arch/sparc/include/asm/elf_32.h
3602 --- linux-2.6.32.41/arch/sparc/include/asm/elf_32.h 2011-03-27 14:31:47.000000000 -0400
3603 +++ linux-2.6.32.41/arch/sparc/include/asm/elf_32.h 2011-04-17 15:56:46.000000000 -0400
3604 @@ -116,6 +116,13 @@ typedef struct {
3605
3606 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3607
3608 +#ifdef CONFIG_PAX_ASLR
3609 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3610 +
3611 +#define PAX_DELTA_MMAP_LEN 16
3612 +#define PAX_DELTA_STACK_LEN 16
3613 +#endif
3614 +
3615 /* This yields a mask that user programs can use to figure out what
3616 instruction set this cpu supports. This can NOT be done in userspace
3617 on Sparc. */
3618 diff -urNp linux-2.6.32.41/arch/sparc/include/asm/elf_64.h linux-2.6.32.41/arch/sparc/include/asm/elf_64.h
3619 --- linux-2.6.32.41/arch/sparc/include/asm/elf_64.h 2011-03-27 14:31:47.000000000 -0400
3620 +++ linux-2.6.32.41/arch/sparc/include/asm/elf_64.h 2011-04-17 15:56:46.000000000 -0400
3621 @@ -163,6 +163,12 @@ typedef struct {
3622 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3623 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3624
3625 +#ifdef CONFIG_PAX_ASLR
3626 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3627 +
3628 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3629 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3630 +#endif
3631
3632 /* This yields a mask that user programs can use to figure out what
3633 instruction set this cpu supports. */
3634 diff -urNp linux-2.6.32.41/arch/sparc/include/asm/pgtable_32.h linux-2.6.32.41/arch/sparc/include/asm/pgtable_32.h
3635 --- linux-2.6.32.41/arch/sparc/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
3636 +++ linux-2.6.32.41/arch/sparc/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
3637 @@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3638 BTFIXUPDEF_INT(page_none)
3639 BTFIXUPDEF_INT(page_copy)
3640 BTFIXUPDEF_INT(page_readonly)
3641 +
3642 +#ifdef CONFIG_PAX_PAGEEXEC
3643 +BTFIXUPDEF_INT(page_shared_noexec)
3644 +BTFIXUPDEF_INT(page_copy_noexec)
3645 +BTFIXUPDEF_INT(page_readonly_noexec)
3646 +#endif
3647 +
3648 BTFIXUPDEF_INT(page_kernel)
3649
3650 #define PMD_SHIFT SUN4C_PMD_SHIFT
3651 @@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
3652 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3653 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3654
3655 +#ifdef CONFIG_PAX_PAGEEXEC
3656 +extern pgprot_t PAGE_SHARED_NOEXEC;
3657 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3658 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3659 +#else
3660 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3661 +# define PAGE_COPY_NOEXEC PAGE_COPY
3662 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3663 +#endif
3664 +
3665 extern unsigned long page_kernel;
3666
3667 #ifdef MODULE
3668 diff -urNp linux-2.6.32.41/arch/sparc/include/asm/pgtsrmmu.h linux-2.6.32.41/arch/sparc/include/asm/pgtsrmmu.h
3669 --- linux-2.6.32.41/arch/sparc/include/asm/pgtsrmmu.h 2011-03-27 14:31:47.000000000 -0400
3670 +++ linux-2.6.32.41/arch/sparc/include/asm/pgtsrmmu.h 2011-04-17 15:56:46.000000000 -0400
3671 @@ -115,6 +115,13 @@
3672 SRMMU_EXEC | SRMMU_REF)
3673 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3674 SRMMU_EXEC | SRMMU_REF)
3675 +
3676 +#ifdef CONFIG_PAX_PAGEEXEC
3677 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3678 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3679 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3680 +#endif
3681 +
3682 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3683 SRMMU_DIRTY | SRMMU_REF)
3684
3685 diff -urNp linux-2.6.32.41/arch/sparc/include/asm/spinlock_64.h linux-2.6.32.41/arch/sparc/include/asm/spinlock_64.h
3686 --- linux-2.6.32.41/arch/sparc/include/asm/spinlock_64.h 2011-03-27 14:31:47.000000000 -0400
3687 +++ linux-2.6.32.41/arch/sparc/include/asm/spinlock_64.h 2011-05-04 17:56:20.000000000 -0400
3688 @@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags
3689
3690 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3691
3692 -static void inline arch_read_lock(raw_rwlock_t *lock)
3693 +static inline void arch_read_lock(raw_rwlock_t *lock)
3694 {
3695 unsigned long tmp1, tmp2;
3696
3697 __asm__ __volatile__ (
3698 "1: ldsw [%2], %0\n"
3699 " brlz,pn %0, 2f\n"
3700 -"4: add %0, 1, %1\n"
3701 +"4: addcc %0, 1, %1\n"
3702 +
3703 +#ifdef CONFIG_PAX_REFCOUNT
3704 +" tvs %%icc, 6\n"
3705 +#endif
3706 +
3707 " cas [%2], %0, %1\n"
3708 " cmp %0, %1\n"
3709 " bne,pn %%icc, 1b\n"
3710 @@ -112,7 +117,7 @@ static void inline arch_read_lock(raw_rw
3711 " .previous"
3712 : "=&r" (tmp1), "=&r" (tmp2)
3713 : "r" (lock)
3714 - : "memory");
3715 + : "memory", "cc");
3716 }
3717
3718 static int inline arch_read_trylock(raw_rwlock_t *lock)
3719 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_
3720 "1: ldsw [%2], %0\n"
3721 " brlz,a,pn %0, 2f\n"
3722 " mov 0, %0\n"
3723 -" add %0, 1, %1\n"
3724 +" addcc %0, 1, %1\n"
3725 +
3726 +#ifdef CONFIG_PAX_REFCOUNT
3727 +" tvs %%icc, 6\n"
3728 +#endif
3729 +
3730 " cas [%2], %0, %1\n"
3731 " cmp %0, %1\n"
3732 " bne,pn %%icc, 1b\n"
3733 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_
3734 return tmp1;
3735 }
3736
3737 -static void inline arch_read_unlock(raw_rwlock_t *lock)
3738 +static inline void arch_read_unlock(raw_rwlock_t *lock)
3739 {
3740 unsigned long tmp1, tmp2;
3741
3742 __asm__ __volatile__(
3743 "1: lduw [%2], %0\n"
3744 -" sub %0, 1, %1\n"
3745 +" subcc %0, 1, %1\n"
3746 +
3747 +#ifdef CONFIG_PAX_REFCOUNT
3748 +" tvs %%icc, 6\n"
3749 +#endif
3750 +
3751 " cas [%2], %0, %1\n"
3752 " cmp %0, %1\n"
3753 " bne,pn %%xcc, 1b\n"
3754 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_
3755 : "memory");
3756 }
3757
3758 -static void inline arch_write_lock(raw_rwlock_t *lock)
3759 +static inline void arch_write_lock(raw_rwlock_t *lock)
3760 {
3761 unsigned long mask, tmp1, tmp2;
3762
3763 @@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_r
3764 : "memory");
3765 }
3766
3767 -static void inline arch_write_unlock(raw_rwlock_t *lock)
3768 +static inline void arch_write_unlock(raw_rwlock_t *lock)
3769 {
3770 __asm__ __volatile__(
3771 " stw %%g0, [%0]"
3772 diff -urNp linux-2.6.32.41/arch/sparc/include/asm/thread_info_32.h linux-2.6.32.41/arch/sparc/include/asm/thread_info_32.h
3773 --- linux-2.6.32.41/arch/sparc/include/asm/thread_info_32.h 2011-03-27 14:31:47.000000000 -0400
3774 +++ linux-2.6.32.41/arch/sparc/include/asm/thread_info_32.h 2011-06-04 20:46:01.000000000 -0400
3775 @@ -50,6 +50,8 @@ struct thread_info {
3776 unsigned long w_saved;
3777
3778 struct restart_block restart_block;
3779 +
3780 + unsigned long lowest_stack;
3781 };
3782
3783 /*
3784 diff -urNp linux-2.6.32.41/arch/sparc/include/asm/thread_info_64.h linux-2.6.32.41/arch/sparc/include/asm/thread_info_64.h
3785 --- linux-2.6.32.41/arch/sparc/include/asm/thread_info_64.h 2011-03-27 14:31:47.000000000 -0400
3786 +++ linux-2.6.32.41/arch/sparc/include/asm/thread_info_64.h 2011-06-04 20:46:21.000000000 -0400
3787 @@ -68,6 +68,8 @@ struct thread_info {
3788 struct pt_regs *kern_una_regs;
3789 unsigned int kern_una_insn;
3790
3791 + unsigned long lowest_stack;
3792 +
3793 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3794 };
3795
3796 diff -urNp linux-2.6.32.41/arch/sparc/include/asm/uaccess_32.h linux-2.6.32.41/arch/sparc/include/asm/uaccess_32.h
3797 --- linux-2.6.32.41/arch/sparc/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
3798 +++ linux-2.6.32.41/arch/sparc/include/asm/uaccess_32.h 2011-04-17 15:56:46.000000000 -0400
3799 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
3800
3801 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3802 {
3803 - if (n && __access_ok((unsigned long) to, n))
3804 + if ((long)n < 0)
3805 + return n;
3806 +
3807 + if (n && __access_ok((unsigned long) to, n)) {
3808 + if (!__builtin_constant_p(n))
3809 + check_object_size(from, n, true);
3810 return __copy_user(to, (__force void __user *) from, n);
3811 - else
3812 + } else
3813 return n;
3814 }
3815
3816 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3817 {
3818 + if ((long)n < 0)
3819 + return n;
3820 +
3821 + if (!__builtin_constant_p(n))
3822 + check_object_size(from, n, true);
3823 +
3824 return __copy_user(to, (__force void __user *) from, n);
3825 }
3826
3827 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3828 {
3829 - if (n && __access_ok((unsigned long) from, n))
3830 + if ((long)n < 0)
3831 + return n;
3832 +
3833 + if (n && __access_ok((unsigned long) from, n)) {
3834 + if (!__builtin_constant_p(n))
3835 + check_object_size(to, n, false);
3836 return __copy_user((__force void __user *) to, from, n);
3837 - else
3838 + } else
3839 return n;
3840 }
3841
3842 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3843 {
3844 + if ((long)n < 0)
3845 + return n;
3846 +
3847 return __copy_user((__force void __user *) to, from, n);
3848 }
3849
3850 diff -urNp linux-2.6.32.41/arch/sparc/include/asm/uaccess_64.h linux-2.6.32.41/arch/sparc/include/asm/uaccess_64.h
3851 --- linux-2.6.32.41/arch/sparc/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
3852 +++ linux-2.6.32.41/arch/sparc/include/asm/uaccess_64.h 2011-04-17 15:56:46.000000000 -0400
3853 @@ -9,6 +9,7 @@
3854 #include <linux/compiler.h>
3855 #include <linux/string.h>
3856 #include <linux/thread_info.h>
3857 +#include <linux/kernel.h>
3858 #include <asm/asi.h>
3859 #include <asm/system.h>
3860 #include <asm/spitfire.h>
3861 @@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixu
3862 static inline unsigned long __must_check
3863 copy_from_user(void *to, const void __user *from, unsigned long size)
3864 {
3865 - unsigned long ret = ___copy_from_user(to, from, size);
3866 + unsigned long ret;
3867
3868 + if ((long)size < 0 || size > INT_MAX)
3869 + return size;
3870 +
3871 + if (!__builtin_constant_p(size))
3872 + check_object_size(to, size, false);
3873 +
3874 + ret = ___copy_from_user(to, from, size);
3875 if (unlikely(ret))
3876 ret = copy_from_user_fixup(to, from, size);
3877 return ret;
3878 @@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(
3879 static inline unsigned long __must_check
3880 copy_to_user(void __user *to, const void *from, unsigned long size)
3881 {
3882 - unsigned long ret = ___copy_to_user(to, from, size);
3883 + unsigned long ret;
3884 +
3885 + if ((long)size < 0 || size > INT_MAX)
3886 + return size;
3887 +
3888 + if (!__builtin_constant_p(size))
3889 + check_object_size(from, size, true);
3890
3891 + ret = ___copy_to_user(to, from, size);
3892 if (unlikely(ret))
3893 ret = copy_to_user_fixup(to, from, size);
3894 return ret;
3895 diff -urNp linux-2.6.32.41/arch/sparc/include/asm/uaccess.h linux-2.6.32.41/arch/sparc/include/asm/uaccess.h
3896 --- linux-2.6.32.41/arch/sparc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
3897 +++ linux-2.6.32.41/arch/sparc/include/asm/uaccess.h 2011-04-17 15:56:46.000000000 -0400
3898 @@ -1,5 +1,13 @@
3899 #ifndef ___ASM_SPARC_UACCESS_H
3900 #define ___ASM_SPARC_UACCESS_H
3901 +
3902 +#ifdef __KERNEL__
3903 +#ifndef __ASSEMBLY__
3904 +#include <linux/types.h>
3905 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3906 +#endif
3907 +#endif
3908 +
3909 #if defined(__sparc__) && defined(__arch64__)
3910 #include <asm/uaccess_64.h>
3911 #else
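
The uaccess headers above declare check_object_size(const void *ptr, unsigned long n, bool to) and call it from the copy routines whenever the size is not a compile-time constant; the real implementation (outside this excerpt) locates the kernel object containing ptr, typically a slab allocation or a stack frame, and verifies the copy stays inside it. The sketch below has no allocator metadata to consult, so it passes the object bounds in explicitly; the span struct and lookup are hypothetical.

#include <stdbool.h>
#include <stdio.h>

struct kobject_span {           /* hypothetical bookkeeping for this sketch */
        const char *name;
        const void *start;
        unsigned long size;
};

static bool span_contains(const struct kobject_span *obj,
                          const void *ptr, unsigned long n)
{
        unsigned long start = (unsigned long)obj->start;
        unsigned long p = (unsigned long)ptr;

        return p >= start && n <= obj->size && p - start <= obj->size - n;
}

static void check_object_size(const struct kobject_span *obj,
                              const void *ptr, unsigned long n, bool to_user)
{
        if (!span_contains(obj, ptr, n))
                fprintf(stderr, "usercopy: %s %lu bytes overflows object '%s'\n",
                        to_user ? "exposing" : "overwriting", n, obj->name);
}

int main(void)
{
        char buf[32];
        struct kobject_span span = { "buf", buf, sizeof(buf) };

        check_object_size(&span, buf, 16, true);   /* within bounds, silent */
        check_object_size(&span, buf, 64, true);   /* flagged: 64 > 32      */
        return 0;
}
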
3912 diff -urNp linux-2.6.32.41/arch/sparc/kernel/iommu.c linux-2.6.32.41/arch/sparc/kernel/iommu.c
3913 --- linux-2.6.32.41/arch/sparc/kernel/iommu.c 2011-03-27 14:31:47.000000000 -0400
3914 +++ linux-2.6.32.41/arch/sparc/kernel/iommu.c 2011-04-17 15:56:46.000000000 -0400
3915 @@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struc
3916 spin_unlock_irqrestore(&iommu->lock, flags);
3917 }
3918
3919 -static struct dma_map_ops sun4u_dma_ops = {
3920 +static const struct dma_map_ops sun4u_dma_ops = {
3921 .alloc_coherent = dma_4u_alloc_coherent,
3922 .free_coherent = dma_4u_free_coherent,
3923 .map_page = dma_4u_map_page,
3924 @@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops
3925 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
3926 };
3927
3928 -struct dma_map_ops *dma_ops = &sun4u_dma_ops;
3929 +const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
3930 EXPORT_SYMBOL(dma_ops);
3931
3932 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
3933 diff -urNp linux-2.6.32.41/arch/sparc/kernel/ioport.c linux-2.6.32.41/arch/sparc/kernel/ioport.c
3934 --- linux-2.6.32.41/arch/sparc/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
3935 +++ linux-2.6.32.41/arch/sparc/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
3936 @@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(stru
3937 BUG();
3938 }
3939
3940 -struct dma_map_ops sbus_dma_ops = {
3941 +const struct dma_map_ops sbus_dma_ops = {
3942 .alloc_coherent = sbus_alloc_coherent,
3943 .free_coherent = sbus_free_coherent,
3944 .map_page = sbus_map_page,
3945 @@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
3946 .sync_sg_for_device = sbus_sync_sg_for_device,
3947 };
3948
3949 -struct dma_map_ops *dma_ops = &sbus_dma_ops;
3950 +const struct dma_map_ops *dma_ops = &sbus_dma_ops;
3951 EXPORT_SYMBOL(dma_ops);
3952
3953 static int __init sparc_register_ioport(void)
3954 @@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(str
3955 }
3956 }
3957
3958 -struct dma_map_ops pci32_dma_ops = {
3959 +const struct dma_map_ops pci32_dma_ops = {
3960 .alloc_coherent = pci32_alloc_coherent,
3961 .free_coherent = pci32_free_coherent,
3962 .map_page = pci32_map_page,
3963 diff -urNp linux-2.6.32.41/arch/sparc/kernel/kgdb_32.c linux-2.6.32.41/arch/sparc/kernel/kgdb_32.c
3964 --- linux-2.6.32.41/arch/sparc/kernel/kgdb_32.c 2011-03-27 14:31:47.000000000 -0400
3965 +++ linux-2.6.32.41/arch/sparc/kernel/kgdb_32.c 2011-04-17 15:56:46.000000000 -0400
3966 @@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
3967 {
3968 }
3969
3970 -struct kgdb_arch arch_kgdb_ops = {
3971 +const struct kgdb_arch arch_kgdb_ops = {
3972 /* Breakpoint instruction: ta 0x7d */
3973 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
3974 };
3975 diff -urNp linux-2.6.32.41/arch/sparc/kernel/kgdb_64.c linux-2.6.32.41/arch/sparc/kernel/kgdb_64.c
3976 --- linux-2.6.32.41/arch/sparc/kernel/kgdb_64.c 2011-03-27 14:31:47.000000000 -0400
3977 +++ linux-2.6.32.41/arch/sparc/kernel/kgdb_64.c 2011-04-17 15:56:46.000000000 -0400
3978 @@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
3979 {
3980 }
3981
3982 -struct kgdb_arch arch_kgdb_ops = {
3983 +const struct kgdb_arch arch_kgdb_ops = {
3984 /* Breakpoint instruction: ta 0x72 */
3985 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
3986 };
3987 diff -urNp linux-2.6.32.41/arch/sparc/kernel/Makefile linux-2.6.32.41/arch/sparc/kernel/Makefile
3988 --- linux-2.6.32.41/arch/sparc/kernel/Makefile 2011-03-27 14:31:47.000000000 -0400
3989 +++ linux-2.6.32.41/arch/sparc/kernel/Makefile 2011-04-17 15:56:46.000000000 -0400
3990 @@ -3,7 +3,7 @@
3991 #
3992
3993 asflags-y := -ansi
3994 -ccflags-y := -Werror
3995 +#ccflags-y := -Werror
3996
3997 extra-y := head_$(BITS).o
3998 extra-y += init_task.o
3999 diff -urNp linux-2.6.32.41/arch/sparc/kernel/pci_sun4v.c linux-2.6.32.41/arch/sparc/kernel/pci_sun4v.c
4000 --- linux-2.6.32.41/arch/sparc/kernel/pci_sun4v.c 2011-03-27 14:31:47.000000000 -0400
4001 +++ linux-2.6.32.41/arch/sparc/kernel/pci_sun4v.c 2011-04-17 15:56:46.000000000 -0400
4002 @@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct devic
4003 spin_unlock_irqrestore(&iommu->lock, flags);
4004 }
4005
4006 -static struct dma_map_ops sun4v_dma_ops = {
4007 +static const struct dma_map_ops sun4v_dma_ops = {
4008 .alloc_coherent = dma_4v_alloc_coherent,
4009 .free_coherent = dma_4v_free_coherent,
4010 .map_page = dma_4v_map_page,
4011 diff -urNp linux-2.6.32.41/arch/sparc/kernel/process_32.c linux-2.6.32.41/arch/sparc/kernel/process_32.c
4012 --- linux-2.6.32.41/arch/sparc/kernel/process_32.c 2011-03-27 14:31:47.000000000 -0400
4013 +++ linux-2.6.32.41/arch/sparc/kernel/process_32.c 2011-04-17 15:56:46.000000000 -0400
4014 @@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
4015 rw->ins[4], rw->ins[5],
4016 rw->ins[6],
4017 rw->ins[7]);
4018 - printk("%pS\n", (void *) rw->ins[7]);
4019 + printk("%pA\n", (void *) rw->ins[7]);
4020 rw = (struct reg_window32 *) rw->ins[6];
4021 }
4022 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
4023 @@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
4024
4025 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
4026 r->psr, r->pc, r->npc, r->y, print_tainted());
4027 - printk("PC: <%pS>\n", (void *) r->pc);
4028 + printk("PC: <%pA>\n", (void *) r->pc);
4029 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4030 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
4031 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
4032 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4033 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
4034 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
4035 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
4036 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
4037
4038 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4039 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
4040 @@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk,
4041 rw = (struct reg_window32 *) fp;
4042 pc = rw->ins[7];
4043 printk("[%08lx : ", pc);
4044 - printk("%pS ] ", (void *) pc);
4045 + printk("%pA ] ", (void *) pc);
4046 fp = rw->ins[6];
4047 } while (++count < 16);
4048 printk("\n");
4049 diff -urNp linux-2.6.32.41/arch/sparc/kernel/process_64.c linux-2.6.32.41/arch/sparc/kernel/process_64.c
4050 --- linux-2.6.32.41/arch/sparc/kernel/process_64.c 2011-03-27 14:31:47.000000000 -0400
4051 +++ linux-2.6.32.41/arch/sparc/kernel/process_64.c 2011-04-17 15:56:46.000000000 -0400
4052 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
4053 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
4054 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
4055 if (regs->tstate & TSTATE_PRIV)
4056 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
4057 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
4058 }
4059
4060 void show_regs(struct pt_regs *regs)
4061 {
4062 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
4063 regs->tpc, regs->tnpc, regs->y, print_tainted());
4064 - printk("TPC: <%pS>\n", (void *) regs->tpc);
4065 + printk("TPC: <%pA>\n", (void *) regs->tpc);
4066 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
4067 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
4068 regs->u_regs[3]);
4069 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
4070 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
4071 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
4072 regs->u_regs[15]);
4073 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
4074 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
4075 show_regwindow(regs);
4076 }
4077
4078 @@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void
4079 ((tp && tp->task) ? tp->task->pid : -1));
4080
4081 if (gp->tstate & TSTATE_PRIV) {
4082 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
4083 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
4084 (void *) gp->tpc,
4085 (void *) gp->o7,
4086 (void *) gp->i7,
4087 diff -urNp linux-2.6.32.41/arch/sparc/kernel/sys_sparc_32.c linux-2.6.32.41/arch/sparc/kernel/sys_sparc_32.c
4088 --- linux-2.6.32.41/arch/sparc/kernel/sys_sparc_32.c 2011-03-27 14:31:47.000000000 -0400
4089 +++ linux-2.6.32.41/arch/sparc/kernel/sys_sparc_32.c 2011-04-17 15:56:46.000000000 -0400
4090 @@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(str
4091 if (ARCH_SUN4C && len > 0x20000000)
4092 return -ENOMEM;
4093 if (!addr)
4094 - addr = TASK_UNMAPPED_BASE;
4095 + addr = current->mm->mmap_base;
4096
4097 if (flags & MAP_SHARED)
4098 addr = COLOUR_ALIGN(addr);
4099 @@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
4100 }
4101 if (TASK_SIZE - PAGE_SIZE - len < addr)
4102 return -ENOMEM;
4103 - if (!vmm || addr + len <= vmm->vm_start)
4104 + if (check_heap_stack_gap(vmm, addr, len))
4105 return addr;
4106 addr = vmm->vm_end;
4107 if (flags & MAP_SHARED)
4108 diff -urNp linux-2.6.32.41/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.41/arch/sparc/kernel/sys_sparc_64.c
4109 --- linux-2.6.32.41/arch/sparc/kernel/sys_sparc_64.c 2011-03-27 14:31:47.000000000 -0400
4110 +++ linux-2.6.32.41/arch/sparc/kernel/sys_sparc_64.c 2011-04-17 15:56:46.000000000 -0400
4111 @@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(str
4112 /* We do not accept a shared mapping if it would violate
4113 * cache aliasing constraints.
4114 */
4115 - if ((flags & MAP_SHARED) &&
4116 + if ((filp || (flags & MAP_SHARED)) &&
4117 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4118 return -EINVAL;
4119 return addr;
4120 @@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(str
4121 if (filp || (flags & MAP_SHARED))
4122 do_color_align = 1;
4123
4124 +#ifdef CONFIG_PAX_RANDMMAP
4125 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4126 +#endif
4127 +
4128 if (addr) {
4129 if (do_color_align)
4130 addr = COLOUR_ALIGN(addr, pgoff);
4131 @@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(str
4132 addr = PAGE_ALIGN(addr);
4133
4134 vma = find_vma(mm, addr);
4135 - if (task_size - len >= addr &&
4136 - (!vma || addr + len <= vma->vm_start))
4137 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4138 return addr;
4139 }
4140
4141 if (len > mm->cached_hole_size) {
4142 - start_addr = addr = mm->free_area_cache;
4143 + start_addr = addr = mm->free_area_cache;
4144 } else {
4145 - start_addr = addr = TASK_UNMAPPED_BASE;
4146 + start_addr = addr = mm->mmap_base;
4147 mm->cached_hole_size = 0;
4148 }
4149
4150 @@ -175,14 +178,14 @@ full_search:
4151 vma = find_vma(mm, VA_EXCLUDE_END);
4152 }
4153 if (unlikely(task_size < addr)) {
4154 - if (start_addr != TASK_UNMAPPED_BASE) {
4155 - start_addr = addr = TASK_UNMAPPED_BASE;
4156 + if (start_addr != mm->mmap_base) {
4157 + start_addr = addr = mm->mmap_base;
4158 mm->cached_hole_size = 0;
4159 goto full_search;
4160 }
4161 return -ENOMEM;
4162 }
4163 - if (likely(!vma || addr + len <= vma->vm_start)) {
4164 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4165 /*
4166 * Remember the place where we stopped the search:
4167 */
4168 @@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct fi
4169 /* We do not accept a shared mapping if it would violate
4170 * cache aliasing constraints.
4171 */
4172 - if ((flags & MAP_SHARED) &&
4173 + if ((filp || (flags & MAP_SHARED)) &&
4174 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4175 return -EINVAL;
4176 return addr;
4177 @@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct fi
4178 addr = PAGE_ALIGN(addr);
4179
4180 vma = find_vma(mm, addr);
4181 - if (task_size - len >= addr &&
4182 - (!vma || addr + len <= vma->vm_start))
4183 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4184 return addr;
4185 }
4186
4187 @@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct fi
4188 /* make sure it can fit in the remaining address space */
4189 if (likely(addr > len)) {
4190 vma = find_vma(mm, addr-len);
4191 - if (!vma || addr <= vma->vm_start) {
4192 + if (check_heap_stack_gap(vma, addr - len, len)) {
4193 /* remember the address as a hint for next time */
4194 return (mm->free_area_cache = addr-len);
4195 }
4196 @@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct fi
4197 if (unlikely(mm->mmap_base < len))
4198 goto bottomup;
4199
4200 - addr = mm->mmap_base-len;
4201 - if (do_color_align)
4202 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4203 + addr = mm->mmap_base - len;
4204
4205 do {
4206 + if (do_color_align)
4207 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4208 /*
4209 * Lookup failure means no vma is above this address,
4210 * else if new region fits below vma->vm_start,
4211 * return with success:
4212 */
4213 vma = find_vma(mm, addr);
4214 - if (likely(!vma || addr+len <= vma->vm_start)) {
4215 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4216 /* remember the address as a hint for next time */
4217 return (mm->free_area_cache = addr);
4218 }
4219 @@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct fi
4220 mm->cached_hole_size = vma->vm_start - addr;
4221
4222 /* try just below the current vma->vm_start */
4223 - addr = vma->vm_start-len;
4224 - if (do_color_align)
4225 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4226 - } while (likely(len < vma->vm_start));
4227 + addr = skip_heap_stack_gap(vma, len);
4228 + } while (!IS_ERR_VALUE(addr));
4229
4230 bottomup:
4231 /*
4232 @@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_str
4233 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
4234 sysctl_legacy_va_layout) {
4235 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4236 +
4237 +#ifdef CONFIG_PAX_RANDMMAP
4238 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4239 + mm->mmap_base += mm->delta_mmap;
4240 +#endif
4241 +
4242 mm->get_unmapped_area = arch_get_unmapped_area;
4243 mm->unmap_area = arch_unmap_area;
4244 } else {
4245 @@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_str
4246 gap = (task_size / 6 * 5);
4247
4248 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4249 +
4250 +#ifdef CONFIG_PAX_RANDMMAP
4251 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4252 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4253 +#endif
4254 +
4255 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4256 mm->unmap_area = arch_unmap_area_topdown;
4257 }
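
The hunks above replace the bare "!vma || addr + len <= vma->vm_start" tests in the sparc64 get_unmapped_area paths with check_heap_stack_gap()/skip_heap_stack_gap(); those helpers are defined elsewhere in this patch, not in this excerpt. As a rough user-space model of what the check adds (the names, flag value and one-page gap below are illustrative assumptions, not the patch's actual definitions):

#include <stdbool.h>
#include <stdio.h>

/* Simplified user-space model of the gap check; the real check_heap_stack_gap()
 * is defined elsewhere in this patch and also covers grows-up stacks and a
 * configurable gap size.  The PAGE_SIZE/VM_GROWSDOWN values are illustrative. */
#define MODEL_PAGE_SIZE    4096UL
#define MODEL_VM_GROWSDOWN 0x0100UL

struct model_vma {
	unsigned long vm_start;
	unsigned long vm_end;
	unsigned long vm_flags;
};

/* true if [addr, addr+len) fits below the next mapping, keeping a guard
 * page when that mapping is a grows-down stack */
static bool model_check_heap_stack_gap(const struct model_vma *vma,
				       unsigned long addr, unsigned long len)
{
	if (!vma)
		return true;
	if (addr + len > vma->vm_start)
		return false;
	if (vma->vm_flags & MODEL_VM_GROWSDOWN)
		return addr + len + MODEL_PAGE_SIZE <= vma->vm_start;
	return true;
}

int main(void)
{
	struct model_vma stack = { 0x7f0000000000UL, 0x7f0000100000UL, MODEL_VM_GROWSDOWN };

	printf("%d\n", model_check_heap_stack_gap(&stack, 0x7effffffe000UL, 0x1000)); /* 1: guard page left */
	printf("%d\n", model_check_heap_stack_gap(&stack, 0x7efffffff000UL, 0x1000)); /* 0: would abut the stack */
	return 0;
}

Routing every placement decision through one predicate keeps the stack guard-gap policy in a single place instead of repeating it in each architecture's allocator loop.
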
4258 diff -urNp linux-2.6.32.41/arch/sparc/kernel/traps_32.c linux-2.6.32.41/arch/sparc/kernel/traps_32.c
4259 --- linux-2.6.32.41/arch/sparc/kernel/traps_32.c 2011-03-27 14:31:47.000000000 -0400
4260 +++ linux-2.6.32.41/arch/sparc/kernel/traps_32.c 2011-04-17 15:56:46.000000000 -0400
4261 @@ -76,7 +76,7 @@ void die_if_kernel(char *str, struct pt_
4262 count++ < 30 &&
4263 (((unsigned long) rw) >= PAGE_OFFSET) &&
4264 !(((unsigned long) rw) & 0x7)) {
4265 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
4266 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
4267 (void *) rw->ins[7]);
4268 rw = (struct reg_window32 *)rw->ins[6];
4269 }
4270 diff -urNp linux-2.6.32.41/arch/sparc/kernel/traps_64.c linux-2.6.32.41/arch/sparc/kernel/traps_64.c
4271 --- linux-2.6.32.41/arch/sparc/kernel/traps_64.c 2011-03-27 14:31:47.000000000 -0400
4272 +++ linux-2.6.32.41/arch/sparc/kernel/traps_64.c 2011-04-17 15:56:46.000000000 -0400
4273 @@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_
4274 i + 1,
4275 p->trapstack[i].tstate, p->trapstack[i].tpc,
4276 p->trapstack[i].tnpc, p->trapstack[i].tt);
4277 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4278 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4279 }
4280 }
4281
4282 @@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long
4283
4284 lvl -= 0x100;
4285 if (regs->tstate & TSTATE_PRIV) {
4286 +
4287 +#ifdef CONFIG_PAX_REFCOUNT
4288 + if (lvl == 6)
4289 + pax_report_refcount_overflow(regs);
4290 +#endif
4291 +
4292 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4293 die_if_kernel(buffer, regs);
4294 }
4295 @@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long
4296 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4297 {
4298 char buffer[32];
4299 -
4300 +
4301 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4302 0, lvl, SIGTRAP) == NOTIFY_STOP)
4303 return;
4304
4305 +#ifdef CONFIG_PAX_REFCOUNT
4306 + if (lvl == 6)
4307 + pax_report_refcount_overflow(regs);
4308 +#endif
4309 +
4310 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4311
4312 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4313 @@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt
4314 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4315 printk("%s" "ERROR(%d): ",
4316 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4317 - printk("TPC<%pS>\n", (void *) regs->tpc);
4318 + printk("TPC<%pA>\n", (void *) regs->tpc);
4319 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4320 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4321 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4322 @@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type,
4323 smp_processor_id(),
4324 (type & 0x1) ? 'I' : 'D',
4325 regs->tpc);
4326 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4327 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4328 panic("Irrecoverable Cheetah+ parity error.");
4329 }
4330
4331 @@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type,
4332 smp_processor_id(),
4333 (type & 0x1) ? 'I' : 'D',
4334 regs->tpc);
4335 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4336 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4337 }
4338
4339 struct sun4v_error_entry {
4340 @@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_r
4341
4342 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4343 regs->tpc, tl);
4344 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4345 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4346 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4347 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4348 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4349 (void *) regs->u_regs[UREG_I7]);
4350 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4351 "pte[%lx] error[%lx]\n",
4352 @@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_r
4353
4354 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4355 regs->tpc, tl);
4356 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4357 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4358 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4359 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4360 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4361 (void *) regs->u_regs[UREG_I7]);
4362 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4363 "pte[%lx] error[%lx]\n",
4364 @@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk,
4365 fp = (unsigned long)sf->fp + STACK_BIAS;
4366 }
4367
4368 - printk(" [%016lx] %pS\n", pc, (void *) pc);
4369 + printk(" [%016lx] %pA\n", pc, (void *) pc);
4370 } while (++count < 16);
4371 }
4372
4373 @@ -2260,7 +2271,7 @@ void die_if_kernel(char *str, struct pt_
4374 while (rw &&
4375 count++ < 30&&
4376 is_kernel_stack(current, rw)) {
4377 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
4378 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
4379 (void *) rw->ins[7]);
4380
4381 rw = kernel_stack_up(rw);
4382 diff -urNp linux-2.6.32.41/arch/sparc/kernel/unaligned_64.c linux-2.6.32.41/arch/sparc/kernel/unaligned_64.c
4383 --- linux-2.6.32.41/arch/sparc/kernel/unaligned_64.c 2011-03-27 14:31:47.000000000 -0400
4384 +++ linux-2.6.32.41/arch/sparc/kernel/unaligned_64.c 2011-04-17 15:56:46.000000000 -0400
4385 @@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs
4386 if (count < 5) {
4387 last_time = jiffies;
4388 count++;
4389 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
4390 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
4391 regs->tpc, (void *) regs->tpc);
4392 }
4393 }
4394 diff -urNp linux-2.6.32.41/arch/sparc/lib/atomic_64.S linux-2.6.32.41/arch/sparc/lib/atomic_64.S
4395 --- linux-2.6.32.41/arch/sparc/lib/atomic_64.S 2011-03-27 14:31:47.000000000 -0400
4396 +++ linux-2.6.32.41/arch/sparc/lib/atomic_64.S 2011-04-17 15:56:46.000000000 -0400
4397 @@ -18,7 +18,12 @@
4398 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4399 BACKOFF_SETUP(%o2)
4400 1: lduw [%o1], %g1
4401 - add %g1, %o0, %g7
4402 + addcc %g1, %o0, %g7
4403 +
4404 +#ifdef CONFIG_PAX_REFCOUNT
4405 + tvs %icc, 6
4406 +#endif
4407 +
4408 cas [%o1], %g1, %g7
4409 cmp %g1, %g7
4410 bne,pn %icc, 2f
4411 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
4412 2: BACKOFF_SPIN(%o2, %o3, 1b)
4413 .size atomic_add, .-atomic_add
4414
4415 + .globl atomic_add_unchecked
4416 + .type atomic_add_unchecked,#function
4417 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4418 + BACKOFF_SETUP(%o2)
4419 +1: lduw [%o1], %g1
4420 + add %g1, %o0, %g7
4421 + cas [%o1], %g1, %g7
4422 + cmp %g1, %g7
4423 + bne,pn %icc, 2f
4424 + nop
4425 + retl
4426 + nop
4427 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4428 + .size atomic_add_unchecked, .-atomic_add_unchecked
4429 +
4430 .globl atomic_sub
4431 .type atomic_sub,#function
4432 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4433 BACKOFF_SETUP(%o2)
4434 1: lduw [%o1], %g1
4435 - sub %g1, %o0, %g7
4436 + subcc %g1, %o0, %g7
4437 +
4438 +#ifdef CONFIG_PAX_REFCOUNT
4439 + tvs %icc, 6
4440 +#endif
4441 +
4442 cas [%o1], %g1, %g7
4443 cmp %g1, %g7
4444 bne,pn %icc, 2f
4445 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
4446 2: BACKOFF_SPIN(%o2, %o3, 1b)
4447 .size atomic_sub, .-atomic_sub
4448
4449 + .globl atomic_sub_unchecked
4450 + .type atomic_sub_unchecked,#function
4451 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4452 + BACKOFF_SETUP(%o2)
4453 +1: lduw [%o1], %g1
4454 + sub %g1, %o0, %g7
4455 + cas [%o1], %g1, %g7
4456 + cmp %g1, %g7
4457 + bne,pn %icc, 2f
4458 + nop
4459 + retl
4460 + nop
4461 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4462 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
4463 +
4464 .globl atomic_add_ret
4465 .type atomic_add_ret,#function
4466 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4467 BACKOFF_SETUP(%o2)
4468 1: lduw [%o1], %g1
4469 - add %g1, %o0, %g7
4470 + addcc %g1, %o0, %g7
4471 +
4472 +#ifdef CONFIG_PAX_REFCOUNT
4473 + tvs %icc, 6
4474 +#endif
4475 +
4476 cas [%o1], %g1, %g7
4477 cmp %g1, %g7
4478 bne,pn %icc, 2f
4479 @@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1
4480 2: BACKOFF_SPIN(%o2, %o3, 1b)
4481 .size atomic_add_ret, .-atomic_add_ret
4482
4483 + .globl atomic_add_ret_unchecked
4484 + .type atomic_add_ret_unchecked,#function
4485 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4486 + BACKOFF_SETUP(%o2)
4487 +1: lduw [%o1], %g1
4488 + addcc %g1, %o0, %g7
4489 + cas [%o1], %g1, %g7
4490 + cmp %g1, %g7
4491 + bne,pn %icc, 2f
4492 + add %g7, %o0, %g7
4493 + sra %g7, 0, %o0
4494 + retl
4495 + nop
4496 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4497 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4498 +
4499 .globl atomic_sub_ret
4500 .type atomic_sub_ret,#function
4501 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4502 BACKOFF_SETUP(%o2)
4503 1: lduw [%o1], %g1
4504 - sub %g1, %o0, %g7
4505 + subcc %g1, %o0, %g7
4506 +
4507 +#ifdef CONFIG_PAX_REFCOUNT
4508 + tvs %icc, 6
4509 +#endif
4510 +
4511 cas [%o1], %g1, %g7
4512 cmp %g1, %g7
4513 bne,pn %icc, 2f
4514 @@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
4515 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4516 BACKOFF_SETUP(%o2)
4517 1: ldx [%o1], %g1
4518 - add %g1, %o0, %g7
4519 + addcc %g1, %o0, %g7
4520 +
4521 +#ifdef CONFIG_PAX_REFCOUNT
4522 + tvs %xcc, 6
4523 +#endif
4524 +
4525 casx [%o1], %g1, %g7
4526 cmp %g1, %g7
4527 bne,pn %xcc, 2f
4528 @@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 =
4529 2: BACKOFF_SPIN(%o2, %o3, 1b)
4530 .size atomic64_add, .-atomic64_add
4531
4532 + .globl atomic64_add_unchecked
4533 + .type atomic64_add_unchecked,#function
4534 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4535 + BACKOFF_SETUP(%o2)
4536 +1: ldx [%o1], %g1
4537 + addcc %g1, %o0, %g7
4538 + casx [%o1], %g1, %g7
4539 + cmp %g1, %g7
4540 + bne,pn %xcc, 2f
4541 + nop
4542 + retl
4543 + nop
4544 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4545 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
4546 +
4547 .globl atomic64_sub
4548 .type atomic64_sub,#function
4549 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4550 BACKOFF_SETUP(%o2)
4551 1: ldx [%o1], %g1
4552 - sub %g1, %o0, %g7
4553 + subcc %g1, %o0, %g7
4554 +
4555 +#ifdef CONFIG_PAX_REFCOUNT
4556 + tvs %xcc, 6
4557 +#endif
4558 +
4559 casx [%o1], %g1, %g7
4560 cmp %g1, %g7
4561 bne,pn %xcc, 2f
4562 @@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
4563 2: BACKOFF_SPIN(%o2, %o3, 1b)
4564 .size atomic64_sub, .-atomic64_sub
4565
4566 + .globl atomic64_sub_unchecked
4567 + .type atomic64_sub_unchecked,#function
4568 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4569 + BACKOFF_SETUP(%o2)
4570 +1: ldx [%o1], %g1
4571 + subcc %g1, %o0, %g7
4572 + casx [%o1], %g1, %g7
4573 + cmp %g1, %g7
4574 + bne,pn %xcc, 2f
4575 + nop
4576 + retl
4577 + nop
4578 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4579 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4580 +
4581 .globl atomic64_add_ret
4582 .type atomic64_add_ret,#function
4583 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4584 BACKOFF_SETUP(%o2)
4585 1: ldx [%o1], %g1
4586 - add %g1, %o0, %g7
4587 + addcc %g1, %o0, %g7
4588 +
4589 +#ifdef CONFIG_PAX_REFCOUNT
4590 + tvs %xcc, 6
4591 +#endif
4592 +
4593 casx [%o1], %g1, %g7
4594 cmp %g1, %g7
4595 bne,pn %xcc, 2f
4596 @@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4597 2: BACKOFF_SPIN(%o2, %o3, 1b)
4598 .size atomic64_add_ret, .-atomic64_add_ret
4599
4600 + .globl atomic64_add_ret_unchecked
4601 + .type atomic64_add_ret_unchecked,#function
4602 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4603 + BACKOFF_SETUP(%o2)
4604 +1: ldx [%o1], %g1
4605 + addcc %g1, %o0, %g7
4606 + casx [%o1], %g1, %g7
4607 + cmp %g1, %g7
4608 + bne,pn %xcc, 2f
4609 + add %g7, %o0, %g7
4610 + mov %g7, %o0
4611 + retl
4612 + nop
4613 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4614 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4615 +
4616 .globl atomic64_sub_ret
4617 .type atomic64_sub_ret,#function
4618 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4619 BACKOFF_SETUP(%o2)
4620 1: ldx [%o1], %g1
4621 - sub %g1, %o0, %g7
4622 + subcc %g1, %o0, %g7
4623 +
4624 +#ifdef CONFIG_PAX_REFCOUNT
4625 + tvs %xcc, 6
4626 +#endif
4627 +
4628 casx [%o1], %g1, %g7
4629 cmp %g1, %g7
4630 bne,pn %xcc, 2f
4631 diff -urNp linux-2.6.32.41/arch/sparc/lib/ksyms.c linux-2.6.32.41/arch/sparc/lib/ksyms.c
4632 --- linux-2.6.32.41/arch/sparc/lib/ksyms.c 2011-03-27 14:31:47.000000000 -0400
4633 +++ linux-2.6.32.41/arch/sparc/lib/ksyms.c 2011-04-17 15:56:46.000000000 -0400
4634 @@ -144,12 +144,17 @@ EXPORT_SYMBOL(__downgrade_write);
4635
4636 /* Atomic counter implementation. */
4637 EXPORT_SYMBOL(atomic_add);
4638 +EXPORT_SYMBOL(atomic_add_unchecked);
4639 EXPORT_SYMBOL(atomic_add_ret);
4640 EXPORT_SYMBOL(atomic_sub);
4641 +EXPORT_SYMBOL(atomic_sub_unchecked);
4642 EXPORT_SYMBOL(atomic_sub_ret);
4643 EXPORT_SYMBOL(atomic64_add);
4644 +EXPORT_SYMBOL(atomic64_add_unchecked);
4645 EXPORT_SYMBOL(atomic64_add_ret);
4646 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4647 EXPORT_SYMBOL(atomic64_sub);
4648 +EXPORT_SYMBOL(atomic64_sub_unchecked);
4649 EXPORT_SYMBOL(atomic64_sub_ret);
4650
4651 /* Atomic bit operations. */
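
The atomic_64.S and rwsem_64.S changes above turn each add/sub into addcc/subcc followed by "tvs %icc, 6" (or %xcc for the 64-bit variants), i.e. a trap into software trap 6 on signed overflow; the bad_trap()/bad_trap_tl1() hunks earlier in this section route that trap level to pax_report_refcount_overflow(). The *_unchecked variants keep the plain wrapping behaviour for counters that may legitimately overflow. A rough single-threaded C model of the two flavours (the real code is a cas retry loop; __builtin_add_overflow assumes GCC 5+ or Clang):

#include <stdio.h>
#include <stdlib.h>

/* Not part of the patch: user-space sketch of checked vs. unchecked atomic add. */
static void report_refcount_overflow(void)
{
	fprintf(stderr, "refcount overflow detected\n");
	abort();	/* stands in for the trap -> pax_report_refcount_overflow() path */
}

static int atomic_add_checked_model(int *v, int inc)
{
	int new_val;

	if (__builtin_add_overflow(*v, inc, &new_val))	/* addcc + tvs equivalent */
		report_refcount_overflow();
	return *v = new_val;
}

static int atomic_add_unchecked_model(int *v, int inc)
{
	/* wraps; signed result of the conversion is what GCC/Clang give on overflow */
	return *v = (int)((unsigned int)*v + (unsigned int)inc);
}

int main(void)
{
	int a = 0x7fffffff, b = 0x7fffffff;

	printf("unchecked: %d\n", atomic_add_unchecked_model(&b, 1));
	atomic_add_checked_model(&a, 1);	/* aborts here */
	return 0;
}

The checked form backs reference counts, so a count that would wrap is reported instead of silently turning into a use-after-free; the exported *_unchecked symbols give callers that want wrapping arithmetic an explicit opt-out.
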
4652 diff -urNp linux-2.6.32.41/arch/sparc/lib/Makefile linux-2.6.32.41/arch/sparc/lib/Makefile
4653 --- linux-2.6.32.41/arch/sparc/lib/Makefile 2011-03-27 14:31:47.000000000 -0400
4654 +++ linux-2.6.32.41/arch/sparc/lib/Makefile 2011-05-17 19:26:34.000000000 -0400
4655 @@ -2,7 +2,7 @@
4656 #
4657
4658 asflags-y := -ansi -DST_DIV0=0x02
4659 -ccflags-y := -Werror
4660 +#ccflags-y := -Werror
4661
4662 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4663 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4664 diff -urNp linux-2.6.32.41/arch/sparc/lib/rwsem_64.S linux-2.6.32.41/arch/sparc/lib/rwsem_64.S
4665 --- linux-2.6.32.41/arch/sparc/lib/rwsem_64.S 2011-03-27 14:31:47.000000000 -0400
4666 +++ linux-2.6.32.41/arch/sparc/lib/rwsem_64.S 2011-04-17 15:56:46.000000000 -0400
4667 @@ -11,7 +11,12 @@
4668 .globl __down_read
4669 __down_read:
4670 1: lduw [%o0], %g1
4671 - add %g1, 1, %g7
4672 + addcc %g1, 1, %g7
4673 +
4674 +#ifdef CONFIG_PAX_REFCOUNT
4675 + tvs %icc, 6
4676 +#endif
4677 +
4678 cas [%o0], %g1, %g7
4679 cmp %g1, %g7
4680 bne,pn %icc, 1b
4681 @@ -33,7 +38,12 @@ __down_read:
4682 .globl __down_read_trylock
4683 __down_read_trylock:
4684 1: lduw [%o0], %g1
4685 - add %g1, 1, %g7
4686 + addcc %g1, 1, %g7
4687 +
4688 +#ifdef CONFIG_PAX_REFCOUNT
4689 + tvs %icc, 6
4690 +#endif
4691 +
4692 cmp %g7, 0
4693 bl,pn %icc, 2f
4694 mov 0, %o1
4695 @@ -51,7 +61,12 @@ __down_write:
4696 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
4697 1:
4698 lduw [%o0], %g3
4699 - add %g3, %g1, %g7
4700 + addcc %g3, %g1, %g7
4701 +
4702 +#ifdef CONFIG_PAX_REFCOUNT
4703 + tvs %icc, 6
4704 +#endif
4705 +
4706 cas [%o0], %g3, %g7
4707 cmp %g3, %g7
4708 bne,pn %icc, 1b
4709 @@ -77,7 +92,12 @@ __down_write_trylock:
4710 cmp %g3, 0
4711 bne,pn %icc, 2f
4712 mov 0, %o1
4713 - add %g3, %g1, %g7
4714 + addcc %g3, %g1, %g7
4715 +
4716 +#ifdef CONFIG_PAX_REFCOUNT
4717 + tvs %icc, 6
4718 +#endif
4719 +
4720 cas [%o0], %g3, %g7
4721 cmp %g3, %g7
4722 bne,pn %icc, 1b
4723 @@ -90,7 +110,12 @@ __down_write_trylock:
4724 __up_read:
4725 1:
4726 lduw [%o0], %g1
4727 - sub %g1, 1, %g7
4728 + subcc %g1, 1, %g7
4729 +
4730 +#ifdef CONFIG_PAX_REFCOUNT
4731 + tvs %icc, 6
4732 +#endif
4733 +
4734 cas [%o0], %g1, %g7
4735 cmp %g1, %g7
4736 bne,pn %icc, 1b
4737 @@ -118,7 +143,12 @@ __up_write:
4738 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
4739 1:
4740 lduw [%o0], %g3
4741 - sub %g3, %g1, %g7
4742 + subcc %g3, %g1, %g7
4743 +
4744 +#ifdef CONFIG_PAX_REFCOUNT
4745 + tvs %icc, 6
4746 +#endif
4747 +
4748 cas [%o0], %g3, %g7
4749 cmp %g3, %g7
4750 bne,pn %icc, 1b
4751 @@ -143,7 +173,12 @@ __downgrade_write:
4752 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
4753 1:
4754 lduw [%o0], %g3
4755 - sub %g3, %g1, %g7
4756 + subcc %g3, %g1, %g7
4757 +
4758 +#ifdef CONFIG_PAX_REFCOUNT
4759 + tvs %icc, 6
4760 +#endif
4761 +
4762 cas [%o0], %g3, %g7
4763 cmp %g3, %g7
4764 bne,pn %icc, 1b
4765 diff -urNp linux-2.6.32.41/arch/sparc/Makefile linux-2.6.32.41/arch/sparc/Makefile
4766 --- linux-2.6.32.41/arch/sparc/Makefile 2011-03-27 14:31:47.000000000 -0400
4767 +++ linux-2.6.32.41/arch/sparc/Makefile 2011-04-17 15:56:46.000000000 -0400
4768 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
4769 # Export what is needed by arch/sparc/boot/Makefile
4770 export VMLINUX_INIT VMLINUX_MAIN
4771 VMLINUX_INIT := $(head-y) $(init-y)
4772 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4773 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4774 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4775 VMLINUX_MAIN += $(drivers-y) $(net-y)
4776
4777 diff -urNp linux-2.6.32.41/arch/sparc/mm/fault_32.c linux-2.6.32.41/arch/sparc/mm/fault_32.c
4778 --- linux-2.6.32.41/arch/sparc/mm/fault_32.c 2011-03-27 14:31:47.000000000 -0400
4779 +++ linux-2.6.32.41/arch/sparc/mm/fault_32.c 2011-04-17 15:56:46.000000000 -0400
4780 @@ -21,6 +21,9 @@
4781 #include <linux/interrupt.h>
4782 #include <linux/module.h>
4783 #include <linux/kdebug.h>
4784 +#include <linux/slab.h>
4785 +#include <linux/pagemap.h>
4786 +#include <linux/compiler.h>
4787
4788 #include <asm/system.h>
4789 #include <asm/page.h>
4790 @@ -167,6 +170,267 @@ static unsigned long compute_si_addr(str
4791 return safe_compute_effective_address(regs, insn);
4792 }
4793
4794 +#ifdef CONFIG_PAX_PAGEEXEC
4795 +#ifdef CONFIG_PAX_DLRESOLVE
4796 +static void pax_emuplt_close(struct vm_area_struct *vma)
4797 +{
4798 + vma->vm_mm->call_dl_resolve = 0UL;
4799 +}
4800 +
4801 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4802 +{
4803 + unsigned int *kaddr;
4804 +
4805 + vmf->page = alloc_page(GFP_HIGHUSER);
4806 + if (!vmf->page)
4807 + return VM_FAULT_OOM;
4808 +
4809 + kaddr = kmap(vmf->page);
4810 + memset(kaddr, 0, PAGE_SIZE);
4811 + kaddr[0] = 0x9DE3BFA8U; /* save */
4812 + flush_dcache_page(vmf->page);
4813 + kunmap(vmf->page);
4814 + return VM_FAULT_MAJOR;
4815 +}
4816 +
4817 +static const struct vm_operations_struct pax_vm_ops = {
4818 + .close = pax_emuplt_close,
4819 + .fault = pax_emuplt_fault
4820 +};
4821 +
4822 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4823 +{
4824 + int ret;
4825 +
4826 + vma->vm_mm = current->mm;
4827 + vma->vm_start = addr;
4828 + vma->vm_end = addr + PAGE_SIZE;
4829 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4830 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4831 + vma->vm_ops = &pax_vm_ops;
4832 +
4833 + ret = insert_vm_struct(current->mm, vma);
4834 + if (ret)
4835 + return ret;
4836 +
4837 + ++current->mm->total_vm;
4838 + return 0;
4839 +}
4840 +#endif
4841 +
4842 +/*
4843 + * PaX: decide what to do with offenders (regs->pc = fault address)
4844 + *
4845 + * returns 1 when task should be killed
4846 + * 2 when patched PLT trampoline was detected
4847 + * 3 when unpatched PLT trampoline was detected
4848 + */
4849 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4850 +{
4851 +
4852 +#ifdef CONFIG_PAX_EMUPLT
4853 + int err;
4854 +
4855 + do { /* PaX: patched PLT emulation #1 */
4856 + unsigned int sethi1, sethi2, jmpl;
4857 +
4858 + err = get_user(sethi1, (unsigned int *)regs->pc);
4859 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4860 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4861 +
4862 + if (err)
4863 + break;
4864 +
4865 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4866 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4867 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4868 + {
4869 + unsigned int addr;
4870 +
4871 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4872 + addr = regs->u_regs[UREG_G1];
4873 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4874 + regs->pc = addr;
4875 + regs->npc = addr+4;
4876 + return 2;
4877 + }
4878 + } while (0);
4879 +
4880 + { /* PaX: patched PLT emulation #2 */
4881 + unsigned int ba;
4882 +
4883 + err = get_user(ba, (unsigned int *)regs->pc);
4884 +
4885 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4886 + unsigned int addr;
4887 +
4888 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4889 + regs->pc = addr;
4890 + regs->npc = addr+4;
4891 + return 2;
4892 + }
4893 + }
4894 +
4895 + do { /* PaX: patched PLT emulation #3 */
4896 + unsigned int sethi, jmpl, nop;
4897 +
4898 + err = get_user(sethi, (unsigned int *)regs->pc);
4899 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4900 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4901 +
4902 + if (err)
4903 + break;
4904 +
4905 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4906 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4907 + nop == 0x01000000U)
4908 + {
4909 + unsigned int addr;
4910 +
4911 + addr = (sethi & 0x003FFFFFU) << 10;
4912 + regs->u_regs[UREG_G1] = addr;
4913 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4914 + regs->pc = addr;
4915 + regs->npc = addr+4;
4916 + return 2;
4917 + }
4918 + } while (0);
4919 +
4920 + do { /* PaX: unpatched PLT emulation step 1 */
4921 + unsigned int sethi, ba, nop;
4922 +
4923 + err = get_user(sethi, (unsigned int *)regs->pc);
4924 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
4925 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4926 +
4927 + if (err)
4928 + break;
4929 +
4930 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4931 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4932 + nop == 0x01000000U)
4933 + {
4934 + unsigned int addr, save, call;
4935 +
4936 + if ((ba & 0xFFC00000U) == 0x30800000U)
4937 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4938 + else
4939 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4940 +
4941 + err = get_user(save, (unsigned int *)addr);
4942 + err |= get_user(call, (unsigned int *)(addr+4));
4943 + err |= get_user(nop, (unsigned int *)(addr+8));
4944 + if (err)
4945 + break;
4946 +
4947 +#ifdef CONFIG_PAX_DLRESOLVE
4948 + if (save == 0x9DE3BFA8U &&
4949 + (call & 0xC0000000U) == 0x40000000U &&
4950 + nop == 0x01000000U)
4951 + {
4952 + struct vm_area_struct *vma;
4953 + unsigned long call_dl_resolve;
4954 +
4955 + down_read(&current->mm->mmap_sem);
4956 + call_dl_resolve = current->mm->call_dl_resolve;
4957 + up_read(&current->mm->mmap_sem);
4958 + if (likely(call_dl_resolve))
4959 + goto emulate;
4960 +
4961 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4962 +
4963 + down_write(&current->mm->mmap_sem);
4964 + if (current->mm->call_dl_resolve) {
4965 + call_dl_resolve = current->mm->call_dl_resolve;
4966 + up_write(&current->mm->mmap_sem);
4967 + if (vma)
4968 + kmem_cache_free(vm_area_cachep, vma);
4969 + goto emulate;
4970 + }
4971 +
4972 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4973 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4974 + up_write(&current->mm->mmap_sem);
4975 + if (vma)
4976 + kmem_cache_free(vm_area_cachep, vma);
4977 + return 1;
4978 + }
4979 +
4980 + if (pax_insert_vma(vma, call_dl_resolve)) {
4981 + up_write(&current->mm->mmap_sem);
4982 + kmem_cache_free(vm_area_cachep, vma);
4983 + return 1;
4984 + }
4985 +
4986 + current->mm->call_dl_resolve = call_dl_resolve;
4987 + up_write(&current->mm->mmap_sem);
4988 +
4989 +emulate:
4990 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4991 + regs->pc = call_dl_resolve;
4992 + regs->npc = addr+4;
4993 + return 3;
4994 + }
4995 +#endif
4996 +
4997 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4998 + if ((save & 0xFFC00000U) == 0x05000000U &&
4999 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5000 + nop == 0x01000000U)
5001 + {
5002 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5003 + regs->u_regs[UREG_G2] = addr + 4;
5004 + addr = (save & 0x003FFFFFU) << 10;
5005 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5006 + regs->pc = addr;
5007 + regs->npc = addr+4;
5008 + return 3;
5009 + }
5010 + }
5011 + } while (0);
5012 +
5013 + do { /* PaX: unpatched PLT emulation step 2 */
5014 + unsigned int save, call, nop;
5015 +
5016 + err = get_user(save, (unsigned int *)(regs->pc-4));
5017 + err |= get_user(call, (unsigned int *)regs->pc);
5018 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
5019 + if (err)
5020 + break;
5021 +
5022 + if (save == 0x9DE3BFA8U &&
5023 + (call & 0xC0000000U) == 0x40000000U &&
5024 + nop == 0x01000000U)
5025 + {
5026 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
5027 +
5028 + regs->u_regs[UREG_RETPC] = regs->pc;
5029 + regs->pc = dl_resolve;
5030 + regs->npc = dl_resolve+4;
5031 + return 3;
5032 + }
5033 + } while (0);
5034 +#endif
5035 +
5036 + return 1;
5037 +}
5038 +
5039 +void pax_report_insns(void *pc, void *sp)
5040 +{
5041 + unsigned long i;
5042 +
5043 + printk(KERN_ERR "PAX: bytes at PC: ");
5044 + for (i = 0; i < 8; i++) {
5045 + unsigned int c;
5046 + if (get_user(c, (unsigned int *)pc+i))
5047 + printk(KERN_CONT "???????? ");
5048 + else
5049 + printk(KERN_CONT "%08x ", c);
5050 + }
5051 + printk("\n");
5052 +}
5053 +#endif
5054 +
5055 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
5056 unsigned long address)
5057 {
5058 @@ -231,6 +495,24 @@ good_area:
5059 if(!(vma->vm_flags & VM_WRITE))
5060 goto bad_area;
5061 } else {
5062 +
5063 +#ifdef CONFIG_PAX_PAGEEXEC
5064 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
5065 + up_read(&mm->mmap_sem);
5066 + switch (pax_handle_fetch_fault(regs)) {
5067 +
5068 +#ifdef CONFIG_PAX_EMUPLT
5069 + case 2:
5070 + case 3:
5071 + return;
5072 +#endif
5073 +
5074 + }
5075 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
5076 + do_group_exit(SIGKILL);
5077 + }
5078 +#endif
5079 +
5080 /* Allow reads even for write-only mappings */
5081 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
5082 goto bad_area;
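
Most of the pattern matching in pax_handle_fetch_fault() above boils down to recognising a few fixed SPARC instruction encodings and recovering their signed immediates. The or/xor/add expressions such as ((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U are branch-free sign extensions of the 13-bit simm13 field (jmpl) and the 22-bit disp22 field (ba); the standalone check below, which is not part of the patch, verifies they match the usual shift-based form (two's-complement conversion on signed casts assumed, as GCC provides):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static int32_t simm13(uint32_t insn)
{
	return (int32_t)(((insn | 0xFFFFE000u) ^ 0x00001000u) + 0x00001000u);
}

static int32_t disp22(uint32_t insn)
{
	return (int32_t)(((insn | 0xFFC00000u) ^ 0x00200000u) + 0x00200000u);
}

int main(void)
{
	for (uint32_t i = 0; i < 0x2000u; i++)
		assert(simm13(i) == ((int32_t)(i << 19)) >> 19);
	for (uint32_t i = 0; i < 0x400000u; i += 97)
		assert(disp22(i) == ((int32_t)(i << 10)) >> 10);

	/* branch target of "ba" is pc + (sign_extend(disp22) << 2) */
	printf("disp22 of all-ones field -> byte offset %d\n", disp22(0x3FFFFFu) << 2);
	return 0;
}

The 64-bit handler in fault_64.c below does the same extension with 0xFFFFFFFFFFFFE000UL-style masks so the result lands directly in an unsigned long.
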
5083 diff -urNp linux-2.6.32.41/arch/sparc/mm/fault_64.c linux-2.6.32.41/arch/sparc/mm/fault_64.c
5084 --- linux-2.6.32.41/arch/sparc/mm/fault_64.c 2011-03-27 14:31:47.000000000 -0400
5085 +++ linux-2.6.32.41/arch/sparc/mm/fault_64.c 2011-04-17 15:56:46.000000000 -0400
5086 @@ -20,6 +20,9 @@
5087 #include <linux/kprobes.h>
5088 #include <linux/kdebug.h>
5089 #include <linux/percpu.h>
5090 +#include <linux/slab.h>
5091 +#include <linux/pagemap.h>
5092 +#include <linux/compiler.h>
5093
5094 #include <asm/page.h>
5095 #include <asm/pgtable.h>
5096 @@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs
5097 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
5098 regs->tpc);
5099 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
5100 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
5101 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
5102 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
5103 dump_stack();
5104 unhandled_fault(regs->tpc, current, regs);
5105 @@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_a
5106 show_regs(regs);
5107 }
5108
5109 +#ifdef CONFIG_PAX_PAGEEXEC
5110 +#ifdef CONFIG_PAX_DLRESOLVE
5111 +static void pax_emuplt_close(struct vm_area_struct *vma)
5112 +{
5113 + vma->vm_mm->call_dl_resolve = 0UL;
5114 +}
5115 +
5116 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5117 +{
5118 + unsigned int *kaddr;
5119 +
5120 + vmf->page = alloc_page(GFP_HIGHUSER);
5121 + if (!vmf->page)
5122 + return VM_FAULT_OOM;
5123 +
5124 + kaddr = kmap(vmf->page);
5125 + memset(kaddr, 0, PAGE_SIZE);
5126 + kaddr[0] = 0x9DE3BFA8U; /* save */
5127 + flush_dcache_page(vmf->page);
5128 + kunmap(vmf->page);
5129 + return VM_FAULT_MAJOR;
5130 +}
5131 +
5132 +static const struct vm_operations_struct pax_vm_ops = {
5133 + .close = pax_emuplt_close,
5134 + .fault = pax_emuplt_fault
5135 +};
5136 +
5137 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5138 +{
5139 + int ret;
5140 +
5141 + vma->vm_mm = current->mm;
5142 + vma->vm_start = addr;
5143 + vma->vm_end = addr + PAGE_SIZE;
5144 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5145 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5146 + vma->vm_ops = &pax_vm_ops;
5147 +
5148 + ret = insert_vm_struct(current->mm, vma);
5149 + if (ret)
5150 + return ret;
5151 +
5152 + ++current->mm->total_vm;
5153 + return 0;
5154 +}
5155 +#endif
5156 +
5157 +/*
5158 + * PaX: decide what to do with offenders (regs->tpc = fault address)
5159 + *
5160 + * returns 1 when task should be killed
5161 + * 2 when patched PLT trampoline was detected
5162 + * 3 when unpatched PLT trampoline was detected
5163 + */
5164 +static int pax_handle_fetch_fault(struct pt_regs *regs)
5165 +{
5166 +
5167 +#ifdef CONFIG_PAX_EMUPLT
5168 + int err;
5169 +
5170 + do { /* PaX: patched PLT emulation #1 */
5171 + unsigned int sethi1, sethi2, jmpl;
5172 +
5173 + err = get_user(sethi1, (unsigned int *)regs->tpc);
5174 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
5175 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
5176 +
5177 + if (err)
5178 + break;
5179 +
5180 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5181 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
5182 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
5183 + {
5184 + unsigned long addr;
5185 +
5186 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5187 + addr = regs->u_regs[UREG_G1];
5188 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5189 +
5190 + if (test_thread_flag(TIF_32BIT))
5191 + addr &= 0xFFFFFFFFUL;
5192 +
5193 + regs->tpc = addr;
5194 + regs->tnpc = addr+4;
5195 + return 2;
5196 + }
5197 + } while (0);
5198 +
5199 + { /* PaX: patched PLT emulation #2 */
5200 + unsigned int ba;
5201 +
5202 + err = get_user(ba, (unsigned int *)regs->tpc);
5203 +
5204 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5205 + unsigned long addr;
5206 +
5207 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5208 +
5209 + if (test_thread_flag(TIF_32BIT))
5210 + addr &= 0xFFFFFFFFUL;
5211 +
5212 + regs->tpc = addr;
5213 + regs->tnpc = addr+4;
5214 + return 2;
5215 + }
5216 + }
5217 +
5218 + do { /* PaX: patched PLT emulation #3 */
5219 + unsigned int sethi, jmpl, nop;
5220 +
5221 + err = get_user(sethi, (unsigned int *)regs->tpc);
5222 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5223 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5224 +
5225 + if (err)
5226 + break;
5227 +
5228 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5229 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5230 + nop == 0x01000000U)
5231 + {
5232 + unsigned long addr;
5233 +
5234 + addr = (sethi & 0x003FFFFFU) << 10;
5235 + regs->u_regs[UREG_G1] = addr;
5236 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5237 +
5238 + if (test_thread_flag(TIF_32BIT))
5239 + addr &= 0xFFFFFFFFUL;
5240 +
5241 + regs->tpc = addr;
5242 + regs->tnpc = addr+4;
5243 + return 2;
5244 + }
5245 + } while (0);
5246 +
5247 + do { /* PaX: patched PLT emulation #4 */
5248 + unsigned int sethi, mov1, call, mov2;
5249 +
5250 + err = get_user(sethi, (unsigned int *)regs->tpc);
5251 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5252 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
5253 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5254 +
5255 + if (err)
5256 + break;
5257 +
5258 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5259 + mov1 == 0x8210000FU &&
5260 + (call & 0xC0000000U) == 0x40000000U &&
5261 + mov2 == 0x9E100001U)
5262 + {
5263 + unsigned long addr;
5264 +
5265 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5266 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5267 +
5268 + if (test_thread_flag(TIF_32BIT))
5269 + addr &= 0xFFFFFFFFUL;
5270 +
5271 + regs->tpc = addr;
5272 + regs->tnpc = addr+4;
5273 + return 2;
5274 + }
5275 + } while (0);
5276 +
5277 + do { /* PaX: patched PLT emulation #5 */
5278 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5279 +
5280 + err = get_user(sethi, (unsigned int *)regs->tpc);
5281 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5282 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5283 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5284 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5285 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5286 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5287 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5288 +
5289 + if (err)
5290 + break;
5291 +
5292 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5293 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5294 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5295 + (or1 & 0xFFFFE000U) == 0x82106000U &&
5296 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5297 + sllx == 0x83287020U &&
5298 + jmpl == 0x81C04005U &&
5299 + nop == 0x01000000U)
5300 + {
5301 + unsigned long addr;
5302 +
5303 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5304 + regs->u_regs[UREG_G1] <<= 32;
5305 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5306 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5307 + regs->tpc = addr;
5308 + regs->tnpc = addr+4;
5309 + return 2;
5310 + }
5311 + } while (0);
5312 +
5313 + do { /* PaX: patched PLT emulation #6 */
5314 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5315 +
5316 + err = get_user(sethi, (unsigned int *)regs->tpc);
5317 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5318 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5319 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5320 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
5321 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5322 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5323 +
5324 + if (err)
5325 + break;
5326 +
5327 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5328 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5329 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5330 + sllx == 0x83287020U &&
5331 + (or & 0xFFFFE000U) == 0x8A116000U &&
5332 + jmpl == 0x81C04005U &&
5333 + nop == 0x01000000U)
5334 + {
5335 + unsigned long addr;
5336 +
5337 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5338 + regs->u_regs[UREG_G1] <<= 32;
5339 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5340 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5341 + regs->tpc = addr;
5342 + regs->tnpc = addr+4;
5343 + return 2;
5344 + }
5345 + } while (0);
5346 +
5347 + do { /* PaX: unpatched PLT emulation step 1 */
5348 + unsigned int sethi, ba, nop;
5349 +
5350 + err = get_user(sethi, (unsigned int *)regs->tpc);
5351 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5352 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5353 +
5354 + if (err)
5355 + break;
5356 +
5357 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5358 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5359 + nop == 0x01000000U)
5360 + {
5361 + unsigned long addr;
5362 + unsigned int save, call;
5363 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5364 +
5365 + if ((ba & 0xFFC00000U) == 0x30800000U)
5366 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5367 + else
5368 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5369 +
5370 + if (test_thread_flag(TIF_32BIT))
5371 + addr &= 0xFFFFFFFFUL;
5372 +
5373 + err = get_user(save, (unsigned int *)addr);
5374 + err |= get_user(call, (unsigned int *)(addr+4));
5375 + err |= get_user(nop, (unsigned int *)(addr+8));
5376 + if (err)
5377 + break;
5378 +
5379 +#ifdef CONFIG_PAX_DLRESOLVE
5380 + if (save == 0x9DE3BFA8U &&
5381 + (call & 0xC0000000U) == 0x40000000U &&
5382 + nop == 0x01000000U)
5383 + {
5384 + struct vm_area_struct *vma;
5385 + unsigned long call_dl_resolve;
5386 +
5387 + down_read(&current->mm->mmap_sem);
5388 + call_dl_resolve = current->mm->call_dl_resolve;
5389 + up_read(&current->mm->mmap_sem);
5390 + if (likely(call_dl_resolve))
5391 + goto emulate;
5392 +
5393 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5394 +
5395 + down_write(&current->mm->mmap_sem);
5396 + if (current->mm->call_dl_resolve) {
5397 + call_dl_resolve = current->mm->call_dl_resolve;
5398 + up_write(&current->mm->mmap_sem);
5399 + if (vma)
5400 + kmem_cache_free(vm_area_cachep, vma);
5401 + goto emulate;
5402 + }
5403 +
5404 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5405 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5406 + up_write(&current->mm->mmap_sem);
5407 + if (vma)
5408 + kmem_cache_free(vm_area_cachep, vma);
5409 + return 1;
5410 + }
5411 +
5412 + if (pax_insert_vma(vma, call_dl_resolve)) {
5413 + up_write(&current->mm->mmap_sem);
5414 + kmem_cache_free(vm_area_cachep, vma);
5415 + return 1;
5416 + }
5417 +
5418 + current->mm->call_dl_resolve = call_dl_resolve;
5419 + up_write(&current->mm->mmap_sem);
5420 +
5421 +emulate:
5422 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5423 + regs->tpc = call_dl_resolve;
5424 + regs->tnpc = addr+4;
5425 + return 3;
5426 + }
5427 +#endif
5428 +
5429 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5430 + if ((save & 0xFFC00000U) == 0x05000000U &&
5431 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5432 + nop == 0x01000000U)
5433 + {
5434 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5435 + regs->u_regs[UREG_G2] = addr + 4;
5436 + addr = (save & 0x003FFFFFU) << 10;
5437 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5438 +
5439 + if (test_thread_flag(TIF_32BIT))
5440 + addr &= 0xFFFFFFFFUL;
5441 +
5442 + regs->tpc = addr;
5443 + regs->tnpc = addr+4;
5444 + return 3;
5445 + }
5446 +
5447 + /* PaX: 64-bit PLT stub */
5448 + err = get_user(sethi1, (unsigned int *)addr);
5449 + err |= get_user(sethi2, (unsigned int *)(addr+4));
5450 + err |= get_user(or1, (unsigned int *)(addr+8));
5451 + err |= get_user(or2, (unsigned int *)(addr+12));
5452 + err |= get_user(sllx, (unsigned int *)(addr+16));
5453 + err |= get_user(add, (unsigned int *)(addr+20));
5454 + err |= get_user(jmpl, (unsigned int *)(addr+24));
5455 + err |= get_user(nop, (unsigned int *)(addr+28));
5456 + if (err)
5457 + break;
5458 +
5459 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5460 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5461 + (or1 & 0xFFFFE000U) == 0x88112000U &&
5462 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5463 + sllx == 0x89293020U &&
5464 + add == 0x8A010005U &&
5465 + jmpl == 0x89C14000U &&
5466 + nop == 0x01000000U)
5467 + {
5468 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5469 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5470 + regs->u_regs[UREG_G4] <<= 32;
5471 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5472 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5473 + regs->u_regs[UREG_G4] = addr + 24;
5474 + addr = regs->u_regs[UREG_G5];
5475 + regs->tpc = addr;
5476 + regs->tnpc = addr+4;
5477 + return 3;
5478 + }
5479 + }
5480 + } while (0);
5481 +
5482 +#ifdef CONFIG_PAX_DLRESOLVE
5483 + do { /* PaX: unpatched PLT emulation step 2 */
5484 + unsigned int save, call, nop;
5485 +
5486 + err = get_user(save, (unsigned int *)(regs->tpc-4));
5487 + err |= get_user(call, (unsigned int *)regs->tpc);
5488 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5489 + if (err)
5490 + break;
5491 +
5492 + if (save == 0x9DE3BFA8U &&
5493 + (call & 0xC0000000U) == 0x40000000U &&
5494 + nop == 0x01000000U)
5495 + {
5496 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5497 +
5498 + if (test_thread_flag(TIF_32BIT))
5499 + dl_resolve &= 0xFFFFFFFFUL;
5500 +
5501 + regs->u_regs[UREG_RETPC] = regs->tpc;
5502 + regs->tpc = dl_resolve;
5503 + regs->tnpc = dl_resolve+4;
5504 + return 3;
5505 + }
5506 + } while (0);
5507 +#endif
5508 +
5509 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5510 + unsigned int sethi, ba, nop;
5511 +
5512 + err = get_user(sethi, (unsigned int *)regs->tpc);
5513 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5514 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5515 +
5516 + if (err)
5517 + break;
5518 +
5519 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5520 + (ba & 0xFFF00000U) == 0x30600000U &&
5521 + nop == 0x01000000U)
5522 + {
5523 + unsigned long addr;
5524 +
5525 + addr = (sethi & 0x003FFFFFU) << 10;
5526 + regs->u_regs[UREG_G1] = addr;
5527 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5528 +
5529 + if (test_thread_flag(TIF_32BIT))
5530 + addr &= 0xFFFFFFFFUL;
5531 +
5532 + regs->tpc = addr;
5533 + regs->tnpc = addr+4;
5534 + return 2;
5535 + }
5536 + } while (0);
5537 +
5538 +#endif
5539 +
5540 + return 1;
5541 +}
5542 +
5543 +void pax_report_insns(void *pc, void *sp)
5544 +{
5545 + unsigned long i;
5546 +
5547 + printk(KERN_ERR "PAX: bytes at PC: ");
5548 + for (i = 0; i < 8; i++) {
5549 + unsigned int c;
5550 + if (get_user(c, (unsigned int *)pc+i))
5551 + printk(KERN_CONT "???????? ");
5552 + else
5553 + printk(KERN_CONT "%08x ", c);
5554 + }
5555 + printk("\n");
5556 +}
5557 +#endif
5558 +
5559 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5560 {
5561 struct mm_struct *mm = current->mm;
5562 @@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fau
5563 if (!vma)
5564 goto bad_area;
5565
5566 +#ifdef CONFIG_PAX_PAGEEXEC
5567 + /* PaX: detect ITLB misses on non-exec pages */
5568 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5569 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5570 + {
5571 + if (address != regs->tpc)
5572 + goto good_area;
5573 +
5574 + up_read(&mm->mmap_sem);
5575 + switch (pax_handle_fetch_fault(regs)) {
5576 +
5577 +#ifdef CONFIG_PAX_EMUPLT
5578 + case 2:
5579 + case 3:
5580 + return;
5581 +#endif
5582 +
5583 + }
5584 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
5585 + do_group_exit(SIGKILL);
5586 + }
5587 +#endif
5588 +
5589 /* Pure DTLB misses do not tell us whether the fault causing
5590 * load/store/atomic was a write or not, it only says that there
5591 * was no match. So in such a case we (carefully) read the
5592 diff -urNp linux-2.6.32.41/arch/sparc/mm/hugetlbpage.c linux-2.6.32.41/arch/sparc/mm/hugetlbpage.c
5593 --- linux-2.6.32.41/arch/sparc/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
5594 +++ linux-2.6.32.41/arch/sparc/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
5595 @@ -69,7 +69,7 @@ full_search:
5596 }
5597 return -ENOMEM;
5598 }
5599 - if (likely(!vma || addr + len <= vma->vm_start)) {
5600 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5601 /*
5602 * Remember the place where we stopped the search:
5603 */
5604 @@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct
5605 /* make sure it can fit in the remaining address space */
5606 if (likely(addr > len)) {
5607 vma = find_vma(mm, addr-len);
5608 - if (!vma || addr <= vma->vm_start) {
5609 + if (check_heap_stack_gap(vma, addr - len, len)) {
5610 /* remember the address as a hint for next time */
5611 return (mm->free_area_cache = addr-len);
5612 }
5613 @@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct
5614 if (unlikely(mm->mmap_base < len))
5615 goto bottomup;
5616
5617 - addr = (mm->mmap_base-len) & HPAGE_MASK;
5618 + addr = mm->mmap_base - len;
5619
5620 do {
5621 + addr &= HPAGE_MASK;
5622 /*
5623 * Lookup failure means no vma is above this address,
5624 * else if new region fits below vma->vm_start,
5625 * return with success:
5626 */
5627 vma = find_vma(mm, addr);
5628 - if (likely(!vma || addr+len <= vma->vm_start)) {
5629 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5630 /* remember the address as a hint for next time */
5631 return (mm->free_area_cache = addr);
5632 }
5633 @@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct
5634 mm->cached_hole_size = vma->vm_start - addr;
5635
5636 /* try just below the current vma->vm_start */
5637 - addr = (vma->vm_start-len) & HPAGE_MASK;
5638 - } while (likely(len < vma->vm_start));
5639 + addr = skip_heap_stack_gap(vma, len);
5640 + } while (!IS_ERR_VALUE(addr));
5641
5642 bottomup:
5643 /*
5644 @@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *f
5645 if (addr) {
5646 addr = ALIGN(addr, HPAGE_SIZE);
5647 vma = find_vma(mm, addr);
5648 - if (task_size - len >= addr &&
5649 - (!vma || addr + len <= vma->vm_start))
5650 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5651 return addr;
5652 }
5653 if (mm->get_unmapped_area == arch_get_unmapped_area)
5654 diff -urNp linux-2.6.32.41/arch/sparc/mm/init_32.c linux-2.6.32.41/arch/sparc/mm/init_32.c
5655 --- linux-2.6.32.41/arch/sparc/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
5656 +++ linux-2.6.32.41/arch/sparc/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
5657 @@ -317,6 +317,9 @@ extern void device_scan(void);
5658 pgprot_t PAGE_SHARED __read_mostly;
5659 EXPORT_SYMBOL(PAGE_SHARED);
5660
5661 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5662 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5663 +
5664 void __init paging_init(void)
5665 {
5666 switch(sparc_cpu_model) {
5667 @@ -345,17 +348,17 @@ void __init paging_init(void)
5668
5669 /* Initialize the protection map with non-constant, MMU dependent values. */
5670 protection_map[0] = PAGE_NONE;
5671 - protection_map[1] = PAGE_READONLY;
5672 - protection_map[2] = PAGE_COPY;
5673 - protection_map[3] = PAGE_COPY;
5674 + protection_map[1] = PAGE_READONLY_NOEXEC;
5675 + protection_map[2] = PAGE_COPY_NOEXEC;
5676 + protection_map[3] = PAGE_COPY_NOEXEC;
5677 protection_map[4] = PAGE_READONLY;
5678 protection_map[5] = PAGE_READONLY;
5679 protection_map[6] = PAGE_COPY;
5680 protection_map[7] = PAGE_COPY;
5681 protection_map[8] = PAGE_NONE;
5682 - protection_map[9] = PAGE_READONLY;
5683 - protection_map[10] = PAGE_SHARED;
5684 - protection_map[11] = PAGE_SHARED;
5685 + protection_map[9] = PAGE_READONLY_NOEXEC;
5686 + protection_map[10] = PAGE_SHARED_NOEXEC;
5687 + protection_map[11] = PAGE_SHARED_NOEXEC;
5688 protection_map[12] = PAGE_READONLY;
5689 protection_map[13] = PAGE_READONLY;
5690 protection_map[14] = PAGE_SHARED;
5691 diff -urNp linux-2.6.32.41/arch/sparc/mm/Makefile linux-2.6.32.41/arch/sparc/mm/Makefile
5692 --- linux-2.6.32.41/arch/sparc/mm/Makefile 2011-03-27 14:31:47.000000000 -0400
5693 +++ linux-2.6.32.41/arch/sparc/mm/Makefile 2011-04-17 15:56:46.000000000 -0400
5694 @@ -2,7 +2,7 @@
5695 #
5696
5697 asflags-y := -ansi
5698 -ccflags-y := -Werror
5699 +#ccflags-y := -Werror
5700
5701 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5702 obj-y += fault_$(BITS).o
5703 diff -urNp linux-2.6.32.41/arch/sparc/mm/srmmu.c linux-2.6.32.41/arch/sparc/mm/srmmu.c
5704 --- linux-2.6.32.41/arch/sparc/mm/srmmu.c 2011-03-27 14:31:47.000000000 -0400
5705 +++ linux-2.6.32.41/arch/sparc/mm/srmmu.c 2011-04-17 15:56:46.000000000 -0400
5706 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5707 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5708 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5709 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5710 +
5711 +#ifdef CONFIG_PAX_PAGEEXEC
5712 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5713 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5714 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5715 +#endif
5716 +
5717 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5718 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5719
5720 diff -urNp linux-2.6.32.41/arch/um/include/asm/kmap_types.h linux-2.6.32.41/arch/um/include/asm/kmap_types.h
5721 --- linux-2.6.32.41/arch/um/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
5722 +++ linux-2.6.32.41/arch/um/include/asm/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
5723 @@ -23,6 +23,7 @@ enum km_type {
5724 KM_IRQ1,
5725 KM_SOFTIRQ0,
5726 KM_SOFTIRQ1,
5727 + KM_CLEARPAGE,
5728 KM_TYPE_NR
5729 };
5730
5731 diff -urNp linux-2.6.32.41/arch/um/include/asm/page.h linux-2.6.32.41/arch/um/include/asm/page.h
5732 --- linux-2.6.32.41/arch/um/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
5733 +++ linux-2.6.32.41/arch/um/include/asm/page.h 2011-04-17 15:56:46.000000000 -0400
5734 @@ -14,6 +14,9 @@
5735 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5736 #define PAGE_MASK (~(PAGE_SIZE-1))
5737
5738 +#define ktla_ktva(addr) (addr)
5739 +#define ktva_ktla(addr) (addr)
5740 +
5741 #ifndef __ASSEMBLY__
5742
5743 struct page;
5744 diff -urNp linux-2.6.32.41/arch/um/kernel/process.c linux-2.6.32.41/arch/um/kernel/process.c
5745 --- linux-2.6.32.41/arch/um/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
5746 +++ linux-2.6.32.41/arch/um/kernel/process.c 2011-04-17 15:56:46.000000000 -0400
5747 @@ -393,22 +393,6 @@ int singlestepping(void * t)
5748 return 2;
5749 }
5750
5751 -/*
5752 - * Only x86 and x86_64 have an arch_align_stack().
5753 - * All other arches have "#define arch_align_stack(x) (x)"
5754 - * in their asm/system.h
5755 - * As this is included in UML from asm-um/system-generic.h,
5756 - * we can use it to behave as the subarch does.
5757 - */
5758 -#ifndef arch_align_stack
5759 -unsigned long arch_align_stack(unsigned long sp)
5760 -{
5761 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5762 - sp -= get_random_int() % 8192;
5763 - return sp & ~0xf;
5764 -}
5765 -#endif
5766 -
5767 unsigned long get_wchan(struct task_struct *p)
5768 {
5769 unsigned long stack_page, sp, ip;
5770 diff -urNp linux-2.6.32.41/arch/um/sys-i386/syscalls.c linux-2.6.32.41/arch/um/sys-i386/syscalls.c
5771 --- linux-2.6.32.41/arch/um/sys-i386/syscalls.c 2011-03-27 14:31:47.000000000 -0400
5772 +++ linux-2.6.32.41/arch/um/sys-i386/syscalls.c 2011-04-17 15:56:46.000000000 -0400
5773 @@ -11,6 +11,21 @@
5774 #include "asm/uaccess.h"
5775 #include "asm/unistd.h"
5776
5777 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5778 +{
5779 + unsigned long pax_task_size = TASK_SIZE;
5780 +
5781 +#ifdef CONFIG_PAX_SEGMEXEC
5782 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5783 + pax_task_size = SEGMEXEC_TASK_SIZE;
5784 +#endif
5785 +
5786 + if (len > pax_task_size || addr > pax_task_size - len)
5787 + return -EINVAL;
5788 +
5789 + return 0;
5790 +}
5791 +
5792 /*
5793 * Perform the select(nd, in, out, ex, tv) and mmap() system
5794 * calls. Linux/i386 didn't use to be able to handle more than
5795 diff -urNp linux-2.6.32.41/arch/x86/boot/bitops.h linux-2.6.32.41/arch/x86/boot/bitops.h
5796 --- linux-2.6.32.41/arch/x86/boot/bitops.h 2011-03-27 14:31:47.000000000 -0400
5797 +++ linux-2.6.32.41/arch/x86/boot/bitops.h 2011-04-17 15:56:46.000000000 -0400
5798 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int
5799 u8 v;
5800 const u32 *p = (const u32 *)addr;
5801
5802 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5803 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5804 return v;
5805 }
5806
5807 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int
5808
5809 static inline void set_bit(int nr, void *addr)
5810 {
5811 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5812 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5813 }
5814
5815 #endif /* BOOT_BITOPS_H */
5816 diff -urNp linux-2.6.32.41/arch/x86/boot/boot.h linux-2.6.32.41/arch/x86/boot/boot.h
5817 --- linux-2.6.32.41/arch/x86/boot/boot.h 2011-03-27 14:31:47.000000000 -0400
5818 +++ linux-2.6.32.41/arch/x86/boot/boot.h 2011-04-17 15:56:46.000000000 -0400
5819 @@ -82,7 +82,7 @@ static inline void io_delay(void)
5820 static inline u16 ds(void)
5821 {
5822 u16 seg;
5823 - asm("movw %%ds,%0" : "=rm" (seg));
5824 + asm volatile("movw %%ds,%0" : "=rm" (seg));
5825 return seg;
5826 }
5827
5828 @@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t
5829 static inline int memcmp(const void *s1, const void *s2, size_t len)
5830 {
5831 u8 diff;
5832 - asm("repe; cmpsb; setnz %0"
5833 + asm volatile("repe; cmpsb; setnz %0"
5834 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5835 return diff;
5836 }
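
The bitops.h and boot.h hunks above only add the volatile qualifier to inline asm that reads hardware state (%ds, btl bit tests, repe cmpsb), presumably so the compiler cannot cache or drop those reads: without volatile, GCC treats an asm with outputs as a pure function of its inputs, so it may merge identical copies or delete one whose result is unused. A minimal user-space illustration (assumes GCC or Clang on x86; not code from the patch):

#include <stdint.h>
#include <stdio.h>

static inline uint16_t read_ds_plain(void)
{
	uint16_t seg;
	asm("movw %%ds,%0" : "=rm" (seg));	/* compiler is allowed to CSE or drop this */
	return seg;
}

static inline uint16_t read_ds_volatile(void)
{
	uint16_t seg;
	asm volatile("movw %%ds,%0" : "=rm" (seg));	/* always emitted, never merged */
	return seg;
}

int main(void)
{
	printf("plain:    %#x %#x\n", read_ds_plain(), read_ds_plain());
	printf("volatile: %#x %#x\n", read_ds_volatile(), read_ds_volatile());
	return 0;
}

Compiling this with -O2 and inspecting the assembly typically shows the non-volatile pair collapsed into a single movw, which is exactly the latitude the added volatile removes.
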
5837 diff -urNp linux-2.6.32.41/arch/x86/boot/compressed/head_32.S linux-2.6.32.41/arch/x86/boot/compressed/head_32.S
5838 --- linux-2.6.32.41/arch/x86/boot/compressed/head_32.S 2011-03-27 14:31:47.000000000 -0400
5839 +++ linux-2.6.32.41/arch/x86/boot/compressed/head_32.S 2011-04-17 15:56:46.000000000 -0400
5840 @@ -76,7 +76,7 @@ ENTRY(startup_32)
5841 notl %eax
5842 andl %eax, %ebx
5843 #else
5844 - movl $LOAD_PHYSICAL_ADDR, %ebx
5845 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5846 #endif
5847
5848 /* Target address to relocate to for decompression */
5849 @@ -149,7 +149,7 @@ relocated:
5850 * and where it was actually loaded.
5851 */
5852 movl %ebp, %ebx
5853 - subl $LOAD_PHYSICAL_ADDR, %ebx
5854 + subl $____LOAD_PHYSICAL_ADDR, %ebx
5855 jz 2f /* Nothing to be done if loaded at compiled addr. */
5856 /*
5857 * Process relocations.
5858 @@ -157,8 +157,7 @@ relocated:
5859
5860 1: subl $4, %edi
5861 movl (%edi), %ecx
5862 - testl %ecx, %ecx
5863 - jz 2f
5864 + jecxz 2f
5865 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5866 jmp 1b
5867 2:
5868 diff -urNp linux-2.6.32.41/arch/x86/boot/compressed/head_64.S linux-2.6.32.41/arch/x86/boot/compressed/head_64.S
5869 --- linux-2.6.32.41/arch/x86/boot/compressed/head_64.S 2011-03-27 14:31:47.000000000 -0400
5870 +++ linux-2.6.32.41/arch/x86/boot/compressed/head_64.S 2011-04-17 15:56:46.000000000 -0400
5871 @@ -91,7 +91,7 @@ ENTRY(startup_32)
5872 notl %eax
5873 andl %eax, %ebx
5874 #else
5875 - movl $LOAD_PHYSICAL_ADDR, %ebx
5876 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5877 #endif
5878
5879 /* Target address to relocate to for decompression */
5880 @@ -234,7 +234,7 @@ ENTRY(startup_64)
5881 notq %rax
5882 andq %rax, %rbp
5883 #else
5884 - movq $LOAD_PHYSICAL_ADDR, %rbp
5885 + movq $____LOAD_PHYSICAL_ADDR, %rbp
5886 #endif
5887
5888 /* Target address to relocate to for decompression */
5889 diff -urNp linux-2.6.32.41/arch/x86/boot/compressed/misc.c linux-2.6.32.41/arch/x86/boot/compressed/misc.c
5890 --- linux-2.6.32.41/arch/x86/boot/compressed/misc.c 2011-03-27 14:31:47.000000000 -0400
5891 +++ linux-2.6.32.41/arch/x86/boot/compressed/misc.c 2011-04-17 15:56:46.000000000 -0400
5892 @@ -288,7 +288,7 @@ static void parse_elf(void *output)
5893 case PT_LOAD:
5894 #ifdef CONFIG_RELOCATABLE
5895 dest = output;
5896 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5897 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5898 #else
5899 dest = (void *)(phdr->p_paddr);
5900 #endif
5901 @@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *
5902 error("Destination address too large");
5903 #endif
5904 #ifndef CONFIG_RELOCATABLE
5905 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5906 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5907 error("Wrong destination address");
5908 #endif
5909
5910 diff -urNp linux-2.6.32.41/arch/x86/boot/compressed/mkpiggy.c linux-2.6.32.41/arch/x86/boot/compressed/mkpiggy.c
5911 --- linux-2.6.32.41/arch/x86/boot/compressed/mkpiggy.c 2011-03-27 14:31:47.000000000 -0400
5912 +++ linux-2.6.32.41/arch/x86/boot/compressed/mkpiggy.c 2011-04-17 15:56:46.000000000 -0400
5913 @@ -74,7 +74,7 @@ int main(int argc, char *argv[])
5914
5915 offs = (olen > ilen) ? olen - ilen : 0;
5916 offs += olen >> 12; /* Add 8 bytes for each 32K block */
5917 - offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
5918 + offs += 64*1024; /* Add 64K bytes slack */
5919 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
5920
5921 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
5922 diff -urNp linux-2.6.32.41/arch/x86/boot/compressed/relocs.c linux-2.6.32.41/arch/x86/boot/compressed/relocs.c
5923 --- linux-2.6.32.41/arch/x86/boot/compressed/relocs.c 2011-03-27 14:31:47.000000000 -0400
5924 +++ linux-2.6.32.41/arch/x86/boot/compressed/relocs.c 2011-04-17 15:56:46.000000000 -0400
5925 @@ -10,8 +10,11 @@
5926 #define USE_BSD
5927 #include <endian.h>
5928
5929 +#include "../../../../include/linux/autoconf.h"
5930 +
5931 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5932 static Elf32_Ehdr ehdr;
5933 +static Elf32_Phdr *phdr;
5934 static unsigned long reloc_count, reloc_idx;
5935 static unsigned long *relocs;
5936
5937 @@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
5938
5939 static int is_safe_abs_reloc(const char* sym_name)
5940 {
5941 - int i;
5942 + unsigned int i;
5943
5944 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
5945 if (!strcmp(sym_name, safe_abs_relocs[i]))
5946 @@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
5947 }
5948 }
5949
5950 +static void read_phdrs(FILE *fp)
5951 +{
5952 + unsigned int i;
5953 +
5954 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5955 + if (!phdr) {
5956 + die("Unable to allocate %d program headers\n",
5957 + ehdr.e_phnum);
5958 + }
5959 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5960 + die("Seek to %d failed: %s\n",
5961 + ehdr.e_phoff, strerror(errno));
5962 + }
5963 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5964 + die("Cannot read ELF program headers: %s\n",
5965 + strerror(errno));
5966 + }
5967 + for(i = 0; i < ehdr.e_phnum; i++) {
5968 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5969 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5970 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5971 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5972 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5973 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5974 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5975 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5976 + }
5977 +
5978 +}
5979 +
5980 static void read_shdrs(FILE *fp)
5981 {
5982 - int i;
5983 + unsigned int i;
5984 Elf32_Shdr shdr;
5985
5986 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5987 @@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
5988
5989 static void read_strtabs(FILE *fp)
5990 {
5991 - int i;
5992 + unsigned int i;
5993 for (i = 0; i < ehdr.e_shnum; i++) {
5994 struct section *sec = &secs[i];
5995 if (sec->shdr.sh_type != SHT_STRTAB) {
5996 @@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
5997
5998 static void read_symtabs(FILE *fp)
5999 {
6000 - int i,j;
6001 + unsigned int i,j;
6002 for (i = 0; i < ehdr.e_shnum; i++) {
6003 struct section *sec = &secs[i];
6004 if (sec->shdr.sh_type != SHT_SYMTAB) {
6005 @@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
6006
6007 static void read_relocs(FILE *fp)
6008 {
6009 - int i,j;
6010 + unsigned int i,j;
6011 + uint32_t base;
6012 +
6013 for (i = 0; i < ehdr.e_shnum; i++) {
6014 struct section *sec = &secs[i];
6015 if (sec->shdr.sh_type != SHT_REL) {
6016 @@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
6017 die("Cannot read symbol table: %s\n",
6018 strerror(errno));
6019 }
6020 + base = 0;
6021 + for (j = 0; j < ehdr.e_phnum; j++) {
6022 + if (phdr[j].p_type != PT_LOAD )
6023 + continue;
6024 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
6025 + continue;
6026 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
6027 + break;
6028 + }
6029 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
6030 Elf32_Rel *rel = &sec->reltab[j];
6031 - rel->r_offset = elf32_to_cpu(rel->r_offset);
6032 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
6033 rel->r_info = elf32_to_cpu(rel->r_info);
6034 }
6035 }
6036 @@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
6037
6038 static void print_absolute_symbols(void)
6039 {
6040 - int i;
6041 + unsigned int i;
6042 printf("Absolute symbols\n");
6043 printf(" Num: Value Size Type Bind Visibility Name\n");
6044 for (i = 0; i < ehdr.e_shnum; i++) {
6045 struct section *sec = &secs[i];
6046 char *sym_strtab;
6047 Elf32_Sym *sh_symtab;
6048 - int j;
6049 + unsigned int j;
6050
6051 if (sec->shdr.sh_type != SHT_SYMTAB) {
6052 continue;
6053 @@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
6054
6055 static void print_absolute_relocs(void)
6056 {
6057 - int i, printed = 0;
6058 + unsigned int i, printed = 0;
6059
6060 for (i = 0; i < ehdr.e_shnum; i++) {
6061 struct section *sec = &secs[i];
6062 struct section *sec_applies, *sec_symtab;
6063 char *sym_strtab;
6064 Elf32_Sym *sh_symtab;
6065 - int j;
6066 + unsigned int j;
6067 if (sec->shdr.sh_type != SHT_REL) {
6068 continue;
6069 }
6070 @@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
6071
6072 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6073 {
6074 - int i;
6075 + unsigned int i;
6076 /* Walk through the relocations */
6077 for (i = 0; i < ehdr.e_shnum; i++) {
6078 char *sym_strtab;
6079 Elf32_Sym *sh_symtab;
6080 struct section *sec_applies, *sec_symtab;
6081 - int j;
6082 + unsigned int j;
6083 struct section *sec = &secs[i];
6084
6085 if (sec->shdr.sh_type != SHT_REL) {
6086 @@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(El
6087 if (sym->st_shndx == SHN_ABS) {
6088 continue;
6089 }
6090 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6091 + if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6092 + continue;
6093 +
6094 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6095 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6096 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6097 + continue;
6098 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6099 + continue;
6100 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6101 + continue;
6102 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6103 + continue;
6104 +#endif
6105 if (r_type == R_386_NONE || r_type == R_386_PC32) {
6106 /*
6107 * NONE can be ignored and and PC relative
6108 @@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, co
6109
6110 static void emit_relocs(int as_text)
6111 {
6112 - int i;
6113 + unsigned int i;
6114 /* Count how many relocations I have and allocate space for them. */
6115 reloc_count = 0;
6116 walk_relocs(count_reloc);
6117 @@ -634,6 +693,7 @@ int main(int argc, char **argv)
6118 fname, strerror(errno));
6119 }
6120 read_ehdr(fp);
6121 + read_phdrs(fp);
6122 read_shdrs(fp);
6123 read_strtabs(fp);
6124 read_symtabs(fp);
6125 diff -urNp linux-2.6.32.41/arch/x86/boot/cpucheck.c linux-2.6.32.41/arch/x86/boot/cpucheck.c
6126 --- linux-2.6.32.41/arch/x86/boot/cpucheck.c 2011-03-27 14:31:47.000000000 -0400
6127 +++ linux-2.6.32.41/arch/x86/boot/cpucheck.c 2011-04-17 15:56:46.000000000 -0400
6128 @@ -74,7 +74,7 @@ static int has_fpu(void)
6129 u16 fcw = -1, fsw = -1;
6130 u32 cr0;
6131
6132 - asm("movl %%cr0,%0" : "=r" (cr0));
6133 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
6134 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6135 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6136 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6137 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6138 {
6139 u32 f0, f1;
6140
6141 - asm("pushfl ; "
6142 + asm volatile("pushfl ; "
6143 "pushfl ; "
6144 "popl %0 ; "
6145 "movl %0,%1 ; "
6146 @@ -115,7 +115,7 @@ static void get_flags(void)
6147 set_bit(X86_FEATURE_FPU, cpu.flags);
6148
6149 if (has_eflag(X86_EFLAGS_ID)) {
6150 - asm("cpuid"
6151 + asm volatile("cpuid"
6152 : "=a" (max_intel_level),
6153 "=b" (cpu_vendor[0]),
6154 "=d" (cpu_vendor[1]),
6155 @@ -124,7 +124,7 @@ static void get_flags(void)
6156
6157 if (max_intel_level >= 0x00000001 &&
6158 max_intel_level <= 0x0000ffff) {
6159 - asm("cpuid"
6160 + asm volatile("cpuid"
6161 : "=a" (tfms),
6162 "=c" (cpu.flags[4]),
6163 "=d" (cpu.flags[0])
6164 @@ -136,7 +136,7 @@ static void get_flags(void)
6165 cpu.model += ((tfms >> 16) & 0xf) << 4;
6166 }
6167
6168 - asm("cpuid"
6169 + asm volatile("cpuid"
6170 : "=a" (max_amd_level)
6171 : "a" (0x80000000)
6172 : "ebx", "ecx", "edx");
6173 @@ -144,7 +144,7 @@ static void get_flags(void)
6174 if (max_amd_level >= 0x80000001 &&
6175 max_amd_level <= 0x8000ffff) {
6176 u32 eax = 0x80000001;
6177 - asm("cpuid"
6178 + asm volatile("cpuid"
6179 : "+a" (eax),
6180 "=c" (cpu.flags[6]),
6181 "=d" (cpu.flags[1])
6182 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6183 u32 ecx = MSR_K7_HWCR;
6184 u32 eax, edx;
6185
6186 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6187 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6188 eax &= ~(1 << 15);
6189 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6190 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6191
6192 get_flags(); /* Make sure it really did something */
6193 err = check_flags();
6194 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6195 u32 ecx = MSR_VIA_FCR;
6196 u32 eax, edx;
6197
6198 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6199 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6200 eax |= (1<<1)|(1<<7);
6201 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6202 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6203
6204 set_bit(X86_FEATURE_CX8, cpu.flags);
6205 err = check_flags();
6206 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
6207 u32 eax, edx;
6208 u32 level = 1;
6209
6210 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6211 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6212 - asm("cpuid"
6213 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6214 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6215 + asm volatile("cpuid"
6216 : "+a" (level), "=d" (cpu.flags[0])
6217 : : "ecx", "ebx");
6218 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6219 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6220
6221 err = check_flags();
6222 }
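
     [Note on the cpucheck.c hunk above: it only adds the `volatile` qualifier. These asm statements (cpuid, rdmsr/wrmsr, control-register reads) have side effects or must be re-executed each time, and without `volatile` GCC is allowed to merge, hoist or drop an asm whose outputs look unused or redundant. A minimal user-space illustration of the difference, not part of the patch; x86 with GCC/Clang inline asm assumed:

     #include <stdint.h>

     /* Without "volatile" the compiler may CSE two such reads into one,
      * or delete them entirely if the results are never used. */
     static inline uint64_t rdtsc_maybe_merged(void)
     {
             uint32_t lo, hi;
             asm("rdtsc" : "=a" (lo), "=d" (hi));
             return ((uint64_t)hi << 32) | lo;
     }

     /* With "volatile" every call really executes rdtsc, in program order. */
     static inline uint64_t rdtsc_always(void)
     {
             uint32_t lo, hi;
             asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
             return ((uint64_t)hi << 32) | lo;
     }
     ]
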
6223 diff -urNp linux-2.6.32.41/arch/x86/boot/header.S linux-2.6.32.41/arch/x86/boot/header.S
6224 --- linux-2.6.32.41/arch/x86/boot/header.S 2011-03-27 14:31:47.000000000 -0400
6225 +++ linux-2.6.32.41/arch/x86/boot/header.S 2011-04-17 15:56:46.000000000 -0400
6226 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
6227 # single linked list of
6228 # struct setup_data
6229
6230 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6231 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6232
6233 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6234 #define VO_INIT_SIZE (VO__end - VO__text)
6235 diff -urNp linux-2.6.32.41/arch/x86/boot/memory.c linux-2.6.32.41/arch/x86/boot/memory.c
6236 --- linux-2.6.32.41/arch/x86/boot/memory.c 2011-03-27 14:31:47.000000000 -0400
6237 +++ linux-2.6.32.41/arch/x86/boot/memory.c 2011-04-17 15:56:46.000000000 -0400
6238 @@ -19,7 +19,7 @@
6239
6240 static int detect_memory_e820(void)
6241 {
6242 - int count = 0;
6243 + unsigned int count = 0;
6244 struct biosregs ireg, oreg;
6245 struct e820entry *desc = boot_params.e820_map;
6246 static struct e820entry buf; /* static so it is zeroed */
6247 diff -urNp linux-2.6.32.41/arch/x86/boot/video.c linux-2.6.32.41/arch/x86/boot/video.c
6248 --- linux-2.6.32.41/arch/x86/boot/video.c 2011-03-27 14:31:47.000000000 -0400
6249 +++ linux-2.6.32.41/arch/x86/boot/video.c 2011-04-17 15:56:46.000000000 -0400
6250 @@ -90,7 +90,7 @@ static void store_mode_params(void)
6251 static unsigned int get_entry(void)
6252 {
6253 char entry_buf[4];
6254 - int i, len = 0;
6255 + unsigned int i, len = 0;
6256 int key;
6257 unsigned int v;
6258
6259 diff -urNp linux-2.6.32.41/arch/x86/boot/video-vesa.c linux-2.6.32.41/arch/x86/boot/video-vesa.c
6260 --- linux-2.6.32.41/arch/x86/boot/video-vesa.c 2011-03-27 14:31:47.000000000 -0400
6261 +++ linux-2.6.32.41/arch/x86/boot/video-vesa.c 2011-04-17 15:56:46.000000000 -0400
6262 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6263
6264 boot_params.screen_info.vesapm_seg = oreg.es;
6265 boot_params.screen_info.vesapm_off = oreg.di;
6266 + boot_params.screen_info.vesapm_size = oreg.cx;
6267 }
6268
6269 /*
6270 diff -urNp linux-2.6.32.41/arch/x86/ia32/ia32_aout.c linux-2.6.32.41/arch/x86/ia32/ia32_aout.c
6271 --- linux-2.6.32.41/arch/x86/ia32/ia32_aout.c 2011-03-27 14:31:47.000000000 -0400
6272 +++ linux-2.6.32.41/arch/x86/ia32/ia32_aout.c 2011-04-17 15:56:46.000000000 -0400
6273 @@ -169,6 +169,8 @@ static int aout_core_dump(long signr, st
6274 unsigned long dump_start, dump_size;
6275 struct user32 dump;
6276
6277 + memset(&dump, 0, sizeof(dump));
6278 +
6279 fs = get_fs();
6280 set_fs(KERNEL_DS);
6281 has_dumped = 1;
6282 @@ -218,12 +220,6 @@ static int aout_core_dump(long signr, st
6283 dump_size = dump.u_ssize << PAGE_SHIFT;
6284 DUMP_WRITE(dump_start, dump_size);
6285 }
6286 - /*
6287 - * Finally dump the task struct. Not be used by gdb, but
6288 - * could be useful
6289 - */
6290 - set_fs(KERNEL_DS);
6291 - DUMP_WRITE(current, sizeof(*current));
6292 end_coredump:
6293 set_fs(fs);
6294 return has_dumped;
6295 diff -urNp linux-2.6.32.41/arch/x86/ia32/ia32entry.S linux-2.6.32.41/arch/x86/ia32/ia32entry.S
6296 --- linux-2.6.32.41/arch/x86/ia32/ia32entry.S 2011-03-27 14:31:47.000000000 -0400
6297 +++ linux-2.6.32.41/arch/x86/ia32/ia32entry.S 2011-06-04 20:29:52.000000000 -0400
6298 @@ -13,6 +13,7 @@
6299 #include <asm/thread_info.h>
6300 #include <asm/segment.h>
6301 #include <asm/irqflags.h>
6302 +#include <asm/pgtable.h>
6303 #include <linux/linkage.h>
6304
6305 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6306 @@ -93,6 +94,30 @@ ENTRY(native_irq_enable_sysexit)
6307 ENDPROC(native_irq_enable_sysexit)
6308 #endif
6309
6310 + .macro pax_enter_kernel_user
6311 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6312 + call pax_enter_kernel_user
6313 +#endif
6314 + .endm
6315 +
6316 + .macro pax_exit_kernel_user
6317 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6318 + call pax_exit_kernel_user
6319 +#endif
6320 +#ifdef CONFIG_PAX_RANDKSTACK
6321 + pushq %rax
6322 + call pax_randomize_kstack
6323 + popq %rax
6324 +#endif
6325 + pax_erase_kstack
6326 + .endm
6327 +
6328 +.macro pax_erase_kstack
6329 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6330 + call pax_erase_kstack
6331 +#endif
6332 +.endm
6333 +
6334 /*
6335 * 32bit SYSENTER instruction entry.
6336 *
6337 @@ -119,7 +144,7 @@ ENTRY(ia32_sysenter_target)
6338 CFI_REGISTER rsp,rbp
6339 SWAPGS_UNSAFE_STACK
6340 movq PER_CPU_VAR(kernel_stack), %rsp
6341 - addq $(KERNEL_STACK_OFFSET),%rsp
6342 + pax_enter_kernel_user
6343 /*
6344 * No need to follow this irqs on/off section: the syscall
6345 * disabled irqs, here we enable it straight after entry:
6346 @@ -135,7 +160,8 @@ ENTRY(ia32_sysenter_target)
6347 pushfq
6348 CFI_ADJUST_CFA_OFFSET 8
6349 /*CFI_REL_OFFSET rflags,0*/
6350 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6351 + GET_THREAD_INFO(%r10)
6352 + movl TI_sysenter_return(%r10), %r10d
6353 CFI_REGISTER rip,r10
6354 pushq $__USER32_CS
6355 CFI_ADJUST_CFA_OFFSET 8
6356 @@ -150,6 +176,12 @@ ENTRY(ia32_sysenter_target)
6357 SAVE_ARGS 0,0,1
6358 /* no need to do an access_ok check here because rbp has been
6359 32bit zero extended */
6360 +
6361 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6362 + mov $PAX_USER_SHADOW_BASE,%r10
6363 + add %r10,%rbp
6364 +#endif
6365 +
6366 1: movl (%rbp),%ebp
6367 .section __ex_table,"a"
6368 .quad 1b,ia32_badarg
6369 @@ -172,6 +204,7 @@ sysenter_dispatch:
6370 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6371 jnz sysexit_audit
6372 sysexit_from_sys_call:
6373 + pax_exit_kernel_user
6374 andl $~TS_COMPAT,TI_status(%r10)
6375 /* clear IF, that popfq doesn't enable interrupts early */
6376 andl $~0x200,EFLAGS-R11(%rsp)
6377 @@ -200,6 +233,9 @@ sysexit_from_sys_call:
6378 movl %eax,%esi /* 2nd arg: syscall number */
6379 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
6380 call audit_syscall_entry
6381 +
6382 + pax_erase_kstack
6383 +
6384 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
6385 cmpq $(IA32_NR_syscalls-1),%rax
6386 ja ia32_badsys
6387 @@ -252,6 +288,9 @@ sysenter_tracesys:
6388 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
6389 movq %rsp,%rdi /* &pt_regs -> arg1 */
6390 call syscall_trace_enter
6391 +
6392 + pax_erase_kstack
6393 +
6394 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6395 RESTORE_REST
6396 cmpq $(IA32_NR_syscalls-1),%rax
6397 @@ -283,19 +322,24 @@ ENDPROC(ia32_sysenter_target)
6398 ENTRY(ia32_cstar_target)
6399 CFI_STARTPROC32 simple
6400 CFI_SIGNAL_FRAME
6401 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
6402 + CFI_DEF_CFA rsp,0
6403 CFI_REGISTER rip,rcx
6404 /*CFI_REGISTER rflags,r11*/
6405 SWAPGS_UNSAFE_STACK
6406 movl %esp,%r8d
6407 CFI_REGISTER rsp,r8
6408 movq PER_CPU_VAR(kernel_stack),%rsp
6409 +
6410 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6411 + pax_enter_kernel_user
6412 +#endif
6413 +
6414 /*
6415 * No need to follow this irqs on/off section: the syscall
6416 * disabled irqs and here we enable it straight after entry:
6417 */
6418 ENABLE_INTERRUPTS(CLBR_NONE)
6419 - SAVE_ARGS 8,1,1
6420 + SAVE_ARGS 8*6,1,1
6421 movl %eax,%eax /* zero extension */
6422 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
6423 movq %rcx,RIP-ARGOFFSET(%rsp)
6424 @@ -311,6 +355,12 @@ ENTRY(ia32_cstar_target)
6425 /* no need to do an access_ok check here because r8 has been
6426 32bit zero extended */
6427 /* hardware stack frame is complete now */
6428 +
6429 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6430 + mov $PAX_USER_SHADOW_BASE,%r10
6431 + add %r10,%r8
6432 +#endif
6433 +
6434 1: movl (%r8),%r9d
6435 .section __ex_table,"a"
6436 .quad 1b,ia32_badarg
6437 @@ -333,6 +383,7 @@ cstar_dispatch:
6438 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6439 jnz sysretl_audit
6440 sysretl_from_sys_call:
6441 + pax_exit_kernel_user
6442 andl $~TS_COMPAT,TI_status(%r10)
6443 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
6444 movl RIP-ARGOFFSET(%rsp),%ecx
6445 @@ -370,6 +421,9 @@ cstar_tracesys:
6446 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6447 movq %rsp,%rdi /* &pt_regs -> arg1 */
6448 call syscall_trace_enter
6449 +
6450 + pax_erase_kstack
6451 +
6452 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
6453 RESTORE_REST
6454 xchgl %ebp,%r9d
6455 @@ -415,6 +469,7 @@ ENTRY(ia32_syscall)
6456 CFI_REL_OFFSET rip,RIP-RIP
6457 PARAVIRT_ADJUST_EXCEPTION_FRAME
6458 SWAPGS
6459 + pax_enter_kernel_user
6460 /*
6461 * No need to follow this irqs on/off section: the syscall
6462 * disabled irqs and here we enable it straight after entry:
6463 @@ -448,6 +503,9 @@ ia32_tracesys:
6464 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6465 movq %rsp,%rdi /* &pt_regs -> arg1 */
6466 call syscall_trace_enter
6467 +
6468 + pax_erase_kstack
6469 +
6470 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6471 RESTORE_REST
6472 cmpq $(IA32_NR_syscalls-1),%rax
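
     [Note on the ia32entry.S hunks above: the CONFIG_PAX_MEMORY_UDEREF blocks add PAX_USER_SHADOW_BASE to the user-supplied pointer (%rbp, %r8) before the faulting load from the user stack, i.e. kernel code reaches the userland address through an alias offset by that constant rather than through the raw address. A one-line C restatement of the adjustment, illustrative only and reusing the constant named in the hunk:

     #ifdef CONFIG_PAX_MEMORY_UDEREF
     # define uderef_shadow(uaddr)  ((unsigned long)(uaddr) + PAX_USER_SHADOW_BASE)
     #else
     # define uderef_shadow(uaddr)  ((unsigned long)(uaddr))
     #endif
     ]
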
6473 diff -urNp linux-2.6.32.41/arch/x86/ia32/ia32_signal.c linux-2.6.32.41/arch/x86/ia32/ia32_signal.c
6474 --- linux-2.6.32.41/arch/x86/ia32/ia32_signal.c 2011-03-27 14:31:47.000000000 -0400
6475 +++ linux-2.6.32.41/arch/x86/ia32/ia32_signal.c 2011-04-17 15:56:46.000000000 -0400
6476 @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
6477 sp -= frame_size;
6478 /* Align the stack pointer according to the i386 ABI,
6479 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6480 - sp = ((sp + 4) & -16ul) - 4;
6481 + sp = ((sp - 12) & -16ul) - 4;
6482 return (void __user *) sp;
6483 }
6484
6485 @@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
6486 * These are actually not used anymore, but left because some
6487 * gdb versions depend on them as a marker.
6488 */
6489 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6490 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6491 } put_user_catch(err);
6492
6493 if (err)
6494 @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
6495 0xb8,
6496 __NR_ia32_rt_sigreturn,
6497 0x80cd,
6498 - 0,
6499 + 0
6500 };
6501
6502 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6503 @@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
6504
6505 if (ka->sa.sa_flags & SA_RESTORER)
6506 restorer = ka->sa.sa_restorer;
6507 + else if (current->mm->context.vdso)
6508 + /* Return stub is in 32bit vsyscall page */
6509 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6510 else
6511 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6512 - rt_sigreturn);
6513 + restorer = &frame->retcode;
6514 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6515
6516 /*
6517 * Not actually used anymore, but left because some gdb
6518 * versions need it.
6519 */
6520 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6521 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6522 } put_user_catch(err);
6523
6524 if (err)
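
     [Note on the get_sigframe() change above: both expressions satisfy the i386 ABI comment, i.e. they leave (sp + 4) a multiple of 16; for every sp the patched form evaluates to exactly 16 less than the old one, so the frame lands one 16-byte slot further below the area already reserved by "sp -= frame_size". A quick self-contained check of both facts, illustrative and not from the patch:

     #include <assert.h>

     int main(void)
     {
             for (unsigned long sp = 0x1000; sp < 0x1040; sp++) {
                     unsigned long old = ((sp + 4) & -16ul) - 4;
                     unsigned long new = ((sp - 12) & -16ul) - 4;

                     assert(((old + 4) & 15) == 0); /* old form is ABI-aligned */
                     assert(((new + 4) & 15) == 0); /* new form is ABI-aligned */
                     assert(new == old - 16);       /* new form sits 16 bytes lower */
             }
             return 0;
     }
     ]
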
6525 diff -urNp linux-2.6.32.41/arch/x86/include/asm/alternative.h linux-2.6.32.41/arch/x86/include/asm/alternative.h
6526 --- linux-2.6.32.41/arch/x86/include/asm/alternative.h 2011-03-27 14:31:47.000000000 -0400
6527 +++ linux-2.6.32.41/arch/x86/include/asm/alternative.h 2011-04-17 15:56:46.000000000 -0400
6528 @@ -85,7 +85,7 @@ static inline void alternatives_smp_swit
6529 " .byte 662b-661b\n" /* sourcelen */ \
6530 " .byte 664f-663f\n" /* replacementlen */ \
6531 ".previous\n" \
6532 - ".section .altinstr_replacement, \"ax\"\n" \
6533 + ".section .altinstr_replacement, \"a\"\n" \
6534 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
6535 ".previous"
6536
6537 diff -urNp linux-2.6.32.41/arch/x86/include/asm/apm.h linux-2.6.32.41/arch/x86/include/asm/apm.h
6538 --- linux-2.6.32.41/arch/x86/include/asm/apm.h 2011-03-27 14:31:47.000000000 -0400
6539 +++ linux-2.6.32.41/arch/x86/include/asm/apm.h 2011-04-17 15:56:46.000000000 -0400
6540 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
6541 __asm__ __volatile__(APM_DO_ZERO_SEGS
6542 "pushl %%edi\n\t"
6543 "pushl %%ebp\n\t"
6544 - "lcall *%%cs:apm_bios_entry\n\t"
6545 + "lcall *%%ss:apm_bios_entry\n\t"
6546 "setc %%al\n\t"
6547 "popl %%ebp\n\t"
6548 "popl %%edi\n\t"
6549 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
6550 __asm__ __volatile__(APM_DO_ZERO_SEGS
6551 "pushl %%edi\n\t"
6552 "pushl %%ebp\n\t"
6553 - "lcall *%%cs:apm_bios_entry\n\t"
6554 + "lcall *%%ss:apm_bios_entry\n\t"
6555 "setc %%bl\n\t"
6556 "popl %%ebp\n\t"
6557 "popl %%edi\n\t"
6558 diff -urNp linux-2.6.32.41/arch/x86/include/asm/atomic_32.h linux-2.6.32.41/arch/x86/include/asm/atomic_32.h
6559 --- linux-2.6.32.41/arch/x86/include/asm/atomic_32.h 2011-03-27 14:31:47.000000000 -0400
6560 +++ linux-2.6.32.41/arch/x86/include/asm/atomic_32.h 2011-05-04 17:56:20.000000000 -0400
6561 @@ -25,6 +25,17 @@ static inline int atomic_read(const atom
6562 }
6563
6564 /**
6565 + * atomic_read_unchecked - read atomic variable
6566 + * @v: pointer of type atomic_unchecked_t
6567 + *
6568 + * Atomically reads the value of @v.
6569 + */
6570 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6571 +{
6572 + return v->counter;
6573 +}
6574 +
6575 +/**
6576 * atomic_set - set atomic variable
6577 * @v: pointer of type atomic_t
6578 * @i: required value
6579 @@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *
6580 }
6581
6582 /**
6583 + * atomic_set_unchecked - set atomic variable
6584 + * @v: pointer of type atomic_unchecked_t
6585 + * @i: required value
6586 + *
6587 + * Atomically sets the value of @v to @i.
6588 + */
6589 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6590 +{
6591 + v->counter = i;
6592 +}
6593 +
6594 +/**
6595 * atomic_add - add integer to atomic variable
6596 * @i: integer value to add
6597 * @v: pointer of type atomic_t
6598 @@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *
6599 */
6600 static inline void atomic_add(int i, atomic_t *v)
6601 {
6602 - asm volatile(LOCK_PREFIX "addl %1,%0"
6603 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6604 +
6605 +#ifdef CONFIG_PAX_REFCOUNT
6606 + "jno 0f\n"
6607 + LOCK_PREFIX "subl %1,%0\n"
6608 + "int $4\n0:\n"
6609 + _ASM_EXTABLE(0b, 0b)
6610 +#endif
6611 +
6612 + : "+m" (v->counter)
6613 + : "ir" (i));
6614 +}
6615 +
6616 +/**
6617 + * atomic_add_unchecked - add integer to atomic variable
6618 + * @i: integer value to add
6619 + * @v: pointer of type atomic_unchecked_t
6620 + *
6621 + * Atomically adds @i to @v.
6622 + */
6623 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6624 +{
6625 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6626 : "+m" (v->counter)
6627 : "ir" (i));
6628 }
6629 @@ -59,7 +104,29 @@ static inline void atomic_add(int i, ato
6630 */
6631 static inline void atomic_sub(int i, atomic_t *v)
6632 {
6633 - asm volatile(LOCK_PREFIX "subl %1,%0"
6634 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6635 +
6636 +#ifdef CONFIG_PAX_REFCOUNT
6637 + "jno 0f\n"
6638 + LOCK_PREFIX "addl %1,%0\n"
6639 + "int $4\n0:\n"
6640 + _ASM_EXTABLE(0b, 0b)
6641 +#endif
6642 +
6643 + : "+m" (v->counter)
6644 + : "ir" (i));
6645 +}
6646 +
6647 +/**
6648 + * atomic_sub_unchecked - subtract integer from atomic variable
6649 + * @i: integer value to subtract
6650 + * @v: pointer of type atomic_unchecked_t
6651 + *
6652 + * Atomically subtracts @i from @v.
6653 + */
6654 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6655 +{
6656 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6657 : "+m" (v->counter)
6658 : "ir" (i));
6659 }
6660 @@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(in
6661 {
6662 unsigned char c;
6663
6664 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6665 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
6666 +
6667 +#ifdef CONFIG_PAX_REFCOUNT
6668 + "jno 0f\n"
6669 + LOCK_PREFIX "addl %2,%0\n"
6670 + "int $4\n0:\n"
6671 + _ASM_EXTABLE(0b, 0b)
6672 +#endif
6673 +
6674 + "sete %1\n"
6675 : "+m" (v->counter), "=qm" (c)
6676 : "ir" (i) : "memory");
6677 return c;
6678 @@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(in
6679 */
6680 static inline void atomic_inc(atomic_t *v)
6681 {
6682 - asm volatile(LOCK_PREFIX "incl %0"
6683 + asm volatile(LOCK_PREFIX "incl %0\n"
6684 +
6685 +#ifdef CONFIG_PAX_REFCOUNT
6686 + "jno 0f\n"
6687 + LOCK_PREFIX "decl %0\n"
6688 + "int $4\n0:\n"
6689 + _ASM_EXTABLE(0b, 0b)
6690 +#endif
6691 +
6692 + : "+m" (v->counter));
6693 +}
6694 +
6695 +/**
6696 + * atomic_inc_unchecked - increment atomic variable
6697 + * @v: pointer of type atomic_unchecked_t
6698 + *
6699 + * Atomically increments @v by 1.
6700 + */
6701 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6702 +{
6703 + asm volatile(LOCK_PREFIX "incl %0\n"
6704 : "+m" (v->counter));
6705 }
6706
6707 @@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *
6708 */
6709 static inline void atomic_dec(atomic_t *v)
6710 {
6711 - asm volatile(LOCK_PREFIX "decl %0"
6712 + asm volatile(LOCK_PREFIX "decl %0\n"
6713 +
6714 +#ifdef CONFIG_PAX_REFCOUNT
6715 + "jno 0f\n"
6716 + LOCK_PREFIX "incl %0\n"
6717 + "int $4\n0:\n"
6718 + _ASM_EXTABLE(0b, 0b)
6719 +#endif
6720 +
6721 + : "+m" (v->counter));
6722 +}
6723 +
6724 +/**
6725 + * atomic_dec_unchecked - decrement atomic variable
6726 + * @v: pointer of type atomic_unchecked_t
6727 + *
6728 + * Atomically decrements @v by 1.
6729 + */
6730 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6731 +{
6732 + asm volatile(LOCK_PREFIX "decl %0\n"
6733 : "+m" (v->counter));
6734 }
6735
6736 @@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(at
6737 {
6738 unsigned char c;
6739
6740 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
6741 + asm volatile(LOCK_PREFIX "decl %0\n"
6742 +
6743 +#ifdef CONFIG_PAX_REFCOUNT
6744 + "jno 0f\n"
6745 + LOCK_PREFIX "incl %0\n"
6746 + "int $4\n0:\n"
6747 + _ASM_EXTABLE(0b, 0b)
6748 +#endif
6749 +
6750 + "sete %1\n"
6751 : "+m" (v->counter), "=qm" (c)
6752 : : "memory");
6753 return c != 0;
6754 @@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(at
6755 {
6756 unsigned char c;
6757
6758 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
6759 + asm volatile(LOCK_PREFIX "incl %0\n"
6760 +
6761 +#ifdef CONFIG_PAX_REFCOUNT
6762 + "jno 0f\n"
6763 + LOCK_PREFIX "decl %0\n"
6764 + "into\n0:\n"
6765 + _ASM_EXTABLE(0b, 0b)
6766 +#endif
6767 +
6768 + "sete %1\n"
6769 + : "+m" (v->counter), "=qm" (c)
6770 + : : "memory");
6771 + return c != 0;
6772 +}
6773 +
6774 +/**
6775 + * atomic_inc_and_test_unchecked - increment and test
6776 + * @v: pointer of type atomic_unchecked_t
6777 + *
6778 + * Atomically increments @v by 1
6779 + * and returns true if the result is zero, or false for all
6780 + * other cases.
6781 + */
6782 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6783 +{
6784 + unsigned char c;
6785 +
6786 + asm volatile(LOCK_PREFIX "incl %0\n"
6787 + "sete %1\n"
6788 : "+m" (v->counter), "=qm" (c)
6789 : : "memory");
6790 return c != 0;
6791 @@ -156,7 +309,16 @@ static inline int atomic_add_negative(in
6792 {
6793 unsigned char c;
6794
6795 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
6796 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
6797 +
6798 +#ifdef CONFIG_PAX_REFCOUNT
6799 + "jno 0f\n"
6800 + LOCK_PREFIX "subl %2,%0\n"
6801 + "int $4\n0:\n"
6802 + _ASM_EXTABLE(0b, 0b)
6803 +#endif
6804 +
6805 + "sets %1\n"
6806 : "+m" (v->counter), "=qm" (c)
6807 : "ir" (i) : "memory");
6808 return c;
6809 @@ -179,6 +341,46 @@ static inline int atomic_add_return(int
6810 #endif
6811 /* Modern 486+ processor */
6812 __i = i;
6813 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
6814 +
6815 +#ifdef CONFIG_PAX_REFCOUNT
6816 + "jno 0f\n"
6817 + "movl %0, %1\n"
6818 + "int $4\n0:\n"
6819 + _ASM_EXTABLE(0b, 0b)
6820 +#endif
6821 +
6822 + : "+r" (i), "+m" (v->counter)
6823 + : : "memory");
6824 + return i + __i;
6825 +
6826 +#ifdef CONFIG_M386
6827 +no_xadd: /* Legacy 386 processor */
6828 + local_irq_save(flags);
6829 + __i = atomic_read(v);
6830 + atomic_set(v, i + __i);
6831 + local_irq_restore(flags);
6832 + return i + __i;
6833 +#endif
6834 +}
6835 +
6836 +/**
6837 + * atomic_add_return_unchecked - add integer and return
6838 + * @v: pointer of type atomic_unchecked_t
6839 + * @i: integer value to add
6840 + *
6841 + * Atomically adds @i to @v and returns @i + @v
6842 + */
6843 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6844 +{
6845 + int __i;
6846 +#ifdef CONFIG_M386
6847 + unsigned long flags;
6848 + if (unlikely(boot_cpu_data.x86 <= 3))
6849 + goto no_xadd;
6850 +#endif
6851 + /* Modern 486+ processor */
6852 + __i = i;
6853 asm volatile(LOCK_PREFIX "xaddl %0, %1"
6854 : "+r" (i), "+m" (v->counter)
6855 : : "memory");
6856 @@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_
6857 return cmpxchg(&v->counter, old, new);
6858 }
6859
6860 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
6861 +{
6862 + return cmpxchg(&v->counter, old, new);
6863 +}
6864 +
6865 static inline int atomic_xchg(atomic_t *v, int new)
6866 {
6867 return xchg(&v->counter, new);
6868 }
6869
6870 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6871 +{
6872 + return xchg(&v->counter, new);
6873 +}
6874 +
6875 /**
6876 * atomic_add_unless - add unless the number is already a given value
6877 * @v: pointer of type atomic_t
6878 @@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *
6879 */
6880 static inline int atomic_add_unless(atomic_t *v, int a, int u)
6881 {
6882 - int c, old;
6883 + int c, old, new;
6884 c = atomic_read(v);
6885 for (;;) {
6886 - if (unlikely(c == (u)))
6887 + if (unlikely(c == u))
6888 break;
6889 - old = atomic_cmpxchg((v), c, c + (a));
6890 +
6891 + asm volatile("addl %2,%0\n"
6892 +
6893 +#ifdef CONFIG_PAX_REFCOUNT
6894 + "jno 0f\n"
6895 + "subl %2,%0\n"
6896 + "int $4\n0:\n"
6897 + _ASM_EXTABLE(0b, 0b)
6898 +#endif
6899 +
6900 + : "=r" (new)
6901 + : "0" (c), "ir" (a));
6902 +
6903 + old = atomic_cmpxchg(v, c, new);
6904 if (likely(old == c))
6905 break;
6906 c = old;
6907 }
6908 - return c != (u);
6909 + return c != u;
6910 }
6911
6912 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
6913
6914 #define atomic_inc_return(v) (atomic_add_return(1, v))
6915 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6916 +{
6917 + return atomic_add_return_unchecked(1, v);
6918 +}
6919 #define atomic_dec_return(v) (atomic_sub_return(1, v))
6920
6921 /* These are x86-specific, used by some header files */
6922 @@ -266,9 +495,18 @@ typedef struct {
6923 u64 __aligned(8) counter;
6924 } atomic64_t;
6925
6926 +#ifdef CONFIG_PAX_REFCOUNT
6927 +typedef struct {
6928 + u64 __aligned(8) counter;
6929 +} atomic64_unchecked_t;
6930 +#else
6931 +typedef atomic64_t atomic64_unchecked_t;
6932 +#endif
6933 +
6934 #define ATOMIC64_INIT(val) { (val) }
6935
6936 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
6937 +extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
6938
6939 /**
6940 * atomic64_xchg - xchg atomic64 variable
6941 @@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *
6942 * the old value.
6943 */
6944 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
6945 +extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
6946
6947 /**
6948 * atomic64_set - set atomic64 variable
6949 @@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr
6950 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
6951
6952 /**
6953 + * atomic64_unchecked_set - set atomic64 variable
6954 + * @ptr: pointer to type atomic64_unchecked_t
6955 + * @new_val: value to assign
6956 + *
6957 + * Atomically sets the value of @ptr to @new_val.
6958 + */
6959 +extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
6960 +
6961 +/**
6962 * atomic64_read - read atomic64 variable
6963 * @ptr: pointer to type atomic64_t
6964 *
6965 @@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64
6966 return res;
6967 }
6968
6969 -extern u64 atomic64_read(atomic64_t *ptr);
6970 +/**
6971 + * atomic64_read_unchecked - read atomic64 variable
6972 + * @ptr: pointer to type atomic64_unchecked_t
6973 + *
6974 + * Atomically reads the value of @ptr and returns it.
6975 + */
6976 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
6977 +{
6978 + u64 res;
6979 +
6980 + /*
6981 + * Note, we inline this atomic64_unchecked_t primitive because
6982 + * it only clobbers EAX/EDX and leaves the others
6983 + * untouched. We also (somewhat subtly) rely on the
6984 + * fact that cmpxchg8b returns the current 64-bit value
6985 + * of the memory location we are touching:
6986 + */
6987 + asm volatile(
6988 + "mov %%ebx, %%eax\n\t"
6989 + "mov %%ecx, %%edx\n\t"
6990 + LOCK_PREFIX "cmpxchg8b %1\n"
6991 + : "=&A" (res)
6992 + : "m" (*ptr)
6993 + );
6994 +
6995 + return res;
6996 +}
6997
6998 /**
6999 * atomic64_add_return - add and return
7000 @@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta
7001 * Other variants with different arithmetic operators:
7002 */
7003 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
7004 +extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7005 extern u64 atomic64_inc_return(atomic64_t *ptr);
7006 +extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
7007 extern u64 atomic64_dec_return(atomic64_t *ptr);
7008 +extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
7009
7010 /**
7011 * atomic64_add - add integer to atomic64 variable
7012 @@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_
7013 extern void atomic64_add(u64 delta, atomic64_t *ptr);
7014
7015 /**
7016 + * atomic64_add_unchecked - add integer to atomic64 variable
7017 + * @delta: integer value to add
7018 + * @ptr: pointer to type atomic64_unchecked_t
7019 + *
7020 + * Atomically adds @delta to @ptr.
7021 + */
7022 +extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7023 +
7024 +/**
7025 * atomic64_sub - subtract the atomic64 variable
7026 * @delta: integer value to subtract
7027 * @ptr: pointer to type atomic64_t
7028 @@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atom
7029 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
7030
7031 /**
7032 + * atomic64_sub_unchecked - subtract the atomic64 variable
7033 + * @delta: integer value to subtract
7034 + * @ptr: pointer to type atomic64_unchecked_t
7035 + *
7036 + * Atomically subtracts @delta from @ptr.
7037 + */
7038 +extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7039 +
7040 +/**
7041 * atomic64_sub_and_test - subtract value from variable and test result
7042 * @delta: integer value to subtract
7043 * @ptr: pointer to type atomic64_t
7044 @@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 del
7045 extern void atomic64_inc(atomic64_t *ptr);
7046
7047 /**
7048 + * atomic64_inc_unchecked - increment atomic64 variable
7049 + * @ptr: pointer to type atomic64_unchecked_t
7050 + *
7051 + * Atomically increments @ptr by 1.
7052 + */
7053 +extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
7054 +
7055 +/**
7056 * atomic64_dec - decrement atomic64 variable
7057 * @ptr: pointer to type atomic64_t
7058 *
7059 @@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr
7060 extern void atomic64_dec(atomic64_t *ptr);
7061
7062 /**
7063 + * atomic64_dec_unchecked - decrement atomic64 variable
7064 + * @ptr: pointer to type atomic64_unchecked_t
7065 + *
7066 + * Atomically decrements @ptr by 1.
7067 + */
7068 +extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
7069 +
7070 +/**
7071 * atomic64_dec_and_test - decrement and test
7072 * @ptr: pointer to type atomic64_t
7073 *
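
     [Note on the atomic_32.h additions above: they all follow one pattern. Under CONFIG_PAX_REFCOUNT the locked arithmetic is followed by `jno` (skip forward if no signed overflow); on overflow the operation is undone and `int $4` raises the overflow exception, with an `_ASM_EXTABLE(0b, 0b)` fixup entry attached, while the `*_unchecked` variants keep plain wrapping semantics for counters that are allowed to overflow. A minimal user-space sketch of the same pattern, illustrative only; x86 GCC inline asm, with `ud2` standing in for the kernel's `int $4` plus exception-table fixup:

     /* Guarded increment: if the signed result overflowed, undo the
      * increment and trap instead of letting the counter wrap. */
     static inline void refcount_style_inc(volatile int *counter)
     {
             asm volatile("lock incl %0\n\t"
                          "jno 0f\n\t"
                          "lock decl %0\n\t"
                          "ud2\n"              /* patch uses int $4 + extable */
                          "0:\n"
                          : "+m" (*counter)
                          :
                          : "cc", "memory");
     }
     ]
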
7074 diff -urNp linux-2.6.32.41/arch/x86/include/asm/atomic_64.h linux-2.6.32.41/arch/x86/include/asm/atomic_64.h
7075 --- linux-2.6.32.41/arch/x86/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
7076 +++ linux-2.6.32.41/arch/x86/include/asm/atomic_64.h 2011-05-04 18:35:31.000000000 -0400
7077 @@ -24,6 +24,17 @@ static inline int atomic_read(const atom
7078 }
7079
7080 /**
7081 + * atomic_read_unchecked - read atomic variable
7082 + * @v: pointer of type atomic_unchecked_t
7083 + *
7084 + * Atomically reads the value of @v.
7085 + */
7086 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7087 +{
7088 + return v->counter;
7089 +}
7090 +
7091 +/**
7092 * atomic_set - set atomic variable
7093 * @v: pointer of type atomic_t
7094 * @i: required value
7095 @@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *
7096 }
7097
7098 /**
7099 + * atomic_set_unchecked - set atomic variable
7100 + * @v: pointer of type atomic_unchecked_t
7101 + * @i: required value
7102 + *
7103 + * Atomically sets the value of @v to @i.
7104 + */
7105 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7106 +{
7107 + v->counter = i;
7108 +}
7109 +
7110 +/**
7111 * atomic_add - add integer to atomic variable
7112 * @i: integer value to add
7113 * @v: pointer of type atomic_t
7114 @@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *
7115 */
7116 static inline void atomic_add(int i, atomic_t *v)
7117 {
7118 - asm volatile(LOCK_PREFIX "addl %1,%0"
7119 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7120 +
7121 +#ifdef CONFIG_PAX_REFCOUNT
7122 + "jno 0f\n"
7123 + LOCK_PREFIX "subl %1,%0\n"
7124 + "int $4\n0:\n"
7125 + _ASM_EXTABLE(0b, 0b)
7126 +#endif
7127 +
7128 + : "=m" (v->counter)
7129 + : "ir" (i), "m" (v->counter));
7130 +}
7131 +
7132 +/**
7133 + * atomic_add_unchecked - add integer to atomic variable
7134 + * @i: integer value to add
7135 + * @v: pointer of type atomic_unchecked_t
7136 + *
7137 + * Atomically adds @i to @v.
7138 + */
7139 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7140 +{
7141 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7142 : "=m" (v->counter)
7143 : "ir" (i), "m" (v->counter));
7144 }
7145 @@ -58,7 +103,29 @@ static inline void atomic_add(int i, ato
7146 */
7147 static inline void atomic_sub(int i, atomic_t *v)
7148 {
7149 - asm volatile(LOCK_PREFIX "subl %1,%0"
7150 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7151 +
7152 +#ifdef CONFIG_PAX_REFCOUNT
7153 + "jno 0f\n"
7154 + LOCK_PREFIX "addl %1,%0\n"
7155 + "int $4\n0:\n"
7156 + _ASM_EXTABLE(0b, 0b)
7157 +#endif
7158 +
7159 + : "=m" (v->counter)
7160 + : "ir" (i), "m" (v->counter));
7161 +}
7162 +
7163 +/**
7164 + * atomic_sub_unchecked - subtract the atomic variable
7165 + * @i: integer value to subtract
7166 + * @v: pointer of type atomic_unchecked_t
7167 + *
7168 + * Atomically subtracts @i from @v.
7169 + */
7170 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7171 +{
7172 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7173 : "=m" (v->counter)
7174 : "ir" (i), "m" (v->counter));
7175 }
7176 @@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(in
7177 {
7178 unsigned char c;
7179
7180 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7181 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
7182 +
7183 +#ifdef CONFIG_PAX_REFCOUNT
7184 + "jno 0f\n"
7185 + LOCK_PREFIX "addl %2,%0\n"
7186 + "int $4\n0:\n"
7187 + _ASM_EXTABLE(0b, 0b)
7188 +#endif
7189 +
7190 + "sete %1\n"
7191 : "=m" (v->counter), "=qm" (c)
7192 : "ir" (i), "m" (v->counter) : "memory");
7193 return c;
7194 @@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(in
7195 */
7196 static inline void atomic_inc(atomic_t *v)
7197 {
7198 - asm volatile(LOCK_PREFIX "incl %0"
7199 + asm volatile(LOCK_PREFIX "incl %0\n"
7200 +
7201 +#ifdef CONFIG_PAX_REFCOUNT
7202 + "jno 0f\n"
7203 + LOCK_PREFIX "decl %0\n"
7204 + "int $4\n0:\n"
7205 + _ASM_EXTABLE(0b, 0b)
7206 +#endif
7207 +
7208 + : "=m" (v->counter)
7209 + : "m" (v->counter));
7210 +}
7211 +
7212 +/**
7213 + * atomic_inc_unchecked - increment atomic variable
7214 + * @v: pointer of type atomic_unchecked_t
7215 + *
7216 + * Atomically increments @v by 1.
7217 + */
7218 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7219 +{
7220 + asm volatile(LOCK_PREFIX "incl %0\n"
7221 : "=m" (v->counter)
7222 : "m" (v->counter));
7223 }
7224 @@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *
7225 */
7226 static inline void atomic_dec(atomic_t *v)
7227 {
7228 - asm volatile(LOCK_PREFIX "decl %0"
7229 + asm volatile(LOCK_PREFIX "decl %0\n"
7230 +
7231 +#ifdef CONFIG_PAX_REFCOUNT
7232 + "jno 0f\n"
7233 + LOCK_PREFIX "incl %0\n"
7234 + "int $4\n0:\n"
7235 + _ASM_EXTABLE(0b, 0b)
7236 +#endif
7237 +
7238 + : "=m" (v->counter)
7239 + : "m" (v->counter));
7240 +}
7241 +
7242 +/**
7243 + * atomic_dec_unchecked - decrement atomic variable
7244 + * @v: pointer of type atomic_unchecked_t
7245 + *
7246 + * Atomically decrements @v by 1.
7247 + */
7248 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7249 +{
7250 + asm volatile(LOCK_PREFIX "decl %0\n"
7251 : "=m" (v->counter)
7252 : "m" (v->counter));
7253 }
7254 @@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(at
7255 {
7256 unsigned char c;
7257
7258 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
7259 + asm volatile(LOCK_PREFIX "decl %0\n"
7260 +
7261 +#ifdef CONFIG_PAX_REFCOUNT
7262 + "jno 0f\n"
7263 + LOCK_PREFIX "incl %0\n"
7264 + "int $4\n0:\n"
7265 + _ASM_EXTABLE(0b, 0b)
7266 +#endif
7267 +
7268 + "sete %1\n"
7269 : "=m" (v->counter), "=qm" (c)
7270 : "m" (v->counter) : "memory");
7271 return c != 0;
7272 @@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(at
7273 {
7274 unsigned char c;
7275
7276 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
7277 + asm volatile(LOCK_PREFIX "incl %0\n"
7278 +
7279 +#ifdef CONFIG_PAX_REFCOUNT
7280 + "jno 0f\n"
7281 + LOCK_PREFIX "decl %0\n"
7282 + "int $4\n0:\n"
7283 + _ASM_EXTABLE(0b, 0b)
7284 +#endif
7285 +
7286 + "sete %1\n"
7287 + : "=m" (v->counter), "=qm" (c)
7288 + : "m" (v->counter) : "memory");
7289 + return c != 0;
7290 +}
7291 +
7292 +/**
7293 + * atomic_inc_and_test_unchecked - increment and test
7294 + * @v: pointer of type atomic_unchecked_t
7295 + *
7296 + * Atomically increments @v by 1
7297 + * and returns true if the result is zero, or false for all
7298 + * other cases.
7299 + */
7300 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7301 +{
7302 + unsigned char c;
7303 +
7304 + asm volatile(LOCK_PREFIX "incl %0\n"
7305 + "sete %1\n"
7306 : "=m" (v->counter), "=qm" (c)
7307 : "m" (v->counter) : "memory");
7308 return c != 0;
7309 @@ -157,7 +312,16 @@ static inline int atomic_add_negative(in
7310 {
7311 unsigned char c;
7312
7313 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7314 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
7315 +
7316 +#ifdef CONFIG_PAX_REFCOUNT
7317 + "jno 0f\n"
7318 + LOCK_PREFIX "subl %2,%0\n"
7319 + "int $4\n0:\n"
7320 + _ASM_EXTABLE(0b, 0b)
7321 +#endif
7322 +
7323 + "sets %1\n"
7324 : "=m" (v->counter), "=qm" (c)
7325 : "ir" (i), "m" (v->counter) : "memory");
7326 return c;
7327 @@ -173,7 +337,31 @@ static inline int atomic_add_negative(in
7328 static inline int atomic_add_return(int i, atomic_t *v)
7329 {
7330 int __i = i;
7331 - asm volatile(LOCK_PREFIX "xaddl %0, %1"
7332 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7333 +
7334 +#ifdef CONFIG_PAX_REFCOUNT
7335 + "jno 0f\n"
7336 + "movl %0, %1\n"
7337 + "int $4\n0:\n"
7338 + _ASM_EXTABLE(0b, 0b)
7339 +#endif
7340 +
7341 + : "+r" (i), "+m" (v->counter)
7342 + : : "memory");
7343 + return i + __i;
7344 +}
7345 +
7346 +/**
7347 + * atomic_add_return_unchecked - add and return
7348 + * @i: integer value to add
7349 + * @v: pointer of type atomic_unchecked_t
7350 + *
7351 + * Atomically adds @i to @v and returns @i + @v
7352 + */
7353 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7354 +{
7355 + int __i = i;
7356 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7357 : "+r" (i), "+m" (v->counter)
7358 : : "memory");
7359 return i + __i;
7360 @@ -185,6 +373,10 @@ static inline int atomic_sub_return(int
7361 }
7362
7363 #define atomic_inc_return(v) (atomic_add_return(1, v))
7364 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7365 +{
7366 + return atomic_add_return_unchecked(1, v);
7367 +}
7368 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7369
7370 /* The 64-bit atomic type */
7371 @@ -204,6 +396,18 @@ static inline long atomic64_read(const a
7372 }
7373
7374 /**
7375 + * atomic64_read_unchecked - read atomic64 variable
7376 + * @v: pointer of type atomic64_unchecked_t
7377 + *
7378 + * Atomically reads the value of @v.
7379 + * Doesn't imply a read memory barrier.
7380 + */
7381 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7382 +{
7383 + return v->counter;
7384 +}
7385 +
7386 +/**
7387 * atomic64_set - set atomic64 variable
7388 * @v: pointer to type atomic64_t
7389 * @i: required value
7390 @@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64
7391 }
7392
7393 /**
7394 + * atomic64_set_unchecked - set atomic64 variable
7395 + * @v: pointer to type atomic64_unchecked_t
7396 + * @i: required value
7397 + *
7398 + * Atomically sets the value of @v to @i.
7399 + */
7400 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7401 +{
7402 + v->counter = i;
7403 +}
7404 +
7405 +/**
7406 * atomic64_add - add integer to atomic64 variable
7407 * @i: integer value to add
7408 * @v: pointer to type atomic64_t
7409 @@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64
7410 */
7411 static inline void atomic64_add(long i, atomic64_t *v)
7412 {
7413 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
7414 +
7415 +#ifdef CONFIG_PAX_REFCOUNT
7416 + "jno 0f\n"
7417 + LOCK_PREFIX "subq %1,%0\n"
7418 + "int $4\n0:\n"
7419 + _ASM_EXTABLE(0b, 0b)
7420 +#endif
7421 +
7422 + : "=m" (v->counter)
7423 + : "er" (i), "m" (v->counter));
7424 +}
7425 +
7426 +/**
7427 + * atomic64_add_unchecked - add integer to atomic64 variable
7428 + * @i: integer value to add
7429 + * @v: pointer to type atomic64_unchecked_t
7430 + *
7431 + * Atomically adds @i to @v.
7432 + */
7433 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
7434 +{
7435 asm volatile(LOCK_PREFIX "addq %1,%0"
7436 : "=m" (v->counter)
7437 : "er" (i), "m" (v->counter));
7438 @@ -238,7 +476,15 @@ static inline void atomic64_add(long i,
7439 */
7440 static inline void atomic64_sub(long i, atomic64_t *v)
7441 {
7442 - asm volatile(LOCK_PREFIX "subq %1,%0"
7443 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
7444 +
7445 +#ifdef CONFIG_PAX_REFCOUNT
7446 + "jno 0f\n"
7447 + LOCK_PREFIX "addq %1,%0\n"
7448 + "int $4\n0:\n"
7449 + _ASM_EXTABLE(0b, 0b)
7450 +#endif
7451 +
7452 : "=m" (v->counter)
7453 : "er" (i), "m" (v->counter));
7454 }
7455 @@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(
7456 {
7457 unsigned char c;
7458
7459 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
7460 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
7461 +
7462 +#ifdef CONFIG_PAX_REFCOUNT
7463 + "jno 0f\n"
7464 + LOCK_PREFIX "addq %2,%0\n"
7465 + "int $4\n0:\n"
7466 + _ASM_EXTABLE(0b, 0b)
7467 +#endif
7468 +
7469 + "sete %1\n"
7470 : "=m" (v->counter), "=qm" (c)
7471 : "er" (i), "m" (v->counter) : "memory");
7472 return c;
7473 @@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(
7474 */
7475 static inline void atomic64_inc(atomic64_t *v)
7476 {
7477 + asm volatile(LOCK_PREFIX "incq %0\n"
7478 +
7479 +#ifdef CONFIG_PAX_REFCOUNT
7480 + "jno 0f\n"
7481 + LOCK_PREFIX "decq %0\n"
7482 + "int $4\n0:\n"
7483 + _ASM_EXTABLE(0b, 0b)
7484 +#endif
7485 +
7486 + : "=m" (v->counter)
7487 + : "m" (v->counter));
7488 +}
7489 +
7490 +/**
7491 + * atomic64_inc_unchecked - increment atomic64 variable
7492 + * @v: pointer to type atomic64_unchecked_t
7493 + *
7494 + * Atomically increments @v by 1.
7495 + */
7496 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7497 +{
7498 asm volatile(LOCK_PREFIX "incq %0"
7499 : "=m" (v->counter)
7500 : "m" (v->counter));
7501 @@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64
7502 */
7503 static inline void atomic64_dec(atomic64_t *v)
7504 {
7505 - asm volatile(LOCK_PREFIX "decq %0"
7506 + asm volatile(LOCK_PREFIX "decq %0\n"
7507 +
7508 +#ifdef CONFIG_PAX_REFCOUNT
7509 + "jno 0f\n"
7510 + LOCK_PREFIX "incq %0\n"
7511 + "int $4\n0:\n"
7512 + _ASM_EXTABLE(0b, 0b)
7513 +#endif
7514 +
7515 + : "=m" (v->counter)
7516 + : "m" (v->counter));
7517 +}
7518 +
7519 +/**
7520 + * atomic64_dec_unchecked - decrement atomic64 variable
7521 + * @v: pointer to type atomic64_t
7522 + *
7523 + * Atomically decrements @v by 1.
7524 + */
7525 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7526 +{
7527 + asm volatile(LOCK_PREFIX "decq %0\n"
7528 : "=m" (v->counter)
7529 : "m" (v->counter));
7530 }
7531 @@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(
7532 {
7533 unsigned char c;
7534
7535 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
7536 + asm volatile(LOCK_PREFIX "decq %0\n"
7537 +
7538 +#ifdef CONFIG_PAX_REFCOUNT
7539 + "jno 0f\n"
7540 + LOCK_PREFIX "incq %0\n"
7541 + "int $4\n0:\n"
7542 + _ASM_EXTABLE(0b, 0b)
7543 +#endif
7544 +
7545 + "sete %1\n"
7546 : "=m" (v->counter), "=qm" (c)
7547 : "m" (v->counter) : "memory");
7548 return c != 0;
7549 @@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(
7550 {
7551 unsigned char c;
7552
7553 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
7554 + asm volatile(LOCK_PREFIX "incq %0\n"
7555 +
7556 +#ifdef CONFIG_PAX_REFCOUNT
7557 + "jno 0f\n"
7558 + LOCK_PREFIX "decq %0\n"
7559 + "int $4\n0:\n"
7560 + _ASM_EXTABLE(0b, 0b)
7561 +#endif
7562 +
7563 + "sete %1\n"
7564 : "=m" (v->counter), "=qm" (c)
7565 : "m" (v->counter) : "memory");
7566 return c != 0;
7567 @@ -337,7 +652,16 @@ static inline int atomic64_add_negative(
7568 {
7569 unsigned char c;
7570
7571 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
7572 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
7573 +
7574 +#ifdef CONFIG_PAX_REFCOUNT
7575 + "jno 0f\n"
7576 + LOCK_PREFIX "subq %2,%0\n"
7577 + "int $4\n0:\n"
7578 + _ASM_EXTABLE(0b, 0b)
7579 +#endif
7580 +
7581 + "sets %1\n"
7582 : "=m" (v->counter), "=qm" (c)
7583 : "er" (i), "m" (v->counter) : "memory");
7584 return c;
7585 @@ -353,7 +677,31 @@ static inline int atomic64_add_negative(
7586 static inline long atomic64_add_return(long i, atomic64_t *v)
7587 {
7588 long __i = i;
7589 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
7590 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
7591 +
7592 +#ifdef CONFIG_PAX_REFCOUNT
7593 + "jno 0f\n"
7594 + "movq %0, %1\n"
7595 + "int $4\n0:\n"
7596 + _ASM_EXTABLE(0b, 0b)
7597 +#endif
7598 +
7599 + : "+r" (i), "+m" (v->counter)
7600 + : : "memory");
7601 + return i + __i;
7602 +}
7603 +
7604 +/**
7605 + * atomic64_add_return_unchecked - add and return
7606 + * @i: integer value to add
7607 + * @v: pointer to type atomic64_unchecked_t
7608 + *
7609 + * Atomically adds @i to @v and returns @i + @v
7610 + */
7611 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7612 +{
7613 + long __i = i;
7614 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
7615 : "+r" (i), "+m" (v->counter)
7616 : : "memory");
7617 return i + __i;
7618 @@ -365,6 +713,10 @@ static inline long atomic64_sub_return(l
7619 }
7620
7621 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
7622 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7623 +{
7624 + return atomic64_add_return_unchecked(1, v);
7625 +}
7626 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
7627
7628 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
7629 @@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atom
7630 return cmpxchg(&v->counter, old, new);
7631 }
7632
7633 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
7634 +{
7635 + return cmpxchg(&v->counter, old, new);
7636 +}
7637 +
7638 static inline long atomic64_xchg(atomic64_t *v, long new)
7639 {
7640 return xchg(&v->counter, new);
7641 }
7642
7643 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
7644 +{
7645 + return xchg(&v->counter, new);
7646 +}
7647 +
7648 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
7649 {
7650 return cmpxchg(&v->counter, old, new);
7651 }
7652
7653 +static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7654 +{
7655 + return cmpxchg(&v->counter, old, new);
7656 +}
7657 +
7658 static inline long atomic_xchg(atomic_t *v, int new)
7659 {
7660 return xchg(&v->counter, new);
7661 }
7662
7663 +static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7664 +{
7665 + return xchg(&v->counter, new);
7666 +}
7667 +
7668 /**
7669 * atomic_add_unless - add unless the number is a given value
7670 * @v: pointer of type atomic_t
7671 @@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t
7672 */
7673 static inline int atomic_add_unless(atomic_t *v, int a, int u)
7674 {
7675 - int c, old;
7676 + int c, old, new;
7677 c = atomic_read(v);
7678 for (;;) {
7679 - if (unlikely(c == (u)))
7680 + if (unlikely(c == u))
7681 break;
7682 - old = atomic_cmpxchg((v), c, c + (a));
7683 +
7684 + asm volatile("addl %2,%0\n"
7685 +
7686 +#ifdef CONFIG_PAX_REFCOUNT
7687 + "jno 0f\n"
7688 + "subl %2,%0\n"
7689 + "int $4\n0:\n"
7690 + _ASM_EXTABLE(0b, 0b)
7691 +#endif
7692 +
7693 + : "=r" (new)
7694 + : "0" (c), "ir" (a));
7695 +
7696 + old = atomic_cmpxchg(v, c, new);
7697 if (likely(old == c))
7698 break;
7699 c = old;
7700 }
7701 - return c != (u);
7702 + return c != u;
7703 }
7704
7705 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
7706 @@ -424,17 +809,30 @@ static inline int atomic_add_unless(atom
7707 */
7708 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
7709 {
7710 - long c, old;
7711 + long c, old, new;
7712 c = atomic64_read(v);
7713 for (;;) {
7714 - if (unlikely(c == (u)))
7715 + if (unlikely(c == u))
7716 break;
7717 - old = atomic64_cmpxchg((v), c, c + (a));
7718 +
7719 + asm volatile("addq %2,%0\n"
7720 +
7721 +#ifdef CONFIG_PAX_REFCOUNT
7722 + "jno 0f\n"
7723 + "subq %2,%0\n"
7724 + "int $4\n0:\n"
7725 + _ASM_EXTABLE(0b, 0b)
7726 +#endif
7727 +
7728 + : "=r" (new)
7729 + : "0" (c), "er" (a));
7730 +
7731 + old = atomic64_cmpxchg(v, c, new);
7732 if (likely(old == c))
7733 break;
7734 c = old;
7735 }
7736 - return c != (u);
7737 + return c != u;
7738 }
7739
7740 /**
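
     [Note on the reworked atomic_add_unless()/atomic64_add_unless() above: the usual cmpxchg retry loop is kept; only the `c + a` addition is moved into an asm block so the REFCOUNT overflow check can run before the compare-and-swap is attempted. The underlying loop, stripped of that check, is the standard one; a C11 restatement for illustration, not the kernel code:

     #include <stdatomic.h>

     /* Add @a to *v unless *v already equals @u; returns non-zero if the
      * add happened.  On CAS failure c is refreshed with the current value
      * and the loop retries, exactly like the kernel loop above. */
     static int add_unless(atomic_int *v, int a, int u)
     {
             int c = atomic_load(v);

             while (c != u) {
                     if (atomic_compare_exchange_weak(v, &c, c + a))
                             return 1;
             }
             return 0;
     }
     ]
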
7741 diff -urNp linux-2.6.32.41/arch/x86/include/asm/bitops.h linux-2.6.32.41/arch/x86/include/asm/bitops.h
7742 --- linux-2.6.32.41/arch/x86/include/asm/bitops.h 2011-03-27 14:31:47.000000000 -0400
7743 +++ linux-2.6.32.41/arch/x86/include/asm/bitops.h 2011-04-17 15:56:46.000000000 -0400
7744 @@ -38,7 +38,7 @@
7745 * a mask operation on a byte.
7746 */
7747 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
7748 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
7749 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
7750 #define CONST_MASK(nr) (1 << ((nr) & 7))
7751
7752 /**
7753 diff -urNp linux-2.6.32.41/arch/x86/include/asm/boot.h linux-2.6.32.41/arch/x86/include/asm/boot.h
7754 --- linux-2.6.32.41/arch/x86/include/asm/boot.h 2011-03-27 14:31:47.000000000 -0400
7755 +++ linux-2.6.32.41/arch/x86/include/asm/boot.h 2011-04-17 15:56:46.000000000 -0400
7756 @@ -11,10 +11,15 @@
7757 #include <asm/pgtable_types.h>
7758
7759 /* Physical address where kernel should be loaded. */
7760 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
7761 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
7762 + (CONFIG_PHYSICAL_ALIGN - 1)) \
7763 & ~(CONFIG_PHYSICAL_ALIGN - 1))
7764
7765 +#ifndef __ASSEMBLY__
7766 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
7767 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
7768 +#endif
7769 +
7770 /* Minimum kernel alignment, as a power of two */
7771 #ifdef CONFIG_X86_64
7772 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
7773 diff -urNp linux-2.6.32.41/arch/x86/include/asm/cacheflush.h linux-2.6.32.41/arch/x86/include/asm/cacheflush.h
7774 --- linux-2.6.32.41/arch/x86/include/asm/cacheflush.h 2011-03-27 14:31:47.000000000 -0400
7775 +++ linux-2.6.32.41/arch/x86/include/asm/cacheflush.h 2011-04-17 15:56:46.000000000 -0400
7776 @@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
7777 static inline unsigned long get_page_memtype(struct page *pg)
7778 {
7779 if (!PageUncached(pg) && !PageWC(pg))
7780 - return -1;
7781 + return ~0UL;
7782 else if (!PageUncached(pg) && PageWC(pg))
7783 return _PAGE_CACHE_WC;
7784 else if (PageUncached(pg) && !PageWC(pg))
7785 @@ -85,7 +85,7 @@ static inline void set_page_memtype(stru
7786 SetPageWC(pg);
7787 break;
7788 default:
7789 - case -1:
7790 + case ~0UL:
7791 ClearPageUncached(pg);
7792 ClearPageWC(pg);
7793 break;
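The cacheflush.h hunk replaces the literal -1 with ~0UL, both as the return value of get_page_memtype() (which returns unsigned long) and as the matching case label. A small sketch of the point, with a hypothetical getter and nothing beyond standard C assumed: spelling the sentinel in the value's own type avoids leaning on the implicit conversion of the int constant -1.

#include <stdio.h>

/* Hypothetical getter standing in for get_page_memtype(): the "no memtype"
 * sentinel is written as ~0UL so it has the same type as the return value,
 * rather than relying on -1 being converted to unsigned long. */
static unsigned long get_memtype(int valid)
{
	return valid ? 42UL : ~0UL;
}

int main(void)
{
	unsigned long t = get_memtype(0);

	if (t == ~0UL)
		puts("no memtype set");
	return 0;
}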
7794 diff -urNp linux-2.6.32.41/arch/x86/include/asm/cache.h linux-2.6.32.41/arch/x86/include/asm/cache.h
7795 --- linux-2.6.32.41/arch/x86/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
7796 +++ linux-2.6.32.41/arch/x86/include/asm/cache.h 2011-05-04 17:56:20.000000000 -0400
7797 @@ -5,9 +5,10 @@
7798
7799 /* L1 cache line size */
7800 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
7801 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7802 +#define L1_CACHE_BYTES (_AC(1,U) << L1_CACHE_SHIFT)
7803
7804 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
7805 +#define __read_only __attribute__((__section__(".data.read_only")))
7806
7807 #ifdef CONFIG_X86_VSMP
7808 /* vSMP Internode cacheline shift */
7809 diff -urNp linux-2.6.32.41/arch/x86/include/asm/checksum_32.h linux-2.6.32.41/arch/x86/include/asm/checksum_32.h
7810 --- linux-2.6.32.41/arch/x86/include/asm/checksum_32.h 2011-03-27 14:31:47.000000000 -0400
7811 +++ linux-2.6.32.41/arch/x86/include/asm/checksum_32.h 2011-04-17 15:56:46.000000000 -0400
7812 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
7813 int len, __wsum sum,
7814 int *src_err_ptr, int *dst_err_ptr);
7815
7816 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
7817 + int len, __wsum sum,
7818 + int *src_err_ptr, int *dst_err_ptr);
7819 +
7820 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
7821 + int len, __wsum sum,
7822 + int *src_err_ptr, int *dst_err_ptr);
7823 +
7824 /*
7825 * Note: when you get a NULL pointer exception here this means someone
7826 * passed in an incorrect kernel address to one of these functions.
7827 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
7828 int *err_ptr)
7829 {
7830 might_sleep();
7831 - return csum_partial_copy_generic((__force void *)src, dst,
7832 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
7833 len, sum, err_ptr, NULL);
7834 }
7835
7836 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
7837 {
7838 might_sleep();
7839 if (access_ok(VERIFY_WRITE, dst, len))
7840 - return csum_partial_copy_generic(src, (__force void *)dst,
7841 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
7842 len, sum, NULL, err_ptr);
7843
7844 if (len)
7845 diff -urNp linux-2.6.32.41/arch/x86/include/asm/desc_defs.h linux-2.6.32.41/arch/x86/include/asm/desc_defs.h
7846 --- linux-2.6.32.41/arch/x86/include/asm/desc_defs.h 2011-03-27 14:31:47.000000000 -0400
7847 +++ linux-2.6.32.41/arch/x86/include/asm/desc_defs.h 2011-04-17 15:56:46.000000000 -0400
7848 @@ -31,6 +31,12 @@ struct desc_struct {
7849 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
7850 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
7851 };
7852 + struct {
7853 + u16 offset_low;
7854 + u16 seg;
7855 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
7856 + unsigned offset_high: 16;
7857 + } gate;
7858 };
7859 } __attribute__((packed));
7860
7861 diff -urNp linux-2.6.32.41/arch/x86/include/asm/desc.h linux-2.6.32.41/arch/x86/include/asm/desc.h
7862 --- linux-2.6.32.41/arch/x86/include/asm/desc.h 2011-03-27 14:31:47.000000000 -0400
7863 +++ linux-2.6.32.41/arch/x86/include/asm/desc.h 2011-04-23 12:56:10.000000000 -0400
7864 @@ -4,6 +4,7 @@
7865 #include <asm/desc_defs.h>
7866 #include <asm/ldt.h>
7867 #include <asm/mmu.h>
7868 +#include <asm/pgtable.h>
7869 #include <linux/smp.h>
7870
7871 static inline void fill_ldt(struct desc_struct *desc,
7872 @@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_
7873 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
7874 desc->type = (info->read_exec_only ^ 1) << 1;
7875 desc->type |= info->contents << 2;
7876 + desc->type |= info->seg_not_present ^ 1;
7877 desc->s = 1;
7878 desc->dpl = 0x3;
7879 desc->p = info->seg_not_present ^ 1;
7880 @@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_
7881 }
7882
7883 extern struct desc_ptr idt_descr;
7884 -extern gate_desc idt_table[];
7885 -
7886 -struct gdt_page {
7887 - struct desc_struct gdt[GDT_ENTRIES];
7888 -} __attribute__((aligned(PAGE_SIZE)));
7889 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
7890 +extern gate_desc idt_table[256];
7891
7892 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
7893 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
7894 {
7895 - return per_cpu(gdt_page, cpu).gdt;
7896 + return cpu_gdt_table[cpu];
7897 }
7898
7899 #ifdef CONFIG_X86_64
7900 @@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *
7901 unsigned long base, unsigned dpl, unsigned flags,
7902 unsigned short seg)
7903 {
7904 - gate->a = (seg << 16) | (base & 0xffff);
7905 - gate->b = (base & 0xffff0000) |
7906 - (((0x80 | type | (dpl << 5)) & 0xff) << 8);
7907 + gate->gate.offset_low = base;
7908 + gate->gate.seg = seg;
7909 + gate->gate.reserved = 0;
7910 + gate->gate.type = type;
7911 + gate->gate.s = 0;
7912 + gate->gate.dpl = dpl;
7913 + gate->gate.p = 1;
7914 + gate->gate.offset_high = base >> 16;
7915 }
7916
7917 #endif
7918 @@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(str
7919 static inline void native_write_idt_entry(gate_desc *idt, int entry,
7920 const gate_desc *gate)
7921 {
7922 + pax_open_kernel();
7923 memcpy(&idt[entry], gate, sizeof(*gate));
7924 + pax_close_kernel();
7925 }
7926
7927 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
7928 const void *desc)
7929 {
7930 + pax_open_kernel();
7931 memcpy(&ldt[entry], desc, 8);
7932 + pax_close_kernel();
7933 }
7934
7935 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
7936 @@ -139,7 +146,10 @@ static inline void native_write_gdt_entr
7937 size = sizeof(struct desc_struct);
7938 break;
7939 }
7940 +
7941 + pax_open_kernel();
7942 memcpy(&gdt[entry], desc, size);
7943 + pax_close_kernel();
7944 }
7945
7946 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
7947 @@ -211,7 +221,9 @@ static inline void native_set_ldt(const
7948
7949 static inline void native_load_tr_desc(void)
7950 {
7951 + pax_open_kernel();
7952 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
7953 + pax_close_kernel();
7954 }
7955
7956 static inline void native_load_gdt(const struct desc_ptr *dtr)
7957 @@ -246,8 +258,10 @@ static inline void native_load_tls(struc
7958 unsigned int i;
7959 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
7960
7961 + pax_open_kernel();
7962 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
7963 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
7964 + pax_close_kernel();
7965 }
7966
7967 #define _LDT_empty(info) \
7968 @@ -309,7 +323,7 @@ static inline void set_desc_limit(struct
7969 desc->limit = (limit >> 16) & 0xf;
7970 }
7971
7972 -static inline void _set_gate(int gate, unsigned type, void *addr,
7973 +static inline void _set_gate(int gate, unsigned type, const void *addr,
7974 unsigned dpl, unsigned ist, unsigned seg)
7975 {
7976 gate_desc s;
7977 @@ -327,7 +341,7 @@ static inline void _set_gate(int gate, u
7978 * Pentium F0 0F bugfix can have resulted in the mapped
7979 * IDT being write-protected.
7980 */
7981 -static inline void set_intr_gate(unsigned int n, void *addr)
7982 +static inline void set_intr_gate(unsigned int n, const void *addr)
7983 {
7984 BUG_ON((unsigned)n > 0xFF);
7985 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
7986 @@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsig
7987 /*
7988 * This routine sets up an interrupt gate at directory privilege level 3.
7989 */
7990 -static inline void set_system_intr_gate(unsigned int n, void *addr)
7991 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
7992 {
7993 BUG_ON((unsigned)n > 0xFF);
7994 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
7995 }
7996
7997 -static inline void set_system_trap_gate(unsigned int n, void *addr)
7998 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
7999 {
8000 BUG_ON((unsigned)n > 0xFF);
8001 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8002 }
8003
8004 -static inline void set_trap_gate(unsigned int n, void *addr)
8005 +static inline void set_trap_gate(unsigned int n, const void *addr)
8006 {
8007 BUG_ON((unsigned)n > 0xFF);
8008 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
8009 @@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigne
8010 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8011 {
8012 BUG_ON((unsigned)n > 0xFF);
8013 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8014 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8015 }
8016
8017 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8018 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8019 {
8020 BUG_ON((unsigned)n > 0xFF);
8021 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8022 }
8023
8024 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8025 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8026 {
8027 BUG_ON((unsigned)n > 0xFF);
8028 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8029 }
8030
8031 +#ifdef CONFIG_X86_32
8032 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8033 +{
8034 + struct desc_struct d;
8035 +
8036 + if (likely(limit))
8037 + limit = (limit - 1UL) >> PAGE_SHIFT;
8038 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
8039 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8040 +}
8041 +#endif
8042 +
8043 #endif /* _ASM_X86_DESC_H */
8044 diff -urNp linux-2.6.32.41/arch/x86/include/asm/device.h linux-2.6.32.41/arch/x86/include/asm/device.h
8045 --- linux-2.6.32.41/arch/x86/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
8046 +++ linux-2.6.32.41/arch/x86/include/asm/device.h 2011-04-17 15:56:46.000000000 -0400
8047 @@ -6,7 +6,7 @@ struct dev_archdata {
8048 void *acpi_handle;
8049 #endif
8050 #ifdef CONFIG_X86_64
8051 -struct dma_map_ops *dma_ops;
8052 + const struct dma_map_ops *dma_ops;
8053 #endif
8054 #ifdef CONFIG_DMAR
8055 void *iommu; /* hook for IOMMU specific extension */
8056 diff -urNp linux-2.6.32.41/arch/x86/include/asm/dma-mapping.h linux-2.6.32.41/arch/x86/include/asm/dma-mapping.h
8057 --- linux-2.6.32.41/arch/x86/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
8058 +++ linux-2.6.32.41/arch/x86/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
8059 @@ -25,9 +25,9 @@ extern int iommu_merge;
8060 extern struct device x86_dma_fallback_dev;
8061 extern int panic_on_overflow;
8062
8063 -extern struct dma_map_ops *dma_ops;
8064 +extern const struct dma_map_ops *dma_ops;
8065
8066 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
8067 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
8068 {
8069 #ifdef CONFIG_X86_32
8070 return dma_ops;
8071 @@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dm
8072 /* Make sure we keep the same behaviour */
8073 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
8074 {
8075 - struct dma_map_ops *ops = get_dma_ops(dev);
8076 + const struct dma_map_ops *ops = get_dma_ops(dev);
8077 if (ops->mapping_error)
8078 return ops->mapping_error(dev, dma_addr);
8079
8080 @@ -122,7 +122,7 @@ static inline void *
8081 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
8082 gfp_t gfp)
8083 {
8084 - struct dma_map_ops *ops = get_dma_ops(dev);
8085 + const struct dma_map_ops *ops = get_dma_ops(dev);
8086 void *memory;
8087
8088 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
8089 @@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, s
8090 static inline void dma_free_coherent(struct device *dev, size_t size,
8091 void *vaddr, dma_addr_t bus)
8092 {
8093 - struct dma_map_ops *ops = get_dma_ops(dev);
8094 + const struct dma_map_ops *ops = get_dma_ops(dev);
8095
8096 WARN_ON(irqs_disabled()); /* for portability */
8097
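The device.h and dma-mapping.h hunks above, like the later microcode_ops, pci_raw_ops and kvm_x86_ops changes, all constify structures of function pointers so the ops tables land in read-only data and cannot be retargeted at run time. A minimal sketch of that pattern with made-up names:

#include <stdio.h>

/* Constified ops table: the struct of function pointers is declared const,
 * so the pointers live in a read-only section and any attempt to overwrite
 * them is rejected at compile time. */
struct demo_ops {
	int (*read)(int reg);
	void (*write)(int reg, int val);
};

static int demo_read(int reg)
{
	return reg * 2;
}

static void demo_write(int reg, int val)
{
	printf("write %d = %d\n", reg, val);
}

static const struct demo_ops ops = {
	.read	= demo_read,
	.write	= demo_write,
};

int main(void)
{
	printf("read: %d\n", ops.read(21));
	ops.write(3, 7);
	/* ops.read = NULL; would fail to compile: assignment of read-only member */
	return 0;
}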
8098 diff -urNp linux-2.6.32.41/arch/x86/include/asm/e820.h linux-2.6.32.41/arch/x86/include/asm/e820.h
8099 --- linux-2.6.32.41/arch/x86/include/asm/e820.h 2011-03-27 14:31:47.000000000 -0400
8100 +++ linux-2.6.32.41/arch/x86/include/asm/e820.h 2011-04-17 15:56:46.000000000 -0400
8101 @@ -133,7 +133,7 @@ extern char *default_machine_specific_me
8102 #define ISA_END_ADDRESS 0x100000
8103 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
8104
8105 -#define BIOS_BEGIN 0x000a0000
8106 +#define BIOS_BEGIN 0x000c0000
8107 #define BIOS_END 0x00100000
8108
8109 #ifdef __KERNEL__
8110 diff -urNp linux-2.6.32.41/arch/x86/include/asm/elf.h linux-2.6.32.41/arch/x86/include/asm/elf.h
8111 --- linux-2.6.32.41/arch/x86/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
8112 +++ linux-2.6.32.41/arch/x86/include/asm/elf.h 2011-04-17 15:56:46.000000000 -0400
8113 @@ -257,7 +257,25 @@ extern int force_personality32;
8114 the loader. We need to make sure that it is out of the way of the program
8115 that it will "exec", and that there is sufficient room for the brk. */
8116
8117 +#ifdef CONFIG_PAX_SEGMEXEC
8118 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8119 +#else
8120 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8121 +#endif
8122 +
8123 +#ifdef CONFIG_PAX_ASLR
8124 +#ifdef CONFIG_X86_32
8125 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8126 +
8127 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8128 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8129 +#else
8130 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
8131 +
8132 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8133 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8134 +#endif
8135 +#endif
8136
8137 /* This yields a mask that user programs can use to figure out what
8138 instruction set this CPU supports. This could be done in user space,
8139 @@ -311,8 +329,7 @@ do { \
8140 #define ARCH_DLINFO \
8141 do { \
8142 if (vdso_enabled) \
8143 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8144 - (unsigned long)current->mm->context.vdso); \
8145 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);\
8146 } while (0)
8147
8148 #define AT_SYSINFO 32
8149 @@ -323,7 +340,7 @@ do { \
8150
8151 #endif /* !CONFIG_X86_32 */
8152
8153 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8154 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8155
8156 #define VDSO_ENTRY \
8157 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8158 @@ -337,7 +354,4 @@ extern int arch_setup_additional_pages(s
8159 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8160 #define compat_arch_setup_additional_pages syscall32_setup_pages
8161
8162 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8163 -#define arch_randomize_brk arch_randomize_brk
8164 -
8165 #endif /* _ASM_X86_ELF_H */
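The elf.h hunk defines PAX_ELF_ET_DYN_BASE and the PAX_DELTA_MMAP_LEN/PAX_DELTA_STACK_LEN bit counts used by PaX ASLR: the delta length is the number of random bits applied as a page-aligned offset above a fixed base. A rough stand-alone sketch of that relationship (the base, the bit count and the use of rand() are illustrative only):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12

/* A delta length of N bits means the mapping base is displaced by a random
 * amount of up to (1 << N) pages; with N = 16 and 4 KiB pages that is a
 * 256 MiB randomisation window. */
static unsigned long randomize_base(unsigned long base, unsigned int bits)
{
	unsigned long pages = (unsigned long)rand() & ((1UL << bits) - 1);

	return base + (pages << PAGE_SHIFT);
}

int main(void)
{
	/* 0x10000000UL mirrors the 32-bit PAX_ELF_ET_DYN_BASE chosen above. */
	printf("randomized base: %#lx\n", randomize_base(0x10000000UL, 16));
	return 0;
}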
8166 diff -urNp linux-2.6.32.41/arch/x86/include/asm/emergency-restart.h linux-2.6.32.41/arch/x86/include/asm/emergency-restart.h
8167 --- linux-2.6.32.41/arch/x86/include/asm/emergency-restart.h 2011-03-27 14:31:47.000000000 -0400
8168 +++ linux-2.6.32.41/arch/x86/include/asm/emergency-restart.h 2011-05-22 23:02:06.000000000 -0400
8169 @@ -15,6 +15,6 @@ enum reboot_type {
8170
8171 extern enum reboot_type reboot_type;
8172
8173 -extern void machine_emergency_restart(void);
8174 +extern void machine_emergency_restart(void) __noreturn;
8175
8176 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8177 diff -urNp linux-2.6.32.41/arch/x86/include/asm/futex.h linux-2.6.32.41/arch/x86/include/asm/futex.h
8178 --- linux-2.6.32.41/arch/x86/include/asm/futex.h 2011-03-27 14:31:47.000000000 -0400
8179 +++ linux-2.6.32.41/arch/x86/include/asm/futex.h 2011-04-17 15:56:46.000000000 -0400
8180 @@ -12,16 +12,18 @@
8181 #include <asm/system.h>
8182
8183 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8184 + typecheck(u32 *, uaddr); \
8185 asm volatile("1:\t" insn "\n" \
8186 "2:\t.section .fixup,\"ax\"\n" \
8187 "3:\tmov\t%3, %1\n" \
8188 "\tjmp\t2b\n" \
8189 "\t.previous\n" \
8190 _ASM_EXTABLE(1b, 3b) \
8191 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8192 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
8193 : "i" (-EFAULT), "0" (oparg), "1" (0))
8194
8195 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8196 + typecheck(u32 *, uaddr); \
8197 asm volatile("1:\tmovl %2, %0\n" \
8198 "\tmovl\t%0, %3\n" \
8199 "\t" insn "\n" \
8200 @@ -34,10 +36,10 @@
8201 _ASM_EXTABLE(1b, 4b) \
8202 _ASM_EXTABLE(2b, 4b) \
8203 : "=&a" (oldval), "=&r" (ret), \
8204 - "+m" (*uaddr), "=&r" (tem) \
8205 + "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
8206 : "r" (oparg), "i" (-EFAULT), "1" (0))
8207
8208 -static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
8209 +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8210 {
8211 int op = (encoded_op >> 28) & 7;
8212 int cmp = (encoded_op >> 24) & 15;
8213 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
8214
8215 switch (op) {
8216 case FUTEX_OP_SET:
8217 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8218 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8219 break;
8220 case FUTEX_OP_ADD:
8221 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8222 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8223 uaddr, oparg);
8224 break;
8225 case FUTEX_OP_OR:
8226 @@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser
8227 return ret;
8228 }
8229
8230 -static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
8231 +static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
8232 int newval)
8233 {
8234
8235 @@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_i
8236 return -ENOSYS;
8237 #endif
8238
8239 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
8240 + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8241 return -EFAULT;
8242
8243 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
8244 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
8245 "2:\t.section .fixup, \"ax\"\n"
8246 "3:\tmov %2, %0\n"
8247 "\tjmp 2b\n"
8248 "\t.previous\n"
8249 _ASM_EXTABLE(1b, 3b)
8250 - : "=a" (oldval), "+m" (*uaddr)
8251 + : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
8252 : "i" (-EFAULT), "r" (newval), "0" (oldval)
8253 : "memory"
8254 );
8255 diff -urNp linux-2.6.32.41/arch/x86/include/asm/hw_irq.h linux-2.6.32.41/arch/x86/include/asm/hw_irq.h
8256 --- linux-2.6.32.41/arch/x86/include/asm/hw_irq.h 2011-03-27 14:31:47.000000000 -0400
8257 +++ linux-2.6.32.41/arch/x86/include/asm/hw_irq.h 2011-05-04 17:56:28.000000000 -0400
8258 @@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
8259 extern void enable_IO_APIC(void);
8260
8261 /* Statistics */
8262 -extern atomic_t irq_err_count;
8263 -extern atomic_t irq_mis_count;
8264 +extern atomic_unchecked_t irq_err_count;
8265 +extern atomic_unchecked_t irq_mis_count;
8266
8267 /* EISA */
8268 extern void eisa_set_level_irq(unsigned int irq);
8269 diff -urNp linux-2.6.32.41/arch/x86/include/asm/i387.h linux-2.6.32.41/arch/x86/include/asm/i387.h
8270 --- linux-2.6.32.41/arch/x86/include/asm/i387.h 2011-03-27 14:31:47.000000000 -0400
8271 +++ linux-2.6.32.41/arch/x86/include/asm/i387.h 2011-04-17 15:56:46.000000000 -0400
8272 @@ -60,6 +60,11 @@ static inline int fxrstor_checking(struc
8273 {
8274 int err;
8275
8276 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8277 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8278 + fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
8279 +#endif
8280 +
8281 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
8282 "2:\n"
8283 ".section .fixup,\"ax\"\n"
8284 @@ -105,6 +110,11 @@ static inline int fxsave_user(struct i38
8285 {
8286 int err;
8287
8288 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8289 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8290 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8291 +#endif
8292 +
8293 asm volatile("1: rex64/fxsave (%[fx])\n\t"
8294 "2:\n"
8295 ".section .fixup,\"ax\"\n"
8296 @@ -195,13 +205,8 @@ static inline int fxrstor_checking(struc
8297 }
8298
8299 /* We need a safe address that is cheap to find and that is already
8300 - in L1 during context switch. The best choices are unfortunately
8301 - different for UP and SMP */
8302 -#ifdef CONFIG_SMP
8303 -#define safe_address (__per_cpu_offset[0])
8304 -#else
8305 -#define safe_address (kstat_cpu(0).cpustat.user)
8306 -#endif
8307 + in L1 during context switch. */
8308 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
8309
8310 /*
8311 * These must be called with preempt disabled
8312 @@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void
8313 struct thread_info *me = current_thread_info();
8314 preempt_disable();
8315 if (me->status & TS_USEDFPU)
8316 - __save_init_fpu(me->task);
8317 + __save_init_fpu(current);
8318 else
8319 clts();
8320 }
8321 diff -urNp linux-2.6.32.41/arch/x86/include/asm/io_32.h linux-2.6.32.41/arch/x86/include/asm/io_32.h
8322 --- linux-2.6.32.41/arch/x86/include/asm/io_32.h 2011-03-27 14:31:47.000000000 -0400
8323 +++ linux-2.6.32.41/arch/x86/include/asm/io_32.h 2011-04-17 15:56:46.000000000 -0400
8324 @@ -3,6 +3,7 @@
8325
8326 #include <linux/string.h>
8327 #include <linux/compiler.h>
8328 +#include <asm/processor.h>
8329
8330 /*
8331 * This file contains the definitions for the x86 IO instructions
8332 @@ -42,6 +43,17 @@
8333
8334 #ifdef __KERNEL__
8335
8336 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8337 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8338 +{
8339 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8340 +}
8341 +
8342 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8343 +{
8344 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8345 +}
8346 +
8347 #include <asm-generic/iomap.h>
8348
8349 #include <linux/vmalloc.h>
8350 diff -urNp linux-2.6.32.41/arch/x86/include/asm/io_64.h linux-2.6.32.41/arch/x86/include/asm/io_64.h
8351 --- linux-2.6.32.41/arch/x86/include/asm/io_64.h 2011-03-27 14:31:47.000000000 -0400
8352 +++ linux-2.6.32.41/arch/x86/include/asm/io_64.h 2011-04-17 15:56:46.000000000 -0400
8353 @@ -140,6 +140,17 @@ __OUTS(l)
8354
8355 #include <linux/vmalloc.h>
8356
8357 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8358 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8359 +{
8360 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8361 +}
8362 +
8363 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8364 +{
8365 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8366 +}
8367 +
8368 #include <asm-generic/iomap.h>
8369
8370 void __memcpy_fromio(void *, unsigned long, unsigned);
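Both io_32.h and io_64.h gain the same valid_phys_addr_range()/valid_mmap_phys_addr_range() helpers, which reject accesses whose last page would lie above the CPU's physical address width. A self-contained sketch of the arithmetic, with an illustrative 36-bit limit standing in for boot_cpu_data.x86_phys_bits:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1ULL << PAGE_SHIFT)

/* The access [addr, addr + count) is valid only if its last page still lies
 * below 2^phys_bits. */
static int range_is_valid(unsigned long long addr, unsigned long long count,
			  unsigned int phys_bits)
{
	return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) <
	       (1ULL << (phys_bits - PAGE_SHIFT));
}

int main(void)
{
	printf("%d\n", range_is_valid(0xfffff000ULL, 4096, 36));	/* 1: below 2^36  */
	printf("%d\n", range_is_valid(0xfffffffffULL, 4096, 36));	/* 0: crosses 2^36 */
	return 0;
}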
8371 diff -urNp linux-2.6.32.41/arch/x86/include/asm/iommu.h linux-2.6.32.41/arch/x86/include/asm/iommu.h
8372 --- linux-2.6.32.41/arch/x86/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
8373 +++ linux-2.6.32.41/arch/x86/include/asm/iommu.h 2011-04-17 15:56:46.000000000 -0400
8374 @@ -3,7 +3,7 @@
8375
8376 extern void pci_iommu_shutdown(void);
8377 extern void no_iommu_init(void);
8378 -extern struct dma_map_ops nommu_dma_ops;
8379 +extern const struct dma_map_ops nommu_dma_ops;
8380 extern int force_iommu, no_iommu;
8381 extern int iommu_detected;
8382 extern int iommu_pass_through;
8383 diff -urNp linux-2.6.32.41/arch/x86/include/asm/irqflags.h linux-2.6.32.41/arch/x86/include/asm/irqflags.h
8384 --- linux-2.6.32.41/arch/x86/include/asm/irqflags.h 2011-03-27 14:31:47.000000000 -0400
8385 +++ linux-2.6.32.41/arch/x86/include/asm/irqflags.h 2011-04-17 15:56:46.000000000 -0400
8386 @@ -142,6 +142,11 @@ static inline unsigned long __raw_local_
8387 sti; \
8388 sysexit
8389
8390 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
8391 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8392 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
8393 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
8394 +
8395 #else
8396 #define INTERRUPT_RETURN iret
8397 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8398 diff -urNp linux-2.6.32.41/arch/x86/include/asm/kprobes.h linux-2.6.32.41/arch/x86/include/asm/kprobes.h
8399 --- linux-2.6.32.41/arch/x86/include/asm/kprobes.h 2011-03-27 14:31:47.000000000 -0400
8400 +++ linux-2.6.32.41/arch/x86/include/asm/kprobes.h 2011-04-23 12:56:12.000000000 -0400
8401 @@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
8402 #define BREAKPOINT_INSTRUCTION 0xcc
8403 #define RELATIVEJUMP_INSTRUCTION 0xe9
8404 #define MAX_INSN_SIZE 16
8405 -#define MAX_STACK_SIZE 64
8406 -#define MIN_STACK_SIZE(ADDR) \
8407 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8408 - THREAD_SIZE - (unsigned long)(ADDR))) \
8409 - ? (MAX_STACK_SIZE) \
8410 - : (((unsigned long)current_thread_info()) + \
8411 - THREAD_SIZE - (unsigned long)(ADDR)))
8412 +#define MAX_STACK_SIZE 64UL
8413 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8414
8415 #define flush_insn_slot(p) do { } while (0)
8416
8417 diff -urNp linux-2.6.32.41/arch/x86/include/asm/kvm_host.h linux-2.6.32.41/arch/x86/include/asm/kvm_host.h
8418 --- linux-2.6.32.41/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:01.000000000 -0400
8419 +++ linux-2.6.32.41/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:26.000000000 -0400
8420 @@ -536,7 +536,7 @@ struct kvm_x86_ops {
8421 const struct trace_print_flags *exit_reasons_str;
8422 };
8423
8424 -extern struct kvm_x86_ops *kvm_x86_ops;
8425 +extern const struct kvm_x86_ops *kvm_x86_ops;
8426
8427 int kvm_mmu_module_init(void);
8428 void kvm_mmu_module_exit(void);
8429 diff -urNp linux-2.6.32.41/arch/x86/include/asm/local.h linux-2.6.32.41/arch/x86/include/asm/local.h
8430 --- linux-2.6.32.41/arch/x86/include/asm/local.h 2011-03-27 14:31:47.000000000 -0400
8431 +++ linux-2.6.32.41/arch/x86/include/asm/local.h 2011-04-17 15:56:46.000000000 -0400
8432 @@ -18,26 +18,58 @@ typedef struct {
8433
8434 static inline void local_inc(local_t *l)
8435 {
8436 - asm volatile(_ASM_INC "%0"
8437 + asm volatile(_ASM_INC "%0\n"
8438 +
8439 +#ifdef CONFIG_PAX_REFCOUNT
8440 + "jno 0f\n"
8441 + _ASM_DEC "%0\n"
8442 + "int $4\n0:\n"
8443 + _ASM_EXTABLE(0b, 0b)
8444 +#endif
8445 +
8446 : "+m" (l->a.counter));
8447 }
8448
8449 static inline void local_dec(local_t *l)
8450 {
8451 - asm volatile(_ASM_DEC "%0"
8452 + asm volatile(_ASM_DEC "%0\n"
8453 +
8454 +#ifdef CONFIG_PAX_REFCOUNT
8455 + "jno 0f\n"
8456 + _ASM_INC "%0\n"
8457 + "int $4\n0:\n"
8458 + _ASM_EXTABLE(0b, 0b)
8459 +#endif
8460 +
8461 : "+m" (l->a.counter));
8462 }
8463
8464 static inline void local_add(long i, local_t *l)
8465 {
8466 - asm volatile(_ASM_ADD "%1,%0"
8467 + asm volatile(_ASM_ADD "%1,%0\n"
8468 +
8469 +#ifdef CONFIG_PAX_REFCOUNT
8470 + "jno 0f\n"
8471 + _ASM_SUB "%1,%0\n"
8472 + "int $4\n0:\n"
8473 + _ASM_EXTABLE(0b, 0b)
8474 +#endif
8475 +
8476 : "+m" (l->a.counter)
8477 : "ir" (i));
8478 }
8479
8480 static inline void local_sub(long i, local_t *l)
8481 {
8482 - asm volatile(_ASM_SUB "%1,%0"
8483 + asm volatile(_ASM_SUB "%1,%0\n"
8484 +
8485 +#ifdef CONFIG_PAX_REFCOUNT
8486 + "jno 0f\n"
8487 + _ASM_ADD "%1,%0\n"
8488 + "int $4\n0:\n"
8489 + _ASM_EXTABLE(0b, 0b)
8490 +#endif
8491 +
8492 : "+m" (l->a.counter)
8493 : "ir" (i));
8494 }
8495 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
8496 {
8497 unsigned char c;
8498
8499 - asm volatile(_ASM_SUB "%2,%0; sete %1"
8500 + asm volatile(_ASM_SUB "%2,%0\n"
8501 +
8502 +#ifdef CONFIG_PAX_REFCOUNT
8503 + "jno 0f\n"
8504 + _ASM_ADD "%2,%0\n"
8505 + "int $4\n0:\n"
8506 + _ASM_EXTABLE(0b, 0b)
8507 +#endif
8508 +
8509 + "sete %1\n"
8510 : "+m" (l->a.counter), "=qm" (c)
8511 : "ir" (i) : "memory");
8512 return c;
8513 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
8514 {
8515 unsigned char c;
8516
8517 - asm volatile(_ASM_DEC "%0; sete %1"
8518 + asm volatile(_ASM_DEC "%0\n"
8519 +
8520 +#ifdef CONFIG_PAX_REFCOUNT
8521 + "jno 0f\n"
8522 + _ASM_INC "%0\n"
8523 + "int $4\n0:\n"
8524 + _ASM_EXTABLE(0b, 0b)
8525 +#endif
8526 +
8527 + "sete %1\n"
8528 : "+m" (l->a.counter), "=qm" (c)
8529 : : "memory");
8530 return c != 0;
8531 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
8532 {
8533 unsigned char c;
8534
8535 - asm volatile(_ASM_INC "%0; sete %1"
8536 + asm volatile(_ASM_INC "%0\n"
8537 +
8538 +#ifdef CONFIG_PAX_REFCOUNT
8539 + "jno 0f\n"
8540 + _ASM_DEC "%0\n"
8541 + "int $4\n0:\n"
8542 + _ASM_EXTABLE(0b, 0b)
8543 +#endif
8544 +
8545 + "sete %1\n"
8546 : "+m" (l->a.counter), "=qm" (c)
8547 : : "memory");
8548 return c != 0;
8549 @@ -110,7 +169,16 @@ static inline int local_add_negative(lon
8550 {
8551 unsigned char c;
8552
8553 - asm volatile(_ASM_ADD "%2,%0; sets %1"
8554 + asm volatile(_ASM_ADD "%2,%0\n"
8555 +
8556 +#ifdef CONFIG_PAX_REFCOUNT
8557 + "jno 0f\n"
8558 + _ASM_SUB "%2,%0\n"
8559 + "int $4\n0:\n"
8560 + _ASM_EXTABLE(0b, 0b)
8561 +#endif
8562 +
8563 + "sets %1\n"
8564 : "+m" (l->a.counter), "=qm" (c)
8565 : "ir" (i) : "memory");
8566 return c;
8567 @@ -133,7 +201,15 @@ static inline long local_add_return(long
8568 #endif
8569 /* Modern 486+ processor */
8570 __i = i;
8571 - asm volatile(_ASM_XADD "%0, %1;"
8572 + asm volatile(_ASM_XADD "%0, %1\n"
8573 +
8574 +#ifdef CONFIG_PAX_REFCOUNT
8575 + "jno 0f\n"
8576 + _ASM_MOV "%0,%1\n"
8577 + "int $4\n0:\n"
8578 + _ASM_EXTABLE(0b, 0b)
8579 +#endif
8580 +
8581 : "+r" (i), "+m" (l->a.counter)
8582 : : "memory");
8583 return i + __i;
8584 diff -urNp linux-2.6.32.41/arch/x86/include/asm/microcode.h linux-2.6.32.41/arch/x86/include/asm/microcode.h
8585 --- linux-2.6.32.41/arch/x86/include/asm/microcode.h 2011-03-27 14:31:47.000000000 -0400
8586 +++ linux-2.6.32.41/arch/x86/include/asm/microcode.h 2011-04-17 15:56:46.000000000 -0400
8587 @@ -12,13 +12,13 @@ struct device;
8588 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
8589
8590 struct microcode_ops {
8591 - enum ucode_state (*request_microcode_user) (int cpu,
8592 + enum ucode_state (* const request_microcode_user) (int cpu,
8593 const void __user *buf, size_t size);
8594
8595 - enum ucode_state (*request_microcode_fw) (int cpu,
8596 + enum ucode_state (* const request_microcode_fw) (int cpu,
8597 struct device *device);
8598
8599 - void (*microcode_fini_cpu) (int cpu);
8600 + void (* const microcode_fini_cpu) (int cpu);
8601
8602 /*
8603 * The generic 'microcode_core' part guarantees that
8604 @@ -38,18 +38,18 @@ struct ucode_cpu_info {
8605 extern struct ucode_cpu_info ucode_cpu_info[];
8606
8607 #ifdef CONFIG_MICROCODE_INTEL
8608 -extern struct microcode_ops * __init init_intel_microcode(void);
8609 +extern const struct microcode_ops * __init init_intel_microcode(void);
8610 #else
8611 -static inline struct microcode_ops * __init init_intel_microcode(void)
8612 +static inline const struct microcode_ops * __init init_intel_microcode(void)
8613 {
8614 return NULL;
8615 }
8616 #endif /* CONFIG_MICROCODE_INTEL */
8617
8618 #ifdef CONFIG_MICROCODE_AMD
8619 -extern struct microcode_ops * __init init_amd_microcode(void);
8620 +extern const struct microcode_ops * __init init_amd_microcode(void);
8621 #else
8622 -static inline struct microcode_ops * __init init_amd_microcode(void)
8623 +static inline const struct microcode_ops * __init init_amd_microcode(void)
8624 {
8625 return NULL;
8626 }
8627 diff -urNp linux-2.6.32.41/arch/x86/include/asm/mman.h linux-2.6.32.41/arch/x86/include/asm/mman.h
8628 --- linux-2.6.32.41/arch/x86/include/asm/mman.h 2011-03-27 14:31:47.000000000 -0400
8629 +++ linux-2.6.32.41/arch/x86/include/asm/mman.h 2011-04-17 15:56:46.000000000 -0400
8630 @@ -5,4 +5,14 @@
8631
8632 #include <asm-generic/mman.h>
8633
8634 +#ifdef __KERNEL__
8635 +#ifndef __ASSEMBLY__
8636 +#ifdef CONFIG_X86_32
8637 +#define arch_mmap_check i386_mmap_check
8638 +int i386_mmap_check(unsigned long addr, unsigned long len,
8639 + unsigned long flags);
8640 +#endif
8641 +#endif
8642 +#endif
8643 +
8644 #endif /* _ASM_X86_MMAN_H */
8645 diff -urNp linux-2.6.32.41/arch/x86/include/asm/mmu_context.h linux-2.6.32.41/arch/x86/include/asm/mmu_context.h
8646 --- linux-2.6.32.41/arch/x86/include/asm/mmu_context.h 2011-03-27 14:31:47.000000000 -0400
8647 +++ linux-2.6.32.41/arch/x86/include/asm/mmu_context.h 2011-04-17 15:56:46.000000000 -0400
8648 @@ -24,6 +24,21 @@ void destroy_context(struct mm_struct *m
8649
8650 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
8651 {
8652 +
8653 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8654 + unsigned int i;
8655 + pgd_t *pgd;
8656 +
8657 + pax_open_kernel();
8658 + pgd = get_cpu_pgd(smp_processor_id());
8659 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
8660 + if (paravirt_enabled())
8661 + set_pgd(pgd+i, native_make_pgd(0));
8662 + else
8663 + pgd[i] = native_make_pgd(0);
8664 + pax_close_kernel();
8665 +#endif
8666 +
8667 #ifdef CONFIG_SMP
8668 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
8669 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
8670 @@ -34,16 +49,30 @@ static inline void switch_mm(struct mm_s
8671 struct task_struct *tsk)
8672 {
8673 unsigned cpu = smp_processor_id();
8674 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
8675 + int tlbstate = TLBSTATE_OK;
8676 +#endif
8677
8678 if (likely(prev != next)) {
8679 #ifdef CONFIG_SMP
8680 +#ifdef CONFIG_X86_32
8681 + tlbstate = percpu_read(cpu_tlbstate.state);
8682 +#endif
8683 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
8684 percpu_write(cpu_tlbstate.active_mm, next);
8685 #endif
8686 cpumask_set_cpu(cpu, mm_cpumask(next));
8687
8688 /* Re-load page tables */
8689 +#ifdef CONFIG_PAX_PER_CPU_PGD
8690 + pax_open_kernel();
8691 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
8692 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
8693 + pax_close_kernel();
8694 + load_cr3(get_cpu_pgd(cpu));
8695 +#else
8696 load_cr3(next->pgd);
8697 +#endif
8698
8699 /* stop flush ipis for the previous mm */
8700 cpumask_clear_cpu(cpu, mm_cpumask(prev));
8701 @@ -53,9 +82,38 @@ static inline void switch_mm(struct mm_s
8702 */
8703 if (unlikely(prev->context.ldt != next->context.ldt))
8704 load_LDT_nolock(&next->context);
8705 - }
8706 +
8707 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8708 + if (!nx_enabled) {
8709 + smp_mb__before_clear_bit();
8710 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
8711 + smp_mb__after_clear_bit();
8712 + cpu_set(cpu, next->context.cpu_user_cs_mask);
8713 + }
8714 +#endif
8715 +
8716 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8717 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
8718 + prev->context.user_cs_limit != next->context.user_cs_limit))
8719 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8720 #ifdef CONFIG_SMP
8721 + else if (unlikely(tlbstate != TLBSTATE_OK))
8722 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8723 +#endif
8724 +#endif
8725 +
8726 + }
8727 else {
8728 +
8729 +#ifdef CONFIG_PAX_PER_CPU_PGD
8730 + pax_open_kernel();
8731 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
8732 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
8733 + pax_close_kernel();
8734 + load_cr3(get_cpu_pgd(cpu));
8735 +#endif
8736 +
8737 +#ifdef CONFIG_SMP
8738 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
8739 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
8740
8741 @@ -64,11 +122,28 @@ static inline void switch_mm(struct mm_s
8742 * tlb flush IPI delivery. We must reload CR3
8743 * to make sure to use no freed page tables.
8744 */
8745 +
8746 +#ifndef CONFIG_PAX_PER_CPU_PGD
8747 load_cr3(next->pgd);
8748 +#endif
8749 +
8750 load_LDT_nolock(&next->context);
8751 +
8752 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
8753 + if (!nx_enabled)
8754 + cpu_set(cpu, next->context.cpu_user_cs_mask);
8755 +#endif
8756 +
8757 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8758 +#ifdef CONFIG_PAX_PAGEEXEC
8759 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
8760 +#endif
8761 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8762 +#endif
8763 +
8764 }
8765 - }
8766 #endif
8767 + }
8768 }
8769
8770 #define activate_mm(prev, next) \
8771 diff -urNp linux-2.6.32.41/arch/x86/include/asm/mmu.h linux-2.6.32.41/arch/x86/include/asm/mmu.h
8772 --- linux-2.6.32.41/arch/x86/include/asm/mmu.h 2011-03-27 14:31:47.000000000 -0400
8773 +++ linux-2.6.32.41/arch/x86/include/asm/mmu.h 2011-04-17 15:56:46.000000000 -0400
8774 @@ -9,10 +9,23 @@
8775 * we put the segment information here.
8776 */
8777 typedef struct {
8778 - void *ldt;
8779 + struct desc_struct *ldt;
8780 int size;
8781 struct mutex lock;
8782 - void *vdso;
8783 + unsigned long vdso;
8784 +
8785 +#ifdef CONFIG_X86_32
8786 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
8787 + unsigned long user_cs_base;
8788 + unsigned long user_cs_limit;
8789 +
8790 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8791 + cpumask_t cpu_user_cs_mask;
8792 +#endif
8793 +
8794 +#endif
8795 +#endif
8796 +
8797 } mm_context_t;
8798
8799 #ifdef CONFIG_SMP
8800 diff -urNp linux-2.6.32.41/arch/x86/include/asm/module.h linux-2.6.32.41/arch/x86/include/asm/module.h
8801 --- linux-2.6.32.41/arch/x86/include/asm/module.h 2011-03-27 14:31:47.000000000 -0400
8802 +++ linux-2.6.32.41/arch/x86/include/asm/module.h 2011-04-23 13:18:57.000000000 -0400
8803 @@ -5,6 +5,7 @@
8804
8805 #ifdef CONFIG_X86_64
8806 /* X86_64 does not define MODULE_PROC_FAMILY */
8807 +#define MODULE_PROC_FAMILY ""
8808 #elif defined CONFIG_M386
8809 #define MODULE_PROC_FAMILY "386 "
8810 #elif defined CONFIG_M486
8811 @@ -59,13 +60,36 @@
8812 #error unknown processor family
8813 #endif
8814
8815 -#ifdef CONFIG_X86_32
8816 -# ifdef CONFIG_4KSTACKS
8817 -# define MODULE_STACKSIZE "4KSTACKS "
8818 -# else
8819 -# define MODULE_STACKSIZE ""
8820 -# endif
8821 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
8822 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8823 +#define MODULE_PAX_UDEREF "UDEREF "
8824 +#else
8825 +#define MODULE_PAX_UDEREF ""
8826 +#endif
8827 +
8828 +#ifdef CONFIG_PAX_KERNEXEC
8829 +#define MODULE_PAX_KERNEXEC "KERNEXEC "
8830 +#else
8831 +#define MODULE_PAX_KERNEXEC ""
8832 +#endif
8833 +
8834 +#ifdef CONFIG_PAX_REFCOUNT
8835 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
8836 +#else
8837 +#define MODULE_PAX_REFCOUNT ""
8838 #endif
8839
8840 +#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
8841 +#define MODULE_STACKSIZE "4KSTACKS "
8842 +#else
8843 +#define MODULE_STACKSIZE ""
8844 +#endif
8845 +
8846 +#ifdef CONFIG_GRKERNSEC
8847 +#define MODULE_GRSEC "GRSECURITY "
8848 +#else
8849 +#define MODULE_GRSEC ""
8850 +#endif
8851 +
8852 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
8853 +
8854 #endif /* _ASM_X86_MODULE_H */
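The module.h hunk folds the enabled grsecurity/PaX features into MODULE_ARCH_VERMAGIC, so a module built without, say, UDEREF refuses to load into a kernel built with it. The mechanism is plain compile-time concatenation of adjacent string literals; a minimal sketch with hypothetical FEATURE_* switches standing in for the CONFIG_ options:

#include <stdio.h>

/* Hypothetical feature switches standing in for CONFIG_PAX_KERNEXEC and
 * CONFIG_PAX_MEMORY_UDEREF. */
#define FEATURE_KERNEXEC 1
#define FEATURE_UDEREF   0

#if FEATURE_KERNEXEC
#define TAG_KERNEXEC "KERNEXEC "
#else
#define TAG_KERNEXEC ""
#endif

#if FEATURE_UDEREF
#define TAG_UDEREF "UDEREF "
#else
#define TAG_UDEREF ""
#endif

/* Adjacent string literals are concatenated by the compiler, exactly as the
 * MODULE_ARCH_VERMAGIC definition relies on. */
#define VERMAGIC "x86-64 " TAG_KERNEXEC TAG_UDEREF

int main(void)
{
	printf("vermagic: \"%s\"\n", VERMAGIC);
	return 0;
}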
8855 diff -urNp linux-2.6.32.41/arch/x86/include/asm/page_64_types.h linux-2.6.32.41/arch/x86/include/asm/page_64_types.h
8856 --- linux-2.6.32.41/arch/x86/include/asm/page_64_types.h 2011-03-27 14:31:47.000000000 -0400
8857 +++ linux-2.6.32.41/arch/x86/include/asm/page_64_types.h 2011-04-17 15:56:46.000000000 -0400
8858 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
8859
8860 /* duplicated to the one in bootmem.h */
8861 extern unsigned long max_pfn;
8862 -extern unsigned long phys_base;
8863 +extern const unsigned long phys_base;
8864
8865 extern unsigned long __phys_addr(unsigned long);
8866 #define __phys_reloc_hide(x) (x)
8867 diff -urNp linux-2.6.32.41/arch/x86/include/asm/paravirt.h linux-2.6.32.41/arch/x86/include/asm/paravirt.h
8868 --- linux-2.6.32.41/arch/x86/include/asm/paravirt.h 2011-03-27 14:31:47.000000000 -0400
8869 +++ linux-2.6.32.41/arch/x86/include/asm/paravirt.h 2011-04-17 15:56:46.000000000 -0400
8870 @@ -729,6 +729,21 @@ static inline void __set_fixmap(unsigned
8871 pv_mmu_ops.set_fixmap(idx, phys, flags);
8872 }
8873
8874 +#ifdef CONFIG_PAX_KERNEXEC
8875 +static inline unsigned long pax_open_kernel(void)
8876 +{
8877 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
8878 +}
8879 +
8880 +static inline unsigned long pax_close_kernel(void)
8881 +{
8882 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
8883 +}
8884 +#else
8885 +static inline unsigned long pax_open_kernel(void) { return 0; }
8886 +static inline unsigned long pax_close_kernel(void) { return 0; }
8887 +#endif
8888 +
8889 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
8890
8891 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
8892 @@ -945,7 +960,7 @@ extern void default_banner(void);
8893
8894 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
8895 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
8896 -#define PARA_INDIRECT(addr) *%cs:addr
8897 +#define PARA_INDIRECT(addr) *%ss:addr
8898 #endif
8899
8900 #define INTERRUPT_RETURN \
8901 @@ -1022,6 +1037,21 @@ extern void default_banner(void);
8902 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
8903 CLBR_NONE, \
8904 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
8905 +
8906 +#define GET_CR0_INTO_RDI \
8907 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
8908 + mov %rax,%rdi
8909 +
8910 +#define SET_RDI_INTO_CR0 \
8911 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
8912 +
8913 +#define GET_CR3_INTO_RDI \
8914 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
8915 + mov %rax,%rdi
8916 +
8917 +#define SET_RDI_INTO_CR3 \
8918 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
8919 +
8920 #endif /* CONFIG_X86_32 */
8921
8922 #endif /* __ASSEMBLY__ */
8923 diff -urNp linux-2.6.32.41/arch/x86/include/asm/paravirt_types.h linux-2.6.32.41/arch/x86/include/asm/paravirt_types.h
8924 --- linux-2.6.32.41/arch/x86/include/asm/paravirt_types.h 2011-03-27 14:31:47.000000000 -0400
8925 +++ linux-2.6.32.41/arch/x86/include/asm/paravirt_types.h 2011-04-17 15:56:46.000000000 -0400
8926 @@ -316,6 +316,12 @@ struct pv_mmu_ops {
8927 an mfn. We can tell which is which from the index. */
8928 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
8929 phys_addr_t phys, pgprot_t flags);
8930 +
8931 +#ifdef CONFIG_PAX_KERNEXEC
8932 + unsigned long (*pax_open_kernel)(void);
8933 + unsigned long (*pax_close_kernel)(void);
8934 +#endif
8935 +
8936 };
8937
8938 struct raw_spinlock;
8939 diff -urNp linux-2.6.32.41/arch/x86/include/asm/pci_x86.h linux-2.6.32.41/arch/x86/include/asm/pci_x86.h
8940 --- linux-2.6.32.41/arch/x86/include/asm/pci_x86.h 2011-03-27 14:31:47.000000000 -0400
8941 +++ linux-2.6.32.41/arch/x86/include/asm/pci_x86.h 2011-04-17 15:56:46.000000000 -0400
8942 @@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct
8943 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
8944
8945 struct pci_raw_ops {
8946 - int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
8947 + int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
8948 int reg, int len, u32 *val);
8949 - int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
8950 + int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
8951 int reg, int len, u32 val);
8952 };
8953
8954 -extern struct pci_raw_ops *raw_pci_ops;
8955 -extern struct pci_raw_ops *raw_pci_ext_ops;
8956 +extern const struct pci_raw_ops *raw_pci_ops;
8957 +extern const struct pci_raw_ops *raw_pci_ext_ops;
8958
8959 -extern struct pci_raw_ops pci_direct_conf1;
8960 +extern const struct pci_raw_ops pci_direct_conf1;
8961 extern bool port_cf9_safe;
8962
8963 /* arch_initcall level */
8964 diff -urNp linux-2.6.32.41/arch/x86/include/asm/pgalloc.h linux-2.6.32.41/arch/x86/include/asm/pgalloc.h
8965 --- linux-2.6.32.41/arch/x86/include/asm/pgalloc.h 2011-03-27 14:31:47.000000000 -0400
8966 +++ linux-2.6.32.41/arch/x86/include/asm/pgalloc.h 2011-04-17 15:56:46.000000000 -0400
8967 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
8968 pmd_t *pmd, pte_t *pte)
8969 {
8970 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
8971 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
8972 +}
8973 +
8974 +static inline void pmd_populate_user(struct mm_struct *mm,
8975 + pmd_t *pmd, pte_t *pte)
8976 +{
8977 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
8978 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
8979 }
8980
8981 diff -urNp linux-2.6.32.41/arch/x86/include/asm/pgtable-2level.h linux-2.6.32.41/arch/x86/include/asm/pgtable-2level.h
8982 --- linux-2.6.32.41/arch/x86/include/asm/pgtable-2level.h 2011-03-27 14:31:47.000000000 -0400
8983 +++ linux-2.6.32.41/arch/x86/include/asm/pgtable-2level.h 2011-04-17 15:56:46.000000000 -0400
8984 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
8985
8986 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8987 {
8988 + pax_open_kernel();
8989 *pmdp = pmd;
8990 + pax_close_kernel();
8991 }
8992
8993 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
8994 diff -urNp linux-2.6.32.41/arch/x86/include/asm/pgtable_32.h linux-2.6.32.41/arch/x86/include/asm/pgtable_32.h
8995 --- linux-2.6.32.41/arch/x86/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
8996 +++ linux-2.6.32.41/arch/x86/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
8997 @@ -26,9 +26,6 @@
8998 struct mm_struct;
8999 struct vm_area_struct;
9000
9001 -extern pgd_t swapper_pg_dir[1024];
9002 -extern pgd_t trampoline_pg_dir[1024];
9003 -
9004 static inline void pgtable_cache_init(void) { }
9005 static inline void check_pgt_cache(void) { }
9006 void paging_init(void);
9007 @@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, u
9008 # include <asm/pgtable-2level.h>
9009 #endif
9010
9011 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9012 +extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
9013 +#ifdef CONFIG_X86_PAE
9014 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9015 +#endif
9016 +
9017 #if defined(CONFIG_HIGHPTE)
9018 #define __KM_PTE \
9019 (in_nmi() ? KM_NMI_PTE : \
9020 @@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, u
9021 /* Clear a kernel PTE and flush it from the TLB */
9022 #define kpte_clear_flush(ptep, vaddr) \
9023 do { \
9024 + pax_open_kernel(); \
9025 pte_clear(&init_mm, (vaddr), (ptep)); \
9026 + pax_close_kernel(); \
9027 __flush_tlb_one((vaddr)); \
9028 } while (0)
9029
9030 @@ -85,6 +90,9 @@ do { \
9031
9032 #endif /* !__ASSEMBLY__ */
9033
9034 +#define HAVE_ARCH_UNMAPPED_AREA
9035 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9036 +
9037 /*
9038 * kern_addr_valid() is (1) for FLATMEM and (0) for
9039 * SPARSEMEM and DISCONTIGMEM
9040 diff -urNp linux-2.6.32.41/arch/x86/include/asm/pgtable_32_types.h linux-2.6.32.41/arch/x86/include/asm/pgtable_32_types.h
9041 --- linux-2.6.32.41/arch/x86/include/asm/pgtable_32_types.h 2011-03-27 14:31:47.000000000 -0400
9042 +++ linux-2.6.32.41/arch/x86/include/asm/pgtable_32_types.h 2011-04-17 15:56:46.000000000 -0400
9043 @@ -8,7 +8,7 @@
9044 */
9045 #ifdef CONFIG_X86_PAE
9046 # include <asm/pgtable-3level_types.h>
9047 -# define PMD_SIZE (1UL << PMD_SHIFT)
9048 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9049 # define PMD_MASK (~(PMD_SIZE - 1))
9050 #else
9051 # include <asm/pgtable-2level_types.h>
9052 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
9053 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9054 #endif
9055
9056 +#ifdef CONFIG_PAX_KERNEXEC
9057 +#ifndef __ASSEMBLY__
9058 +extern unsigned char MODULES_EXEC_VADDR[];
9059 +extern unsigned char MODULES_EXEC_END[];
9060 +#endif
9061 +#include <asm/boot.h>
9062 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9063 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9064 +#else
9065 +#define ktla_ktva(addr) (addr)
9066 +#define ktva_ktla(addr) (addr)
9067 +#endif
9068 +
9069 #define MODULES_VADDR VMALLOC_START
9070 #define MODULES_END VMALLOC_END
9071 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
9072 diff -urNp linux-2.6.32.41/arch/x86/include/asm/pgtable-3level.h linux-2.6.32.41/arch/x86/include/asm/pgtable-3level.h
9073 --- linux-2.6.32.41/arch/x86/include/asm/pgtable-3level.h 2011-03-27 14:31:47.000000000 -0400
9074 +++ linux-2.6.32.41/arch/x86/include/asm/pgtable-3level.h 2011-04-17 15:56:46.000000000 -0400
9075 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
9076
9077 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9078 {
9079 + pax_open_kernel();
9080 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9081 + pax_close_kernel();
9082 }
9083
9084 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9085 {
9086 + pax_open_kernel();
9087 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9088 + pax_close_kernel();
9089 }
9090
9091 /*
9092 diff -urNp linux-2.6.32.41/arch/x86/include/asm/pgtable_64.h linux-2.6.32.41/arch/x86/include/asm/pgtable_64.h
9093 --- linux-2.6.32.41/arch/x86/include/asm/pgtable_64.h 2011-03-27 14:31:47.000000000 -0400
9094 +++ linux-2.6.32.41/arch/x86/include/asm/pgtable_64.h 2011-04-17 15:56:46.000000000 -0400
9095 @@ -16,10 +16,13 @@
9096
9097 extern pud_t level3_kernel_pgt[512];
9098 extern pud_t level3_ident_pgt[512];
9099 +extern pud_t level3_vmalloc_pgt[512];
9100 +extern pud_t level3_vmemmap_pgt[512];
9101 +extern pud_t level2_vmemmap_pgt[512];
9102 extern pmd_t level2_kernel_pgt[512];
9103 extern pmd_t level2_fixmap_pgt[512];
9104 -extern pmd_t level2_ident_pgt[512];
9105 -extern pgd_t init_level4_pgt[];
9106 +extern pmd_t level2_ident_pgt[512*2];
9107 +extern pgd_t init_level4_pgt[512];
9108
9109 #define swapper_pg_dir init_level4_pgt
9110
9111 @@ -74,7 +77,9 @@ static inline pte_t native_ptep_get_and_
9112
9113 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9114 {
9115 + pax_open_kernel();
9116 *pmdp = pmd;
9117 + pax_close_kernel();
9118 }
9119
9120 static inline void native_pmd_clear(pmd_t *pmd)
9121 @@ -94,7 +99,9 @@ static inline void native_pud_clear(pud_
9122
9123 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9124 {
9125 + pax_open_kernel();
9126 *pgdp = pgd;
9127 + pax_close_kernel();
9128 }
9129
9130 static inline void native_pgd_clear(pgd_t *pgd)
9131 diff -urNp linux-2.6.32.41/arch/x86/include/asm/pgtable_64_types.h linux-2.6.32.41/arch/x86/include/asm/pgtable_64_types.h
9132 --- linux-2.6.32.41/arch/x86/include/asm/pgtable_64_types.h 2011-03-27 14:31:47.000000000 -0400
9133 +++ linux-2.6.32.41/arch/x86/include/asm/pgtable_64_types.h 2011-04-17 15:56:46.000000000 -0400
9134 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9135 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9136 #define MODULES_END _AC(0xffffffffff000000, UL)
9137 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9138 +#define MODULES_EXEC_VADDR MODULES_VADDR
9139 +#define MODULES_EXEC_END MODULES_END
9140 +
9141 +#define ktla_ktva(addr) (addr)
9142 +#define ktva_ktla(addr) (addr)
9143
9144 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9145 diff -urNp linux-2.6.32.41/arch/x86/include/asm/pgtable.h linux-2.6.32.41/arch/x86/include/asm/pgtable.h
9146 --- linux-2.6.32.41/arch/x86/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
9147 +++ linux-2.6.32.41/arch/x86/include/asm/pgtable.h 2011-04-17 15:56:46.000000000 -0400
9148 @@ -74,12 +74,51 @@ extern struct list_head pgd_list;
9149
9150 #define arch_end_context_switch(prev) do {} while(0)
9151
9152 +#define pax_open_kernel() native_pax_open_kernel()
9153 +#define pax_close_kernel() native_pax_close_kernel()
9154 #endif /* CONFIG_PARAVIRT */
9155
9156 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
9157 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9158 +
9159 +#ifdef CONFIG_PAX_KERNEXEC
9160 +static inline unsigned long native_pax_open_kernel(void)
9161 +{
9162 + unsigned long cr0;
9163 +
9164 + preempt_disable();
9165 + barrier();
9166 + cr0 = read_cr0() ^ X86_CR0_WP;
9167 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
9168 + write_cr0(cr0);
9169 + return cr0 ^ X86_CR0_WP;
9170 +}
9171 +
9172 +static inline unsigned long native_pax_close_kernel(void)
9173 +{
9174 + unsigned long cr0;
9175 +
9176 + cr0 = read_cr0() ^ X86_CR0_WP;
9177 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9178 + write_cr0(cr0);
9179 + barrier();
9180 + preempt_enable_no_resched();
9181 + return cr0 ^ X86_CR0_WP;
9182 +}
9183 +#else
9184 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
9185 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
9186 +#endif
9187 +
9188 /*
9189 * The following only work if pte_present() is true.
9190 * Undefined behaviour if not..
9191 */
9192 +static inline int pte_user(pte_t pte)
9193 +{
9194 + return pte_val(pte) & _PAGE_USER;
9195 +}
9196 +
9197 static inline int pte_dirty(pte_t pte)
9198 {
9199 return pte_flags(pte) & _PAGE_DIRTY;
9200 @@ -167,9 +206,29 @@ static inline pte_t pte_wrprotect(pte_t
9201 return pte_clear_flags(pte, _PAGE_RW);
9202 }
9203
9204 +static inline pte_t pte_mkread(pte_t pte)
9205 +{
9206 + return __pte(pte_val(pte) | _PAGE_USER);
9207 +}
9208 +
9209 static inline pte_t pte_mkexec(pte_t pte)
9210 {
9211 - return pte_clear_flags(pte, _PAGE_NX);
9212 +#ifdef CONFIG_X86_PAE
9213 + if (__supported_pte_mask & _PAGE_NX)
9214 + return pte_clear_flags(pte, _PAGE_NX);
9215 + else
9216 +#endif
9217 + return pte_set_flags(pte, _PAGE_USER);
9218 +}
9219 +
9220 +static inline pte_t pte_exprotect(pte_t pte)
9221 +{
9222 +#ifdef CONFIG_X86_PAE
9223 + if (__supported_pte_mask & _PAGE_NX)
9224 + return pte_set_flags(pte, _PAGE_NX);
9225 + else
9226 +#endif
9227 + return pte_clear_flags(pte, _PAGE_USER);
9228 }
9229
9230 static inline pte_t pte_mkdirty(pte_t pte)
9231 @@ -302,6 +361,15 @@ pte_t *populate_extra_pte(unsigned long
9232 #endif
9233
9234 #ifndef __ASSEMBLY__
9235 +
9236 +#ifdef CONFIG_PAX_PER_CPU_PGD
9237 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9238 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9239 +{
9240 + return cpu_pgd[cpu];
9241 +}
9242 +#endif
9243 +
9244 #include <linux/mm_types.h>
9245
9246 static inline int pte_none(pte_t pte)
9247 @@ -472,7 +540,7 @@ static inline pud_t *pud_offset(pgd_t *p
9248
9249 static inline int pgd_bad(pgd_t pgd)
9250 {
9251 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9252 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9253 }
9254
9255 static inline int pgd_none(pgd_t pgd)
9256 @@ -495,7 +563,12 @@ static inline int pgd_none(pgd_t pgd)
9257 * pgd_offset() returns a (pgd_t *)
9258 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9259 */
9260 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9261 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9262 +
9263 +#ifdef CONFIG_PAX_PER_CPU_PGD
9264 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9265 +#endif
9266 +
9267 /*
9268 * a shortcut which implies the use of the kernel's pgd, instead
9269 * of a process's
9270 @@ -506,6 +579,20 @@ static inline int pgd_none(pgd_t pgd)
9271 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9272 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9273
9274 +#ifdef CONFIG_X86_32
9275 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9276 +#else
9277 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9278 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9279 +
9280 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9281 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9282 +#else
9283 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9284 +#endif
9285 +
9286 +#endif
9287 +
9288 #ifndef __ASSEMBLY__
9289
9290 extern int direct_gbpages;
9291 @@ -611,11 +698,23 @@ static inline void ptep_set_wrprotect(st
9292 * dst and src can be on the same page, but the range must not overlap,
9293 * and must not cross a page boundary.
9294 */
9295 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9296 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
9297 {
9298 - memcpy(dst, src, count * sizeof(pgd_t));
9299 + pax_open_kernel();
9300 + while (count--)
9301 + *dst++ = *src++;
9302 + pax_close_kernel();
9303 }
9304
9305 +#ifdef CONFIG_PAX_PER_CPU_PGD
9306 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9307 +#endif
9308 +
9309 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9310 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9311 +#else
9312 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9313 +#endif
9314
9315 #include <asm-generic/pgtable.h>
9316 #endif /* __ASSEMBLY__ */
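The pax_open_kernel()/pax_close_kernel() pair added in the pgtable.h hunk above is PaX KERNEXEC's write window: with preemption disabled, CR0.WP is cleared so the kernel may briefly write through read-only mappings, then WP is restored on close. clone_pgd_range() in the same hunk follows exactly that discipline (open, copy the pgd entries, close). A loose userspace analogue of the open/write/close pattern, using mprotect() on a read-only page instead of CR0.WP (an illustrative sketch only, not code from this patch):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static void *page;
    static size_t pagesz;

    /* analogue of pax_open_kernel(): make the protected page writable */
    static void open_window(void)  { mprotect(page, pagesz, PROT_READ | PROT_WRITE); }
    /* analogue of pax_close_kernel(): restore the read-only protection */
    static void close_window(void) { mprotect(page, pagesz, PROT_READ); }

    int main(void)
    {
        pagesz = (size_t)sysconf(_SC_PAGESIZE);
        page = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        strcpy(page, "initial");
        mprotect(page, pagesz, PROT_READ);   /* normally read-only */

        open_window();                       /* writes allowed only inside the window */
        strcpy(page, "patched");
        close_window();

        printf("%s\n", (char *)page);        /* prints "patched" */
        return 0;
    }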
9317 diff -urNp linux-2.6.32.41/arch/x86/include/asm/pgtable_types.h linux-2.6.32.41/arch/x86/include/asm/pgtable_types.h
9318 --- linux-2.6.32.41/arch/x86/include/asm/pgtable_types.h 2011-03-27 14:31:47.000000000 -0400
9319 +++ linux-2.6.32.41/arch/x86/include/asm/pgtable_types.h 2011-04-17 15:56:46.000000000 -0400
9320 @@ -16,12 +16,11 @@
9321 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9322 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9323 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9324 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9325 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9326 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9327 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9328 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9329 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9330 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
9331 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
9332 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9333
9334 /* If _PAGE_BIT_PRESENT is clear, we use these: */
9335 @@ -39,7 +38,6 @@
9336 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9337 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9338 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9339 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9340 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9341 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9342 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
9343 @@ -55,8 +53,10 @@
9344
9345 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9346 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9347 -#else
9348 +#elif defined(CONFIG_KMEMCHECK)
9349 #define _PAGE_NX (_AT(pteval_t, 0))
9350 +#else
9351 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9352 #endif
9353
9354 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
9355 @@ -93,6 +93,9 @@
9356 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9357 _PAGE_ACCESSED)
9358
9359 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
9360 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
9361 +
9362 #define __PAGE_KERNEL_EXEC \
9363 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9364 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
9365 @@ -103,8 +106,8 @@
9366 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9367 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9368 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9369 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
9370 -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
9371 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
9372 +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
9373 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
9374 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
9375 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
9376 @@ -163,8 +166,8 @@
9377 * bits are combined, this will alow user to access the high address mapped
9378 * VDSO in the presence of CONFIG_COMPAT_VDSO
9379 */
9380 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9381 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9382 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9383 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9384 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9385 #endif
9386
9387 @@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t p
9388 {
9389 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9390 }
9391 +#endif
9392
9393 +#if PAGETABLE_LEVELS == 3
9394 +#include <asm-generic/pgtable-nopud.h>
9395 +#endif
9396 +
9397 +#if PAGETABLE_LEVELS == 2
9398 +#include <asm-generic/pgtable-nopmd.h>
9399 +#endif
9400 +
9401 +#ifndef __ASSEMBLY__
9402 #if PAGETABLE_LEVELS > 3
9403 typedef struct { pudval_t pud; } pud_t;
9404
9405 @@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pu
9406 return pud.pud;
9407 }
9408 #else
9409 -#include <asm-generic/pgtable-nopud.h>
9410 -
9411 static inline pudval_t native_pud_val(pud_t pud)
9412 {
9413 return native_pgd_val(pud.pgd);
9414 @@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pm
9415 return pmd.pmd;
9416 }
9417 #else
9418 -#include <asm-generic/pgtable-nopmd.h>
9419 -
9420 static inline pmdval_t native_pmd_val(pmd_t pmd)
9421 {
9422 return native_pgd_val(pmd.pud.pgd);
9423 @@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
9424
9425 extern pteval_t __supported_pte_mask;
9426 extern void set_nx(void);
9427 +
9428 +#ifdef CONFIG_X86_32
9429 +#ifdef CONFIG_X86_PAE
9430 extern int nx_enabled;
9431 +#else
9432 +#define nx_enabled (0)
9433 +#endif
9434 +#else
9435 +#define nx_enabled (1)
9436 +#endif
9437
9438 #define pgprot_writecombine pgprot_writecombine
9439 extern pgprot_t pgprot_writecombine(pgprot_t prot);
9440 diff -urNp linux-2.6.32.41/arch/x86/include/asm/processor.h linux-2.6.32.41/arch/x86/include/asm/processor.h
9441 --- linux-2.6.32.41/arch/x86/include/asm/processor.h 2011-04-22 19:16:29.000000000 -0400
9442 +++ linux-2.6.32.41/arch/x86/include/asm/processor.h 2011-05-11 18:25:15.000000000 -0400
9443 @@ -272,7 +272,7 @@ struct tss_struct {
9444
9445 } ____cacheline_aligned;
9446
9447 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
9448 +extern struct tss_struct init_tss[NR_CPUS];
9449
9450 /*
9451 * Save the original ist values for checking stack pointers during debugging
9452 @@ -888,11 +888,18 @@ static inline void spin_lock_prefetch(co
9453 */
9454 #define TASK_SIZE PAGE_OFFSET
9455 #define TASK_SIZE_MAX TASK_SIZE
9456 +
9457 +#ifdef CONFIG_PAX_SEGMEXEC
9458 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
9459 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
9460 +#else
9461 #define STACK_TOP TASK_SIZE
9462 -#define STACK_TOP_MAX STACK_TOP
9463 +#endif
9464 +
9465 +#define STACK_TOP_MAX TASK_SIZE
9466
9467 #define INIT_THREAD { \
9468 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
9469 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9470 .vm86_info = NULL, \
9471 .sysenter_cs = __KERNEL_CS, \
9472 .io_bitmap_ptr = NULL, \
9473 @@ -906,7 +913,7 @@ static inline void spin_lock_prefetch(co
9474 */
9475 #define INIT_TSS { \
9476 .x86_tss = { \
9477 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
9478 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9479 .ss0 = __KERNEL_DS, \
9480 .ss1 = __KERNEL_CS, \
9481 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
9482 @@ -917,11 +924,7 @@ static inline void spin_lock_prefetch(co
9483 extern unsigned long thread_saved_pc(struct task_struct *tsk);
9484
9485 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
9486 -#define KSTK_TOP(info) \
9487 -({ \
9488 - unsigned long *__ptr = (unsigned long *)(info); \
9489 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
9490 -})
9491 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
9492
9493 /*
9494 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
9495 @@ -936,7 +939,7 @@ extern unsigned long thread_saved_pc(str
9496 #define task_pt_regs(task) \
9497 ({ \
9498 struct pt_regs *__regs__; \
9499 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
9500 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
9501 __regs__ - 1; \
9502 })
9503
9504 @@ -946,13 +949,13 @@ extern unsigned long thread_saved_pc(str
9505 /*
9506 * User space process size. 47bits minus one guard page.
9507 */
9508 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
9509 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
9510
9511 /* This decides where the kernel will search for a free chunk of vm
9512 * space during mmap's.
9513 */
9514 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
9515 - 0xc0000000 : 0xFFFFe000)
9516 + 0xc0000000 : 0xFFFFf000)
9517
9518 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
9519 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
9520 @@ -963,11 +966,11 @@ extern unsigned long thread_saved_pc(str
9521 #define STACK_TOP_MAX TASK_SIZE_MAX
9522
9523 #define INIT_THREAD { \
9524 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9525 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9526 }
9527
9528 #define INIT_TSS { \
9529 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9530 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9531 }
9532
9533 /*
9534 @@ -989,6 +992,10 @@ extern void start_thread(struct pt_regs
9535 */
9536 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
9537
9538 +#ifdef CONFIG_PAX_SEGMEXEC
9539 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
9540 +#endif
9541 +
9542 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
9543
9544 /* Get/set a process' ability to use the timestamp counter instruction */
9545 diff -urNp linux-2.6.32.41/arch/x86/include/asm/ptrace.h linux-2.6.32.41/arch/x86/include/asm/ptrace.h
9546 --- linux-2.6.32.41/arch/x86/include/asm/ptrace.h 2011-03-27 14:31:47.000000000 -0400
9547 +++ linux-2.6.32.41/arch/x86/include/asm/ptrace.h 2011-04-17 15:56:46.000000000 -0400
9548 @@ -151,28 +151,29 @@ static inline unsigned long regs_return_
9549 }
9550
9551 /*
9552 - * user_mode_vm(regs) determines whether a register set came from user mode.
9553 + * user_mode(regs) determines whether a register set came from user mode.
9554 * This is true if V8086 mode was enabled OR if the register set was from
9555 * protected mode with RPL-3 CS value. This tricky test checks that with
9556 * one comparison. Many places in the kernel can bypass this full check
9557 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
9558 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
9559 + * be used.
9560 */
9561 -static inline int user_mode(struct pt_regs *regs)
9562 +static inline int user_mode_novm(struct pt_regs *regs)
9563 {
9564 #ifdef CONFIG_X86_32
9565 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
9566 #else
9567 - return !!(regs->cs & 3);
9568 + return !!(regs->cs & SEGMENT_RPL_MASK);
9569 #endif
9570 }
9571
9572 -static inline int user_mode_vm(struct pt_regs *regs)
9573 +static inline int user_mode(struct pt_regs *regs)
9574 {
9575 #ifdef CONFIG_X86_32
9576 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
9577 USER_RPL;
9578 #else
9579 - return user_mode(regs);
9580 + return user_mode_novm(regs);
9581 #endif
9582 }
9583
9584 diff -urNp linux-2.6.32.41/arch/x86/include/asm/reboot.h linux-2.6.32.41/arch/x86/include/asm/reboot.h
9585 --- linux-2.6.32.41/arch/x86/include/asm/reboot.h 2011-03-27 14:31:47.000000000 -0400
9586 +++ linux-2.6.32.41/arch/x86/include/asm/reboot.h 2011-05-22 23:02:03.000000000 -0400
9587 @@ -6,19 +6,19 @@
9588 struct pt_regs;
9589
9590 struct machine_ops {
9591 - void (*restart)(char *cmd);
9592 - void (*halt)(void);
9593 - void (*power_off)(void);
9594 + void (* __noreturn restart)(char *cmd);
9595 + void (* __noreturn halt)(void);
9596 + void (* __noreturn power_off)(void);
9597 void (*shutdown)(void);
9598 void (*crash_shutdown)(struct pt_regs *);
9599 - void (*emergency_restart)(void);
9600 + void (* __noreturn emergency_restart)(void);
9601 };
9602
9603 extern struct machine_ops machine_ops;
9604
9605 void native_machine_crash_shutdown(struct pt_regs *regs);
9606 void native_machine_shutdown(void);
9607 -void machine_real_restart(const unsigned char *code, int length);
9608 +void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
9609
9610 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
9611 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
9612 diff -urNp linux-2.6.32.41/arch/x86/include/asm/rwsem.h linux-2.6.32.41/arch/x86/include/asm/rwsem.h
9613 --- linux-2.6.32.41/arch/x86/include/asm/rwsem.h 2011-03-27 14:31:47.000000000 -0400
9614 +++ linux-2.6.32.41/arch/x86/include/asm/rwsem.h 2011-04-17 15:56:46.000000000 -0400
9615 @@ -118,6 +118,14 @@ static inline void __down_read(struct rw
9616 {
9617 asm volatile("# beginning down_read\n\t"
9618 LOCK_PREFIX _ASM_INC "(%1)\n\t"
9619 +
9620 +#ifdef CONFIG_PAX_REFCOUNT
9621 + "jno 0f\n"
9622 + LOCK_PREFIX _ASM_DEC "(%1)\n\t"
9623 + "int $4\n0:\n"
9624 + _ASM_EXTABLE(0b, 0b)
9625 +#endif
9626 +
9627 /* adds 0x00000001, returns the old value */
9628 " jns 1f\n"
9629 " call call_rwsem_down_read_failed\n"
9630 @@ -139,6 +147,14 @@ static inline int __down_read_trylock(st
9631 "1:\n\t"
9632 " mov %1,%2\n\t"
9633 " add %3,%2\n\t"
9634 +
9635 +#ifdef CONFIG_PAX_REFCOUNT
9636 + "jno 0f\n"
9637 + "sub %3,%2\n"
9638 + "int $4\n0:\n"
9639 + _ASM_EXTABLE(0b, 0b)
9640 +#endif
9641 +
9642 " jle 2f\n\t"
9643 LOCK_PREFIX " cmpxchg %2,%0\n\t"
9644 " jnz 1b\n\t"
9645 @@ -160,6 +176,14 @@ static inline void __down_write_nested(s
9646 tmp = RWSEM_ACTIVE_WRITE_BIAS;
9647 asm volatile("# beginning down_write\n\t"
9648 LOCK_PREFIX " xadd %1,(%2)\n\t"
9649 +
9650 +#ifdef CONFIG_PAX_REFCOUNT
9651 + "jno 0f\n"
9652 + "mov %1,(%2)\n"
9653 + "int $4\n0:\n"
9654 + _ASM_EXTABLE(0b, 0b)
9655 +#endif
9656 +
9657 /* subtract 0x0000ffff, returns the old value */
9658 " test %1,%1\n\t"
9659 /* was the count 0 before? */
9660 @@ -198,6 +222,14 @@ static inline void __up_read(struct rw_s
9661 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
9662 asm volatile("# beginning __up_read\n\t"
9663 LOCK_PREFIX " xadd %1,(%2)\n\t"
9664 +
9665 +#ifdef CONFIG_PAX_REFCOUNT
9666 + "jno 0f\n"
9667 + "mov %1,(%2)\n"
9668 + "int $4\n0:\n"
9669 + _ASM_EXTABLE(0b, 0b)
9670 +#endif
9671 +
9672 /* subtracts 1, returns the old value */
9673 " jns 1f\n\t"
9674 " call call_rwsem_wake\n"
9675 @@ -216,6 +248,14 @@ static inline void __up_write(struct rw_
9676 rwsem_count_t tmp;
9677 asm volatile("# beginning __up_write\n\t"
9678 LOCK_PREFIX " xadd %1,(%2)\n\t"
9679 +
9680 +#ifdef CONFIG_PAX_REFCOUNT
9681 + "jno 0f\n"
9682 + "mov %1,(%2)\n"
9683 + "int $4\n0:\n"
9684 + _ASM_EXTABLE(0b, 0b)
9685 +#endif
9686 +
9687 /* tries to transition
9688 0xffff0001 -> 0x00000000 */
9689 " jz 1f\n"
9690 @@ -234,6 +274,14 @@ static inline void __downgrade_write(str
9691 {
9692 asm volatile("# beginning __downgrade_write\n\t"
9693 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
9694 +
9695 +#ifdef CONFIG_PAX_REFCOUNT
9696 + "jno 0f\n"
9697 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
9698 + "int $4\n0:\n"
9699 + _ASM_EXTABLE(0b, 0b)
9700 +#endif
9701 +
9702 /*
9703 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
9704 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
9705 @@ -253,7 +301,15 @@ static inline void __downgrade_write(str
9706 static inline void rwsem_atomic_add(rwsem_count_t delta,
9707 struct rw_semaphore *sem)
9708 {
9709 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
9710 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
9711 +
9712 +#ifdef CONFIG_PAX_REFCOUNT
9713 + "jno 0f\n"
9714 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
9715 + "int $4\n0:\n"
9716 + _ASM_EXTABLE(0b, 0b)
9717 +#endif
9718 +
9719 : "+m" (sem->count)
9720 : "er" (delta));
9721 }
9722 @@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic
9723 {
9724 rwsem_count_t tmp = delta;
9725
9726 - asm volatile(LOCK_PREFIX "xadd %0,%1"
9727 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
9728 +
9729 +#ifdef CONFIG_PAX_REFCOUNT
9730 + "jno 0f\n"
9731 + "mov %0,%1\n"
9732 + "int $4\n0:\n"
9733 + _ASM_EXTABLE(0b, 0b)
9734 +#endif
9735 +
9736 : "+r" (tmp), "+m" (sem->count)
9737 : : "memory");
9738
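Each locked arithmetic operation in this file (and in spinlock.h below) gains a PAX_REFCOUNT epilogue: "jno" falls through when the overflow flag is clear, otherwise the just-performed operation is backed out and "int $4" transfers to the kernel's overflow handler before a reference count can wrap. A rough C-level analogue of the detect-and-refuse idea, using __builtin_add_overflow instead of the flags register (the asm commits the add first and undoes it; this sketch simply refuses up front):

    #include <limits.h>
    #include <stdio.h>

    /* Returns 0 and commits the add, or -1 and leaves the counter untouched
     * when the signed result would overflow. */
    static int checked_add(int *counter, int delta)
    {
        int result;
        if (__builtin_add_overflow(*counter, delta, &result))
            return -1;
        *counter = result;
        return 0;
    }

    int main(void)
    {
        int refs = INT_MAX - 1;
        printf("%d\n", checked_add(&refs, 1));   /* 0: ok, refs == INT_MAX */
        printf("%d\n", checked_add(&refs, 1));   /* -1: would overflow */
        printf("%d\n", refs);                    /* still INT_MAX */
        return 0;
    }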
9739 diff -urNp linux-2.6.32.41/arch/x86/include/asm/segment.h linux-2.6.32.41/arch/x86/include/asm/segment.h
9740 --- linux-2.6.32.41/arch/x86/include/asm/segment.h 2011-03-27 14:31:47.000000000 -0400
9741 +++ linux-2.6.32.41/arch/x86/include/asm/segment.h 2011-04-17 15:56:46.000000000 -0400
9742 @@ -62,8 +62,8 @@
9743 * 26 - ESPFIX small SS
9744 * 27 - per-cpu [ offset to per-cpu data area ]
9745 * 28 - stack_canary-20 [ for stack protector ]
9746 - * 29 - unused
9747 - * 30 - unused
9748 + * 29 - PCI BIOS CS
9749 + * 30 - PCI BIOS DS
9750 * 31 - TSS for double fault handler
9751 */
9752 #define GDT_ENTRY_TLS_MIN 6
9753 @@ -77,6 +77,8 @@
9754
9755 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
9756
9757 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
9758 +
9759 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
9760
9761 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
9762 @@ -88,7 +90,7 @@
9763 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
9764 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
9765
9766 -#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
9767 +#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
9768 #ifdef CONFIG_SMP
9769 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
9770 #else
9771 @@ -102,6 +104,12 @@
9772 #define __KERNEL_STACK_CANARY 0
9773 #endif
9774
9775 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
9776 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
9777 +
9778 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
9779 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
9780 +
9781 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
9782
9783 /*
9784 @@ -139,7 +147,7 @@
9785 */
9786
9787 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
9788 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
9789 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
9790
9791
9792 #else
9793 @@ -163,6 +171,8 @@
9794 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
9795 #define __USER32_DS __USER_DS
9796
9797 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
9798 +
9799 #define GDT_ENTRY_TSS 8 /* needs two entries */
9800 #define GDT_ENTRY_LDT 10 /* needs two entries */
9801 #define GDT_ENTRY_TLS_MIN 12
9802 @@ -183,6 +193,7 @@
9803 #endif
9804
9805 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
9806 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
9807 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
9808 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
9809 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
9810 diff -urNp linux-2.6.32.41/arch/x86/include/asm/smp.h linux-2.6.32.41/arch/x86/include/asm/smp.h
9811 --- linux-2.6.32.41/arch/x86/include/asm/smp.h 2011-03-27 14:31:47.000000000 -0400
9812 +++ linux-2.6.32.41/arch/x86/include/asm/smp.h 2011-04-17 15:56:46.000000000 -0400
9813 @@ -24,7 +24,7 @@ extern unsigned int num_processors;
9814 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
9815 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
9816 DECLARE_PER_CPU(u16, cpu_llc_id);
9817 -DECLARE_PER_CPU(int, cpu_number);
9818 +DECLARE_PER_CPU(unsigned int, cpu_number);
9819
9820 static inline struct cpumask *cpu_sibling_mask(int cpu)
9821 {
9822 @@ -175,14 +175,8 @@ extern unsigned disabled_cpus __cpuinitd
9823 extern int safe_smp_processor_id(void);
9824
9825 #elif defined(CONFIG_X86_64_SMP)
9826 -#define raw_smp_processor_id() (percpu_read(cpu_number))
9827 -
9828 -#define stack_smp_processor_id() \
9829 -({ \
9830 - struct thread_info *ti; \
9831 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
9832 - ti->cpu; \
9833 -})
9834 +#define raw_smp_processor_id() (percpu_read(cpu_number))
9835 +#define stack_smp_processor_id() raw_smp_processor_id()
9836 #define safe_smp_processor_id() smp_processor_id()
9837
9838 #endif
9839 diff -urNp linux-2.6.32.41/arch/x86/include/asm/spinlock.h linux-2.6.32.41/arch/x86/include/asm/spinlock.h
9840 --- linux-2.6.32.41/arch/x86/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
9841 +++ linux-2.6.32.41/arch/x86/include/asm/spinlock.h 2011-04-17 15:56:46.000000000 -0400
9842 @@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(r
9843 static inline void __raw_read_lock(raw_rwlock_t *rw)
9844 {
9845 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
9846 +
9847 +#ifdef CONFIG_PAX_REFCOUNT
9848 + "jno 0f\n"
9849 + LOCK_PREFIX " addl $1,(%0)\n"
9850 + "int $4\n0:\n"
9851 + _ASM_EXTABLE(0b, 0b)
9852 +#endif
9853 +
9854 "jns 1f\n"
9855 "call __read_lock_failed\n\t"
9856 "1:\n"
9857 @@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_r
9858 static inline void __raw_write_lock(raw_rwlock_t *rw)
9859 {
9860 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
9861 +
9862 +#ifdef CONFIG_PAX_REFCOUNT
9863 + "jno 0f\n"
9864 + LOCK_PREFIX " addl %1,(%0)\n"
9865 + "int $4\n0:\n"
9866 + _ASM_EXTABLE(0b, 0b)
9867 +#endif
9868 +
9869 "jz 1f\n"
9870 "call __write_lock_failed\n\t"
9871 "1:\n"
9872 @@ -286,12 +302,29 @@ static inline int __raw_write_trylock(ra
9873
9874 static inline void __raw_read_unlock(raw_rwlock_t *rw)
9875 {
9876 - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
9877 + asm volatile(LOCK_PREFIX "incl %0\n"
9878 +
9879 +#ifdef CONFIG_PAX_REFCOUNT
9880 + "jno 0f\n"
9881 + LOCK_PREFIX "decl %0\n"
9882 + "int $4\n0:\n"
9883 + _ASM_EXTABLE(0b, 0b)
9884 +#endif
9885 +
9886 + :"+m" (rw->lock) : : "memory");
9887 }
9888
9889 static inline void __raw_write_unlock(raw_rwlock_t *rw)
9890 {
9891 - asm volatile(LOCK_PREFIX "addl %1, %0"
9892 + asm volatile(LOCK_PREFIX "addl %1, %0\n"
9893 +
9894 +#ifdef CONFIG_PAX_REFCOUNT
9895 + "jno 0f\n"
9896 + LOCK_PREFIX "subl %1, %0\n"
9897 + "int $4\n0:\n"
9898 + _ASM_EXTABLE(0b, 0b)
9899 +#endif
9900 +
9901 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
9902 }
9903
9904 diff -urNp linux-2.6.32.41/arch/x86/include/asm/stackprotector.h linux-2.6.32.41/arch/x86/include/asm/stackprotector.h
9905 --- linux-2.6.32.41/arch/x86/include/asm/stackprotector.h 2011-03-27 14:31:47.000000000 -0400
9906 +++ linux-2.6.32.41/arch/x86/include/asm/stackprotector.h 2011-04-17 15:56:46.000000000 -0400
9907 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
9908
9909 static inline void load_stack_canary_segment(void)
9910 {
9911 -#ifdef CONFIG_X86_32
9912 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
9913 asm volatile ("mov %0, %%gs" : : "r" (0));
9914 #endif
9915 }
9916 diff -urNp linux-2.6.32.41/arch/x86/include/asm/system.h linux-2.6.32.41/arch/x86/include/asm/system.h
9917 --- linux-2.6.32.41/arch/x86/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
9918 +++ linux-2.6.32.41/arch/x86/include/asm/system.h 2011-05-22 23:02:03.000000000 -0400
9919 @@ -132,7 +132,7 @@ do { \
9920 "thread_return:\n\t" \
9921 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
9922 __switch_canary \
9923 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
9924 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
9925 "movq %%rax,%%rdi\n\t" \
9926 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
9927 "jnz ret_from_fork\n\t" \
9928 @@ -143,7 +143,7 @@ do { \
9929 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
9930 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
9931 [_tif_fork] "i" (_TIF_FORK), \
9932 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
9933 + [thread_info] "m" (per_cpu_var(current_tinfo)), \
9934 [current_task] "m" (per_cpu_var(current_task)) \
9935 __switch_canary_iparam \
9936 : "memory", "cc" __EXTRA_CLOBBER)
9937 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
9938 {
9939 unsigned long __limit;
9940 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
9941 - return __limit + 1;
9942 + return __limit;
9943 }
9944
9945 static inline void native_clts(void)
9946 @@ -340,12 +340,12 @@ void enable_hlt(void);
9947
9948 void cpu_idle_wait(void);
9949
9950 -extern unsigned long arch_align_stack(unsigned long sp);
9951 +#define arch_align_stack(x) ((x) & ~0xfUL)
9952 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
9953
9954 void default_idle(void);
9955
9956 -void stop_this_cpu(void *dummy);
9957 +void stop_this_cpu(void *dummy) __noreturn;
9958
9959 /*
9960 * Force strict CPU ordering.
9961 diff -urNp linux-2.6.32.41/arch/x86/include/asm/thread_info.h linux-2.6.32.41/arch/x86/include/asm/thread_info.h
9962 --- linux-2.6.32.41/arch/x86/include/asm/thread_info.h 2011-03-27 14:31:47.000000000 -0400
9963 +++ linux-2.6.32.41/arch/x86/include/asm/thread_info.h 2011-05-17 19:26:34.000000000 -0400
9964 @@ -10,6 +10,7 @@
9965 #include <linux/compiler.h>
9966 #include <asm/page.h>
9967 #include <asm/types.h>
9968 +#include <asm/percpu.h>
9969
9970 /*
9971 * low level task data that entry.S needs immediate access to
9972 @@ -24,7 +25,6 @@ struct exec_domain;
9973 #include <asm/atomic.h>
9974
9975 struct thread_info {
9976 - struct task_struct *task; /* main task structure */
9977 struct exec_domain *exec_domain; /* execution domain */
9978 __u32 flags; /* low level flags */
9979 __u32 status; /* thread synchronous flags */
9980 @@ -34,18 +34,12 @@ struct thread_info {
9981 mm_segment_t addr_limit;
9982 struct restart_block restart_block;
9983 void __user *sysenter_return;
9984 -#ifdef CONFIG_X86_32
9985 - unsigned long previous_esp; /* ESP of the previous stack in
9986 - case of nested (IRQ) stacks
9987 - */
9988 - __u8 supervisor_stack[0];
9989 -#endif
9990 + unsigned long lowest_stack;
9991 int uaccess_err;
9992 };
9993
9994 -#define INIT_THREAD_INFO(tsk) \
9995 +#define INIT_THREAD_INFO \
9996 { \
9997 - .task = &tsk, \
9998 .exec_domain = &default_exec_domain, \
9999 .flags = 0, \
10000 .cpu = 0, \
10001 @@ -56,7 +50,7 @@ struct thread_info {
10002 }, \
10003 }
10004
10005 -#define init_thread_info (init_thread_union.thread_info)
10006 +#define init_thread_info (init_thread_union.stack)
10007 #define init_stack (init_thread_union.stack)
10008
10009 #else /* !__ASSEMBLY__ */
10010 @@ -163,6 +157,23 @@ struct thread_info {
10011 #define alloc_thread_info(tsk) \
10012 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
10013
10014 +#ifdef __ASSEMBLY__
10015 +/* how to get the thread information struct from ASM */
10016 +#define GET_THREAD_INFO(reg) \
10017 + mov PER_CPU_VAR(current_tinfo), reg
10018 +
10019 +/* use this one if reg already contains %esp */
10020 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10021 +#else
10022 +/* how to get the thread information struct from C */
10023 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10024 +
10025 +static __always_inline struct thread_info *current_thread_info(void)
10026 +{
10027 + return percpu_read_stable(current_tinfo);
10028 +}
10029 +#endif
10030 +
10031 #ifdef CONFIG_X86_32
10032
10033 #define STACK_WARN (THREAD_SIZE/8)
10034 @@ -173,35 +184,13 @@ struct thread_info {
10035 */
10036 #ifndef __ASSEMBLY__
10037
10038 -
10039 /* how to get the current stack pointer from C */
10040 register unsigned long current_stack_pointer asm("esp") __used;
10041
10042 -/* how to get the thread information struct from C */
10043 -static inline struct thread_info *current_thread_info(void)
10044 -{
10045 - return (struct thread_info *)
10046 - (current_stack_pointer & ~(THREAD_SIZE - 1));
10047 -}
10048 -
10049 -#else /* !__ASSEMBLY__ */
10050 -
10051 -/* how to get the thread information struct from ASM */
10052 -#define GET_THREAD_INFO(reg) \
10053 - movl $-THREAD_SIZE, reg; \
10054 - andl %esp, reg
10055 -
10056 -/* use this one if reg already contains %esp */
10057 -#define GET_THREAD_INFO_WITH_ESP(reg) \
10058 - andl $-THREAD_SIZE, reg
10059 -
10060 #endif
10061
10062 #else /* X86_32 */
10063
10064 -#include <asm/percpu.h>
10065 -#define KERNEL_STACK_OFFSET (5*8)
10066 -
10067 /*
10068 * macros/functions for gaining access to the thread information structure
10069 * preempt_count needs to be 1 initially, until the scheduler is functional.
10070 @@ -209,21 +198,8 @@ static inline struct thread_info *curren
10071 #ifndef __ASSEMBLY__
10072 DECLARE_PER_CPU(unsigned long, kernel_stack);
10073
10074 -static inline struct thread_info *current_thread_info(void)
10075 -{
10076 - struct thread_info *ti;
10077 - ti = (void *)(percpu_read_stable(kernel_stack) +
10078 - KERNEL_STACK_OFFSET - THREAD_SIZE);
10079 - return ti;
10080 -}
10081 -
10082 -#else /* !__ASSEMBLY__ */
10083 -
10084 -/* how to get the thread information struct from ASM */
10085 -#define GET_THREAD_INFO(reg) \
10086 - movq PER_CPU_VAR(kernel_stack),reg ; \
10087 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10088 -
10089 +/* how to get the current stack pointer from C */
10090 +register unsigned long current_stack_pointer asm("rsp") __used;
10091 #endif
10092
10093 #endif /* !X86_32 */
10094 @@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
10095 extern void free_thread_info(struct thread_info *ti);
10096 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10097 #define arch_task_cache_init arch_task_cache_init
10098 +
10099 +#define __HAVE_THREAD_FUNCTIONS
10100 +#define task_thread_info(task) (&(task)->tinfo)
10101 +#define task_stack_page(task) ((task)->stack)
10102 +#define setup_thread_stack(p, org) do {} while (0)
10103 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10104 +
10105 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10106 +extern struct task_struct *alloc_task_struct(void);
10107 +extern void free_task_struct(struct task_struct *);
10108 +
10109 #endif
10110 #endif /* _ASM_X86_THREAD_INFO_H */
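With the thread_info.h changes above, struct thread_info loses its task pointer and no longer lives at the bottom of the kernel stack: current_thread_info() reads the per-CPU current_tinfo pointer instead of masking the stack pointer (32-bit) or offsetting from the per-CPU kernel_stack (64-bit). The masking trick being retired is easy to show in isolation (a userspace sketch, with THREAD_SIZE standing in for the kernel's value):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define THREAD_SIZE 8192   /* two 4K pages, as with THREAD_ORDER 1 */

    struct thread_info { int cpu; };

    int main(void)
    {
        /* Old scheme: thread_info sits at the base of a THREAD_SIZE-aligned
         * stack, so any address inside the stack can be masked down to it. */
        void *stack = aligned_alloc(THREAD_SIZE, THREAD_SIZE);
        struct thread_info *ti = stack;
        ti->cpu = 1;

        uintptr_t some_sp = (uintptr_t)stack + 5000;
        struct thread_info *found =
            (struct thread_info *)(some_sp & ~((uintptr_t)THREAD_SIZE - 1));
        printf("%d\n", found->cpu);   /* 1: recovered purely by masking */

        /* New scheme in the patch: no masking; the pointer is read from a
         * per-CPU variable and the struct lives inside task_struct. */
        free(stack);
        return 0;
    }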
10111 diff -urNp linux-2.6.32.41/arch/x86/include/asm/uaccess_32.h linux-2.6.32.41/arch/x86/include/asm/uaccess_32.h
10112 --- linux-2.6.32.41/arch/x86/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
10113 +++ linux-2.6.32.41/arch/x86/include/asm/uaccess_32.h 2011-05-16 21:46:57.000000000 -0400
10114 @@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_u
10115 static __always_inline unsigned long __must_check
10116 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10117 {
10118 + pax_track_stack();
10119 +
10120 + if ((long)n < 0)
10121 + return n;
10122 +
10123 if (__builtin_constant_p(n)) {
10124 unsigned long ret;
10125
10126 @@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to,
10127 return ret;
10128 }
10129 }
10130 + if (!__builtin_constant_p(n))
10131 + check_object_size(from, n, true);
10132 return __copy_to_user_ll(to, from, n);
10133 }
10134
10135 @@ -83,12 +90,16 @@ static __always_inline unsigned long __m
10136 __copy_to_user(void __user *to, const void *from, unsigned long n)
10137 {
10138 might_fault();
10139 +
10140 return __copy_to_user_inatomic(to, from, n);
10141 }
10142
10143 static __always_inline unsigned long
10144 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
10145 {
10146 + if ((long)n < 0)
10147 + return n;
10148 +
10149 /* Avoid zeroing the tail if the copy fails..
10150 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
10151 * but as the zeroing behaviour is only significant when n is not
10152 @@ -138,6 +149,12 @@ static __always_inline unsigned long
10153 __copy_from_user(void *to, const void __user *from, unsigned long n)
10154 {
10155 might_fault();
10156 +
10157 + pax_track_stack();
10158 +
10159 + if ((long)n < 0)
10160 + return n;
10161 +
10162 if (__builtin_constant_p(n)) {
10163 unsigned long ret;
10164
10165 @@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __
10166 return ret;
10167 }
10168 }
10169 + if (!__builtin_constant_p(n))
10170 + check_object_size(to, n, false);
10171 return __copy_from_user_ll(to, from, n);
10172 }
10173
10174 @@ -160,6 +179,10 @@ static __always_inline unsigned long __c
10175 const void __user *from, unsigned long n)
10176 {
10177 might_fault();
10178 +
10179 + if ((long)n < 0)
10180 + return n;
10181 +
10182 if (__builtin_constant_p(n)) {
10183 unsigned long ret;
10184
10185 @@ -182,14 +205,62 @@ static __always_inline unsigned long
10186 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
10187 unsigned long n)
10188 {
10189 - return __copy_from_user_ll_nocache_nozero(to, from, n);
10190 + if ((long)n < 0)
10191 + return n;
10192 +
10193 + return __copy_from_user_ll_nocache_nozero(to, from, n);
10194 +}
10195 +
10196 +/**
10197 + * copy_to_user: - Copy a block of data into user space.
10198 + * @to: Destination address, in user space.
10199 + * @from: Source address, in kernel space.
10200 + * @n: Number of bytes to copy.
10201 + *
10202 + * Context: User context only. This function may sleep.
10203 + *
10204 + * Copy data from kernel space to user space.
10205 + *
10206 + * Returns number of bytes that could not be copied.
10207 + * On success, this will be zero.
10208 + */
10209 +static __always_inline unsigned long __must_check
10210 +copy_to_user(void __user *to, const void *from, unsigned long n)
10211 +{
10212 + if (access_ok(VERIFY_WRITE, to, n))
10213 + n = __copy_to_user(to, from, n);
10214 + return n;
10215 +}
10216 +
10217 +/**
10218 + * copy_from_user: - Copy a block of data from user space.
10219 + * @to: Destination address, in kernel space.
10220 + * @from: Source address, in user space.
10221 + * @n: Number of bytes to copy.
10222 + *
10223 + * Context: User context only. This function may sleep.
10224 + *
10225 + * Copy data from user space to kernel space.
10226 + *
10227 + * Returns number of bytes that could not be copied.
10228 + * On success, this will be zero.
10229 + *
10230 + * If some data could not be copied, this function will pad the copied
10231 + * data to the requested size using zero bytes.
10232 + */
10233 +static __always_inline unsigned long __must_check
10234 +copy_from_user(void *to, const void __user *from, unsigned long n)
10235 +{
10236 + if (access_ok(VERIFY_READ, from, n))
10237 + n = __copy_from_user(to, from, n);
10238 + else if ((long)n > 0) {
10239 + if (!__builtin_constant_p(n))
10240 + check_object_size(to, n, false);
10241 + memset(to, 0, n);
10242 + }
10243 + return n;
10244 }
10245
10246 -unsigned long __must_check copy_to_user(void __user *to,
10247 - const void *from, unsigned long n);
10248 -unsigned long __must_check copy_from_user(void *to,
10249 - const void __user *from,
10250 - unsigned long n);
10251 long __must_check strncpy_from_user(char *dst, const char __user *src,
10252 long count);
10253 long __must_check __strncpy_from_user(char *dst,
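The uaccess_32.h hunk above turns copy_to_user()/copy_from_user() into inline wrappers that gate on access_ok(), return the number of bytes that could not be copied, and zero-fill the destination when copy_from_user() rejects the range (so callers never see stale kernel stack contents). A runnable sketch of that calling convention, with a stand-in helper (fake_copy_from_user and handle_request are hypothetical names used only for illustration):

    #include <stdio.h>
    #include <string.h>

    #define EFAULT 14

    /* Mirrors the semantics documented above: returns bytes NOT copied,
     * zero-fills the destination when the source range is rejected. */
    static unsigned long fake_copy_from_user(void *to, const void *from,
                                             unsigned long n,
                                             int range_ok /* plays access_ok() */)
    {
        if (range_ok) {
            memcpy(to, from, n);
            return 0;
        }
        memset(to, 0, n);
        return n;
    }

    struct req { int cmd; int arg; };

    static long handle_request(const struct req *user_ptr, int range_ok)
    {
        struct req r;
        if (fake_copy_from_user(&r, user_ptr, sizeof(r), range_ok))
            return -EFAULT;            /* any non-zero return means fault */
        return r.cmd + r.arg;
    }

    int main(void)
    {
        struct req u = { 2, 40 };
        printf("%ld\n", handle_request(&u, 1));   /* 42 */
        printf("%ld\n", handle_request(&u, 0));   /* -14, i.e. -EFAULT */
        return 0;
    }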
10254 diff -urNp linux-2.6.32.41/arch/x86/include/asm/uaccess_64.h linux-2.6.32.41/arch/x86/include/asm/uaccess_64.h
10255 --- linux-2.6.32.41/arch/x86/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
10256 +++ linux-2.6.32.41/arch/x86/include/asm/uaccess_64.h 2011-05-16 21:46:57.000000000 -0400
10257 @@ -9,6 +9,9 @@
10258 #include <linux/prefetch.h>
10259 #include <linux/lockdep.h>
10260 #include <asm/page.h>
10261 +#include <asm/pgtable.h>
10262 +
10263 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
10264
10265 /*
10266 * Copy To/From Userspace
10267 @@ -19,113 +22,203 @@ __must_check unsigned long
10268 copy_user_generic(void *to, const void *from, unsigned len);
10269
10270 __must_check unsigned long
10271 -copy_to_user(void __user *to, const void *from, unsigned len);
10272 -__must_check unsigned long
10273 -copy_from_user(void *to, const void __user *from, unsigned len);
10274 -__must_check unsigned long
10275 copy_in_user(void __user *to, const void __user *from, unsigned len);
10276
10277 static __always_inline __must_check
10278 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
10279 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
10280 {
10281 - int ret = 0;
10282 + unsigned ret = 0;
10283
10284 might_fault();
10285 - if (!__builtin_constant_p(size))
10286 - return copy_user_generic(dst, (__force void *)src, size);
10287 +
10288 + if ((int)size < 0)
10289 + return size;
10290 +
10291 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10292 + if (!__access_ok(VERIFY_READ, src, size))
10293 + return size;
10294 +#endif
10295 +
10296 + if (!__builtin_constant_p(size)) {
10297 + check_object_size(dst, size, false);
10298 +
10299 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10300 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10301 + src += PAX_USER_SHADOW_BASE;
10302 +#endif
10303 +
10304 + return copy_user_generic(dst, (__force const void *)src, size);
10305 + }
10306 switch (size) {
10307 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
10308 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
10309 ret, "b", "b", "=q", 1);
10310 return ret;
10311 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
10312 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
10313 ret, "w", "w", "=r", 2);
10314 return ret;
10315 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
10316 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
10317 ret, "l", "k", "=r", 4);
10318 return ret;
10319 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
10320 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10321 ret, "q", "", "=r", 8);
10322 return ret;
10323 case 10:
10324 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10325 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10326 ret, "q", "", "=r", 10);
10327 if (unlikely(ret))
10328 return ret;
10329 __get_user_asm(*(u16 *)(8 + (char *)dst),
10330 - (u16 __user *)(8 + (char __user *)src),
10331 + (const u16 __user *)(8 + (const char __user *)src),
10332 ret, "w", "w", "=r", 2);
10333 return ret;
10334 case 16:
10335 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10336 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10337 ret, "q", "", "=r", 16);
10338 if (unlikely(ret))
10339 return ret;
10340 __get_user_asm(*(u64 *)(8 + (char *)dst),
10341 - (u64 __user *)(8 + (char __user *)src),
10342 + (const u64 __user *)(8 + (const char __user *)src),
10343 ret, "q", "", "=r", 8);
10344 return ret;
10345 default:
10346 - return copy_user_generic(dst, (__force void *)src, size);
10347 +
10348 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10349 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10350 + src += PAX_USER_SHADOW_BASE;
10351 +#endif
10352 +
10353 + return copy_user_generic(dst, (__force const void *)src, size);
10354 }
10355 }
10356
10357 static __always_inline __must_check
10358 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
10359 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
10360 {
10361 - int ret = 0;
10362 + unsigned ret = 0;
10363
10364 might_fault();
10365 - if (!__builtin_constant_p(size))
10366 +
10367 + pax_track_stack();
10368 +
10369 + if ((int)size < 0)
10370 + return size;
10371 +
10372 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10373 + if (!__access_ok(VERIFY_WRITE, dst, size))
10374 + return size;
10375 +#endif
10376 +
10377 + if (!__builtin_constant_p(size)) {
10378 + check_object_size(src, size, true);
10379 +
10380 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10381 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10382 + dst += PAX_USER_SHADOW_BASE;
10383 +#endif
10384 +
10385 return copy_user_generic((__force void *)dst, src, size);
10386 + }
10387 switch (size) {
10388 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
10389 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
10390 ret, "b", "b", "iq", 1);
10391 return ret;
10392 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
10393 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
10394 ret, "w", "w", "ir", 2);
10395 return ret;
10396 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
10397 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
10398 ret, "l", "k", "ir", 4);
10399 return ret;
10400 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
10401 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10402 ret, "q", "", "er", 8);
10403 return ret;
10404 case 10:
10405 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10406 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10407 ret, "q", "", "er", 10);
10408 if (unlikely(ret))
10409 return ret;
10410 asm("":::"memory");
10411 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
10412 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
10413 ret, "w", "w", "ir", 2);
10414 return ret;
10415 case 16:
10416 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10417 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10418 ret, "q", "", "er", 16);
10419 if (unlikely(ret))
10420 return ret;
10421 asm("":::"memory");
10422 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
10423 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
10424 ret, "q", "", "er", 8);
10425 return ret;
10426 default:
10427 +
10428 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10429 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10430 + dst += PAX_USER_SHADOW_BASE;
10431 +#endif
10432 +
10433 return copy_user_generic((__force void *)dst, src, size);
10434 }
10435 }
10436
10437 static __always_inline __must_check
10438 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10439 +unsigned long copy_to_user(void __user *to, const void *from, unsigned len)
10440 +{
10441 + if (access_ok(VERIFY_WRITE, to, len))
10442 + len = __copy_to_user(to, from, len);
10443 + return len;
10444 +}
10445 +
10446 +static __always_inline __must_check
10447 +unsigned long copy_from_user(void *to, const void __user *from, unsigned len)
10448 +{
10449 + if ((int)len < 0)
10450 + return len;
10451 +
10452 + if (access_ok(VERIFY_READ, from, len))
10453 + len = __copy_from_user(to, from, len);
10454 + else if ((int)len > 0) {
10455 + if (!__builtin_constant_p(len))
10456 + check_object_size(to, len, false);
10457 + memset(to, 0, len);
10458 + }
10459 + return len;
10460 +}
10461 +
10462 +static __always_inline __must_check
10463 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10464 {
10465 - int ret = 0;
10466 + unsigned ret = 0;
10467
10468 might_fault();
10469 - if (!__builtin_constant_p(size))
10470 +
10471 + pax_track_stack();
10472 +
10473 + if ((int)size < 0)
10474 + return size;
10475 +
10476 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10477 + if (!__access_ok(VERIFY_READ, src, size))
10478 + return size;
10479 + if (!__access_ok(VERIFY_WRITE, dst, size))
10480 + return size;
10481 +#endif
10482 +
10483 + if (!__builtin_constant_p(size)) {
10484 +
10485 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10486 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10487 + src += PAX_USER_SHADOW_BASE;
10488 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10489 + dst += PAX_USER_SHADOW_BASE;
10490 +#endif
10491 +
10492 return copy_user_generic((__force void *)dst,
10493 - (__force void *)src, size);
10494 + (__force const void *)src, size);
10495 + }
10496 switch (size) {
10497 case 1: {
10498 u8 tmp;
10499 - __get_user_asm(tmp, (u8 __user *)src,
10500 + __get_user_asm(tmp, (const u8 __user *)src,
10501 ret, "b", "b", "=q", 1);
10502 if (likely(!ret))
10503 __put_user_asm(tmp, (u8 __user *)dst,
10504 @@ -134,7 +227,7 @@ int __copy_in_user(void __user *dst, con
10505 }
10506 case 2: {
10507 u16 tmp;
10508 - __get_user_asm(tmp, (u16 __user *)src,
10509 + __get_user_asm(tmp, (const u16 __user *)src,
10510 ret, "w", "w", "=r", 2);
10511 if (likely(!ret))
10512 __put_user_asm(tmp, (u16 __user *)dst,
10513 @@ -144,7 +237,7 @@ int __copy_in_user(void __user *dst, con
10514
10515 case 4: {
10516 u32 tmp;
10517 - __get_user_asm(tmp, (u32 __user *)src,
10518 + __get_user_asm(tmp, (const u32 __user *)src,
10519 ret, "l", "k", "=r", 4);
10520 if (likely(!ret))
10521 __put_user_asm(tmp, (u32 __user *)dst,
10522 @@ -153,7 +246,7 @@ int __copy_in_user(void __user *dst, con
10523 }
10524 case 8: {
10525 u64 tmp;
10526 - __get_user_asm(tmp, (u64 __user *)src,
10527 + __get_user_asm(tmp, (const u64 __user *)src,
10528 ret, "q", "", "=r", 8);
10529 if (likely(!ret))
10530 __put_user_asm(tmp, (u64 __user *)dst,
10531 @@ -161,8 +254,16 @@ int __copy_in_user(void __user *dst, con
10532 return ret;
10533 }
10534 default:
10535 +
10536 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10537 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10538 + src += PAX_USER_SHADOW_BASE;
10539 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10540 + dst += PAX_USER_SHADOW_BASE;
10541 +#endif
10542 +
10543 return copy_user_generic((__force void *)dst,
10544 - (__force void *)src, size);
10545 + (__force const void *)src, size);
10546 }
10547 }
10548
10549 @@ -176,33 +277,75 @@ __must_check long strlen_user(const char
10550 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
10551 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
10552
10553 -__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
10554 - unsigned size);
10555 +static __must_check __always_inline unsigned long
10556 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
10557 +{
10558 + pax_track_stack();
10559 +
10560 + if ((int)size < 0)
10561 + return size;
10562
10563 -static __must_check __always_inline int
10564 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10565 + if (!__access_ok(VERIFY_READ, src, size))
10566 + return size;
10567 +
10568 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10569 + src += PAX_USER_SHADOW_BASE;
10570 +#endif
10571 +
10572 + return copy_user_generic(dst, (__force const void *)src, size);
10573 +}
10574 +
10575 +static __must_check __always_inline unsigned long
10576 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
10577 {
10578 + if ((int)size < 0)
10579 + return size;
10580 +
10581 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10582 + if (!__access_ok(VERIFY_WRITE, dst, size))
10583 + return size;
10584 +
10585 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10586 + dst += PAX_USER_SHADOW_BASE;
10587 +#endif
10588 +
10589 return copy_user_generic((__force void *)dst, src, size);
10590 }
10591
10592 -extern long __copy_user_nocache(void *dst, const void __user *src,
10593 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
10594 unsigned size, int zerorest);
10595
10596 -static inline int
10597 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
10598 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
10599 {
10600 might_sleep();
10601 +
10602 + if ((int)size < 0)
10603 + return size;
10604 +
10605 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10606 + if (!__access_ok(VERIFY_READ, src, size))
10607 + return size;
10608 +#endif
10609 +
10610 return __copy_user_nocache(dst, src, size, 1);
10611 }
10612
10613 -static inline int
10614 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
10615 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
10616 unsigned size)
10617 {
10618 + if ((int)size < 0)
10619 + return size;
10620 +
10621 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10622 + if (!__access_ok(VERIFY_READ, src, size))
10623 + return size;
10624 +#endif
10625 +
10626 return __copy_user_nocache(dst, src, size, 0);
10627 }
10628
10629 -unsigned long
10630 +extern unsigned long
10631 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
10632
10633 #endif /* _ASM_X86_UACCESS_64_H */
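Throughout the uaccess_64.h hunks above, PAX_MEMORY_UDEREF rebases raw userland pointers before they are dereferenced: anything below PAX_USER_SHADOW_BASE (defined earlier in this patch, in pgtable.h, as 1UL << TASK_SIZE_MAX_SHIFT) has the shadow offset added so the access goes through the shifted alias rather than the direct user mapping. The rebasing rule in isolation (SHADOW_BASE below is only a placeholder constant for illustration):

    #include <stdio.h>

    #define SHADOW_BASE (1UL << 47)   /* placeholder for PAX_USER_SHADOW_BASE */

    /* Addresses below the shadow base are raw userland pointers; add the
     * offset so the kernel-side access uses the shadow alias instead. */
    static const void *rebase_user_pointer(const void *src)
    {
        unsigned long addr = (unsigned long)src;
        if (addr < SHADOW_BASE)
            addr += SHADOW_BASE;
        return (const void *)addr;
    }

    int main(void)
    {
        printf("%#lx\n", (unsigned long)rebase_user_pointer((void *)0x400000));
        printf("%#lx\n", (unsigned long)rebase_user_pointer((void *)(SHADOW_BASE + 0x1000)));
        return 0;
    }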
10634 diff -urNp linux-2.6.32.41/arch/x86/include/asm/uaccess.h linux-2.6.32.41/arch/x86/include/asm/uaccess.h
10635 --- linux-2.6.32.41/arch/x86/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
10636 +++ linux-2.6.32.41/arch/x86/include/asm/uaccess.h 2011-04-17 15:56:46.000000000 -0400
10637 @@ -8,12 +8,15 @@
10638 #include <linux/thread_info.h>
10639 #include <linux/prefetch.h>
10640 #include <linux/string.h>
10641 +#include <linux/sched.h>
10642 #include <asm/asm.h>
10643 #include <asm/page.h>
10644
10645 #define VERIFY_READ 0
10646 #define VERIFY_WRITE 1
10647
10648 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
10649 +
10650 /*
10651 * The fs value determines whether argument validity checking should be
10652 * performed or not. If get_fs() == USER_DS, checking is performed, with
10653 @@ -29,7 +32,12 @@
10654
10655 #define get_ds() (KERNEL_DS)
10656 #define get_fs() (current_thread_info()->addr_limit)
10657 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10658 +void __set_fs(mm_segment_t x);
10659 +void set_fs(mm_segment_t x);
10660 +#else
10661 #define set_fs(x) (current_thread_info()->addr_limit = (x))
10662 +#endif
10663
10664 #define segment_eq(a, b) ((a).seg == (b).seg)
10665
10666 @@ -77,7 +85,33 @@
10667 * checks that the pointer is in the user space range - after calling
10668 * this function, memory access functions may still return -EFAULT.
10669 */
10670 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10671 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10672 +#define access_ok(type, addr, size) \
10673 +({ \
10674 + long __size = size; \
10675 + unsigned long __addr = (unsigned long)addr; \
10676 + unsigned long __addr_ao = __addr & PAGE_MASK; \
10677 + unsigned long __end_ao = __addr + __size - 1; \
10678 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
10679 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
10680 + while(__addr_ao <= __end_ao) { \
10681 + char __c_ao; \
10682 + __addr_ao += PAGE_SIZE; \
10683 + if (__size > PAGE_SIZE) \
10684 + cond_resched(); \
10685 + if (__get_user(__c_ao, (char __user *)__addr)) \
10686 + break; \
10687 + if (type != VERIFY_WRITE) { \
10688 + __addr = __addr_ao; \
10689 + continue; \
10690 + } \
10691 + if (__put_user(__c_ao, (char __user *)__addr)) \
10692 + break; \
10693 + __addr = __addr_ao; \
10694 + } \
10695 + } \
10696 + __ret_ao; \
10697 +})
10698
10699 /*
10700 * The exception table consists of pairs of addresses: the first is the
10701 @@ -183,12 +217,20 @@ extern int __get_user_bad(void);
10702 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
10703 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
10704
10705 -
10706 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10707 +#define __copyuser_seg "gs;"
10708 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
10709 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
10710 +#else
10711 +#define __copyuser_seg
10712 +#define __COPYUSER_SET_ES
10713 +#define __COPYUSER_RESTORE_ES
10714 +#endif
10715
10716 #ifdef CONFIG_X86_32
10717 #define __put_user_asm_u64(x, addr, err, errret) \
10718 - asm volatile("1: movl %%eax,0(%2)\n" \
10719 - "2: movl %%edx,4(%2)\n" \
10720 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
10721 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
10722 "3:\n" \
10723 ".section .fixup,\"ax\"\n" \
10724 "4: movl %3,%0\n" \
10725 @@ -200,8 +242,8 @@ extern int __get_user_bad(void);
10726 : "A" (x), "r" (addr), "i" (errret), "0" (err))
10727
10728 #define __put_user_asm_ex_u64(x, addr) \
10729 - asm volatile("1: movl %%eax,0(%1)\n" \
10730 - "2: movl %%edx,4(%1)\n" \
10731 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
10732 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
10733 "3:\n" \
10734 _ASM_EXTABLE(1b, 2b - 1b) \
10735 _ASM_EXTABLE(2b, 3b - 2b) \
10736 @@ -374,7 +416,7 @@ do { \
10737 } while (0)
10738
10739 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10740 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
10741 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
10742 "2:\n" \
10743 ".section .fixup,\"ax\"\n" \
10744 "3: mov %3,%0\n" \
10745 @@ -382,7 +424,7 @@ do { \
10746 " jmp 2b\n" \
10747 ".previous\n" \
10748 _ASM_EXTABLE(1b, 3b) \
10749 - : "=r" (err), ltype(x) \
10750 + : "=r" (err), ltype (x) \
10751 : "m" (__m(addr)), "i" (errret), "0" (err))
10752
10753 #define __get_user_size_ex(x, ptr, size) \
10754 @@ -407,7 +449,7 @@ do { \
10755 } while (0)
10756
10757 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
10758 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
10759 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
10760 "2:\n" \
10761 _ASM_EXTABLE(1b, 2b - 1b) \
10762 : ltype(x) : "m" (__m(addr)))
10763 @@ -424,13 +466,24 @@ do { \
10764 int __gu_err; \
10765 unsigned long __gu_val; \
10766 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
10767 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
10768 + (x) = (__typeof__(*(ptr)))__gu_val; \
10769 __gu_err; \
10770 })
10771
10772 /* FIXME: this hack is definitely wrong -AK */
10773 struct __large_struct { unsigned long buf[100]; };
10774 -#define __m(x) (*(struct __large_struct __user *)(x))
10775 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10776 +#define ____m(x) \
10777 +({ \
10778 + unsigned long ____x = (unsigned long)(x); \
10779 + if (____x < PAX_USER_SHADOW_BASE) \
10780 + ____x += PAX_USER_SHADOW_BASE; \
10781 + (void __user *)____x; \
10782 +})
10783 +#else
10784 +#define ____m(x) (x)
10785 +#endif
10786 +#define __m(x) (*(struct __large_struct __user *)____m(x))
10787
10788 /*
10789 * Tell gcc we read from memory instead of writing: this is because
10790 @@ -438,7 +491,7 @@ struct __large_struct { unsigned long bu
10791 * aliasing issues.
10792 */
10793 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10794 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
10795 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
10796 "2:\n" \
10797 ".section .fixup,\"ax\"\n" \
10798 "3: mov %3,%0\n" \
10799 @@ -446,10 +499,10 @@ struct __large_struct { unsigned long bu
10800 ".previous\n" \
10801 _ASM_EXTABLE(1b, 3b) \
10802 : "=r"(err) \
10803 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
10804 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
10805
10806 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
10807 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
10808 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
10809 "2:\n" \
10810 _ASM_EXTABLE(1b, 2b - 1b) \
10811 : : ltype(x), "m" (__m(addr)))
10812 @@ -488,8 +541,12 @@ struct __large_struct { unsigned long bu
10813 * On error, the variable @x is set to zero.
10814 */
10815
10816 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10817 +#define __get_user(x, ptr) get_user((x), (ptr))
10818 +#else
10819 #define __get_user(x, ptr) \
10820 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
10821 +#endif
10822
10823 /**
10824 * __put_user: - Write a simple value into user space, with less checking.
10825 @@ -511,8 +568,12 @@ struct __large_struct { unsigned long bu
10826 * Returns zero on success, or -EFAULT on error.
10827 */
10828
10829 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10830 +#define __put_user(x, ptr) put_user((x), (ptr))
10831 +#else
10832 #define __put_user(x, ptr) \
10833 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
10834 +#endif
10835
10836 #define __get_user_unaligned __get_user
10837 #define __put_user_unaligned __put_user
10838 @@ -530,7 +591,7 @@ struct __large_struct { unsigned long bu
10839 #define get_user_ex(x, ptr) do { \
10840 unsigned long __gue_val; \
10841 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
10842 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
10843 + (x) = (__typeof__(*(ptr)))__gue_val; \
10844 } while (0)
10845
10846 #ifdef CONFIG_X86_WP_WORKS_OK
10847 @@ -567,6 +628,7 @@ extern struct movsl_mask {
10848
10849 #define ARCH_HAS_NOCACHE_UACCESS 1
10850
10851 +#define ARCH_HAS_SORT_EXTABLE
10852 #ifdef CONFIG_X86_32
10853 # include "uaccess_32.h"
10854 #else
10855 diff -urNp linux-2.6.32.41/arch/x86/include/asm/vgtod.h linux-2.6.32.41/arch/x86/include/asm/vgtod.h
10856 --- linux-2.6.32.41/arch/x86/include/asm/vgtod.h 2011-03-27 14:31:47.000000000 -0400
10857 +++ linux-2.6.32.41/arch/x86/include/asm/vgtod.h 2011-04-17 15:56:46.000000000 -0400
10858 @@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
10859 int sysctl_enabled;
10860 struct timezone sys_tz;
10861 struct { /* extract of a clocksource struct */
10862 + char name[8];
10863 cycle_t (*vread)(void);
10864 cycle_t cycle_last;
10865 cycle_t mask;
10866 diff -urNp linux-2.6.32.41/arch/x86/include/asm/vmi.h linux-2.6.32.41/arch/x86/include/asm/vmi.h
10867 --- linux-2.6.32.41/arch/x86/include/asm/vmi.h 2011-03-27 14:31:47.000000000 -0400
10868 +++ linux-2.6.32.41/arch/x86/include/asm/vmi.h 2011-04-17 15:56:46.000000000 -0400
10869 @@ -191,6 +191,7 @@ struct vrom_header {
10870 u8 reserved[96]; /* Reserved for headers */
10871 char vmi_init[8]; /* VMI_Init jump point */
10872 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
10873 + char rom_data[8048]; /* rest of the option ROM */
10874 } __attribute__((packed));
10875
10876 struct pnp_header {
10877 diff -urNp linux-2.6.32.41/arch/x86/include/asm/vsyscall.h linux-2.6.32.41/arch/x86/include/asm/vsyscall.h
10878 --- linux-2.6.32.41/arch/x86/include/asm/vsyscall.h 2011-03-27 14:31:47.000000000 -0400
10879 +++ linux-2.6.32.41/arch/x86/include/asm/vsyscall.h 2011-04-17 15:56:46.000000000 -0400
10880 @@ -15,9 +15,10 @@ enum vsyscall_num {
10881
10882 #ifdef __KERNEL__
10883 #include <linux/seqlock.h>
10884 +#include <linux/getcpu.h>
10885 +#include <linux/time.h>
10886
10887 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
10888 -#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
10889
10890 /* Definitions for CONFIG_GENERIC_TIME definitions */
10891 #define __section_vsyscall_gtod_data __attribute__ \
10892 @@ -31,7 +32,6 @@ enum vsyscall_num {
10893 #define VGETCPU_LSL 2
10894
10895 extern int __vgetcpu_mode;
10896 -extern volatile unsigned long __jiffies;
10897
10898 /* kernel space (writeable) */
10899 extern int vgetcpu_mode;
10900 @@ -39,6 +39,9 @@ extern struct timezone sys_tz;
10901
10902 extern void map_vsyscall(void);
10903
10904 +extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
10905 +extern time_t vtime(time_t *t);
10906 +extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
10907 #endif /* __KERNEL__ */
10908
10909 #endif /* _ASM_X86_VSYSCALL_H */
10910 diff -urNp linux-2.6.32.41/arch/x86/include/asm/xsave.h linux-2.6.32.41/arch/x86/include/asm/xsave.h
10911 --- linux-2.6.32.41/arch/x86/include/asm/xsave.h 2011-03-27 14:31:47.000000000 -0400
10912 +++ linux-2.6.32.41/arch/x86/include/asm/xsave.h 2011-04-17 15:56:46.000000000 -0400
10913 @@ -56,6 +56,12 @@ static inline int xrstor_checking(struct
10914 static inline int xsave_user(struct xsave_struct __user *buf)
10915 {
10916 int err;
10917 +
10918 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10919 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
10920 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
10921 +#endif
10922 +
10923 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
10924 "2:\n"
10925 ".section .fixup,\"ax\"\n"
10926 @@ -82,6 +88,11 @@ static inline int xrestore_user(struct x
10927 u32 lmask = mask;
10928 u32 hmask = mask >> 32;
10929
10930 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10931 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
10932 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
10933 +#endif
10934 +
10935 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
10936 "2:\n"
10937 ".section .fixup,\"ax\"\n"
10938 diff -urNp linux-2.6.32.41/arch/x86/Kconfig linux-2.6.32.41/arch/x86/Kconfig
10939 --- linux-2.6.32.41/arch/x86/Kconfig 2011-03-27 14:31:47.000000000 -0400
10940 +++ linux-2.6.32.41/arch/x86/Kconfig 2011-04-17 15:56:46.000000000 -0400
10941 @@ -223,7 +223,7 @@ config X86_TRAMPOLINE
10942
10943 config X86_32_LAZY_GS
10944 def_bool y
10945 - depends on X86_32 && !CC_STACKPROTECTOR
10946 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10947
10948 config KTIME_SCALAR
10949 def_bool X86_32
10950 @@ -1008,7 +1008,7 @@ choice
10951
10952 config NOHIGHMEM
10953 bool "off"
10954 - depends on !X86_NUMAQ
10955 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10956 ---help---
10957 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10958 However, the address space of 32-bit x86 processors is only 4
10959 @@ -1045,7 +1045,7 @@ config NOHIGHMEM
10960
10961 config HIGHMEM4G
10962 bool "4GB"
10963 - depends on !X86_NUMAQ
10964 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10965 ---help---
10966 Select this if you have a 32-bit processor and between 1 and 4
10967 gigabytes of physical RAM.
10968 @@ -1099,7 +1099,7 @@ config PAGE_OFFSET
10969 hex
10970 default 0xB0000000 if VMSPLIT_3G_OPT
10971 default 0x80000000 if VMSPLIT_2G
10972 - default 0x78000000 if VMSPLIT_2G_OPT
10973 + default 0x70000000 if VMSPLIT_2G_OPT
10974 default 0x40000000 if VMSPLIT_1G
10975 default 0xC0000000
10976 depends on X86_32
10977 @@ -1430,7 +1430,7 @@ config ARCH_USES_PG_UNCACHED
10978
10979 config EFI
10980 bool "EFI runtime service support"
10981 - depends on ACPI
10982 + depends on ACPI && !PAX_KERNEXEC
10983 ---help---
10984 This enables the kernel to use EFI runtime services that are
10985 available (such as the EFI variable services).
10986 @@ -1460,6 +1460,7 @@ config SECCOMP
10987
10988 config CC_STACKPROTECTOR
10989 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
10990 + depends on X86_64 || !PAX_MEMORY_UDEREF
10991 ---help---
10992 This option turns on the -fstack-protector GCC feature. This
10993 feature puts, at the beginning of functions, a canary value on
10994 @@ -1517,6 +1518,7 @@ config KEXEC_JUMP
10995 config PHYSICAL_START
10996 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
10997 default "0x1000000"
10998 + range 0x400000 0x40000000
10999 ---help---
11000 This gives the physical address where the kernel is loaded.
11001
11002 @@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
11003 hex
11004 prompt "Alignment value to which kernel should be aligned" if X86_32
11005 default "0x1000000"
11006 + range 0x400000 0x1000000 if PAX_KERNEXEC
11007 range 0x2000 0x1000000
11008 ---help---
11009 This value puts the alignment restrictions on physical address
11010 @@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
11011 Say N if you want to disable CPU hotplug.
11012
11013 config COMPAT_VDSO
11014 - def_bool y
11015 + def_bool n
11016 prompt "Compat VDSO support"
11017 depends on X86_32 || IA32_EMULATION
11018 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
11019 ---help---
11020 Map the 32-bit VDSO to the predictable old-style address too.
11021 ---help---
11022 diff -urNp linux-2.6.32.41/arch/x86/Kconfig.cpu linux-2.6.32.41/arch/x86/Kconfig.cpu
11023 --- linux-2.6.32.41/arch/x86/Kconfig.cpu 2011-03-27 14:31:47.000000000 -0400
11024 +++ linux-2.6.32.41/arch/x86/Kconfig.cpu 2011-04-17 15:56:46.000000000 -0400
11025 @@ -340,7 +340,7 @@ config X86_PPRO_FENCE
11026
11027 config X86_F00F_BUG
11028 def_bool y
11029 - depends on M586MMX || M586TSC || M586 || M486 || M386
11030 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
11031
11032 config X86_WP_WORKS_OK
11033 def_bool y
11034 @@ -360,7 +360,7 @@ config X86_POPAD_OK
11035
11036 config X86_ALIGNMENT_16
11037 def_bool y
11038 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11039 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11040
11041 config X86_INTEL_USERCOPY
11042 def_bool y
11043 @@ -406,7 +406,7 @@ config X86_CMPXCHG64
11044 # generates cmov.
11045 config X86_CMOV
11046 def_bool y
11047 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11048 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11049
11050 config X86_MINIMUM_CPU_FAMILY
11051 int
11052 diff -urNp linux-2.6.32.41/arch/x86/Kconfig.debug linux-2.6.32.41/arch/x86/Kconfig.debug
11053 --- linux-2.6.32.41/arch/x86/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
11054 +++ linux-2.6.32.41/arch/x86/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
11055 @@ -99,7 +99,7 @@ config X86_PTDUMP
11056 config DEBUG_RODATA
11057 bool "Write protect kernel read-only data structures"
11058 default y
11059 - depends on DEBUG_KERNEL
11060 + depends on DEBUG_KERNEL && BROKEN
11061 ---help---
11062 Mark the kernel read-only data as write-protected in the pagetables,
11063 in order to catch accidental (and incorrect) writes to such const
11064 diff -urNp linux-2.6.32.41/arch/x86/kernel/acpi/realmode/wakeup.S linux-2.6.32.41/arch/x86/kernel/acpi/realmode/wakeup.S
11065 --- linux-2.6.32.41/arch/x86/kernel/acpi/realmode/wakeup.S 2011-03-27 14:31:47.000000000 -0400
11066 +++ linux-2.6.32.41/arch/x86/kernel/acpi/realmode/wakeup.S 2011-04-17 15:56:46.000000000 -0400
11067 @@ -104,7 +104,7 @@ _start:
11068 movl %eax, %ecx
11069 orl %edx, %ecx
11070 jz 1f
11071 - movl $0xc0000080, %ecx
11072 + mov $MSR_EFER, %ecx
11073 wrmsr
11074 1:
11075
11076 diff -urNp linux-2.6.32.41/arch/x86/kernel/acpi/sleep.c linux-2.6.32.41/arch/x86/kernel/acpi/sleep.c
11077 --- linux-2.6.32.41/arch/x86/kernel/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
11078 +++ linux-2.6.32.41/arch/x86/kernel/acpi/sleep.c 2011-04-17 15:56:46.000000000 -0400
11079 @@ -11,11 +11,12 @@
11080 #include <linux/cpumask.h>
11081 #include <asm/segment.h>
11082 #include <asm/desc.h>
11083 +#include <asm/e820.h>
11084
11085 #include "realmode/wakeup.h"
11086 #include "sleep.h"
11087
11088 -unsigned long acpi_wakeup_address;
11089 +unsigned long acpi_wakeup_address = 0x2000;
11090 unsigned long acpi_realmode_flags;
11091
11092 /* address in low memory of the wakeup routine. */
11093 @@ -99,8 +100,12 @@ int acpi_save_state_mem(void)
11094 header->trampoline_segment = setup_trampoline() >> 4;
11095 #ifdef CONFIG_SMP
11096 stack_start.sp = temp_stack + sizeof(temp_stack);
11097 +
11098 + pax_open_kernel();
11099 early_gdt_descr.address =
11100 (unsigned long)get_cpu_gdt_table(smp_processor_id());
11101 + pax_close_kernel();
11102 +
11103 initial_gs = per_cpu_offset(smp_processor_id());
11104 #endif
11105 initial_code = (unsigned long)wakeup_long64;
11106 @@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
11107 return;
11108 }
11109
11110 - acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
11111 -
11112 - if (!acpi_realmode) {
11113 - printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
11114 - return;
11115 - }
11116 -
11117 - acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
11118 + reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
11119 + acpi_realmode = (unsigned long)__va(acpi_wakeup_address);;
11120 }
11121
11122
11123 diff -urNp linux-2.6.32.41/arch/x86/kernel/acpi/wakeup_32.S linux-2.6.32.41/arch/x86/kernel/acpi/wakeup_32.S
11124 --- linux-2.6.32.41/arch/x86/kernel/acpi/wakeup_32.S 2011-03-27 14:31:47.000000000 -0400
11125 +++ linux-2.6.32.41/arch/x86/kernel/acpi/wakeup_32.S 2011-04-17 15:56:46.000000000 -0400
11126 @@ -30,13 +30,11 @@ wakeup_pmode_return:
11127 # and restore the stack ... but you need gdt for this to work
11128 movl saved_context_esp, %esp
11129
11130 - movl %cs:saved_magic, %eax
11131 - cmpl $0x12345678, %eax
11132 + cmpl $0x12345678, saved_magic
11133 jne bogus_magic
11134
11135 # jump to place where we left off
11136 - movl saved_eip, %eax
11137 - jmp *%eax
11138 + jmp *(saved_eip)
11139
11140 bogus_magic:
11141 jmp bogus_magic
11142 diff -urNp linux-2.6.32.41/arch/x86/kernel/alternative.c linux-2.6.32.41/arch/x86/kernel/alternative.c
11143 --- linux-2.6.32.41/arch/x86/kernel/alternative.c 2011-03-27 14:31:47.000000000 -0400
11144 +++ linux-2.6.32.41/arch/x86/kernel/alternative.c 2011-04-17 15:56:46.000000000 -0400
11145 @@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(str
11146
11147 BUG_ON(p->len > MAX_PATCH_LEN);
11148 /* prep the buffer with the original instructions */
11149 - memcpy(insnbuf, p->instr, p->len);
11150 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11151 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11152 (unsigned long)p->instr, p->len);
11153
11154 @@ -475,7 +475,7 @@ void __init alternative_instructions(voi
11155 if (smp_alt_once)
11156 free_init_pages("SMP alternatives",
11157 (unsigned long)__smp_locks,
11158 - (unsigned long)__smp_locks_end);
11159 + PAGE_ALIGN((unsigned long)__smp_locks_end));
11160
11161 restart_nmi();
11162 }
11163 @@ -492,13 +492,17 @@ void __init alternative_instructions(voi
11164 * instructions. And on the local CPU you need to be protected again NMI or MCE
11165 * handlers seeing an inconsistent instruction while you patch.
11166 */
11167 -static void *__init_or_module text_poke_early(void *addr, const void *opcode,
11168 +static void *__kprobes text_poke_early(void *addr, const void *opcode,
11169 size_t len)
11170 {
11171 unsigned long flags;
11172 local_irq_save(flags);
11173 - memcpy(addr, opcode, len);
11174 +
11175 + pax_open_kernel();
11176 + memcpy(ktla_ktva(addr), opcode, len);
11177 sync_core();
11178 + pax_close_kernel();
11179 +
11180 local_irq_restore(flags);
11181 /* Could also do a CLFLUSH here to speed up CPU recovery; but
11182 that causes hangs on some VIA CPUs. */
11183 @@ -520,35 +524,21 @@ static void *__init_or_module text_poke_
11184 */
11185 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11186 {
11187 - unsigned long flags;
11188 - char *vaddr;
11189 + unsigned char *vaddr = ktla_ktva(addr);
11190 struct page *pages[2];
11191 - int i;
11192 + size_t i;
11193
11194 if (!core_kernel_text((unsigned long)addr)) {
11195 - pages[0] = vmalloc_to_page(addr);
11196 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
11197 + pages[0] = vmalloc_to_page(vaddr);
11198 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11199 } else {
11200 - pages[0] = virt_to_page(addr);
11201 + pages[0] = virt_to_page(vaddr);
11202 WARN_ON(!PageReserved(pages[0]));
11203 - pages[1] = virt_to_page(addr + PAGE_SIZE);
11204 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11205 }
11206 BUG_ON(!pages[0]);
11207 - local_irq_save(flags);
11208 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11209 - if (pages[1])
11210 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11211 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11212 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11213 - clear_fixmap(FIX_TEXT_POKE0);
11214 - if (pages[1])
11215 - clear_fixmap(FIX_TEXT_POKE1);
11216 - local_flush_tlb();
11217 - sync_core();
11218 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
11219 - that causes hangs on some VIA CPUs. */
11220 + text_poke_early(addr, opcode, len);
11221 for (i = 0; i < len; i++)
11222 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11223 - local_irq_restore(flags);
11224 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
11225 return addr;
11226 }
11227 diff -urNp linux-2.6.32.41/arch/x86/kernel/amd_iommu.c linux-2.6.32.41/arch/x86/kernel/amd_iommu.c
11228 --- linux-2.6.32.41/arch/x86/kernel/amd_iommu.c 2011-03-27 14:31:47.000000000 -0400
11229 +++ linux-2.6.32.41/arch/x86/kernel/amd_iommu.c 2011-04-17 15:56:46.000000000 -0400
11230 @@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(
11231 }
11232 }
11233
11234 -static struct dma_map_ops amd_iommu_dma_ops = {
11235 +static const struct dma_map_ops amd_iommu_dma_ops = {
11236 .alloc_coherent = alloc_coherent,
11237 .free_coherent = free_coherent,
11238 .map_page = map_page,
11239 diff -urNp linux-2.6.32.41/arch/x86/kernel/apic/apic.c linux-2.6.32.41/arch/x86/kernel/apic/apic.c
11240 --- linux-2.6.32.41/arch/x86/kernel/apic/apic.c 2011-03-27 14:31:47.000000000 -0400
11241 +++ linux-2.6.32.41/arch/x86/kernel/apic/apic.c 2011-05-16 21:46:57.000000000 -0400
11242 @@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs
11243 apic_write(APIC_ESR, 0);
11244 v1 = apic_read(APIC_ESR);
11245 ack_APIC_irq();
11246 - atomic_inc(&irq_err_count);
11247 + atomic_inc_unchecked(&irq_err_count);
11248
11249 /*
11250 * Here is what the APIC error bits mean:
11251 @@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(vo
11252 u16 *bios_cpu_apicid;
11253 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
11254
11255 + pax_track_stack();
11256 +
11257 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
11258 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
11259
11260 diff -urNp linux-2.6.32.41/arch/x86/kernel/apic/io_apic.c linux-2.6.32.41/arch/x86/kernel/apic/io_apic.c
11261 --- linux-2.6.32.41/arch/x86/kernel/apic/io_apic.c 2011-03-27 14:31:47.000000000 -0400
11262 +++ linux-2.6.32.41/arch/x86/kernel/apic/io_apic.c 2011-05-04 17:56:20.000000000 -0400
11263 @@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapi
11264 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
11265 GFP_ATOMIC);
11266 if (!ioapic_entries)
11267 - return 0;
11268 + return NULL;
11269
11270 for (apic = 0; apic < nr_ioapics; apic++) {
11271 ioapic_entries[apic] =
11272 @@ -733,7 +733,7 @@ nomem:
11273 kfree(ioapic_entries[apic]);
11274 kfree(ioapic_entries);
11275
11276 - return 0;
11277 + return NULL;
11278 }
11279
11280 /*
11281 @@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
11282 }
11283 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
11284
11285 -void lock_vector_lock(void)
11286 +void lock_vector_lock(void) __acquires(vector_lock)
11287 {
11288 /* Used to the online set of cpus does not change
11289 * during assign_irq_vector.
11290 @@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
11291 spin_lock(&vector_lock);
11292 }
11293
11294 -void unlock_vector_lock(void)
11295 +void unlock_vector_lock(void) __releases(vector_lock)
11296 {
11297 spin_unlock(&vector_lock);
11298 }
11299 @@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int i
11300 ack_APIC_irq();
11301 }
11302
11303 -atomic_t irq_mis_count;
11304 +atomic_unchecked_t irq_mis_count;
11305
11306 static void ack_apic_level(unsigned int irq)
11307 {
11308 @@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int
11309
11310 /* Tail end of version 0x11 I/O APIC bug workaround */
11311 if (!(v & (1 << (i & 0x1f)))) {
11312 - atomic_inc(&irq_mis_count);
11313 + atomic_inc_unchecked(&irq_mis_count);
11314 spin_lock(&ioapic_lock);
11315 __mask_and_edge_IO_APIC_irq(cfg);
11316 __unmask_and_level_IO_APIC_irq(cfg);
11317 diff -urNp linux-2.6.32.41/arch/x86/kernel/apm_32.c linux-2.6.32.41/arch/x86/kernel/apm_32.c
11318 --- linux-2.6.32.41/arch/x86/kernel/apm_32.c 2011-03-27 14:31:47.000000000 -0400
11319 +++ linux-2.6.32.41/arch/x86/kernel/apm_32.c 2011-04-23 12:56:10.000000000 -0400
11320 @@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
11321 * This is for buggy BIOS's that refer to (real mode) segment 0x40
11322 * even though they are called in protected mode.
11323 */
11324 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
11325 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
11326 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
11327
11328 static const char driver_version[] = "1.16ac"; /* no spaces */
11329 @@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
11330 BUG_ON(cpu != 0);
11331 gdt = get_cpu_gdt_table(cpu);
11332 save_desc_40 = gdt[0x40 / 8];
11333 +
11334 + pax_open_kernel();
11335 gdt[0x40 / 8] = bad_bios_desc;
11336 + pax_close_kernel();
11337
11338 apm_irq_save(flags);
11339 APM_DO_SAVE_SEGS;
11340 @@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
11341 &call->esi);
11342 APM_DO_RESTORE_SEGS;
11343 apm_irq_restore(flags);
11344 +
11345 + pax_open_kernel();
11346 gdt[0x40 / 8] = save_desc_40;
11347 + pax_close_kernel();
11348 +
11349 put_cpu();
11350
11351 return call->eax & 0xff;
11352 @@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void
11353 BUG_ON(cpu != 0);
11354 gdt = get_cpu_gdt_table(cpu);
11355 save_desc_40 = gdt[0x40 / 8];
11356 +
11357 + pax_open_kernel();
11358 gdt[0x40 / 8] = bad_bios_desc;
11359 + pax_close_kernel();
11360
11361 apm_irq_save(flags);
11362 APM_DO_SAVE_SEGS;
11363 @@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void
11364 &call->eax);
11365 APM_DO_RESTORE_SEGS;
11366 apm_irq_restore(flags);
11367 +
11368 + pax_open_kernel();
11369 gdt[0x40 / 8] = save_desc_40;
11370 + pax_close_kernel();
11371 +
11372 put_cpu();
11373 return error;
11374 }
11375 @@ -975,7 +989,7 @@ recalc:
11376
11377 static void apm_power_off(void)
11378 {
11379 - unsigned char po_bios_call[] = {
11380 + const unsigned char po_bios_call[] = {
11381 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
11382 0x8e, 0xd0, /* movw ax,ss */
11383 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
11384 @@ -2357,12 +2371,15 @@ static int __init apm_init(void)
11385 * code to that CPU.
11386 */
11387 gdt = get_cpu_gdt_table(0);
11388 +
11389 + pax_open_kernel();
11390 set_desc_base(&gdt[APM_CS >> 3],
11391 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
11392 set_desc_base(&gdt[APM_CS_16 >> 3],
11393 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
11394 set_desc_base(&gdt[APM_DS >> 3],
11395 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
11396 + pax_close_kernel();
11397
11398 proc_create("apm", 0, NULL, &apm_file_ops);
11399
11400 diff -urNp linux-2.6.32.41/arch/x86/kernel/asm-offsets_32.c linux-2.6.32.41/arch/x86/kernel/asm-offsets_32.c
11401 --- linux-2.6.32.41/arch/x86/kernel/asm-offsets_32.c 2011-03-27 14:31:47.000000000 -0400
11402 +++ linux-2.6.32.41/arch/x86/kernel/asm-offsets_32.c 2011-05-16 21:46:57.000000000 -0400
11403 @@ -51,7 +51,6 @@ void foo(void)
11404 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
11405 BLANK();
11406
11407 - OFFSET(TI_task, thread_info, task);
11408 OFFSET(TI_exec_domain, thread_info, exec_domain);
11409 OFFSET(TI_flags, thread_info, flags);
11410 OFFSET(TI_status, thread_info, status);
11411 @@ -60,6 +59,8 @@ void foo(void)
11412 OFFSET(TI_restart_block, thread_info, restart_block);
11413 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
11414 OFFSET(TI_cpu, thread_info, cpu);
11415 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
11416 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11417 BLANK();
11418
11419 OFFSET(GDS_size, desc_ptr, size);
11420 @@ -99,6 +100,7 @@ void foo(void)
11421
11422 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11423 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
11424 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
11425 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
11426 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
11427 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
11428 @@ -115,6 +117,11 @@ void foo(void)
11429 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
11430 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11431 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11432 +
11433 +#ifdef CONFIG_PAX_KERNEXEC
11434 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11435 +#endif
11436 +
11437 #endif
11438
11439 #ifdef CONFIG_XEN
11440 diff -urNp linux-2.6.32.41/arch/x86/kernel/asm-offsets_64.c linux-2.6.32.41/arch/x86/kernel/asm-offsets_64.c
11441 --- linux-2.6.32.41/arch/x86/kernel/asm-offsets_64.c 2011-03-27 14:31:47.000000000 -0400
11442 +++ linux-2.6.32.41/arch/x86/kernel/asm-offsets_64.c 2011-05-16 21:46:57.000000000 -0400
11443 @@ -44,6 +44,8 @@ int main(void)
11444 ENTRY(addr_limit);
11445 ENTRY(preempt_count);
11446 ENTRY(status);
11447 + ENTRY(lowest_stack);
11448 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11449 #ifdef CONFIG_IA32_EMULATION
11450 ENTRY(sysenter_return);
11451 #endif
11452 @@ -63,6 +65,18 @@ int main(void)
11453 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11454 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
11455 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
11456 +
11457 +#ifdef CONFIG_PAX_KERNEXEC
11458 + OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11459 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11460 +#endif
11461 +
11462 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11463 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
11464 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
11465 + OFFSET(PV_MMU_set_pgd, pv_mmu_ops, set_pgd);
11466 +#endif
11467 +
11468 #endif
11469
11470
11471 @@ -115,6 +129,7 @@ int main(void)
11472 ENTRY(cr8);
11473 BLANK();
11474 #undef ENTRY
11475 + DEFINE(TSS_size, sizeof(struct tss_struct));
11476 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
11477 BLANK();
11478 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
11479 @@ -130,6 +145,7 @@ int main(void)
11480
11481 BLANK();
11482 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11483 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
11484 #ifdef CONFIG_XEN
11485 BLANK();
11486 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
11487 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/amd.c linux-2.6.32.41/arch/x86/kernel/cpu/amd.c
11488 --- linux-2.6.32.41/arch/x86/kernel/cpu/amd.c 2011-05-23 16:56:59.000000000 -0400
11489 +++ linux-2.6.32.41/arch/x86/kernel/cpu/amd.c 2011-05-23 16:57:13.000000000 -0400
11490 @@ -596,7 +596,7 @@ static unsigned int __cpuinit amd_size_c
11491 unsigned int size)
11492 {
11493 /* AMD errata T13 (order #21922) */
11494 - if ((c->x86 == 6)) {
11495 + if (c->x86 == 6) {
11496 /* Duron Rev A0 */
11497 if (c->x86_model == 3 && c->x86_mask == 0)
11498 size = 64;
11499 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/common.c linux-2.6.32.41/arch/x86/kernel/cpu/common.c
11500 --- linux-2.6.32.41/arch/x86/kernel/cpu/common.c 2011-03-27 14:31:47.000000000 -0400
11501 +++ linux-2.6.32.41/arch/x86/kernel/cpu/common.c 2011-05-11 18:25:15.000000000 -0400
11502 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
11503
11504 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
11505
11506 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
11507 -#ifdef CONFIG_X86_64
11508 - /*
11509 - * We need valid kernel segments for data and code in long mode too
11510 - * IRET will check the segment types kkeil 2000/10/28
11511 - * Also sysret mandates a special GDT layout
11512 - *
11513 - * TLS descriptors are currently at a different place compared to i386.
11514 - * Hopefully nobody expects them at a fixed place (Wine?)
11515 - */
11516 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
11517 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
11518 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
11519 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
11520 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
11521 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
11522 -#else
11523 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
11524 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11525 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
11526 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
11527 - /*
11528 - * Segments used for calling PnP BIOS have byte granularity.
11529 - * They code segments and data segments have fixed 64k limits,
11530 - * the transfer segment sizes are set at run time.
11531 - */
11532 - /* 32-bit code */
11533 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11534 - /* 16-bit code */
11535 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11536 - /* 16-bit data */
11537 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
11538 - /* 16-bit data */
11539 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
11540 - /* 16-bit data */
11541 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
11542 - /*
11543 - * The APM segments have byte granularity and their bases
11544 - * are set at run time. All have 64k limits.
11545 - */
11546 - /* 32-bit code */
11547 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11548 - /* 16-bit code */
11549 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11550 - /* data */
11551 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
11552 -
11553 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11554 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11555 - GDT_STACK_CANARY_INIT
11556 -#endif
11557 -} };
11558 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
11559 -
11560 static int __init x86_xsave_setup(char *s)
11561 {
11562 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
11563 @@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
11564 {
11565 struct desc_ptr gdt_descr;
11566
11567 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
11568 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
11569 gdt_descr.size = GDT_SIZE - 1;
11570 load_gdt(&gdt_descr);
11571 /* Reload the per-cpu base */
11572 @@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struc
11573 /* Filter out anything that depends on CPUID levels we don't have */
11574 filter_cpuid_features(c, true);
11575
11576 +#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
11577 + setup_clear_cpu_cap(X86_FEATURE_SEP);
11578 +#endif
11579 +
11580 /* If the model name is still unset, do table lookup. */
11581 if (!c->x86_model_id[0]) {
11582 const char *p;
11583 @@ -980,6 +930,9 @@ static __init int setup_disablecpuid(cha
11584 }
11585 __setup("clearcpuid=", setup_disablecpuid);
11586
11587 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
11588 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
11589 +
11590 #ifdef CONFIG_X86_64
11591 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
11592
11593 @@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
11594 EXPORT_PER_CPU_SYMBOL(current_task);
11595
11596 DEFINE_PER_CPU(unsigned long, kernel_stack) =
11597 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
11598 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
11599 EXPORT_PER_CPU_SYMBOL(kernel_stack);
11600
11601 DEFINE_PER_CPU(char *, irq_stack_ptr) =
11602 @@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(str
11603 {
11604 memset(regs, 0, sizeof(struct pt_regs));
11605 regs->fs = __KERNEL_PERCPU;
11606 - regs->gs = __KERNEL_STACK_CANARY;
11607 + savesegment(gs, regs->gs);
11608
11609 return regs;
11610 }
11611 @@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
11612 int i;
11613
11614 cpu = stack_smp_processor_id();
11615 - t = &per_cpu(init_tss, cpu);
11616 + t = init_tss + cpu;
11617 orig_ist = &per_cpu(orig_ist, cpu);
11618
11619 #ifdef CONFIG_NUMA
11620 @@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
11621 switch_to_new_gdt(cpu);
11622 loadsegment(fs, 0);
11623
11624 - load_idt((const struct desc_ptr *)&idt_descr);
11625 + load_idt(&idt_descr);
11626
11627 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
11628 syscall_init();
11629 @@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
11630 wrmsrl(MSR_KERNEL_GS_BASE, 0);
11631 barrier();
11632
11633 - check_efer();
11634 if (cpu != 0)
11635 enable_x2apic();
11636
11637 @@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
11638 {
11639 int cpu = smp_processor_id();
11640 struct task_struct *curr = current;
11641 - struct tss_struct *t = &per_cpu(init_tss, cpu);
11642 + struct tss_struct *t = init_tss + cpu;
11643 struct thread_struct *thread = &curr->thread;
11644
11645 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
11646 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/intel.c linux-2.6.32.41/arch/x86/kernel/cpu/intel.c
11647 --- linux-2.6.32.41/arch/x86/kernel/cpu/intel.c 2011-03-27 14:31:47.000000000 -0400
11648 +++ linux-2.6.32.41/arch/x86/kernel/cpu/intel.c 2011-04-17 15:56:46.000000000 -0400
11649 @@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug
11650 * Update the IDT descriptor and reload the IDT so that
11651 * it uses the read-only mapped virtual address.
11652 */
11653 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
11654 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
11655 load_idt(&idt_descr);
11656 }
11657 #endif
11658 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/intel_cacheinfo.c linux-2.6.32.41/arch/x86/kernel/cpu/intel_cacheinfo.c
11659 --- linux-2.6.32.41/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
11660 +++ linux-2.6.32.41/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-04-17 15:56:46.000000000 -0400
11661 @@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kob
11662 return ret;
11663 }
11664
11665 -static struct sysfs_ops sysfs_ops = {
11666 +static const struct sysfs_ops sysfs_ops = {
11667 .show = show,
11668 .store = store,
11669 };
11670 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/Makefile linux-2.6.32.41/arch/x86/kernel/cpu/Makefile
11671 --- linux-2.6.32.41/arch/x86/kernel/cpu/Makefile 2011-03-27 14:31:47.000000000 -0400
11672 +++ linux-2.6.32.41/arch/x86/kernel/cpu/Makefile 2011-04-17 15:56:46.000000000 -0400
11673 @@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
11674 CFLAGS_REMOVE_common.o = -pg
11675 endif
11676
11677 -# Make sure load_percpu_segment has no stackprotector
11678 -nostackp := $(call cc-option, -fno-stack-protector)
11679 -CFLAGS_common.o := $(nostackp)
11680 -
11681 obj-y := intel_cacheinfo.o addon_cpuid_features.o
11682 obj-y += proc.o capflags.o powerflags.o common.o
11683 obj-y += vmware.o hypervisor.o sched.o
11684 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/mcheck/mce_amd.c linux-2.6.32.41/arch/x86/kernel/cpu/mcheck/mce_amd.c
11685 --- linux-2.6.32.41/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:56:59.000000000 -0400
11686 +++ linux-2.6.32.41/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:57:13.000000000 -0400
11687 @@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kob
11688 return ret;
11689 }
11690
11691 -static struct sysfs_ops threshold_ops = {
11692 +static const struct sysfs_ops threshold_ops = {
11693 .show = show,
11694 .store = store,
11695 };
11696 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/mcheck/mce.c linux-2.6.32.41/arch/x86/kernel/cpu/mcheck/mce.c
11697 --- linux-2.6.32.41/arch/x86/kernel/cpu/mcheck/mce.c 2011-03-27 14:31:47.000000000 -0400
11698 +++ linux-2.6.32.41/arch/x86/kernel/cpu/mcheck/mce.c 2011-05-04 17:56:20.000000000 -0400
11699 @@ -43,6 +43,7 @@
11700 #include <asm/ipi.h>
11701 #include <asm/mce.h>
11702 #include <asm/msr.h>
11703 +#include <asm/local.h>
11704
11705 #include "mce-internal.h"
11706
11707 @@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
11708 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
11709 m->cs, m->ip);
11710
11711 - if (m->cs == __KERNEL_CS)
11712 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
11713 print_symbol("{%s}", m->ip);
11714 pr_cont("\n");
11715 }
11716 @@ -221,10 +222,10 @@ static void print_mce_tail(void)
11717
11718 #define PANIC_TIMEOUT 5 /* 5 seconds */
11719
11720 -static atomic_t mce_paniced;
11721 +static atomic_unchecked_t mce_paniced;
11722
11723 static int fake_panic;
11724 -static atomic_t mce_fake_paniced;
11725 +static atomic_unchecked_t mce_fake_paniced;
11726
11727 /* Panic in progress. Enable interrupts and wait for final IPI */
11728 static void wait_for_panic(void)
11729 @@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct
11730 /*
11731 * Make sure only one CPU runs in machine check panic
11732 */
11733 - if (atomic_inc_return(&mce_paniced) > 1)
11734 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
11735 wait_for_panic();
11736 barrier();
11737
11738 @@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct
11739 console_verbose();
11740 } else {
11741 /* Don't log too much for fake panic */
11742 - if (atomic_inc_return(&mce_fake_paniced) > 1)
11743 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
11744 return;
11745 }
11746 print_mce_head();
11747 @@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
11748 * might have been modified by someone else.
11749 */
11750 rmb();
11751 - if (atomic_read(&mce_paniced))
11752 + if (atomic_read_unchecked(&mce_paniced))
11753 wait_for_panic();
11754 if (!monarch_timeout)
11755 goto out;
11756 @@ -1429,14 +1430,14 @@ void __cpuinit mcheck_init(struct cpuinf
11757 */
11758
11759 static DEFINE_SPINLOCK(mce_state_lock);
11760 -static int open_count; /* #times opened */
11761 +static local_t open_count; /* #times opened */
11762 static int open_exclu; /* already open exclusive? */
11763
11764 static int mce_open(struct inode *inode, struct file *file)
11765 {
11766 spin_lock(&mce_state_lock);
11767
11768 - if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
11769 + if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
11770 spin_unlock(&mce_state_lock);
11771
11772 return -EBUSY;
11773 @@ -1444,7 +1445,7 @@ static int mce_open(struct inode *inode,
11774
11775 if (file->f_flags & O_EXCL)
11776 open_exclu = 1;
11777 - open_count++;
11778 + local_inc(&open_count);
11779
11780 spin_unlock(&mce_state_lock);
11781
11782 @@ -1455,7 +1456,7 @@ static int mce_release(struct inode *ino
11783 {
11784 spin_lock(&mce_state_lock);
11785
11786 - open_count--;
11787 + local_dec(&open_count);
11788 open_exclu = 0;
11789
11790 spin_unlock(&mce_state_lock);
11791 @@ -2082,7 +2083,7 @@ struct dentry *mce_get_debugfs_dir(void)
11792 static void mce_reset(void)
11793 {
11794 cpu_missing = 0;
11795 - atomic_set(&mce_fake_paniced, 0);
11796 + atomic_set_unchecked(&mce_fake_paniced, 0);
11797 atomic_set(&mce_executing, 0);
11798 atomic_set(&mce_callin, 0);
11799 atomic_set(&global_nwo, 0);
11800 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/amd.c linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/amd.c
11801 --- linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/amd.c 2011-03-27 14:31:47.000000000 -0400
11802 +++ linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/amd.c 2011-04-17 15:56:46.000000000 -0400
11803 @@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base
11804 return 0;
11805 }
11806
11807 -static struct mtrr_ops amd_mtrr_ops = {
11808 +static const struct mtrr_ops amd_mtrr_ops = {
11809 .vendor = X86_VENDOR_AMD,
11810 .set = amd_set_mtrr,
11811 .get = amd_get_mtrr,
11812 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/centaur.c linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/centaur.c
11813 --- linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/centaur.c 2011-03-27 14:31:47.000000000 -0400
11814 +++ linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/centaur.c 2011-04-17 15:56:46.000000000 -0400
11815 @@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long
11816 return 0;
11817 }
11818
11819 -static struct mtrr_ops centaur_mtrr_ops = {
11820 +static const struct mtrr_ops centaur_mtrr_ops = {
11821 .vendor = X86_VENDOR_CENTAUR,
11822 .set = centaur_set_mcr,
11823 .get = centaur_get_mcr,
11824 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/cyrix.c linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/cyrix.c
11825 --- linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-03-27 14:31:47.000000000 -0400
11826 +++ linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-04-17 15:56:46.000000000 -0400
11827 @@ -265,7 +265,7 @@ static void cyrix_set_all(void)
11828 post_set();
11829 }
11830
11831 -static struct mtrr_ops cyrix_mtrr_ops = {
11832 +static const struct mtrr_ops cyrix_mtrr_ops = {
11833 .vendor = X86_VENDOR_CYRIX,
11834 .set_all = cyrix_set_all,
11835 .set = cyrix_set_arr,
11836 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/generic.c linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/generic.c
11837 --- linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/generic.c 2011-03-27 14:31:47.000000000 -0400
11838 +++ linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/generic.c 2011-04-23 12:56:10.000000000 -0400
11839 @@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
11840 /*
11841 * Generic structure...
11842 */
11843 -struct mtrr_ops generic_mtrr_ops = {
11844 +const struct mtrr_ops generic_mtrr_ops = {
11845 .use_intel_if = 1,
11846 .set_all = generic_set_all,
11847 .get = generic_get_mtrr,
11848 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/main.c linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/main.c
11849 --- linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:00:52.000000000 -0400
11850 +++ linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:03:05.000000000 -0400
11851 @@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
11852 u64 size_or_mask, size_and_mask;
11853 static bool mtrr_aps_delayed_init;
11854
11855 -static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
11856 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
11857
11858 -struct mtrr_ops *mtrr_if;
11859 +const struct mtrr_ops *mtrr_if;
11860
11861 static void set_mtrr(unsigned int reg, unsigned long base,
11862 unsigned long size, mtrr_type type);
11863
11864 -void set_mtrr_ops(struct mtrr_ops *ops)
11865 +void set_mtrr_ops(const struct mtrr_ops *ops)
11866 {
11867 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
11868 mtrr_ops[ops->vendor] = ops;
11869 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/mtrr.h linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/mtrr.h
11870 --- linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-03-27 14:31:47.000000000 -0400
11871 +++ linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-04-17 15:56:46.000000000 -0400
11872 @@ -12,19 +12,19 @@
11873 extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
11874
11875 struct mtrr_ops {
11876 - u32 vendor;
11877 - u32 use_intel_if;
11878 - void (*set)(unsigned int reg, unsigned long base,
11879 + const u32 vendor;
11880 + const u32 use_intel_if;
11881 + void (* const set)(unsigned int reg, unsigned long base,
11882 unsigned long size, mtrr_type type);
11883 - void (*set_all)(void);
11884 + void (* const set_all)(void);
11885
11886 - void (*get)(unsigned int reg, unsigned long *base,
11887 + void (* const get)(unsigned int reg, unsigned long *base,
11888 unsigned long *size, mtrr_type *type);
11889 - int (*get_free_region)(unsigned long base, unsigned long size,
11890 + int (* const get_free_region)(unsigned long base, unsigned long size,
11891 int replace_reg);
11892 - int (*validate_add_page)(unsigned long base, unsigned long size,
11893 + int (* const validate_add_page)(unsigned long base, unsigned long size,
11894 unsigned int type);
11895 - int (*have_wrcomb)(void);
11896 + int (* const have_wrcomb)(void);
11897 };
11898
11899 extern int generic_get_free_region(unsigned long base, unsigned long size,
11900 @@ -32,7 +32,7 @@ extern int generic_get_free_region(unsig
11901 extern int generic_validate_add_page(unsigned long base, unsigned long size,
11902 unsigned int type);
11903
11904 -extern struct mtrr_ops generic_mtrr_ops;
11905 +extern const struct mtrr_ops generic_mtrr_ops;
11906
11907 extern int positive_have_wrcomb(void);
11908
11909 @@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int in
11910 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
11911 void get_mtrr_state(void);
11912
11913 -extern void set_mtrr_ops(struct mtrr_ops *ops);
11914 +extern void set_mtrr_ops(const struct mtrr_ops *ops);
11915
11916 extern u64 size_or_mask, size_and_mask;
11917 -extern struct mtrr_ops *mtrr_if;
11918 +extern const struct mtrr_ops *mtrr_if;
11919
11920 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
11921 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
11922 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/perfctr-watchdog.c linux-2.6.32.41/arch/x86/kernel/cpu/perfctr-watchdog.c
11923 --- linux-2.6.32.41/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-03-27 14:31:47.000000000 -0400
11924 +++ linux-2.6.32.41/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-04-17 15:56:46.000000000 -0400
11925 @@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
11926
11927 /* Interface defining a CPU specific perfctr watchdog */
11928 struct wd_ops {
11929 - int (*reserve)(void);
11930 - void (*unreserve)(void);
11931 - int (*setup)(unsigned nmi_hz);
11932 - void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
11933 - void (*stop)(void);
11934 + int (* const reserve)(void);
11935 + void (* const unreserve)(void);
11936 + int (* const setup)(unsigned nmi_hz);
11937 + void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
11938 + void (* const stop)(void);
11939 unsigned perfctr;
11940 unsigned evntsel;
11941 u64 checkbit;
11942 @@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
11943 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
11944 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
11945
11946 +/* cannot be const */
11947 static struct wd_ops intel_arch_wd_ops;
11948
11949 static int setup_intel_arch_watchdog(unsigned nmi_hz)
11950 @@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(uns
11951 return 1;
11952 }
11953
11954 +/* cannot be const */
11955 static struct wd_ops intel_arch_wd_ops __read_mostly = {
11956 .reserve = single_msr_reserve,
11957 .unreserve = single_msr_unreserve,
11958 diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/perf_event.c linux-2.6.32.41/arch/x86/kernel/cpu/perf_event.c
11959 --- linux-2.6.32.41/arch/x86/kernel/cpu/perf_event.c 2011-03-27 14:31:47.000000000 -0400
11960 +++ linux-2.6.32.41/arch/x86/kernel/cpu/perf_event.c 2011-05-04 17:56:20.000000000 -0400
11961 @@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event
11962 * count to the generic event atomically:
11963 */
11964 again:
11965 - prev_raw_count = atomic64_read(&hwc->prev_count);
11966 + prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
11967 rdmsrl(hwc->event_base + idx, new_raw_count);
11968
11969 - if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
11970 + if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
11971 new_raw_count) != prev_raw_count)
11972 goto again;
11973
11974 @@ -741,7 +741,7 @@ again:
11975 delta = (new_raw_count << shift) - (prev_raw_count << shift);
11976 delta >>= shift;
11977
11978 - atomic64_add(delta, &event->count);
11979 + atomic64_add_unchecked(delta, &event->count);
11980 atomic64_sub(delta, &hwc->period_left);
11981
11982 return new_raw_count;
11983 @@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_ev
11984 * The hw event starts counting from this event offset,
11985 * mark it to be able to extra future deltas:
11986 */
11987 - atomic64_set(&hwc->prev_count, (u64)-left);
11988 + atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
11989
11990 err = checking_wrmsrl(hwc->event_base + idx,
11991 (u64)(-left) & x86_pmu.event_mask);
11992 @@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs
11993 break;
11994
11995 callchain_store(entry, frame.return_address);
11996 - fp = frame.next_frame;
11997 + fp = (__force const void __user *)frame.next_frame;
11998 }
11999 }
12000
12001 diff -urNp linux-2.6.32.41/arch/x86/kernel/crash.c linux-2.6.32.41/arch/x86/kernel/crash.c
12002 --- linux-2.6.32.41/arch/x86/kernel/crash.c 2011-03-27 14:31:47.000000000 -0400
12003 +++ linux-2.6.32.41/arch/x86/kernel/crash.c 2011-04-17 15:56:46.000000000 -0400
12004 @@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu,
12005 regs = args->regs;
12006
12007 #ifdef CONFIG_X86_32
12008 - if (!user_mode_vm(regs)) {
12009 + if (!user_mode(regs)) {
12010 crash_fixup_ss_esp(&fixed_regs, regs);
12011 regs = &fixed_regs;
12012 }
12013 diff -urNp linux-2.6.32.41/arch/x86/kernel/doublefault_32.c linux-2.6.32.41/arch/x86/kernel/doublefault_32.c
12014 --- linux-2.6.32.41/arch/x86/kernel/doublefault_32.c 2011-03-27 14:31:47.000000000 -0400
12015 +++ linux-2.6.32.41/arch/x86/kernel/doublefault_32.c 2011-04-17 15:56:46.000000000 -0400
12016 @@ -11,7 +11,7 @@
12017
12018 #define DOUBLEFAULT_STACKSIZE (1024)
12019 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
12020 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
12021 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12022
12023 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12024
12025 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
12026 unsigned long gdt, tss;
12027
12028 store_gdt(&gdt_desc);
12029 - gdt = gdt_desc.address;
12030 + gdt = (unsigned long)gdt_desc.address;
12031
12032 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12033
12034 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
12035 /* 0x2 bit is always set */
12036 .flags = X86_EFLAGS_SF | 0x2,
12037 .sp = STACK_START,
12038 - .es = __USER_DS,
12039 + .es = __KERNEL_DS,
12040 .cs = __KERNEL_CS,
12041 .ss = __KERNEL_DS,
12042 - .ds = __USER_DS,
12043 + .ds = __KERNEL_DS,
12044 .fs = __KERNEL_PERCPU,
12045
12046 .__cr3 = __pa_nodebug(swapper_pg_dir),
12047 diff -urNp linux-2.6.32.41/arch/x86/kernel/dumpstack_32.c linux-2.6.32.41/arch/x86/kernel/dumpstack_32.c
12048 --- linux-2.6.32.41/arch/x86/kernel/dumpstack_32.c 2011-03-27 14:31:47.000000000 -0400
12049 +++ linux-2.6.32.41/arch/x86/kernel/dumpstack_32.c 2011-04-17 15:56:46.000000000 -0400
12050 @@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task
12051 #endif
12052
12053 for (;;) {
12054 - struct thread_info *context;
12055 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12056 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12057
12058 - context = (struct thread_info *)
12059 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12060 - bp = print_context_stack(context, stack, bp, ops,
12061 - data, NULL, &graph);
12062 -
12063 - stack = (unsigned long *)context->previous_esp;
12064 - if (!stack)
12065 + if (stack_start == task_stack_page(task))
12066 break;
12067 + stack = *(unsigned long **)stack_start;
12068 if (ops->stack(data, "IRQ") < 0)
12069 break;
12070 touch_nmi_watchdog();
12071 @@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs
12072 * When in-kernel, we also print out the stack and code at the
12073 * time of the fault..
12074 */
12075 - if (!user_mode_vm(regs)) {
12076 + if (!user_mode(regs)) {
12077 unsigned int code_prologue = code_bytes * 43 / 64;
12078 unsigned int code_len = code_bytes;
12079 unsigned char c;
12080 u8 *ip;
12081 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12082
12083 printk(KERN_EMERG "Stack:\n");
12084 show_stack_log_lvl(NULL, regs, &regs->sp,
12085 @@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs
12086
12087 printk(KERN_EMERG "Code: ");
12088
12089 - ip = (u8 *)regs->ip - code_prologue;
12090 + ip = (u8 *)regs->ip - code_prologue + cs_base;
12091 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12092 /* try starting at IP */
12093 - ip = (u8 *)regs->ip;
12094 + ip = (u8 *)regs->ip + cs_base;
12095 code_len = code_len - code_prologue + 1;
12096 }
12097 for (i = 0; i < code_len; i++, ip++) {
12098 @@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs
12099 printk(" Bad EIP value.");
12100 break;
12101 }
12102 - if (ip == (u8 *)regs->ip)
12103 + if (ip == (u8 *)regs->ip + cs_base)
12104 printk("<%02x> ", c);
12105 else
12106 printk("%02x ", c);
12107 @@ -149,6 +146,7 @@ int is_valid_bugaddr(unsigned long ip)
12108 {
12109 unsigned short ud2;
12110
12111 + ip = ktla_ktva(ip);
12112 if (ip < PAGE_OFFSET)
12113 return 0;
12114 if (probe_kernel_address((unsigned short *)ip, ud2))
12115 diff -urNp linux-2.6.32.41/arch/x86/kernel/dumpstack_64.c linux-2.6.32.41/arch/x86/kernel/dumpstack_64.c
12116 --- linux-2.6.32.41/arch/x86/kernel/dumpstack_64.c 2011-03-27 14:31:47.000000000 -0400
12117 +++ linux-2.6.32.41/arch/x86/kernel/dumpstack_64.c 2011-04-17 15:56:46.000000000 -0400
12118 @@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task
12119 unsigned long *irq_stack_end =
12120 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12121 unsigned used = 0;
12122 - struct thread_info *tinfo;
12123 int graph = 0;
12124 + void *stack_start;
12125
12126 if (!task)
12127 task = current;
12128 @@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task
12129 * current stack address. If the stacks consist of nested
12130 * exceptions
12131 */
12132 - tinfo = task_thread_info(task);
12133 for (;;) {
12134 char *id;
12135 unsigned long *estack_end;
12136 +
12137 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12138 &used, &id);
12139
12140 @@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task
12141 if (ops->stack(data, id) < 0)
12142 break;
12143
12144 - bp = print_context_stack(tinfo, stack, bp, ops,
12145 + bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12146 data, estack_end, &graph);
12147 ops->stack(data, "<EOE>");
12148 /*
12149 @@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task
12150 if (stack >= irq_stack && stack < irq_stack_end) {
12151 if (ops->stack(data, "IRQ") < 0)
12152 break;
12153 - bp = print_context_stack(tinfo, stack, bp,
12154 + bp = print_context_stack(task, irq_stack, stack, bp,
12155 ops, data, irq_stack_end, &graph);
12156 /*
12157 * We link to the next stack (which would be
12158 @@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task
12159 /*
12160 * This handles the process stack:
12161 */
12162 - bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12163 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12164 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12165 put_cpu();
12166 }
12167 EXPORT_SYMBOL(dump_trace);
12168 diff -urNp linux-2.6.32.41/arch/x86/kernel/dumpstack.c linux-2.6.32.41/arch/x86/kernel/dumpstack.c
12169 --- linux-2.6.32.41/arch/x86/kernel/dumpstack.c 2011-03-27 14:31:47.000000000 -0400
12170 +++ linux-2.6.32.41/arch/x86/kernel/dumpstack.c 2011-04-17 15:56:46.000000000 -0400
12171 @@ -2,6 +2,9 @@
12172 * Copyright (C) 1991, 1992 Linus Torvalds
12173 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12174 */
12175 +#ifdef CONFIG_GRKERNSEC_HIDESYM
12176 +#define __INCLUDED_BY_HIDESYM 1
12177 +#endif
12178 #include <linux/kallsyms.h>
12179 #include <linux/kprobes.h>
12180 #include <linux/uaccess.h>
12181 @@ -28,7 +31,7 @@ static int die_counter;
12182
12183 void printk_address(unsigned long address, int reliable)
12184 {
12185 - printk(" [<%p>] %s%pS\n", (void *) address,
12186 + printk(" [<%p>] %s%pA\n", (void *) address,
12187 reliable ? "" : "? ", (void *) address);
12188 }
12189
12190 @@ -36,9 +39,8 @@ void printk_address(unsigned long addres
12191 static void
12192 print_ftrace_graph_addr(unsigned long addr, void *data,
12193 const struct stacktrace_ops *ops,
12194 - struct thread_info *tinfo, int *graph)
12195 + struct task_struct *task, int *graph)
12196 {
12197 - struct task_struct *task = tinfo->task;
12198 unsigned long ret_addr;
12199 int index = task->curr_ret_stack;
12200
12201 @@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long ad
12202 static inline void
12203 print_ftrace_graph_addr(unsigned long addr, void *data,
12204 const struct stacktrace_ops *ops,
12205 - struct thread_info *tinfo, int *graph)
12206 + struct task_struct *task, int *graph)
12207 { }
12208 #endif
12209
12210 @@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long ad
12211 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12212 */
12213
12214 -static inline int valid_stack_ptr(struct thread_info *tinfo,
12215 - void *p, unsigned int size, void *end)
12216 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12217 {
12218 - void *t = tinfo;
12219 if (end) {
12220 if (p < end && p >= (end-THREAD_SIZE))
12221 return 1;
12222 @@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct
12223 }
12224
12225 unsigned long
12226 -print_context_stack(struct thread_info *tinfo,
12227 +print_context_stack(struct task_struct *task, void *stack_start,
12228 unsigned long *stack, unsigned long bp,
12229 const struct stacktrace_ops *ops, void *data,
12230 unsigned long *end, int *graph)
12231 {
12232 struct stack_frame *frame = (struct stack_frame *)bp;
12233
12234 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12235 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12236 unsigned long addr;
12237
12238 addr = *stack;
12239 @@ -103,7 +103,7 @@ print_context_stack(struct thread_info *
12240 } else {
12241 ops->address(data, addr, 0);
12242 }
12243 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12244 + print_ftrace_graph_addr(addr, data, ops, task, graph);
12245 }
12246 stack++;
12247 }
12248 @@ -180,7 +180,7 @@ void dump_stack(void)
12249 #endif
12250
12251 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
12252 - current->pid, current->comm, print_tainted(),
12253 + task_pid_nr(current), current->comm, print_tainted(),
12254 init_utsname()->release,
12255 (int)strcspn(init_utsname()->version, " "),
12256 init_utsname()->version);
12257 @@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
12258 return flags;
12259 }
12260
12261 +extern void gr_handle_kernel_exploit(void);
12262 +
12263 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12264 {
12265 if (regs && kexec_should_crash(current))
12266 @@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long fl
12267 panic("Fatal exception in interrupt");
12268 if (panic_on_oops)
12269 panic("Fatal exception");
12270 - do_exit(signr);
12271 +
12272 + gr_handle_kernel_exploit();
12273 +
12274 + do_group_exit(signr);
12275 }
12276
12277 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12278 @@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs
12279 unsigned long flags = oops_begin();
12280 int sig = SIGSEGV;
12281
12282 - if (!user_mode_vm(regs))
12283 + if (!user_mode(regs))
12284 report_bug(regs->ip, regs);
12285
12286 if (__die(str, regs, err))
12287 diff -urNp linux-2.6.32.41/arch/x86/kernel/dumpstack.h linux-2.6.32.41/arch/x86/kernel/dumpstack.h
12288 --- linux-2.6.32.41/arch/x86/kernel/dumpstack.h 2011-03-27 14:31:47.000000000 -0400
12289 +++ linux-2.6.32.41/arch/x86/kernel/dumpstack.h 2011-04-23 13:25:26.000000000 -0400
12290 @@ -15,7 +15,7 @@
12291 #endif
12292
12293 extern unsigned long
12294 -print_context_stack(struct thread_info *tinfo,
12295 +print_context_stack(struct task_struct *task, void *stack_start,
12296 unsigned long *stack, unsigned long bp,
12297 const struct stacktrace_ops *ops, void *data,
12298 unsigned long *end, int *graph);
12299 diff -urNp linux-2.6.32.41/arch/x86/kernel/e820.c linux-2.6.32.41/arch/x86/kernel/e820.c
12300 --- linux-2.6.32.41/arch/x86/kernel/e820.c 2011-03-27 14:31:47.000000000 -0400
12301 +++ linux-2.6.32.41/arch/x86/kernel/e820.c 2011-04-17 15:56:46.000000000 -0400
12302 @@ -733,7 +733,7 @@ struct early_res {
12303 };
12304 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
12305 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
12306 - {}
12307 + { 0, 0, {0}, 0 }
12308 };
12309
12310 static int __init find_overlapped_early(u64 start, u64 end)
12311 diff -urNp linux-2.6.32.41/arch/x86/kernel/early_printk.c linux-2.6.32.41/arch/x86/kernel/early_printk.c
12312 --- linux-2.6.32.41/arch/x86/kernel/early_printk.c 2011-03-27 14:31:47.000000000 -0400
12313 +++ linux-2.6.32.41/arch/x86/kernel/early_printk.c 2011-05-16 21:46:57.000000000 -0400
12314 @@ -7,6 +7,7 @@
12315 #include <linux/pci_regs.h>
12316 #include <linux/pci_ids.h>
12317 #include <linux/errno.h>
12318 +#include <linux/sched.h>
12319 #include <asm/io.h>
12320 #include <asm/processor.h>
12321 #include <asm/fcntl.h>
12322 @@ -170,6 +171,8 @@ asmlinkage void early_printk(const char
12323 int n;
12324 va_list ap;
12325
12326 + pax_track_stack();
12327 +
12328 va_start(ap, fmt);
12329 n = vscnprintf(buf, sizeof(buf), fmt, ap);
12330 early_console->write(early_console, buf, n);
12331 diff -urNp linux-2.6.32.41/arch/x86/kernel/efi_32.c linux-2.6.32.41/arch/x86/kernel/efi_32.c
12332 --- linux-2.6.32.41/arch/x86/kernel/efi_32.c 2011-03-27 14:31:47.000000000 -0400
12333 +++ linux-2.6.32.41/arch/x86/kernel/efi_32.c 2011-04-17 15:56:46.000000000 -0400
12334 @@ -38,70 +38,38 @@
12335 */
12336
12337 static unsigned long efi_rt_eflags;
12338 -static pgd_t efi_bak_pg_dir_pointer[2];
12339 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
12340
12341 -void efi_call_phys_prelog(void)
12342 +void __init efi_call_phys_prelog(void)
12343 {
12344 - unsigned long cr4;
12345 - unsigned long temp;
12346 struct desc_ptr gdt_descr;
12347
12348 local_irq_save(efi_rt_eflags);
12349
12350 - /*
12351 - * If I don't have PAE, I should just duplicate two entries in page
12352 - * directory. If I have PAE, I just need to duplicate one entry in
12353 - * page directory.
12354 - */
12355 - cr4 = read_cr4_safe();
12356
12357 - if (cr4 & X86_CR4_PAE) {
12358 - efi_bak_pg_dir_pointer[0].pgd =
12359 - swapper_pg_dir[pgd_index(0)].pgd;
12360 - swapper_pg_dir[0].pgd =
12361 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
12362 - } else {
12363 - efi_bak_pg_dir_pointer[0].pgd =
12364 - swapper_pg_dir[pgd_index(0)].pgd;
12365 - efi_bak_pg_dir_pointer[1].pgd =
12366 - swapper_pg_dir[pgd_index(0x400000)].pgd;
12367 - swapper_pg_dir[pgd_index(0)].pgd =
12368 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
12369 - temp = PAGE_OFFSET + 0x400000;
12370 - swapper_pg_dir[pgd_index(0x400000)].pgd =
12371 - swapper_pg_dir[pgd_index(temp)].pgd;
12372 - }
12373 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
12374 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
12375 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
12376
12377 /*
12378 * After the lock is released, the original page table is restored.
12379 */
12380 __flush_tlb_all();
12381
12382 - gdt_descr.address = __pa(get_cpu_gdt_table(0));
12383 + gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
12384 gdt_descr.size = GDT_SIZE - 1;
12385 load_gdt(&gdt_descr);
12386 }
12387
12388 -void efi_call_phys_epilog(void)
12389 +void __init efi_call_phys_epilog(void)
12390 {
12391 - unsigned long cr4;
12392 struct desc_ptr gdt_descr;
12393
12394 - gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
12395 + gdt_descr.address = get_cpu_gdt_table(0);
12396 gdt_descr.size = GDT_SIZE - 1;
12397 load_gdt(&gdt_descr);
12398
12399 - cr4 = read_cr4_safe();
12400 -
12401 - if (cr4 & X86_CR4_PAE) {
12402 - swapper_pg_dir[pgd_index(0)].pgd =
12403 - efi_bak_pg_dir_pointer[0].pgd;
12404 - } else {
12405 - swapper_pg_dir[pgd_index(0)].pgd =
12406 - efi_bak_pg_dir_pointer[0].pgd;
12407 - swapper_pg_dir[pgd_index(0x400000)].pgd =
12408 - efi_bak_pg_dir_pointer[1].pgd;
12409 - }
12410 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
12411
12412 /*
12413 * After the lock is released, the original page table is restored.
12414 diff -urNp linux-2.6.32.41/arch/x86/kernel/efi_stub_32.S linux-2.6.32.41/arch/x86/kernel/efi_stub_32.S
12415 --- linux-2.6.32.41/arch/x86/kernel/efi_stub_32.S 2011-03-27 14:31:47.000000000 -0400
12416 +++ linux-2.6.32.41/arch/x86/kernel/efi_stub_32.S 2011-04-17 15:56:46.000000000 -0400
12417 @@ -6,6 +6,7 @@
12418 */
12419
12420 #include <linux/linkage.h>
12421 +#include <linux/init.h>
12422 #include <asm/page_types.h>
12423
12424 /*
12425 @@ -20,7 +21,7 @@
12426 * service functions will comply with gcc calling convention, too.
12427 */
12428
12429 -.text
12430 +__INIT
12431 ENTRY(efi_call_phys)
12432 /*
12433 * 0. The function can only be called in Linux kernel. So CS has been
12434 @@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
12435 * The mapping of lower virtual memory has been created in prelog and
12436 * epilog.
12437 */
12438 - movl $1f, %edx
12439 - subl $__PAGE_OFFSET, %edx
12440 - jmp *%edx
12441 + jmp 1f-__PAGE_OFFSET
12442 1:
12443
12444 /*
12445 @@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
12446 * parameter 2, ..., param n. To make things easy, we save the return
12447 * address of efi_call_phys in a global variable.
12448 */
12449 - popl %edx
12450 - movl %edx, saved_return_addr
12451 - /* get the function pointer into ECX*/
12452 - popl %ecx
12453 - movl %ecx, efi_rt_function_ptr
12454 - movl $2f, %edx
12455 - subl $__PAGE_OFFSET, %edx
12456 - pushl %edx
12457 + popl (saved_return_addr)
12458 + popl (efi_rt_function_ptr)
12459
12460 /*
12461 * 3. Clear PG bit in %CR0.
12462 @@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
12463 /*
12464 * 5. Call the physical function.
12465 */
12466 - jmp *%ecx
12467 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
12468
12469 -2:
12470 /*
12471 * 6. After EFI runtime service returns, control will return to
12472 * following instruction. We'd better readjust stack pointer first.
12473 @@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
12474 movl %cr0, %edx
12475 orl $0x80000000, %edx
12476 movl %edx, %cr0
12477 - jmp 1f
12478 -1:
12479 +
12480 /*
12481 * 8. Now restore the virtual mode from flat mode by
12482 * adding EIP with PAGE_OFFSET.
12483 */
12484 - movl $1f, %edx
12485 - jmp *%edx
12486 + jmp 1f+__PAGE_OFFSET
12487 1:
12488
12489 /*
12490 * 9. Balance the stack. And because EAX contain the return value,
12491 * we'd better not clobber it.
12492 */
12493 - leal efi_rt_function_ptr, %edx
12494 - movl (%edx), %ecx
12495 - pushl %ecx
12496 + pushl (efi_rt_function_ptr)
12497
12498 /*
12499 - * 10. Push the saved return address onto the stack and return.
12500 + * 10. Return to the saved return address.
12501 */
12502 - leal saved_return_addr, %edx
12503 - movl (%edx), %ecx
12504 - pushl %ecx
12505 - ret
12506 + jmpl *(saved_return_addr)
12507 ENDPROC(efi_call_phys)
12508 .previous
12509
12510 -.data
12511 +__INITDATA
12512 saved_return_addr:
12513 .long 0
12514 efi_rt_function_ptr:
12515 diff -urNp linux-2.6.32.41/arch/x86/kernel/entry_32.S linux-2.6.32.41/arch/x86/kernel/entry_32.S
12516 --- linux-2.6.32.41/arch/x86/kernel/entry_32.S 2011-03-27 14:31:47.000000000 -0400
12517 +++ linux-2.6.32.41/arch/x86/kernel/entry_32.S 2011-05-22 23:02:03.000000000 -0400
12518 @@ -185,13 +185,146 @@
12519 /*CFI_REL_OFFSET gs, PT_GS*/
12520 .endm
12521 .macro SET_KERNEL_GS reg
12522 +
12523 +#ifdef CONFIG_CC_STACKPROTECTOR
12524 movl $(__KERNEL_STACK_CANARY), \reg
12525 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12526 + movl $(__USER_DS), \reg
12527 +#else
12528 + xorl \reg, \reg
12529 +#endif
12530 +
12531 movl \reg, %gs
12532 .endm
12533
12534 #endif /* CONFIG_X86_32_LAZY_GS */
12535
12536 -.macro SAVE_ALL
12537 +.macro pax_enter_kernel
12538 +#ifdef CONFIG_PAX_KERNEXEC
12539 + call pax_enter_kernel
12540 +#endif
12541 +.endm
12542 +
12543 +.macro pax_exit_kernel
12544 +#ifdef CONFIG_PAX_KERNEXEC
12545 + call pax_exit_kernel
12546 +#endif
12547 +.endm
12548 +
12549 +#ifdef CONFIG_PAX_KERNEXEC
12550 +ENTRY(pax_enter_kernel)
12551 +#ifdef CONFIG_PARAVIRT
12552 + pushl %eax
12553 + pushl %ecx
12554 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
12555 + mov %eax, %esi
12556 +#else
12557 + mov %cr0, %esi
12558 +#endif
12559 + bts $16, %esi
12560 + jnc 1f
12561 + mov %cs, %esi
12562 + cmp $__KERNEL_CS, %esi
12563 + jz 3f
12564 + ljmp $__KERNEL_CS, $3f
12565 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
12566 +2:
12567 +#ifdef CONFIG_PARAVIRT
12568 + mov %esi, %eax
12569 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
12570 +#else
12571 + mov %esi, %cr0
12572 +#endif
12573 +3:
12574 +#ifdef CONFIG_PARAVIRT
12575 + popl %ecx
12576 + popl %eax
12577 +#endif
12578 + ret
12579 +ENDPROC(pax_enter_kernel)
12580 +
12581 +ENTRY(pax_exit_kernel)
12582 +#ifdef CONFIG_PARAVIRT
12583 + pushl %eax
12584 + pushl %ecx
12585 +#endif
12586 + mov %cs, %esi
12587 + cmp $__KERNEXEC_KERNEL_CS, %esi
12588 + jnz 2f
12589 +#ifdef CONFIG_PARAVIRT
12590 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
12591 + mov %eax, %esi
12592 +#else
12593 + mov %cr0, %esi
12594 +#endif
12595 + btr $16, %esi
12596 + ljmp $__KERNEL_CS, $1f
12597 +1:
12598 +#ifdef CONFIG_PARAVIRT
12599 + mov %esi, %eax
12600 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
12601 +#else
12602 + mov %esi, %cr0
12603 +#endif
12604 +2:
12605 +#ifdef CONFIG_PARAVIRT
12606 + popl %ecx
12607 + popl %eax
12608 +#endif
12609 + ret
12610 +ENDPROC(pax_exit_kernel)
12611 +#endif
12612 +
12613 +.macro pax_erase_kstack
12614 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12615 + call pax_erase_kstack
12616 +#endif
12617 +.endm
12618 +
12619 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12620 +/*
12621 + * ebp: thread_info
12622 + * ecx, edx: can be clobbered
12623 + */
12624 +ENTRY(pax_erase_kstack)
12625 + pushl %edi
12626 + pushl %eax
12627 +
12628 + mov TI_lowest_stack(%ebp), %edi
12629 + mov $-0xBEEF, %eax
12630 + std
12631 +
12632 +1: mov %edi, %ecx
12633 + and $THREAD_SIZE_asm - 1, %ecx
12634 + shr $2, %ecx
12635 + repne scasl
12636 + jecxz 2f
12637 +
12638 + cmp $2*16, %ecx
12639 + jc 2f
12640 +
12641 + mov $2*16, %ecx
12642 + repe scasl
12643 + jecxz 2f
12644 + jne 1b
12645 +
12646 +2: cld
12647 + mov %esp, %ecx
12648 + sub %edi, %ecx
12649 + shr $2, %ecx
12650 + rep stosl
12651 +
12652 + mov TI_task_thread_sp0(%ebp), %edi
12653 + sub $128, %edi
12654 + mov %edi, TI_lowest_stack(%ebp)
12655 +
12656 + popl %eax
12657 + popl %edi
12658 + ret
12659 +ENDPROC(pax_erase_kstack)
12660 +#endif
12661 +
12662 +.macro __SAVE_ALL _DS
12663 cld
12664 PUSH_GS
12665 pushl %fs
12666 @@ -224,7 +357,7 @@
12667 pushl %ebx
12668 CFI_ADJUST_CFA_OFFSET 4
12669 CFI_REL_OFFSET ebx, 0
12670 - movl $(__USER_DS), %edx
12671 + movl $\_DS, %edx
12672 movl %edx, %ds
12673 movl %edx, %es
12674 movl $(__KERNEL_PERCPU), %edx
12675 @@ -232,6 +365,15 @@
12676 SET_KERNEL_GS %edx
12677 .endm
12678
12679 +.macro SAVE_ALL
12680 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
12681 + __SAVE_ALL __KERNEL_DS
12682 + pax_enter_kernel
12683 +#else
12684 + __SAVE_ALL __USER_DS
12685 +#endif
12686 +.endm
12687 +
12688 .macro RESTORE_INT_REGS
12689 popl %ebx
12690 CFI_ADJUST_CFA_OFFSET -4
12691 @@ -352,7 +494,15 @@ check_userspace:
12692 movb PT_CS(%esp), %al
12693 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
12694 cmpl $USER_RPL, %eax
12695 +
12696 +#ifdef CONFIG_PAX_KERNEXEC
12697 + jae resume_userspace
12698 +
12699 + PAX_EXIT_KERNEL
12700 + jmp resume_kernel
12701 +#else
12702 jb resume_kernel # not returning to v8086 or userspace
12703 +#endif
12704
12705 ENTRY(resume_userspace)
12706 LOCKDEP_SYS_EXIT
12707 @@ -364,7 +514,7 @@ ENTRY(resume_userspace)
12708 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
12709 # int/exception return?
12710 jne work_pending
12711 - jmp restore_all
12712 + jmp restore_all_pax
12713 END(ret_from_exception)
12714
12715 #ifdef CONFIG_PREEMPT
12716 @@ -414,25 +564,36 @@ sysenter_past_esp:
12717 /*CFI_REL_OFFSET cs, 0*/
12718 /*
12719 * Push current_thread_info()->sysenter_return to the stack.
12720 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
12721 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
12722 */
12723 - pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
12724 + pushl $0
12725 CFI_ADJUST_CFA_OFFSET 4
12726 CFI_REL_OFFSET eip, 0
12727
12728 pushl %eax
12729 CFI_ADJUST_CFA_OFFSET 4
12730 SAVE_ALL
12731 + GET_THREAD_INFO(%ebp)
12732 + movl TI_sysenter_return(%ebp),%ebp
12733 + movl %ebp,PT_EIP(%esp)
12734 ENABLE_INTERRUPTS(CLBR_NONE)
12735
12736 /*
12737 * Load the potential sixth argument from user stack.
12738 * Careful about security.
12739 */
12740 + movl PT_OLDESP(%esp),%ebp
12741 +
12742 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12743 + mov PT_OLDSS(%esp),%ds
12744 +1: movl %ds:(%ebp),%ebp
12745 + push %ss
12746 + pop %ds
12747 +#else
12748 cmpl $__PAGE_OFFSET-3,%ebp
12749 jae syscall_fault
12750 1: movl (%ebp),%ebp
12751 +#endif
12752 +
12753 movl %ebp,PT_EBP(%esp)
12754 .section __ex_table,"a"
12755 .align 4
12756 @@ -455,12 +616,23 @@ sysenter_do_call:
12757 testl $_TIF_ALLWORK_MASK, %ecx
12758 jne sysexit_audit
12759 sysenter_exit:
12760 +
12761 +#ifdef CONFIG_PAX_RANDKSTACK
12762 + pushl_cfi %eax
12763 + call pax_randomize_kstack
12764 + popl_cfi %eax
12765 +#endif
12766 +
12767 + pax_erase_kstack
12768 +
12769 /* if something modifies registers it must also disable sysexit */
12770 movl PT_EIP(%esp), %edx
12771 movl PT_OLDESP(%esp), %ecx
12772 xorl %ebp,%ebp
12773 TRACE_IRQS_ON
12774 1: mov PT_FS(%esp), %fs
12775 +2: mov PT_DS(%esp), %ds
12776 +3: mov PT_ES(%esp), %es
12777 PTGS_TO_GS
12778 ENABLE_INTERRUPTS_SYSEXIT
12779
12780 @@ -477,6 +649,9 @@ sysenter_audit:
12781 movl %eax,%edx /* 2nd arg: syscall number */
12782 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
12783 call audit_syscall_entry
12784 +
12785 + pax_erase_kstack
12786 +
12787 pushl %ebx
12788 CFI_ADJUST_CFA_OFFSET 4
12789 movl PT_EAX(%esp),%eax /* reload syscall number */
12790 @@ -504,11 +679,17 @@ sysexit_audit:
12791
12792 CFI_ENDPROC
12793 .pushsection .fixup,"ax"
12794 -2: movl $0,PT_FS(%esp)
12795 +4: movl $0,PT_FS(%esp)
12796 + jmp 1b
12797 +5: movl $0,PT_DS(%esp)
12798 + jmp 1b
12799 +6: movl $0,PT_ES(%esp)
12800 jmp 1b
12801 .section __ex_table,"a"
12802 .align 4
12803 - .long 1b,2b
12804 + .long 1b,4b
12805 + .long 2b,5b
12806 + .long 3b,6b
12807 .popsection
12808 PTGS_TO_GS_EX
12809 ENDPROC(ia32_sysenter_target)
12810 @@ -538,6 +719,14 @@ syscall_exit:
12811 testl $_TIF_ALLWORK_MASK, %ecx # current->work
12812 jne syscall_exit_work
12813
12814 +restore_all_pax:
12815 +
12816 +#ifdef CONFIG_PAX_RANDKSTACK
12817 + call pax_randomize_kstack
12818 +#endif
12819 +
12820 + pax_erase_kstack
12821 +
12822 restore_all:
12823 TRACE_IRQS_IRET
12824 restore_all_notrace:
12825 @@ -602,7 +791,13 @@ ldt_ss:
12826 mov PT_OLDESP(%esp), %eax /* load userspace esp */
12827 mov %dx, %ax /* eax: new kernel esp */
12828 sub %eax, %edx /* offset (low word is 0) */
12829 - PER_CPU(gdt_page, %ebx)
12830 +#ifdef CONFIG_SMP
12831 + movl PER_CPU_VAR(cpu_number), %ebx
12832 + shll $PAGE_SHIFT_asm, %ebx
12833 + addl $cpu_gdt_table, %ebx
12834 +#else
12835 + movl $cpu_gdt_table, %ebx
12836 +#endif
12837 shr $16, %edx
12838 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
12839 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
12840 @@ -636,31 +831,25 @@ work_resched:
12841 movl TI_flags(%ebp), %ecx
12842 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
12843 # than syscall tracing?
12844 - jz restore_all
12845 + jz restore_all_pax
12846 testb $_TIF_NEED_RESCHED, %cl
12847 jnz work_resched
12848
12849 work_notifysig: # deal with pending signals and
12850 # notify-resume requests
12851 + movl %esp, %eax
12852 #ifdef CONFIG_VM86
12853 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
12854 - movl %esp, %eax
12855 - jne work_notifysig_v86 # returning to kernel-space or
12856 + jz 1f # returning to kernel-space or
12857 # vm86-space
12858 - xorl %edx, %edx
12859 - call do_notify_resume
12860 - jmp resume_userspace_sig
12861
12862 - ALIGN
12863 -work_notifysig_v86:
12864 pushl %ecx # save ti_flags for do_notify_resume
12865 CFI_ADJUST_CFA_OFFSET 4
12866 call save_v86_state # %eax contains pt_regs pointer
12867 popl %ecx
12868 CFI_ADJUST_CFA_OFFSET -4
12869 movl %eax, %esp
12870 -#else
12871 - movl %esp, %eax
12872 +1:
12873 #endif
12874 xorl %edx, %edx
12875 call do_notify_resume
12876 @@ -673,6 +862,9 @@ syscall_trace_entry:
12877 movl $-ENOSYS,PT_EAX(%esp)
12878 movl %esp, %eax
12879 call syscall_trace_enter
12880 +
12881 + pax_erase_kstack
12882 +
12883 /* What it returned is what we'll actually use. */
12884 cmpl $(nr_syscalls), %eax
12885 jnae syscall_call
12886 @@ -695,6 +887,10 @@ END(syscall_exit_work)
12887
12888 RING0_INT_FRAME # can't unwind into user space anyway
12889 syscall_fault:
12890 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12891 + push %ss
12892 + pop %ds
12893 +#endif
12894 GET_THREAD_INFO(%ebp)
12895 movl $-EFAULT,PT_EAX(%esp)
12896 jmp resume_userspace
12897 @@ -726,6 +922,33 @@ PTREGSCALL(rt_sigreturn)
12898 PTREGSCALL(vm86)
12899 PTREGSCALL(vm86old)
12900
12901 + ALIGN;
12902 +ENTRY(kernel_execve)
12903 + push %ebp
12904 + sub $PT_OLDSS+4,%esp
12905 + push %edi
12906 + push %ecx
12907 + push %eax
12908 + lea 3*4(%esp),%edi
12909 + mov $PT_OLDSS/4+1,%ecx
12910 + xorl %eax,%eax
12911 + rep stosl
12912 + pop %eax
12913 + pop %ecx
12914 + pop %edi
12915 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
12916 + mov %eax,PT_EBX(%esp)
12917 + mov %edx,PT_ECX(%esp)
12918 + mov %ecx,PT_EDX(%esp)
12919 + mov %esp,%eax
12920 + call sys_execve
12921 + GET_THREAD_INFO(%ebp)
12922 + test %eax,%eax
12923 + jz syscall_exit
12924 + add $PT_OLDSS+4,%esp
12925 + pop %ebp
12926 + ret
12927 +
12928 .macro FIXUP_ESPFIX_STACK
12929 /*
12930 * Switch back for ESPFIX stack to the normal zerobased stack
12931 @@ -735,7 +958,13 @@ PTREGSCALL(vm86old)
12932 * normal stack and adjusts ESP with the matching offset.
12933 */
12934 /* fixup the stack */
12935 - PER_CPU(gdt_page, %ebx)
12936 +#ifdef CONFIG_SMP
12937 + movl PER_CPU_VAR(cpu_number), %ebx
12938 + shll $PAGE_SHIFT_asm, %ebx
12939 + addl $cpu_gdt_table, %ebx
12940 +#else
12941 + movl $cpu_gdt_table, %ebx
12942 +#endif
12943 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
12944 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
12945 shl $16, %eax
12946 @@ -1198,7 +1427,6 @@ return_to_handler:
12947 ret
12948 #endif
12949
12950 -.section .rodata,"a"
12951 #include "syscall_table_32.S"
12952
12953 syscall_table_size=(.-sys_call_table)
12954 @@ -1255,9 +1483,12 @@ error_code:
12955 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
12956 REG_TO_PTGS %ecx
12957 SET_KERNEL_GS %ecx
12958 - movl $(__USER_DS), %ecx
12959 + movl $(__KERNEL_DS), %ecx
12960 movl %ecx, %ds
12961 movl %ecx, %es
12962 +
12963 + pax_enter_kernel
12964 +
12965 TRACE_IRQS_OFF
12966 movl %esp,%eax # pt_regs pointer
12967 call *%edi
12968 @@ -1351,6 +1582,9 @@ nmi_stack_correct:
12969 xorl %edx,%edx # zero error code
12970 movl %esp,%eax # pt_regs pointer
12971 call do_nmi
12972 +
12973 + pax_exit_kernel
12974 +
12975 jmp restore_all_notrace
12976 CFI_ENDPROC
12977
12978 @@ -1391,6 +1625,9 @@ nmi_espfix_stack:
12979 FIXUP_ESPFIX_STACK # %eax == %esp
12980 xorl %edx,%edx # zero error code
12981 call do_nmi
12982 +
12983 + pax_exit_kernel
12984 +
12985 RESTORE_REGS
12986 lss 12+4(%esp), %esp # back to espfix stack
12987 CFI_ADJUST_CFA_OFFSET -24
12988 diff -urNp linux-2.6.32.41/arch/x86/kernel/entry_64.S linux-2.6.32.41/arch/x86/kernel/entry_64.S
12989 --- linux-2.6.32.41/arch/x86/kernel/entry_64.S 2011-03-27 14:31:47.000000000 -0400
12990 +++ linux-2.6.32.41/arch/x86/kernel/entry_64.S 2011-06-04 20:30:53.000000000 -0400
12991 @@ -53,6 +53,7 @@
12992 #include <asm/paravirt.h>
12993 #include <asm/ftrace.h>
12994 #include <asm/percpu.h>
12995 +#include <asm/pgtable.h>
12996
12997 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
12998 #include <linux/elf-em.h>
12999 @@ -174,6 +175,257 @@ ENTRY(native_usergs_sysret64)
13000 ENDPROC(native_usergs_sysret64)
13001 #endif /* CONFIG_PARAVIRT */
13002
13003 + .macro ljmpq sel, off
13004 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
13005 + .byte 0x48; ljmp *1234f(%rip)
13006 + .pushsection .rodata
13007 + .align 16
13008 + 1234: .quad \off; .word \sel
13009 + .popsection
13010 +#else
13011 + pushq $\sel
13012 + pushq $\off
13013 + lretq
13014 +#endif
13015 + .endm
13016 +
13017 + .macro pax_enter_kernel
13018 +#ifdef CONFIG_PAX_KERNEXEC
13019 + call pax_enter_kernel
13020 +#endif
13021 + .endm
13022 +
13023 + .macro pax_exit_kernel
13024 +#ifdef CONFIG_PAX_KERNEXEC
13025 + call pax_exit_kernel
13026 +#endif
13027 + .endm
13028 +
13029 +#ifdef CONFIG_PAX_KERNEXEC
13030 +ENTRY(pax_enter_kernel)
13031 + pushq %rdi
13032 +
13033 +#ifdef CONFIG_PARAVIRT
13034 + PV_SAVE_REGS(CLBR_RDI)
13035 +#endif
13036 +
13037 + GET_CR0_INTO_RDI
13038 + bts $16,%rdi
13039 + jnc 1f
13040 + mov %cs,%edi
13041 + cmp $__KERNEL_CS,%edi
13042 + jz 3f
13043 + ljmpq __KERNEL_CS,3f
13044 +1: ljmpq __KERNEXEC_KERNEL_CS,2f
13045 +2: SET_RDI_INTO_CR0
13046 +3:
13047 +
13048 +#ifdef CONFIG_PARAVIRT
13049 + PV_RESTORE_REGS(CLBR_RDI)
13050 +#endif
13051 +
13052 + popq %rdi
13053 + retq
13054 +ENDPROC(pax_enter_kernel)
13055 +
13056 +ENTRY(pax_exit_kernel)
13057 + pushq %rdi
13058 +
13059 +#ifdef CONFIG_PARAVIRT
13060 + PV_SAVE_REGS(CLBR_RDI)
13061 +#endif
13062 +
13063 + mov %cs,%rdi
13064 + cmp $__KERNEXEC_KERNEL_CS,%edi
13065 + jnz 2f
13066 + GET_CR0_INTO_RDI
13067 + btr $16,%rdi
13068 + ljmpq __KERNEL_CS,1f
13069 +1: SET_RDI_INTO_CR0
13070 +2:
13071 +
13072 +#ifdef CONFIG_PARAVIRT
13073 + PV_RESTORE_REGS(CLBR_RDI);
13074 +#endif
13075 +
13076 + popq %rdi
13077 + retq
13078 +ENDPROC(pax_exit_kernel)
13079 +#endif
13080 +
13081 + .macro pax_enter_kernel_user
13082 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13083 + call pax_enter_kernel_user
13084 +#endif
13085 + .endm
13086 +
13087 + .macro pax_exit_kernel_user
13088 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13089 + call pax_exit_kernel_user
13090 +#endif
13091 +#ifdef CONFIG_PAX_RANDKSTACK
13092 + push %rax
13093 + call pax_randomize_kstack
13094 + pop %rax
13095 +#endif
13096 + pax_erase_kstack
13097 + .endm
13098 +
13099 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13100 +ENTRY(pax_enter_kernel_user)
13101 + pushq %rdi
13102 + pushq %rbx
13103 +
13104 +#ifdef CONFIG_PARAVIRT
13105 + PV_SAVE_REGS(CLBR_RDI)
13106 +#endif
13107 +
13108 + GET_CR3_INTO_RDI
13109 + mov %rdi,%rbx
13110 + add $__START_KERNEL_map,%rbx
13111 + sub phys_base(%rip),%rbx
13112 +
13113 +#ifdef CONFIG_PARAVIRT
13114 + pushq %rdi
13115 + cmpl $0, pv_info+PARAVIRT_enabled
13116 + jz 1f
13117 + i = 0
13118 + .rept USER_PGD_PTRS
13119 + mov i*8(%rbx),%rsi
13120 + mov $0,%sil
13121 + lea i*8(%rbx),%rdi
13122 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
13123 + i = i + 1
13124 + .endr
13125 + jmp 2f
13126 +1:
13127 +#endif
13128 +
13129 + i = 0
13130 + .rept USER_PGD_PTRS
13131 + movb $0,i*8(%rbx)
13132 + i = i + 1
13133 + .endr
13134 +
13135 +#ifdef CONFIG_PARAVIRT
13136 +2: popq %rdi
13137 +#endif
13138 + SET_RDI_INTO_CR3
13139 +
13140 +#ifdef CONFIG_PAX_KERNEXEC
13141 + GET_CR0_INTO_RDI
13142 + bts $16,%rdi
13143 + SET_RDI_INTO_CR0
13144 +#endif
13145 +
13146 +#ifdef CONFIG_PARAVIRT
13147 + PV_RESTORE_REGS(CLBR_RDI)
13148 +#endif
13149 +
13150 + popq %rbx
13151 + popq %rdi
13152 + retq
13153 +ENDPROC(pax_enter_kernel_user)
13154 +
13155 +ENTRY(pax_exit_kernel_user)
13156 + push %rdi
13157 +
13158 +#ifdef CONFIG_PARAVIRT
13159 + pushq %rbx
13160 + PV_SAVE_REGS(CLBR_RDI)
13161 +#endif
13162 +
13163 +#ifdef CONFIG_PAX_KERNEXEC
13164 + GET_CR0_INTO_RDI
13165 + btr $16,%rdi
13166 + SET_RDI_INTO_CR0
13167 +#endif
13168 +
13169 + GET_CR3_INTO_RDI
13170 + add $__START_KERNEL_map,%rdi
13171 + sub phys_base(%rip),%rdi
13172 +
13173 +#ifdef CONFIG_PARAVIRT
13174 + cmpl $0, pv_info+PARAVIRT_enabled
13175 + jz 1f
13176 + mov %rdi,%rbx
13177 + i = 0
13178 + .rept USER_PGD_PTRS
13179 + mov i*8(%rbx),%rsi
13180 + mov $0x67,%sil
13181 + lea i*8(%rbx),%rdi
13182 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
13183 + i = i + 1
13184 + .endr
13185 + jmp 2f
13186 +1:
13187 +#endif
13188 +
13189 + i = 0
13190 + .rept USER_PGD_PTRS
13191 + movb $0x67,i*8(%rdi)
13192 + i = i + 1
13193 + .endr
13194 +
13195 +#ifdef CONFIG_PARAVIRT
13196 +2: PV_RESTORE_REGS(CLBR_RDI)
13197 + popq %rbx
13198 +#endif
13199 +
13200 + popq %rdi
13201 + retq
13202 +ENDPROC(pax_exit_kernel_user)
13203 +#endif
13204 +
13205 +.macro pax_erase_kstack
13206 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13207 + call pax_erase_kstack
13208 +#endif
13209 +.endm
13210 +
13211 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13212 +/*
13213 + * r10: thread_info
13214 + * rcx, rdx: can be clobbered
13215 + */
13216 +ENTRY(pax_erase_kstack)
13217 + pushq %rdi
13218 + pushq %rax
13219 +
13220 + GET_THREAD_INFO(%r10)
13221 + mov TI_lowest_stack(%r10), %rdi
13222 + mov $-0xBEEF, %rax
13223 + std
13224 +
13225 +1: mov %edi, %ecx
13226 + and $THREAD_SIZE_asm - 1, %ecx
13227 + shr $3, %ecx
13228 + repne scasq
13229 + jecxz 2f
13230 +
13231 + cmp $2*8, %ecx
13232 + jc 2f
13233 +
13234 + mov $2*8, %ecx
13235 + repe scasq
13236 + jecxz 2f
13237 + jne 1b
13238 +
13239 +2: cld
13240 + mov %esp, %ecx
13241 + sub %edi, %ecx
13242 + shr $3, %ecx
13243 + rep stosq
13244 +
13245 + mov TI_task_thread_sp0(%r10), %rdi
13246 + sub $256, %rdi
13247 + mov %rdi, TI_lowest_stack(%r10)
13248 +
13249 + popq %rax
13250 + popq %rdi
13251 + ret
13252 +ENDPROC(pax_erase_kstack)
13253 +#endif
13254
13255 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
13256 #ifdef CONFIG_TRACE_IRQFLAGS
13257 @@ -317,7 +569,7 @@ ENTRY(save_args)
13258 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
13259 movq_cfi rbp, 8 /* push %rbp */
13260 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
13261 - testl $3, CS(%rdi)
13262 + testb $3, CS(%rdi)
13263 je 1f
13264 SWAPGS
13265 /*
13266 @@ -409,7 +661,7 @@ ENTRY(ret_from_fork)
13267
13268 RESTORE_REST
13269
13270 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13271 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13272 je int_ret_from_sys_call
13273
13274 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
13275 @@ -455,7 +707,7 @@ END(ret_from_fork)
13276 ENTRY(system_call)
13277 CFI_STARTPROC simple
13278 CFI_SIGNAL_FRAME
13279 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
13280 + CFI_DEF_CFA rsp,0
13281 CFI_REGISTER rip,rcx
13282 /*CFI_REGISTER rflags,r11*/
13283 SWAPGS_UNSAFE_STACK
13284 @@ -468,12 +720,13 @@ ENTRY(system_call_after_swapgs)
13285
13286 movq %rsp,PER_CPU_VAR(old_rsp)
13287 movq PER_CPU_VAR(kernel_stack),%rsp
13288 + pax_enter_kernel_user
13289 /*
13290 * No need to follow this irqs off/on section - it's straight
13291 * and short:
13292 */
13293 ENABLE_INTERRUPTS(CLBR_NONE)
13294 - SAVE_ARGS 8,1
13295 + SAVE_ARGS 8*6,1
13296 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
13297 movq %rcx,RIP-ARGOFFSET(%rsp)
13298 CFI_REL_OFFSET rip,RIP-ARGOFFSET
13299 @@ -502,6 +755,7 @@ sysret_check:
13300 andl %edi,%edx
13301 jnz sysret_careful
13302 CFI_REMEMBER_STATE
13303 + pax_exit_kernel_user
13304 /*
13305 * sysretq will re-enable interrupts:
13306 */
13307 @@ -562,6 +816,9 @@ auditsys:
13308 movq %rax,%rsi /* 2nd arg: syscall number */
13309 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
13310 call audit_syscall_entry
13311 +
13312 + pax_erase_kstack
13313 +
13314 LOAD_ARGS 0 /* reload call-clobbered registers */
13315 jmp system_call_fastpath
13316
13317 @@ -592,6 +849,9 @@ tracesys:
13318 FIXUP_TOP_OF_STACK %rdi
13319 movq %rsp,%rdi
13320 call syscall_trace_enter
13321 +
13322 + pax_erase_kstack
13323 +
13324 /*
13325 * Reload arg registers from stack in case ptrace changed them.
13326 * We don't reload %rax because syscall_trace_enter() returned
13327 @@ -613,7 +873,7 @@ tracesys:
13328 GLOBAL(int_ret_from_sys_call)
13329 DISABLE_INTERRUPTS(CLBR_NONE)
13330 TRACE_IRQS_OFF
13331 - testl $3,CS-ARGOFFSET(%rsp)
13332 + testb $3,CS-ARGOFFSET(%rsp)
13333 je retint_restore_args
13334 movl $_TIF_ALLWORK_MASK,%edi
13335 /* edi: mask to check */
13336 @@ -800,6 +1060,16 @@ END(interrupt)
13337 CFI_ADJUST_CFA_OFFSET 10*8
13338 call save_args
13339 PARTIAL_FRAME 0
13340 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13341 + testb $3, CS(%rdi)
13342 + jnz 1f
13343 + pax_enter_kernel
13344 + jmp 2f
13345 +1: pax_enter_kernel_user
13346 +2:
13347 +#else
13348 + pax_enter_kernel
13349 +#endif
13350 call \func
13351 .endm
13352
13353 @@ -822,7 +1092,7 @@ ret_from_intr:
13354 CFI_ADJUST_CFA_OFFSET -8
13355 exit_intr:
13356 GET_THREAD_INFO(%rcx)
13357 - testl $3,CS-ARGOFFSET(%rsp)
13358 + testb $3,CS-ARGOFFSET(%rsp)
13359 je retint_kernel
13360
13361 /* Interrupt came from user space */
13362 @@ -844,12 +1114,14 @@ retint_swapgs: /* return to user-space
13363 * The iretq could re-enable interrupts:
13364 */
13365 DISABLE_INTERRUPTS(CLBR_ANY)
13366 + pax_exit_kernel_user
13367 TRACE_IRQS_IRETQ
13368 SWAPGS
13369 jmp restore_args
13370
13371 retint_restore_args: /* return to kernel space */
13372 DISABLE_INTERRUPTS(CLBR_ANY)
13373 + pax_exit_kernel
13374 /*
13375 * The iretq could re-enable interrupts:
13376 */
13377 @@ -1032,6 +1304,16 @@ ENTRY(\sym)
13378 CFI_ADJUST_CFA_OFFSET 15*8
13379 call error_entry
13380 DEFAULT_FRAME 0
13381 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13382 + testb $3, CS(%rsp)
13383 + jnz 1f
13384 + pax_enter_kernel
13385 + jmp 2f
13386 +1: pax_enter_kernel_user
13387 +2:
13388 +#else
13389 + pax_enter_kernel
13390 +#endif
13391 movq %rsp,%rdi /* pt_regs pointer */
13392 xorl %esi,%esi /* no error code */
13393 call \do_sym
13394 @@ -1049,6 +1331,16 @@ ENTRY(\sym)
13395 subq $15*8, %rsp
13396 call save_paranoid
13397 TRACE_IRQS_OFF
13398 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13399 + testb $3, CS(%rsp)
13400 + jnz 1f
13401 + pax_enter_kernel
13402 + jmp 2f
13403 +1: pax_enter_kernel_user
13404 +2:
13405 +#else
13406 + pax_enter_kernel
13407 +#endif
13408 movq %rsp,%rdi /* pt_regs pointer */
13409 xorl %esi,%esi /* no error code */
13410 call \do_sym
13411 @@ -1066,9 +1358,24 @@ ENTRY(\sym)
13412 subq $15*8, %rsp
13413 call save_paranoid
13414 TRACE_IRQS_OFF
13415 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13416 + testb $3, CS(%rsp)
13417 + jnz 1f
13418 + pax_enter_kernel
13419 + jmp 2f
13420 +1: pax_enter_kernel_user
13421 +2:
13422 +#else
13423 + pax_enter_kernel
13424 +#endif
13425 movq %rsp,%rdi /* pt_regs pointer */
13426 xorl %esi,%esi /* no error code */
13427 - PER_CPU(init_tss, %rbp)
13428 +#ifdef CONFIG_SMP
13429 + imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
13430 + lea init_tss(%rbp), %rbp
13431 +#else
13432 + lea init_tss(%rip), %rbp
13433 +#endif
13434 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
13435 call \do_sym
13436 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
13437 @@ -1085,6 +1392,16 @@ ENTRY(\sym)
13438 CFI_ADJUST_CFA_OFFSET 15*8
13439 call error_entry
13440 DEFAULT_FRAME 0
13441 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13442 + testb $3, CS(%rsp)
13443 + jnz 1f
13444 + pax_enter_kernel
13445 + jmp 2f
13446 +1: pax_enter_kernel_user
13447 +2:
13448 +#else
13449 + pax_enter_kernel
13450 +#endif
13451 movq %rsp,%rdi /* pt_regs pointer */
13452 movq ORIG_RAX(%rsp),%rsi /* get error code */
13453 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
13454 @@ -1104,6 +1421,16 @@ ENTRY(\sym)
13455 call save_paranoid
13456 DEFAULT_FRAME 0
13457 TRACE_IRQS_OFF
13458 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13459 + testb $3, CS(%rsp)
13460 + jnz 1f
13461 + pax_enter_kernel
13462 + jmp 2f
13463 +1: pax_enter_kernel_user
13464 +2:
13465 +#else
13466 + pax_enter_kernel
13467 +#endif
13468 movq %rsp,%rdi /* pt_regs pointer */
13469 movq ORIG_RAX(%rsp),%rsi /* get error code */
13470 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
13471 @@ -1405,14 +1732,27 @@ ENTRY(paranoid_exit)
13472 TRACE_IRQS_OFF
13473 testl %ebx,%ebx /* swapgs needed? */
13474 jnz paranoid_restore
13475 - testl $3,CS(%rsp)
13476 + testb $3,CS(%rsp)
13477 jnz paranoid_userspace
13478 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13479 + pax_exit_kernel
13480 + TRACE_IRQS_IRETQ 0
13481 + SWAPGS_UNSAFE_STACK
13482 + RESTORE_ALL 8
13483 + jmp irq_return
13484 +#endif
13485 paranoid_swapgs:
13486 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13487 + pax_exit_kernel_user
13488 +#else
13489 + pax_exit_kernel
13490 +#endif
13491 TRACE_IRQS_IRETQ 0
13492 SWAPGS_UNSAFE_STACK
13493 RESTORE_ALL 8
13494 jmp irq_return
13495 paranoid_restore:
13496 + pax_exit_kernel
13497 TRACE_IRQS_IRETQ 0
13498 RESTORE_ALL 8
13499 jmp irq_return
13500 @@ -1470,7 +1810,7 @@ ENTRY(error_entry)
13501 movq_cfi r14, R14+8
13502 movq_cfi r15, R15+8
13503 xorl %ebx,%ebx
13504 - testl $3,CS+8(%rsp)
13505 + testb $3,CS+8(%rsp)
13506 je error_kernelspace
13507 error_swapgs:
13508 SWAPGS
13509 @@ -1529,6 +1869,16 @@ ENTRY(nmi)
13510 CFI_ADJUST_CFA_OFFSET 15*8
13511 call save_paranoid
13512 DEFAULT_FRAME 0
13513 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13514 + testb $3, CS(%rsp)
13515 + jnz 1f
13516 + pax_enter_kernel
13517 + jmp 2f
13518 +1: pax_enter_kernel_user
13519 +2:
13520 +#else
13521 + pax_enter_kernel
13522 +#endif
13523 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
13524 movq %rsp,%rdi
13525 movq $-1,%rsi
13526 @@ -1539,11 +1889,25 @@ ENTRY(nmi)
13527 DISABLE_INTERRUPTS(CLBR_NONE)
13528 testl %ebx,%ebx /* swapgs needed? */
13529 jnz nmi_restore
13530 - testl $3,CS(%rsp)
13531 + testb $3,CS(%rsp)
13532 jnz nmi_userspace
13533 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13534 + pax_exit_kernel
13535 + SWAPGS_UNSAFE_STACK
13536 + RESTORE_ALL 8
13537 + jmp irq_return
13538 +#endif
13539 nmi_swapgs:
13540 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13541 + pax_exit_kernel_user
13542 +#else
13543 + pax_exit_kernel
13544 +#endif
13545 SWAPGS_UNSAFE_STACK
13546 + RESTORE_ALL 8
13547 + jmp irq_return
13548 nmi_restore:
13549 + pax_exit_kernel
13550 RESTORE_ALL 8
13551 jmp irq_return
13552 nmi_userspace:
13553 diff -urNp linux-2.6.32.41/arch/x86/kernel/ftrace.c linux-2.6.32.41/arch/x86/kernel/ftrace.c
13554 --- linux-2.6.32.41/arch/x86/kernel/ftrace.c 2011-03-27 14:31:47.000000000 -0400
13555 +++ linux-2.6.32.41/arch/x86/kernel/ftrace.c 2011-05-04 17:56:20.000000000 -0400
13556 @@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the
13557 static void *mod_code_newcode; /* holds the text to write to the IP */
13558
13559 static unsigned nmi_wait_count;
13560 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
13561 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
13562
13563 int ftrace_arch_read_dyn_info(char *buf, int size)
13564 {
13565 @@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf,
13566
13567 r = snprintf(buf, size, "%u %u",
13568 nmi_wait_count,
13569 - atomic_read(&nmi_update_count));
13570 + atomic_read_unchecked(&nmi_update_count));
13571 return r;
13572 }
13573
13574 @@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
13575 {
13576 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
13577 smp_rmb();
13578 + pax_open_kernel();
13579 ftrace_mod_code();
13580 - atomic_inc(&nmi_update_count);
13581 + pax_close_kernel();
13582 + atomic_inc_unchecked(&nmi_update_count);
13583 }
13584 /* Must have previous changes seen before executions */
13585 smp_mb();
13586 @@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, voi
13587
13588
13589
13590 -static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
13591 +static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
13592
13593 static unsigned char *ftrace_nop_replace(void)
13594 {
13595 @@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, uns
13596 {
13597 unsigned char replaced[MCOUNT_INSN_SIZE];
13598
13599 + ip = ktla_ktva(ip);
13600 +
13601 /*
13602 * Note: Due to modules and __init, code can
13603 * disappear and change, we need to protect against faulting
13604 @@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_fun
13605 unsigned char old[MCOUNT_INSN_SIZE], *new;
13606 int ret;
13607
13608 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
13609 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
13610 new = ftrace_call_replace(ip, (unsigned long)func);
13611 ret = ftrace_modify_code(ip, old, new);
13612
13613 @@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *da
13614 switch (faulted) {
13615 case 0:
13616 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
13617 - memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
13618 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
13619 break;
13620 case 1:
13621 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
13622 - memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
13623 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
13624 break;
13625 case 2:
13626 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
13627 - memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
13628 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
13629 break;
13630 }
13631
13632 @@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long
13633 {
13634 unsigned char code[MCOUNT_INSN_SIZE];
13635
13636 + ip = ktla_ktva(ip);
13637 +
13638 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
13639 return -EFAULT;
13640
13641 diff -urNp linux-2.6.32.41/arch/x86/kernel/head32.c linux-2.6.32.41/arch/x86/kernel/head32.c
13642 --- linux-2.6.32.41/arch/x86/kernel/head32.c 2011-03-27 14:31:47.000000000 -0400
13643 +++ linux-2.6.32.41/arch/x86/kernel/head32.c 2011-04-17 15:56:46.000000000 -0400
13644 @@ -16,6 +16,7 @@
13645 #include <asm/apic.h>
13646 #include <asm/io_apic.h>
13647 #include <asm/bios_ebda.h>
13648 +#include <asm/boot.h>
13649
13650 static void __init i386_default_early_setup(void)
13651 {
13652 @@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
13653 {
13654 reserve_trampoline_memory();
13655
13656 - reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
13657 + reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
13658
13659 #ifdef CONFIG_BLK_DEV_INITRD
13660 /* Reserve INITRD */
13661 diff -urNp linux-2.6.32.41/arch/x86/kernel/head_32.S linux-2.6.32.41/arch/x86/kernel/head_32.S
13662 --- linux-2.6.32.41/arch/x86/kernel/head_32.S 2011-03-27 14:31:47.000000000 -0400
13663 +++ linux-2.6.32.41/arch/x86/kernel/head_32.S 2011-04-17 15:56:46.000000000 -0400
13664 @@ -19,10 +19,17 @@
13665 #include <asm/setup.h>
13666 #include <asm/processor-flags.h>
13667 #include <asm/percpu.h>
13668 +#include <asm/msr-index.h>
13669
13670 /* Physical address */
13671 #define pa(X) ((X) - __PAGE_OFFSET)
13672
13673 +#ifdef CONFIG_PAX_KERNEXEC
13674 +#define ta(X) (X)
13675 +#else
13676 +#define ta(X) ((X) - __PAGE_OFFSET)
13677 +#endif
13678 +
13679 /*
13680 * References to members of the new_cpu_data structure.
13681 */
13682 @@ -52,11 +59,7 @@
13683 * and small than max_low_pfn, otherwise will waste some page table entries
13684 */
13685
13686 -#if PTRS_PER_PMD > 1
13687 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
13688 -#else
13689 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
13690 -#endif
13691 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
13692
13693 /* Enough space to fit pagetables for the low memory linear map */
13694 MAPPING_BEYOND_END = \
13695 @@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
13696 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
13697
13698 /*
13699 + * Real beginning of normal "text" segment
13700 + */
13701 +ENTRY(stext)
13702 +ENTRY(_stext)
13703 +
13704 +/*
13705 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
13706 * %esi points to the real-mode code as a 32-bit pointer.
13707 * CS and DS must be 4 GB flat segments, but we don't depend on
13708 @@ -80,6 +89,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
13709 * can.
13710 */
13711 __HEAD
13712 +
13713 +#ifdef CONFIG_PAX_KERNEXEC
13714 + jmp startup_32
13715 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
13716 +.fill PAGE_SIZE-5,1,0xcc
13717 +#endif
13718 +
13719 ENTRY(startup_32)
13720 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
13721 us to not reload segments */
13722 @@ -97,6 +113,57 @@ ENTRY(startup_32)
13723 movl %eax,%gs
13724 2:
13725
13726 +#ifdef CONFIG_SMP
13727 + movl $pa(cpu_gdt_table),%edi
13728 + movl $__per_cpu_load,%eax
13729 + movw %ax,__KERNEL_PERCPU + 2(%edi)
13730 + rorl $16,%eax
13731 + movb %al,__KERNEL_PERCPU + 4(%edi)
13732 + movb %ah,__KERNEL_PERCPU + 7(%edi)
13733 + movl $__per_cpu_end - 1,%eax
13734 + subl $__per_cpu_start,%eax
13735 + movw %ax,__KERNEL_PERCPU + 0(%edi)
13736 +#endif
13737 +
13738 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13739 + movl $NR_CPUS,%ecx
13740 + movl $pa(cpu_gdt_table),%edi
13741 +1:
13742 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
13743 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
13744 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
13745 + addl $PAGE_SIZE_asm,%edi
13746 + loop 1b
13747 +#endif
13748 +
13749 +#ifdef CONFIG_PAX_KERNEXEC
13750 + movl $pa(boot_gdt),%edi
13751 + movl $__LOAD_PHYSICAL_ADDR,%eax
13752 + movw %ax,__BOOT_CS + 2(%edi)
13753 + rorl $16,%eax
13754 + movb %al,__BOOT_CS + 4(%edi)
13755 + movb %ah,__BOOT_CS + 7(%edi)
13756 + rorl $16,%eax
13757 +
13758 + ljmp $(__BOOT_CS),$1f
13759 +1:
13760 +
13761 + movl $NR_CPUS,%ecx
13762 + movl $pa(cpu_gdt_table),%edi
13763 + addl $__PAGE_OFFSET,%eax
13764 +1:
13765 + movw %ax,__KERNEL_CS + 2(%edi)
13766 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
13767 + rorl $16,%eax
13768 + movb %al,__KERNEL_CS + 4(%edi)
13769 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
13770 + movb %ah,__KERNEL_CS + 7(%edi)
13771 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
13772 + rorl $16,%eax
13773 + addl $PAGE_SIZE_asm,%edi
13774 + loop 1b
13775 +#endif
13776 +
13777 /*
13778 * Clear BSS first so that there are no surprises...
13779 */
13780 @@ -140,9 +207,7 @@ ENTRY(startup_32)
13781 cmpl $num_subarch_entries, %eax
13782 jae bad_subarch
13783
13784 - movl pa(subarch_entries)(,%eax,4), %eax
13785 - subl $__PAGE_OFFSET, %eax
13786 - jmp *%eax
13787 + jmp *pa(subarch_entries)(,%eax,4)
13788
13789 bad_subarch:
13790 WEAK(lguest_entry)
13791 @@ -154,10 +219,10 @@ WEAK(xen_entry)
13792 __INITDATA
13793
13794 subarch_entries:
13795 - .long default_entry /* normal x86/PC */
13796 - .long lguest_entry /* lguest hypervisor */
13797 - .long xen_entry /* Xen hypervisor */
13798 - .long default_entry /* Moorestown MID */
13799 + .long ta(default_entry) /* normal x86/PC */
13800 + .long ta(lguest_entry) /* lguest hypervisor */
13801 + .long ta(xen_entry) /* Xen hypervisor */
13802 + .long ta(default_entry) /* Moorestown MID */
13803 num_subarch_entries = (. - subarch_entries) / 4
13804 .previous
13805 #endif /* CONFIG_PARAVIRT */
13806 @@ -218,8 +283,11 @@ default_entry:
13807 movl %eax, pa(max_pfn_mapped)
13808
13809 /* Do early initialization of the fixmap area */
13810 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
13811 - movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
13812 +#ifdef CONFIG_COMPAT_VDSO
13813 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
13814 +#else
13815 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
13816 +#endif
13817 #else /* Not PAE */
13818
13819 page_pde_offset = (__PAGE_OFFSET >> 20);
13820 @@ -249,8 +317,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
13821 movl %eax, pa(max_pfn_mapped)
13822
13823 /* Do early initialization of the fixmap area */
13824 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
13825 - movl %eax,pa(swapper_pg_dir+0xffc)
13826 +#ifdef CONFIG_COMPAT_VDSO
13827 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
13828 +#else
13829 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
13830 +#endif
13831 #endif
13832 jmp 3f
13833 /*
13834 @@ -297,6 +368,7 @@ ENTRY(startup_32_smp)
13835 orl %edx,%eax
13836 movl %eax,%cr4
13837
13838 +#ifdef CONFIG_X86_PAE
13839 btl $5, %eax # check if PAE is enabled
13840 jnc 6f
13841
13842 @@ -312,13 +384,17 @@ ENTRY(startup_32_smp)
13843 jnc 6f
13844
13845 /* Setup EFER (Extended Feature Enable Register) */
13846 - movl $0xc0000080, %ecx
13847 + movl $MSR_EFER, %ecx
13848 rdmsr
13849
13850 btsl $11, %eax
13851 /* Make changes effective */
13852 wrmsr
13853
13854 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
13855 + movl $1,pa(nx_enabled)
13856 +#endif
13857 +
13858 6:
13859
13860 /*
13861 @@ -344,9 +420,7 @@ ENTRY(startup_32_smp)
13862
13863 #ifdef CONFIG_SMP
13864 cmpb $0, ready
13865 - jz 1f /* Initial CPU cleans BSS */
13866 - jmp checkCPUtype
13867 -1:
13868 + jnz checkCPUtype /* Initial CPU cleans BSS */
13869 #endif /* CONFIG_SMP */
13870
13871 /*
13872 @@ -424,7 +498,7 @@ is386: movl $2,%ecx # set MP
13873 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
13874 movl %eax,%ss # after changing gdt.
13875
13876 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
13877 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
13878 movl %eax,%ds
13879 movl %eax,%es
13880
13881 @@ -438,15 +512,22 @@ is386: movl $2,%ecx # set MP
13882 */
13883 cmpb $0,ready
13884 jne 1f
13885 - movl $per_cpu__gdt_page,%eax
13886 + movl $cpu_gdt_table,%eax
13887 movl $per_cpu__stack_canary,%ecx
13888 +#ifdef CONFIG_SMP
13889 + addl $__per_cpu_load,%ecx
13890 +#endif
13891 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
13892 shrl $16, %ecx
13893 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
13894 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
13895 1:
13896 -#endif
13897 movl $(__KERNEL_STACK_CANARY),%eax
13898 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
13899 + movl $(__USER_DS),%eax
13900 +#else
13901 + xorl %eax,%eax
13902 +#endif
13903 movl %eax,%gs
13904
13905 xorl %eax,%eax # Clear LDT
13906 @@ -457,10 +538,6 @@ is386: movl $2,%ecx # set MP
13907 #ifdef CONFIG_SMP
13908 movb ready, %cl
13909 movb $1, ready
13910 - cmpb $0,%cl # the first CPU calls start_kernel
13911 - je 1f
13912 - movl (stack_start), %esp
13913 -1:
13914 #endif /* CONFIG_SMP */
13915 jmp *(initial_code)
13916
13917 @@ -546,22 +623,22 @@ early_page_fault:
13918 jmp early_fault
13919
13920 early_fault:
13921 - cld
13922 #ifdef CONFIG_PRINTK
13923 + cmpl $1,%ss:early_recursion_flag
13924 + je hlt_loop
13925 + incl %ss:early_recursion_flag
13926 + cld
13927 pusha
13928 movl $(__KERNEL_DS),%eax
13929 movl %eax,%ds
13930 movl %eax,%es
13931 - cmpl $2,early_recursion_flag
13932 - je hlt_loop
13933 - incl early_recursion_flag
13934 movl %cr2,%eax
13935 pushl %eax
13936 pushl %edx /* trapno */
13937 pushl $fault_msg
13938 call printk
13939 +; call dump_stack
13940 #endif
13941 - call dump_stack
13942 hlt_loop:
13943 hlt
13944 jmp hlt_loop
13945 @@ -569,8 +646,11 @@ hlt_loop:
13946 /* This is the default interrupt "handler" :-) */
13947 ALIGN
13948 ignore_int:
13949 - cld
13950 #ifdef CONFIG_PRINTK
13951 + cmpl $2,%ss:early_recursion_flag
13952 + je hlt_loop
13953 + incl %ss:early_recursion_flag
13954 + cld
13955 pushl %eax
13956 pushl %ecx
13957 pushl %edx
13958 @@ -579,9 +659,6 @@ ignore_int:
13959 movl $(__KERNEL_DS),%eax
13960 movl %eax,%ds
13961 movl %eax,%es
13962 - cmpl $2,early_recursion_flag
13963 - je hlt_loop
13964 - incl early_recursion_flag
13965 pushl 16(%esp)
13966 pushl 24(%esp)
13967 pushl 32(%esp)
13968 @@ -610,31 +687,47 @@ ENTRY(initial_page_table)
13969 /*
13970 * BSS section
13971 */
13972 -__PAGE_ALIGNED_BSS
13973 - .align PAGE_SIZE_asm
13974 #ifdef CONFIG_X86_PAE
13975 +.section .swapper_pg_pmd,"a",@progbits
13976 swapper_pg_pmd:
13977 .fill 1024*KPMDS,4,0
13978 #else
13979 +.section .swapper_pg_dir,"a",@progbits
13980 ENTRY(swapper_pg_dir)
13981 .fill 1024,4,0
13982 #endif
13983 +.section .swapper_pg_fixmap,"a",@progbits
13984 swapper_pg_fixmap:
13985 .fill 1024,4,0
13986 #ifdef CONFIG_X86_TRAMPOLINE
13987 +.section .trampoline_pg_dir,"a",@progbits
13988 ENTRY(trampoline_pg_dir)
13989 +#ifdef CONFIG_X86_PAE
13990 + .fill 4,8,0
13991 +#else
13992 .fill 1024,4,0
13993 #endif
13994 +#endif
13995 +
13996 +.section .empty_zero_page,"a",@progbits
13997 ENTRY(empty_zero_page)
13998 .fill 4096,1,0
13999
14000 /*
14001 + * The IDT has to be page-aligned to simplify the Pentium
14002 + * F0 0F bug workaround.. We have a special link segment
14003 + * for this.
14004 + */
14005 +.section .idt,"a",@progbits
14006 +ENTRY(idt_table)
14007 + .fill 256,8,0
14008 +
14009 +/*
14010 * This starts the data section.
14011 */
14012 #ifdef CONFIG_X86_PAE
14013 -__PAGE_ALIGNED_DATA
14014 - /* Page-aligned for the benefit of paravirt? */
14015 - .align PAGE_SIZE_asm
14016 +.section .swapper_pg_dir,"a",@progbits
14017 +
14018 ENTRY(swapper_pg_dir)
14019 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
14020 # if KPMDS == 3
14021 @@ -653,15 +746,24 @@ ENTRY(swapper_pg_dir)
14022 # error "Kernel PMDs should be 1, 2 or 3"
14023 # endif
14024 .align PAGE_SIZE_asm /* needs to be page-sized too */
14025 +
14026 +#ifdef CONFIG_PAX_PER_CPU_PGD
14027 +ENTRY(cpu_pgd)
14028 + .rept NR_CPUS
14029 + .fill 4,8,0
14030 + .endr
14031 +#endif
14032 +
14033 #endif
14034
14035 .data
14036 ENTRY(stack_start)
14037 - .long init_thread_union+THREAD_SIZE
14038 + .long init_thread_union+THREAD_SIZE-8
14039 .long __BOOT_DS
14040
14041 ready: .byte 0
14042
14043 +.section .rodata,"a",@progbits
14044 early_recursion_flag:
14045 .long 0
14046
14047 @@ -697,7 +799,7 @@ fault_msg:
14048 .word 0 # 32 bit align gdt_desc.address
14049 boot_gdt_descr:
14050 .word __BOOT_DS+7
14051 - .long boot_gdt - __PAGE_OFFSET
14052 + .long pa(boot_gdt)
14053
14054 .word 0 # 32-bit align idt_desc.address
14055 idt_descr:
14056 @@ -708,7 +810,7 @@ idt_descr:
14057 .word 0 # 32 bit align gdt_desc.address
14058 ENTRY(early_gdt_descr)
14059 .word GDT_ENTRIES*8-1
14060 - .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
14061 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
14062
14063 /*
14064 * The boot_gdt must mirror the equivalent in setup.S and is
14065 @@ -717,5 +819,65 @@ ENTRY(early_gdt_descr)
14066 .align L1_CACHE_BYTES
14067 ENTRY(boot_gdt)
14068 .fill GDT_ENTRY_BOOT_CS,8,0
14069 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
14070 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
14071 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
14072 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
14073 +
14074 + .align PAGE_SIZE_asm
14075 +ENTRY(cpu_gdt_table)
14076 + .rept NR_CPUS
14077 + .quad 0x0000000000000000 /* NULL descriptor */
14078 + .quad 0x0000000000000000 /* 0x0b reserved */
14079 + .quad 0x0000000000000000 /* 0x13 reserved */
14080 + .quad 0x0000000000000000 /* 0x1b reserved */
14081 +
14082 +#ifdef CONFIG_PAX_KERNEXEC
14083 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
14084 +#else
14085 + .quad 0x0000000000000000 /* 0x20 unused */
14086 +#endif
14087 +
14088 + .quad 0x0000000000000000 /* 0x28 unused */
14089 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
14090 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
14091 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
14092 + .quad 0x0000000000000000 /* 0x4b reserved */
14093 + .quad 0x0000000000000000 /* 0x53 reserved */
14094 + .quad 0x0000000000000000 /* 0x5b reserved */
14095 +
14096 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
14097 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
14098 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
14099 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
14100 +
14101 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
14102 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
14103 +
14104 + /*
14105 + * Segments used for calling PnP BIOS have byte granularity.
14106 + * The code segments and data segments have fixed 64k limits,
14107 + * the transfer segment sizes are set at run time.
14108 + */
14109 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
14110 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
14111 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
14112 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
14113 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
14114 +
14115 + /*
14116 + * The APM segments have byte granularity and their bases
14117 + * are set at run time. All have 64k limits.
14118 + */
14119 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
14120 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
14121 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
14122 +
14123 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
14124 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
14125 + .quad 0x0040910000000018 /* 0xe0 - STACK_CANARY */
14126 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
14127 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
14128 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
14129 +
14130 + /* Be sure this is zeroed to avoid false validations in Xen */
14131 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
14132 + .endr
14133 diff -urNp linux-2.6.32.41/arch/x86/kernel/head_64.S linux-2.6.32.41/arch/x86/kernel/head_64.S
14134 --- linux-2.6.32.41/arch/x86/kernel/head_64.S 2011-03-27 14:31:47.000000000 -0400
14135 +++ linux-2.6.32.41/arch/x86/kernel/head_64.S 2011-04-17 15:56:46.000000000 -0400
14136 @@ -19,6 +19,7 @@
14137 #include <asm/cache.h>
14138 #include <asm/processor-flags.h>
14139 #include <asm/percpu.h>
14140 +#include <asm/cpufeature.h>
14141
14142 #ifdef CONFIG_PARAVIRT
14143 #include <asm/asm-offsets.h>
14144 @@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
14145 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
14146 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
14147 L3_START_KERNEL = pud_index(__START_KERNEL_map)
14148 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
14149 +L3_VMALLOC_START = pud_index(VMALLOC_START)
14150 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
14151 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
14152
14153 .text
14154 __HEAD
14155 @@ -85,35 +90,22 @@ startup_64:
14156 */
14157 addq %rbp, init_level4_pgt + 0(%rip)
14158 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
14159 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
14160 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
14161 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
14162
14163 addq %rbp, level3_ident_pgt + 0(%rip)
14164 +#ifndef CONFIG_XEN
14165 + addq %rbp, level3_ident_pgt + 8(%rip)
14166 +#endif
14167
14168 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
14169 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
14170 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
14171
14172 - addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14173 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
14174 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
14175
14176 - /* Add an Identity mapping if I am above 1G */
14177 - leaq _text(%rip), %rdi
14178 - andq $PMD_PAGE_MASK, %rdi
14179 -
14180 - movq %rdi, %rax
14181 - shrq $PUD_SHIFT, %rax
14182 - andq $(PTRS_PER_PUD - 1), %rax
14183 - jz ident_complete
14184 -
14185 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
14186 - leaq level3_ident_pgt(%rip), %rbx
14187 - movq %rdx, 0(%rbx, %rax, 8)
14188 -
14189 - movq %rdi, %rax
14190 - shrq $PMD_SHIFT, %rax
14191 - andq $(PTRS_PER_PMD - 1), %rax
14192 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
14193 - leaq level2_spare_pgt(%rip), %rbx
14194 - movq %rdx, 0(%rbx, %rax, 8)
14195 -ident_complete:
14196 + addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14197 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
14198
14199 /*
14200 * Fixup the kernel text+data virtual addresses. Note that
14201 @@ -161,8 +153,8 @@ ENTRY(secondary_startup_64)
14202 * after the boot processor executes this code.
14203 */
14204
14205 - /* Enable PAE mode and PGE */
14206 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
14207 + /* Enable PAE mode and PSE/PGE */
14208 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
14209 movq %rax, %cr4
14210
14211 /* Setup early boot stage 4 level pagetables. */
14212 @@ -184,9 +176,13 @@ ENTRY(secondary_startup_64)
14213 movl $MSR_EFER, %ecx
14214 rdmsr
14215 btsl $_EFER_SCE, %eax /* Enable System Call */
14216 - btl $20,%edi /* No Execute supported? */
14217 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
14218 jnc 1f
14219 btsl $_EFER_NX, %eax
14220 + leaq init_level4_pgt(%rip), %rdi
14221 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
14222 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
14223 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
14224 1: wrmsr /* Make changes effective */
14225
14226 /* Setup cr0 */
14227 @@ -262,16 +258,16 @@ ENTRY(secondary_startup_64)
14228 .quad x86_64_start_kernel
14229 ENTRY(initial_gs)
14230 .quad INIT_PER_CPU_VAR(irq_stack_union)
14231 - __FINITDATA
14232
14233 ENTRY(stack_start)
14234 .quad init_thread_union+THREAD_SIZE-8
14235 .word 0
14236 + __FINITDATA
14237
14238 bad_address:
14239 jmp bad_address
14240
14241 - .section ".init.text","ax"
14242 + __INIT
14243 #ifdef CONFIG_EARLY_PRINTK
14244 .globl early_idt_handlers
14245 early_idt_handlers:
14246 @@ -316,18 +312,23 @@ ENTRY(early_idt_handler)
14247 #endif /* EARLY_PRINTK */
14248 1: hlt
14249 jmp 1b
14250 + .previous
14251
14252 #ifdef CONFIG_EARLY_PRINTK
14253 + __INITDATA
14254 early_recursion_flag:
14255 .long 0
14256 + .previous
14257
14258 + .section .rodata,"a",@progbits
14259 early_idt_msg:
14260 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
14261 early_idt_ripmsg:
14262 .asciz "RIP %s\n"
14263 -#endif /* CONFIG_EARLY_PRINTK */
14264 .previous
14265 +#endif /* CONFIG_EARLY_PRINTK */
14266
14267 + .section .rodata,"a",@progbits
14268 #define NEXT_PAGE(name) \
14269 .balign PAGE_SIZE; \
14270 ENTRY(name)
14271 @@ -350,13 +351,36 @@ NEXT_PAGE(init_level4_pgt)
14272 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14273 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
14274 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14275 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
14276 + .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
14277 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
14278 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
14279 .org init_level4_pgt + L4_START_KERNEL*8, 0
14280 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
14281 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
14282
14283 +#ifdef CONFIG_PAX_PER_CPU_PGD
14284 +NEXT_PAGE(cpu_pgd)
14285 + .rept NR_CPUS
14286 + .fill 512,8,0
14287 + .endr
14288 +#endif
14289 +
14290 NEXT_PAGE(level3_ident_pgt)
14291 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14292 +#ifdef CONFIG_XEN
14293 .fill 511,8,0
14294 +#else
14295 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
14296 + .fill 510,8,0
14297 +#endif
14298 +
14299 +NEXT_PAGE(level3_vmalloc_pgt)
14300 + .fill 512,8,0
14301 +
14302 +NEXT_PAGE(level3_vmemmap_pgt)
14303 + .fill L3_VMEMMAP_START,8,0
14304 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
14305
14306 NEXT_PAGE(level3_kernel_pgt)
14307 .fill L3_START_KERNEL,8,0
14308 @@ -364,20 +388,23 @@ NEXT_PAGE(level3_kernel_pgt)
14309 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
14310 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
14311
14312 +NEXT_PAGE(level2_vmemmap_pgt)
14313 + .fill 512,8,0
14314 +
14315 NEXT_PAGE(level2_fixmap_pgt)
14316 - .fill 506,8,0
14317 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
14318 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
14319 - .fill 5,8,0
14320 + .fill 507,8,0
14321 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
14322 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
14323 + .fill 4,8,0
14324
14325 -NEXT_PAGE(level1_fixmap_pgt)
14326 +NEXT_PAGE(level1_vsyscall_pgt)
14327 .fill 512,8,0
14328
14329 -NEXT_PAGE(level2_ident_pgt)
14330 - /* Since I easily can, map the first 1G.
14331 + /* Since I easily can, map the first 2G.
14332 * Don't set NX because code runs from these pages.
14333 */
14334 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
14335 +NEXT_PAGE(level2_ident_pgt)
14336 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
14337
14338 NEXT_PAGE(level2_kernel_pgt)
14339 /*
14340 @@ -390,33 +417,55 @@ NEXT_PAGE(level2_kernel_pgt)
14341 * If you want to increase this then increase MODULES_VADDR
14342 * too.)
14343 */
14344 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
14345 - KERNEL_IMAGE_SIZE/PMD_SIZE)
14346 -
14347 -NEXT_PAGE(level2_spare_pgt)
14348 - .fill 512, 8, 0
14349 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
14350
14351 #undef PMDS
14352 #undef NEXT_PAGE
14353
14354 - .data
14355 + .align PAGE_SIZE
14356 +ENTRY(cpu_gdt_table)
14357 + .rept NR_CPUS
14358 + .quad 0x0000000000000000 /* NULL descriptor */
14359 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
14360 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
14361 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
14362 + .quad 0x00cffb000000ffff /* __USER32_CS */
14363 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
14364 + .quad 0x00affb000000ffff /* __USER_CS */
14365 +
14366 +#ifdef CONFIG_PAX_KERNEXEC
14367 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
14368 +#else
14369 + .quad 0x0 /* unused */
14370 +#endif
14371 +
14372 + .quad 0,0 /* TSS */
14373 + .quad 0,0 /* LDT */
14374 + .quad 0,0,0 /* three TLS descriptors */
14375 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
14376 + /* asm/segment.h:GDT_ENTRIES must match this */
14377 +
14378 + /* zero the remaining page */
14379 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
14380 + .endr
14381 +
14382 .align 16
14383 .globl early_gdt_descr
14384 early_gdt_descr:
14385 .word GDT_ENTRIES*8-1
14386 early_gdt_descr_base:
14387 - .quad INIT_PER_CPU_VAR(gdt_page)
14388 + .quad cpu_gdt_table
14389
14390 ENTRY(phys_base)
14391 /* This must match the first entry in level2_kernel_pgt */
14392 .quad 0x0000000000000000
14393
14394 #include "../../x86/xen/xen-head.S"
14395 -
14396 - .section .bss, "aw", @nobits
14397 +
14398 + .section .rodata,"a",@progbits
14399 .align L1_CACHE_BYTES
14400 ENTRY(idt_table)
14401 - .skip IDT_ENTRIES * 16
14402 + .fill 512,8,0
14403
14404 __PAGE_ALIGNED_BSS
14405 .align PAGE_SIZE
14406 diff -urNp linux-2.6.32.41/arch/x86/kernel/i386_ksyms_32.c linux-2.6.32.41/arch/x86/kernel/i386_ksyms_32.c
14407 --- linux-2.6.32.41/arch/x86/kernel/i386_ksyms_32.c 2011-03-27 14:31:47.000000000 -0400
14408 +++ linux-2.6.32.41/arch/x86/kernel/i386_ksyms_32.c 2011-04-17 15:56:46.000000000 -0400
14409 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
14410 EXPORT_SYMBOL(cmpxchg8b_emu);
14411 #endif
14412
14413 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
14414 +
14415 /* Networking helper routines. */
14416 EXPORT_SYMBOL(csum_partial_copy_generic);
14417 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
14418 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
14419
14420 EXPORT_SYMBOL(__get_user_1);
14421 EXPORT_SYMBOL(__get_user_2);
14422 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
14423
14424 EXPORT_SYMBOL(csum_partial);
14425 EXPORT_SYMBOL(empty_zero_page);
14426 +
14427 +#ifdef CONFIG_PAX_KERNEXEC
14428 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
14429 +#endif
14430 diff -urNp linux-2.6.32.41/arch/x86/kernel/i8259.c linux-2.6.32.41/arch/x86/kernel/i8259.c
14431 --- linux-2.6.32.41/arch/x86/kernel/i8259.c 2011-03-27 14:31:47.000000000 -0400
14432 +++ linux-2.6.32.41/arch/x86/kernel/i8259.c 2011-05-04 17:56:28.000000000 -0400
14433 @@ -208,7 +208,7 @@ spurious_8259A_irq:
14434 "spurious 8259A interrupt: IRQ%d.\n", irq);
14435 spurious_irq_mask |= irqmask;
14436 }
14437 - atomic_inc(&irq_err_count);
14438 + atomic_inc_unchecked(&irq_err_count);
14439 /*
14440 * Theoretically we do not have to handle this IRQ,
14441 * but in Linux this does not cause problems and is
14442 diff -urNp linux-2.6.32.41/arch/x86/kernel/init_task.c linux-2.6.32.41/arch/x86/kernel/init_task.c
14443 --- linux-2.6.32.41/arch/x86/kernel/init_task.c 2011-03-27 14:31:47.000000000 -0400
14444 +++ linux-2.6.32.41/arch/x86/kernel/init_task.c 2011-04-17 15:56:46.000000000 -0400
14445 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
14446 * way process stacks are handled. This is done by having a special
14447 * "init_task" linker map entry..
14448 */
14449 -union thread_union init_thread_union __init_task_data =
14450 - { INIT_THREAD_INFO(init_task) };
14451 +union thread_union init_thread_union __init_task_data;
14452
14453 /*
14454 * Initial task structure.
14455 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
14456 * section. Since TSS's are completely CPU-local, we want them
14457 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
14458 */
14459 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
14460 -
14461 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
14462 +EXPORT_SYMBOL(init_tss);
14463 diff -urNp linux-2.6.32.41/arch/x86/kernel/ioport.c linux-2.6.32.41/arch/x86/kernel/ioport.c
14464 --- linux-2.6.32.41/arch/x86/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
14465 +++ linux-2.6.32.41/arch/x86/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
14466 @@ -6,6 +6,7 @@
14467 #include <linux/sched.h>
14468 #include <linux/kernel.h>
14469 #include <linux/capability.h>
14470 +#include <linux/security.h>
14471 #include <linux/errno.h>
14472 #include <linux/types.h>
14473 #include <linux/ioport.h>
14474 @@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long
14475
14476 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
14477 return -EINVAL;
14478 +#ifdef CONFIG_GRKERNSEC_IO
14479 + if (turn_on && grsec_disable_privio) {
14480 + gr_handle_ioperm();
14481 + return -EPERM;
14482 + }
14483 +#endif
14484 if (turn_on && !capable(CAP_SYS_RAWIO))
14485 return -EPERM;
14486
14487 @@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long
14488 * because the ->io_bitmap_max value must match the bitmap
14489 * contents:
14490 */
14491 - tss = &per_cpu(init_tss, get_cpu());
14492 + tss = init_tss + get_cpu();
14493
14494 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
14495
14496 @@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, s
14497 return -EINVAL;
14498 /* Trying to gain more privileges? */
14499 if (level > old) {
14500 +#ifdef CONFIG_GRKERNSEC_IO
14501 + if (grsec_disable_privio) {
14502 + gr_handle_iopl();
14503 + return -EPERM;
14504 + }
14505 +#endif
14506 if (!capable(CAP_SYS_RAWIO))
14507 return -EPERM;
14508 }
14509 diff -urNp linux-2.6.32.41/arch/x86/kernel/irq_32.c linux-2.6.32.41/arch/x86/kernel/irq_32.c
14510 --- linux-2.6.32.41/arch/x86/kernel/irq_32.c 2011-03-27 14:31:47.000000000 -0400
14511 +++ linux-2.6.32.41/arch/x86/kernel/irq_32.c 2011-04-23 13:26:46.000000000 -0400
14512 @@ -35,7 +35,7 @@ static int check_stack_overflow(void)
14513 __asm__ __volatile__("andl %%esp,%0" :
14514 "=r" (sp) : "0" (THREAD_SIZE - 1));
14515
14516 - return sp < (sizeof(struct thread_info) + STACK_WARN);
14517 + return sp < STACK_WARN;
14518 }
14519
14520 static void print_stack_overflow(void)
14521 @@ -54,9 +54,9 @@ static inline void print_stack_overflow(
14522 * per-CPU IRQ handling contexts (thread information and stack)
14523 */
14524 union irq_ctx {
14525 - struct thread_info tinfo;
14526 - u32 stack[THREAD_SIZE/sizeof(u32)];
14527 -} __attribute__((aligned(PAGE_SIZE)));
14528 + unsigned long previous_esp;
14529 + u32 stack[THREAD_SIZE/sizeof(u32)];
14530 +} __attribute__((aligned(THREAD_SIZE)));
14531
14532 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
14533 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
14534 @@ -78,10 +78,9 @@ static void call_on_stack(void *func, vo
14535 static inline int
14536 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
14537 {
14538 - union irq_ctx *curctx, *irqctx;
14539 + union irq_ctx *irqctx;
14540 u32 *isp, arg1, arg2;
14541
14542 - curctx = (union irq_ctx *) current_thread_info();
14543 irqctx = __get_cpu_var(hardirq_ctx);
14544
14545 /*
14546 @@ -90,21 +89,17 @@ execute_on_irq_stack(int overflow, struc
14547 * handler) we can't do that and just have to keep using the
14548 * current stack (which is the irq stack already after all)
14549 */
14550 - if (unlikely(curctx == irqctx))
14551 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
14552 return 0;
14553
14554 /* build the stack frame on the IRQ stack */
14555 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
14556 - irqctx->tinfo.task = curctx->tinfo.task;
14557 - irqctx->tinfo.previous_esp = current_stack_pointer;
14558 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
14559 + irqctx->previous_esp = current_stack_pointer;
14560 + add_preempt_count(HARDIRQ_OFFSET);
14561
14562 - /*
14563 - * Copy the softirq bits in preempt_count so that the
14564 - * softirq checks work in the hardirq context.
14565 - */
14566 - irqctx->tinfo.preempt_count =
14567 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
14568 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
14569 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14570 + __set_fs(MAKE_MM_SEG(0));
14571 +#endif
14572
14573 if (unlikely(overflow))
14574 call_on_stack(print_stack_overflow, isp);
14575 @@ -116,6 +111,12 @@ execute_on_irq_stack(int overflow, struc
14576 : "0" (irq), "1" (desc), "2" (isp),
14577 "D" (desc->handle_irq)
14578 : "memory", "cc", "ecx");
14579 +
14580 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14581 + __set_fs(current_thread_info()->addr_limit);
14582 +#endif
14583 +
14584 + sub_preempt_count(HARDIRQ_OFFSET);
14585 return 1;
14586 }
14587
14588 @@ -124,28 +125,11 @@ execute_on_irq_stack(int overflow, struc
14589 */
14590 void __cpuinit irq_ctx_init(int cpu)
14591 {
14592 - union irq_ctx *irqctx;
14593 -
14594 if (per_cpu(hardirq_ctx, cpu))
14595 return;
14596
14597 - irqctx = &per_cpu(hardirq_stack, cpu);
14598 - irqctx->tinfo.task = NULL;
14599 - irqctx->tinfo.exec_domain = NULL;
14600 - irqctx->tinfo.cpu = cpu;
14601 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
14602 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
14603 -
14604 - per_cpu(hardirq_ctx, cpu) = irqctx;
14605 -
14606 - irqctx = &per_cpu(softirq_stack, cpu);
14607 - irqctx->tinfo.task = NULL;
14608 - irqctx->tinfo.exec_domain = NULL;
14609 - irqctx->tinfo.cpu = cpu;
14610 - irqctx->tinfo.preempt_count = 0;
14611 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
14612 -
14613 - per_cpu(softirq_ctx, cpu) = irqctx;
14614 + per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
14615 + per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
14616
14617 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
14618 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
14619 @@ -159,7 +143,6 @@ void irq_ctx_exit(int cpu)
14620 asmlinkage void do_softirq(void)
14621 {
14622 unsigned long flags;
14623 - struct thread_info *curctx;
14624 union irq_ctx *irqctx;
14625 u32 *isp;
14626
14627 @@ -169,15 +152,22 @@ asmlinkage void do_softirq(void)
14628 local_irq_save(flags);
14629
14630 if (local_softirq_pending()) {
14631 - curctx = current_thread_info();
14632 irqctx = __get_cpu_var(softirq_ctx);
14633 - irqctx->tinfo.task = curctx->task;
14634 - irqctx->tinfo.previous_esp = current_stack_pointer;
14635 + irqctx->previous_esp = current_stack_pointer;
14636
14637 /* build the stack frame on the softirq stack */
14638 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
14639 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
14640 +
14641 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14642 + __set_fs(MAKE_MM_SEG(0));
14643 +#endif
14644
14645 call_on_stack(__do_softirq, isp);
14646 +
14647 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14648 + __set_fs(current_thread_info()->addr_limit);
14649 +#endif
14650 +
14651 /*
14652 * Shouldnt happen, we returned above if in_interrupt():
14653 */
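The irq_32.c changes drop the struct thread_info that used to sit at the bottom of each per-CPU IRQ stack: union irq_ctx now stores only previous_esp and is aligned to THREAD_SIZE, so the "already on the IRQ stack" test becomes plain pointer arithmetic instead of comparing thread_info pointers. A standalone sketch of that idea (the THREAD_SIZE value is illustrative; the real constant comes from the kernel headers):

#include <stdint.h>

#define THREAD_SIZE 8192UL	/* illustrative: 2-page kernel stacks */

/* With the IRQ context aligned to THREAD_SIZE, masking any stack pointer
 * yields the base of the stack it lives on, so "already on the IRQ stack"
 * reduces to a base comparison. */
static inline unsigned long stack_base(unsigned long sp)
{
	return sp & ~(THREAD_SIZE - 1);
}

static inline int on_irq_stack(unsigned long sp, unsigned long irqctx)
{
	return stack_base(sp) == irqctx;
}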
14654 diff -urNp linux-2.6.32.41/arch/x86/kernel/irq.c linux-2.6.32.41/arch/x86/kernel/irq.c
14655 --- linux-2.6.32.41/arch/x86/kernel/irq.c 2011-03-27 14:31:47.000000000 -0400
14656 +++ linux-2.6.32.41/arch/x86/kernel/irq.c 2011-05-04 17:56:28.000000000 -0400
14657 @@ -15,7 +15,7 @@
14658 #include <asm/mce.h>
14659 #include <asm/hw_irq.h>
14660
14661 -atomic_t irq_err_count;
14662 +atomic_unchecked_t irq_err_count;
14663
14664 /* Function pointer for generic interrupt vector handling */
14665 void (*generic_interrupt_extension)(void) = NULL;
14666 @@ -114,9 +114,9 @@ static int show_other_interrupts(struct
14667 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
14668 seq_printf(p, " Machine check polls\n");
14669 #endif
14670 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
14671 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
14672 #if defined(CONFIG_X86_IO_APIC)
14673 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
14674 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
14675 #endif
14676 return 0;
14677 }
14678 @@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
14679
14680 u64 arch_irq_stat(void)
14681 {
14682 - u64 sum = atomic_read(&irq_err_count);
14683 + u64 sum = atomic_read_unchecked(&irq_err_count);
14684
14685 #ifdef CONFIG_X86_IO_APIC
14686 - sum += atomic_read(&irq_mis_count);
14687 + sum += atomic_read_unchecked(&irq_mis_count);
14688 #endif
14689 return sum;
14690 }
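irq_err_count and irq_mis_count are pure statistics, so under the patch's atomic-overflow hardening they are converted to atomic_unchecked_t, the companion type whose operations deliberately skip the overflow detection applied to ordinary atomic_t counters. A minimal sketch of the shape of that type (the real definitions live in the arch atomic headers; this only approximates the idea):

/* Structurally the same as atomic_t, but its helpers never carry the
 * overflow-detection instrumentation, so a wrapping statistics counter
 * cannot trigger a false refcount-overflow report. */
typedef struct {
	int counter;
} atomic_unchecked_t;

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return v->counter;
}

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	__sync_fetch_and_add(&v->counter, 1);	/* plain increment, may wrap */
}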
14691 diff -urNp linux-2.6.32.41/arch/x86/kernel/kgdb.c linux-2.6.32.41/arch/x86/kernel/kgdb.c
14692 --- linux-2.6.32.41/arch/x86/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
14693 +++ linux-2.6.32.41/arch/x86/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
14694 @@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vec
14695
14696 /* clear the trace bit */
14697 linux_regs->flags &= ~X86_EFLAGS_TF;
14698 - atomic_set(&kgdb_cpu_doing_single_step, -1);
14699 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
14700
14701 /* set the trace bit if we're stepping */
14702 if (remcomInBuffer[0] == 's') {
14703 linux_regs->flags |= X86_EFLAGS_TF;
14704 kgdb_single_step = 1;
14705 - atomic_set(&kgdb_cpu_doing_single_step,
14706 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
14707 raw_smp_processor_id());
14708 }
14709
14710 @@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args
14711 break;
14712
14713 case DIE_DEBUG:
14714 - if (atomic_read(&kgdb_cpu_doing_single_step) ==
14715 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
14716 raw_smp_processor_id()) {
14717 if (user_mode(regs))
14718 return single_step_cont(regs, args);
14719 @@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception
14720 return instruction_pointer(regs);
14721 }
14722
14723 -struct kgdb_arch arch_kgdb_ops = {
14724 +const struct kgdb_arch arch_kgdb_ops = {
14725 /* Breakpoint instruction: */
14726 .gdb_bpt_instr = { 0xcc },
14727 .flags = KGDB_HW_BREAKPOINT,
14728 diff -urNp linux-2.6.32.41/arch/x86/kernel/kprobes.c linux-2.6.32.41/arch/x86/kernel/kprobes.c
14729 --- linux-2.6.32.41/arch/x86/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
14730 +++ linux-2.6.32.41/arch/x86/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
14731 @@ -166,9 +166,13 @@ static void __kprobes set_jmp_op(void *f
14732 char op;
14733 s32 raddr;
14734 } __attribute__((packed)) * jop;
14735 - jop = (struct __arch_jmp_op *)from;
14736 +
14737 + jop = (struct __arch_jmp_op *)(ktla_ktva(from));
14738 +
14739 + pax_open_kernel();
14740 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
14741 jop->op = RELATIVEJUMP_INSTRUCTION;
14742 + pax_close_kernel();
14743 }
14744
14745 /*
14746 @@ -193,7 +197,7 @@ static int __kprobes can_boost(kprobe_op
14747 kprobe_opcode_t opcode;
14748 kprobe_opcode_t *orig_opcodes = opcodes;
14749
14750 - if (search_exception_tables((unsigned long)opcodes))
14751 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
14752 return 0; /* Page fault may occur on this address. */
14753
14754 retry:
14755 @@ -337,7 +341,9 @@ static void __kprobes fix_riprel(struct
14756 disp = (u8 *) p->addr + *((s32 *) insn) -
14757 (u8 *) p->ainsn.insn;
14758 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
14759 + pax_open_kernel();
14760 *(s32 *)insn = (s32) disp;
14761 + pax_close_kernel();
14762 }
14763 }
14764 #endif
14765 @@ -345,16 +351,18 @@ static void __kprobes fix_riprel(struct
14766
14767 static void __kprobes arch_copy_kprobe(struct kprobe *p)
14768 {
14769 - memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
14770 + pax_open_kernel();
14771 + memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
14772 + pax_close_kernel();
14773
14774 fix_riprel(p);
14775
14776 - if (can_boost(p->addr))
14777 + if (can_boost(ktla_ktva(p->addr)))
14778 p->ainsn.boostable = 0;
14779 else
14780 p->ainsn.boostable = -1;
14781
14782 - p->opcode = *p->addr;
14783 + p->opcode = *(ktla_ktva(p->addr));
14784 }
14785
14786 int __kprobes arch_prepare_kprobe(struct kprobe *p)
14787 @@ -432,7 +440,7 @@ static void __kprobes prepare_singlestep
14788 if (p->opcode == BREAKPOINT_INSTRUCTION)
14789 regs->ip = (unsigned long)p->addr;
14790 else
14791 - regs->ip = (unsigned long)p->ainsn.insn;
14792 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
14793 }
14794
14795 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
14796 @@ -453,7 +461,7 @@ static void __kprobes setup_singlestep(s
14797 if (p->ainsn.boostable == 1 && !p->post_handler) {
14798 /* Boost up -- we can execute copied instructions directly */
14799 reset_current_kprobe();
14800 - regs->ip = (unsigned long)p->ainsn.insn;
14801 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
14802 preempt_enable_no_resched();
14803 return;
14804 }
14805 @@ -523,7 +531,7 @@ static int __kprobes kprobe_handler(stru
14806 struct kprobe_ctlblk *kcb;
14807
14808 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
14809 - if (*addr != BREAKPOINT_INSTRUCTION) {
14810 + if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
14811 /*
14812 * The breakpoint instruction was removed right
14813 * after we hit it. Another cpu has removed
14814 @@ -775,7 +783,7 @@ static void __kprobes resume_execution(s
14815 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
14816 {
14817 unsigned long *tos = stack_addr(regs);
14818 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
14819 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
14820 unsigned long orig_ip = (unsigned long)p->addr;
14821 kprobe_opcode_t *insn = p->ainsn.insn;
14822
14823 @@ -958,7 +966,7 @@ int __kprobes kprobe_exceptions_notify(s
14824 struct die_args *args = data;
14825 int ret = NOTIFY_DONE;
14826
14827 - if (args->regs && user_mode_vm(args->regs))
14828 + if (args->regs && user_mode(args->regs))
14829 return ret;
14830
14831 switch (val) {
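Every place kprobes reads or patches instruction bytes is rewritten here to go through ktla_ktva()/ktva_ktla(), which translate between the two views of kernel text that KERNEXEC maintains, and to bracket the actual store with pax_open_kernel()/pax_close_kernel(), which temporarily lift the write protection on kernel text. The sketch below shows only the calling pattern; the identity translation and empty bodies are placeholders, not the real PaX primitives:

/* Placeholder stand-ins: the real helpers are provided by the PaX code
 * elsewhere in this patch and are architecture-specific. */
static void pax_open_kernel(void) { }	/* make kernel text writable    */
static void pax_close_kernel(void) { }	/* restore the protection       */
#define ktla_ktva(addr) (addr)		/* real translation is per-arch */

static void patch_insn_byte(unsigned char *insn, unsigned char val)
{
	unsigned char *writable = ktla_ktva(insn);

	pax_open_kernel();
	*writable = val;	/* e.g. planting RELATIVEJUMP_INSTRUCTION */
	pax_close_kernel();
}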
14832 diff -urNp linux-2.6.32.41/arch/x86/kernel/ldt.c linux-2.6.32.41/arch/x86/kernel/ldt.c
14833 --- linux-2.6.32.41/arch/x86/kernel/ldt.c 2011-03-27 14:31:47.000000000 -0400
14834 +++ linux-2.6.32.41/arch/x86/kernel/ldt.c 2011-04-17 15:56:46.000000000 -0400
14835 @@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, i
14836 if (reload) {
14837 #ifdef CONFIG_SMP
14838 preempt_disable();
14839 - load_LDT(pc);
14840 + load_LDT_nolock(pc);
14841 if (!cpumask_equal(mm_cpumask(current->mm),
14842 cpumask_of(smp_processor_id())))
14843 smp_call_function(flush_ldt, current->mm, 1);
14844 preempt_enable();
14845 #else
14846 - load_LDT(pc);
14847 + load_LDT_nolock(pc);
14848 #endif
14849 }
14850 if (oldsize) {
14851 @@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t
14852 return err;
14853
14854 for (i = 0; i < old->size; i++)
14855 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
14856 + write_ldt_entry(new->ldt, i, old->ldt + i);
14857 return 0;
14858 }
14859
14860 @@ -115,6 +115,24 @@ int init_new_context(struct task_struct
14861 retval = copy_ldt(&mm->context, &old_mm->context);
14862 mutex_unlock(&old_mm->context.lock);
14863 }
14864 +
14865 + if (tsk == current) {
14866 + mm->context.vdso = 0;
14867 +
14868 +#ifdef CONFIG_X86_32
14869 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
14870 + mm->context.user_cs_base = 0UL;
14871 + mm->context.user_cs_limit = ~0UL;
14872 +
14873 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
14874 + cpus_clear(mm->context.cpu_user_cs_mask);
14875 +#endif
14876 +
14877 +#endif
14878 +#endif
14879 +
14880 + }
14881 +
14882 return retval;
14883 }
14884
14885 @@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, u
14886 }
14887 }
14888
14889 +#ifdef CONFIG_PAX_SEGMEXEC
14890 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
14891 + error = -EINVAL;
14892 + goto out_unlock;
14893 + }
14894 +#endif
14895 +
14896 fill_ldt(&ldt, &ldt_info);
14897 if (oldmode)
14898 ldt.avl = 0;
14899 diff -urNp linux-2.6.32.41/arch/x86/kernel/machine_kexec_32.c linux-2.6.32.41/arch/x86/kernel/machine_kexec_32.c
14900 --- linux-2.6.32.41/arch/x86/kernel/machine_kexec_32.c 2011-03-27 14:31:47.000000000 -0400
14901 +++ linux-2.6.32.41/arch/x86/kernel/machine_kexec_32.c 2011-04-17 15:56:46.000000000 -0400
14902 @@ -26,7 +26,7 @@
14903 #include <asm/system.h>
14904 #include <asm/cacheflush.h>
14905
14906 -static void set_idt(void *newidt, __u16 limit)
14907 +static void set_idt(struct desc_struct *newidt, __u16 limit)
14908 {
14909 struct desc_ptr curidt;
14910
14911 @@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16
14912 }
14913
14914
14915 -static void set_gdt(void *newgdt, __u16 limit)
14916 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
14917 {
14918 struct desc_ptr curgdt;
14919
14920 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
14921 }
14922
14923 control_page = page_address(image->control_code_page);
14924 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
14925 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
14926
14927 relocate_kernel_ptr = control_page;
14928 page_list[PA_CONTROL_PAGE] = __pa(control_page);
14929 diff -urNp linux-2.6.32.41/arch/x86/kernel/microcode_amd.c linux-2.6.32.41/arch/x86/kernel/microcode_amd.c
14930 --- linux-2.6.32.41/arch/x86/kernel/microcode_amd.c 2011-04-17 17:00:52.000000000 -0400
14931 +++ linux-2.6.32.41/arch/x86/kernel/microcode_amd.c 2011-04-17 17:03:05.000000000 -0400
14932 @@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int c
14933 uci->mc = NULL;
14934 }
14935
14936 -static struct microcode_ops microcode_amd_ops = {
14937 +static const struct microcode_ops microcode_amd_ops = {
14938 .request_microcode_user = request_microcode_user,
14939 .request_microcode_fw = request_microcode_fw,
14940 .collect_cpu_info = collect_cpu_info_amd,
14941 @@ -372,7 +372,7 @@ static struct microcode_ops microcode_am
14942 .microcode_fini_cpu = microcode_fini_cpu_amd,
14943 };
14944
14945 -struct microcode_ops * __init init_amd_microcode(void)
14946 +const struct microcode_ops * __init init_amd_microcode(void)
14947 {
14948 return &microcode_amd_ops;
14949 }
14950 diff -urNp linux-2.6.32.41/arch/x86/kernel/microcode_core.c linux-2.6.32.41/arch/x86/kernel/microcode_core.c
14951 --- linux-2.6.32.41/arch/x86/kernel/microcode_core.c 2011-03-27 14:31:47.000000000 -0400
14952 +++ linux-2.6.32.41/arch/x86/kernel/microcode_core.c 2011-04-17 15:56:46.000000000 -0400
14953 @@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
14954
14955 #define MICROCODE_VERSION "2.00"
14956
14957 -static struct microcode_ops *microcode_ops;
14958 +static const struct microcode_ops *microcode_ops;
14959
14960 /*
14961 * Synchronization.
14962 diff -urNp linux-2.6.32.41/arch/x86/kernel/microcode_intel.c linux-2.6.32.41/arch/x86/kernel/microcode_intel.c
14963 --- linux-2.6.32.41/arch/x86/kernel/microcode_intel.c 2011-03-27 14:31:47.000000000 -0400
14964 +++ linux-2.6.32.41/arch/x86/kernel/microcode_intel.c 2011-04-17 15:56:46.000000000 -0400
14965 @@ -443,13 +443,13 @@ static enum ucode_state request_microcod
14966
14967 static int get_ucode_user(void *to, const void *from, size_t n)
14968 {
14969 - return copy_from_user(to, from, n);
14970 + return copy_from_user(to, (__force const void __user *)from, n);
14971 }
14972
14973 static enum ucode_state
14974 request_microcode_user(int cpu, const void __user *buf, size_t size)
14975 {
14976 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
14977 + return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
14978 }
14979
14980 static void microcode_fini_cpu(int cpu)
14981 @@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
14982 uci->mc = NULL;
14983 }
14984
14985 -static struct microcode_ops microcode_intel_ops = {
14986 +static const struct microcode_ops microcode_intel_ops = {
14987 .request_microcode_user = request_microcode_user,
14988 .request_microcode_fw = request_microcode_fw,
14989 .collect_cpu_info = collect_cpu_info,
14990 @@ -468,7 +468,7 @@ static struct microcode_ops microcode_in
14991 .microcode_fini_cpu = microcode_fini_cpu,
14992 };
14993
14994 -struct microcode_ops * __init init_intel_microcode(void)
14995 +const struct microcode_ops * __init init_intel_microcode(void)
14996 {
14997 return &microcode_intel_ops;
14998 }
14999 diff -urNp linux-2.6.32.41/arch/x86/kernel/module.c linux-2.6.32.41/arch/x86/kernel/module.c
15000 --- linux-2.6.32.41/arch/x86/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
15001 +++ linux-2.6.32.41/arch/x86/kernel/module.c 2011-04-17 15:56:46.000000000 -0400
15002 @@ -34,7 +34,7 @@
15003 #define DEBUGP(fmt...)
15004 #endif
15005
15006 -void *module_alloc(unsigned long size)
15007 +static void *__module_alloc(unsigned long size, pgprot_t prot)
15008 {
15009 struct vm_struct *area;
15010
15011 @@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
15012 if (!area)
15013 return NULL;
15014
15015 - return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
15016 - PAGE_KERNEL_EXEC);
15017 + return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
15018 +}
15019 +
15020 +void *module_alloc(unsigned long size)
15021 +{
15022 +
15023 +#ifdef CONFIG_PAX_KERNEXEC
15024 + return __module_alloc(size, PAGE_KERNEL);
15025 +#else
15026 + return __module_alloc(size, PAGE_KERNEL_EXEC);
15027 +#endif
15028 +
15029 }
15030
15031 /* Free memory returned from module_alloc */
15032 @@ -58,6 +68,40 @@ void module_free(struct module *mod, voi
15033 vfree(module_region);
15034 }
15035
15036 +#ifdef CONFIG_PAX_KERNEXEC
15037 +#ifdef CONFIG_X86_32
15038 +void *module_alloc_exec(unsigned long size)
15039 +{
15040 + struct vm_struct *area;
15041 +
15042 + if (size == 0)
15043 + return NULL;
15044 +
15045 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
15046 + return area ? area->addr : NULL;
15047 +}
15048 +EXPORT_SYMBOL(module_alloc_exec);
15049 +
15050 +void module_free_exec(struct module *mod, void *module_region)
15051 +{
15052 + vunmap(module_region);
15053 +}
15054 +EXPORT_SYMBOL(module_free_exec);
15055 +#else
15056 +void module_free_exec(struct module *mod, void *module_region)
15057 +{
15058 + module_free(mod, module_region);
15059 +}
15060 +EXPORT_SYMBOL(module_free_exec);
15061 +
15062 +void *module_alloc_exec(unsigned long size)
15063 +{
15064 + return __module_alloc(size, PAGE_KERNEL_RX);
15065 +}
15066 +EXPORT_SYMBOL(module_alloc_exec);
15067 +#endif
15068 +#endif
15069 +
15070 /* We don't need anything special. */
15071 int module_frob_arch_sections(Elf_Ehdr *hdr,
15072 Elf_Shdr *sechdrs,
15073 @@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15074 unsigned int i;
15075 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
15076 Elf32_Sym *sym;
15077 - uint32_t *location;
15078 + uint32_t *plocation, location;
15079
15080 DEBUGP("Applying relocate section %u to %u\n", relsec,
15081 sechdrs[relsec].sh_info);
15082 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
15083 /* This is where to make the change */
15084 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
15085 - + rel[i].r_offset;
15086 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
15087 + location = (uint32_t)plocation;
15088 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
15089 + plocation = ktla_ktva((void *)plocation);
15090 /* This is the symbol it is referring to. Note that all
15091 undefined symbols have been resolved. */
15092 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
15093 @@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15094 switch (ELF32_R_TYPE(rel[i].r_info)) {
15095 case R_386_32:
15096 /* We add the value into the location given */
15097 - *location += sym->st_value;
15098 + pax_open_kernel();
15099 + *plocation += sym->st_value;
15100 + pax_close_kernel();
15101 break;
15102 case R_386_PC32:
15103 /* Add the value, subtract its postition */
15104 - *location += sym->st_value - (uint32_t)location;
15105 + pax_open_kernel();
15106 + *plocation += sym->st_value - location;
15107 + pax_close_kernel();
15108 break;
15109 default:
15110 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
15111 @@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
15112 case R_X86_64_NONE:
15113 break;
15114 case R_X86_64_64:
15115 + pax_open_kernel();
15116 *(u64 *)loc = val;
15117 + pax_close_kernel();
15118 break;
15119 case R_X86_64_32:
15120 + pax_open_kernel();
15121 *(u32 *)loc = val;
15122 + pax_close_kernel();
15123 if (val != *(u32 *)loc)
15124 goto overflow;
15125 break;
15126 case R_X86_64_32S:
15127 + pax_open_kernel();
15128 *(s32 *)loc = val;
15129 + pax_close_kernel();
15130 if ((s64)val != *(s32 *)loc)
15131 goto overflow;
15132 break;
15133 case R_X86_64_PC32:
15134 val -= (u64)loc;
15135 + pax_open_kernel();
15136 *(u32 *)loc = val;
15137 + pax_close_kernel();
15138 +
15139 #if 0
15140 if ((s64)val != *(s32 *)loc)
15141 goto overflow;
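For the i386 relocations the hunk separates the pointer that gets written (plocation, redirected through ktla_ktva() when the section is executable) from the numeric address used in the relocation math (location), so R_386_PC32 stays relative to the real text address even though the store lands in the writable view. The arithmetic itself is unchanged; a small standalone illustration with made-up addresses:

#include <stdint.h>
#include <stdio.h>

static uint32_t reloc_386_32(uint32_t field, uint32_t sym_value)
{
	return field + sym_value;		/* R_386_32: absolute   */
}

static uint32_t reloc_386_pc32(uint32_t field, uint32_t sym_value,
			       uint32_t location)
{
	return field + sym_value - location;	/* R_386_PC32: relative */
}

int main(void)
{
	/* hypothetical: symbol at 0xc1000000, field being patched at 0xc2000010 */
	printf("abs: 0x%08x\n", reloc_386_32(0, 0xc1000000u));
	printf("rel: 0x%08x\n", reloc_386_pc32(0, 0xc1000000u, 0xc2000010u));
	return 0;
}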
15142 diff -urNp linux-2.6.32.41/arch/x86/kernel/paravirt.c linux-2.6.32.41/arch/x86/kernel/paravirt.c
15143 --- linux-2.6.32.41/arch/x86/kernel/paravirt.c 2011-03-27 14:31:47.000000000 -0400
15144 +++ linux-2.6.32.41/arch/x86/kernel/paravirt.c 2011-05-16 21:46:57.000000000 -0400
15145 @@ -122,7 +122,7 @@ unsigned paravirt_patch_jmp(void *insnbu
15146 * corresponding structure. */
15147 static void *get_call_destination(u8 type)
15148 {
15149 - struct paravirt_patch_template tmpl = {
15150 + const struct paravirt_patch_template tmpl = {
15151 .pv_init_ops = pv_init_ops,
15152 .pv_time_ops = pv_time_ops,
15153 .pv_cpu_ops = pv_cpu_ops,
15154 @@ -133,6 +133,9 @@ static void *get_call_destination(u8 typ
15155 .pv_lock_ops = pv_lock_ops,
15156 #endif
15157 };
15158 +
15159 + pax_track_stack();
15160 +
15161 return *((void **)&tmpl + type);
15162 }
15163
15164 @@ -145,14 +148,14 @@ unsigned paravirt_patch_default(u8 type,
15165 if (opfunc == NULL)
15166 /* If there's no function, patch it with a ud2a (BUG) */
15167 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
15168 - else if (opfunc == _paravirt_nop)
15169 + else if (opfunc == (void *)_paravirt_nop)
15170 /* If the operation is a nop, then nop the callsite */
15171 ret = paravirt_patch_nop();
15172
15173 /* identity functions just return their single argument */
15174 - else if (opfunc == _paravirt_ident_32)
15175 + else if (opfunc == (void *)_paravirt_ident_32)
15176 ret = paravirt_patch_ident_32(insnbuf, len);
15177 - else if (opfunc == _paravirt_ident_64)
15178 + else if (opfunc == (void *)_paravirt_ident_64)
15179 ret = paravirt_patch_ident_64(insnbuf, len);
15180
15181 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
15182 @@ -178,7 +181,7 @@ unsigned paravirt_patch_insns(void *insn
15183 if (insn_len > len || start == NULL)
15184 insn_len = len;
15185 else
15186 - memcpy(insnbuf, start, insn_len);
15187 + memcpy(insnbuf, ktla_ktva(start), insn_len);
15188
15189 return insn_len;
15190 }
15191 @@ -294,22 +297,22 @@ void arch_flush_lazy_mmu_mode(void)
15192 preempt_enable();
15193 }
15194
15195 -struct pv_info pv_info = {
15196 +struct pv_info pv_info __read_only = {
15197 .name = "bare hardware",
15198 .paravirt_enabled = 0,
15199 .kernel_rpl = 0,
15200 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
15201 };
15202
15203 -struct pv_init_ops pv_init_ops = {
15204 +struct pv_init_ops pv_init_ops __read_only = {
15205 .patch = native_patch,
15206 };
15207
15208 -struct pv_time_ops pv_time_ops = {
15209 +struct pv_time_ops pv_time_ops __read_only = {
15210 .sched_clock = native_sched_clock,
15211 };
15212
15213 -struct pv_irq_ops pv_irq_ops = {
15214 +struct pv_irq_ops pv_irq_ops __read_only = {
15215 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
15216 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
15217 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
15218 @@ -321,7 +324,7 @@ struct pv_irq_ops pv_irq_ops = {
15219 #endif
15220 };
15221
15222 -struct pv_cpu_ops pv_cpu_ops = {
15223 +struct pv_cpu_ops pv_cpu_ops __read_only = {
15224 .cpuid = native_cpuid,
15225 .get_debugreg = native_get_debugreg,
15226 .set_debugreg = native_set_debugreg,
15227 @@ -382,7 +385,7 @@ struct pv_cpu_ops pv_cpu_ops = {
15228 .end_context_switch = paravirt_nop,
15229 };
15230
15231 -struct pv_apic_ops pv_apic_ops = {
15232 +struct pv_apic_ops pv_apic_ops __read_only = {
15233 #ifdef CONFIG_X86_LOCAL_APIC
15234 .startup_ipi_hook = paravirt_nop,
15235 #endif
15236 @@ -396,7 +399,7 @@ struct pv_apic_ops pv_apic_ops = {
15237 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
15238 #endif
15239
15240 -struct pv_mmu_ops pv_mmu_ops = {
15241 +struct pv_mmu_ops pv_mmu_ops __read_only = {
15242
15243 .read_cr2 = native_read_cr2,
15244 .write_cr2 = native_write_cr2,
15245 @@ -467,6 +470,12 @@ struct pv_mmu_ops pv_mmu_ops = {
15246 },
15247
15248 .set_fixmap = native_set_fixmap,
15249 +
15250 +#ifdef CONFIG_PAX_KERNEXEC
15251 + .pax_open_kernel = native_pax_open_kernel,
15252 + .pax_close_kernel = native_pax_close_kernel,
15253 +#endif
15254 +
15255 };
15256
15257 EXPORT_SYMBOL_GPL(pv_time_ops);
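Marking pv_info and the pv_*_ops tables __read_only means the paravirt function-pointer tables can no longer be rewritten after boot, removing an easy hook point for kernel exploits. A hedged sketch of what such an annotation usually expands to; the section name below is an assumption for illustration, not taken from this patch:

/* Assumption: __read_only places the object in a data section that the
 * linker script and KERNEXEC map read-only once initialization is done. */
#define __read_only __attribute__((__section__(".data.read_only")))

struct ops {
	void (*fn)(void);
};

static struct ops example_ops __read_only = { .fn = 0 };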
15258 diff -urNp linux-2.6.32.41/arch/x86/kernel/paravirt-spinlocks.c linux-2.6.32.41/arch/x86/kernel/paravirt-spinlocks.c
15259 --- linux-2.6.32.41/arch/x86/kernel/paravirt-spinlocks.c 2011-03-27 14:31:47.000000000 -0400
15260 +++ linux-2.6.32.41/arch/x86/kernel/paravirt-spinlocks.c 2011-04-17 15:56:46.000000000 -0400
15261 @@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *
15262 __raw_spin_lock(lock);
15263 }
15264
15265 -struct pv_lock_ops pv_lock_ops = {
15266 +struct pv_lock_ops pv_lock_ops __read_only = {
15267 #ifdef CONFIG_SMP
15268 .spin_is_locked = __ticket_spin_is_locked,
15269 .spin_is_contended = __ticket_spin_is_contended,
15270 diff -urNp linux-2.6.32.41/arch/x86/kernel/pci-calgary_64.c linux-2.6.32.41/arch/x86/kernel/pci-calgary_64.c
15271 --- linux-2.6.32.41/arch/x86/kernel/pci-calgary_64.c 2011-03-27 14:31:47.000000000 -0400
15272 +++ linux-2.6.32.41/arch/x86/kernel/pci-calgary_64.c 2011-04-17 15:56:46.000000000 -0400
15273 @@ -477,7 +477,7 @@ static void calgary_free_coherent(struct
15274 free_pages((unsigned long)vaddr, get_order(size));
15275 }
15276
15277 -static struct dma_map_ops calgary_dma_ops = {
15278 +static const struct dma_map_ops calgary_dma_ops = {
15279 .alloc_coherent = calgary_alloc_coherent,
15280 .free_coherent = calgary_free_coherent,
15281 .map_sg = calgary_map_sg,
15282 diff -urNp linux-2.6.32.41/arch/x86/kernel/pci-dma.c linux-2.6.32.41/arch/x86/kernel/pci-dma.c
15283 --- linux-2.6.32.41/arch/x86/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
15284 +++ linux-2.6.32.41/arch/x86/kernel/pci-dma.c 2011-04-17 15:56:46.000000000 -0400
15285 @@ -14,7 +14,7 @@
15286
15287 static int forbid_dac __read_mostly;
15288
15289 -struct dma_map_ops *dma_ops;
15290 +const struct dma_map_ops *dma_ops;
15291 EXPORT_SYMBOL(dma_ops);
15292
15293 static int iommu_sac_force __read_mostly;
15294 @@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
15295
15296 int dma_supported(struct device *dev, u64 mask)
15297 {
15298 - struct dma_map_ops *ops = get_dma_ops(dev);
15299 + const struct dma_map_ops *ops = get_dma_ops(dev);
15300
15301 #ifdef CONFIG_PCI
15302 if (mask > 0xffffffff && forbid_dac > 0) {
15303 diff -urNp linux-2.6.32.41/arch/x86/kernel/pci-gart_64.c linux-2.6.32.41/arch/x86/kernel/pci-gart_64.c
15304 --- linux-2.6.32.41/arch/x86/kernel/pci-gart_64.c 2011-03-27 14:31:47.000000000 -0400
15305 +++ linux-2.6.32.41/arch/x86/kernel/pci-gart_64.c 2011-04-17 15:56:46.000000000 -0400
15306 @@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct ag
15307 return -1;
15308 }
15309
15310 -static struct dma_map_ops gart_dma_ops = {
15311 +static const struct dma_map_ops gart_dma_ops = {
15312 .map_sg = gart_map_sg,
15313 .unmap_sg = gart_unmap_sg,
15314 .map_page = gart_map_page,
15315 diff -urNp linux-2.6.32.41/arch/x86/kernel/pci-nommu.c linux-2.6.32.41/arch/x86/kernel/pci-nommu.c
15316 --- linux-2.6.32.41/arch/x86/kernel/pci-nommu.c 2011-03-27 14:31:47.000000000 -0400
15317 +++ linux-2.6.32.41/arch/x86/kernel/pci-nommu.c 2011-04-17 15:56:46.000000000 -0400
15318 @@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(str
15319 flush_write_buffers();
15320 }
15321
15322 -struct dma_map_ops nommu_dma_ops = {
15323 +const struct dma_map_ops nommu_dma_ops = {
15324 .alloc_coherent = dma_generic_alloc_coherent,
15325 .free_coherent = nommu_free_coherent,
15326 .map_sg = nommu_map_sg,
15327 diff -urNp linux-2.6.32.41/arch/x86/kernel/pci-swiotlb.c linux-2.6.32.41/arch/x86/kernel/pci-swiotlb.c
15328 --- linux-2.6.32.41/arch/x86/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
15329 +++ linux-2.6.32.41/arch/x86/kernel/pci-swiotlb.c 2011-04-17 15:56:46.000000000 -0400
15330 @@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(
15331 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
15332 }
15333
15334 -static struct dma_map_ops swiotlb_dma_ops = {
15335 +static const struct dma_map_ops swiotlb_dma_ops = {
15336 .mapping_error = swiotlb_dma_mapping_error,
15337 .alloc_coherent = x86_swiotlb_alloc_coherent,
15338 .free_coherent = swiotlb_free_coherent,
15339 diff -urNp linux-2.6.32.41/arch/x86/kernel/process_32.c linux-2.6.32.41/arch/x86/kernel/process_32.c
15340 --- linux-2.6.32.41/arch/x86/kernel/process_32.c 2011-03-27 14:31:47.000000000 -0400
15341 +++ linux-2.6.32.41/arch/x86/kernel/process_32.c 2011-05-16 21:46:57.000000000 -0400
15342 @@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __as
15343 unsigned long thread_saved_pc(struct task_struct *tsk)
15344 {
15345 return ((unsigned long *)tsk->thread.sp)[3];
15346 +//XXX return tsk->thread.eip;
15347 }
15348
15349 #ifndef CONFIG_SMP
15350 @@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, i
15351 unsigned short ss, gs;
15352 const char *board;
15353
15354 - if (user_mode_vm(regs)) {
15355 + if (user_mode(regs)) {
15356 sp = regs->sp;
15357 ss = regs->ss & 0xffff;
15358 - gs = get_user_gs(regs);
15359 } else {
15360 sp = (unsigned long) (&regs->sp);
15361 savesegment(ss, ss);
15362 - savesegment(gs, gs);
15363 }
15364 + gs = get_user_gs(regs);
15365
15366 printk("\n");
15367
15368 @@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), voi
15369 regs.bx = (unsigned long) fn;
15370 regs.dx = (unsigned long) arg;
15371
15372 - regs.ds = __USER_DS;
15373 - regs.es = __USER_DS;
15374 + regs.ds = __KERNEL_DS;
15375 + regs.es = __KERNEL_DS;
15376 regs.fs = __KERNEL_PERCPU;
15377 - regs.gs = __KERNEL_STACK_CANARY;
15378 + savesegment(gs, regs.gs);
15379 regs.orig_ax = -1;
15380 regs.ip = (unsigned long) kernel_thread_helper;
15381 regs.cs = __KERNEL_CS | get_kernel_rpl();
15382 @@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flag
15383 struct task_struct *tsk;
15384 int err;
15385
15386 - childregs = task_pt_regs(p);
15387 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
15388 *childregs = *regs;
15389 childregs->ax = 0;
15390 childregs->sp = sp;
15391
15392 p->thread.sp = (unsigned long) childregs;
15393 p->thread.sp0 = (unsigned long) (childregs+1);
15394 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
15395
15396 p->thread.ip = (unsigned long) ret_from_fork;
15397
15398 @@ -346,7 +347,7 @@ __switch_to(struct task_struct *prev_p,
15399 struct thread_struct *prev = &prev_p->thread,
15400 *next = &next_p->thread;
15401 int cpu = smp_processor_id();
15402 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
15403 + struct tss_struct *tss = init_tss + cpu;
15404 bool preload_fpu;
15405
15406 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
15407 @@ -381,6 +382,10 @@ __switch_to(struct task_struct *prev_p,
15408 */
15409 lazy_save_gs(prev->gs);
15410
15411 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15412 + __set_fs(task_thread_info(next_p)->addr_limit);
15413 +#endif
15414 +
15415 /*
15416 * Load the per-thread Thread-Local Storage descriptor.
15417 */
15418 @@ -416,6 +421,9 @@ __switch_to(struct task_struct *prev_p,
15419 */
15420 arch_end_context_switch(next_p);
15421
15422 + percpu_write(current_task, next_p);
15423 + percpu_write(current_tinfo, &next_p->tinfo);
15424 +
15425 if (preload_fpu)
15426 __math_state_restore();
15427
15428 @@ -425,8 +433,6 @@ __switch_to(struct task_struct *prev_p,
15429 if (prev->gs | next->gs)
15430 lazy_load_gs(next->gs);
15431
15432 - percpu_write(current_task, next_p);
15433 -
15434 return prev_p;
15435 }
15436
15437 @@ -496,4 +502,3 @@ unsigned long get_wchan(struct task_stru
15438 } while (count++ < 16);
15439 return 0;
15440 }
15441 -
15442 diff -urNp linux-2.6.32.41/arch/x86/kernel/process_64.c linux-2.6.32.41/arch/x86/kernel/process_64.c
15443 --- linux-2.6.32.41/arch/x86/kernel/process_64.c 2011-03-27 14:31:47.000000000 -0400
15444 +++ linux-2.6.32.41/arch/x86/kernel/process_64.c 2011-05-16 21:46:57.000000000 -0400
15445 @@ -91,7 +91,7 @@ static void __exit_idle(void)
15446 void exit_idle(void)
15447 {
15448 /* idle loop has pid 0 */
15449 - if (current->pid)
15450 + if (task_pid_nr(current))
15451 return;
15452 __exit_idle();
15453 }
15454 @@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, i
15455 if (!board)
15456 board = "";
15457 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
15458 - current->pid, current->comm, print_tainted(),
15459 + task_pid_nr(current), current->comm, print_tainted(),
15460 init_utsname()->release,
15461 (int)strcspn(init_utsname()->version, " "),
15462 init_utsname()->version, board);
15463 @@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flag
15464 struct pt_regs *childregs;
15465 struct task_struct *me = current;
15466
15467 - childregs = ((struct pt_regs *)
15468 - (THREAD_SIZE + task_stack_page(p))) - 1;
15469 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
15470 *childregs = *regs;
15471
15472 childregs->ax = 0;
15473 @@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flag
15474 p->thread.sp = (unsigned long) childregs;
15475 p->thread.sp0 = (unsigned long) (childregs+1);
15476 p->thread.usersp = me->thread.usersp;
15477 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
15478
15479 set_tsk_thread_flag(p, TIF_FORK);
15480
15481 @@ -380,7 +380,7 @@ __switch_to(struct task_struct *prev_p,
15482 struct thread_struct *prev = &prev_p->thread;
15483 struct thread_struct *next = &next_p->thread;
15484 int cpu = smp_processor_id();
15485 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
15486 + struct tss_struct *tss = init_tss + cpu;
15487 unsigned fsindex, gsindex;
15488 bool preload_fpu;
15489
15490 @@ -476,10 +476,9 @@ __switch_to(struct task_struct *prev_p,
15491 prev->usersp = percpu_read(old_rsp);
15492 percpu_write(old_rsp, next->usersp);
15493 percpu_write(current_task, next_p);
15494 + percpu_write(current_tinfo, &next_p->tinfo);
15495
15496 - percpu_write(kernel_stack,
15497 - (unsigned long)task_stack_page(next_p) +
15498 - THREAD_SIZE - KERNEL_STACK_OFFSET);
15499 + percpu_write(kernel_stack, next->sp0);
15500
15501 /*
15502 * Now maybe reload the debug registers and handle I/O bitmaps
15503 @@ -560,12 +559,11 @@ unsigned long get_wchan(struct task_stru
15504 if (!p || p == current || p->state == TASK_RUNNING)
15505 return 0;
15506 stack = (unsigned long)task_stack_page(p);
15507 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
15508 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
15509 return 0;
15510 fp = *(u64 *)(p->thread.sp);
15511 do {
15512 - if (fp < (unsigned long)stack ||
15513 - fp >= (unsigned long)stack+THREAD_SIZE)
15514 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
15515 return 0;
15516 ip = *(u64 *)(fp+8);
15517 if (!in_sched_functions(ip))
15518 diff -urNp linux-2.6.32.41/arch/x86/kernel/process.c linux-2.6.32.41/arch/x86/kernel/process.c
15519 --- linux-2.6.32.41/arch/x86/kernel/process.c 2011-04-22 19:16:29.000000000 -0400
15520 +++ linux-2.6.32.41/arch/x86/kernel/process.c 2011-05-22 23:02:03.000000000 -0400
15521 @@ -51,16 +51,33 @@ void free_thread_xstate(struct task_stru
15522
15523 void free_thread_info(struct thread_info *ti)
15524 {
15525 - free_thread_xstate(ti->task);
15526 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
15527 }
15528
15529 +static struct kmem_cache *task_struct_cachep;
15530 +
15531 void arch_task_cache_init(void)
15532 {
15533 - task_xstate_cachep =
15534 - kmem_cache_create("task_xstate", xstate_size,
15535 + /* create a slab on which task_structs can be allocated */
15536 + task_struct_cachep =
15537 + kmem_cache_create("task_struct", sizeof(struct task_struct),
15538 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
15539 +
15540 + task_xstate_cachep =
15541 + kmem_cache_create("task_xstate", xstate_size,
15542 __alignof__(union thread_xstate),
15543 - SLAB_PANIC | SLAB_NOTRACK, NULL);
15544 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
15545 +}
15546 +
15547 +struct task_struct *alloc_task_struct(void)
15548 +{
15549 + return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
15550 +}
15551 +
15552 +void free_task_struct(struct task_struct *task)
15553 +{
15554 + free_thread_xstate(task);
15555 + kmem_cache_free(task_struct_cachep, task);
15556 }
15557
15558 /*
15559 @@ -73,7 +90,7 @@ void exit_thread(void)
15560 unsigned long *bp = t->io_bitmap_ptr;
15561
15562 if (bp) {
15563 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
15564 + struct tss_struct *tss = init_tss + get_cpu();
15565
15566 t->io_bitmap_ptr = NULL;
15567 clear_thread_flag(TIF_IO_BITMAP);
15568 @@ -93,6 +110,9 @@ void flush_thread(void)
15569
15570 clear_tsk_thread_flag(tsk, TIF_DEBUG);
15571
15572 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
15573 + loadsegment(gs, 0);
15574 +#endif
15575 tsk->thread.debugreg0 = 0;
15576 tsk->thread.debugreg1 = 0;
15577 tsk->thread.debugreg2 = 0;
15578 @@ -307,7 +327,7 @@ void default_idle(void)
15579 EXPORT_SYMBOL(default_idle);
15580 #endif
15581
15582 -void stop_this_cpu(void *dummy)
15583 +__noreturn void stop_this_cpu(void *dummy)
15584 {
15585 local_irq_disable();
15586 /*
15587 @@ -568,16 +588,35 @@ static int __init idle_setup(char *str)
15588 }
15589 early_param("idle", idle_setup);
15590
15591 -unsigned long arch_align_stack(unsigned long sp)
15592 +#ifdef CONFIG_PAX_RANDKSTACK
15593 +asmlinkage void pax_randomize_kstack(void)
15594 {
15595 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
15596 - sp -= get_random_int() % 8192;
15597 - return sp & ~0xf;
15598 -}
15599 + struct thread_struct *thread = &current->thread;
15600 + unsigned long time;
15601
15602 -unsigned long arch_randomize_brk(struct mm_struct *mm)
15603 -{
15604 - unsigned long range_end = mm->brk + 0x02000000;
15605 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
15606 + if (!randomize_va_space)
15607 + return;
15608 +
15609 + rdtscl(time);
15610 +
15611 + /* P4 seems to return a 0 LSB, ignore it */
15612 +#ifdef CONFIG_MPENTIUM4
15613 + time &= 0x3EUL;
15614 + time <<= 2;
15615 +#elif defined(CONFIG_X86_64)
15616 + time &= 0xFUL;
15617 + time <<= 4;
15618 +#else
15619 + time &= 0x1FUL;
15620 + time <<= 3;
15621 +#endif
15622 +
15623 + thread->sp0 ^= time;
15624 + load_sp0(init_tss + smp_processor_id(), thread);
15625 +
15626 +#ifdef CONFIG_X86_64
15627 + percpu_write(kernel_stack, thread->sp0);
15628 +#endif
15629 }
15630 +#endif
15631
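pax_randomize_kstack() XORs a few TSC-derived bits into thread->sp0 (and, on x86_64, into the per-CPU kernel_stack), so the kernel stack top used on the next entry shifts by a small, alignment-preserving amount. The masks bound that shift per CPU family; a standalone calculation of the resulting ranges:

#include <stdio.h>

/* Reproduces the masking done in pax_randomize_kstack() above. */
static unsigned long randkstack_offset(unsigned long tsc, int x86_64, int p4)
{
	if (p4) {			/* P4: TSC LSB unreliable, use bits 1..5 */
		tsc &= 0x3EUL;
		tsc <<= 2;		/* offsets 0x00..0xF8, 8-byte aligned, 5 bits  */
	} else if (x86_64) {
		tsc &= 0xFUL;
		tsc <<= 4;		/* offsets 0x00..0xF0, 16-byte aligned, 4 bits */
	} else {
		tsc &= 0x1FUL;
		tsc <<= 3;		/* offsets 0x00..0xF8, 8-byte aligned, 5 bits  */
	}
	return tsc;
}

int main(void)
{
	printf("max x86_64 offset: 0x%lx\n", randkstack_offset(~0UL, 1, 0));
	printf("max i386 offset:   0x%lx\n", randkstack_offset(~0UL, 0, 0));
	printf("max P4 offset:     0x%lx\n", randkstack_offset(~0UL, 0, 1));
	return 0;
}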
15632 diff -urNp linux-2.6.32.41/arch/x86/kernel/ptrace.c linux-2.6.32.41/arch/x86/kernel/ptrace.c
15633 --- linux-2.6.32.41/arch/x86/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
15634 +++ linux-2.6.32.41/arch/x86/kernel/ptrace.c 2011-04-17 15:56:46.000000000 -0400
15635 @@ -925,7 +925,7 @@ static const struct user_regset_view use
15636 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
15637 {
15638 int ret;
15639 - unsigned long __user *datap = (unsigned long __user *)data;
15640 + unsigned long __user *datap = (__force unsigned long __user *)data;
15641
15642 switch (request) {
15643 /* read the word at location addr in the USER area. */
15644 @@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *chi
15645 if (addr < 0)
15646 return -EIO;
15647 ret = do_get_thread_area(child, addr,
15648 - (struct user_desc __user *) data);
15649 + (__force struct user_desc __user *) data);
15650 break;
15651
15652 case PTRACE_SET_THREAD_AREA:
15653 if (addr < 0)
15654 return -EIO;
15655 ret = do_set_thread_area(child, addr,
15656 - (struct user_desc __user *) data, 0);
15657 + (__force struct user_desc __user *) data, 0);
15658 break;
15659 #endif
15660
15661 @@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *chi
15662 #ifdef CONFIG_X86_PTRACE_BTS
15663 case PTRACE_BTS_CONFIG:
15664 ret = ptrace_bts_config
15665 - (child, data, (struct ptrace_bts_config __user *)addr);
15666 + (child, data, (__force struct ptrace_bts_config __user *)addr);
15667 break;
15668
15669 case PTRACE_BTS_STATUS:
15670 ret = ptrace_bts_status
15671 - (child, data, (struct ptrace_bts_config __user *)addr);
15672 + (child, data, (__force struct ptrace_bts_config __user *)addr);
15673 break;
15674
15675 case PTRACE_BTS_SIZE:
15676 @@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *chi
15677
15678 case PTRACE_BTS_GET:
15679 ret = ptrace_bts_read_record
15680 - (child, data, (struct bts_struct __user *) addr);
15681 + (child, data, (__force struct bts_struct __user *) addr);
15682 break;
15683
15684 case PTRACE_BTS_CLEAR:
15685 @@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *chi
15686
15687 case PTRACE_BTS_DRAIN:
15688 ret = ptrace_bts_drain
15689 - (child, data, (struct bts_struct __user *) addr);
15690 + (child, data, (__force struct bts_struct __user *) addr);
15691 break;
15692 #endif /* CONFIG_X86_PTRACE_BTS */
15693
15694 @@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *ts
15695 info.si_code = si_code;
15696
15697 /* User-mode ip? */
15698 - info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
15699 + info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
15700
15701 /* Send us the fake SIGTRAP */
15702 force_sig_info(SIGTRAP, &info, tsk);
15703 @@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *ts
15704 * We must return the syscall number to actually look up in the table.
15705 * This can be -1L to skip running any syscall at all.
15706 */
15707 -asmregparm long syscall_trace_enter(struct pt_regs *regs)
15708 +long syscall_trace_enter(struct pt_regs *regs)
15709 {
15710 long ret = 0;
15711
15712 @@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(stru
15713 return ret ?: regs->orig_ax;
15714 }
15715
15716 -asmregparm void syscall_trace_leave(struct pt_regs *regs)
15717 +void syscall_trace_leave(struct pt_regs *regs)
15718 {
15719 if (unlikely(current->audit_context))
15720 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
15721 diff -urNp linux-2.6.32.41/arch/x86/kernel/reboot.c linux-2.6.32.41/arch/x86/kernel/reboot.c
15722 --- linux-2.6.32.41/arch/x86/kernel/reboot.c 2011-03-27 14:31:47.000000000 -0400
15723 +++ linux-2.6.32.41/arch/x86/kernel/reboot.c 2011-05-22 23:02:03.000000000 -0400
15724 @@ -33,7 +33,7 @@ void (*pm_power_off)(void);
15725 EXPORT_SYMBOL(pm_power_off);
15726
15727 static const struct desc_ptr no_idt = {};
15728 -static int reboot_mode;
15729 +static unsigned short reboot_mode;
15730 enum reboot_type reboot_type = BOOT_KBD;
15731 int reboot_force;
15732
15733 @@ -292,12 +292,12 @@ core_initcall(reboot_init);
15734 controller to pulse the CPU reset line, which is more thorough, but
15735 doesn't work with at least one type of 486 motherboard. It is easy
15736 to stop this code working; hence the copious comments. */
15737 -static const unsigned long long
15738 -real_mode_gdt_entries [3] =
15739 +static struct desc_struct
15740 +real_mode_gdt_entries [3] __read_only =
15741 {
15742 - 0x0000000000000000ULL, /* Null descriptor */
15743 - 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
15744 - 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
15745 + GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
15746 + GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
15747 + GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
15748 };
15749
15750 static const struct desc_ptr
15751 @@ -346,7 +346,7 @@ static const unsigned char jump_to_bios
15752 * specified by the code and length parameters.
15753 * We assume that length will aways be less that 100!
15754 */
15755 -void machine_real_restart(const unsigned char *code, int length)
15756 +__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
15757 {
15758 local_irq_disable();
15759
15760 @@ -366,8 +366,8 @@ void machine_real_restart(const unsigned
15761 /* Remap the kernel at virtual address zero, as well as offset zero
15762 from the kernel segment. This assumes the kernel segment starts at
15763 virtual address PAGE_OFFSET. */
15764 - memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
15765 - sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
15766 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
15767 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
15768
15769 /*
15770 * Use `swapper_pg_dir' as our page directory.
15771 @@ -379,16 +379,15 @@ void machine_real_restart(const unsigned
15772 boot)". This seems like a fairly standard thing that gets set by
15773 REBOOT.COM programs, and the previous reset routine did this
15774 too. */
15775 - *((unsigned short *)0x472) = reboot_mode;
15776 + *(unsigned short *)(__va(0x472)) = reboot_mode;
15777
15778 /* For the switch to real mode, copy some code to low memory. It has
15779 to be in the first 64k because it is running in 16-bit mode, and it
15780 has to have the same physical and virtual address, because it turns
15781 off paging. Copy it near the end of the first page, out of the way
15782 of BIOS variables. */
15783 - memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
15784 - real_mode_switch, sizeof (real_mode_switch));
15785 - memcpy((void *)(0x1000 - 100), code, length);
15786 + memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
15787 + memcpy(__va(0x1000 - 100), code, length);
15788
15789 /* Set up the IDT for real mode. */
15790 load_idt(&real_mode_idt);
15791 @@ -416,6 +415,7 @@ void machine_real_restart(const unsigned
15792 __asm__ __volatile__ ("ljmp $0x0008,%0"
15793 :
15794 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
15795 + do { } while (1);
15796 }
15797 #ifdef CONFIG_APM_MODULE
15798 EXPORT_SYMBOL(machine_real_restart);
15799 @@ -536,7 +536,7 @@ void __attribute__((weak)) mach_reboot_f
15800 {
15801 }
15802
15803 -static void native_machine_emergency_restart(void)
15804 +__noreturn static void native_machine_emergency_restart(void)
15805 {
15806 int i;
15807
15808 @@ -651,13 +651,13 @@ void native_machine_shutdown(void)
15809 #endif
15810 }
15811
15812 -static void __machine_emergency_restart(int emergency)
15813 +static __noreturn void __machine_emergency_restart(int emergency)
15814 {
15815 reboot_emergency = emergency;
15816 machine_ops.emergency_restart();
15817 }
15818
15819 -static void native_machine_restart(char *__unused)
15820 +static __noreturn void native_machine_restart(char *__unused)
15821 {
15822 printk("machine restart\n");
15823
15824 @@ -666,7 +666,7 @@ static void native_machine_restart(char
15825 __machine_emergency_restart(0);
15826 }
15827
15828 -static void native_machine_halt(void)
15829 +static __noreturn void native_machine_halt(void)
15830 {
15831 /* stop other cpus and apics */
15832 machine_shutdown();
15833 @@ -677,7 +677,7 @@ static void native_machine_halt(void)
15834 stop_this_cpu(NULL);
15835 }
15836
15837 -static void native_machine_power_off(void)
15838 +__noreturn static void native_machine_power_off(void)
15839 {
15840 if (pm_power_off) {
15841 if (!reboot_force)
15842 @@ -686,6 +686,7 @@ static void native_machine_power_off(voi
15843 }
15844 /* a fallback in case there is no PM info available */
15845 tboot_shutdown(TB_SHUTDOWN_HALT);
15846 + do { } while (1);
15847 }
15848
15849 struct machine_ops machine_ops = {
15850 diff -urNp linux-2.6.32.41/arch/x86/kernel/setup.c linux-2.6.32.41/arch/x86/kernel/setup.c
15851 --- linux-2.6.32.41/arch/x86/kernel/setup.c 2011-04-17 17:00:52.000000000 -0400
15852 +++ linux-2.6.32.41/arch/x86/kernel/setup.c 2011-04-17 17:03:05.000000000 -0400
15853 @@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
15854
15855 if (!boot_params.hdr.root_flags)
15856 root_mountflags &= ~MS_RDONLY;
15857 - init_mm.start_code = (unsigned long) _text;
15858 - init_mm.end_code = (unsigned long) _etext;
15859 + init_mm.start_code = ktla_ktva((unsigned long) _text);
15860 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
15861 init_mm.end_data = (unsigned long) _edata;
15862 init_mm.brk = _brk_end;
15863
15864 - code_resource.start = virt_to_phys(_text);
15865 - code_resource.end = virt_to_phys(_etext)-1;
15866 - data_resource.start = virt_to_phys(_etext);
15867 + code_resource.start = virt_to_phys(ktla_ktva(_text));
15868 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
15869 + data_resource.start = virt_to_phys(_sdata);
15870 data_resource.end = virt_to_phys(_edata)-1;
15871 bss_resource.start = virt_to_phys(&__bss_start);
15872 bss_resource.end = virt_to_phys(&__bss_stop)-1;
15873 diff -urNp linux-2.6.32.41/arch/x86/kernel/setup_percpu.c linux-2.6.32.41/arch/x86/kernel/setup_percpu.c
15874 --- linux-2.6.32.41/arch/x86/kernel/setup_percpu.c 2011-03-27 14:31:47.000000000 -0400
15875 +++ linux-2.6.32.41/arch/x86/kernel/setup_percpu.c 2011-06-04 20:36:29.000000000 -0400
15876 @@ -25,19 +25,17 @@
15877 # define DBG(x...)
15878 #endif
15879
15880 -DEFINE_PER_CPU(int, cpu_number);
15881 +#ifdef CONFIG_SMP
15882 +DEFINE_PER_CPU(unsigned int, cpu_number);
15883 EXPORT_PER_CPU_SYMBOL(cpu_number);
15884 +#endif
15885
15886 -#ifdef CONFIG_X86_64
15887 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
15888 -#else
15889 -#define BOOT_PERCPU_OFFSET 0
15890 -#endif
15891
15892 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
15893 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
15894
15895 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
15896 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
15897 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
15898 };
15899 EXPORT_SYMBOL(__per_cpu_offset);
15900 @@ -159,10 +157,10 @@ static inline void setup_percpu_segment(
15901 {
15902 #ifdef CONFIG_X86_32
15903 struct desc_struct gdt;
15904 + unsigned long base = per_cpu_offset(cpu);
15905
15906 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
15907 - 0x2 | DESCTYPE_S, 0x8);
15908 - gdt.s = 1;
15909 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
15910 + 0x83 | DESCTYPE_S, 0xC);
15911 write_gdt_entry(get_cpu_gdt_table(cpu),
15912 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
15913 #endif
15914 @@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
15915 /* alrighty, percpu areas up and running */
15916 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
15917 for_each_possible_cpu(cpu) {
15918 +#ifdef CONFIG_CC_STACKPROTECTOR
15919 +#ifdef CONFIG_X86_32
15920 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
15921 +#endif
15922 +#endif
15923 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
15924 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
15925 per_cpu(cpu_number, cpu) = cpu;
15926 @@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
15927 early_per_cpu_map(x86_cpu_to_node_map, cpu);
15928 #endif
15929 #endif
15930 +#ifdef CONFIG_CC_STACKPROTECTOR
15931 +#ifdef CONFIG_X86_32
15932 + if (!cpu)
15933 + per_cpu(stack_canary.canary, cpu) = canary;
15934 +#endif
15935 +#endif
15936 /*
15937 * Up to this point, the boot CPU has been using .data.init
15938 * area. Reload any changed state for the boot CPU.
15939 diff -urNp linux-2.6.32.41/arch/x86/kernel/signal.c linux-2.6.32.41/arch/x86/kernel/signal.c
15940 --- linux-2.6.32.41/arch/x86/kernel/signal.c 2011-03-27 14:31:47.000000000 -0400
15941 +++ linux-2.6.32.41/arch/x86/kernel/signal.c 2011-05-22 23:02:03.000000000 -0400
15942 @@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsi
15943 * Align the stack pointer according to the i386 ABI,
15944 * i.e. so that on function entry ((sp + 4) & 15) == 0.
15945 */
15946 - sp = ((sp + 4) & -16ul) - 4;
15947 + sp = ((sp - 12) & -16ul) - 4;
15948 #else /* !CONFIG_X86_32 */
15949 sp = round_down(sp, 16) - 8;
15950 #endif
15951 @@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, str
15952 * Return an always-bogus address instead so we will die with SIGSEGV.
15953 */
15954 if (onsigstack && !likely(on_sig_stack(sp)))
15955 - return (void __user *)-1L;
15956 + return (__force void __user *)-1L;
15957
15958 /* save i387 state */
15959 if (used_math() && save_i387_xstate(*fpstate) < 0)
15960 - return (void __user *)-1L;
15961 + return (__force void __user *)-1L;
15962
15963 return (void __user *)sp;
15964 }
15965 @@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigactio
15966 }
15967
15968 if (current->mm->context.vdso)
15969 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
15970 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
15971 else
15972 - restorer = &frame->retcode;
15973 + restorer = (void __user *)&frame->retcode;
15974 if (ka->sa.sa_flags & SA_RESTORER)
15975 restorer = ka->sa.sa_restorer;
15976
15977 @@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigactio
15978 * reasons and because gdb uses it as a signature to notice
15979 * signal handler stack frames.
15980 */
15981 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
15982 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
15983
15984 if (err)
15985 return -EFAULT;
15986 @@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, str
15987 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
15988
15989 /* Set up to return from userspace. */
15990 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
15991 + if (current->mm->context.vdso)
15992 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
15993 + else
15994 + restorer = (void __user *)&frame->retcode;
15995 if (ka->sa.sa_flags & SA_RESTORER)
15996 restorer = ka->sa.sa_restorer;
15997 put_user_ex(restorer, &frame->pretcode);
15998 @@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, str
15999 * reasons and because gdb uses it as a signature to notice
16000 * signal handler stack frames.
16001 */
16002 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
16003 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
16004 } put_user_catch(err);
16005
16006 if (err)
16007 @@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *re
16008 int signr;
16009 sigset_t *oldset;
16010
16011 + pax_track_stack();
16012 +
16013 /*
16014 * We want the common case to go fast, which is why we may in certain
16015 * cases get here from kernel mode. Just return without doing anything
16016 @@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *re
16017 * X86_32: vm86 regs switched out by assembly code before reaching
16018 * here, so testing against kernel CS suffices.
16019 */
16020 - if (!user_mode(regs))
16021 + if (!user_mode_novm(regs))
16022 return;
16023
16024 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
16025 diff -urNp linux-2.6.32.41/arch/x86/kernel/smpboot.c linux-2.6.32.41/arch/x86/kernel/smpboot.c
16026 --- linux-2.6.32.41/arch/x86/kernel/smpboot.c 2011-03-27 14:31:47.000000000 -0400
16027 +++ linux-2.6.32.41/arch/x86/kernel/smpboot.c 2011-05-11 18:25:15.000000000 -0400
16028 @@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct
16029 */
16030 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
16031
16032 -void cpu_hotplug_driver_lock()
16033 +void cpu_hotplug_driver_lock(void)
16034 {
16035 - mutex_lock(&x86_cpu_hotplug_driver_mutex);
16036 + mutex_lock(&x86_cpu_hotplug_driver_mutex);
16037 }
16038
16039 -void cpu_hotplug_driver_unlock()
16040 +void cpu_hotplug_driver_unlock(void)
16041 {
16042 - mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16043 + mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16044 }
16045
16046 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
16047 @@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int api
16048 set_idle_for_cpu(cpu, c_idle.idle);
16049 do_rest:
16050 per_cpu(current_task, cpu) = c_idle.idle;
16051 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
16052 #ifdef CONFIG_X86_32
16053 /* Stack for startup_32 can be just as for start_secondary onwards */
16054 irq_ctx_init(cpu);
16055 @@ -750,11 +751,13 @@ do_rest:
16056 #else
16057 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
16058 initial_gs = per_cpu_offset(cpu);
16059 - per_cpu(kernel_stack, cpu) =
16060 - (unsigned long)task_stack_page(c_idle.idle) -
16061 - KERNEL_STACK_OFFSET + THREAD_SIZE;
16062 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
16063 #endif
16064 +
16065 + pax_open_kernel();
16066 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
16067 + pax_close_kernel();
16068 +
16069 initial_code = (unsigned long)start_secondary;
16070 stack_start.sp = (void *) c_idle.idle->thread.sp;
16071
16072 @@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int
16073
16074 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
16075
16076 +#ifdef CONFIG_PAX_PER_CPU_PGD
16077 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
16078 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16079 + KERNEL_PGD_PTRS);
16080 +#endif
16081 +
16082 err = do_boot_cpu(apicid, cpu);
16083
16084 if (err) {
16085 diff -urNp linux-2.6.32.41/arch/x86/kernel/step.c linux-2.6.32.41/arch/x86/kernel/step.c
16086 --- linux-2.6.32.41/arch/x86/kernel/step.c 2011-03-27 14:31:47.000000000 -0400
16087 +++ linux-2.6.32.41/arch/x86/kernel/step.c 2011-04-17 15:56:46.000000000 -0400
16088 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
16089 struct desc_struct *desc;
16090 unsigned long base;
16091
16092 - seg &= ~7UL;
16093 + seg >>= 3;
16094
16095 mutex_lock(&child->mm->context.lock);
16096 - if (unlikely((seg >> 3) >= child->mm->context.size))
16097 + if (unlikely(seg >= child->mm->context.size))
16098 addr = -1L; /* bogus selector, access would fault */
16099 else {
16100 desc = child->mm->context.ldt + seg;
16101 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
16102 addr += base;
16103 }
16104 mutex_unlock(&child->mm->context.lock);
16105 - }
16106 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
16107 + addr = ktla_ktva(addr);
16108
16109 return addr;
16110 }
16111 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
16112 unsigned char opcode[15];
16113 unsigned long addr = convert_ip_to_linear(child, regs);
16114
16115 + if (addr == -EINVAL)
16116 + return 0;
16117 +
16118 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
16119 for (i = 0; i < copied; i++) {
16120 switch (opcode[i]) {
16121 @@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
16122
16123 #ifdef CONFIG_X86_64
16124 case 0x40 ... 0x4f:
16125 - if (regs->cs != __USER_CS)
16126 + if ((regs->cs & 0xffff) != __USER_CS)
16127 /* 32-bit mode: register increment */
16128 return 0;
16129 /* 64-bit mode: REX prefix */
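[Editor's sketch] The step.c hunk above converts an LDT selector to a descriptor index up front (seg >>= 3) instead of masking it to a byte offset (seg &= ~7UL), and, for kernel code segments, translates the address through ktla_ktva(). A short standalone illustration of the x86 selector layout this relies on (bits 0-1: RPL, bit 2: table indicator, bits 3 and up: descriptor index); the selector value is only an example and the snippet is not part of the patch.

#include <stdio.h>

int main(void)
{
        unsigned long seg = 0x73;               /* example selector: index 14, TI = 0, RPL = 3 */
        unsigned long byte_off = seg & ~7UL;    /* 0x70: byte offset of the descriptor */
        unsigned long index = seg >> 3;         /* 14:   index of the descriptor       */

        printf("offset=%#lx index=%lu\n", byte_off, index);
        return 0;
}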
16130 diff -urNp linux-2.6.32.41/arch/x86/kernel/syscall_table_32.S linux-2.6.32.41/arch/x86/kernel/syscall_table_32.S
16131 --- linux-2.6.32.41/arch/x86/kernel/syscall_table_32.S 2011-03-27 14:31:47.000000000 -0400
16132 +++ linux-2.6.32.41/arch/x86/kernel/syscall_table_32.S 2011-04-17 15:56:46.000000000 -0400
16133 @@ -1,3 +1,4 @@
16134 +.section .rodata,"a",@progbits
16135 ENTRY(sys_call_table)
16136 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
16137 .long sys_exit
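[Editor's sketch] The one-line syscall_table_32.S change places the 32-bit system call table in .rodata ("a" = allocatable, with no "w" flag), so the table ends up in a read-only mapping rather than writable data. A rough user-space analogue, not part of the patch: const-qualifying a static function-pointer table lets the toolchain emit it into a read-only section (.rodata, or .data.rel.ro made read-only after relocation under PIE/RELRO), which can be checked with readelf -S or objdump.

#include <stdio.h>

static long sys_hello(void) { return puts("hello"); }
static long sys_bye(void)   { return puts("bye"); }

/* const-qualified table of function pointers: emitted into a read-only section */
static long (* const call_table[])(void) = { sys_hello, sys_bye };

int main(void)
{
        call_table[1]();        /* dispatch through the read-only table */
        return 0;
}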
16138 diff -urNp linux-2.6.32.41/arch/x86/kernel/sys_i386_32.c linux-2.6.32.41/arch/x86/kernel/sys_i386_32.c
16139 --- linux-2.6.32.41/arch/x86/kernel/sys_i386_32.c 2011-03-27 14:31:47.000000000 -0400
16140 +++ linux-2.6.32.41/arch/x86/kernel/sys_i386_32.c 2011-04-17 15:56:46.000000000 -0400
16141 @@ -24,6 +24,21 @@
16142
16143 #include <asm/syscalls.h>
16144
16145 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
16146 +{
16147 + unsigned long pax_task_size = TASK_SIZE;
16148 +
16149 +#ifdef CONFIG_PAX_SEGMEXEC
16150 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
16151 + pax_task_size = SEGMEXEC_TASK_SIZE;
16152 +#endif
16153 +
16154 + if (len > pax_task_size || addr > pax_task_size - len)
16155 + return -EINVAL;
16156 +
16157 + return 0;
16158 +}
16159 +
16160 /*
16161 * Perform the select(nd, in, out, ex, tv) and mmap() system
16162 * calls. Linux/i386 didn't use to be able to handle more than
16163 @@ -58,6 +73,212 @@ out:
16164 return err;
16165 }
16166
16167 +unsigned long
16168 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
16169 + unsigned long len, unsigned long pgoff, unsigned long flags)
16170 +{
16171 + struct mm_struct *mm = current->mm;
16172 + struct vm_area_struct *vma;
16173 + unsigned long start_addr, pax_task_size = TASK_SIZE;
16174 +
16175 +#ifdef CONFIG_PAX_SEGMEXEC
16176 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
16177 + pax_task_size = SEGMEXEC_TASK_SIZE;
16178 +#endif
16179 +
16180 + pax_task_size -= PAGE_SIZE;
16181 +
16182 + if (len > pax_task_size)
16183 + return -ENOMEM;
16184 +
16185 + if (flags & MAP_FIXED)
16186 + return addr;
16187 +
16188 +#ifdef CONFIG_PAX_RANDMMAP
16189 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16190 +#endif
16191 +
16192 + if (addr) {
16193 + addr = PAGE_ALIGN(addr);
16194 + if (pax_task_size - len >= addr) {
16195 + vma = find_vma(mm, addr);
16196 + if (check_heap_stack_gap(vma, addr, len))
16197 + return addr;
16198 + }
16199 + }
16200 + if (len > mm->cached_hole_size) {
16201 + start_addr = addr = mm->free_area_cache;
16202 + } else {
16203 + start_addr = addr = mm->mmap_base;
16204 + mm->cached_hole_size = 0;
16205 + }
16206 +
16207 +#ifdef CONFIG_PAX_PAGEEXEC
16208 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
16209 + start_addr = 0x00110000UL;
16210 +
16211 +#ifdef CONFIG_PAX_RANDMMAP
16212 + if (mm->pax_flags & MF_PAX_RANDMMAP)
16213 + start_addr += mm->delta_mmap & 0x03FFF000UL;
16214 +#endif
16215 +
16216 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
16217 + start_addr = addr = mm->mmap_base;
16218 + else
16219 + addr = start_addr;
16220 + }
16221 +#endif
16222 +
16223 +full_search:
16224 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
16225 + /* At this point: (!vma || addr < vma->vm_end). */
16226 + if (pax_task_size - len < addr) {
16227 + /*
16228 + * Start a new search - just in case we missed
16229 + * some holes.
16230 + */
16231 + if (start_addr != mm->mmap_base) {
16232 + start_addr = addr = mm->mmap_base;
16233 + mm->cached_hole_size = 0;
16234 + goto full_search;
16235 + }
16236 + return -ENOMEM;
16237 + }
16238 + if (check_heap_stack_gap(vma, addr, len))
16239 + break;
16240 + if (addr + mm->cached_hole_size < vma->vm_start)
16241 + mm->cached_hole_size = vma->vm_start - addr;
16242 + addr = vma->vm_end;
16243 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
16244 + start_addr = addr = mm->mmap_base;
16245 + mm->cached_hole_size = 0;
16246 + goto full_search;
16247 + }
16248 + }
16249 +
16250 + /*
16251 + * Remember the place where we stopped the search:
16252 + */
16253 + mm->free_area_cache = addr + len;
16254 + return addr;
16255 +}
16256 +
16257 +unsigned long
16258 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
16259 + const unsigned long len, const unsigned long pgoff,
16260 + const unsigned long flags)
16261 +{
16262 + struct vm_area_struct *vma;
16263 + struct mm_struct *mm = current->mm;
16264 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
16265 +
16266 +#ifdef CONFIG_PAX_SEGMEXEC
16267 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
16268 + pax_task_size = SEGMEXEC_TASK_SIZE;
16269 +#endif
16270 +
16271 + pax_task_size -= PAGE_SIZE;
16272 +
16273 + /* requested length too big for entire address space */
16274 + if (len > pax_task_size)
16275 + return -ENOMEM;
16276 +
16277 + if (flags & MAP_FIXED)
16278 + return addr;
16279 +
16280 +#ifdef CONFIG_PAX_PAGEEXEC
16281 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
16282 + goto bottomup;
16283 +#endif
16284 +
16285 +#ifdef CONFIG_PAX_RANDMMAP
16286 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16287 +#endif
16288 +
16289 + /* requesting a specific address */
16290 + if (addr) {
16291 + addr = PAGE_ALIGN(addr);
16292 + if (pax_task_size - len >= addr) {
16293 + vma = find_vma(mm, addr);
16294 + if (check_heap_stack_gap(vma, addr, len))
16295 + return addr;
16296 + }
16297 + }
16298 +
16299 + /* check if free_area_cache is useful for us */
16300 + if (len <= mm->cached_hole_size) {
16301 + mm->cached_hole_size = 0;
16302 + mm->free_area_cache = mm->mmap_base;
16303 + }
16304 +
16305 + /* either no address requested or can't fit in requested address hole */
16306 + addr = mm->free_area_cache;
16307 +
16308 + /* make sure it can fit in the remaining address space */
16309 + if (addr > len) {
16310 + vma = find_vma(mm, addr-len);
16311 + if (check_heap_stack_gap(vma, addr - len, len))
16312 + /* remember the address as a hint for next time */
16313 + return (mm->free_area_cache = addr-len);
16314 + }
16315 +
16316 + if (mm->mmap_base < len)
16317 + goto bottomup;
16318 +
16319 + addr = mm->mmap_base-len;
16320 +
16321 + do {
16322 + /*
16323 + * Lookup failure means no vma is above this address,
16324 + * else if new region fits below vma->vm_start,
16325 + * return with success:
16326 + */
16327 + vma = find_vma(mm, addr);
16328 + if (check_heap_stack_gap(vma, addr, len))
16329 + /* remember the address as a hint for next time */
16330 + return (mm->free_area_cache = addr);
16331 +
16332 + /* remember the largest hole we saw so far */
16333 + if (addr + mm->cached_hole_size < vma->vm_start)
16334 + mm->cached_hole_size = vma->vm_start - addr;
16335 +
16336 + /* try just below the current vma->vm_start */
16337 + addr = skip_heap_stack_gap(vma, len);
16338 + } while (!IS_ERR_VALUE(addr));
16339 +
16340 +bottomup:
16341 + /*
16342 + * A failed mmap() very likely causes application failure,
16343 + * so fall back to the bottom-up function here. This scenario
16344 + * can happen with large stack limits and large mmap()
16345 + * allocations.
16346 + */
16347 +
16348 +#ifdef CONFIG_PAX_SEGMEXEC
16349 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
16350 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
16351 + else
16352 +#endif
16353 +
16354 + mm->mmap_base = TASK_UNMAPPED_BASE;
16355 +
16356 +#ifdef CONFIG_PAX_RANDMMAP
16357 + if (mm->pax_flags & MF_PAX_RANDMMAP)
16358 + mm->mmap_base += mm->delta_mmap;
16359 +#endif
16360 +
16361 + mm->free_area_cache = mm->mmap_base;
16362 + mm->cached_hole_size = ~0UL;
16363 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
16364 + /*
16365 + * Restore the topdown base:
16366 + */
16367 + mm->mmap_base = base;
16368 + mm->free_area_cache = base;
16369 + mm->cached_hole_size = ~0UL;
16370 +
16371 + return addr;
16372 +}
16373
16374 struct sel_arg_struct {
16375 unsigned long n;
16376 @@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int fi
16377 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
16378 case SEMTIMEDOP:
16379 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
16380 - (const struct timespec __user *)fifth);
16381 + (__force const struct timespec __user *)fifth);
16382
16383 case SEMGET:
16384 return sys_semget(first, second, third);
16385 @@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int fi
16386 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
16387 if (ret)
16388 return ret;
16389 - return put_user(raddr, (ulong __user *) third);
16390 + return put_user(raddr, (__force ulong __user *) third);
16391 }
16392 case 1: /* iBCS2 emulator entry point */
16393 if (!segment_eq(get_fs(), get_ds()))
16394 @@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldol
16395
16396 return error;
16397 }
16398 -
16399 -
16400 -/*
16401 - * Do a system call from kernel instead of calling sys_execve so we
16402 - * end up with proper pt_regs.
16403 - */
16404 -int kernel_execve(const char *filename, char *const argv[], char *const envp[])
16405 -{
16406 - long __res;
16407 - asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
16408 - : "=a" (__res)
16409 - : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
16410 - return __res;
16411 -}
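[Editor's sketch] The sys_i386_32.c additions above provide an i386-specific mmap length/address check plus arch_get_unmapped_area()/..._topdown() variants that honour the SEGMEXEC task size and RANDMMAP base and that consult check_heap_stack_gap()/skip_heap_stack_gap() instead of the stock "!vma || addr + len <= vma->vm_start" test. Those helpers are defined elsewhere in the patch; the toy program below is not the patch's implementation, it only illustrates the kind of gap-aware placement test the name suggests, with invented field names and an arbitrary gap size.

#include <stdbool.h>
#include <stdio.h>

/* Toy VMA: only the fields the sketch needs. */
struct toy_vma {
        unsigned long vm_start, vm_end;
        bool grows_down;                        /* stack-like mapping */
};

#define TOY_GUARD_GAP (64UL * 1024)             /* hypothetical gap size, purely illustrative */

/* Stock test: the candidate range must end at or before the next mapping. */
static bool stock_fits(const struct toy_vma *next, unsigned long addr, unsigned long len)
{
        return !next || addr + len <= next->vm_start;
}

/* Gap-aware test: additionally keep a hole below a stack-like mapping. */
static bool gap_aware_fits(const struct toy_vma *next, unsigned long addr, unsigned long len)
{
        if (!next)
                return true;
        if (next->grows_down)
                return addr + len + TOY_GUARD_GAP <= next->vm_start;
        return addr + len <= next->vm_start;
}

int main(void)
{
        struct toy_vma stack = { 0x7fe0000UL, 0x8000000UL, true };
        unsigned long addr = 0x7fd0000UL, len = 0x10000UL;

        printf("stock=%d gap_aware=%d\n",
               stock_fits(&stack, addr, len), gap_aware_fits(&stack, addr, len));
        return 0;
}

In this toy case the stock test accepts a range that abuts the stack mapping, while the gap-aware test rejects it and the allocator would keep searching.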
16412 diff -urNp linux-2.6.32.41/arch/x86/kernel/sys_x86_64.c linux-2.6.32.41/arch/x86/kernel/sys_x86_64.c
16413 --- linux-2.6.32.41/arch/x86/kernel/sys_x86_64.c 2011-03-27 14:31:47.000000000 -0400
16414 +++ linux-2.6.32.41/arch/x86/kernel/sys_x86_64.c 2011-04-17 15:56:46.000000000 -0400
16415 @@ -32,8 +32,8 @@ out:
16416 return error;
16417 }
16418
16419 -static void find_start_end(unsigned long flags, unsigned long *begin,
16420 - unsigned long *end)
16421 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
16422 + unsigned long *begin, unsigned long *end)
16423 {
16424 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
16425 unsigned long new_begin;
16426 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long
16427 *begin = new_begin;
16428 }
16429 } else {
16430 - *begin = TASK_UNMAPPED_BASE;
16431 + *begin = mm->mmap_base;
16432 *end = TASK_SIZE;
16433 }
16434 }
16435 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
16436 if (flags & MAP_FIXED)
16437 return addr;
16438
16439 - find_start_end(flags, &begin, &end);
16440 + find_start_end(mm, flags, &begin, &end);
16441
16442 if (len > end)
16443 return -ENOMEM;
16444
16445 +#ifdef CONFIG_PAX_RANDMMAP
16446 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16447 +#endif
16448 +
16449 if (addr) {
16450 addr = PAGE_ALIGN(addr);
16451 vma = find_vma(mm, addr);
16452 - if (end - len >= addr &&
16453 - (!vma || addr + len <= vma->vm_start))
16454 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
16455 return addr;
16456 }
16457 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
16458 @@ -106,7 +109,7 @@ full_search:
16459 }
16460 return -ENOMEM;
16461 }
16462 - if (!vma || addr + len <= vma->vm_start) {
16463 + if (check_heap_stack_gap(vma, addr, len)) {
16464 /*
16465 * Remember the place where we stopped the search:
16466 */
16467 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
16468 {
16469 struct vm_area_struct *vma;
16470 struct mm_struct *mm = current->mm;
16471 - unsigned long addr = addr0;
16472 + unsigned long base = mm->mmap_base, addr = addr0;
16473
16474 /* requested length too big for entire address space */
16475 if (len > TASK_SIZE)
16476 @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
16477 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
16478 goto bottomup;
16479
16480 +#ifdef CONFIG_PAX_RANDMMAP
16481 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16482 +#endif
16483 +
16484 /* requesting a specific address */
16485 if (addr) {
16486 addr = PAGE_ALIGN(addr);
16487 - vma = find_vma(mm, addr);
16488 - if (TASK_SIZE - len >= addr &&
16489 - (!vma || addr + len <= vma->vm_start))
16490 - return addr;
16491 + if (TASK_SIZE - len >= addr) {
16492 + vma = find_vma(mm, addr);
16493 + if (check_heap_stack_gap(vma, addr, len))
16494 + return addr;
16495 + }
16496 }
16497
16498 /* check if free_area_cache is useful for us */
16499 @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
16500 /* make sure it can fit in the remaining address space */
16501 if (addr > len) {
16502 vma = find_vma(mm, addr-len);
16503 - if (!vma || addr <= vma->vm_start)
16504 + if (check_heap_stack_gap(vma, addr - len, len))
16505 /* remember the address as a hint for next time */
16506 return mm->free_area_cache = addr-len;
16507 }
16508 @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
16509 * return with success:
16510 */
16511 vma = find_vma(mm, addr);
16512 - if (!vma || addr+len <= vma->vm_start)
16513 + if (check_heap_stack_gap(vma, addr, len))
16514 /* remember the address as a hint for next time */
16515 return mm->free_area_cache = addr;
16516
16517 @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
16518 mm->cached_hole_size = vma->vm_start - addr;
16519
16520 /* try just below the current vma->vm_start */
16521 - addr = vma->vm_start-len;
16522 - } while (len < vma->vm_start);
16523 + addr = skip_heap_stack_gap(vma, len);
16524 + } while (!IS_ERR_VALUE(addr));
16525
16526 bottomup:
16527 /*
16528 @@ -198,13 +206,21 @@ bottomup:
16529 * can happen with large stack limits and large mmap()
16530 * allocations.
16531 */
16532 + mm->mmap_base = TASK_UNMAPPED_BASE;
16533 +
16534 +#ifdef CONFIG_PAX_RANDMMAP
16535 + if (mm->pax_flags & MF_PAX_RANDMMAP)
16536 + mm->mmap_base += mm->delta_mmap;
16537 +#endif
16538 +
16539 + mm->free_area_cache = mm->mmap_base;
16540 mm->cached_hole_size = ~0UL;
16541 - mm->free_area_cache = TASK_UNMAPPED_BASE;
16542 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
16543 /*
16544 * Restore the topdown base:
16545 */
16546 - mm->free_area_cache = mm->mmap_base;
16547 + mm->mmap_base = base;
16548 + mm->free_area_cache = base;
16549 mm->cached_hole_size = ~0UL;
16550
16551 return addr;
16552 diff -urNp linux-2.6.32.41/arch/x86/kernel/tboot.c linux-2.6.32.41/arch/x86/kernel/tboot.c
16553 --- linux-2.6.32.41/arch/x86/kernel/tboot.c 2011-03-27 14:31:47.000000000 -0400
16554 +++ linux-2.6.32.41/arch/x86/kernel/tboot.c 2011-05-22 23:02:03.000000000 -0400
16555 @@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
16556
16557 void tboot_shutdown(u32 shutdown_type)
16558 {
16559 - void (*shutdown)(void);
16560 + void (* __noreturn shutdown)(void);
16561
16562 if (!tboot_enabled())
16563 return;
16564 @@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
16565
16566 switch_to_tboot_pt();
16567
16568 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
16569 + shutdown = (void *)tboot->shutdown_entry;
16570 shutdown();
16571
16572 /* should not reach here */
16573 @@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
16574 tboot_shutdown(acpi_shutdown_map[sleep_state]);
16575 }
16576
16577 -static atomic_t ap_wfs_count;
16578 +static atomic_unchecked_t ap_wfs_count;
16579
16580 static int tboot_wait_for_aps(int num_aps)
16581 {
16582 @@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(
16583 {
16584 switch (action) {
16585 case CPU_DYING:
16586 - atomic_inc(&ap_wfs_count);
16587 + atomic_inc_unchecked(&ap_wfs_count);
16588 if (num_online_cpus() == 1)
16589 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
16590 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
16591 return NOTIFY_BAD;
16592 break;
16593 }
16594 @@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
16595
16596 tboot_create_trampoline();
16597
16598 - atomic_set(&ap_wfs_count, 0);
16599 + atomic_set_unchecked(&ap_wfs_count, 0);
16600 register_hotcpu_notifier(&tboot_cpu_notifier);
16601 return 0;
16602 }
16603 diff -urNp linux-2.6.32.41/arch/x86/kernel/time.c linux-2.6.32.41/arch/x86/kernel/time.c
16604 --- linux-2.6.32.41/arch/x86/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
16605 +++ linux-2.6.32.41/arch/x86/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
16606 @@ -26,17 +26,13 @@
16607 int timer_ack;
16608 #endif
16609
16610 -#ifdef CONFIG_X86_64
16611 -volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
16612 -#endif
16613 -
16614 unsigned long profile_pc(struct pt_regs *regs)
16615 {
16616 unsigned long pc = instruction_pointer(regs);
16617
16618 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
16619 + if (!user_mode(regs) && in_lock_functions(pc)) {
16620 #ifdef CONFIG_FRAME_POINTER
16621 - return *(unsigned long *)(regs->bp + sizeof(long));
16622 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
16623 #else
16624 unsigned long *sp =
16625 (unsigned long *)kernel_stack_pointer(regs);
16626 @@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
16627 * or above a saved flags. Eflags has bits 22-31 zero,
16628 * kernel addresses don't.
16629 */
16630 +
16631 +#ifdef CONFIG_PAX_KERNEXEC
16632 + return ktla_ktva(sp[0]);
16633 +#else
16634 if (sp[0] >> 22)
16635 return sp[0];
16636 if (sp[1] >> 22)
16637 return sp[1];
16638 #endif
16639 +
16640 +#endif
16641 }
16642 return pc;
16643 }
16644 diff -urNp linux-2.6.32.41/arch/x86/kernel/tls.c linux-2.6.32.41/arch/x86/kernel/tls.c
16645 --- linux-2.6.32.41/arch/x86/kernel/tls.c 2011-03-27 14:31:47.000000000 -0400
16646 +++ linux-2.6.32.41/arch/x86/kernel/tls.c 2011-04-17 15:56:46.000000000 -0400
16647 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
16648 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
16649 return -EINVAL;
16650
16651 +#ifdef CONFIG_PAX_SEGMEXEC
16652 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
16653 + return -EINVAL;
16654 +#endif
16655 +
16656 set_tls_desc(p, idx, &info, 1);
16657
16658 return 0;
16659 diff -urNp linux-2.6.32.41/arch/x86/kernel/trampoline_32.S linux-2.6.32.41/arch/x86/kernel/trampoline_32.S
16660 --- linux-2.6.32.41/arch/x86/kernel/trampoline_32.S 2011-03-27 14:31:47.000000000 -0400
16661 +++ linux-2.6.32.41/arch/x86/kernel/trampoline_32.S 2011-04-17 15:56:46.000000000 -0400
16662 @@ -32,6 +32,12 @@
16663 #include <asm/segment.h>
16664 #include <asm/page_types.h>
16665
16666 +#ifdef CONFIG_PAX_KERNEXEC
16667 +#define ta(X) (X)
16668 +#else
16669 +#define ta(X) ((X) - __PAGE_OFFSET)
16670 +#endif
16671 +
16672 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
16673 __CPUINITRODATA
16674 .code16
16675 @@ -60,7 +66,7 @@ r_base = .
16676 inc %ax # protected mode (PE) bit
16677 lmsw %ax # into protected mode
16678 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
16679 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
16680 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
16681
16682 # These need to be in the same 64K segment as the above;
16683 # hence we don't use the boot_gdt_descr defined in head.S
16684 diff -urNp linux-2.6.32.41/arch/x86/kernel/trampoline_64.S linux-2.6.32.41/arch/x86/kernel/trampoline_64.S
16685 --- linux-2.6.32.41/arch/x86/kernel/trampoline_64.S 2011-03-27 14:31:47.000000000 -0400
16686 +++ linux-2.6.32.41/arch/x86/kernel/trampoline_64.S 2011-04-17 15:56:46.000000000 -0400
16687 @@ -91,7 +91,7 @@ startup_32:
16688 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
16689 movl %eax, %ds
16690
16691 - movl $X86_CR4_PAE, %eax
16692 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
16693 movl %eax, %cr4 # Enable PAE mode
16694
16695 # Setup trampoline 4 level pagetables
16696 @@ -138,7 +138,7 @@ tidt:
16697 # so the kernel can live anywhere
16698 .balign 4
16699 tgdt:
16700 - .short tgdt_end - tgdt # gdt limit
16701 + .short tgdt_end - tgdt - 1 # gdt limit
16702 .long tgdt - r_base
16703 .short 0
16704 .quad 0x00cf9b000000ffff # __KERNEL32_CS
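[Editor's sketch] The trampoline_64.S hunk corrects the trampoline GDT pseudo-descriptor: the limit consumed by LGDT is the offset of the last valid byte, i.e. table size minus one, hence "tgdt_end - tgdt - 1". The user-space snippet below, not part of the patch, only illustrates that convention by filling in such a structure; it never loads it.

#include <stdint.h>
#include <stdio.h>

struct gdt_ptr {
        uint16_t limit;         /* offset of the last valid byte: size - 1 */
        uint64_t base;          /* 32-bit in legacy mode, 64-bit in long mode */
} __attribute__((packed));

int main(void)
{
        uint64_t gdt[3] = { 0 };                        /* three 8-byte descriptors */
        struct gdt_ptr p = { sizeof(gdt) - 1, (uint64_t)(unsigned long)gdt };

        printf("limit=%u (table size %zu)\n", p.limit, sizeof(gdt));
        return 0;
}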
16705 diff -urNp linux-2.6.32.41/arch/x86/kernel/traps.c linux-2.6.32.41/arch/x86/kernel/traps.c
16706 --- linux-2.6.32.41/arch/x86/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
16707 +++ linux-2.6.32.41/arch/x86/kernel/traps.c 2011-04-17 15:56:46.000000000 -0400
16708 @@ -69,12 +69,6 @@ asmlinkage int system_call(void);
16709
16710 /* Do we ignore FPU interrupts ? */
16711 char ignore_fpu_irq;
16712 -
16713 -/*
16714 - * The IDT has to be page-aligned to simplify the Pentium
16715 - * F0 0F bug workaround.
16716 - */
16717 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
16718 #endif
16719
16720 DECLARE_BITMAP(used_vectors, NR_VECTORS);
16721 @@ -112,19 +106,19 @@ static inline void preempt_conditional_c
16722 static inline void
16723 die_if_kernel(const char *str, struct pt_regs *regs, long err)
16724 {
16725 - if (!user_mode_vm(regs))
16726 + if (!user_mode(regs))
16727 die(str, regs, err);
16728 }
16729 #endif
16730
16731 static void __kprobes
16732 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
16733 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
16734 long error_code, siginfo_t *info)
16735 {
16736 struct task_struct *tsk = current;
16737
16738 #ifdef CONFIG_X86_32
16739 - if (regs->flags & X86_VM_MASK) {
16740 + if (v8086_mode(regs)) {
16741 /*
16742 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
16743 * On nmi (interrupt 2), do_trap should not be called.
16744 @@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str
16745 }
16746 #endif
16747
16748 - if (!user_mode(regs))
16749 + if (!user_mode_novm(regs))
16750 goto kernel_trap;
16751
16752 #ifdef CONFIG_X86_32
16753 @@ -158,7 +152,7 @@ trap_signal:
16754 printk_ratelimit()) {
16755 printk(KERN_INFO
16756 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
16757 - tsk->comm, tsk->pid, str,
16758 + tsk->comm, task_pid_nr(tsk), str,
16759 regs->ip, regs->sp, error_code);
16760 print_vma_addr(" in ", regs->ip);
16761 printk("\n");
16762 @@ -175,8 +169,20 @@ kernel_trap:
16763 if (!fixup_exception(regs)) {
16764 tsk->thread.error_code = error_code;
16765 tsk->thread.trap_no = trapnr;
16766 +
16767 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16768 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
16769 + str = "PAX: suspicious stack segment fault";
16770 +#endif
16771 +
16772 die(str, regs, error_code);
16773 }
16774 +
16775 +#ifdef CONFIG_PAX_REFCOUNT
16776 + if (trapnr == 4)
16777 + pax_report_refcount_overflow(regs);
16778 +#endif
16779 +
16780 return;
16781
16782 #ifdef CONFIG_X86_32
16783 @@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *re
16784 conditional_sti(regs);
16785
16786 #ifdef CONFIG_X86_32
16787 - if (regs->flags & X86_VM_MASK)
16788 + if (v8086_mode(regs))
16789 goto gp_in_vm86;
16790 #endif
16791
16792 tsk = current;
16793 - if (!user_mode(regs))
16794 + if (!user_mode_novm(regs))
16795 goto gp_in_kernel;
16796
16797 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
16798 + if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
16799 + struct mm_struct *mm = tsk->mm;
16800 + unsigned long limit;
16801 +
16802 + down_write(&mm->mmap_sem);
16803 + limit = mm->context.user_cs_limit;
16804 + if (limit < TASK_SIZE) {
16805 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
16806 + up_write(&mm->mmap_sem);
16807 + return;
16808 + }
16809 + up_write(&mm->mmap_sem);
16810 + }
16811 +#endif
16812 +
16813 tsk->thread.error_code = error_code;
16814 tsk->thread.trap_no = 13;
16815
16816 @@ -305,6 +327,13 @@ gp_in_kernel:
16817 if (notify_die(DIE_GPF, "general protection fault", regs,
16818 error_code, 13, SIGSEGV) == NOTIFY_STOP)
16819 return;
16820 +
16821 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16822 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
16823 + die("PAX: suspicious general protection fault", regs, error_code);
16824 + else
16825 +#endif
16826 +
16827 die("general protection fault", regs, error_code);
16828 }
16829
16830 @@ -558,7 +587,7 @@ dotraplinkage void __kprobes do_debug(st
16831 }
16832
16833 #ifdef CONFIG_X86_32
16834 - if (regs->flags & X86_VM_MASK)
16835 + if (v8086_mode(regs))
16836 goto debug_vm86;
16837 #endif
16838
16839 @@ -570,7 +599,7 @@ dotraplinkage void __kprobes do_debug(st
16840 * kernel space (but re-enable TF when returning to user mode).
16841 */
16842 if (condition & DR_STEP) {
16843 - if (!user_mode(regs))
16844 + if (!user_mode_novm(regs))
16845 goto clear_TF_reenable;
16846 }
16847
16848 @@ -757,7 +786,7 @@ do_simd_coprocessor_error(struct pt_regs
16849 * Handle strange cache flush from user space exception
16850 * in all other cases. This is undocumented behaviour.
16851 */
16852 - if (regs->flags & X86_VM_MASK) {
16853 + if (v8086_mode(regs)) {
16854 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
16855 return;
16856 }
16857 @@ -798,7 +827,7 @@ asmlinkage void __attribute__((weak)) sm
16858 void __math_state_restore(void)
16859 {
16860 struct thread_info *thread = current_thread_info();
16861 - struct task_struct *tsk = thread->task;
16862 + struct task_struct *tsk = current;
16863
16864 /*
16865 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
16866 @@ -825,8 +854,7 @@ void __math_state_restore(void)
16867 */
16868 asmlinkage void math_state_restore(void)
16869 {
16870 - struct thread_info *thread = current_thread_info();
16871 - struct task_struct *tsk = thread->task;
16872 + struct task_struct *tsk = current;
16873
16874 if (!tsk_used_math(tsk)) {
16875 local_irq_enable();
16876 diff -urNp linux-2.6.32.41/arch/x86/kernel/vm86_32.c linux-2.6.32.41/arch/x86/kernel/vm86_32.c
16877 --- linux-2.6.32.41/arch/x86/kernel/vm86_32.c 2011-03-27 14:31:47.000000000 -0400
16878 +++ linux-2.6.32.41/arch/x86/kernel/vm86_32.c 2011-04-17 15:56:46.000000000 -0400
16879 @@ -41,6 +41,7 @@
16880 #include <linux/ptrace.h>
16881 #include <linux/audit.h>
16882 #include <linux/stddef.h>
16883 +#include <linux/grsecurity.h>
16884
16885 #include <asm/uaccess.h>
16886 #include <asm/io.h>
16887 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
16888 do_exit(SIGSEGV);
16889 }
16890
16891 - tss = &per_cpu(init_tss, get_cpu());
16892 + tss = init_tss + get_cpu();
16893 current->thread.sp0 = current->thread.saved_sp0;
16894 current->thread.sysenter_cs = __KERNEL_CS;
16895 load_sp0(tss, &current->thread);
16896 @@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
16897 struct task_struct *tsk;
16898 int tmp, ret = -EPERM;
16899
16900 +#ifdef CONFIG_GRKERNSEC_VM86
16901 + if (!capable(CAP_SYS_RAWIO)) {
16902 + gr_handle_vm86();
16903 + goto out;
16904 + }
16905 +#endif
16906 +
16907 tsk = current;
16908 if (tsk->thread.saved_sp0)
16909 goto out;
16910 @@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
16911 int tmp, ret;
16912 struct vm86plus_struct __user *v86;
16913
16914 +#ifdef CONFIG_GRKERNSEC_VM86
16915 + if (!capable(CAP_SYS_RAWIO)) {
16916 + gr_handle_vm86();
16917 + ret = -EPERM;
16918 + goto out;
16919 + }
16920 +#endif
16921 +
16922 tsk = current;
16923 switch (regs->bx) {
16924 case VM86_REQUEST_IRQ:
16925 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
16926 tsk->thread.saved_fs = info->regs32->fs;
16927 tsk->thread.saved_gs = get_user_gs(info->regs32);
16928
16929 - tss = &per_cpu(init_tss, get_cpu());
16930 + tss = init_tss + get_cpu();
16931 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
16932 if (cpu_has_sep)
16933 tsk->thread.sysenter_cs = 0;
16934 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
16935 goto cannot_handle;
16936 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
16937 goto cannot_handle;
16938 - intr_ptr = (unsigned long __user *) (i << 2);
16939 + intr_ptr = (__force unsigned long __user *) (i << 2);
16940 if (get_user(segoffs, intr_ptr))
16941 goto cannot_handle;
16942 if ((segoffs >> 16) == BIOSSEG)
16943 diff -urNp linux-2.6.32.41/arch/x86/kernel/vmi_32.c linux-2.6.32.41/arch/x86/kernel/vmi_32.c
16944 --- linux-2.6.32.41/arch/x86/kernel/vmi_32.c 2011-03-27 14:31:47.000000000 -0400
16945 +++ linux-2.6.32.41/arch/x86/kernel/vmi_32.c 2011-04-17 15:56:46.000000000 -0400
16946 @@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1)))
16947 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
16948
16949 #define call_vrom_func(rom,func) \
16950 - (((VROMFUNC *)(rom->func))())
16951 + (((VROMFUNC *)(ktva_ktla(rom.func)))())
16952
16953 #define call_vrom_long_func(rom,func,arg) \
16954 - (((VROMLONGFUNC *)(rom->func)) (arg))
16955 +({\
16956 + u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
16957 + struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
16958 + __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
16959 + __reloc;\
16960 +})
16961
16962 -static struct vrom_header *vmi_rom;
16963 +static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
16964 static int disable_pge;
16965 static int disable_pse;
16966 static int disable_sep;
16967 @@ -76,10 +81,10 @@ static struct {
16968 void (*set_initial_ap_state)(int, int);
16969 void (*halt)(void);
16970 void (*set_lazy_mode)(int mode);
16971 -} vmi_ops;
16972 +} vmi_ops __read_only;
16973
16974 /* Cached VMI operations */
16975 -struct vmi_timer_ops vmi_timer_ops;
16976 +struct vmi_timer_ops vmi_timer_ops __read_only;
16977
16978 /*
16979 * VMI patching routines.
16980 @@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
16981 static inline void patch_offset(void *insnbuf,
16982 unsigned long ip, unsigned long dest)
16983 {
16984 - *(unsigned long *)(insnbuf+1) = dest-ip-5;
16985 + *(unsigned long *)(insnbuf+1) = dest-ip-5;
16986 }
16987
16988 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
16989 @@ -102,6 +107,7 @@ static unsigned patch_internal(int call,
16990 {
16991 u64 reloc;
16992 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
16993 +
16994 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
16995 switch(rel->type) {
16996 case VMI_RELOCATION_CALL_REL:
16997 @@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud
16998
16999 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
17000 {
17001 - const pte_t pte = { .pte = 0 };
17002 + const pte_t pte = __pte(0ULL);
17003 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
17004 }
17005
17006 static void vmi_pmd_clear(pmd_t *pmd)
17007 {
17008 - const pte_t pte = { .pte = 0 };
17009 + const pte_t pte = __pte(0ULL);
17010 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
17011 }
17012 #endif
17013 @@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, un
17014 ap.ss = __KERNEL_DS;
17015 ap.esp = (unsigned long) start_esp;
17016
17017 - ap.ds = __USER_DS;
17018 - ap.es = __USER_DS;
17019 + ap.ds = __KERNEL_DS;
17020 + ap.es = __KERNEL_DS;
17021 ap.fs = __KERNEL_PERCPU;
17022 - ap.gs = __KERNEL_STACK_CANARY;
17023 + savesegment(gs, ap.gs);
17024
17025 ap.eflags = 0;
17026
17027 @@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
17028 paravirt_leave_lazy_mmu();
17029 }
17030
17031 +#ifdef CONFIG_PAX_KERNEXEC
17032 +static unsigned long vmi_pax_open_kernel(void)
17033 +{
17034 + return 0;
17035 +}
17036 +
17037 +static unsigned long vmi_pax_close_kernel(void)
17038 +{
17039 + return 0;
17040 +}
17041 +#endif
17042 +
17043 static inline int __init check_vmi_rom(struct vrom_header *rom)
17044 {
17045 struct pci_header *pci;
17046 @@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(s
17047 return 0;
17048 if (rom->vrom_signature != VMI_SIGNATURE)
17049 return 0;
17050 + if (rom->rom_length * 512 > sizeof(*rom)) {
17051 + printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
17052 + return 0;
17053 + }
17054 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
17055 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
17056 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
17057 @@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(v
17058 struct vrom_header *romstart;
17059 romstart = (struct vrom_header *)isa_bus_to_virt(base);
17060 if (check_vmi_rom(romstart)) {
17061 - vmi_rom = romstart;
17062 + vmi_rom = *romstart;
17063 return 1;
17064 }
17065 }
17066 @@ -836,6 +858,11 @@ static inline int __init activate_vmi(vo
17067
17068 para_fill(pv_irq_ops.safe_halt, Halt);
17069
17070 +#ifdef CONFIG_PAX_KERNEXEC
17071 + pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
17072 + pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
17073 +#endif
17074 +
17075 /*
17076 * Alternative instruction rewriting doesn't happen soon enough
17077 * to convert VMI_IRET to a call instead of a jump; so we have
17078 @@ -853,16 +880,16 @@ static inline int __init activate_vmi(vo
17079
17080 void __init vmi_init(void)
17081 {
17082 - if (!vmi_rom)
17083 + if (!vmi_rom.rom_signature)
17084 probe_vmi_rom();
17085 else
17086 - check_vmi_rom(vmi_rom);
17087 + check_vmi_rom(&vmi_rom);
17088
17089 /* In case probing for or validating the ROM failed, basil */
17090 - if (!vmi_rom)
17091 + if (!vmi_rom.rom_signature)
17092 return;
17093
17094 - reserve_top_address(-vmi_rom->virtual_top);
17095 + reserve_top_address(-vmi_rom.virtual_top);
17096
17097 #ifdef CONFIG_X86_IO_APIC
17098 /* This is virtual hardware; timer routing is wired correctly */
17099 @@ -874,7 +901,7 @@ void __init vmi_activate(void)
17100 {
17101 unsigned long flags;
17102
17103 - if (!vmi_rom)
17104 + if (!vmi_rom.rom_signature)
17105 return;
17106
17107 local_irq_save(flags);
17108 diff -urNp linux-2.6.32.41/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.41/arch/x86/kernel/vmlinux.lds.S
17109 --- linux-2.6.32.41/arch/x86/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
17110 +++ linux-2.6.32.41/arch/x86/kernel/vmlinux.lds.S 2011-04-17 15:56:46.000000000 -0400
17111 @@ -26,6 +26,13 @@
17112 #include <asm/page_types.h>
17113 #include <asm/cache.h>
17114 #include <asm/boot.h>
17115 +#include <asm/segment.h>
17116 +
17117 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17118 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
17119 +#else
17120 +#define __KERNEL_TEXT_OFFSET 0
17121 +#endif
17122
17123 #undef i386 /* in case the preprocessor is a 32bit one */
17124
17125 @@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF
17126 #ifdef CONFIG_X86_32
17127 OUTPUT_ARCH(i386)
17128 ENTRY(phys_startup_32)
17129 -jiffies = jiffies_64;
17130 #else
17131 OUTPUT_ARCH(i386:x86-64)
17132 ENTRY(phys_startup_64)
17133 -jiffies_64 = jiffies;
17134 #endif
17135
17136 PHDRS {
17137 text PT_LOAD FLAGS(5); /* R_E */
17138 - data PT_LOAD FLAGS(7); /* RWE */
17139 +#ifdef CONFIG_X86_32
17140 + module PT_LOAD FLAGS(5); /* R_E */
17141 +#endif
17142 +#ifdef CONFIG_XEN
17143 + rodata PT_LOAD FLAGS(5); /* R_E */
17144 +#else
17145 + rodata PT_LOAD FLAGS(4); /* R__ */
17146 +#endif
17147 + data PT_LOAD FLAGS(6); /* RW_ */
17148 #ifdef CONFIG_X86_64
17149 user PT_LOAD FLAGS(5); /* R_E */
17150 +#endif
17151 + init.begin PT_LOAD FLAGS(6); /* RW_ */
17152 #ifdef CONFIG_SMP
17153 percpu PT_LOAD FLAGS(6); /* RW_ */
17154 #endif
17155 + text.init PT_LOAD FLAGS(5); /* R_E */
17156 + text.exit PT_LOAD FLAGS(5); /* R_E */
17157 init PT_LOAD FLAGS(7); /* RWE */
17158 -#endif
17159 note PT_NOTE FLAGS(0); /* ___ */
17160 }
17161
17162 SECTIONS
17163 {
17164 #ifdef CONFIG_X86_32
17165 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
17166 - phys_startup_32 = startup_32 - LOAD_OFFSET;
17167 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
17168 #else
17169 - . = __START_KERNEL;
17170 - phys_startup_64 = startup_64 - LOAD_OFFSET;
17171 + . = __START_KERNEL;
17172 #endif
17173
17174 /* Text and read-only data */
17175 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
17176 - _text = .;
17177 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17178 /* bootstrapping code */
17179 +#ifdef CONFIG_X86_32
17180 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17181 +#else
17182 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17183 +#endif
17184 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17185 + _text = .;
17186 HEAD_TEXT
17187 #ifdef CONFIG_X86_32
17188 . = ALIGN(PAGE_SIZE);
17189 @@ -82,28 +102,71 @@ SECTIONS
17190 IRQENTRY_TEXT
17191 *(.fixup)
17192 *(.gnu.warning)
17193 - /* End of text section */
17194 - _etext = .;
17195 } :text = 0x9090
17196
17197 - NOTES :text :note
17198 + . += __KERNEL_TEXT_OFFSET;
17199 +
17200 +#ifdef CONFIG_X86_32
17201 + . = ALIGN(PAGE_SIZE);
17202 + .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
17203 + *(.vmi.rom)
17204 + } :module
17205 +
17206 + . = ALIGN(PAGE_SIZE);
17207 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
17208 +
17209 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
17210 + MODULES_EXEC_VADDR = .;
17211 + BYTE(0)
17212 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
17213 + . = ALIGN(HPAGE_SIZE);
17214 + MODULES_EXEC_END = . - 1;
17215 +#endif
17216 +
17217 + } :module
17218 +#endif
17219
17220 - EXCEPTION_TABLE(16) :text = 0x9090
17221 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
17222 + /* End of text section */
17223 + _etext = . - __KERNEL_TEXT_OFFSET;
17224 + }
17225 +
17226 +#ifdef CONFIG_X86_32
17227 + . = ALIGN(PAGE_SIZE);
17228 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
17229 + *(.idt)
17230 + . = ALIGN(PAGE_SIZE);
17231 + *(.empty_zero_page)
17232 + *(.swapper_pg_fixmap)
17233 + *(.swapper_pg_pmd)
17234 + *(.swapper_pg_dir)
17235 + *(.trampoline_pg_dir)
17236 + } :rodata
17237 +#endif
17238 +
17239 + . = ALIGN(PAGE_SIZE);
17240 + NOTES :rodata :note
17241 +
17242 + EXCEPTION_TABLE(16) :rodata
17243
17244 RO_DATA(PAGE_SIZE)
17245
17246 /* Data */
17247 .data : AT(ADDR(.data) - LOAD_OFFSET) {
17248 +
17249 +#ifdef CONFIG_PAX_KERNEXEC
17250 + . = ALIGN(HPAGE_SIZE);
17251 +#else
17252 + . = ALIGN(PAGE_SIZE);
17253 +#endif
17254 +
17255 /* Start of data section */
17256 _sdata = .;
17257
17258 /* init_task */
17259 INIT_TASK_DATA(THREAD_SIZE)
17260
17261 -#ifdef CONFIG_X86_32
17262 - /* 32 bit has nosave before _edata */
17263 NOSAVE_DATA
17264 -#endif
17265
17266 PAGE_ALIGNED_DATA(PAGE_SIZE)
17267
17268 @@ -112,6 +175,8 @@ SECTIONS
17269 DATA_DATA
17270 CONSTRUCTORS
17271
17272 + jiffies = jiffies_64;
17273 +
17274 /* rarely changed data like cpu maps */
17275 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
17276
17277 @@ -166,12 +231,6 @@ SECTIONS
17278 }
17279 vgetcpu_mode = VVIRT(.vgetcpu_mode);
17280
17281 - . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
17282 - .jiffies : AT(VLOAD(.jiffies)) {
17283 - *(.jiffies)
17284 - }
17285 - jiffies = VVIRT(.jiffies);
17286 -
17287 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
17288 *(.vsyscall_3)
17289 }
17290 @@ -187,12 +246,19 @@ SECTIONS
17291 #endif /* CONFIG_X86_64 */
17292
17293 /* Init code and data - will be freed after init */
17294 - . = ALIGN(PAGE_SIZE);
17295 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
17296 + BYTE(0)
17297 +
17298 +#ifdef CONFIG_PAX_KERNEXEC
17299 + . = ALIGN(HPAGE_SIZE);
17300 +#else
17301 + . = ALIGN(PAGE_SIZE);
17302 +#endif
17303 +
17304 __init_begin = .; /* paired with __init_end */
17305 - }
17306 + } :init.begin
17307
17308 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
17309 +#ifdef CONFIG_SMP
17310 /*
17311 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
17312 * output PHDR, so the next output section - .init.text - should
17313 @@ -201,12 +267,27 @@ SECTIONS
17314 PERCPU_VADDR(0, :percpu)
17315 #endif
17316
17317 - INIT_TEXT_SECTION(PAGE_SIZE)
17318 -#ifdef CONFIG_X86_64
17319 - :init
17320 -#endif
17321 + . = ALIGN(PAGE_SIZE);
17322 + init_begin = .;
17323 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
17324 + VMLINUX_SYMBOL(_sinittext) = .;
17325 + INIT_TEXT
17326 + VMLINUX_SYMBOL(_einittext) = .;
17327 + . = ALIGN(PAGE_SIZE);
17328 + } :text.init
17329
17330 - INIT_DATA_SECTION(16)
17331 + /*
17332 + * .exit.text is discard at runtime, not link time, to deal with
17333 + * references from .altinstructions and .eh_frame
17334 + */
17335 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17336 + EXIT_TEXT
17337 + . = ALIGN(16);
17338 + } :text.exit
17339 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
17340 +
17341 + . = ALIGN(PAGE_SIZE);
17342 + INIT_DATA_SECTION(16) :init
17343
17344 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
17345 __x86_cpu_dev_start = .;
17346 @@ -232,19 +313,11 @@ SECTIONS
17347 *(.altinstr_replacement)
17348 }
17349
17350 - /*
17351 - * .exit.text is discard at runtime, not link time, to deal with
17352 - * references from .altinstructions and .eh_frame
17353 - */
17354 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
17355 - EXIT_TEXT
17356 - }
17357 -
17358 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
17359 EXIT_DATA
17360 }
17361
17362 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
17363 +#ifndef CONFIG_SMP
17364 PERCPU(PAGE_SIZE)
17365 #endif
17366
17367 @@ -267,12 +340,6 @@ SECTIONS
17368 . = ALIGN(PAGE_SIZE);
17369 }
17370
17371 -#ifdef CONFIG_X86_64
17372 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
17373 - NOSAVE_DATA
17374 - }
17375 -#endif
17376 -
17377 /* BSS */
17378 . = ALIGN(PAGE_SIZE);
17379 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
17380 @@ -288,6 +355,7 @@ SECTIONS
17381 __brk_base = .;
17382 . += 64 * 1024; /* 64k alignment slop space */
17383 *(.brk_reservation) /* areas brk users have reserved */
17384 + . = ALIGN(HPAGE_SIZE);
17385 __brk_limit = .;
17386 }
17387
17388 @@ -316,13 +384,12 @@ SECTIONS
17389 * for the boot processor.
17390 */
17391 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
17392 -INIT_PER_CPU(gdt_page);
17393 INIT_PER_CPU(irq_stack_union);
17394
17395 /*
17396 * Build-time check on the image size:
17397 */
17398 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
17399 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
17400 "kernel image bigger than KERNEL_IMAGE_SIZE");
17401
17402 #ifdef CONFIG_SMP
17403 diff -urNp linux-2.6.32.41/arch/x86/kernel/vsyscall_64.c linux-2.6.32.41/arch/x86/kernel/vsyscall_64.c
17404 --- linux-2.6.32.41/arch/x86/kernel/vsyscall_64.c 2011-03-27 14:31:47.000000000 -0400
17405 +++ linux-2.6.32.41/arch/x86/kernel/vsyscall_64.c 2011-04-23 12:56:10.000000000 -0400
17406 @@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wa
17407
17408 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
17409 /* copy vsyscall data */
17410 + strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
17411 vsyscall_gtod_data.clock.vread = clock->vread;
17412 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
17413 vsyscall_gtod_data.clock.mask = clock->mask;
17414 @@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, s
17415 We do this here because otherwise user space would do it on
17416 its own in a likely inferior way (no access to jiffies).
17417 If you don't like it pass NULL. */
17418 - if (tcache && tcache->blob[0] == (j = __jiffies)) {
17419 + if (tcache && tcache->blob[0] == (j = jiffies)) {
17420 p = tcache->blob[1];
17421 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
17422 /* Load per CPU data from RDTSCP */
17423 diff -urNp linux-2.6.32.41/arch/x86/kernel/x8664_ksyms_64.c linux-2.6.32.41/arch/x86/kernel/x8664_ksyms_64.c
17424 --- linux-2.6.32.41/arch/x86/kernel/x8664_ksyms_64.c 2011-03-27 14:31:47.000000000 -0400
17425 +++ linux-2.6.32.41/arch/x86/kernel/x8664_ksyms_64.c 2011-04-17 15:56:46.000000000 -0400
17426 @@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
17427
17428 EXPORT_SYMBOL(copy_user_generic);
17429 EXPORT_SYMBOL(__copy_user_nocache);
17430 -EXPORT_SYMBOL(copy_from_user);
17431 -EXPORT_SYMBOL(copy_to_user);
17432 EXPORT_SYMBOL(__copy_from_user_inatomic);
17433
17434 EXPORT_SYMBOL(copy_page);
17435 diff -urNp linux-2.6.32.41/arch/x86/kernel/xsave.c linux-2.6.32.41/arch/x86/kernel/xsave.c
17436 --- linux-2.6.32.41/arch/x86/kernel/xsave.c 2011-03-27 14:31:47.000000000 -0400
17437 +++ linux-2.6.32.41/arch/x86/kernel/xsave.c 2011-04-17 15:56:46.000000000 -0400
17438 @@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_
17439 fx_sw_user->xstate_size > fx_sw_user->extended_size)
17440 return -1;
17441
17442 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
17443 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
17444 fx_sw_user->extended_size -
17445 FP_XSTATE_MAGIC2_SIZE));
17446 /*
17447 @@ -196,7 +196,7 @@ fx_only:
17448 * the other extended state.
17449 */
17450 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
17451 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
17452 + return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
17453 }
17454
17455 /*
17456 @@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf
17457 if (task_thread_info(tsk)->status & TS_XSAVE)
17458 err = restore_user_xstate(buf);
17459 else
17460 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
17461 + err = fxrstor_checking((struct i387_fxsave_struct __user *)
17462 buf);
17463 if (unlikely(err)) {
17464 /*
17465 diff -urNp linux-2.6.32.41/arch/x86/kvm/emulate.c linux-2.6.32.41/arch/x86/kvm/emulate.c
17466 --- linux-2.6.32.41/arch/x86/kvm/emulate.c 2011-03-27 14:31:47.000000000 -0400
17467 +++ linux-2.6.32.41/arch/x86/kvm/emulate.c 2011-04-17 15:56:46.000000000 -0400
17468 @@ -81,8 +81,8 @@
17469 #define Src2CL (1<<29)
17470 #define Src2ImmByte (2<<29)
17471 #define Src2One (3<<29)
17472 -#define Src2Imm16 (4<<29)
17473 -#define Src2Mask (7<<29)
17474 +#define Src2Imm16 (4U<<29)
17475 +#define Src2Mask (7U<<29)
17476
17477 enum {
17478 Group1_80, Group1_81, Group1_82, Group1_83,
17479 @@ -411,6 +411,7 @@ static u32 group2_table[] = {
17480
17481 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
17482 do { \
17483 + unsigned long _tmp; \
17484 __asm__ __volatile__ ( \
17485 _PRE_EFLAGS("0", "4", "2") \
17486 _op _suffix " %"_x"3,%1; " \
17487 @@ -424,8 +425,6 @@ static u32 group2_table[] = {
17488 /* Raw emulation: instruction has two explicit operands. */
17489 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
17490 do { \
17491 - unsigned long _tmp; \
17492 - \
17493 switch ((_dst).bytes) { \
17494 case 2: \
17495 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
17496 @@ -441,7 +440,6 @@ static u32 group2_table[] = {
17497
17498 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
17499 do { \
17500 - unsigned long _tmp; \
17501 switch ((_dst).bytes) { \
17502 case 1: \
17503 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
17504 diff -urNp linux-2.6.32.41/arch/x86/kvm/lapic.c linux-2.6.32.41/arch/x86/kvm/lapic.c
17505 --- linux-2.6.32.41/arch/x86/kvm/lapic.c 2011-03-27 14:31:47.000000000 -0400
17506 +++ linux-2.6.32.41/arch/x86/kvm/lapic.c 2011-04-17 15:56:46.000000000 -0400
17507 @@ -52,7 +52,7 @@
17508 #define APIC_BUS_CYCLE_NS 1
17509
17510 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
17511 -#define apic_debug(fmt, arg...)
17512 +#define apic_debug(fmt, arg...) do {} while (0)
17513
17514 #define APIC_LVT_NUM 6
17515 /* 14 is the version for Xeon and Pentium 8.4.8*/
17516 diff -urNp linux-2.6.32.41/arch/x86/kvm/paging_tmpl.h linux-2.6.32.41/arch/x86/kvm/paging_tmpl.h
17517 --- linux-2.6.32.41/arch/x86/kvm/paging_tmpl.h 2011-03-27 14:31:47.000000000 -0400
17518 +++ linux-2.6.32.41/arch/x86/kvm/paging_tmpl.h 2011-05-16 21:46:57.000000000 -0400
17519 @@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_
17520 int level = PT_PAGE_TABLE_LEVEL;
17521 unsigned long mmu_seq;
17522
17523 + pax_track_stack();
17524 +
17525 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
17526 kvm_mmu_audit(vcpu, "pre page fault");
17527
17528 diff -urNp linux-2.6.32.41/arch/x86/kvm/svm.c linux-2.6.32.41/arch/x86/kvm/svm.c
17529 --- linux-2.6.32.41/arch/x86/kvm/svm.c 2011-03-27 14:31:47.000000000 -0400
17530 +++ linux-2.6.32.41/arch/x86/kvm/svm.c 2011-04-17 15:56:46.000000000 -0400
17531 @@ -2483,9 +2483,12 @@ static int handle_exit(struct kvm_run *k
17532 static void reload_tss(struct kvm_vcpu *vcpu)
17533 {
17534 int cpu = raw_smp_processor_id();
17535 -
17536 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
17537 +
17538 + pax_open_kernel();
17539 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
17540 + pax_close_kernel();
17541 +
17542 load_TR_desc();
17543 }
17544
17545 @@ -2946,7 +2949,7 @@ static bool svm_gb_page_enable(void)
17546 return true;
17547 }
17548
17549 -static struct kvm_x86_ops svm_x86_ops = {
17550 +static const struct kvm_x86_ops svm_x86_ops = {
17551 .cpu_has_kvm_support = has_svm,
17552 .disabled_by_bios = is_disabled,
17553 .hardware_setup = svm_hardware_setup,
17554 diff -urNp linux-2.6.32.41/arch/x86/kvm/vmx.c linux-2.6.32.41/arch/x86/kvm/vmx.c
17555 --- linux-2.6.32.41/arch/x86/kvm/vmx.c 2011-03-27 14:31:47.000000000 -0400
17556 +++ linux-2.6.32.41/arch/x86/kvm/vmx.c 2011-05-04 17:56:20.000000000 -0400
17557 @@ -570,7 +570,11 @@ static void reload_tss(void)
17558
17559 kvm_get_gdt(&gdt);
17560 descs = (void *)gdt.base;
17561 +
17562 + pax_open_kernel();
17563 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
17564 + pax_close_kernel();
17565 +
17566 load_TR_desc();
17567 }
17568
17569 @@ -1409,8 +1413,11 @@ static __init int hardware_setup(void)
17570 if (!cpu_has_vmx_flexpriority())
17571 flexpriority_enabled = 0;
17572
17573 - if (!cpu_has_vmx_tpr_shadow())
17574 - kvm_x86_ops->update_cr8_intercept = NULL;
17575 + if (!cpu_has_vmx_tpr_shadow()) {
17576 + pax_open_kernel();
17577 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
17578 + pax_close_kernel();
17579 + }
17580
17581 if (enable_ept && !cpu_has_vmx_ept_2m_page())
17582 kvm_disable_largepages();
17583 @@ -2361,7 +2368,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
17584 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
17585
17586 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
17587 - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
17588 + vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
17589 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
17590 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
17591 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
17592 @@ -3717,6 +3724,12 @@ static void vmx_vcpu_run(struct kvm_vcpu
17593 "jmp .Lkvm_vmx_return \n\t"
17594 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
17595 ".Lkvm_vmx_return: "
17596 +
17597 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17598 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
17599 + ".Lkvm_vmx_return2: "
17600 +#endif
17601 +
17602 /* Save guest registers, load host registers, keep flags */
17603 "xchg %0, (%%"R"sp) \n\t"
17604 "mov %%"R"ax, %c[rax](%0) \n\t"
17605 @@ -3763,8 +3776,13 @@ static void vmx_vcpu_run(struct kvm_vcpu
17606 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
17607 #endif
17608 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
17609 +
17610 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17611 + ,[cs]"i"(__KERNEL_CS)
17612 +#endif
17613 +
17614 : "cc", "memory"
17615 - , R"bx", R"di", R"si"
17616 + , R"ax", R"bx", R"di", R"si"
17617 #ifdef CONFIG_X86_64
17618 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
17619 #endif
17620 @@ -3781,7 +3799,16 @@ static void vmx_vcpu_run(struct kvm_vcpu
17621 if (vmx->rmode.irq.pending)
17622 fixup_rmode_irq(vmx);
17623
17624 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
17625 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
17626 +
17627 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17628 + loadsegment(fs, __KERNEL_PERCPU);
17629 +#endif
17630 +
17631 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17632 + __set_fs(current_thread_info()->addr_limit);
17633 +#endif
17634 +
17635 vmx->launched = 1;
17636
17637 vmx_complete_interrupts(vmx);
17638 @@ -3956,7 +3983,7 @@ static bool vmx_gb_page_enable(void)
17639 return false;
17640 }
17641
17642 -static struct kvm_x86_ops vmx_x86_ops = {
17643 +static const struct kvm_x86_ops vmx_x86_ops = {
17644 .cpu_has_kvm_support = cpu_has_kvm_support,
17645 .disabled_by_bios = vmx_disabled_by_bios,
17646 .hardware_setup = hardware_setup,
17647 diff -urNp linux-2.6.32.41/arch/x86/kvm/x86.c linux-2.6.32.41/arch/x86/kvm/x86.c
17648 --- linux-2.6.32.41/arch/x86/kvm/x86.c 2011-05-10 22:12:01.000000000 -0400
17649 +++ linux-2.6.32.41/arch/x86/kvm/x86.c 2011-05-10 22:12:26.000000000 -0400
17650 @@ -82,7 +82,7 @@ static void update_cr8_intercept(struct
17651 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
17652 struct kvm_cpuid_entry2 __user *entries);
17653
17654 -struct kvm_x86_ops *kvm_x86_ops;
17655 +const struct kvm_x86_ops *kvm_x86_ops;
17656 EXPORT_SYMBOL_GPL(kvm_x86_ops);
17657
17658 int ignore_msrs = 0;
17659 @@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
17660 struct kvm_cpuid2 *cpuid,
17661 struct kvm_cpuid_entry2 __user *entries)
17662 {
17663 - int r;
17664 + int r, i;
17665
17666 r = -E2BIG;
17667 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
17668 goto out;
17669 r = -EFAULT;
17670 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
17671 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
17672 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
17673 goto out;
17674 + for (i = 0; i < cpuid->nent; ++i) {
17675 + struct kvm_cpuid_entry2 cpuid_entry;
17676 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
17677 + goto out;
17678 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
17679 + }
17680 vcpu->arch.cpuid_nent = cpuid->nent;
17681 kvm_apic_set_version(vcpu);
17682 return 0;
17683 @@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
17684 struct kvm_cpuid2 *cpuid,
17685 struct kvm_cpuid_entry2 __user *entries)
17686 {
17687 - int r;
17688 + int r, i;
17689
17690 vcpu_load(vcpu);
17691 r = -E2BIG;
17692 if (cpuid->nent < vcpu->arch.cpuid_nent)
17693 goto out;
17694 r = -EFAULT;
17695 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
17696 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
17697 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
17698 goto out;
17699 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
17700 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
17701 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
17702 + goto out;
17703 + }
17704 return 0;
17705
17706 out:
17707 @@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
17708 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
17709 struct kvm_interrupt *irq)
17710 {
17711 - if (irq->irq < 0 || irq->irq >= 256)
17712 + if (irq->irq >= 256)
17713 return -EINVAL;
17714 if (irqchip_in_kernel(vcpu->kvm))
17715 return -ENXIO;
17716 @@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cp
17717 .notifier_call = kvmclock_cpufreq_notifier
17718 };
17719
17720 -int kvm_arch_init(void *opaque)
17721 +int kvm_arch_init(const void *opaque)
17722 {
17723 int r, cpu;
17724 - struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
17725 + const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
17726
17727 if (kvm_x86_ops) {
17728 printk(KERN_ERR "kvm: already loaded the other module\n");
17729 diff -urNp linux-2.6.32.41/arch/x86/lib/atomic64_32.c linux-2.6.32.41/arch/x86/lib/atomic64_32.c
17730 --- linux-2.6.32.41/arch/x86/lib/atomic64_32.c 2011-03-27 14:31:47.000000000 -0400
17731 +++ linux-2.6.32.41/arch/x86/lib/atomic64_32.c 2011-05-04 17:56:28.000000000 -0400
17732 @@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u6
17733 }
17734 EXPORT_SYMBOL(atomic64_cmpxchg);
17735
17736 +u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
17737 +{
17738 + return cmpxchg8b(&ptr->counter, old_val, new_val);
17739 +}
17740 +EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
17741 +
17742 /**
17743 * atomic64_xchg - xchg atomic64 variable
17744 * @ptr: pointer to type atomic64_t
17745 @@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 n
17746 EXPORT_SYMBOL(atomic64_xchg);
17747
17748 /**
17749 + * atomic64_xchg_unchecked - xchg atomic64 variable
17750 + * @ptr: pointer to type atomic64_unchecked_t
17751 + * @new_val: value to assign
17752 + *
17753 + * Atomically xchgs the value of @ptr to @new_val and returns
17754 + * the old value.
17755 + */
17756 +u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
17757 +{
17758 + /*
17759 + * Try first with a (possibly incorrect) assumption about
17760 + * what we have there. We'll do two loops most likely,
17761 + * but we'll get an ownership MESI transaction straight away
17762 + * instead of a read transaction followed by a
17763 + * flush-for-ownership transaction:
17764 + */
17765 + u64 old_val, real_val = 0;
17766 +
17767 + do {
17768 + old_val = real_val;
17769 +
17770 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
17771 +
17772 + } while (real_val != old_val);
17773 +
17774 + return old_val;
17775 +}
17776 +EXPORT_SYMBOL(atomic64_xchg_unchecked);
17777 +
17778 +/**
17779 * atomic64_set - set atomic64 variable
17780 * @ptr: pointer to type atomic64_t
17781 * @new_val: value to assign
17782 @@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 n
17783 EXPORT_SYMBOL(atomic64_set);
17784
17785 /**
17786 -EXPORT_SYMBOL(atomic64_read);
17787 + * atomic64_unchecked_set - set atomic64 variable
17788 + * @ptr: pointer to type atomic64_unchecked_t
17789 + * @new_val: value to assign
17790 + *
17791 + * Atomically sets the value of @ptr to @new_val.
17792 + */
17793 +void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
17794 +{
17795 + atomic64_xchg_unchecked(ptr, new_val);
17796 +}
17797 +EXPORT_SYMBOL(atomic64_set_unchecked);
17798 +
17799 +/**
17800 * atomic64_add_return - add and return
17801 * @delta: integer value to add
17802 * @ptr: pointer to type atomic64_t
17803 @@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 del
17804 }
17805 EXPORT_SYMBOL(atomic64_add_return);
17806
17807 +/**
17808 + * atomic64_add_return_unchecked - add and return
17809 + * @delta: integer value to add
17810 + * @ptr: pointer to type atomic64_unchecked_t
17811 + *
17812 + * Atomically adds @delta to @ptr and returns @delta + *@ptr
17813 + */
17814 +noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
17815 +{
17816 + /*
17817 + * Try first with a (possibly incorrect) assumption about
17818 + * what we have there. We'll do two loops most likely,
17819 + * but we'll get an ownership MESI transaction straight away
17820 + * instead of a read transaction followed by a
17821 + * flush-for-ownership transaction:
17822 + */
17823 + u64 old_val, new_val, real_val = 0;
17824 +
17825 + do {
17826 + old_val = real_val;
17827 + new_val = old_val + delta;
17828 +
17829 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
17830 +
17831 + } while (real_val != old_val);
17832 +
17833 + return new_val;
17834 +}
17835 +EXPORT_SYMBOL(atomic64_add_return_unchecked);
17836 +
17837 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
17838 {
17839 return atomic64_add_return(-delta, ptr);
17840 }
17841 EXPORT_SYMBOL(atomic64_sub_return);
17842
17843 +u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
17844 +{
17845 + return atomic64_add_return_unchecked(-delta, ptr);
17846 +}
17847 +EXPORT_SYMBOL(atomic64_sub_return_unchecked);
17848 +
17849 u64 atomic64_inc_return(atomic64_t *ptr)
17850 {
17851 return atomic64_add_return(1, ptr);
17852 }
17853 EXPORT_SYMBOL(atomic64_inc_return);
17854
17855 +u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
17856 +{
17857 + return atomic64_add_return_unchecked(1, ptr);
17858 +}
17859 +EXPORT_SYMBOL(atomic64_inc_return_unchecked);
17860 +
17861 u64 atomic64_dec_return(atomic64_t *ptr)
17862 {
17863 return atomic64_sub_return(1, ptr);
17864 }
17865 EXPORT_SYMBOL(atomic64_dec_return);
17866
17867 +u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
17868 +{
17869 + return atomic64_sub_return_unchecked(1, ptr);
17870 +}
17871 +EXPORT_SYMBOL(atomic64_dec_return_unchecked);
17872 +
17873 /**
17874 * atomic64_add - add integer to atomic64 variable
17875 * @delta: integer value to add
17876 @@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t
17877 EXPORT_SYMBOL(atomic64_add);
17878
17879 /**
17880 + * atomic64_add_unchecked - add integer to atomic64 variable
17881 + * @delta: integer value to add
17882 + * @ptr: pointer to type atomic64_unchecked_t
17883 + *
17884 + * Atomically adds @delta to @ptr.
17885 + */
17886 +void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
17887 +{
17888 + atomic64_add_return_unchecked(delta, ptr);
17889 +}
17890 +EXPORT_SYMBOL(atomic64_add_unchecked);
17891 +
17892 +/**
17893 * atomic64_sub - subtract the atomic64 variable
17894 * @delta: integer value to subtract
17895 * @ptr: pointer to type atomic64_t
17896 @@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t
17897 EXPORT_SYMBOL(atomic64_sub);
17898
17899 /**
17900 + * atomic64_sub_unchecked - subtract the atomic64 variable
17901 + * @delta: integer value to subtract
17902 + * @ptr: pointer to type atomic64_unchecked_t
17903 + *
17904 + * Atomically subtracts @delta from @ptr.
17905 + */
17906 +void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
17907 +{
17908 + atomic64_add_unchecked(-delta, ptr);
17909 +}
17910 +EXPORT_SYMBOL(atomic64_sub_unchecked);
17911 +
17912 +/**
17913 * atomic64_sub_and_test - subtract value from variable and test result
17914 * @delta: integer value to subtract
17915 * @ptr: pointer to type atomic64_t
17916 @@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
17917 EXPORT_SYMBOL(atomic64_inc);
17918
17919 /**
17920 + * atomic64_inc_unchecked - increment atomic64 variable
17921 + * @ptr: pointer to type atomic64_unchecked_t
17922 + *
17923 + * Atomically increments @ptr by 1.
17924 + */
17925 +void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
17926 +{
17927 + atomic64_add_unchecked(1, ptr);
17928 +}
17929 +EXPORT_SYMBOL(atomic64_inc_unchecked);
17930 +
17931 +/**
17932 * atomic64_dec - decrement atomic64 variable
17933 * @ptr: pointer to type atomic64_t
17934 *
17935 @@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
17936 EXPORT_SYMBOL(atomic64_dec);
17937
17938 /**
17939 + * atomic64_dec_unchecked - decrement atomic64 variable
17940 + * @ptr: pointer to type atomic64_unchecked_t
17941 + *
17942 + * Atomically decrements @ptr by 1.
17943 + */
17944 +void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
17945 +{
17946 + atomic64_sub_unchecked(1, ptr);
17947 +}
17948 +EXPORT_SYMBOL(atomic64_dec_unchecked);
17949 +
17950 +/**
17951 * atomic64_dec_and_test - decrement and test
17952 * @ptr: pointer to type atomic64_t
17953 *
17954 diff -urNp linux-2.6.32.41/arch/x86/lib/checksum_32.S linux-2.6.32.41/arch/x86/lib/checksum_32.S
17955 --- linux-2.6.32.41/arch/x86/lib/checksum_32.S 2011-03-27 14:31:47.000000000 -0400
17956 +++ linux-2.6.32.41/arch/x86/lib/checksum_32.S 2011-04-17 15:56:46.000000000 -0400
17957 @@ -28,7 +28,8 @@
17958 #include <linux/linkage.h>
17959 #include <asm/dwarf2.h>
17960 #include <asm/errno.h>
17961 -
17962 +#include <asm/segment.h>
17963 +
17964 /*
17965 * computes a partial checksum, e.g. for TCP/UDP fragments
17966 */
17967 @@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (
17968
17969 #define ARGBASE 16
17970 #define FP 12
17971 -
17972 -ENTRY(csum_partial_copy_generic)
17973 +
17974 +ENTRY(csum_partial_copy_generic_to_user)
17975 CFI_STARTPROC
17976 +
17977 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17978 + pushl %gs
17979 + CFI_ADJUST_CFA_OFFSET 4
17980 + popl %es
17981 + CFI_ADJUST_CFA_OFFSET -4
17982 + jmp csum_partial_copy_generic
17983 +#endif
17984 +
17985 +ENTRY(csum_partial_copy_generic_from_user)
17986 +
17987 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17988 + pushl %gs
17989 + CFI_ADJUST_CFA_OFFSET 4
17990 + popl %ds
17991 + CFI_ADJUST_CFA_OFFSET -4
17992 +#endif
17993 +
17994 +ENTRY(csum_partial_copy_generic)
17995 subl $4,%esp
17996 CFI_ADJUST_CFA_OFFSET 4
17997 pushl %edi
17998 @@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
17999 jmp 4f
18000 SRC(1: movw (%esi), %bx )
18001 addl $2, %esi
18002 -DST( movw %bx, (%edi) )
18003 +DST( movw %bx, %es:(%edi) )
18004 addl $2, %edi
18005 addw %bx, %ax
18006 adcl $0, %eax
18007 @@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
18008 SRC(1: movl (%esi), %ebx )
18009 SRC( movl 4(%esi), %edx )
18010 adcl %ebx, %eax
18011 -DST( movl %ebx, (%edi) )
18012 +DST( movl %ebx, %es:(%edi) )
18013 adcl %edx, %eax
18014 -DST( movl %edx, 4(%edi) )
18015 +DST( movl %edx, %es:4(%edi) )
18016
18017 SRC( movl 8(%esi), %ebx )
18018 SRC( movl 12(%esi), %edx )
18019 adcl %ebx, %eax
18020 -DST( movl %ebx, 8(%edi) )
18021 +DST( movl %ebx, %es:8(%edi) )
18022 adcl %edx, %eax
18023 -DST( movl %edx, 12(%edi) )
18024 +DST( movl %edx, %es:12(%edi) )
18025
18026 SRC( movl 16(%esi), %ebx )
18027 SRC( movl 20(%esi), %edx )
18028 adcl %ebx, %eax
18029 -DST( movl %ebx, 16(%edi) )
18030 +DST( movl %ebx, %es:16(%edi) )
18031 adcl %edx, %eax
18032 -DST( movl %edx, 20(%edi) )
18033 +DST( movl %edx, %es:20(%edi) )
18034
18035 SRC( movl 24(%esi), %ebx )
18036 SRC( movl 28(%esi), %edx )
18037 adcl %ebx, %eax
18038 -DST( movl %ebx, 24(%edi) )
18039 +DST( movl %ebx, %es:24(%edi) )
18040 adcl %edx, %eax
18041 -DST( movl %edx, 28(%edi) )
18042 +DST( movl %edx, %es:28(%edi) )
18043
18044 lea 32(%esi), %esi
18045 lea 32(%edi), %edi
18046 @@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
18047 shrl $2, %edx # This clears CF
18048 SRC(3: movl (%esi), %ebx )
18049 adcl %ebx, %eax
18050 -DST( movl %ebx, (%edi) )
18051 +DST( movl %ebx, %es:(%edi) )
18052 lea 4(%esi), %esi
18053 lea 4(%edi), %edi
18054 dec %edx
18055 @@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
18056 jb 5f
18057 SRC( movw (%esi), %cx )
18058 leal 2(%esi), %esi
18059 -DST( movw %cx, (%edi) )
18060 +DST( movw %cx, %es:(%edi) )
18061 leal 2(%edi), %edi
18062 je 6f
18063 shll $16,%ecx
18064 SRC(5: movb (%esi), %cl )
18065 -DST( movb %cl, (%edi) )
18066 +DST( movb %cl, %es:(%edi) )
18067 6: addl %ecx, %eax
18068 adcl $0, %eax
18069 7:
18070 @@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
18071
18072 6001:
18073 movl ARGBASE+20(%esp), %ebx # src_err_ptr
18074 - movl $-EFAULT, (%ebx)
18075 + movl $-EFAULT, %ss:(%ebx)
18076
18077 # zero the complete destination - computing the rest
18078 # is too much work
18079 @@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
18080
18081 6002:
18082 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
18083 - movl $-EFAULT,(%ebx)
18084 + movl $-EFAULT,%ss:(%ebx)
18085 jmp 5000b
18086
18087 .previous
18088
18089 + pushl %ss
18090 + CFI_ADJUST_CFA_OFFSET 4
18091 + popl %ds
18092 + CFI_ADJUST_CFA_OFFSET -4
18093 + pushl %ss
18094 + CFI_ADJUST_CFA_OFFSET 4
18095 + popl %es
18096 + CFI_ADJUST_CFA_OFFSET -4
18097 popl %ebx
18098 CFI_ADJUST_CFA_OFFSET -4
18099 CFI_RESTORE ebx
18100 @@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
18101 CFI_ADJUST_CFA_OFFSET -4
18102 ret
18103 CFI_ENDPROC
18104 -ENDPROC(csum_partial_copy_generic)
18105 +ENDPROC(csum_partial_copy_generic_to_user)
18106
18107 #else
18108
18109 /* Version for PentiumII/PPro */
18110
18111 #define ROUND1(x) \
18112 + nop; nop; nop; \
18113 SRC(movl x(%esi), %ebx ) ; \
18114 addl %ebx, %eax ; \
18115 - DST(movl %ebx, x(%edi) ) ;
18116 + DST(movl %ebx, %es:x(%edi)) ;
18117
18118 #define ROUND(x) \
18119 + nop; nop; nop; \
18120 SRC(movl x(%esi), %ebx ) ; \
18121 adcl %ebx, %eax ; \
18122 - DST(movl %ebx, x(%edi) ) ;
18123 + DST(movl %ebx, %es:x(%edi)) ;
18124
18125 #define ARGBASE 12
18126 -
18127 -ENTRY(csum_partial_copy_generic)
18128 +
18129 +ENTRY(csum_partial_copy_generic_to_user)
18130 CFI_STARTPROC
18131 +
18132 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18133 + pushl %gs
18134 + CFI_ADJUST_CFA_OFFSET 4
18135 + popl %es
18136 + CFI_ADJUST_CFA_OFFSET -4
18137 + jmp csum_partial_copy_generic
18138 +#endif
18139 +
18140 +ENTRY(csum_partial_copy_generic_from_user)
18141 +
18142 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18143 + pushl %gs
18144 + CFI_ADJUST_CFA_OFFSET 4
18145 + popl %ds
18146 + CFI_ADJUST_CFA_OFFSET -4
18147 +#endif
18148 +
18149 +ENTRY(csum_partial_copy_generic)
18150 pushl %ebx
18151 CFI_ADJUST_CFA_OFFSET 4
18152 CFI_REL_OFFSET ebx, 0
18153 @@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
18154 subl %ebx, %edi
18155 lea -1(%esi),%edx
18156 andl $-32,%edx
18157 - lea 3f(%ebx,%ebx), %ebx
18158 + lea 3f(%ebx,%ebx,2), %ebx
18159 testl %esi, %esi
18160 jmp *%ebx
18161 1: addl $64,%esi
18162 @@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
18163 jb 5f
18164 SRC( movw (%esi), %dx )
18165 leal 2(%esi), %esi
18166 -DST( movw %dx, (%edi) )
18167 +DST( movw %dx, %es:(%edi) )
18168 leal 2(%edi), %edi
18169 je 6f
18170 shll $16,%edx
18171 5:
18172 SRC( movb (%esi), %dl )
18173 -DST( movb %dl, (%edi) )
18174 +DST( movb %dl, %es:(%edi) )
18175 6: addl %edx, %eax
18176 adcl $0, %eax
18177 7:
18178 .section .fixup, "ax"
18179 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
18180 - movl $-EFAULT, (%ebx)
18181 + movl $-EFAULT, %ss:(%ebx)
18182 # zero the complete destination (computing the rest is too much work)
18183 movl ARGBASE+8(%esp),%edi # dst
18184 movl ARGBASE+12(%esp),%ecx # len
18185 @@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
18186 rep; stosb
18187 jmp 7b
18188 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
18189 - movl $-EFAULT, (%ebx)
18190 + movl $-EFAULT, %ss:(%ebx)
18191 jmp 7b
18192 .previous
18193
18194 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18195 + pushl %ss
18196 + CFI_ADJUST_CFA_OFFSET 4
18197 + popl %ds
18198 + CFI_ADJUST_CFA_OFFSET -4
18199 + pushl %ss
18200 + CFI_ADJUST_CFA_OFFSET 4
18201 + popl %es
18202 + CFI_ADJUST_CFA_OFFSET -4
18203 +#endif
18204 +
18205 popl %esi
18206 CFI_ADJUST_CFA_OFFSET -4
18207 CFI_RESTORE esi
18208 @@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
18209 CFI_RESTORE ebx
18210 ret
18211 CFI_ENDPROC
18212 -ENDPROC(csum_partial_copy_generic)
18213 +ENDPROC(csum_partial_copy_generic_to_user)
18214
18215 #undef ROUND
18216 #undef ROUND1
18217 diff -urNp linux-2.6.32.41/arch/x86/lib/clear_page_64.S linux-2.6.32.41/arch/x86/lib/clear_page_64.S
18218 --- linux-2.6.32.41/arch/x86/lib/clear_page_64.S 2011-03-27 14:31:47.000000000 -0400
18219 +++ linux-2.6.32.41/arch/x86/lib/clear_page_64.S 2011-04-17 15:56:46.000000000 -0400
18220 @@ -43,7 +43,7 @@ ENDPROC(clear_page)
18221
18222 #include <asm/cpufeature.h>
18223
18224 - .section .altinstr_replacement,"ax"
18225 + .section .altinstr_replacement,"a"
18226 1: .byte 0xeb /* jmp <disp8> */
18227 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
18228 2:
18229 diff -urNp linux-2.6.32.41/arch/x86/lib/copy_page_64.S linux-2.6.32.41/arch/x86/lib/copy_page_64.S
18230 --- linux-2.6.32.41/arch/x86/lib/copy_page_64.S 2011-03-27 14:31:47.000000000 -0400
18231 +++ linux-2.6.32.41/arch/x86/lib/copy_page_64.S 2011-04-17 15:56:46.000000000 -0400
18232 @@ -104,7 +104,7 @@ ENDPROC(copy_page)
18233
18234 #include <asm/cpufeature.h>
18235
18236 - .section .altinstr_replacement,"ax"
18237 + .section .altinstr_replacement,"a"
18238 1: .byte 0xeb /* jmp <disp8> */
18239 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
18240 2:
18241 diff -urNp linux-2.6.32.41/arch/x86/lib/copy_user_64.S linux-2.6.32.41/arch/x86/lib/copy_user_64.S
18242 --- linux-2.6.32.41/arch/x86/lib/copy_user_64.S 2011-03-27 14:31:47.000000000 -0400
18243 +++ linux-2.6.32.41/arch/x86/lib/copy_user_64.S 2011-04-17 15:56:46.000000000 -0400
18244 @@ -15,13 +15,14 @@
18245 #include <asm/asm-offsets.h>
18246 #include <asm/thread_info.h>
18247 #include <asm/cpufeature.h>
18248 +#include <asm/pgtable.h>
18249
18250 .macro ALTERNATIVE_JUMP feature,orig,alt
18251 0:
18252 .byte 0xe9 /* 32bit jump */
18253 .long \orig-1f /* by default jump to orig */
18254 1:
18255 - .section .altinstr_replacement,"ax"
18256 + .section .altinstr_replacement,"a"
18257 2: .byte 0xe9 /* near jump with 32bit immediate */
18258 .long \alt-1b /* offset */ /* or alternatively to alt */
18259 .previous
18260 @@ -64,49 +65,19 @@
18261 #endif
18262 .endm
18263
18264 -/* Standard copy_to_user with segment limit checking */
18265 -ENTRY(copy_to_user)
18266 - CFI_STARTPROC
18267 - GET_THREAD_INFO(%rax)
18268 - movq %rdi,%rcx
18269 - addq %rdx,%rcx
18270 - jc bad_to_user
18271 - cmpq TI_addr_limit(%rax),%rcx
18272 - jae bad_to_user
18273 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
18274 - CFI_ENDPROC
18275 -ENDPROC(copy_to_user)
18276 -
18277 -/* Standard copy_from_user with segment limit checking */
18278 -ENTRY(copy_from_user)
18279 - CFI_STARTPROC
18280 - GET_THREAD_INFO(%rax)
18281 - movq %rsi,%rcx
18282 - addq %rdx,%rcx
18283 - jc bad_from_user
18284 - cmpq TI_addr_limit(%rax),%rcx
18285 - jae bad_from_user
18286 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
18287 - CFI_ENDPROC
18288 -ENDPROC(copy_from_user)
18289 -
18290 ENTRY(copy_user_generic)
18291 CFI_STARTPROC
18292 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
18293 CFI_ENDPROC
18294 ENDPROC(copy_user_generic)
18295
18296 -ENTRY(__copy_from_user_inatomic)
18297 - CFI_STARTPROC
18298 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
18299 - CFI_ENDPROC
18300 -ENDPROC(__copy_from_user_inatomic)
18301 -
18302 .section .fixup,"ax"
18303 /* must zero dest */
18304 ENTRY(bad_from_user)
18305 bad_from_user:
18306 CFI_STARTPROC
18307 + testl %edx,%edx
18308 + js bad_to_user
18309 movl %edx,%ecx
18310 xorl %eax,%eax
18311 rep
18312 diff -urNp linux-2.6.32.41/arch/x86/lib/copy_user_nocache_64.S linux-2.6.32.41/arch/x86/lib/copy_user_nocache_64.S
18313 --- linux-2.6.32.41/arch/x86/lib/copy_user_nocache_64.S 2011-03-27 14:31:47.000000000 -0400
18314 +++ linux-2.6.32.41/arch/x86/lib/copy_user_nocache_64.S 2011-04-17 15:56:46.000000000 -0400
18315 @@ -14,6 +14,7 @@
18316 #include <asm/current.h>
18317 #include <asm/asm-offsets.h>
18318 #include <asm/thread_info.h>
18319 +#include <asm/pgtable.h>
18320
18321 .macro ALIGN_DESTINATION
18322 #ifdef FIX_ALIGNMENT
18323 @@ -50,6 +51,15 @@
18324 */
18325 ENTRY(__copy_user_nocache)
18326 CFI_STARTPROC
18327 +
18328 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18329 + mov $PAX_USER_SHADOW_BASE,%rcx
18330 + cmp %rcx,%rsi
18331 + jae 1f
18332 + add %rcx,%rsi
18333 +1:
18334 +#endif
18335 +
18336 cmpl $8,%edx
18337 jb 20f /* less then 8 bytes, go to byte copy loop */
18338 ALIGN_DESTINATION
18339 diff -urNp linux-2.6.32.41/arch/x86/lib/csum-wrappers_64.c linux-2.6.32.41/arch/x86/lib/csum-wrappers_64.c
18340 --- linux-2.6.32.41/arch/x86/lib/csum-wrappers_64.c 2011-03-27 14:31:47.000000000 -0400
18341 +++ linux-2.6.32.41/arch/x86/lib/csum-wrappers_64.c 2011-05-04 17:56:20.000000000 -0400
18342 @@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
18343 len -= 2;
18344 }
18345 }
18346 +
18347 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18348 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
18349 + src += PAX_USER_SHADOW_BASE;
18350 +#endif
18351 +
18352 isum = csum_partial_copy_generic((__force const void *)src,
18353 dst, len, isum, errp, NULL);
18354 if (unlikely(*errp))
18355 @@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
18356 }
18357
18358 *errp = 0;
18359 +
18360 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18361 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
18362 + dst += PAX_USER_SHADOW_BASE;
18363 +#endif
18364 +
18365 return csum_partial_copy_generic(src, (void __force *)dst,
18366 len, isum, NULL, errp);
18367 }
18368 diff -urNp linux-2.6.32.41/arch/x86/lib/getuser.S linux-2.6.32.41/arch/x86/lib/getuser.S
18369 --- linux-2.6.32.41/arch/x86/lib/getuser.S 2011-03-27 14:31:47.000000000 -0400
18370 +++ linux-2.6.32.41/arch/x86/lib/getuser.S 2011-04-17 15:56:46.000000000 -0400
18371 @@ -33,14 +33,35 @@
18372 #include <asm/asm-offsets.h>
18373 #include <asm/thread_info.h>
18374 #include <asm/asm.h>
18375 +#include <asm/segment.h>
18376 +#include <asm/pgtable.h>
18377 +
18378 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18379 +#define __copyuser_seg gs;
18380 +#else
18381 +#define __copyuser_seg
18382 +#endif
18383
18384 .text
18385 ENTRY(__get_user_1)
18386 CFI_STARTPROC
18387 +
18388 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18389 GET_THREAD_INFO(%_ASM_DX)
18390 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18391 jae bad_get_user
18392 -1: movzb (%_ASM_AX),%edx
18393 +
18394 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18395 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18396 + cmp %_ASM_DX,%_ASM_AX
18397 + jae 1234f
18398 + add %_ASM_DX,%_ASM_AX
18399 +1234:
18400 +#endif
18401 +
18402 +#endif
18403 +
18404 +1: __copyuser_seg movzb (%_ASM_AX),%edx
18405 xor %eax,%eax
18406 ret
18407 CFI_ENDPROC
18408 @@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
18409 ENTRY(__get_user_2)
18410 CFI_STARTPROC
18411 add $1,%_ASM_AX
18412 +
18413 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18414 jc bad_get_user
18415 GET_THREAD_INFO(%_ASM_DX)
18416 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18417 jae bad_get_user
18418 -2: movzwl -1(%_ASM_AX),%edx
18419 +
18420 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18421 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18422 + cmp %_ASM_DX,%_ASM_AX
18423 + jae 1234f
18424 + add %_ASM_DX,%_ASM_AX
18425 +1234:
18426 +#endif
18427 +
18428 +#endif
18429 +
18430 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
18431 xor %eax,%eax
18432 ret
18433 CFI_ENDPROC
18434 @@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
18435 ENTRY(__get_user_4)
18436 CFI_STARTPROC
18437 add $3,%_ASM_AX
18438 +
18439 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18440 jc bad_get_user
18441 GET_THREAD_INFO(%_ASM_DX)
18442 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18443 jae bad_get_user
18444 -3: mov -3(%_ASM_AX),%edx
18445 +
18446 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18447 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18448 + cmp %_ASM_DX,%_ASM_AX
18449 + jae 1234f
18450 + add %_ASM_DX,%_ASM_AX
18451 +1234:
18452 +#endif
18453 +
18454 +#endif
18455 +
18456 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
18457 xor %eax,%eax
18458 ret
18459 CFI_ENDPROC
18460 @@ -80,6 +127,15 @@ ENTRY(__get_user_8)
18461 GET_THREAD_INFO(%_ASM_DX)
18462 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18463 jae bad_get_user
18464 +
18465 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18466 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18467 + cmp %_ASM_DX,%_ASM_AX
18468 + jae 1234f
18469 + add %_ASM_DX,%_ASM_AX
18470 +1234:
18471 +#endif
18472 +
18473 4: movq -7(%_ASM_AX),%_ASM_DX
18474 xor %eax,%eax
18475 ret
18476 diff -urNp linux-2.6.32.41/arch/x86/lib/memcpy_64.S linux-2.6.32.41/arch/x86/lib/memcpy_64.S
18477 --- linux-2.6.32.41/arch/x86/lib/memcpy_64.S 2011-03-27 14:31:47.000000000 -0400
18478 +++ linux-2.6.32.41/arch/x86/lib/memcpy_64.S 2011-04-17 15:56:46.000000000 -0400
18479 @@ -128,7 +128,7 @@ ENDPROC(__memcpy)
18480 * It is also a lot simpler. Use this when possible:
18481 */
18482
18483 - .section .altinstr_replacement, "ax"
18484 + .section .altinstr_replacement, "a"
18485 1: .byte 0xeb /* jmp <disp8> */
18486 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
18487 2:
18488 diff -urNp linux-2.6.32.41/arch/x86/lib/memset_64.S linux-2.6.32.41/arch/x86/lib/memset_64.S
18489 --- linux-2.6.32.41/arch/x86/lib/memset_64.S 2011-03-27 14:31:47.000000000 -0400
18490 +++ linux-2.6.32.41/arch/x86/lib/memset_64.S 2011-04-17 15:56:46.000000000 -0400
18491 @@ -118,7 +118,7 @@ ENDPROC(__memset)
18492
18493 #include <asm/cpufeature.h>
18494
18495 - .section .altinstr_replacement,"ax"
18496 + .section .altinstr_replacement,"a"
18497 1: .byte 0xeb /* jmp <disp8> */
18498 .byte (memset_c - memset) - (2f - 1b) /* offset */
18499 2:
18500 diff -urNp linux-2.6.32.41/arch/x86/lib/mmx_32.c linux-2.6.32.41/arch/x86/lib/mmx_32.c
18501 --- linux-2.6.32.41/arch/x86/lib/mmx_32.c 2011-03-27 14:31:47.000000000 -0400
18502 +++ linux-2.6.32.41/arch/x86/lib/mmx_32.c 2011-04-17 15:56:46.000000000 -0400
18503 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
18504 {
18505 void *p;
18506 int i;
18507 + unsigned long cr0;
18508
18509 if (unlikely(in_interrupt()))
18510 return __memcpy(to, from, len);
18511 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
18512 kernel_fpu_begin();
18513
18514 __asm__ __volatile__ (
18515 - "1: prefetch (%0)\n" /* This set is 28 bytes */
18516 - " prefetch 64(%0)\n"
18517 - " prefetch 128(%0)\n"
18518 - " prefetch 192(%0)\n"
18519 - " prefetch 256(%0)\n"
18520 + "1: prefetch (%1)\n" /* This set is 28 bytes */
18521 + " prefetch 64(%1)\n"
18522 + " prefetch 128(%1)\n"
18523 + " prefetch 192(%1)\n"
18524 + " prefetch 256(%1)\n"
18525 "2: \n"
18526 ".section .fixup, \"ax\"\n"
18527 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18528 + "3: \n"
18529 +
18530 +#ifdef CONFIG_PAX_KERNEXEC
18531 + " movl %%cr0, %0\n"
18532 + " movl %0, %%eax\n"
18533 + " andl $0xFFFEFFFF, %%eax\n"
18534 + " movl %%eax, %%cr0\n"
18535 +#endif
18536 +
18537 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18538 +
18539 +#ifdef CONFIG_PAX_KERNEXEC
18540 + " movl %0, %%cr0\n"
18541 +#endif
18542 +
18543 " jmp 2b\n"
18544 ".previous\n"
18545 _ASM_EXTABLE(1b, 3b)
18546 - : : "r" (from));
18547 + : "=&r" (cr0) : "r" (from) : "ax");
18548
18549 for ( ; i > 5; i--) {
18550 __asm__ __volatile__ (
18551 - "1: prefetch 320(%0)\n"
18552 - "2: movq (%0), %%mm0\n"
18553 - " movq 8(%0), %%mm1\n"
18554 - " movq 16(%0), %%mm2\n"
18555 - " movq 24(%0), %%mm3\n"
18556 - " movq %%mm0, (%1)\n"
18557 - " movq %%mm1, 8(%1)\n"
18558 - " movq %%mm2, 16(%1)\n"
18559 - " movq %%mm3, 24(%1)\n"
18560 - " movq 32(%0), %%mm0\n"
18561 - " movq 40(%0), %%mm1\n"
18562 - " movq 48(%0), %%mm2\n"
18563 - " movq 56(%0), %%mm3\n"
18564 - " movq %%mm0, 32(%1)\n"
18565 - " movq %%mm1, 40(%1)\n"
18566 - " movq %%mm2, 48(%1)\n"
18567 - " movq %%mm3, 56(%1)\n"
18568 + "1: prefetch 320(%1)\n"
18569 + "2: movq (%1), %%mm0\n"
18570 + " movq 8(%1), %%mm1\n"
18571 + " movq 16(%1), %%mm2\n"
18572 + " movq 24(%1), %%mm3\n"
18573 + " movq %%mm0, (%2)\n"
18574 + " movq %%mm1, 8(%2)\n"
18575 + " movq %%mm2, 16(%2)\n"
18576 + " movq %%mm3, 24(%2)\n"
18577 + " movq 32(%1), %%mm0\n"
18578 + " movq 40(%1), %%mm1\n"
18579 + " movq 48(%1), %%mm2\n"
18580 + " movq 56(%1), %%mm3\n"
18581 + " movq %%mm0, 32(%2)\n"
18582 + " movq %%mm1, 40(%2)\n"
18583 + " movq %%mm2, 48(%2)\n"
18584 + " movq %%mm3, 56(%2)\n"
18585 ".section .fixup, \"ax\"\n"
18586 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18587 + "3:\n"
18588 +
18589 +#ifdef CONFIG_PAX_KERNEXEC
18590 + " movl %%cr0, %0\n"
18591 + " movl %0, %%eax\n"
18592 + " andl $0xFFFEFFFF, %%eax\n"
18593 + " movl %%eax, %%cr0\n"
18594 +#endif
18595 +
18596 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18597 +
18598 +#ifdef CONFIG_PAX_KERNEXEC
18599 + " movl %0, %%cr0\n"
18600 +#endif
18601 +
18602 " jmp 2b\n"
18603 ".previous\n"
18604 _ASM_EXTABLE(1b, 3b)
18605 - : : "r" (from), "r" (to) : "memory");
18606 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18607
18608 from += 64;
18609 to += 64;
18610 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
18611 static void fast_copy_page(void *to, void *from)
18612 {
18613 int i;
18614 + unsigned long cr0;
18615
18616 kernel_fpu_begin();
18617
18618 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
18619 * but that is for later. -AV
18620 */
18621 __asm__ __volatile__(
18622 - "1: prefetch (%0)\n"
18623 - " prefetch 64(%0)\n"
18624 - " prefetch 128(%0)\n"
18625 - " prefetch 192(%0)\n"
18626 - " prefetch 256(%0)\n"
18627 + "1: prefetch (%1)\n"
18628 + " prefetch 64(%1)\n"
18629 + " prefetch 128(%1)\n"
18630 + " prefetch 192(%1)\n"
18631 + " prefetch 256(%1)\n"
18632 "2: \n"
18633 ".section .fixup, \"ax\"\n"
18634 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18635 + "3: \n"
18636 +
18637 +#ifdef CONFIG_PAX_KERNEXEC
18638 + " movl %%cr0, %0\n"
18639 + " movl %0, %%eax\n"
18640 + " andl $0xFFFEFFFF, %%eax\n"
18641 + " movl %%eax, %%cr0\n"
18642 +#endif
18643 +
18644 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18645 +
18646 +#ifdef CONFIG_PAX_KERNEXEC
18647 + " movl %0, %%cr0\n"
18648 +#endif
18649 +
18650 " jmp 2b\n"
18651 ".previous\n"
18652 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
18653 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
18654
18655 for (i = 0; i < (4096-320)/64; i++) {
18656 __asm__ __volatile__ (
18657 - "1: prefetch 320(%0)\n"
18658 - "2: movq (%0), %%mm0\n"
18659 - " movntq %%mm0, (%1)\n"
18660 - " movq 8(%0), %%mm1\n"
18661 - " movntq %%mm1, 8(%1)\n"
18662 - " movq 16(%0), %%mm2\n"
18663 - " movntq %%mm2, 16(%1)\n"
18664 - " movq 24(%0), %%mm3\n"
18665 - " movntq %%mm3, 24(%1)\n"
18666 - " movq 32(%0), %%mm4\n"
18667 - " movntq %%mm4, 32(%1)\n"
18668 - " movq 40(%0), %%mm5\n"
18669 - " movntq %%mm5, 40(%1)\n"
18670 - " movq 48(%0), %%mm6\n"
18671 - " movntq %%mm6, 48(%1)\n"
18672 - " movq 56(%0), %%mm7\n"
18673 - " movntq %%mm7, 56(%1)\n"
18674 + "1: prefetch 320(%1)\n"
18675 + "2: movq (%1), %%mm0\n"
18676 + " movntq %%mm0, (%2)\n"
18677 + " movq 8(%1), %%mm1\n"
18678 + " movntq %%mm1, 8(%2)\n"
18679 + " movq 16(%1), %%mm2\n"
18680 + " movntq %%mm2, 16(%2)\n"
18681 + " movq 24(%1), %%mm3\n"
18682 + " movntq %%mm3, 24(%2)\n"
18683 + " movq 32(%1), %%mm4\n"
18684 + " movntq %%mm4, 32(%2)\n"
18685 + " movq 40(%1), %%mm5\n"
18686 + " movntq %%mm5, 40(%2)\n"
18687 + " movq 48(%1), %%mm6\n"
18688 + " movntq %%mm6, 48(%2)\n"
18689 + " movq 56(%1), %%mm7\n"
18690 + " movntq %%mm7, 56(%2)\n"
18691 ".section .fixup, \"ax\"\n"
18692 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18693 + "3:\n"
18694 +
18695 +#ifdef CONFIG_PAX_KERNEXEC
18696 + " movl %%cr0, %0\n"
18697 + " movl %0, %%eax\n"
18698 + " andl $0xFFFEFFFF, %%eax\n"
18699 + " movl %%eax, %%cr0\n"
18700 +#endif
18701 +
18702 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18703 +
18704 +#ifdef CONFIG_PAX_KERNEXEC
18705 + " movl %0, %%cr0\n"
18706 +#endif
18707 +
18708 " jmp 2b\n"
18709 ".previous\n"
18710 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
18711 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18712
18713 from += 64;
18714 to += 64;
18715 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
18716 static void fast_copy_page(void *to, void *from)
18717 {
18718 int i;
18719 + unsigned long cr0;
18720
18721 kernel_fpu_begin();
18722
18723 __asm__ __volatile__ (
18724 - "1: prefetch (%0)\n"
18725 - " prefetch 64(%0)\n"
18726 - " prefetch 128(%0)\n"
18727 - " prefetch 192(%0)\n"
18728 - " prefetch 256(%0)\n"
18729 + "1: prefetch (%1)\n"
18730 + " prefetch 64(%1)\n"
18731 + " prefetch 128(%1)\n"
18732 + " prefetch 192(%1)\n"
18733 + " prefetch 256(%1)\n"
18734 "2: \n"
18735 ".section .fixup, \"ax\"\n"
18736 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18737 + "3: \n"
18738 +
18739 +#ifdef CONFIG_PAX_KERNEXEC
18740 + " movl %%cr0, %0\n"
18741 + " movl %0, %%eax\n"
18742 + " andl $0xFFFEFFFF, %%eax\n"
18743 + " movl %%eax, %%cr0\n"
18744 +#endif
18745 +
18746 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18747 +
18748 +#ifdef CONFIG_PAX_KERNEXEC
18749 + " movl %0, %%cr0\n"
18750 +#endif
18751 +
18752 " jmp 2b\n"
18753 ".previous\n"
18754 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
18755 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
18756
18757 for (i = 0; i < 4096/64; i++) {
18758 __asm__ __volatile__ (
18759 - "1: prefetch 320(%0)\n"
18760 - "2: movq (%0), %%mm0\n"
18761 - " movq 8(%0), %%mm1\n"
18762 - " movq 16(%0), %%mm2\n"
18763 - " movq 24(%0), %%mm3\n"
18764 - " movq %%mm0, (%1)\n"
18765 - " movq %%mm1, 8(%1)\n"
18766 - " movq %%mm2, 16(%1)\n"
18767 - " movq %%mm3, 24(%1)\n"
18768 - " movq 32(%0), %%mm0\n"
18769 - " movq 40(%0), %%mm1\n"
18770 - " movq 48(%0), %%mm2\n"
18771 - " movq 56(%0), %%mm3\n"
18772 - " movq %%mm0, 32(%1)\n"
18773 - " movq %%mm1, 40(%1)\n"
18774 - " movq %%mm2, 48(%1)\n"
18775 - " movq %%mm3, 56(%1)\n"
18776 + "1: prefetch 320(%1)\n"
18777 + "2: movq (%1), %%mm0\n"
18778 + " movq 8(%1), %%mm1\n"
18779 + " movq 16(%1), %%mm2\n"
18780 + " movq 24(%1), %%mm3\n"
18781 + " movq %%mm0, (%2)\n"
18782 + " movq %%mm1, 8(%2)\n"
18783 + " movq %%mm2, 16(%2)\n"
18784 + " movq %%mm3, 24(%2)\n"
18785 + " movq 32(%1), %%mm0\n"
18786 + " movq 40(%1), %%mm1\n"
18787 + " movq 48(%1), %%mm2\n"
18788 + " movq 56(%1), %%mm3\n"
18789 + " movq %%mm0, 32(%2)\n"
18790 + " movq %%mm1, 40(%2)\n"
18791 + " movq %%mm2, 48(%2)\n"
18792 + " movq %%mm3, 56(%2)\n"
18793 ".section .fixup, \"ax\"\n"
18794 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18795 + "3:\n"
18796 +
18797 +#ifdef CONFIG_PAX_KERNEXEC
18798 + " movl %%cr0, %0\n"
18799 + " movl %0, %%eax\n"
18800 + " andl $0xFFFEFFFF, %%eax\n"
18801 + " movl %%eax, %%cr0\n"
18802 +#endif
18803 +
18804 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18805 +
18806 +#ifdef CONFIG_PAX_KERNEXEC
18807 + " movl %0, %%cr0\n"
18808 +#endif
18809 +
18810 " jmp 2b\n"
18811 ".previous\n"
18812 _ASM_EXTABLE(1b, 3b)
18813 - : : "r" (from), "r" (to) : "memory");
18814 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18815
18816 from += 64;
18817 to += 64;
18818 diff -urNp linux-2.6.32.41/arch/x86/lib/putuser.S linux-2.6.32.41/arch/x86/lib/putuser.S
18819 --- linux-2.6.32.41/arch/x86/lib/putuser.S 2011-03-27 14:31:47.000000000 -0400
18820 +++ linux-2.6.32.41/arch/x86/lib/putuser.S 2011-04-17 15:56:46.000000000 -0400
18821 @@ -15,7 +15,8 @@
18822 #include <asm/thread_info.h>
18823 #include <asm/errno.h>
18824 #include <asm/asm.h>
18825 -
18826 +#include <asm/segment.h>
18827 +#include <asm/pgtable.h>
18828
18829 /*
18830 * __put_user_X
18831 @@ -29,52 +30,119 @@
18832 * as they get called from within inline assembly.
18833 */
18834
18835 -#define ENTER CFI_STARTPROC ; \
18836 - GET_THREAD_INFO(%_ASM_BX)
18837 +#define ENTER CFI_STARTPROC
18838 #define EXIT ret ; \
18839 CFI_ENDPROC
18840
18841 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18842 +#define _DEST %_ASM_CX,%_ASM_BX
18843 +#else
18844 +#define _DEST %_ASM_CX
18845 +#endif
18846 +
18847 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18848 +#define __copyuser_seg gs;
18849 +#else
18850 +#define __copyuser_seg
18851 +#endif
18852 +
18853 .text
18854 ENTRY(__put_user_1)
18855 ENTER
18856 +
18857 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18858 + GET_THREAD_INFO(%_ASM_BX)
18859 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
18860 jae bad_put_user
18861 -1: movb %al,(%_ASM_CX)
18862 +
18863 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18864 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18865 + cmp %_ASM_BX,%_ASM_CX
18866 + jb 1234f
18867 + xor %ebx,%ebx
18868 +1234:
18869 +#endif
18870 +
18871 +#endif
18872 +
18873 +1: __copyuser_seg movb %al,(_DEST)
18874 xor %eax,%eax
18875 EXIT
18876 ENDPROC(__put_user_1)
18877
18878 ENTRY(__put_user_2)
18879 ENTER
18880 +
18881 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18882 + GET_THREAD_INFO(%_ASM_BX)
18883 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18884 sub $1,%_ASM_BX
18885 cmp %_ASM_BX,%_ASM_CX
18886 jae bad_put_user
18887 -2: movw %ax,(%_ASM_CX)
18888 +
18889 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18890 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18891 + cmp %_ASM_BX,%_ASM_CX
18892 + jb 1234f
18893 + xor %ebx,%ebx
18894 +1234:
18895 +#endif
18896 +
18897 +#endif
18898 +
18899 +2: __copyuser_seg movw %ax,(_DEST)
18900 xor %eax,%eax
18901 EXIT
18902 ENDPROC(__put_user_2)
18903
18904 ENTRY(__put_user_4)
18905 ENTER
18906 +
18907 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18908 + GET_THREAD_INFO(%_ASM_BX)
18909 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18910 sub $3,%_ASM_BX
18911 cmp %_ASM_BX,%_ASM_CX
18912 jae bad_put_user
18913 -3: movl %eax,(%_ASM_CX)
18914 +
18915 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18916 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18917 + cmp %_ASM_BX,%_ASM_CX
18918 + jb 1234f
18919 + xor %ebx,%ebx
18920 +1234:
18921 +#endif
18922 +
18923 +#endif
18924 +
18925 +3: __copyuser_seg movl %eax,(_DEST)
18926 xor %eax,%eax
18927 EXIT
18928 ENDPROC(__put_user_4)
18929
18930 ENTRY(__put_user_8)
18931 ENTER
18932 +
18933 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18934 + GET_THREAD_INFO(%_ASM_BX)
18935 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18936 sub $7,%_ASM_BX
18937 cmp %_ASM_BX,%_ASM_CX
18938 jae bad_put_user
18939 -4: mov %_ASM_AX,(%_ASM_CX)
18940 +
18941 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18942 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18943 + cmp %_ASM_BX,%_ASM_CX
18944 + jb 1234f
18945 + xor %ebx,%ebx
18946 +1234:
18947 +#endif
18948 +
18949 +#endif
18950 +
18951 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
18952 #ifdef CONFIG_X86_32
18953 -5: movl %edx,4(%_ASM_CX)
18954 +5: __copyuser_seg movl %edx,4(_DEST)
18955 #endif
18956 xor %eax,%eax
18957 EXIT
18958 diff -urNp linux-2.6.32.41/arch/x86/lib/usercopy_32.c linux-2.6.32.41/arch/x86/lib/usercopy_32.c
18959 --- linux-2.6.32.41/arch/x86/lib/usercopy_32.c 2011-03-27 14:31:47.000000000 -0400
18960 +++ linux-2.6.32.41/arch/x86/lib/usercopy_32.c 2011-04-23 21:12:28.000000000 -0400
18961 @@ -43,7 +43,7 @@ do { \
18962 __asm__ __volatile__( \
18963 " testl %1,%1\n" \
18964 " jz 2f\n" \
18965 - "0: lodsb\n" \
18966 + "0: "__copyuser_seg"lodsb\n" \
18967 " stosb\n" \
18968 " testb %%al,%%al\n" \
18969 " jz 1f\n" \
18970 @@ -128,10 +128,12 @@ do { \
18971 int __d0; \
18972 might_fault(); \
18973 __asm__ __volatile__( \
18974 + __COPYUSER_SET_ES \
18975 "0: rep; stosl\n" \
18976 " movl %2,%0\n" \
18977 "1: rep; stosb\n" \
18978 "2:\n" \
18979 + __COPYUSER_RESTORE_ES \
18980 ".section .fixup,\"ax\"\n" \
18981 "3: lea 0(%2,%0,4),%0\n" \
18982 " jmp 2b\n" \
18983 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
18984 might_fault();
18985
18986 __asm__ __volatile__(
18987 + __COPYUSER_SET_ES
18988 " testl %0, %0\n"
18989 " jz 3f\n"
18990 " andl %0,%%ecx\n"
18991 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
18992 " subl %%ecx,%0\n"
18993 " addl %0,%%eax\n"
18994 "1:\n"
18995 + __COPYUSER_RESTORE_ES
18996 ".section .fixup,\"ax\"\n"
18997 "2: xorl %%eax,%%eax\n"
18998 " jmp 1b\n"
18999 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
19000
19001 #ifdef CONFIG_X86_INTEL_USERCOPY
19002 static unsigned long
19003 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
19004 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
19005 {
19006 int d0, d1;
19007 __asm__ __volatile__(
19008 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
19009 " .align 2,0x90\n"
19010 "3: movl 0(%4), %%eax\n"
19011 "4: movl 4(%4), %%edx\n"
19012 - "5: movl %%eax, 0(%3)\n"
19013 - "6: movl %%edx, 4(%3)\n"
19014 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
19015 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
19016 "7: movl 8(%4), %%eax\n"
19017 "8: movl 12(%4),%%edx\n"
19018 - "9: movl %%eax, 8(%3)\n"
19019 - "10: movl %%edx, 12(%3)\n"
19020 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
19021 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
19022 "11: movl 16(%4), %%eax\n"
19023 "12: movl 20(%4), %%edx\n"
19024 - "13: movl %%eax, 16(%3)\n"
19025 - "14: movl %%edx, 20(%3)\n"
19026 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
19027 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
19028 "15: movl 24(%4), %%eax\n"
19029 "16: movl 28(%4), %%edx\n"
19030 - "17: movl %%eax, 24(%3)\n"
19031 - "18: movl %%edx, 28(%3)\n"
19032 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
19033 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
19034 "19: movl 32(%4), %%eax\n"
19035 "20: movl 36(%4), %%edx\n"
19036 - "21: movl %%eax, 32(%3)\n"
19037 - "22: movl %%edx, 36(%3)\n"
19038 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
19039 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
19040 "23: movl 40(%4), %%eax\n"
19041 "24: movl 44(%4), %%edx\n"
19042 - "25: movl %%eax, 40(%3)\n"
19043 - "26: movl %%edx, 44(%3)\n"
19044 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
19045 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
19046 "27: movl 48(%4), %%eax\n"
19047 "28: movl 52(%4), %%edx\n"
19048 - "29: movl %%eax, 48(%3)\n"
19049 - "30: movl %%edx, 52(%3)\n"
19050 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
19051 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
19052 "31: movl 56(%4), %%eax\n"
19053 "32: movl 60(%4), %%edx\n"
19054 - "33: movl %%eax, 56(%3)\n"
19055 - "34: movl %%edx, 60(%3)\n"
19056 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
19057 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
19058 " addl $-64, %0\n"
19059 " addl $64, %4\n"
19060 " addl $64, %3\n"
19061 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
19062 " shrl $2, %0\n"
19063 " andl $3, %%eax\n"
19064 " cld\n"
19065 + __COPYUSER_SET_ES
19066 "99: rep; movsl\n"
19067 "36: movl %%eax, %0\n"
19068 "37: rep; movsb\n"
19069 "100:\n"
19070 + __COPYUSER_RESTORE_ES
19071 + ".section .fixup,\"ax\"\n"
19072 + "101: lea 0(%%eax,%0,4),%0\n"
19073 + " jmp 100b\n"
19074 + ".previous\n"
19075 + ".section __ex_table,\"a\"\n"
19076 + " .align 4\n"
19077 + " .long 1b,100b\n"
19078 + " .long 2b,100b\n"
19079 + " .long 3b,100b\n"
19080 + " .long 4b,100b\n"
19081 + " .long 5b,100b\n"
19082 + " .long 6b,100b\n"
19083 + " .long 7b,100b\n"
19084 + " .long 8b,100b\n"
19085 + " .long 9b,100b\n"
19086 + " .long 10b,100b\n"
19087 + " .long 11b,100b\n"
19088 + " .long 12b,100b\n"
19089 + " .long 13b,100b\n"
19090 + " .long 14b,100b\n"
19091 + " .long 15b,100b\n"
19092 + " .long 16b,100b\n"
19093 + " .long 17b,100b\n"
19094 + " .long 18b,100b\n"
19095 + " .long 19b,100b\n"
19096 + " .long 20b,100b\n"
19097 + " .long 21b,100b\n"
19098 + " .long 22b,100b\n"
19099 + " .long 23b,100b\n"
19100 + " .long 24b,100b\n"
19101 + " .long 25b,100b\n"
19102 + " .long 26b,100b\n"
19103 + " .long 27b,100b\n"
19104 + " .long 28b,100b\n"
19105 + " .long 29b,100b\n"
19106 + " .long 30b,100b\n"
19107 + " .long 31b,100b\n"
19108 + " .long 32b,100b\n"
19109 + " .long 33b,100b\n"
19110 + " .long 34b,100b\n"
19111 + " .long 35b,100b\n"
19112 + " .long 36b,100b\n"
19113 + " .long 37b,100b\n"
19114 + " .long 99b,101b\n"
19115 + ".previous"
19116 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
19117 + : "1"(to), "2"(from), "0"(size)
19118 + : "eax", "edx", "memory");
19119 + return size;
19120 +}
19121 +
19122 +static unsigned long
19123 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
19124 +{
19125 + int d0, d1;
19126 + __asm__ __volatile__(
19127 + " .align 2,0x90\n"
19128 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
19129 + " cmpl $67, %0\n"
19130 + " jbe 3f\n"
19131 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
19132 + " .align 2,0x90\n"
19133 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
19134 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
19135 + "5: movl %%eax, 0(%3)\n"
19136 + "6: movl %%edx, 4(%3)\n"
19137 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
19138 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
19139 + "9: movl %%eax, 8(%3)\n"
19140 + "10: movl %%edx, 12(%3)\n"
19141 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
19142 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
19143 + "13: movl %%eax, 16(%3)\n"
19144 + "14: movl %%edx, 20(%3)\n"
19145 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
19146 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
19147 + "17: movl %%eax, 24(%3)\n"
19148 + "18: movl %%edx, 28(%3)\n"
19149 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
19150 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
19151 + "21: movl %%eax, 32(%3)\n"
19152 + "22: movl %%edx, 36(%3)\n"
19153 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
19154 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
19155 + "25: movl %%eax, 40(%3)\n"
19156 + "26: movl %%edx, 44(%3)\n"
19157 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
19158 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
19159 + "29: movl %%eax, 48(%3)\n"
19160 + "30: movl %%edx, 52(%3)\n"
19161 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
19162 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
19163 + "33: movl %%eax, 56(%3)\n"
19164 + "34: movl %%edx, 60(%3)\n"
19165 + " addl $-64, %0\n"
19166 + " addl $64, %4\n"
19167 + " addl $64, %3\n"
19168 + " cmpl $63, %0\n"
19169 + " ja 1b\n"
19170 + "35: movl %0, %%eax\n"
19171 + " shrl $2, %0\n"
19172 + " andl $3, %%eax\n"
19173 + " cld\n"
19174 + "99: rep; "__copyuser_seg" movsl\n"
19175 + "36: movl %%eax, %0\n"
19176 + "37: rep; "__copyuser_seg" movsb\n"
19177 + "100:\n"
19178 ".section .fixup,\"ax\"\n"
19179 "101: lea 0(%%eax,%0,4),%0\n"
19180 " jmp 100b\n"
19181 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
19182 int d0, d1;
19183 __asm__ __volatile__(
19184 " .align 2,0x90\n"
19185 - "0: movl 32(%4), %%eax\n"
19186 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19187 " cmpl $67, %0\n"
19188 " jbe 2f\n"
19189 - "1: movl 64(%4), %%eax\n"
19190 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19191 " .align 2,0x90\n"
19192 - "2: movl 0(%4), %%eax\n"
19193 - "21: movl 4(%4), %%edx\n"
19194 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19195 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19196 " movl %%eax, 0(%3)\n"
19197 " movl %%edx, 4(%3)\n"
19198 - "3: movl 8(%4), %%eax\n"
19199 - "31: movl 12(%4),%%edx\n"
19200 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19201 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19202 " movl %%eax, 8(%3)\n"
19203 " movl %%edx, 12(%3)\n"
19204 - "4: movl 16(%4), %%eax\n"
19205 - "41: movl 20(%4), %%edx\n"
19206 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19207 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19208 " movl %%eax, 16(%3)\n"
19209 " movl %%edx, 20(%3)\n"
19210 - "10: movl 24(%4), %%eax\n"
19211 - "51: movl 28(%4), %%edx\n"
19212 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19213 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19214 " movl %%eax, 24(%3)\n"
19215 " movl %%edx, 28(%3)\n"
19216 - "11: movl 32(%4), %%eax\n"
19217 - "61: movl 36(%4), %%edx\n"
19218 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19219 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19220 " movl %%eax, 32(%3)\n"
19221 " movl %%edx, 36(%3)\n"
19222 - "12: movl 40(%4), %%eax\n"
19223 - "71: movl 44(%4), %%edx\n"
19224 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19225 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19226 " movl %%eax, 40(%3)\n"
19227 " movl %%edx, 44(%3)\n"
19228 - "13: movl 48(%4), %%eax\n"
19229 - "81: movl 52(%4), %%edx\n"
19230 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19231 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19232 " movl %%eax, 48(%3)\n"
19233 " movl %%edx, 52(%3)\n"
19234 - "14: movl 56(%4), %%eax\n"
19235 - "91: movl 60(%4), %%edx\n"
19236 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19237 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19238 " movl %%eax, 56(%3)\n"
19239 " movl %%edx, 60(%3)\n"
19240 " addl $-64, %0\n"
19241 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
19242 " shrl $2, %0\n"
19243 " andl $3, %%eax\n"
19244 " cld\n"
19245 - "6: rep; movsl\n"
19246 + "6: rep; "__copyuser_seg" movsl\n"
19247 " movl %%eax,%0\n"
19248 - "7: rep; movsb\n"
19249 + "7: rep; "__copyuser_seg" movsb\n"
19250 "8:\n"
19251 ".section .fixup,\"ax\"\n"
19252 "9: lea 0(%%eax,%0,4),%0\n"
19253 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
19254
19255 __asm__ __volatile__(
19256 " .align 2,0x90\n"
19257 - "0: movl 32(%4), %%eax\n"
19258 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19259 " cmpl $67, %0\n"
19260 " jbe 2f\n"
19261 - "1: movl 64(%4), %%eax\n"
19262 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19263 " .align 2,0x90\n"
19264 - "2: movl 0(%4), %%eax\n"
19265 - "21: movl 4(%4), %%edx\n"
19266 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19267 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19268 " movnti %%eax, 0(%3)\n"
19269 " movnti %%edx, 4(%3)\n"
19270 - "3: movl 8(%4), %%eax\n"
19271 - "31: movl 12(%4),%%edx\n"
19272 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19273 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19274 " movnti %%eax, 8(%3)\n"
19275 " movnti %%edx, 12(%3)\n"
19276 - "4: movl 16(%4), %%eax\n"
19277 - "41: movl 20(%4), %%edx\n"
19278 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19279 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19280 " movnti %%eax, 16(%3)\n"
19281 " movnti %%edx, 20(%3)\n"
19282 - "10: movl 24(%4), %%eax\n"
19283 - "51: movl 28(%4), %%edx\n"
19284 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19285 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19286 " movnti %%eax, 24(%3)\n"
19287 " movnti %%edx, 28(%3)\n"
19288 - "11: movl 32(%4), %%eax\n"
19289 - "61: movl 36(%4), %%edx\n"
19290 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19291 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19292 " movnti %%eax, 32(%3)\n"
19293 " movnti %%edx, 36(%3)\n"
19294 - "12: movl 40(%4), %%eax\n"
19295 - "71: movl 44(%4), %%edx\n"
19296 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19297 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19298 " movnti %%eax, 40(%3)\n"
19299 " movnti %%edx, 44(%3)\n"
19300 - "13: movl 48(%4), %%eax\n"
19301 - "81: movl 52(%4), %%edx\n"
19302 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19303 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19304 " movnti %%eax, 48(%3)\n"
19305 " movnti %%edx, 52(%3)\n"
19306 - "14: movl 56(%4), %%eax\n"
19307 - "91: movl 60(%4), %%edx\n"
19308 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19309 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19310 " movnti %%eax, 56(%3)\n"
19311 " movnti %%edx, 60(%3)\n"
19312 " addl $-64, %0\n"
19313 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
19314 " shrl $2, %0\n"
19315 " andl $3, %%eax\n"
19316 " cld\n"
19317 - "6: rep; movsl\n"
19318 + "6: rep; "__copyuser_seg" movsl\n"
19319 " movl %%eax,%0\n"
19320 - "7: rep; movsb\n"
19321 + "7: rep; "__copyuser_seg" movsb\n"
19322 "8:\n"
19323 ".section .fixup,\"ax\"\n"
19324 "9: lea 0(%%eax,%0,4),%0\n"
19325 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
19326
19327 __asm__ __volatile__(
19328 " .align 2,0x90\n"
19329 - "0: movl 32(%4), %%eax\n"
19330 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19331 " cmpl $67, %0\n"
19332 " jbe 2f\n"
19333 - "1: movl 64(%4), %%eax\n"
19334 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19335 " .align 2,0x90\n"
19336 - "2: movl 0(%4), %%eax\n"
19337 - "21: movl 4(%4), %%edx\n"
19338 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19339 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19340 " movnti %%eax, 0(%3)\n"
19341 " movnti %%edx, 4(%3)\n"
19342 - "3: movl 8(%4), %%eax\n"
19343 - "31: movl 12(%4),%%edx\n"
19344 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19345 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19346 " movnti %%eax, 8(%3)\n"
19347 " movnti %%edx, 12(%3)\n"
19348 - "4: movl 16(%4), %%eax\n"
19349 - "41: movl 20(%4), %%edx\n"
19350 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19351 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19352 " movnti %%eax, 16(%3)\n"
19353 " movnti %%edx, 20(%3)\n"
19354 - "10: movl 24(%4), %%eax\n"
19355 - "51: movl 28(%4), %%edx\n"
19356 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19357 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19358 " movnti %%eax, 24(%3)\n"
19359 " movnti %%edx, 28(%3)\n"
19360 - "11: movl 32(%4), %%eax\n"
19361 - "61: movl 36(%4), %%edx\n"
19362 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19363 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19364 " movnti %%eax, 32(%3)\n"
19365 " movnti %%edx, 36(%3)\n"
19366 - "12: movl 40(%4), %%eax\n"
19367 - "71: movl 44(%4), %%edx\n"
19368 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19369 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19370 " movnti %%eax, 40(%3)\n"
19371 " movnti %%edx, 44(%3)\n"
19372 - "13: movl 48(%4), %%eax\n"
19373 - "81: movl 52(%4), %%edx\n"
19374 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19375 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19376 " movnti %%eax, 48(%3)\n"
19377 " movnti %%edx, 52(%3)\n"
19378 - "14: movl 56(%4), %%eax\n"
19379 - "91: movl 60(%4), %%edx\n"
19380 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19381 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19382 " movnti %%eax, 56(%3)\n"
19383 " movnti %%edx, 60(%3)\n"
19384 " addl $-64, %0\n"
19385 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
19386 " shrl $2, %0\n"
19387 " andl $3, %%eax\n"
19388 " cld\n"
19389 - "6: rep; movsl\n"
19390 + "6: rep; "__copyuser_seg" movsl\n"
19391 " movl %%eax,%0\n"
19392 - "7: rep; movsb\n"
19393 + "7: rep; "__copyuser_seg" movsb\n"
19394 "8:\n"
19395 ".section .fixup,\"ax\"\n"
19396 "9: lea 0(%%eax,%0,4),%0\n"
19397 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
19398 */
19399 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
19400 unsigned long size);
19401 -unsigned long __copy_user_intel(void __user *to, const void *from,
19402 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
19403 + unsigned long size);
19404 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
19405 unsigned long size);
19406 unsigned long __copy_user_zeroing_intel_nocache(void *to,
19407 const void __user *from, unsigned long size);
19408 #endif /* CONFIG_X86_INTEL_USERCOPY */
19409
19410 /* Generic arbitrary sized copy. */
19411 -#define __copy_user(to, from, size) \
19412 +#define __copy_user(to, from, size, prefix, set, restore) \
19413 do { \
19414 int __d0, __d1, __d2; \
19415 __asm__ __volatile__( \
19416 + set \
19417 " cmp $7,%0\n" \
19418 " jbe 1f\n" \
19419 " movl %1,%0\n" \
19420 " negl %0\n" \
19421 " andl $7,%0\n" \
19422 " subl %0,%3\n" \
19423 - "4: rep; movsb\n" \
19424 + "4: rep; "prefix"movsb\n" \
19425 " movl %3,%0\n" \
19426 " shrl $2,%0\n" \
19427 " andl $3,%3\n" \
19428 " .align 2,0x90\n" \
19429 - "0: rep; movsl\n" \
19430 + "0: rep; "prefix"movsl\n" \
19431 " movl %3,%0\n" \
19432 - "1: rep; movsb\n" \
19433 + "1: rep; "prefix"movsb\n" \
19434 "2:\n" \
19435 + restore \
19436 ".section .fixup,\"ax\"\n" \
19437 "5: addl %3,%0\n" \
19438 " jmp 2b\n" \
19439 @@ -682,14 +799,14 @@ do { \
19440 " negl %0\n" \
19441 " andl $7,%0\n" \
19442 " subl %0,%3\n" \
19443 - "4: rep; movsb\n" \
19444 + "4: rep; "__copyuser_seg"movsb\n" \
19445 " movl %3,%0\n" \
19446 " shrl $2,%0\n" \
19447 " andl $3,%3\n" \
19448 " .align 2,0x90\n" \
19449 - "0: rep; movsl\n" \
19450 + "0: rep; "__copyuser_seg"movsl\n" \
19451 " movl %3,%0\n" \
19452 - "1: rep; movsb\n" \
19453 + "1: rep; "__copyuser_seg"movsb\n" \
19454 "2:\n" \
19455 ".section .fixup,\"ax\"\n" \
19456 "5: addl %3,%0\n" \
19457 @@ -775,9 +892,9 @@ survive:
19458 }
19459 #endif
19460 if (movsl_is_ok(to, from, n))
19461 - __copy_user(to, from, n);
19462 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
19463 else
19464 - n = __copy_user_intel(to, from, n);
19465 + n = __generic_copy_to_user_intel(to, from, n);
19466 return n;
19467 }
19468 EXPORT_SYMBOL(__copy_to_user_ll);
19469 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
19470 unsigned long n)
19471 {
19472 if (movsl_is_ok(to, from, n))
19473 - __copy_user(to, from, n);
19474 + __copy_user(to, from, n, __copyuser_seg, "", "");
19475 else
19476 - n = __copy_user_intel((void __user *)to,
19477 - (const void *)from, n);
19478 + n = __generic_copy_from_user_intel(to, from, n);
19479 return n;
19480 }
19481 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
19482 @@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocach
19483 if (n > 64 && cpu_has_xmm2)
19484 n = __copy_user_intel_nocache(to, from, n);
19485 else
19486 - __copy_user(to, from, n);
19487 + __copy_user(to, from, n, __copyuser_seg, "", "");
19488 #else
19489 - __copy_user(to, from, n);
19490 + __copy_user(to, from, n, __copyuser_seg, "", "");
19491 #endif
19492 return n;
19493 }
19494 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
19495
19496 -/**
19497 - * copy_to_user: - Copy a block of data into user space.
19498 - * @to: Destination address, in user space.
19499 - * @from: Source address, in kernel space.
19500 - * @n: Number of bytes to copy.
19501 - *
19502 - * Context: User context only. This function may sleep.
19503 - *
19504 - * Copy data from kernel space to user space.
19505 - *
19506 - * Returns number of bytes that could not be copied.
19507 - * On success, this will be zero.
19508 - */
19509 -unsigned long
19510 -copy_to_user(void __user *to, const void *from, unsigned long n)
19511 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19512 +void __set_fs(mm_segment_t x)
19513 {
19514 - if (access_ok(VERIFY_WRITE, to, n))
19515 - n = __copy_to_user(to, from, n);
19516 - return n;
19517 + switch (x.seg) {
19518 + case 0:
19519 + loadsegment(gs, 0);
19520 + break;
19521 + case TASK_SIZE_MAX:
19522 + loadsegment(gs, __USER_DS);
19523 + break;
19524 + case -1UL:
19525 + loadsegment(gs, __KERNEL_DS);
19526 + break;
19527 + default:
19528 + BUG();
19529 + }
19530 + return;
19531 }
19532 -EXPORT_SYMBOL(copy_to_user);
19533 +EXPORT_SYMBOL(__set_fs);
19534
19535 -/**
19536 - * copy_from_user: - Copy a block of data from user space.
19537 - * @to: Destination address, in kernel space.
19538 - * @from: Source address, in user space.
19539 - * @n: Number of bytes to copy.
19540 - *
19541 - * Context: User context only. This function may sleep.
19542 - *
19543 - * Copy data from user space to kernel space.
19544 - *
19545 - * Returns number of bytes that could not be copied.
19546 - * On success, this will be zero.
19547 - *
19548 - * If some data could not be copied, this function will pad the copied
19549 - * data to the requested size using zero bytes.
19550 - */
19551 -unsigned long
19552 -copy_from_user(void *to, const void __user *from, unsigned long n)
19553 +void set_fs(mm_segment_t x)
19554 {
19555 - if (access_ok(VERIFY_READ, from, n))
19556 - n = __copy_from_user(to, from, n);
19557 - else
19558 - memset(to, 0, n);
19559 - return n;
19560 + current_thread_info()->addr_limit = x;
19561 + __set_fs(x);
19562 }
19563 -EXPORT_SYMBOL(copy_from_user);
19564 +EXPORT_SYMBOL(set_fs);
19565 +#endif
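The wrappers removed at the end of this file followed the usual checked-copy pattern, and their return-value convention (number of bytes left uncopied) is unchanged by the patch. A minimal caller-side sketch, with a hypothetical function name:

/* Sketch only: how callers typically consume the copy helpers patched above. */
static long read_user_buf(void *dst, const void __user *src, unsigned long len)
{
        unsigned long left = copy_from_user(dst, src, len);    /* 0 on full success */

        if (left)
                return -EFAULT;         /* partial copy: reject the whole request */
        return len;
}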
19566 diff -urNp linux-2.6.32.41/arch/x86/lib/usercopy_64.c linux-2.6.32.41/arch/x86/lib/usercopy_64.c
19567 --- linux-2.6.32.41/arch/x86/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
19568 +++ linux-2.6.32.41/arch/x86/lib/usercopy_64.c 2011-05-04 17:56:20.000000000 -0400
19569 @@ -42,6 +42,12 @@ long
19570 __strncpy_from_user(char *dst, const char __user *src, long count)
19571 {
19572 long res;
19573 +
19574 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19575 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19576 + src += PAX_USER_SHADOW_BASE;
19577 +#endif
19578 +
19579 __do_strncpy_from_user(dst, src, count, res);
19580 return res;
19581 }
19582 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
19583 {
19584 long __d0;
19585 might_fault();
19586 +
19587 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19588 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
19589 + addr += PAX_USER_SHADOW_BASE;
19590 +#endif
19591 +
19592 /* no memory constraint because it doesn't change any memory gcc knows
19593 about */
19594 asm volatile(
19595 @@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
19596
19597 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
19598 {
19599 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
19600 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
19601 +
19602 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19603 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
19604 + to += PAX_USER_SHADOW_BASE;
19605 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
19606 + from += PAX_USER_SHADOW_BASE;
19607 +#endif
19608 +
19609 return copy_user_generic((__force void *)to, (__force void *)from, len);
19610 - }
19611 - return len;
19612 + }
19613 + return len;
19614 }
19615 EXPORT_SYMBOL(copy_in_user);
19616
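All three UDEREF additions in this file follow one pattern: a userland pointer that has not yet been rebased into the shadowed userland area gets PAX_USER_SHADOW_BASE added before the low-level copy runs. Expressed as a helper (the helper itself is hypothetical; only the pattern is taken from the hunks above):

static inline void __user *pax_rebase_user(void __user *ptr)
{
#ifdef CONFIG_PAX_MEMORY_UDEREF
        if ((unsigned long)ptr < PAX_USER_SHADOW_BASE)
                ptr += PAX_USER_SHADOW_BASE;    /* same test as in __strncpy_from_user/__clear_user/copy_in_user */
#endif
        return ptr;
}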
19617 diff -urNp linux-2.6.32.41/arch/x86/Makefile linux-2.6.32.41/arch/x86/Makefile
19618 --- linux-2.6.32.41/arch/x86/Makefile 2011-03-27 14:31:47.000000000 -0400
19619 +++ linux-2.6.32.41/arch/x86/Makefile 2011-04-17 15:56:46.000000000 -0400
19620 @@ -189,3 +189,12 @@ define archhelp
19621 echo ' FDARGS="..." arguments for the booted kernel'
19622 echo ' FDINITRD=file initrd for the booted kernel'
19623 endef
19624 +
19625 +define OLD_LD
19626 +
19627 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
19628 +*** Please upgrade your binutils to 2.18 or newer
19629 +endef
19630 +
19631 +archprepare:
19632 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))

19633 diff -urNp linux-2.6.32.41/arch/x86/mm/extable.c linux-2.6.32.41/arch/x86/mm/extable.c
19634 --- linux-2.6.32.41/arch/x86/mm/extable.c 2011-03-27 14:31:47.000000000 -0400
19635 +++ linux-2.6.32.41/arch/x86/mm/extable.c 2011-04-17 15:56:46.000000000 -0400
19636 @@ -1,14 +1,71 @@
19637 #include <linux/module.h>
19638 #include <linux/spinlock.h>
19639 +#include <linux/sort.h>
19640 #include <asm/uaccess.h>
19641 +#include <asm/pgtable.h>
19642
19643 +/*
19644 + * The exception table needs to be sorted so that the binary
19645 + * search that we use to find entries in it works properly.
19646 + * This is used both for the kernel exception table and for
19647 + * the exception tables of modules that get loaded.
19648 + */
19649 +static int cmp_ex(const void *a, const void *b)
19650 +{
19651 + const struct exception_table_entry *x = a, *y = b;
19652 +
19653 + /* avoid overflow */
19654 + if (x->insn > y->insn)
19655 + return 1;
19656 + if (x->insn < y->insn)
19657 + return -1;
19658 + return 0;
19659 +}
19660 +
19661 +static void swap_ex(void *a, void *b, int size)
19662 +{
19663 + struct exception_table_entry t, *x = a, *y = b;
19664 +
19665 + t = *x;
19666 +
19667 + pax_open_kernel();
19668 + *x = *y;
19669 + *y = t;
19670 + pax_close_kernel();
19671 +}
19672 +
19673 +void sort_extable(struct exception_table_entry *start,
19674 + struct exception_table_entry *finish)
19675 +{
19676 + sort(start, finish - start, sizeof(struct exception_table_entry),
19677 + cmp_ex, swap_ex);
19678 +}
19679 +
19680 +#ifdef CONFIG_MODULES
19681 +/*
19682 + * If the exception table is sorted, any referring to the module init
19683 + * will be at the beginning or the end.
19684 + */
19685 +void trim_init_extable(struct module *m)
19686 +{
19687 + /*trim the beginning*/
19688 + while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
19689 + m->extable++;
19690 + m->num_exentries--;
19691 + }
19692 + /*trim the end*/
19693 + while (m->num_exentries &&
19694 + within_module_init(m->extable[m->num_exentries-1].insn, m))
19695 + m->num_exentries--;
19696 +}
19697 +#endif /* CONFIG_MODULES */
19698
19699 int fixup_exception(struct pt_regs *regs)
19700 {
19701 const struct exception_table_entry *fixup;
19702
19703 #ifdef CONFIG_PNPBIOS
19704 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
19705 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
19706 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
19707 extern u32 pnp_bios_is_utter_crap;
19708 pnp_bios_is_utter_crap = 1;
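sort_extable() above keeps the exception table ordered by faulting instruction address because lookups during fixup_exception() are a binary search over that field. A rough sketch of such a lookup (not the kernel's actual search function):

static const struct exception_table_entry *
find_fixup(const struct exception_table_entry *tbl, unsigned int num, unsigned long ip)
{
        unsigned int lo = 0, hi = num;

        while (lo < hi) {
                unsigned int mid = lo + (hi - lo) / 2;

                if (tbl[mid].insn == ip)
                        return &tbl[mid];       /* ->fixup is where execution resumes */
                if (tbl[mid].insn < ip)
                        lo = mid + 1;
                else
                        hi = mid;
        }
        return NULL;                            /* no fixup: the fault is fatal */
}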
19709 diff -urNp linux-2.6.32.41/arch/x86/mm/fault.c linux-2.6.32.41/arch/x86/mm/fault.c
19710 --- linux-2.6.32.41/arch/x86/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
19711 +++ linux-2.6.32.41/arch/x86/mm/fault.c 2011-06-06 17:35:16.000000000 -0400
19712 @@ -11,10 +11,19 @@
19713 #include <linux/kprobes.h> /* __kprobes, ... */
19714 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
19715 #include <linux/perf_event.h> /* perf_sw_event */
19716 +#include <linux/unistd.h>
19717 +#include <linux/compiler.h>
19718
19719 #include <asm/traps.h> /* dotraplinkage, ... */
19720 #include <asm/pgalloc.h> /* pgd_*(), ... */
19721 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
19722 +#include <asm/vsyscall.h>
19723 +#include <asm/tlbflush.h>
19724 +
19725 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19726 +#include <asm/stacktrace.h>
19727 +#include "../kernel/dumpstack.h"
19728 +#endif
19729
19730 /*
19731 * Page fault error code bits:
19732 @@ -51,7 +60,7 @@ static inline int notify_page_fault(stru
19733 int ret = 0;
19734
19735 /* kprobe_running() needs smp_processor_id() */
19736 - if (kprobes_built_in() && !user_mode_vm(regs)) {
19737 + if (kprobes_built_in() && !user_mode(regs)) {
19738 preempt_disable();
19739 if (kprobe_running() && kprobe_fault_handler(regs, 14))
19740 ret = 1;
19741 @@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *re
19742 return !instr_lo || (instr_lo>>1) == 1;
19743 case 0x00:
19744 /* Prefetch instruction is 0x0F0D or 0x0F18 */
19745 - if (probe_kernel_address(instr, opcode))
19746 + if (user_mode(regs)) {
19747 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
19748 + return 0;
19749 + } else if (probe_kernel_address(instr, opcode))
19750 return 0;
19751
19752 *prefetch = (instr_lo == 0xF) &&
19753 @@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsign
19754 while (instr < max_instr) {
19755 unsigned char opcode;
19756
19757 - if (probe_kernel_address(instr, opcode))
19758 + if (user_mode(regs)) {
19759 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
19760 + break;
19761 + } else if (probe_kernel_address(instr, opcode))
19762 break;
19763
19764 instr++;
19765 @@ -172,6 +187,30 @@ force_sig_info_fault(int si_signo, int s
19766 force_sig_info(si_signo, &info, tsk);
19767 }
19768
19769 +#ifdef CONFIG_PAX_EMUTRAMP
19770 +static int pax_handle_fetch_fault(struct pt_regs *regs);
19771 +#endif
19772 +
19773 +#ifdef CONFIG_PAX_PAGEEXEC
19774 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
19775 +{
19776 + pgd_t *pgd;
19777 + pud_t *pud;
19778 + pmd_t *pmd;
19779 +
19780 + pgd = pgd_offset(mm, address);
19781 + if (!pgd_present(*pgd))
19782 + return NULL;
19783 + pud = pud_offset(pgd, address);
19784 + if (!pud_present(*pud))
19785 + return NULL;
19786 + pmd = pmd_offset(pud, address);
19787 + if (!pmd_present(*pmd))
19788 + return NULL;
19789 + return pmd;
19790 +}
19791 +#endif
19792 +
19793 DEFINE_SPINLOCK(pgd_lock);
19794 LIST_HEAD(pgd_list);
19795
19796 @@ -224,11 +263,24 @@ void vmalloc_sync_all(void)
19797 address += PMD_SIZE) {
19798
19799 unsigned long flags;
19800 +
19801 +#ifdef CONFIG_PAX_PER_CPU_PGD
19802 + unsigned long cpu;
19803 +#else
19804 struct page *page;
19805 +#endif
19806
19807 spin_lock_irqsave(&pgd_lock, flags);
19808 +
19809 +#ifdef CONFIG_PAX_PER_CPU_PGD
19810 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19811 + pgd_t *pgd = get_cpu_pgd(cpu);
19812 +#else
19813 list_for_each_entry(page, &pgd_list, lru) {
19814 - if (!vmalloc_sync_one(page_address(page), address))
19815 + pgd_t *pgd = page_address(page);
19816 +#endif
19817 +
19818 + if (!vmalloc_sync_one(pgd, address))
19819 break;
19820 }
19821 spin_unlock_irqrestore(&pgd_lock, flags);
19822 @@ -258,6 +310,11 @@ static noinline int vmalloc_fault(unsign
19823 * an interrupt in the middle of a task switch..
19824 */
19825 pgd_paddr = read_cr3();
19826 +
19827 +#ifdef CONFIG_PAX_PER_CPU_PGD
19828 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
19829 +#endif
19830 +
19831 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
19832 if (!pmd_k)
19833 return -1;
19834 @@ -332,15 +389,27 @@ void vmalloc_sync_all(void)
19835
19836 const pgd_t *pgd_ref = pgd_offset_k(address);
19837 unsigned long flags;
19838 +
19839 +#ifdef CONFIG_PAX_PER_CPU_PGD
19840 + unsigned long cpu;
19841 +#else
19842 struct page *page;
19843 +#endif
19844
19845 if (pgd_none(*pgd_ref))
19846 continue;
19847
19848 spin_lock_irqsave(&pgd_lock, flags);
19849 +
19850 +#ifdef CONFIG_PAX_PER_CPU_PGD
19851 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19852 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
19853 +#else
19854 list_for_each_entry(page, &pgd_list, lru) {
19855 pgd_t *pgd;
19856 pgd = (pgd_t *)page_address(page) + pgd_index(address);
19857 +#endif
19858 +
19859 if (pgd_none(*pgd))
19860 set_pgd(pgd, *pgd_ref);
19861 else
19862 @@ -373,7 +442,14 @@ static noinline int vmalloc_fault(unsign
19863 * happen within a race in page table update. In the later
19864 * case just flush:
19865 */
19866 +
19867 +#ifdef CONFIG_PAX_PER_CPU_PGD
19868 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
19869 + pgd = pgd_offset_cpu(smp_processor_id(), address);
19870 +#else
19871 pgd = pgd_offset(current->active_mm, address);
19872 +#endif
19873 +
19874 pgd_ref = pgd_offset_k(address);
19875 if (pgd_none(*pgd_ref))
19876 return -1;
19877 @@ -535,7 +611,7 @@ static int is_errata93(struct pt_regs *r
19878 static int is_errata100(struct pt_regs *regs, unsigned long address)
19879 {
19880 #ifdef CONFIG_X86_64
19881 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
19882 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
19883 return 1;
19884 #endif
19885 return 0;
19886 @@ -562,7 +638,7 @@ static int is_f00f_bug(struct pt_regs *r
19887 }
19888
19889 static const char nx_warning[] = KERN_CRIT
19890 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
19891 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
19892
19893 static void
19894 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
19895 @@ -571,15 +647,26 @@ show_fault_oops(struct pt_regs *regs, un
19896 if (!oops_may_print())
19897 return;
19898
19899 - if (error_code & PF_INSTR) {
19900 + if (nx_enabled && (error_code & PF_INSTR)) {
19901 unsigned int level;
19902
19903 pte_t *pte = lookup_address(address, &level);
19904
19905 if (pte && pte_present(*pte) && !pte_exec(*pte))
19906 - printk(nx_warning, current_uid());
19907 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
19908 }
19909
19910 +#ifdef CONFIG_PAX_KERNEXEC
19911 + if (init_mm.start_code <= address && address < init_mm.end_code) {
19912 + if (current->signal->curr_ip)
19913 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
19914 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
19915 + else
19916 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
19917 + current->comm, task_pid_nr(current), current_uid(), current_euid());
19918 + }
19919 +#endif
19920 +
19921 printk(KERN_ALERT "BUG: unable to handle kernel ");
19922 if (address < PAGE_SIZE)
19923 printk(KERN_CONT "NULL pointer dereference");
19924 @@ -704,6 +791,68 @@ __bad_area_nosemaphore(struct pt_regs *r
19925 unsigned long address, int si_code)
19926 {
19927 struct task_struct *tsk = current;
19928 + struct mm_struct *mm = tsk->mm;
19929 +
19930 +#ifdef CONFIG_X86_64
19931 + if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
19932 + if (regs->ip == (unsigned long)vgettimeofday) {
19933 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
19934 + return;
19935 + } else if (regs->ip == (unsigned long)vtime) {
19936 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
19937 + return;
19938 + } else if (regs->ip == (unsigned long)vgetcpu) {
19939 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
19940 + return;
19941 + }
19942 + }
19943 +#endif
19944 +
19945 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19946 + if (mm && (error_code & PF_USER)) {
19947 + unsigned long ip = regs->ip;
19948 +
19949 + if (v8086_mode(regs))
19950 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
19951 +
19952 + /*
19953 + * It's possible to have interrupts off here:
19954 + */
19955 + local_irq_enable();
19956 +
19957 +#ifdef CONFIG_PAX_PAGEEXEC
19958 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
19959 + ((nx_enabled && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
19960 +
19961 +#ifdef CONFIG_PAX_EMUTRAMP
19962 + switch (pax_handle_fetch_fault(regs)) {
19963 + case 2:
19964 + return;
19965 + }
19966 +#endif
19967 +
19968 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
19969 + do_group_exit(SIGKILL);
19970 + }
19971 +#endif
19972 +
19973 +#ifdef CONFIG_PAX_SEGMEXEC
19974 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
19975 +
19976 +#ifdef CONFIG_PAX_EMUTRAMP
19977 + switch (pax_handle_fetch_fault(regs)) {
19978 + case 2:
19979 + return;
19980 + }
19981 +#endif
19982 +
19983 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
19984 + do_group_exit(SIGKILL);
19985 + }
19986 +#endif
19987 +
19988 + }
19989 +#endif
19990
19991 /* User mode accesses just cause a SIGSEGV */
19992 if (error_code & PF_USER) {
19993 @@ -857,6 +1006,99 @@ static int spurious_fault_check(unsigned
19994 return 1;
19995 }
19996
19997 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
19998 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
19999 +{
20000 + pte_t *pte;
20001 + pmd_t *pmd;
20002 + spinlock_t *ptl;
20003 + unsigned char pte_mask;
20004 +
20005 + if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
20006 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
20007 + return 0;
20008 +
20009 + /* PaX: it's our fault, let's handle it if we can */
20010 +
20011 + /* PaX: take a look at read faults before acquiring any locks */
20012 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
20013 + /* instruction fetch attempt from a protected page in user mode */
20014 + up_read(&mm->mmap_sem);
20015 +
20016 +#ifdef CONFIG_PAX_EMUTRAMP
20017 + switch (pax_handle_fetch_fault(regs)) {
20018 + case 2:
20019 + return 1;
20020 + }
20021 +#endif
20022 +
20023 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
20024 + do_group_exit(SIGKILL);
20025 + }
20026 +
20027 + pmd = pax_get_pmd(mm, address);
20028 + if (unlikely(!pmd))
20029 + return 0;
20030 +
20031 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
20032 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
20033 + pte_unmap_unlock(pte, ptl);
20034 + return 0;
20035 + }
20036 +
20037 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
20038 + /* write attempt to a protected page in user mode */
20039 + pte_unmap_unlock(pte, ptl);
20040 + return 0;
20041 + }
20042 +
20043 +#ifdef CONFIG_SMP
20044 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
20045 +#else
20046 + if (likely(address > get_limit(regs->cs)))
20047 +#endif
20048 + {
20049 + set_pte(pte, pte_mkread(*pte));
20050 + __flush_tlb_one(address);
20051 + pte_unmap_unlock(pte, ptl);
20052 + up_read(&mm->mmap_sem);
20053 + return 1;
20054 + }
20055 +
20056 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
20057 +
20058 + /*
20059 + * PaX: fill DTLB with user rights and retry
20060 + */
20061 + __asm__ __volatile__ (
20062 + "orb %2,(%1)\n"
20063 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
20064 +/*
20065 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
20066 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
20067 + * page fault when examined during a TLB load attempt. this is true not only
20068 + * for PTEs holding a non-present entry but also present entries that will
20069 + * raise a page fault (such as those set up by PaX, or the copy-on-write
20070 + * mechanism). in effect it means that we do *not* need to flush the TLBs
20071 + * for our target pages since their PTEs are simply not in the TLBs at all.
20072 +
20073 + * the best thing about omitting it is that we gain around 15-20% speed in the
20074 + * fast path of the page fault handler and can get rid of tracing since we
20075 + * can no longer flush unintended entries.
20076 + */
20077 + "invlpg (%0)\n"
20078 +#endif
20079 + __copyuser_seg"testb $0,(%0)\n"
20080 + "xorb %3,(%1)\n"
20081 + :
20082 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
20083 + : "memory", "cc");
20084 + pte_unmap_unlock(pte, ptl);
20085 + up_read(&mm->mmap_sem);
20086 + return 1;
20087 +}
20088 +#endif
20089 +
20090 /*
20091 * Handle a spurious fault caused by a stale TLB entry.
20092 *
20093 @@ -923,6 +1165,9 @@ int show_unhandled_signals = 1;
20094 static inline int
20095 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
20096 {
20097 + if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
20098 + return 1;
20099 +
20100 if (write) {
20101 /* write, present and write, not present: */
20102 if (unlikely(!(vma->vm_flags & VM_WRITE)))
20103 @@ -956,17 +1201,31 @@ do_page_fault(struct pt_regs *regs, unsi
20104 {
20105 struct vm_area_struct *vma;
20106 struct task_struct *tsk;
20107 - unsigned long address;
20108 struct mm_struct *mm;
20109 int write;
20110 int fault;
20111
20112 + /* Get the faulting address: */
20113 + unsigned long address = read_cr2();
20114 +
20115 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20116 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
20117 + if (!search_exception_tables(regs->ip)) {
20118 + bad_area_nosemaphore(regs, error_code, address);
20119 + return;
20120 + }
20121 + if (address < PAX_USER_SHADOW_BASE) {
20122 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
20123 + printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
20124 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
20125 + } else
20126 + address -= PAX_USER_SHADOW_BASE;
20127 + }
20128 +#endif
20129 +
20130 tsk = current;
20131 mm = tsk->mm;
20132
20133 - /* Get the faulting address: */
20134 - address = read_cr2();
20135 -
20136 /*
20137 * Detect and handle instructions that would cause a page fault for
20138 * both a tracked kernel page and a userspace page.
20139 @@ -1026,7 +1285,7 @@ do_page_fault(struct pt_regs *regs, unsi
20140 * User-mode registers count as a user access even for any
20141 * potential system fault or CPU buglet:
20142 */
20143 - if (user_mode_vm(regs)) {
20144 + if (user_mode(regs)) {
20145 local_irq_enable();
20146 error_code |= PF_USER;
20147 } else {
20148 @@ -1080,6 +1339,11 @@ do_page_fault(struct pt_regs *regs, unsi
20149 might_sleep();
20150 }
20151
20152 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20153 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
20154 + return;
20155 +#endif
20156 +
20157 vma = find_vma(mm, address);
20158 if (unlikely(!vma)) {
20159 bad_area(regs, error_code, address);
20160 @@ -1091,18 +1355,24 @@ do_page_fault(struct pt_regs *regs, unsi
20161 bad_area(regs, error_code, address);
20162 return;
20163 }
20164 - if (error_code & PF_USER) {
20165 - /*
20166 - * Accessing the stack below %sp is always a bug.
20167 - * The large cushion allows instructions like enter
20168 - * and pusha to work. ("enter $65535, $31" pushes
20169 - * 32 pointers and then decrements %sp by 65535.)
20170 - */
20171 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
20172 - bad_area(regs, error_code, address);
20173 - return;
20174 - }
20175 + /*
20176 + * Accessing the stack below %sp is always a bug.
20177 + * The large cushion allows instructions like enter
20178 + * and pusha to work. ("enter $65535, $31" pushes
20179 + * 32 pointers and then decrements %sp by 65535.)
20180 + */
20181 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
20182 + bad_area(regs, error_code, address);
20183 + return;
20184 + }
20185 +
20186 +#ifdef CONFIG_PAX_SEGMEXEC
20187 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
20188 + bad_area(regs, error_code, address);
20189 + return;
20190 }
20191 +#endif
20192 +
20193 if (unlikely(expand_stack(vma, address))) {
20194 bad_area(regs, error_code, address);
20195 return;
20196 @@ -1146,3 +1416,199 @@ good_area:
20197
20198 up_read(&mm->mmap_sem);
20199 }
20200 +
20201 +#ifdef CONFIG_PAX_EMUTRAMP
20202 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
20203 +{
20204 + int err;
20205 +
20206 + do { /* PaX: gcc trampoline emulation #1 */
20207 + unsigned char mov1, mov2;
20208 + unsigned short jmp;
20209 + unsigned int addr1, addr2;
20210 +
20211 +#ifdef CONFIG_X86_64
20212 + if ((regs->ip + 11) >> 32)
20213 + break;
20214 +#endif
20215 +
20216 + err = get_user(mov1, (unsigned char __user *)regs->ip);
20217 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
20218 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
20219 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
20220 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
20221 +
20222 + if (err)
20223 + break;
20224 +
20225 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
20226 + regs->cx = addr1;
20227 + regs->ax = addr2;
20228 + regs->ip = addr2;
20229 + return 2;
20230 + }
20231 + } while (0);
20232 +
20233 + do { /* PaX: gcc trampoline emulation #2 */
20234 + unsigned char mov, jmp;
20235 + unsigned int addr1, addr2;
20236 +
20237 +#ifdef CONFIG_X86_64
20238 + if ((regs->ip + 9) >> 32)
20239 + break;
20240 +#endif
20241 +
20242 + err = get_user(mov, (unsigned char __user *)regs->ip);
20243 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
20244 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
20245 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
20246 +
20247 + if (err)
20248 + break;
20249 +
20250 + if (mov == 0xB9 && jmp == 0xE9) {
20251 + regs->cx = addr1;
20252 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
20253 + return 2;
20254 + }
20255 + } while (0);
20256 +
20257 + return 1; /* PaX in action */
20258 +}
20259 +
20260 +#ifdef CONFIG_X86_64
20261 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
20262 +{
20263 + int err;
20264 +
20265 + do { /* PaX: gcc trampoline emulation #1 */
20266 + unsigned short mov1, mov2, jmp1;
20267 + unsigned char jmp2;
20268 + unsigned int addr1;
20269 + unsigned long addr2;
20270 +
20271 + err = get_user(mov1, (unsigned short __user *)regs->ip);
20272 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
20273 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
20274 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
20275 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
20276 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
20277 +
20278 + if (err)
20279 + break;
20280 +
20281 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
20282 + regs->r11 = addr1;
20283 + regs->r10 = addr2;
20284 + regs->ip = addr1;
20285 + return 2;
20286 + }
20287 + } while (0);
20288 +
20289 + do { /* PaX: gcc trampoline emulation #2 */
20290 + unsigned short mov1, mov2, jmp1;
20291 + unsigned char jmp2;
20292 + unsigned long addr1, addr2;
20293 +
20294 + err = get_user(mov1, (unsigned short __user *)regs->ip);
20295 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
20296 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
20297 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
20298 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
20299 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
20300 +
20301 + if (err)
20302 + break;
20303 +
20304 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
20305 + regs->r11 = addr1;
20306 + regs->r10 = addr2;
20307 + regs->ip = addr1;
20308 + return 2;
20309 + }
20310 + } while (0);
20311 +
20312 + return 1; /* PaX in action */
20313 +}
20314 +#endif
20315 +
20316 +/*
20317 + * PaX: decide what to do with offenders (regs->ip = fault address)
20318 + *
20319 + * returns 1 when task should be killed
20320 + * 2 when gcc trampoline was detected
20321 + */
20322 +static int pax_handle_fetch_fault(struct pt_regs *regs)
20323 +{
20324 + if (v8086_mode(regs))
20325 + return 1;
20326 +
20327 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
20328 + return 1;
20329 +
20330 +#ifdef CONFIG_X86_32
20331 + return pax_handle_fetch_fault_32(regs);
20332 +#else
20333 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
20334 + return pax_handle_fetch_fault_32(regs);
20335 + else
20336 + return pax_handle_fetch_fault_64(regs);
20337 +#endif
20338 +}
20339 +#endif
20340 +
20341 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20342 +void pax_report_insns(void *pc, void *sp)
20343 +{
20344 + long i;
20345 +
20346 + printk(KERN_ERR "PAX: bytes at PC: ");
20347 + for (i = 0; i < 20; i++) {
20348 + unsigned char c;
20349 + if (get_user(c, (__force unsigned char __user *)pc+i))
20350 + printk(KERN_CONT "?? ");
20351 + else
20352 + printk(KERN_CONT "%02x ", c);
20353 + }
20354 + printk("\n");
20355 +
20356 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
20357 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
20358 + unsigned long c;
20359 + if (get_user(c, (__force unsigned long __user *)sp+i))
20360 +#ifdef CONFIG_X86_32
20361 + printk(KERN_CONT "???????? ");
20362 +#else
20363 + printk(KERN_CONT "???????????????? ");
20364 +#endif
20365 + else
20366 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
20367 + }
20368 + printk("\n");
20369 +}
20370 +#endif
20371 +
20372 +/**
20373 + * probe_kernel_write(): safely attempt to write to a location
20374 + * @dst: address to write to
20375 + * @src: pointer to the data that shall be written
20376 + * @size: size of the data chunk
20377 + *
20378 + * Safely write to address @dst from the buffer at @src. If a kernel fault
20379 + * happens, handle that and return -EFAULT.
20380 + */
20381 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
20382 +{
20383 + long ret;
20384 + mm_segment_t old_fs = get_fs();
20385 +
20386 + set_fs(KERNEL_DS);
20387 + pagefault_disable();
20388 + pax_open_kernel();
20389 + ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
20390 + pax_close_kernel();
20391 + pagefault_enable();
20392 + set_fs(old_fs);
20393 +
20394 + return ret ? -EFAULT : 0;
20395 +}
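The EMUTRAMP code added above recognizes the instruction sequences gcc emits for nested-function trampolines, which normally live on the stack and would otherwise fault on a non-executable stack. A user-space example of code that makes gcc generate such a trampoline (GNU C nested functions; illustration only, not part of the patch):

#include <stdio.h>

static void run(void (*cb)(int), int v)
{
        cb(v);
}

int main(void)
{
        int total = 0;

        void add(int v)                 /* GNU C nested function */
        {
                total += v;             /* captures 'total' via the static chain */
        }

        run(add, 5);                    /* taking add's address materializes a stack trampoline */
        printf("%d\n", total);
        return 0;
}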
20396 diff -urNp linux-2.6.32.41/arch/x86/mm/gup.c linux-2.6.32.41/arch/x86/mm/gup.c
20397 --- linux-2.6.32.41/arch/x86/mm/gup.c 2011-03-27 14:31:47.000000000 -0400
20398 +++ linux-2.6.32.41/arch/x86/mm/gup.c 2011-04-17 15:56:46.000000000 -0400
20399 @@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long
20400 addr = start;
20401 len = (unsigned long) nr_pages << PAGE_SHIFT;
20402 end = start + len;
20403 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
20404 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
20405 (void __user *)start, len)))
20406 return 0;
20407
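The hunk above only swaps access_ok() for __access_ok(); both boil down to an overflow-safe range check against the task's address limit. A sketch of that kind of check (not the kernel's macro):

static inline int user_range_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
        /* reject wrap-around and anything that ends past the limit */
        return size <= limit && addr <= limit - size;
}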
20408 diff -urNp linux-2.6.32.41/arch/x86/mm/highmem_32.c linux-2.6.32.41/arch/x86/mm/highmem_32.c
20409 --- linux-2.6.32.41/arch/x86/mm/highmem_32.c 2011-03-27 14:31:47.000000000 -0400
20410 +++ linux-2.6.32.41/arch/x86/mm/highmem_32.c 2011-04-17 15:56:46.000000000 -0400
20411 @@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page
20412 idx = type + KM_TYPE_NR*smp_processor_id();
20413 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
20414 BUG_ON(!pte_none(*(kmap_pte-idx)));
20415 +
20416 + pax_open_kernel();
20417 set_pte(kmap_pte-idx, mk_pte(page, prot));
20418 + pax_close_kernel();
20419
20420 return (void *)vaddr;
20421 }
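For reference, the vaddr used by kmap_atomic_prot() above comes from a per-CPU block of fixmap slots; the arithmetic looks roughly like this (a sketch using the kernel's macro names):

static inline unsigned long kmap_slot_vaddr(enum km_type type, int cpu)
{
        int idx = type + KM_TYPE_NR * cpu;              /* per-CPU slot, as in the hunk */

        return __fix_to_virt(FIX_KMAP_BEGIN + idx);     /* fixmap addresses grow down from FIXADDR_TOP */
}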
20422 diff -urNp linux-2.6.32.41/arch/x86/mm/hugetlbpage.c linux-2.6.32.41/arch/x86/mm/hugetlbpage.c
20423 --- linux-2.6.32.41/arch/x86/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
20424 +++ linux-2.6.32.41/arch/x86/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
20425 @@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmappe
20426 struct hstate *h = hstate_file(file);
20427 struct mm_struct *mm = current->mm;
20428 struct vm_area_struct *vma;
20429 - unsigned long start_addr;
20430 + unsigned long start_addr, pax_task_size = TASK_SIZE;
20431 +
20432 +#ifdef CONFIG_PAX_SEGMEXEC
20433 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20434 + pax_task_size = SEGMEXEC_TASK_SIZE;
20435 +#endif
20436 +
20437 + pax_task_size -= PAGE_SIZE;
20438
20439 if (len > mm->cached_hole_size) {
20440 - start_addr = mm->free_area_cache;
20441 + start_addr = mm->free_area_cache;
20442 } else {
20443 - start_addr = TASK_UNMAPPED_BASE;
20444 - mm->cached_hole_size = 0;
20445 + start_addr = mm->mmap_base;
20446 + mm->cached_hole_size = 0;
20447 }
20448
20449 full_search:
20450 @@ -281,26 +288,27 @@ full_search:
20451
20452 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
20453 /* At this point: (!vma || addr < vma->vm_end). */
20454 - if (TASK_SIZE - len < addr) {
20455 + if (pax_task_size - len < addr) {
20456 /*
20457 * Start a new search - just in case we missed
20458 * some holes.
20459 */
20460 - if (start_addr != TASK_UNMAPPED_BASE) {
20461 - start_addr = TASK_UNMAPPED_BASE;
20462 + if (start_addr != mm->mmap_base) {
20463 + start_addr = mm->mmap_base;
20464 mm->cached_hole_size = 0;
20465 goto full_search;
20466 }
20467 return -ENOMEM;
20468 }
20469 - if (!vma || addr + len <= vma->vm_start) {
20470 - mm->free_area_cache = addr + len;
20471 - return addr;
20472 - }
20473 + if (check_heap_stack_gap(vma, addr, len))
20474 + break;
20475 if (addr + mm->cached_hole_size < vma->vm_start)
20476 mm->cached_hole_size = vma->vm_start - addr;
20477 addr = ALIGN(vma->vm_end, huge_page_size(h));
20478 }
20479 +
20480 + mm->free_area_cache = addr + len;
20481 + return addr;
20482 }
20483
20484 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
20485 @@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmappe
20486 {
20487 struct hstate *h = hstate_file(file);
20488 struct mm_struct *mm = current->mm;
20489 - struct vm_area_struct *vma, *prev_vma;
20490 - unsigned long base = mm->mmap_base, addr = addr0;
20491 + struct vm_area_struct *vma;
20492 + unsigned long base = mm->mmap_base, addr;
20493 unsigned long largest_hole = mm->cached_hole_size;
20494 - int first_time = 1;
20495
20496 /* don't allow allocations above current base */
20497 if (mm->free_area_cache > base)
20498 @@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmappe
20499 largest_hole = 0;
20500 mm->free_area_cache = base;
20501 }
20502 -try_again:
20503 +
20504 /* make sure it can fit in the remaining address space */
20505 if (mm->free_area_cache < len)
20506 goto fail;
20507
20508 /* either no address requested or cant fit in requested address hole */
20509 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
20510 + addr = (mm->free_area_cache - len);
20511 do {
20512 + addr &= huge_page_mask(h);
20513 + vma = find_vma(mm, addr);
20514 /*
20515 * Lookup failure means no vma is above this address,
20516 * i.e. return with success:
20517 - */
20518 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
20519 - return addr;
20520 -
20521 - /*
20522 * new region fits between prev_vma->vm_end and
20523 * vma->vm_start, use it:
20524 */
20525 - if (addr + len <= vma->vm_start &&
20526 - (!prev_vma || (addr >= prev_vma->vm_end))) {
20527 + if (check_heap_stack_gap(vma, addr, len)) {
20528 /* remember the address as a hint for next time */
20529 - mm->cached_hole_size = largest_hole;
20530 - return (mm->free_area_cache = addr);
20531 - } else {
20532 - /* pull free_area_cache down to the first hole */
20533 - if (mm->free_area_cache == vma->vm_end) {
20534 - mm->free_area_cache = vma->vm_start;
20535 - mm->cached_hole_size = largest_hole;
20536 - }
20537 + mm->cached_hole_size = largest_hole;
20538 + return (mm->free_area_cache = addr);
20539 + }
20540 + /* pull free_area_cache down to the first hole */
20541 + if (mm->free_area_cache == vma->vm_end) {
20542 + mm->free_area_cache = vma->vm_start;
20543 + mm->cached_hole_size = largest_hole;
20544 }
20545
20546 /* remember the largest hole we saw so far */
20547 if (addr + largest_hole < vma->vm_start)
20548 - largest_hole = vma->vm_start - addr;
20549 + largest_hole = vma->vm_start - addr;
20550
20551 /* try just below the current vma->vm_start */
20552 - addr = (vma->vm_start - len) & huge_page_mask(h);
20553 - } while (len <= vma->vm_start);
20554 + addr = skip_heap_stack_gap(vma, len);
20555 + } while (!IS_ERR_VALUE(addr));
20556
20557 fail:
20558 /*
20559 - * if hint left us with no space for the requested
20560 - * mapping then try again:
20561 - */
20562 - if (first_time) {
20563 - mm->free_area_cache = base;
20564 - largest_hole = 0;
20565 - first_time = 0;
20566 - goto try_again;
20567 - }
20568 - /*
20569 * A failed mmap() very likely causes application failure,
20570 * so fall back to the bottom-up function here. This scenario
20571 * can happen with large stack limits and large mmap()
20572 * allocations.
20573 */
20574 - mm->free_area_cache = TASK_UNMAPPED_BASE;
20575 +
20576 +#ifdef CONFIG_PAX_SEGMEXEC
20577 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20578 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
20579 + else
20580 +#endif
20581 +
20582 + mm->mmap_base = TASK_UNMAPPED_BASE;
20583 +
20584 +#ifdef CONFIG_PAX_RANDMMAP
20585 + if (mm->pax_flags & MF_PAX_RANDMMAP)
20586 + mm->mmap_base += mm->delta_mmap;
20587 +#endif
20588 +
20589 + mm->free_area_cache = mm->mmap_base;
20590 mm->cached_hole_size = ~0UL;
20591 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
20592 len, pgoff, flags);
20593 @@ -387,6 +393,7 @@ fail:
20594 /*
20595 * Restore the topdown base:
20596 */
20597 + mm->mmap_base = base;
20598 mm->free_area_cache = base;
20599 mm->cached_hole_size = ~0UL;
20600
20601 @@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *f
20602 struct hstate *h = hstate_file(file);
20603 struct mm_struct *mm = current->mm;
20604 struct vm_area_struct *vma;
20605 + unsigned long pax_task_size = TASK_SIZE;
20606
20607 if (len & ~huge_page_mask(h))
20608 return -EINVAL;
20609 - if (len > TASK_SIZE)
20610 +
20611 +#ifdef CONFIG_PAX_SEGMEXEC
20612 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20613 + pax_task_size = SEGMEXEC_TASK_SIZE;
20614 +#endif
20615 +
20616 + pax_task_size -= PAGE_SIZE;
20617 +
20618 + if (len > pax_task_size)
20619 return -ENOMEM;
20620
20621 if (flags & MAP_FIXED) {
20622 @@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *f
20623 if (addr) {
20624 addr = ALIGN(addr, huge_page_size(h));
20625 vma = find_vma(mm, addr);
20626 - if (TASK_SIZE - len >= addr &&
20627 - (!vma || addr + len <= vma->vm_start))
20628 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
20629 return addr;
20630 }
20631 if (mm->get_unmapped_area == arch_get_unmapped_area)
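check_heap_stack_gap() and skip_heap_stack_gap(), used throughout the hunks above, are not defined in the hunks shown here; the intent is that a candidate range must not run into the following vma and must keep a guard gap below a downward-growing stack. A minimal sketch of such a check (the helper name and gap size are placeholders, not the patch's code):

static int gap_ok(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
{
        unsigned long gap = 64UL << 10;         /* placeholder guard size */

        if (!vma)
                return 1;                       /* no vma above: the range fits */
        if (vma->vm_flags & VM_GROWSDOWN)
                return addr + len + gap <= vma->vm_start;
        return addr + len <= vma->vm_start;     /* must end before the next vma */
}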
20632 diff -urNp linux-2.6.32.41/arch/x86/mm/init_32.c linux-2.6.32.41/arch/x86/mm/init_32.c
20633 --- linux-2.6.32.41/arch/x86/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
20634 +++ linux-2.6.32.41/arch/x86/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
20635 @@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
20636 }
20637
20638 /*
20639 - * Creates a middle page table and puts a pointer to it in the
20640 - * given global directory entry. This only returns the gd entry
20641 - * in non-PAE compilation mode, since the middle layer is folded.
20642 - */
20643 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
20644 -{
20645 - pud_t *pud;
20646 - pmd_t *pmd_table;
20647 -
20648 -#ifdef CONFIG_X86_PAE
20649 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
20650 - if (after_bootmem)
20651 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
20652 - else
20653 - pmd_table = (pmd_t *)alloc_low_page();
20654 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
20655 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
20656 - pud = pud_offset(pgd, 0);
20657 - BUG_ON(pmd_table != pmd_offset(pud, 0));
20658 -
20659 - return pmd_table;
20660 - }
20661 -#endif
20662 - pud = pud_offset(pgd, 0);
20663 - pmd_table = pmd_offset(pud, 0);
20664 -
20665 - return pmd_table;
20666 -}
20667 -
20668 -/*
20669 * Create a page table and place a pointer to it in a middle page
20670 * directory entry:
20671 */
20672 @@ -121,13 +91,28 @@ static pte_t * __init one_page_table_ini
20673 page_table = (pte_t *)alloc_low_page();
20674
20675 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
20676 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20677 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
20678 +#else
20679 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
20680 +#endif
20681 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
20682 }
20683
20684 return pte_offset_kernel(pmd, 0);
20685 }
20686
20687 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
20688 +{
20689 + pud_t *pud;
20690 + pmd_t *pmd_table;
20691 +
20692 + pud = pud_offset(pgd, 0);
20693 + pmd_table = pmd_offset(pud, 0);
20694 +
20695 + return pmd_table;
20696 +}
20697 +
20698 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
20699 {
20700 int pgd_idx = pgd_index(vaddr);
20701 @@ -201,6 +186,7 @@ page_table_range_init(unsigned long star
20702 int pgd_idx, pmd_idx;
20703 unsigned long vaddr;
20704 pgd_t *pgd;
20705 + pud_t *pud;
20706 pmd_t *pmd;
20707 pte_t *pte = NULL;
20708
20709 @@ -210,8 +196,13 @@ page_table_range_init(unsigned long star
20710 pgd = pgd_base + pgd_idx;
20711
20712 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
20713 - pmd = one_md_table_init(pgd);
20714 - pmd = pmd + pmd_index(vaddr);
20715 + pud = pud_offset(pgd, vaddr);
20716 + pmd = pmd_offset(pud, vaddr);
20717 +
20718 +#ifdef CONFIG_X86_PAE
20719 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
20720 +#endif
20721 +
20722 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
20723 pmd++, pmd_idx++) {
20724 pte = page_table_kmap_check(one_page_table_init(pmd),
20725 @@ -223,11 +214,20 @@ page_table_range_init(unsigned long star
20726 }
20727 }
20728
20729 -static inline int is_kernel_text(unsigned long addr)
20730 +static inline int is_kernel_text(unsigned long start, unsigned long end)
20731 {
20732 - if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
20733 - return 1;
20734 - return 0;
20735 + if ((start > ktla_ktva((unsigned long)_etext) ||
20736 + end <= ktla_ktva((unsigned long)_stext)) &&
20737 + (start > ktla_ktva((unsigned long)_einittext) ||
20738 + end <= ktla_ktva((unsigned long)_sinittext)) &&
20739 +
20740 +#ifdef CONFIG_ACPI_SLEEP
20741 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
20742 +#endif
20743 +
20744 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
20745 + return 0;
20746 + return 1;
20747 }
20748
20749 /*
20750 @@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned lo
20751 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
20752 unsigned long start_pfn, end_pfn;
20753 pgd_t *pgd_base = swapper_pg_dir;
20754 - int pgd_idx, pmd_idx, pte_ofs;
20755 + unsigned int pgd_idx, pmd_idx, pte_ofs;
20756 unsigned long pfn;
20757 pgd_t *pgd;
20758 + pud_t *pud;
20759 pmd_t *pmd;
20760 pte_t *pte;
20761 unsigned pages_2m, pages_4k;
20762 @@ -278,8 +279,13 @@ repeat:
20763 pfn = start_pfn;
20764 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
20765 pgd = pgd_base + pgd_idx;
20766 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
20767 - pmd = one_md_table_init(pgd);
20768 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
20769 + pud = pud_offset(pgd, 0);
20770 + pmd = pmd_offset(pud, 0);
20771 +
20772 +#ifdef CONFIG_X86_PAE
20773 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
20774 +#endif
20775
20776 if (pfn >= end_pfn)
20777 continue;
20778 @@ -291,14 +297,13 @@ repeat:
20779 #endif
20780 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
20781 pmd++, pmd_idx++) {
20782 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
20783 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
20784
20785 /*
20786 * Map with big pages if possible, otherwise
20787 * create normal page tables:
20788 */
20789 if (use_pse) {
20790 - unsigned int addr2;
20791 pgprot_t prot = PAGE_KERNEL_LARGE;
20792 /*
20793 * first pass will use the same initial
20794 @@ -308,11 +313,7 @@ repeat:
20795 __pgprot(PTE_IDENT_ATTR |
20796 _PAGE_PSE);
20797
20798 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
20799 - PAGE_OFFSET + PAGE_SIZE-1;
20800 -
20801 - if (is_kernel_text(addr) ||
20802 - is_kernel_text(addr2))
20803 + if (is_kernel_text(address, address + PMD_SIZE))
20804 prot = PAGE_KERNEL_LARGE_EXEC;
20805
20806 pages_2m++;
20807 @@ -329,7 +330,7 @@ repeat:
20808 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
20809 pte += pte_ofs;
20810 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
20811 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
20812 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
20813 pgprot_t prot = PAGE_KERNEL;
20814 /*
20815 * first pass will use the same initial
20816 @@ -337,7 +338,7 @@ repeat:
20817 */
20818 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
20819
20820 - if (is_kernel_text(addr))
20821 + if (is_kernel_text(address, address + PAGE_SIZE))
20822 prot = PAGE_KERNEL_EXEC;
20823
20824 pages_4k++;
20825 @@ -489,7 +490,7 @@ void __init native_pagetable_setup_start
20826
20827 pud = pud_offset(pgd, va);
20828 pmd = pmd_offset(pud, va);
20829 - if (!pmd_present(*pmd))
20830 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
20831 break;
20832
20833 pte = pte_offset_kernel(pmd, va);
20834 @@ -541,9 +542,7 @@ void __init early_ioremap_page_table_ran
20835
20836 static void __init pagetable_init(void)
20837 {
20838 - pgd_t *pgd_base = swapper_pg_dir;
20839 -
20840 - permanent_kmaps_init(pgd_base);
20841 + permanent_kmaps_init(swapper_pg_dir);
20842 }
20843
20844 #ifdef CONFIG_ACPI_SLEEP
20845 @@ -551,12 +550,12 @@ static void __init pagetable_init(void)
20846 * ACPI suspend needs this for resume, because things like the intel-agp
20847 * driver might have split up a kernel 4MB mapping.
20848 */
20849 -char swsusp_pg_dir[PAGE_SIZE]
20850 +pgd_t swsusp_pg_dir[PTRS_PER_PGD]
20851 __attribute__ ((aligned(PAGE_SIZE)));
20852
20853 static inline void save_pg_dir(void)
20854 {
20855 - memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
20856 + clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
20857 }
20858 #else /* !CONFIG_ACPI_SLEEP */
20859 static inline void save_pg_dir(void)
20860 @@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
20861 flush_tlb_all();
20862 }
20863
20864 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
20865 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
20866 EXPORT_SYMBOL_GPL(__supported_pte_mask);
20867
20868 /* user-defined highmem size */
20869 @@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void
20870 * Initialize the boot-time allocator (with low memory only):
20871 */
20872 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
20873 - bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
20874 + bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
20875 PAGE_SIZE);
20876 if (bootmap == -1L)
20877 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
20878 @@ -864,6 +863,12 @@ void __init mem_init(void)
20879
20880 pci_iommu_alloc();
20881
20882 +#ifdef CONFIG_PAX_PER_CPU_PGD
20883 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
20884 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20885 + KERNEL_PGD_PTRS);
20886 +#endif
20887 +
20888 #ifdef CONFIG_FLATMEM
20889 BUG_ON(!mem_map);
20890 #endif
20891 @@ -881,7 +886,7 @@ void __init mem_init(void)
20892 set_highmem_pages_init();
20893
20894 codesize = (unsigned long) &_etext - (unsigned long) &_text;
20895 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
20896 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
20897 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
20898
20899 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
20900 @@ -923,10 +928,10 @@ void __init mem_init(void)
20901 ((unsigned long)&__init_end -
20902 (unsigned long)&__init_begin) >> 10,
20903
20904 - (unsigned long)&_etext, (unsigned long)&_edata,
20905 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
20906 + (unsigned long)&_sdata, (unsigned long)&_edata,
20907 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
20908
20909 - (unsigned long)&_text, (unsigned long)&_etext,
20910 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
20911 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
20912
20913 /*
20914 @@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
20915 if (!kernel_set_to_readonly)
20916 return;
20917
20918 + start = ktla_ktva(start);
20919 pr_debug("Set kernel text: %lx - %lx for read write\n",
20920 start, start+size);
20921
20922 @@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
20923 if (!kernel_set_to_readonly)
20924 return;
20925
20926 + start = ktla_ktva(start);
20927 pr_debug("Set kernel text: %lx - %lx for read only\n",
20928 start, start+size);
20929
20930 @@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
20931 unsigned long start = PFN_ALIGN(_text);
20932 unsigned long size = PFN_ALIGN(_etext) - start;
20933
20934 + start = ktla_ktva(start);
20935 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
20936 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
20937 size >> 10);
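The is_kernel_text() rewrite above turns a single-address check into a range check: the function now reports whether [start, end) touches kernel text, init text, the ACPI wakeup trampoline, or the legacy BIOS/video window, and the patch expresses that as the De Morgan'd "misses all of them" form. Stripped of the kernel symbols, the underlying test is the usual interval-intersection predicate; the boundary convention below mirrors the patch, which compares with > and <=.

/* Minimal sketch of the intersection test used above. */
#include <stdbool.h>
#include <stdio.h>

static bool touches(unsigned long start, unsigned long end,
                    unsigned long s, unsigned long e)
{
        /* negation of the patch's miss test "start > e || end <= s" */
        return start <= e && end > s;
}

int main(void)
{
        /* a mapping straddling the end of an (illustrative) text region overlaps it */
        unsigned long text_start = 0xc1000000UL, text_end = 0xc1400000UL;
        printf("%d\n", touches(0xc13fe000UL, 0xc1600000UL, text_start, text_end));
        return 0;
}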
20938 diff -urNp linux-2.6.32.41/arch/x86/mm/init_64.c linux-2.6.32.41/arch/x86/mm/init_64.c
20939 --- linux-2.6.32.41/arch/x86/mm/init_64.c 2011-04-17 17:00:52.000000000 -0400
20940 +++ linux-2.6.32.41/arch/x86/mm/init_64.c 2011-04-17 17:03:05.000000000 -0400
20941 @@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
20942 pmd = fill_pmd(pud, vaddr);
20943 pte = fill_pte(pmd, vaddr);
20944
20945 + pax_open_kernel();
20946 set_pte(pte, new_pte);
20947 + pax_close_kernel();
20948
20949 /*
20950 * It's enough to flush this one mapping.
20951 @@ -223,14 +225,12 @@ static void __init __init_extra_mapping(
20952 pgd = pgd_offset_k((unsigned long)__va(phys));
20953 if (pgd_none(*pgd)) {
20954 pud = (pud_t *) spp_getpage();
20955 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
20956 - _PAGE_USER));
20957 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
20958 }
20959 pud = pud_offset(pgd, (unsigned long)__va(phys));
20960 if (pud_none(*pud)) {
20961 pmd = (pmd_t *) spp_getpage();
20962 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
20963 - _PAGE_USER));
20964 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
20965 }
20966 pmd = pmd_offset(pud, phys);
20967 BUG_ON(!pmd_none(*pmd));
20968 @@ -675,6 +675,12 @@ void __init mem_init(void)
20969
20970 pci_iommu_alloc();
20971
20972 +#ifdef CONFIG_PAX_PER_CPU_PGD
20973 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
20974 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20975 + KERNEL_PGD_PTRS);
20976 +#endif
20977 +
20978 /* clear_bss() already clear the empty_zero_page */
20979
20980 reservedpages = 0;
20981 @@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
20982 static struct vm_area_struct gate_vma = {
20983 .vm_start = VSYSCALL_START,
20984 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
20985 - .vm_page_prot = PAGE_READONLY_EXEC,
20986 - .vm_flags = VM_READ | VM_EXEC
20987 + .vm_page_prot = PAGE_READONLY,
20988 + .vm_flags = VM_READ
20989 };
20990
20991 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
20992 @@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long a
20993
20994 const char *arch_vma_name(struct vm_area_struct *vma)
20995 {
20996 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
20997 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
20998 return "[vdso]";
20999 if (vma == &gate_vma)
21000 return "[vsyscall]";
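set_pte_vaddr_pud() above gains a pax_open_kernel()/pax_close_kernel() pair around the single page-table store. Those helpers belong to PaX KERNEXEC and are defined elsewhere in the patch; the usual assumption is that they briefly lift the kernel's write protection (commonly by toggling CR0.WP on x86) and restore it immediately. The discipline they enforce, keeping the writable window to one store and always closing it on the same path, can be modeled in user space as a sketch.

/* User-space model only; the real helpers manipulate CPU state, not a flag. */
#include <assert.h>
#include <stdio.h>

static int kernel_writable;                 /* stands in for the write-protect state */

static void pax_open_kernel_(void)  { kernel_writable = 1; }
static void pax_close_kernel_(void) { kernel_writable = 0; }

static void set_pte_(unsigned long *pte, unsigned long val)
{
        assert(kernel_writable);            /* writes outside the window are bugs */
        *pte = val;
}

int main(void)
{
        unsigned long pte = 0;

        pax_open_kernel_();
        set_pte_(&pte, 0x1003);             /* keep the window to one store */
        pax_close_kernel_();

        printf("pte = %#lx\n", pte);
        return 0;
}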
21001 diff -urNp linux-2.6.32.41/arch/x86/mm/init.c linux-2.6.32.41/arch/x86/mm/init.c
21002 --- linux-2.6.32.41/arch/x86/mm/init.c 2011-04-17 17:00:52.000000000 -0400
21003 +++ linux-2.6.32.41/arch/x86/mm/init.c 2011-06-07 19:06:09.000000000 -0400
21004 @@ -69,11 +69,7 @@ static void __init find_early_table_spac
21005 * cause a hotspot and fill up ZONE_DMA. The page tables
21006 * need roughly 0.5KB per GB.
21007 */
21008 -#ifdef CONFIG_X86_32
21009 - start = 0x7000;
21010 -#else
21011 - start = 0x8000;
21012 -#endif
21013 + start = 0x100000;
21014 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
21015 tables, PAGE_SIZE);
21016 if (e820_table_start == -1UL)
21017 @@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_m
21018 #endif
21019
21020 set_nx();
21021 - if (nx_enabled)
21022 + if (nx_enabled && cpu_has_nx)
21023 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
21024
21025 /* Enable PSE if available */
21026 @@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_m
21027 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
21028 * mmio resources as well as potential bios/acpi data regions.
21029 */
21030 +
21031 int devmem_is_allowed(unsigned long pagenr)
21032 {
21033 +#ifdef CONFIG_GRKERNSEC_KMEM
21034 + /* allow BDA */
21035 + if (!pagenr)
21036 + return 1;
21037 + /* allow EBDA */
21038 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
21039 + return 1;
21040 + /* allow ISA/video mem */
21041 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
21042 + return 1;
21043 + /* throw out everything else below 1MB */
21044 + if (pagenr <= 256)
21045 + return 0;
21046 +#else
21047 if (pagenr <= 256)
21048 return 1;
21049 +#endif
21050 +
21051 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
21052 return 0;
21053 if (!page_is_ram(pagenr))
21054 @@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigne
21055
21056 void free_initmem(void)
21057 {
21058 +
21059 +#ifdef CONFIG_PAX_KERNEXEC
21060 +#ifdef CONFIG_X86_32
21061 + /* PaX: limit KERNEL_CS to actual size */
21062 + unsigned long addr, limit;
21063 + struct desc_struct d;
21064 + int cpu;
21065 +
21066 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
21067 + limit = (limit - 1UL) >> PAGE_SHIFT;
21068 +
21069 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
21070 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
21071 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
21072 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
21073 + }
21074 +
21075 + /* PaX: make KERNEL_CS read-only */
21076 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
21077 + if (!paravirt_enabled())
21078 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
21079 +/*
21080 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
21081 + pgd = pgd_offset_k(addr);
21082 + pud = pud_offset(pgd, addr);
21083 + pmd = pmd_offset(pud, addr);
21084 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21085 + }
21086 +*/
21087 +#ifdef CONFIG_X86_PAE
21088 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
21089 +/*
21090 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
21091 + pgd = pgd_offset_k(addr);
21092 + pud = pud_offset(pgd, addr);
21093 + pmd = pmd_offset(pud, addr);
21094 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
21095 + }
21096 +*/
21097 +#endif
21098 +
21099 +#ifdef CONFIG_MODULES
21100 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
21101 +#endif
21102 +
21103 +#else
21104 + pgd_t *pgd;
21105 + pud_t *pud;
21106 + pmd_t *pmd;
21107 + unsigned long addr, end;
21108 +
21109 + /* PaX: make kernel code/rodata read-only, rest non-executable */
21110 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
21111 + pgd = pgd_offset_k(addr);
21112 + pud = pud_offset(pgd, addr);
21113 + pmd = pmd_offset(pud, addr);
21114 + if (!pmd_present(*pmd))
21115 + continue;
21116 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
21117 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21118 + else
21119 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
21120 + }
21121 +
21122 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
21123 + end = addr + KERNEL_IMAGE_SIZE;
21124 + for (; addr < end; addr += PMD_SIZE) {
21125 + pgd = pgd_offset_k(addr);
21126 + pud = pud_offset(pgd, addr);
21127 + pmd = pmd_offset(pud, addr);
21128 + if (!pmd_present(*pmd))
21129 + continue;
21130 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
21131 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21132 + }
21133 +#endif
21134 +
21135 + flush_tlb_all();
21136 +#endif
21137 +
21138 free_init_pages("unused kernel memory",
21139 (unsigned long)(&__init_begin),
21140 (unsigned long)(&__init_end));
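The GRKERNSEC_KMEM branch added to devmem_is_allowed() above whitelists only the BIOS data area, the EBDA page and the ISA/video window below 1 MB and refuses the rest of low memory; without the option the whole first megabyte stays readable through /dev/mem. Restated as a standalone predicate (ISA_START_ADDRESS/ISA_END_ADDRESS are assumed to be the usual x86 values 0xa0000/0x100000, and pages above 1 MB fall through to the kernel's iomem/RAM checks, which this sketch does not model):

/* Sketch of the low-memory policy only. */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT        12
#define ISA_START_ADDRESS 0xa0000UL       /* assumed x86 value */
#define ISA_END_ADDRESS   0x100000UL      /* assumed x86 value */

static bool low_mem_devmem_allowed(unsigned long pagenr)
{
        if (pagenr == 0)                                    /* BIOS data area */
                return true;
        if (pagenr == (0x9f000UL >> PAGE_SHIFT))            /* EBDA           */
                return true;
        if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
            pagenr <  (ISA_END_ADDRESS   >> PAGE_SHIFT))    /* ISA / video    */
                return true;
        /* at or below 1 MB and not whitelisted: refuse; above that the kernel
         * applies further RAM/iomem checks not modeled here */
        return pagenr > 256;
}

int main(void)
{
        printf("%d %d\n",
               low_mem_devmem_allowed(0xb8000UL >> PAGE_SHIFT),  /* VGA text: allowed */
               low_mem_devmem_allowed(0x10));                    /* low RAM: refused  */
        return 0;
}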
21141 diff -urNp linux-2.6.32.41/arch/x86/mm/iomap_32.c linux-2.6.32.41/arch/x86/mm/iomap_32.c
21142 --- linux-2.6.32.41/arch/x86/mm/iomap_32.c 2011-03-27 14:31:47.000000000 -0400
21143 +++ linux-2.6.32.41/arch/x86/mm/iomap_32.c 2011-04-17 15:56:46.000000000 -0400
21144 @@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long
21145 debug_kmap_atomic(type);
21146 idx = type + KM_TYPE_NR * smp_processor_id();
21147 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
21148 +
21149 + pax_open_kernel();
21150 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
21151 + pax_close_kernel();
21152 +
21153 arch_flush_lazy_mmu_mode();
21154
21155 return (void *)vaddr;
21156 diff -urNp linux-2.6.32.41/arch/x86/mm/ioremap.c linux-2.6.32.41/arch/x86/mm/ioremap.c
21157 --- linux-2.6.32.41/arch/x86/mm/ioremap.c 2011-03-27 14:31:47.000000000 -0400
21158 +++ linux-2.6.32.41/arch/x86/mm/ioremap.c 2011-04-17 15:56:46.000000000 -0400
21159 @@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
21160 * Second special case: Some BIOSen report the PC BIOS
21161 * area (640->1Mb) as ram even though it is not.
21162 */
21163 - if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
21164 - pagenr < (BIOS_END >> PAGE_SHIFT))
21165 + if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
21166 + pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
21167 return 0;
21168
21169 for (i = 0; i < e820.nr_map; i++) {
21170 @@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(re
21171 /*
21172 * Don't allow anybody to remap normal RAM that we're using..
21173 */
21174 - for (pfn = phys_addr >> PAGE_SHIFT;
21175 - (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
21176 - pfn++) {
21177 -
21178 + for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
21179 int is_ram = page_is_ram(pfn);
21180
21181 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
21182 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
21183 return NULL;
21184 WARN_ON_ONCE(is_ram);
21185 }
21186 @@ -407,7 +404,7 @@ static int __init early_ioremap_debug_se
21187 early_param("early_ioremap_debug", early_ioremap_debug_setup);
21188
21189 static __initdata int after_paging_init;
21190 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
21191 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
21192
21193 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
21194 {
21195 @@ -439,8 +436,7 @@ void __init early_ioremap_init(void)
21196 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
21197
21198 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
21199 - memset(bm_pte, 0, sizeof(bm_pte));
21200 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
21201 + pmd_populate_user(&init_mm, pmd, bm_pte);
21202
21203 /*
21204 * The boot-ioremap range spans multiple pmds, for which
21205 diff -urNp linux-2.6.32.41/arch/x86/mm/kmemcheck/kmemcheck.c linux-2.6.32.41/arch/x86/mm/kmemcheck/kmemcheck.c
21206 --- linux-2.6.32.41/arch/x86/mm/kmemcheck/kmemcheck.c 2011-03-27 14:31:47.000000000 -0400
21207 +++ linux-2.6.32.41/arch/x86/mm/kmemcheck/kmemcheck.c 2011-04-17 15:56:46.000000000 -0400
21208 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
21209 * memory (e.g. tracked pages)? For now, we need this to avoid
21210 * invoking kmemcheck for PnP BIOS calls.
21211 */
21212 - if (regs->flags & X86_VM_MASK)
21213 + if (v8086_mode(regs))
21214 return false;
21215 - if (regs->cs != __KERNEL_CS)
21216 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
21217 return false;
21218
21219 pte = kmemcheck_pte_lookup(address);
21220 diff -urNp linux-2.6.32.41/arch/x86/mm/mmap.c linux-2.6.32.41/arch/x86/mm/mmap.c
21221 --- linux-2.6.32.41/arch/x86/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
21222 +++ linux-2.6.32.41/arch/x86/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
21223 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
21224 * Leave an at least ~128 MB hole with possible stack randomization.
21225 */
21226 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
21227 -#define MAX_GAP (TASK_SIZE/6*5)
21228 +#define MAX_GAP (pax_task_size/6*5)
21229
21230 /*
21231 * True on X86_32 or when emulating IA32 on X86_64
21232 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
21233 return rnd << PAGE_SHIFT;
21234 }
21235
21236 -static unsigned long mmap_base(void)
21237 +static unsigned long mmap_base(struct mm_struct *mm)
21238 {
21239 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
21240 + unsigned long pax_task_size = TASK_SIZE;
21241 +
21242 +#ifdef CONFIG_PAX_SEGMEXEC
21243 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21244 + pax_task_size = SEGMEXEC_TASK_SIZE;
21245 +#endif
21246
21247 if (gap < MIN_GAP)
21248 gap = MIN_GAP;
21249 else if (gap > MAX_GAP)
21250 gap = MAX_GAP;
21251
21252 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
21253 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
21254 }
21255
21256 /*
21257 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
21258 * does, but not when emulating X86_32
21259 */
21260 -static unsigned long mmap_legacy_base(void)
21261 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
21262 {
21263 - if (mmap_is_ia32())
21264 + if (mmap_is_ia32()) {
21265 +
21266 +#ifdef CONFIG_PAX_SEGMEXEC
21267 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21268 + return SEGMEXEC_TASK_UNMAPPED_BASE;
21269 + else
21270 +#endif
21271 +
21272 return TASK_UNMAPPED_BASE;
21273 - else
21274 + } else
21275 return TASK_UNMAPPED_BASE + mmap_rnd();
21276 }
21277
21278 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
21279 void arch_pick_mmap_layout(struct mm_struct *mm)
21280 {
21281 if (mmap_is_legacy()) {
21282 - mm->mmap_base = mmap_legacy_base();
21283 + mm->mmap_base = mmap_legacy_base(mm);
21284 +
21285 +#ifdef CONFIG_PAX_RANDMMAP
21286 + if (mm->pax_flags & MF_PAX_RANDMMAP)
21287 + mm->mmap_base += mm->delta_mmap;
21288 +#endif
21289 +
21290 mm->get_unmapped_area = arch_get_unmapped_area;
21291 mm->unmap_area = arch_unmap_area;
21292 } else {
21293 - mm->mmap_base = mmap_base();
21294 + mm->mmap_base = mmap_base(mm);
21295 +
21296 +#ifdef CONFIG_PAX_RANDMMAP
21297 + if (mm->pax_flags & MF_PAX_RANDMMAP)
21298 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
21299 +#endif
21300 +
21301 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
21302 mm->unmap_area = arch_unmap_area_topdown;
21303 }
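arch_pick_mmap_layout() above now derives the top-down base from the per-mm task size (SEGMEXEC halves it) and then applies the PaX RANDMMAP deltas on top. The base itself is still the stock computation: clamp the stack rlimit between MIN_GAP and MAX_GAP, subtract it and the mmap randomization from the task size, and page-align. A compact restatement with illustrative numbers:

/* Sketch of the mmap_base() computation; values are examples only. */
#include <stdio.h>

#define PAGE_MASK       (~4095UL)
#define PAGE_ALIGN(x)   (((x) + 4095UL) & PAGE_MASK)

static unsigned long mmap_base_sketch(unsigned long task_size,
                                      unsigned long stack_rlimit,
                                      unsigned long min_gap,
                                      unsigned long max_gap,
                                      unsigned long rnd)
{
        unsigned long gap = stack_rlimit;

        if (gap < min_gap)
                gap = min_gap;
        else if (gap > max_gap)
                gap = max_gap;

        return PAGE_ALIGN(task_size - gap - rnd);
}

int main(void)
{
        unsigned long task_size = 0xc0000000UL;   /* 3 GB user split, 32-bit        */
        unsigned long min_gap   = 128UL << 20;    /* MIN_GAP minus the stack random */

        /* an 8 MB stack rlimit clamps up to the 128 MB minimum gap */
        unsigned long base = mmap_base_sketch(task_size, 8UL << 20,
                                              min_gap, task_size / 6 * 5, 0);
        printf("mmap_base = %#lx\n", base);
        return 0;
}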
21304 diff -urNp linux-2.6.32.41/arch/x86/mm/mmio-mod.c linux-2.6.32.41/arch/x86/mm/mmio-mod.c
21305 --- linux-2.6.32.41/arch/x86/mm/mmio-mod.c 2011-03-27 14:31:47.000000000 -0400
21306 +++ linux-2.6.32.41/arch/x86/mm/mmio-mod.c 2011-05-04 17:56:28.000000000 -0400
21307 @@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p,
21308 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
21309 void __iomem *addr)
21310 {
21311 - static atomic_t next_id;
21312 + static atomic_unchecked_t next_id;
21313 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
21314 /* These are page-unaligned. */
21315 struct mmiotrace_map map = {
21316 @@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_
21317 .private = trace
21318 },
21319 .phys = offset,
21320 - .id = atomic_inc_return(&next_id)
21321 + .id = atomic_inc_return_unchecked(&next_id)
21322 };
21323 map.map_id = trace->id;
21324
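The mmio-trace hunk above converts next_id to atomic_unchecked_t. That type comes from PaX's refcount-overflow protection, which (as assumed here; its implementation lives elsewhere in the patch) makes ordinary atomic increments trap when they would overflow, so counters whose only job is to hand out IDs and may legitimately wrap are moved to the _unchecked variants. The distinction, modeled with plain unsigned integers:

/* User-space model of checked vs. unchecked counters; not the kernel atomics. */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static unsigned checked_inc(unsigned *v)      /* models atomic_inc_return() under the
                                                 assumed overflow protection            */
{
        if (*v == UINT_MAX)
                abort();                      /* stands in for the overflow trap */
        return ++*v;
}

static unsigned unchecked_inc(unsigned *v)    /* models atomic_inc_return_unchecked() */
{
        return ++*v;                          /* wrapping is acceptable for plain IDs */
}

int main(void)
{
        unsigned id = 0;
        printf("next trace id: %u\n", unchecked_inc(&id));
        (void)checked_inc;
        return 0;
}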
21325 diff -urNp linux-2.6.32.41/arch/x86/mm/numa_32.c linux-2.6.32.41/arch/x86/mm/numa_32.c
21326 --- linux-2.6.32.41/arch/x86/mm/numa_32.c 2011-03-27 14:31:47.000000000 -0400
21327 +++ linux-2.6.32.41/arch/x86/mm/numa_32.c 2011-04-17 15:56:46.000000000 -0400
21328 @@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int
21329 }
21330 #endif
21331
21332 -extern unsigned long find_max_low_pfn(void);
21333 extern unsigned long highend_pfn, highstart_pfn;
21334
21335 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
21336 diff -urNp linux-2.6.32.41/arch/x86/mm/pageattr.c linux-2.6.32.41/arch/x86/mm/pageattr.c
21337 --- linux-2.6.32.41/arch/x86/mm/pageattr.c 2011-03-27 14:31:47.000000000 -0400
21338 +++ linux-2.6.32.41/arch/x86/mm/pageattr.c 2011-04-17 15:56:46.000000000 -0400
21339 @@ -261,16 +261,17 @@ static inline pgprot_t static_protection
21340 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
21341 */
21342 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
21343 - pgprot_val(forbidden) |= _PAGE_NX;
21344 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
21345
21346 /*
21347 * The kernel text needs to be executable for obvious reasons
21348 * Does not cover __inittext since that is gone later on. On
21349 * 64bit we do not enforce !NX on the low mapping
21350 */
21351 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
21352 - pgprot_val(forbidden) |= _PAGE_NX;
21353 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
21354 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
21355
21356 +#ifdef CONFIG_DEBUG_RODATA
21357 /*
21358 * The .rodata section needs to be read-only. Using the pfn
21359 * catches all aliases.
21360 @@ -278,6 +279,14 @@ static inline pgprot_t static_protection
21361 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
21362 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
21363 pgprot_val(forbidden) |= _PAGE_RW;
21364 +#endif
21365 +
21366 +#ifdef CONFIG_PAX_KERNEXEC
21367 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
21368 + pgprot_val(forbidden) |= _PAGE_RW;
21369 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
21370 + }
21371 +#endif
21372
21373 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
21374
21375 @@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
21376 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
21377 {
21378 /* change init_mm */
21379 + pax_open_kernel();
21380 set_pte_atomic(kpte, pte);
21381 +
21382 #ifdef CONFIG_X86_32
21383 if (!SHARED_KERNEL_PMD) {
21384 +
21385 +#ifdef CONFIG_PAX_PER_CPU_PGD
21386 + unsigned long cpu;
21387 +#else
21388 struct page *page;
21389 +#endif
21390
21391 +#ifdef CONFIG_PAX_PER_CPU_PGD
21392 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
21393 + pgd_t *pgd = get_cpu_pgd(cpu);
21394 +#else
21395 list_for_each_entry(page, &pgd_list, lru) {
21396 - pgd_t *pgd;
21397 + pgd_t *pgd = (pgd_t *)page_address(page);
21398 +#endif
21399 +
21400 pud_t *pud;
21401 pmd_t *pmd;
21402
21403 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
21404 + pgd += pgd_index(address);
21405 pud = pud_offset(pgd, address);
21406 pmd = pmd_offset(pud, address);
21407 set_pte_atomic((pte_t *)pmd, pte);
21408 }
21409 }
21410 #endif
21411 + pax_close_kernel();
21412 }
21413
21414 static int
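static_protections() above gains a KERNEXEC case: for pages backing kernel text and rodata it forbids both the writable bit and the no-execute bit, on top of the existing BIOS and rodata rules. The function's overall pattern is to collect must-not-grant bits into "forbidden" and strip them from whatever protection the caller asked for; a minimal restatement with placeholder bit values:

/* Sketch of the forbidden-bit masking pattern; bit values are placeholders. */
#include <stdio.h>

#define PROT_RW   0x1UL     /* placeholder for _PAGE_RW */
#define PROT_NX   0x2UL     /* placeholder for _PAGE_NX */

static unsigned long apply_static_protections(unsigned long requested,
                                               int in_kernel_text)
{
        unsigned long forbidden = 0;

        if (in_kernel_text) {
                forbidden |= PROT_RW;     /* text must stay read-only  */
                forbidden |= PROT_NX;     /* and must stay executable  */
        }

        return requested & ~forbidden;
}

int main(void)
{
        /* asking for writable, non-executable kernel text yields neither bit */
        printf("%#lx\n", apply_static_protections(PROT_RW | PROT_NX, 1));
        return 0;
}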
21415 diff -urNp linux-2.6.32.41/arch/x86/mm/pageattr-test.c linux-2.6.32.41/arch/x86/mm/pageattr-test.c
21416 --- linux-2.6.32.41/arch/x86/mm/pageattr-test.c 2011-03-27 14:31:47.000000000 -0400
21417 +++ linux-2.6.32.41/arch/x86/mm/pageattr-test.c 2011-04-17 15:56:46.000000000 -0400
21418 @@ -36,7 +36,7 @@ enum {
21419
21420 static int pte_testbit(pte_t pte)
21421 {
21422 - return pte_flags(pte) & _PAGE_UNUSED1;
21423 + return pte_flags(pte) & _PAGE_CPA_TEST;
21424 }
21425
21426 struct split_state {
21427 diff -urNp linux-2.6.32.41/arch/x86/mm/pat.c linux-2.6.32.41/arch/x86/mm/pat.c
21428 --- linux-2.6.32.41/arch/x86/mm/pat.c 2011-03-27 14:31:47.000000000 -0400
21429 +++ linux-2.6.32.41/arch/x86/mm/pat.c 2011-04-17 15:56:46.000000000 -0400
21430 @@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct
21431
21432 conflict:
21433 printk(KERN_INFO "%s:%d conflicting memory types "
21434 - "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
21435 + "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
21436 new->end, cattr_name(new->type), cattr_name(entry->type));
21437 return -EBUSY;
21438 }
21439 @@ -559,7 +559,7 @@ unlock_ret:
21440
21441 if (err) {
21442 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
21443 - current->comm, current->pid, start, end);
21444 + current->comm, task_pid_nr(current), start, end);
21445 }
21446
21447 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
21448 @@ -689,8 +689,8 @@ static inline int range_is_allowed(unsig
21449 while (cursor < to) {
21450 if (!devmem_is_allowed(pfn)) {
21451 printk(KERN_INFO
21452 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
21453 - current->comm, from, to);
21454 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
21455 + current->comm, from, to, cursor);
21456 return 0;
21457 }
21458 cursor += PAGE_SIZE;
21459 @@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, un
21460 printk(KERN_INFO
21461 "%s:%d ioremap_change_attr failed %s "
21462 "for %Lx-%Lx\n",
21463 - current->comm, current->pid,
21464 + current->comm, task_pid_nr(current),
21465 cattr_name(flags),
21466 base, (unsigned long long)(base + size));
21467 return -EINVAL;
21468 @@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr,
21469 free_memtype(paddr, paddr + size);
21470 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
21471 " for %Lx-%Lx, got %s\n",
21472 - current->comm, current->pid,
21473 + current->comm, task_pid_nr(current),
21474 cattr_name(want_flags),
21475 (unsigned long long)paddr,
21476 (unsigned long long)(paddr + size),
21477 diff -urNp linux-2.6.32.41/arch/x86/mm/pgtable_32.c linux-2.6.32.41/arch/x86/mm/pgtable_32.c
21478 --- linux-2.6.32.41/arch/x86/mm/pgtable_32.c 2011-03-27 14:31:47.000000000 -0400
21479 +++ linux-2.6.32.41/arch/x86/mm/pgtable_32.c 2011-04-17 15:56:46.000000000 -0400
21480 @@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr,
21481 return;
21482 }
21483 pte = pte_offset_kernel(pmd, vaddr);
21484 +
21485 + pax_open_kernel();
21486 if (pte_val(pteval))
21487 set_pte_at(&init_mm, vaddr, pte, pteval);
21488 else
21489 pte_clear(&init_mm, vaddr, pte);
21490 + pax_close_kernel();
21491
21492 /*
21493 * It's enough to flush this one mapping.
21494 diff -urNp linux-2.6.32.41/arch/x86/mm/pgtable.c linux-2.6.32.41/arch/x86/mm/pgtable.c
21495 --- linux-2.6.32.41/arch/x86/mm/pgtable.c 2011-03-27 14:31:47.000000000 -0400
21496 +++ linux-2.6.32.41/arch/x86/mm/pgtable.c 2011-05-11 18:25:15.000000000 -0400
21497 @@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *p
21498 list_del(&page->lru);
21499 }
21500
21501 -#define UNSHARED_PTRS_PER_PGD \
21502 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
21503 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21504 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
21505
21506 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
21507 +{
21508 + while (count--)
21509 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
21510 +}
21511 +#endif
21512 +
21513 +#ifdef CONFIG_PAX_PER_CPU_PGD
21514 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
21515 +{
21516 + while (count--)
21517 +
21518 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21519 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
21520 +#else
21521 + *dst++ = *src++;
21522 +#endif
21523 +
21524 +}
21525 +#endif
21526 +
21527 +#ifdef CONFIG_X86_64
21528 +#define pxd_t pud_t
21529 +#define pyd_t pgd_t
21530 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
21531 +#define pxd_free(mm, pud) pud_free((mm), (pud))
21532 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
21533 +#define pyd_offset(mm ,address) pgd_offset((mm), (address))
21534 +#define PYD_SIZE PGDIR_SIZE
21535 +#else
21536 +#define pxd_t pmd_t
21537 +#define pyd_t pud_t
21538 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
21539 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
21540 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
21541 +#define pyd_offset(mm ,address) pud_offset((mm), (address))
21542 +#define PYD_SIZE PUD_SIZE
21543 +#endif
21544 +
21545 +#ifdef CONFIG_PAX_PER_CPU_PGD
21546 +static inline void pgd_ctor(pgd_t *pgd) {}
21547 +static inline void pgd_dtor(pgd_t *pgd) {}
21548 +#else
21549 static void pgd_ctor(pgd_t *pgd)
21550 {
21551 /* If the pgd points to a shared pagetable level (either the
21552 @@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
21553 pgd_list_del(pgd);
21554 spin_unlock_irqrestore(&pgd_lock, flags);
21555 }
21556 +#endif
21557
21558 /*
21559 * List of all pgd's needed for non-PAE so it can invalidate entries
21560 @@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
21561 * -- wli
21562 */
21563
21564 -#ifdef CONFIG_X86_PAE
21565 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
21566 /*
21567 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
21568 * updating the top-level pagetable entries to guarantee the
21569 @@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
21570 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
21571 * and initialize the kernel pmds here.
21572 */
21573 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
21574 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
21575
21576 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
21577 {
21578 @@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm,
21579 */
21580 flush_tlb_mm(mm);
21581 }
21582 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
21583 +#define PREALLOCATED_PXDS USER_PGD_PTRS
21584 #else /* !CONFIG_X86_PAE */
21585
21586 /* No need to prepopulate any pagetable entries in non-PAE modes. */
21587 -#define PREALLOCATED_PMDS 0
21588 +#define PREALLOCATED_PXDS 0
21589
21590 #endif /* CONFIG_X86_PAE */
21591
21592 -static void free_pmds(pmd_t *pmds[])
21593 +static void free_pxds(pxd_t *pxds[])
21594 {
21595 int i;
21596
21597 - for(i = 0; i < PREALLOCATED_PMDS; i++)
21598 - if (pmds[i])
21599 - free_page((unsigned long)pmds[i]);
21600 + for(i = 0; i < PREALLOCATED_PXDS; i++)
21601 + if (pxds[i])
21602 + free_page((unsigned long)pxds[i]);
21603 }
21604
21605 -static int preallocate_pmds(pmd_t *pmds[])
21606 +static int preallocate_pxds(pxd_t *pxds[])
21607 {
21608 int i;
21609 bool failed = false;
21610
21611 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
21612 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
21613 - if (pmd == NULL)
21614 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
21615 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
21616 + if (pxd == NULL)
21617 failed = true;
21618 - pmds[i] = pmd;
21619 + pxds[i] = pxd;
21620 }
21621
21622 if (failed) {
21623 - free_pmds(pmds);
21624 + free_pxds(pxds);
21625 return -ENOMEM;
21626 }
21627
21628 @@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[
21629 * preallocate which never got a corresponding vma will need to be
21630 * freed manually.
21631 */
21632 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
21633 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
21634 {
21635 int i;
21636
21637 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
21638 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
21639 pgd_t pgd = pgdp[i];
21640
21641 if (pgd_val(pgd) != 0) {
21642 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
21643 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
21644
21645 - pgdp[i] = native_make_pgd(0);
21646 + set_pgd(pgdp + i, native_make_pgd(0));
21647
21648 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
21649 - pmd_free(mm, pmd);
21650 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
21651 + pxd_free(mm, pxd);
21652 }
21653 }
21654 }
21655
21656 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
21657 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
21658 {
21659 - pud_t *pud;
21660 + pyd_t *pyd;
21661 unsigned long addr;
21662 int i;
21663
21664 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
21665 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
21666 return;
21667
21668 - pud = pud_offset(pgd, 0);
21669 +#ifdef CONFIG_X86_64
21670 + pyd = pyd_offset(mm, 0L);
21671 +#else
21672 + pyd = pyd_offset(pgd, 0L);
21673 +#endif
21674
21675 - for (addr = i = 0; i < PREALLOCATED_PMDS;
21676 - i++, pud++, addr += PUD_SIZE) {
21677 - pmd_t *pmd = pmds[i];
21678 + for (addr = i = 0; i < PREALLOCATED_PXDS;
21679 + i++, pyd++, addr += PYD_SIZE) {
21680 + pxd_t *pxd = pxds[i];
21681
21682 if (i >= KERNEL_PGD_BOUNDARY)
21683 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
21684 - sizeof(pmd_t) * PTRS_PER_PMD);
21685 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
21686 + sizeof(pxd_t) * PTRS_PER_PMD);
21687
21688 - pud_populate(mm, pud, pmd);
21689 + pyd_populate(mm, pyd, pxd);
21690 }
21691 }
21692
21693 pgd_t *pgd_alloc(struct mm_struct *mm)
21694 {
21695 pgd_t *pgd;
21696 - pmd_t *pmds[PREALLOCATED_PMDS];
21697 + pxd_t *pxds[PREALLOCATED_PXDS];
21698 +
21699 unsigned long flags;
21700
21701 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
21702 @@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
21703
21704 mm->pgd = pgd;
21705
21706 - if (preallocate_pmds(pmds) != 0)
21707 + if (preallocate_pxds(pxds) != 0)
21708 goto out_free_pgd;
21709
21710 if (paravirt_pgd_alloc(mm) != 0)
21711 - goto out_free_pmds;
21712 + goto out_free_pxds;
21713
21714 /*
21715 * Make sure that pre-populating the pmds is atomic with
21716 @@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
21717 spin_lock_irqsave(&pgd_lock, flags);
21718
21719 pgd_ctor(pgd);
21720 - pgd_prepopulate_pmd(mm, pgd, pmds);
21721 + pgd_prepopulate_pxd(mm, pgd, pxds);
21722
21723 spin_unlock_irqrestore(&pgd_lock, flags);
21724
21725 return pgd;
21726
21727 -out_free_pmds:
21728 - free_pmds(pmds);
21729 +out_free_pxds:
21730 + free_pxds(pxds);
21731 out_free_pgd:
21732 free_page((unsigned long)pgd);
21733 out:
21734 @@ -287,7 +338,7 @@ out:
21735
21736 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
21737 {
21738 - pgd_mop_up_pmds(mm, pgd);
21739 + pgd_mop_up_pxds(mm, pgd);
21740 pgd_dtor(pgd);
21741 paravirt_pgd_free(mm, pgd);
21742 free_page((unsigned long)pgd);
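The pgtable.c changes above rename the preallocation helpers from *_pmds to *_pxds and route them through the pxd_t/pyd_t macro layer, so the same body preallocates PMDs under a PUD on 32-bit PAE and PUDs under a PGD on x86_64 (the per-CPU-PGD case preallocates one entry per user pgd slot). The allocation pattern itself, allocate every slot, remember any failure, and release everything if one allocation failed, looks like this in a user-space model:

/* Sketch of the preallocate/free pattern; sizes and counts are placeholders. */
#include <stdlib.h>
#include <stdbool.h>

#define PREALLOCATED 4                      /* stands in for PREALLOCATED_PXDS */

static void free_levels(void *tbl[])
{
        for (int i = 0; i < PREALLOCATED; i++)
                free(tbl[i]);               /* free(NULL) is a no-op */
}

static int preallocate_levels(void *tbl[])
{
        bool failed = false;

        for (int i = 0; i < PREALLOCATED; i++) {
                tbl[i] = calloc(1, 4096);   /* one page-sized table per slot       */
                if (!tbl[i])
                        failed = true;      /* keep going so every slot is defined */
        }
        if (failed) {
                free_levels(tbl);
                return -1;
        }
        return 0;
}

int main(void)
{
        void *tbl[PREALLOCATED];

        if (preallocate_levels(tbl) == 0)
                free_levels(tbl);
        return 0;
}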
21743 diff -urNp linux-2.6.32.41/arch/x86/mm/setup_nx.c linux-2.6.32.41/arch/x86/mm/setup_nx.c
21744 --- linux-2.6.32.41/arch/x86/mm/setup_nx.c 2011-03-27 14:31:47.000000000 -0400
21745 +++ linux-2.6.32.41/arch/x86/mm/setup_nx.c 2011-04-17 15:56:46.000000000 -0400
21746 @@ -4,11 +4,10 @@
21747
21748 #include <asm/pgtable.h>
21749
21750 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
21751 int nx_enabled;
21752
21753 -#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21754 -static int disable_nx __cpuinitdata;
21755 -
21756 +#ifndef CONFIG_PAX_PAGEEXEC
21757 /*
21758 * noexec = on|off
21759 *
21760 @@ -22,32 +21,26 @@ static int __init noexec_setup(char *str
21761 if (!str)
21762 return -EINVAL;
21763 if (!strncmp(str, "on", 2)) {
21764 - __supported_pte_mask |= _PAGE_NX;
21765 - disable_nx = 0;
21766 + nx_enabled = 1;
21767 } else if (!strncmp(str, "off", 3)) {
21768 - disable_nx = 1;
21769 - __supported_pte_mask &= ~_PAGE_NX;
21770 + nx_enabled = 0;
21771 }
21772 return 0;
21773 }
21774 early_param("noexec", noexec_setup);
21775 #endif
21776 +#endif
21777
21778 #ifdef CONFIG_X86_PAE
21779 void __init set_nx(void)
21780 {
21781 - unsigned int v[4], l, h;
21782 + if (!nx_enabled && cpu_has_nx) {
21783 + unsigned l, h;
21784
21785 - if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
21786 - cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
21787 -
21788 - if ((v[3] & (1 << 20)) && !disable_nx) {
21789 - rdmsr(MSR_EFER, l, h);
21790 - l |= EFER_NX;
21791 - wrmsr(MSR_EFER, l, h);
21792 - nx_enabled = 1;
21793 - __supported_pte_mask |= _PAGE_NX;
21794 - }
21795 + __supported_pte_mask &= ~_PAGE_NX;
21796 + rdmsr(MSR_EFER, l, h);
21797 + l &= ~EFER_NX;
21798 + wrmsr(MSR_EFER, l, h);
21799 }
21800 }
21801 #else
21802 @@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
21803 unsigned long efer;
21804
21805 rdmsrl(MSR_EFER, efer);
21806 - if (!(efer & EFER_NX) || disable_nx)
21807 + if (!(efer & EFER_NX) || !nx_enabled)
21808 __supported_pte_mask &= ~_PAGE_NX;
21809 }
21810 #endif
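The setup_nx.c rewrite above inverts the stock logic: instead of probing CPUID and setting EFER.NX when "noexec=on", the PaX kernel assumes NX is wanted and set_nx() only clears it (and the _PAGE_NX mask bit) when the user passed "noexec=off" on a CPU that has NX. EFER.NX is bit 11 of the EFER MSR; the two flows, with rdmsr/wrmsr stubbed over a plain variable so they can be traced outside the kernel, and with the CPUID probe collapsed into a boolean:

/* Sketch only; the real code uses rdmsr/wrmsr on MSR_EFER in kernel context. */
#include <stdio.h>
#include <stdbool.h>

#define EFER_NX (1UL << 11)

static unsigned long fake_efer;                   /* stands in for MSR_EFER */

static unsigned long rdmsr_efer(void)             { return fake_efer; }
static void          wrmsr_efer(unsigned long v)  { fake_efer = v; }

static void set_nx_upstream(bool cpu_has_nx, bool disable_nx)
{
        if (cpu_has_nx && !disable_nx)
                wrmsr_efer(rdmsr_efer() | EFER_NX);   /* old code: enable on demand */
}

static void set_nx_pax(bool cpu_has_nx, bool nx_enabled)
{
        if (!nx_enabled && cpu_has_nx)
                wrmsr_efer(rdmsr_efer() & ~EFER_NX);  /* patched code: NX assumed on,
                                                         only "noexec=off" clears it */
}

int main(void)
{
        fake_efer = 0;
        set_nx_upstream(true, false);
        printf("after upstream enable: NX=%d\n", (fake_efer & EFER_NX) ? 1 : 0);

        set_nx_pax(true, false);                      /* models "noexec=off" */
        printf("after pax noexec=off:  NX=%d\n", (fake_efer & EFER_NX) ? 1 : 0);
        return 0;
}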
21811 diff -urNp linux-2.6.32.41/arch/x86/mm/tlb.c linux-2.6.32.41/arch/x86/mm/tlb.c
21812 --- linux-2.6.32.41/arch/x86/mm/tlb.c 2011-03-27 14:31:47.000000000 -0400
21813 +++ linux-2.6.32.41/arch/x86/mm/tlb.c 2011-04-23 12:56:10.000000000 -0400
21814 @@ -61,7 +61,11 @@ void leave_mm(int cpu)
21815 BUG();
21816 cpumask_clear_cpu(cpu,
21817 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
21818 +
21819 +#ifndef CONFIG_PAX_PER_CPU_PGD
21820 load_cr3(swapper_pg_dir);
21821 +#endif
21822 +
21823 }
21824 EXPORT_SYMBOL_GPL(leave_mm);
21825
21826 diff -urNp linux-2.6.32.41/arch/x86/oprofile/backtrace.c linux-2.6.32.41/arch/x86/oprofile/backtrace.c
21827 --- linux-2.6.32.41/arch/x86/oprofile/backtrace.c 2011-03-27 14:31:47.000000000 -0400
21828 +++ linux-2.6.32.41/arch/x86/oprofile/backtrace.c 2011-04-17 15:56:46.000000000 -0400
21829 @@ -57,7 +57,7 @@ static struct frame_head *dump_user_back
21830 struct frame_head bufhead[2];
21831
21832 /* Also check accessibility of one struct frame_head beyond */
21833 - if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
21834 + if (!__access_ok(VERIFY_READ, head, sizeof(bufhead)))
21835 return NULL;
21836 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
21837 return NULL;
21838 @@ -77,7 +77,7 @@ x86_backtrace(struct pt_regs * const reg
21839 {
21840 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
21841
21842 - if (!user_mode_vm(regs)) {
21843 + if (!user_mode(regs)) {
21844 unsigned long stack = kernel_stack_pointer(regs);
21845 if (depth)
21846 dump_trace(NULL, regs, (unsigned long *)stack, 0,
21847 diff -urNp linux-2.6.32.41/arch/x86/oprofile/op_model_p4.c linux-2.6.32.41/arch/x86/oprofile/op_model_p4.c
21848 --- linux-2.6.32.41/arch/x86/oprofile/op_model_p4.c 2011-03-27 14:31:47.000000000 -0400
21849 +++ linux-2.6.32.41/arch/x86/oprofile/op_model_p4.c 2011-04-17 15:56:46.000000000 -0400
21850 @@ -50,7 +50,7 @@ static inline void setup_num_counters(vo
21851 #endif
21852 }
21853
21854 -static int inline addr_increment(void)
21855 +static inline int addr_increment(void)
21856 {
21857 #ifdef CONFIG_SMP
21858 return smp_num_siblings == 2 ? 2 : 1;
21859 diff -urNp linux-2.6.32.41/arch/x86/pci/common.c linux-2.6.32.41/arch/x86/pci/common.c
21860 --- linux-2.6.32.41/arch/x86/pci/common.c 2011-03-27 14:31:47.000000000 -0400
21861 +++ linux-2.6.32.41/arch/x86/pci/common.c 2011-04-23 12:56:10.000000000 -0400
21862 @@ -31,8 +31,8 @@ int noioapicreroute = 1;
21863 int pcibios_last_bus = -1;
21864 unsigned long pirq_table_addr;
21865 struct pci_bus *pci_root_bus;
21866 -struct pci_raw_ops *raw_pci_ops;
21867 -struct pci_raw_ops *raw_pci_ext_ops;
21868 +const struct pci_raw_ops *raw_pci_ops;
21869 +const struct pci_raw_ops *raw_pci_ext_ops;
21870
21871 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
21872 int reg, int len, u32 *val)
21873 diff -urNp linux-2.6.32.41/arch/x86/pci/direct.c linux-2.6.32.41/arch/x86/pci/direct.c
21874 --- linux-2.6.32.41/arch/x86/pci/direct.c 2011-03-27 14:31:47.000000000 -0400
21875 +++ linux-2.6.32.41/arch/x86/pci/direct.c 2011-04-17 15:56:46.000000000 -0400
21876 @@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int
21877
21878 #undef PCI_CONF1_ADDRESS
21879
21880 -struct pci_raw_ops pci_direct_conf1 = {
21881 +const struct pci_raw_ops pci_direct_conf1 = {
21882 .read = pci_conf1_read,
21883 .write = pci_conf1_write,
21884 };
21885 @@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int
21886
21887 #undef PCI_CONF2_ADDRESS
21888
21889 -struct pci_raw_ops pci_direct_conf2 = {
21890 +const struct pci_raw_ops pci_direct_conf2 = {
21891 .read = pci_conf2_read,
21892 .write = pci_conf2_write,
21893 };
21894 @@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
21895 * This should be close to trivial, but it isn't, because there are buggy
21896 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
21897 */
21898 -static int __init pci_sanity_check(struct pci_raw_ops *o)
21899 +static int __init pci_sanity_check(const struct pci_raw_ops *o)
21900 {
21901 u32 x = 0;
21902 int year, devfn;
21903 diff -urNp linux-2.6.32.41/arch/x86/pci/mmconfig_32.c linux-2.6.32.41/arch/x86/pci/mmconfig_32.c
21904 --- linux-2.6.32.41/arch/x86/pci/mmconfig_32.c 2011-03-27 14:31:47.000000000 -0400
21905 +++ linux-2.6.32.41/arch/x86/pci/mmconfig_32.c 2011-04-17 15:56:46.000000000 -0400
21906 @@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int
21907 return 0;
21908 }
21909
21910 -static struct pci_raw_ops pci_mmcfg = {
21911 +static const struct pci_raw_ops pci_mmcfg = {
21912 .read = pci_mmcfg_read,
21913 .write = pci_mmcfg_write,
21914 };
21915 diff -urNp linux-2.6.32.41/arch/x86/pci/mmconfig_64.c linux-2.6.32.41/arch/x86/pci/mmconfig_64.c
21916 --- linux-2.6.32.41/arch/x86/pci/mmconfig_64.c 2011-03-27 14:31:47.000000000 -0400
21917 +++ linux-2.6.32.41/arch/x86/pci/mmconfig_64.c 2011-04-17 15:56:46.000000000 -0400
21918 @@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int
21919 return 0;
21920 }
21921
21922 -static struct pci_raw_ops pci_mmcfg = {
21923 +static const struct pci_raw_ops pci_mmcfg = {
21924 .read = pci_mmcfg_read,
21925 .write = pci_mmcfg_write,
21926 };
21927 diff -urNp linux-2.6.32.41/arch/x86/pci/numaq_32.c linux-2.6.32.41/arch/x86/pci/numaq_32.c
21928 --- linux-2.6.32.41/arch/x86/pci/numaq_32.c 2011-03-27 14:31:47.000000000 -0400
21929 +++ linux-2.6.32.41/arch/x86/pci/numaq_32.c 2011-04-17 15:56:46.000000000 -0400
21930 @@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned i
21931
21932 #undef PCI_CONF1_MQ_ADDRESS
21933
21934 -static struct pci_raw_ops pci_direct_conf1_mq = {
21935 +static const struct pci_raw_ops pci_direct_conf1_mq = {
21936 .read = pci_conf1_mq_read,
21937 .write = pci_conf1_mq_write
21938 };
21939 diff -urNp linux-2.6.32.41/arch/x86/pci/olpc.c linux-2.6.32.41/arch/x86/pci/olpc.c
21940 --- linux-2.6.32.41/arch/x86/pci/olpc.c 2011-03-27 14:31:47.000000000 -0400
21941 +++ linux-2.6.32.41/arch/x86/pci/olpc.c 2011-04-17 15:56:46.000000000 -0400
21942 @@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int s
21943 return 0;
21944 }
21945
21946 -static struct pci_raw_ops pci_olpc_conf = {
21947 +static const struct pci_raw_ops pci_olpc_conf = {
21948 .read = pci_olpc_read,
21949 .write = pci_olpc_write,
21950 };
21951 diff -urNp linux-2.6.32.41/arch/x86/pci/pcbios.c linux-2.6.32.41/arch/x86/pci/pcbios.c
21952 --- linux-2.6.32.41/arch/x86/pci/pcbios.c 2011-03-27 14:31:47.000000000 -0400
21953 +++ linux-2.6.32.41/arch/x86/pci/pcbios.c 2011-04-17 15:56:46.000000000 -0400
21954 @@ -56,50 +56,93 @@ union bios32 {
21955 static struct {
21956 unsigned long address;
21957 unsigned short segment;
21958 -} bios32_indirect = { 0, __KERNEL_CS };
21959 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
21960
21961 /*
21962 * Returns the entry point for the given service, NULL on error
21963 */
21964
21965 -static unsigned long bios32_service(unsigned long service)
21966 +static unsigned long __devinit bios32_service(unsigned long service)
21967 {
21968 unsigned char return_code; /* %al */
21969 unsigned long address; /* %ebx */
21970 unsigned long length; /* %ecx */
21971 unsigned long entry; /* %edx */
21972 unsigned long flags;
21973 + struct desc_struct d, *gdt;
21974
21975 local_irq_save(flags);
21976 - __asm__("lcall *(%%edi); cld"
21977 +
21978 + gdt = get_cpu_gdt_table(smp_processor_id());
21979 +
21980 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
21981 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
21982 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
21983 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
21984 +
21985 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
21986 : "=a" (return_code),
21987 "=b" (address),
21988 "=c" (length),
21989 "=d" (entry)
21990 : "0" (service),
21991 "1" (0),
21992 - "D" (&bios32_indirect));
21993 + "D" (&bios32_indirect),
21994 + "r"(__PCIBIOS_DS)
21995 + : "memory");
21996 +
21997 + pax_open_kernel();
21998 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
21999 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
22000 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
22001 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
22002 + pax_close_kernel();
22003 +
22004 local_irq_restore(flags);
22005
22006 switch (return_code) {
22007 - case 0:
22008 - return address + entry;
22009 - case 0x80: /* Not present */
22010 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
22011 - return 0;
22012 - default: /* Shouldn't happen */
22013 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
22014 - service, return_code);
22015 + case 0: {
22016 + int cpu;
22017 + unsigned char flags;
22018 +
22019 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
22020 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
22021 + printk(KERN_WARNING "bios32_service: not valid\n");
22022 return 0;
22023 + }
22024 + address = address + PAGE_OFFSET;
22025 + length += 16UL; /* some BIOSs underreport this... */
22026 + flags = 4;
22027 + if (length >= 64*1024*1024) {
22028 + length >>= PAGE_SHIFT;
22029 + flags |= 8;
22030 + }
22031 +
22032 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
22033 + gdt = get_cpu_gdt_table(cpu);
22034 + pack_descriptor(&d, address, length, 0x9b, flags);
22035 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
22036 + pack_descriptor(&d, address, length, 0x93, flags);
22037 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
22038 + }
22039 + return entry;
22040 + }
22041 + case 0x80: /* Not present */
22042 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
22043 + return 0;
22044 + default: /* Shouldn't happen */
22045 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
22046 + service, return_code);
22047 + return 0;
22048 }
22049 }
22050
22051 static struct {
22052 unsigned long address;
22053 unsigned short segment;
22054 -} pci_indirect = { 0, __KERNEL_CS };
22055 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
22056
22057 -static int pci_bios_present;
22058 +static int pci_bios_present __read_only;
22059
22060 static int __devinit check_pcibios(void)
22061 {
22062 @@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
22063 unsigned long flags, pcibios_entry;
22064
22065 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
22066 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
22067 + pci_indirect.address = pcibios_entry;
22068
22069 local_irq_save(flags);
22070 - __asm__(
22071 - "lcall *(%%edi); cld\n\t"
22072 + __asm__("movw %w6, %%ds\n\t"
22073 + "lcall *%%ss:(%%edi); cld\n\t"
22074 + "push %%ss\n\t"
22075 + "pop %%ds\n\t"
22076 "jc 1f\n\t"
22077 "xor %%ah, %%ah\n"
22078 "1:"
22079 @@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
22080 "=b" (ebx),
22081 "=c" (ecx)
22082 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
22083 - "D" (&pci_indirect)
22084 + "D" (&pci_indirect),
22085 + "r" (__PCIBIOS_DS)
22086 : "memory");
22087 local_irq_restore(flags);
22088
22089 @@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int se
22090
22091 switch (len) {
22092 case 1:
22093 - __asm__("lcall *(%%esi); cld\n\t"
22094 + __asm__("movw %w6, %%ds\n\t"
22095 + "lcall *%%ss:(%%esi); cld\n\t"
22096 + "push %%ss\n\t"
22097 + "pop %%ds\n\t"
22098 "jc 1f\n\t"
22099 "xor %%ah, %%ah\n"
22100 "1:"
22101 @@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int se
22102 : "1" (PCIBIOS_READ_CONFIG_BYTE),
22103 "b" (bx),
22104 "D" ((long)reg),
22105 - "S" (&pci_indirect));
22106 + "S" (&pci_indirect),
22107 + "r" (__PCIBIOS_DS));
22108 /*
22109 * Zero-extend the result beyond 8 bits, do not trust the
22110 * BIOS having done it:
22111 @@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int se
22112 *value &= 0xff;
22113 break;
22114 case 2:
22115 - __asm__("lcall *(%%esi); cld\n\t"
22116 + __asm__("movw %w6, %%ds\n\t"
22117 + "lcall *%%ss:(%%esi); cld\n\t"
22118 + "push %%ss\n\t"
22119 + "pop %%ds\n\t"
22120 "jc 1f\n\t"
22121 "xor %%ah, %%ah\n"
22122 "1:"
22123 @@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int se
22124 : "1" (PCIBIOS_READ_CONFIG_WORD),
22125 "b" (bx),
22126 "D" ((long)reg),
22127 - "S" (&pci_indirect));
22128 + "S" (&pci_indirect),
22129 + "r" (__PCIBIOS_DS));
22130 /*
22131 * Zero-extend the result beyond 16 bits, do not trust the
22132 * BIOS having done it:
22133 @@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int se
22134 *value &= 0xffff;
22135 break;
22136 case 4:
22137 - __asm__("lcall *(%%esi); cld\n\t"
22138 + __asm__("movw %w6, %%ds\n\t"
22139 + "lcall *%%ss:(%%esi); cld\n\t"
22140 + "push %%ss\n\t"
22141 + "pop %%ds\n\t"
22142 "jc 1f\n\t"
22143 "xor %%ah, %%ah\n"
22144 "1:"
22145 @@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int se
22146 : "1" (PCIBIOS_READ_CONFIG_DWORD),
22147 "b" (bx),
22148 "D" ((long)reg),
22149 - "S" (&pci_indirect));
22150 + "S" (&pci_indirect),
22151 + "r" (__PCIBIOS_DS));
22152 break;
22153 }
22154
22155 @@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int s
22156
22157 switch (len) {
22158 case 1:
22159 - __asm__("lcall *(%%esi); cld\n\t"
22160 + __asm__("movw %w6, %%ds\n\t"
22161 + "lcall *%%ss:(%%esi); cld\n\t"
22162 + "push %%ss\n\t"
22163 + "pop %%ds\n\t"
22164 "jc 1f\n\t"
22165 "xor %%ah, %%ah\n"
22166 "1:"
22167 @@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int s
22168 "c" (value),
22169 "b" (bx),
22170 "D" ((long)reg),
22171 - "S" (&pci_indirect));
22172 + "S" (&pci_indirect),
22173 + "r" (__PCIBIOS_DS));
22174 break;
22175 case 2:
22176 - __asm__("lcall *(%%esi); cld\n\t"
22177 + __asm__("movw %w6, %%ds\n\t"
22178 + "lcall *%%ss:(%%esi); cld\n\t"
22179 + "push %%ss\n\t"
22180 + "pop %%ds\n\t"
22181 "jc 1f\n\t"
22182 "xor %%ah, %%ah\n"
22183 "1:"
22184 @@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int s
22185 "c" (value),
22186 "b" (bx),
22187 "D" ((long)reg),
22188 - "S" (&pci_indirect));
22189 + "S" (&pci_indirect),
22190 + "r" (__PCIBIOS_DS));
22191 break;
22192 case 4:
22193 - __asm__("lcall *(%%esi); cld\n\t"
22194 + __asm__("movw %w6, %%ds\n\t"
22195 + "lcall *%%ss:(%%esi); cld\n\t"
22196 + "push %%ss\n\t"
22197 + "pop %%ds\n\t"
22198 "jc 1f\n\t"
22199 "xor %%ah, %%ah\n"
22200 "1:"
22201 @@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int s
22202 "c" (value),
22203 "b" (bx),
22204 "D" ((long)reg),
22205 - "S" (&pci_indirect));
22206 + "S" (&pci_indirect),
22207 + "r" (__PCIBIOS_DS));
22208 break;
22209 }
22210
22211 @@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int s
22212 * Function table for BIOS32 access
22213 */
22214
22215 -static struct pci_raw_ops pci_bios_access = {
22216 +static const struct pci_raw_ops pci_bios_access = {
22217 .read = pci_bios_read,
22218 .write = pci_bios_write
22219 };
22220 @@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_acces
22221 * Try to find PCI BIOS.
22222 */
22223
22224 -static struct pci_raw_ops * __devinit pci_find_bios(void)
22225 +static const struct pci_raw_ops * __devinit pci_find_bios(void)
22226 {
22227 union bios32 *check;
22228 unsigned char sum;
22229 @@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_i
22230
22231 DBG("PCI: Fetching IRQ routing table... ");
22232 __asm__("push %%es\n\t"
22233 + "movw %w8, %%ds\n\t"
22234 "push %%ds\n\t"
22235 "pop %%es\n\t"
22236 - "lcall *(%%esi); cld\n\t"
22237 + "lcall *%%ss:(%%esi); cld\n\t"
22238 "pop %%es\n\t"
22239 + "push %%ss\n\t"
22240 + "pop %%ds\n"
22241 "jc 1f\n\t"
22242 "xor %%ah, %%ah\n"
22243 "1:"
22244 @@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_i
22245 "1" (0),
22246 "D" ((long) &opt),
22247 "S" (&pci_indirect),
22248 - "m" (opt)
22249 + "m" (opt),
22250 + "r" (__PCIBIOS_DS)
22251 : "memory");
22252 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
22253 if (ret & 0xff00)
22254 @@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_d
22255 {
22256 int ret;
22257
22258 - __asm__("lcall *(%%esi); cld\n\t"
22259 + __asm__("movw %w5, %%ds\n\t"
22260 + "lcall *%%ss:(%%esi); cld\n\t"
22261 + "push %%ss\n\t"
22262 + "pop %%ds\n"
22263 "jc 1f\n\t"
22264 "xor %%ah, %%ah\n"
22265 "1:"
22266 @@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_d
22267 : "0" (PCIBIOS_SET_PCI_HW_INT),
22268 "b" ((dev->bus->number << 8) | dev->devfn),
22269 "c" ((irq << 8) | (pin + 10)),
22270 - "S" (&pci_indirect));
22271 + "S" (&pci_indirect),
22272 + "r" (__PCIBIOS_DS));
22273 return !(ret & 0xff00);
22274 }
22275 EXPORT_SYMBOL(pcibios_set_irq_routing);
22276 diff -urNp linux-2.6.32.41/arch/x86/power/cpu.c linux-2.6.32.41/arch/x86/power/cpu.c
22277 --- linux-2.6.32.41/arch/x86/power/cpu.c 2011-03-27 14:31:47.000000000 -0400
22278 +++ linux-2.6.32.41/arch/x86/power/cpu.c 2011-04-17 15:56:46.000000000 -0400
22279 @@ -129,7 +129,7 @@ static void do_fpu_end(void)
22280 static void fix_processor_context(void)
22281 {
22282 int cpu = smp_processor_id();
22283 - struct tss_struct *t = &per_cpu(init_tss, cpu);
22284 + struct tss_struct *t = init_tss + cpu;
22285
22286 set_tss_desc(cpu, t); /*
22287 * This just modifies memory; should not be
22288 @@ -139,7 +139,9 @@ static void fix_processor_context(void)
22289 */
22290
22291 #ifdef CONFIG_X86_64
22292 + pax_open_kernel();
22293 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
22294 + pax_close_kernel();
22295
22296 syscall_init(); /* This sets MSR_*STAR and related */
22297 #endif
22298 diff -urNp linux-2.6.32.41/arch/x86/vdso/Makefile linux-2.6.32.41/arch/x86/vdso/Makefile
22299 --- linux-2.6.32.41/arch/x86/vdso/Makefile 2011-03-27 14:31:47.000000000 -0400
22300 +++ linux-2.6.32.41/arch/x86/vdso/Makefile 2011-04-17 15:56:46.000000000 -0400
22301 @@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
22302 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
22303 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
22304
22305 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
22306 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
22307 GCOV_PROFILE := n
22308
22309 #
22310 diff -urNp linux-2.6.32.41/arch/x86/vdso/vclock_gettime.c linux-2.6.32.41/arch/x86/vdso/vclock_gettime.c
22311 --- linux-2.6.32.41/arch/x86/vdso/vclock_gettime.c 2011-03-27 14:31:47.000000000 -0400
22312 +++ linux-2.6.32.41/arch/x86/vdso/vclock_gettime.c 2011-04-17 15:56:46.000000000 -0400
22313 @@ -22,24 +22,48 @@
22314 #include <asm/hpet.h>
22315 #include <asm/unistd.h>
22316 #include <asm/io.h>
22317 +#include <asm/fixmap.h>
22318 #include "vextern.h"
22319
22320 #define gtod vdso_vsyscall_gtod_data
22321
22322 +notrace noinline long __vdso_fallback_time(long *t)
22323 +{
22324 + long secs;
22325 + asm volatile("syscall"
22326 + : "=a" (secs)
22327 + : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
22328 + return secs;
22329 +}
22330 +
22331 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
22332 {
22333 long ret;
22334 asm("syscall" : "=a" (ret) :
22335 - "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
22336 + "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
22337 return ret;
22338 }
22339
22340 +notrace static inline cycle_t __vdso_vread_hpet(void)
22341 +{
22342 + return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
22343 +}
22344 +
22345 +notrace static inline cycle_t __vdso_vread_tsc(void)
22346 +{
22347 + cycle_t ret = (cycle_t)vget_cycles();
22348 +
22349 + return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
22350 +}
22351 +
22352 notrace static inline long vgetns(void)
22353 {
22354 long v;
22355 - cycles_t (*vread)(void);
22356 - vread = gtod->clock.vread;
22357 - v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
22358 + if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
22359 + v = __vdso_vread_tsc();
22360 + else
22361 + v = __vdso_vread_hpet();
22362 + v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
22363 return (v * gtod->clock.mult) >> gtod->clock.shift;
22364 }
22365
22366 @@ -113,7 +137,9 @@ notrace static noinline int do_monotonic
22367
22368 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
22369 {
22370 - if (likely(gtod->sysctl_enabled))
22371 + if (likely(gtod->sysctl_enabled &&
22372 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
22373 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
22374 switch (clock) {
22375 case CLOCK_REALTIME:
22376 if (likely(gtod->clock.vread))
22377 @@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid
22378 int clock_gettime(clockid_t, struct timespec *)
22379 __attribute__((weak, alias("__vdso_clock_gettime")));
22380
22381 -notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
22382 +notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
22383 {
22384 long ret;
22385 - if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
22386 + asm("syscall" : "=a" (ret) :
22387 + "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
22388 + return ret;
22389 +}
22390 +
22391 +notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
22392 +{
22393 + if (likely(gtod->sysctl_enabled &&
22394 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
22395 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
22396 + {
22397 if (likely(tv != NULL)) {
22398 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
22399 offsetof(struct timespec, tv_nsec) ||
22400 @@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct t
22401 }
22402 return 0;
22403 }
22404 - asm("syscall" : "=a" (ret) :
22405 - "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
22406 - return ret;
22407 + return __vdso_fallback_gettimeofday(tv, tz);
22408 }
22409 int gettimeofday(struct timeval *, struct timezone *)
22410 __attribute__((weak, alias("__vdso_gettimeofday")));
22411 diff -urNp linux-2.6.32.41/arch/x86/vdso/vdso32-setup.c linux-2.6.32.41/arch/x86/vdso/vdso32-setup.c
22412 --- linux-2.6.32.41/arch/x86/vdso/vdso32-setup.c 2011-03-27 14:31:47.000000000 -0400
22413 +++ linux-2.6.32.41/arch/x86/vdso/vdso32-setup.c 2011-04-23 12:56:10.000000000 -0400
22414 @@ -25,6 +25,7 @@
22415 #include <asm/tlbflush.h>
22416 #include <asm/vdso.h>
22417 #include <asm/proto.h>
22418 +#include <asm/mman.h>
22419
22420 enum {
22421 VDSO_DISABLED = 0,
22422 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
22423 void enable_sep_cpu(void)
22424 {
22425 int cpu = get_cpu();
22426 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
22427 + struct tss_struct *tss = init_tss + cpu;
22428
22429 if (!boot_cpu_has(X86_FEATURE_SEP)) {
22430 put_cpu();
22431 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
22432 gate_vma.vm_start = FIXADDR_USER_START;
22433 gate_vma.vm_end = FIXADDR_USER_END;
22434 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
22435 - gate_vma.vm_page_prot = __P101;
22436 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
22437 /*
22438 * Make sure the vDSO gets into every core dump.
22439 * Dumping its contents makes post-mortem fully interpretable later
22440 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
22441 if (compat)
22442 addr = VDSO_HIGH_BASE;
22443 else {
22444 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
22445 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
22446 if (IS_ERR_VALUE(addr)) {
22447 ret = addr;
22448 goto up_fail;
22449 }
22450 }
22451
22452 - current->mm->context.vdso = (void *)addr;
22453 + current->mm->context.vdso = addr;
22454
22455 if (compat_uses_vma || !compat) {
22456 /*
22457 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
22458 }
22459
22460 current_thread_info()->sysenter_return =
22461 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
22462 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
22463
22464 up_fail:
22465 if (ret)
22466 - current->mm->context.vdso = NULL;
22467 + current->mm->context.vdso = 0;
22468
22469 up_write(&mm->mmap_sem);
22470
22471 @@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
22472
22473 const char *arch_vma_name(struct vm_area_struct *vma)
22474 {
22475 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
22476 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
22477 return "[vdso]";
22478 +
22479 +#ifdef CONFIG_PAX_SEGMEXEC
22480 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
22481 + return "[vdso]";
22482 +#endif
22483 +
22484 return NULL;
22485 }
22486
22487 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
22488 struct mm_struct *mm = tsk->mm;
22489
22490 /* Check to see if this task was created in compat vdso mode */
22491 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
22492 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
22493 return &gate_vma;
22494 return NULL;
22495 }
22496 diff -urNp linux-2.6.32.41/arch/x86/vdso/vdso.lds.S linux-2.6.32.41/arch/x86/vdso/vdso.lds.S
22497 --- linux-2.6.32.41/arch/x86/vdso/vdso.lds.S 2011-03-27 14:31:47.000000000 -0400
22498 +++ linux-2.6.32.41/arch/x86/vdso/vdso.lds.S 2011-06-06 17:35:35.000000000 -0400
22499 @@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
22500 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
22501 #include "vextern.h"
22502 #undef VEXTERN
22503 +
22504 +#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
22505 +VEXTERN(fallback_gettimeofday)
22506 +VEXTERN(fallback_time)
22507 +VEXTERN(getcpu)
22508 +#undef VEXTERN
22509 diff -urNp linux-2.6.32.41/arch/x86/vdso/vextern.h linux-2.6.32.41/arch/x86/vdso/vextern.h
22510 --- linux-2.6.32.41/arch/x86/vdso/vextern.h 2011-03-27 14:31:47.000000000 -0400
22511 +++ linux-2.6.32.41/arch/x86/vdso/vextern.h 2011-04-17 15:56:46.000000000 -0400
22512 @@ -11,6 +11,5 @@
22513 put into vextern.h and be referenced as a pointer with vdso prefix.
22514 The main kernel later fills in the values. */
22515
22516 -VEXTERN(jiffies)
22517 VEXTERN(vgetcpu_mode)
22518 VEXTERN(vsyscall_gtod_data)
22519 diff -urNp linux-2.6.32.41/arch/x86/vdso/vma.c linux-2.6.32.41/arch/x86/vdso/vma.c
22520 --- linux-2.6.32.41/arch/x86/vdso/vma.c 2011-03-27 14:31:47.000000000 -0400
22521 +++ linux-2.6.32.41/arch/x86/vdso/vma.c 2011-04-17 15:56:46.000000000 -0400
22522 @@ -57,7 +57,7 @@ static int __init init_vdso_vars(void)
22523 if (!vbase)
22524 goto oom;
22525
22526 - if (memcmp(vbase, "\177ELF", 4)) {
22527 + if (memcmp(vbase, ELFMAG, SELFMAG)) {
22528 printk("VDSO: I'm broken; not ELF\n");
22529 vdso_enabled = 0;
22530 }
22531 @@ -66,6 +66,7 @@ static int __init init_vdso_vars(void)
22532 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
22533 #include "vextern.h"
22534 #undef VEXTERN
22535 + vunmap(vbase);
22536 return 0;
22537
22538 oom:
22539 @@ -116,7 +117,7 @@ int arch_setup_additional_pages(struct l
22540 goto up_fail;
22541 }
22542
22543 - current->mm->context.vdso = (void *)addr;
22544 + current->mm->context.vdso = addr;
22545
22546 ret = install_special_mapping(mm, addr, vdso_size,
22547 VM_READ|VM_EXEC|
22548 @@ -124,7 +125,7 @@ int arch_setup_additional_pages(struct l
22549 VM_ALWAYSDUMP,
22550 vdso_pages);
22551 if (ret) {
22552 - current->mm->context.vdso = NULL;
22553 + current->mm->context.vdso = 0;
22554 goto up_fail;
22555 }
22556
22557 @@ -132,10 +133,3 @@ up_fail:
22558 up_write(&mm->mmap_sem);
22559 return ret;
22560 }
22561 -
22562 -static __init int vdso_setup(char *s)
22563 -{
22564 - vdso_enabled = simple_strtoul(s, NULL, 0);
22565 - return 0;
22566 -}
22567 -__setup("vdso=", vdso_setup);
22568 diff -urNp linux-2.6.32.41/arch/x86/xen/enlighten.c linux-2.6.32.41/arch/x86/xen/enlighten.c
22569 --- linux-2.6.32.41/arch/x86/xen/enlighten.c 2011-03-27 14:31:47.000000000 -0400
22570 +++ linux-2.6.32.41/arch/x86/xen/enlighten.c 2011-05-22 23:02:03.000000000 -0400
22571 @@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
22572
22573 struct shared_info xen_dummy_shared_info;
22574
22575 -void *xen_initial_gdt;
22576 -
22577 /*
22578 * Point at some empty memory to start with. We map the real shared_info
22579 * page as soon as fixmap is up and running.
22580 @@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_des
22581
22582 preempt_disable();
22583
22584 - start = __get_cpu_var(idt_desc).address;
22585 + start = (unsigned long)__get_cpu_var(idt_desc).address;
22586 end = start + __get_cpu_var(idt_desc).size + 1;
22587
22588 xen_mc_flush();
22589 @@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic
22590 #endif
22591 };
22592
22593 -static void xen_reboot(int reason)
22594 +static __noreturn void xen_reboot(int reason)
22595 {
22596 struct sched_shutdown r = { .reason = reason };
22597
22598 @@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
22599 BUG();
22600 }
22601
22602 -static void xen_restart(char *msg)
22603 +static __noreturn void xen_restart(char *msg)
22604 {
22605 xen_reboot(SHUTDOWN_reboot);
22606 }
22607
22608 -static void xen_emergency_restart(void)
22609 +static __noreturn void xen_emergency_restart(void)
22610 {
22611 xen_reboot(SHUTDOWN_reboot);
22612 }
22613
22614 -static void xen_machine_halt(void)
22615 +static __noreturn void xen_machine_halt(void)
22616 {
22617 xen_reboot(SHUTDOWN_poweroff);
22618 }
22619 @@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(
22620 */
22621 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
22622
22623 -#ifdef CONFIG_X86_64
22624 /* Work out if we support NX */
22625 - check_efer();
22626 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
22627 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
22628 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
22629 + unsigned l, h;
22630 +
22631 +#ifdef CONFIG_X86_PAE
22632 + nx_enabled = 1;
22633 +#endif
22634 + __supported_pte_mask |= _PAGE_NX;
22635 + rdmsr(MSR_EFER, l, h);
22636 + l |= EFER_NX;
22637 + wrmsr(MSR_EFER, l, h);
22638 + }
22639 #endif
22640
22641 xen_setup_features();
22642 @@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(
22643
22644 machine_ops = xen_machine_ops;
22645
22646 - /*
22647 - * The only reliable way to retain the initial address of the
22648 - * percpu gdt_page is to remember it here, so we can go and
22649 - * mark it RW later, when the initial percpu area is freed.
22650 - */
22651 - xen_initial_gdt = &per_cpu(gdt_page, 0);
22652 -
22653 xen_smp_init();
22654
22655 pgd = (pgd_t *)xen_start_info->pt_base;
22656 diff -urNp linux-2.6.32.41/arch/x86/xen/mmu.c linux-2.6.32.41/arch/x86/xen/mmu.c
22657 --- linux-2.6.32.41/arch/x86/xen/mmu.c 2011-03-27 14:31:47.000000000 -0400
22658 +++ linux-2.6.32.41/arch/x86/xen/mmu.c 2011-04-17 15:56:46.000000000 -0400
22659 @@ -1714,6 +1714,8 @@ __init pgd_t *xen_setup_kernel_pagetable
22660 convert_pfn_mfn(init_level4_pgt);
22661 convert_pfn_mfn(level3_ident_pgt);
22662 convert_pfn_mfn(level3_kernel_pgt);
22663 + convert_pfn_mfn(level3_vmalloc_pgt);
22664 + convert_pfn_mfn(level3_vmemmap_pgt);
22665
22666 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
22667 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
22668 @@ -1732,7 +1734,10 @@ __init pgd_t *xen_setup_kernel_pagetable
22669 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
22670 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
22671 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
22672 + set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
22673 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
22674 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
22675 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
22676 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
22677 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
22678
22679 diff -urNp linux-2.6.32.41/arch/x86/xen/smp.c linux-2.6.32.41/arch/x86/xen/smp.c
22680 --- linux-2.6.32.41/arch/x86/xen/smp.c 2011-03-27 14:31:47.000000000 -0400
22681 +++ linux-2.6.32.41/arch/x86/xen/smp.c 2011-05-11 18:25:15.000000000 -0400
22682 @@ -167,11 +167,6 @@ static void __init xen_smp_prepare_boot_
22683 {
22684 BUG_ON(smp_processor_id() != 0);
22685 native_smp_prepare_boot_cpu();
22686 -
22687 - /* We've switched to the "real" per-cpu gdt, so make sure the
22688 - old memory can be recycled */
22689 - make_lowmem_page_readwrite(xen_initial_gdt);
22690 -
22691 xen_setup_vcpu_info_placement();
22692 }
22693
22694 @@ -231,12 +226,12 @@ cpu_initialize_context(unsigned int cpu,
22695 gdt = get_cpu_gdt_table(cpu);
22696
22697 ctxt->flags = VGCF_IN_KERNEL;
22698 - ctxt->user_regs.ds = __USER_DS;
22699 - ctxt->user_regs.es = __USER_DS;
22700 + ctxt->user_regs.ds = __KERNEL_DS;
22701 + ctxt->user_regs.es = __KERNEL_DS;
22702 ctxt->user_regs.ss = __KERNEL_DS;
22703 #ifdef CONFIG_X86_32
22704 ctxt->user_regs.fs = __KERNEL_PERCPU;
22705 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
22706 + savesegment(gs, ctxt->user_regs.gs);
22707 #else
22708 ctxt->gs_base_kernel = per_cpu_offset(cpu);
22709 #endif
22710 @@ -287,13 +282,12 @@ static int __cpuinit xen_cpu_up(unsigned
22711 int rc;
22712
22713 per_cpu(current_task, cpu) = idle;
22714 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
22715 #ifdef CONFIG_X86_32
22716 irq_ctx_init(cpu);
22717 #else
22718 clear_tsk_thread_flag(idle, TIF_FORK);
22719 - per_cpu(kernel_stack, cpu) =
22720 - (unsigned long)task_stack_page(idle) -
22721 - KERNEL_STACK_OFFSET + THREAD_SIZE;
22722 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
22723 #endif
22724 xen_setup_runstate_info(cpu);
22725 xen_setup_timer(cpu);
22726 diff -urNp linux-2.6.32.41/arch/x86/xen/xen-asm_32.S linux-2.6.32.41/arch/x86/xen/xen-asm_32.S
22727 --- linux-2.6.32.41/arch/x86/xen/xen-asm_32.S 2011-03-27 14:31:47.000000000 -0400
22728 +++ linux-2.6.32.41/arch/x86/xen/xen-asm_32.S 2011-04-22 19:13:13.000000000 -0400
22729 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
22730 ESP_OFFSET=4 # bytes pushed onto stack
22731
22732 /*
22733 - * Store vcpu_info pointer for easy access. Do it this way to
22734 - * avoid having to reload %fs
22735 + * Store vcpu_info pointer for easy access.
22736 */
22737 #ifdef CONFIG_SMP
22738 - GET_THREAD_INFO(%eax)
22739 - movl TI_cpu(%eax), %eax
22740 - movl __per_cpu_offset(,%eax,4), %eax
22741 - mov per_cpu__xen_vcpu(%eax), %eax
22742 + push %fs
22743 + mov $(__KERNEL_PERCPU), %eax
22744 + mov %eax, %fs
22745 + mov PER_CPU_VAR(xen_vcpu), %eax
22746 + pop %fs
22747 #else
22748 movl per_cpu__xen_vcpu, %eax
22749 #endif
22750 diff -urNp linux-2.6.32.41/arch/x86/xen/xen-head.S linux-2.6.32.41/arch/x86/xen/xen-head.S
22751 --- linux-2.6.32.41/arch/x86/xen/xen-head.S 2011-03-27 14:31:47.000000000 -0400
22752 +++ linux-2.6.32.41/arch/x86/xen/xen-head.S 2011-04-17 15:56:46.000000000 -0400
22753 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
22754 #ifdef CONFIG_X86_32
22755 mov %esi,xen_start_info
22756 mov $init_thread_union+THREAD_SIZE,%esp
22757 +#ifdef CONFIG_SMP
22758 + movl $cpu_gdt_table,%edi
22759 + movl $__per_cpu_load,%eax
22760 + movw %ax,__KERNEL_PERCPU + 2(%edi)
22761 + rorl $16,%eax
22762 + movb %al,__KERNEL_PERCPU + 4(%edi)
22763 + movb %ah,__KERNEL_PERCPU + 7(%edi)
22764 + movl $__per_cpu_end - 1,%eax
22765 + subl $__per_cpu_start,%eax
22766 + movw %ax,__KERNEL_PERCPU + 0(%edi)
22767 +#endif
22768 #else
22769 mov %rsi,xen_start_info
22770 mov $init_thread_union+THREAD_SIZE,%rsp
22771 diff -urNp linux-2.6.32.41/arch/x86/xen/xen-ops.h linux-2.6.32.41/arch/x86/xen/xen-ops.h
22772 --- linux-2.6.32.41/arch/x86/xen/xen-ops.h 2011-03-27 14:31:47.000000000 -0400
22773 +++ linux-2.6.32.41/arch/x86/xen/xen-ops.h 2011-04-17 15:56:46.000000000 -0400
22774 @@ -10,8 +10,6 @@
22775 extern const char xen_hypervisor_callback[];
22776 extern const char xen_failsafe_callback[];
22777
22778 -extern void *xen_initial_gdt;
22779 -
22780 struct trap_info;
22781 void xen_copy_trap_info(struct trap_info *traps);
22782
22783 diff -urNp linux-2.6.32.41/block/blk-integrity.c linux-2.6.32.41/block/blk-integrity.c
22784 --- linux-2.6.32.41/block/blk-integrity.c 2011-03-27 14:31:47.000000000 -0400
22785 +++ linux-2.6.32.41/block/blk-integrity.c 2011-04-17 15:56:46.000000000 -0400
22786 @@ -278,7 +278,7 @@ static struct attribute *integrity_attrs
22787 NULL,
22788 };
22789
22790 -static struct sysfs_ops integrity_ops = {
22791 +static const struct sysfs_ops integrity_ops = {
22792 .show = &integrity_attr_show,
22793 .store = &integrity_attr_store,
22794 };
22795 diff -urNp linux-2.6.32.41/block/blk-iopoll.c linux-2.6.32.41/block/blk-iopoll.c
22796 --- linux-2.6.32.41/block/blk-iopoll.c 2011-03-27 14:31:47.000000000 -0400
22797 +++ linux-2.6.32.41/block/blk-iopoll.c 2011-04-17 15:56:46.000000000 -0400
22798 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
22799 }
22800 EXPORT_SYMBOL(blk_iopoll_complete);
22801
22802 -static void blk_iopoll_softirq(struct softirq_action *h)
22803 +static void blk_iopoll_softirq(void)
22804 {
22805 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
22806 int rearm = 0, budget = blk_iopoll_budget;
22807 diff -urNp linux-2.6.32.41/block/blk-map.c linux-2.6.32.41/block/blk-map.c
22808 --- linux-2.6.32.41/block/blk-map.c 2011-03-27 14:31:47.000000000 -0400
22809 +++ linux-2.6.32.41/block/blk-map.c 2011-04-18 16:57:33.000000000 -0400
22810 @@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct requ
22811 * direct dma. else, set up kernel bounce buffers
22812 */
22813 uaddr = (unsigned long) ubuf;
22814 - if (blk_rq_aligned(q, ubuf, len) && !map_data)
22815 + if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
22816 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
22817 else
22818 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
22819 @@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_q
22820 for (i = 0; i < iov_count; i++) {
22821 unsigned long uaddr = (unsigned long)iov[i].iov_base;
22822
22823 + if (!iov[i].iov_len)
22824 + return -EINVAL;
22825 +
22826 if (uaddr & queue_dma_alignment(q)) {
22827 unaligned = 1;
22828 break;
22829 }
22830 - if (!iov[i].iov_len)
22831 - return -EINVAL;
22832 }
22833
22834 if (unaligned || (q->dma_pad_mask & len) || map_data)
22835 @@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue
22836 if (!len || !kbuf)
22837 return -EINVAL;
22838
22839 - do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
22840 + do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
22841 if (do_copy)
22842 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
22843 else
22844 diff -urNp linux-2.6.32.41/block/blk-softirq.c linux-2.6.32.41/block/blk-softirq.c
22845 --- linux-2.6.32.41/block/blk-softirq.c 2011-03-27 14:31:47.000000000 -0400
22846 +++ linux-2.6.32.41/block/blk-softirq.c 2011-04-17 15:56:46.000000000 -0400
22847 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
22848 * Softirq action handler - move entries to local list and loop over them
22849 * while passing them to the queue registered handler.
22850 */
22851 -static void blk_done_softirq(struct softirq_action *h)
22852 +static void blk_done_softirq(void)
22853 {
22854 struct list_head *cpu_list, local_list;
22855
22856 diff -urNp linux-2.6.32.41/block/blk-sysfs.c linux-2.6.32.41/block/blk-sysfs.c
22857 --- linux-2.6.32.41/block/blk-sysfs.c 2011-05-10 22:12:01.000000000 -0400
22858 +++ linux-2.6.32.41/block/blk-sysfs.c 2011-05-10 22:12:26.000000000 -0400
22859 @@ -414,7 +414,7 @@ static void blk_release_queue(struct kob
22860 kmem_cache_free(blk_requestq_cachep, q);
22861 }
22862
22863 -static struct sysfs_ops queue_sysfs_ops = {
22864 +static const struct sysfs_ops queue_sysfs_ops = {
22865 .show = queue_attr_show,
22866 .store = queue_attr_store,
22867 };
22868 diff -urNp linux-2.6.32.41/block/bsg.c linux-2.6.32.41/block/bsg.c
22869 --- linux-2.6.32.41/block/bsg.c 2011-03-27 14:31:47.000000000 -0400
22870 +++ linux-2.6.32.41/block/bsg.c 2011-04-17 15:56:46.000000000 -0400
22871 @@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
22872 struct sg_io_v4 *hdr, struct bsg_device *bd,
22873 fmode_t has_write_perm)
22874 {
22875 + unsigned char tmpcmd[sizeof(rq->__cmd)];
22876 + unsigned char *cmdptr;
22877 +
22878 if (hdr->request_len > BLK_MAX_CDB) {
22879 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
22880 if (!rq->cmd)
22881 return -ENOMEM;
22882 - }
22883 + cmdptr = rq->cmd;
22884 + } else
22885 + cmdptr = tmpcmd;
22886
22887 - if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
22888 + if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
22889 hdr->request_len))
22890 return -EFAULT;
22891
22892 + if (cmdptr != rq->cmd)
22893 + memcpy(rq->cmd, cmdptr, hdr->request_len);
22894 +
22895 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
22896 if (blk_verify_command(rq->cmd, has_write_perm))
22897 return -EPERM;
22898 diff -urNp linux-2.6.32.41/block/elevator.c linux-2.6.32.41/block/elevator.c
22899 --- linux-2.6.32.41/block/elevator.c 2011-03-27 14:31:47.000000000 -0400
22900 +++ linux-2.6.32.41/block/elevator.c 2011-04-17 15:56:46.000000000 -0400
22901 @@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, str
22902 return error;
22903 }
22904
22905 -static struct sysfs_ops elv_sysfs_ops = {
22906 +static const struct sysfs_ops elv_sysfs_ops = {
22907 .show = elv_attr_show,
22908 .store = elv_attr_store,
22909 };
22910 diff -urNp linux-2.6.32.41/block/scsi_ioctl.c linux-2.6.32.41/block/scsi_ioctl.c
22911 --- linux-2.6.32.41/block/scsi_ioctl.c 2011-03-27 14:31:47.000000000 -0400
22912 +++ linux-2.6.32.41/block/scsi_ioctl.c 2011-04-23 13:28:22.000000000 -0400
22913 @@ -220,8 +220,20 @@ EXPORT_SYMBOL(blk_verify_command);
22914 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
22915 struct sg_io_hdr *hdr, fmode_t mode)
22916 {
22917 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
22918 + unsigned char tmpcmd[sizeof(rq->__cmd)];
22919 + unsigned char *cmdptr;
22920 +
22921 + if (rq->cmd != rq->__cmd)
22922 + cmdptr = rq->cmd;
22923 + else
22924 + cmdptr = tmpcmd;
22925 +
22926 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
22927 return -EFAULT;
22928 +
22929 + if (cmdptr != rq->cmd)
22930 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
22931 +
22932 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
22933 return -EPERM;
22934
22935 @@ -430,6 +442,8 @@ int sg_scsi_ioctl(struct request_queue *
22936 int err;
22937 unsigned int in_len, out_len, bytes, opcode, cmdlen;
22938 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
22939 + unsigned char tmpcmd[sizeof(rq->__cmd)];
22940 + unsigned char *cmdptr;
22941
22942 if (!sic)
22943 return -EINVAL;
22944 @@ -463,9 +477,18 @@ int sg_scsi_ioctl(struct request_queue *
22945 */
22946 err = -EFAULT;
22947 rq->cmd_len = cmdlen;
22948 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
22949 +
22950 + if (rq->cmd != rq->__cmd)
22951 + cmdptr = rq->cmd;
22952 + else
22953 + cmdptr = tmpcmd;
22954 +
22955 + if (copy_from_user(cmdptr, sic->data, cmdlen))
22956 goto error;
22957
22958 + if (rq->cmd != cmdptr)
22959 + memcpy(rq->cmd, cmdptr, cmdlen);
22960 +
22961 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
22962 goto error;
22963
22964 diff -urNp linux-2.6.32.41/crypto/serpent.c linux-2.6.32.41/crypto/serpent.c
22965 --- linux-2.6.32.41/crypto/serpent.c 2011-03-27 14:31:47.000000000 -0400
22966 +++ linux-2.6.32.41/crypto/serpent.c 2011-05-16 21:46:57.000000000 -0400
22967 @@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
22968 u32 r0,r1,r2,r3,r4;
22969 int i;
22970
22971 + pax_track_stack();
22972 +
22973 /* Copy key, add padding */
22974
22975 for (i = 0; i < keylen; ++i)
22976 diff -urNp linux-2.6.32.41/Documentation/dontdiff linux-2.6.32.41/Documentation/dontdiff
22977 --- linux-2.6.32.41/Documentation/dontdiff 2011-03-27 14:31:47.000000000 -0400
22978 +++ linux-2.6.32.41/Documentation/dontdiff 2011-05-18 20:09:36.000000000 -0400
22979 @@ -1,13 +1,16 @@
22980 *.a
22981 *.aux
22982 *.bin
22983 +*.cis
22984 *.cpio
22985 *.csp
22986 +*.dbg
22987 *.dsp
22988 *.dvi
22989 *.elf
22990 *.eps
22991 *.fw
22992 +*.gcno
22993 *.gen.S
22994 *.gif
22995 *.grep
22996 @@ -38,8 +41,10 @@
22997 *.tab.h
22998 *.tex
22999 *.ver
23000 +*.vim
23001 *.xml
23002 *_MODULES
23003 +*_reg_safe.h
23004 *_vga16.c
23005 *~
23006 *.9
23007 @@ -49,11 +54,16 @@
23008 53c700_d.h
23009 CVS
23010 ChangeSet
23011 +GPATH
23012 +GRTAGS
23013 +GSYMS
23014 +GTAGS
23015 Image
23016 Kerntypes
23017 Module.markers
23018 Module.symvers
23019 PENDING
23020 +PERF*
23021 SCCS
23022 System.map*
23023 TAGS
23024 @@ -76,7 +86,11 @@ btfixupprep
23025 build
23026 bvmlinux
23027 bzImage*
23028 +capability_names.h
23029 +capflags.c
23030 classlist.h*
23031 +clut_vga16.c
23032 +common-cmds.h
23033 comp*.log
23034 compile.h*
23035 conf
23036 @@ -103,13 +117,14 @@ gen_crc32table
23037 gen_init_cpio
23038 genksyms
23039 *_gray256.c
23040 +hash
23041 ihex2fw
23042 ikconfig.h*
23043 initramfs_data.cpio
23044 +initramfs_data.cpio.bz2
23045 initramfs_data.cpio.gz
23046 initramfs_list
23047 kallsyms
23048 -kconfig
23049 keywords.c
23050 ksym.c*
23051 ksym.h*
23052 @@ -133,7 +148,9 @@ mkboot
23053 mkbugboot
23054 mkcpustr
23055 mkdep
23056 +mkpiggy
23057 mkprep
23058 +mkregtable
23059 mktables
23060 mktree
23061 modpost
23062 @@ -149,6 +166,7 @@ patches*
23063 pca200e.bin
23064 pca200e_ecd.bin2
23065 piggy.gz
23066 +piggy.S
23067 piggyback
23068 pnmtologo
23069 ppc_defs.h*
23070 @@ -157,12 +175,15 @@ qconf
23071 raid6altivec*.c
23072 raid6int*.c
23073 raid6tables.c
23074 +regdb.c
23075 relocs
23076 +rlim_names.h
23077 series
23078 setup
23079 setup.bin
23080 setup.elf
23081 sImage
23082 +slabinfo
23083 sm_tbl*
23084 split-include
23085 syscalltab.h
23086 @@ -186,14 +207,20 @@ version.h*
23087 vmlinux
23088 vmlinux-*
23089 vmlinux.aout
23090 +vmlinux.bin.all
23091 +vmlinux.bin.bz2
23092 vmlinux.lds
23093 +vmlinux.relocs
23094 +voffset.h
23095 vsyscall.lds
23096 vsyscall_32.lds
23097 wanxlfw.inc
23098 uImage
23099 unifdef
23100 +utsrelease.h
23101 wakeup.bin
23102 wakeup.elf
23103 wakeup.lds
23104 zImage*
23105 zconf.hash.c
23106 +zoffset.h
23107 diff -urNp linux-2.6.32.41/Documentation/kernel-parameters.txt linux-2.6.32.41/Documentation/kernel-parameters.txt
23108 --- linux-2.6.32.41/Documentation/kernel-parameters.txt 2011-03-27 14:31:47.000000000 -0400
23109 +++ linux-2.6.32.41/Documentation/kernel-parameters.txt 2011-04-17 15:56:45.000000000 -0400
23110 @@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters.
23111 the specified number of seconds. This is to be used if
23112 your oopses keep scrolling off the screen.
23113
23114 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
23115 + virtualization environments that don't cope well with the
23116 + expand down segment used by UDEREF on X86-32 or the frequent
23117 + page table updates on X86-64.
23118 +
23119 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
23120 +
23121 pcbit= [HW,ISDN]
23122
23123 pcd. [PARIDE]
23124 diff -urNp linux-2.6.32.41/drivers/acpi/acpi_pad.c linux-2.6.32.41/drivers/acpi/acpi_pad.c
23125 --- linux-2.6.32.41/drivers/acpi/acpi_pad.c 2011-03-27 14:31:47.000000000 -0400
23126 +++ linux-2.6.32.41/drivers/acpi/acpi_pad.c 2011-04-17 15:56:46.000000000 -0400
23127 @@ -30,7 +30,7 @@
23128 #include <acpi/acpi_bus.h>
23129 #include <acpi/acpi_drivers.h>
23130
23131 -#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
23132 +#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
23133 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
23134 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
23135 static DEFINE_MUTEX(isolated_cpus_lock);
23136 diff -urNp linux-2.6.32.41/drivers/acpi/battery.c linux-2.6.32.41/drivers/acpi/battery.c
23137 --- linux-2.6.32.41/drivers/acpi/battery.c 2011-03-27 14:31:47.000000000 -0400
23138 +++ linux-2.6.32.41/drivers/acpi/battery.c 2011-04-17 15:56:46.000000000 -0400
23139 @@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
23140 }
23141
23142 static struct battery_file {
23143 - struct file_operations ops;
23144 + const struct file_operations ops;
23145 mode_t mode;
23146 const char *name;
23147 } acpi_battery_file[] = {
23148 diff -urNp linux-2.6.32.41/drivers/acpi/dock.c linux-2.6.32.41/drivers/acpi/dock.c
23149 --- linux-2.6.32.41/drivers/acpi/dock.c 2011-03-27 14:31:47.000000000 -0400
23150 +++ linux-2.6.32.41/drivers/acpi/dock.c 2011-04-17 15:56:46.000000000 -0400
23151 @@ -77,7 +77,7 @@ struct dock_dependent_device {
23152 struct list_head list;
23153 struct list_head hotplug_list;
23154 acpi_handle handle;
23155 - struct acpi_dock_ops *ops;
23156 + const struct acpi_dock_ops *ops;
23157 void *context;
23158 };
23159
23160 @@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifi
23161 * the dock driver after _DCK is executed.
23162 */
23163 int
23164 -register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
23165 +register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
23166 void *context)
23167 {
23168 struct dock_dependent_device *dd;
23169 diff -urNp linux-2.6.32.41/drivers/acpi/osl.c linux-2.6.32.41/drivers/acpi/osl.c
23170 --- linux-2.6.32.41/drivers/acpi/osl.c 2011-03-27 14:31:47.000000000 -0400
23171 +++ linux-2.6.32.41/drivers/acpi/osl.c 2011-04-17 15:56:46.000000000 -0400
23172 @@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_addres
23173 void __iomem *virt_addr;
23174
23175 virt_addr = ioremap(phys_addr, width);
23176 + if (!virt_addr)
23177 + return AE_NO_MEMORY;
23178 if (!value)
23179 value = &dummy;
23180
23181 @@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_addre
23182 void __iomem *virt_addr;
23183
23184 virt_addr = ioremap(phys_addr, width);
23185 + if (!virt_addr)
23186 + return AE_NO_MEMORY;
23187
23188 switch (width) {
23189 case 8:
23190 diff -urNp linux-2.6.32.41/drivers/acpi/power_meter.c linux-2.6.32.41/drivers/acpi/power_meter.c
23191 --- linux-2.6.32.41/drivers/acpi/power_meter.c 2011-03-27 14:31:47.000000000 -0400
23192 +++ linux-2.6.32.41/drivers/acpi/power_meter.c 2011-04-17 15:56:46.000000000 -0400
23193 @@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *d
23194 return res;
23195
23196 temp /= 1000;
23197 - if (temp < 0)
23198 - return -EINVAL;
23199
23200 mutex_lock(&resource->lock);
23201 resource->trip[attr->index - 7] = temp;
23202 diff -urNp linux-2.6.32.41/drivers/acpi/proc.c linux-2.6.32.41/drivers/acpi/proc.c
23203 --- linux-2.6.32.41/drivers/acpi/proc.c 2011-03-27 14:31:47.000000000 -0400
23204 +++ linux-2.6.32.41/drivers/acpi/proc.c 2011-04-17 15:56:46.000000000 -0400
23205 @@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct f
23206 size_t count, loff_t * ppos)
23207 {
23208 struct list_head *node, *next;
23209 - char strbuf[5];
23210 - char str[5] = "";
23211 - unsigned int len = count;
23212 + char strbuf[5] = {0};
23213 struct acpi_device *found_dev = NULL;
23214
23215 - if (len > 4)
23216 - len = 4;
23217 - if (len < 0)
23218 - return -EFAULT;
23219 + if (count > 4)
23220 + count = 4;
23221
23222 - if (copy_from_user(strbuf, buffer, len))
23223 + if (copy_from_user(strbuf, buffer, count))
23224 return -EFAULT;
23225 - strbuf[len] = '\0';
23226 - sscanf(strbuf, "%s", str);
23227 + strbuf[count] = '\0';
23228
23229 mutex_lock(&acpi_device_lock);
23230 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
23231 @@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct f
23232 if (!dev->wakeup.flags.valid)
23233 continue;
23234
23235 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
23236 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
23237 dev->wakeup.state.enabled =
23238 dev->wakeup.state.enabled ? 0 : 1;
23239 found_dev = dev;
23240 diff -urNp linux-2.6.32.41/drivers/acpi/processor_core.c linux-2.6.32.41/drivers/acpi/processor_core.c
23241 --- linux-2.6.32.41/drivers/acpi/processor_core.c 2011-03-27 14:31:47.000000000 -0400
23242 +++ linux-2.6.32.41/drivers/acpi/processor_core.c 2011-04-17 15:56:46.000000000 -0400
23243 @@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(
23244 return 0;
23245 }
23246
23247 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
23248 + BUG_ON(pr->id >= nr_cpu_ids);
23249
23250 /*
23251 * Buggy BIOS check
23252 diff -urNp linux-2.6.32.41/drivers/acpi/sbshc.c linux-2.6.32.41/drivers/acpi/sbshc.c
23253 --- linux-2.6.32.41/drivers/acpi/sbshc.c 2011-03-27 14:31:47.000000000 -0400
23254 +++ linux-2.6.32.41/drivers/acpi/sbshc.c 2011-04-17 15:56:46.000000000 -0400
23255 @@ -17,7 +17,7 @@
23256
23257 #define PREFIX "ACPI: "
23258
23259 -#define ACPI_SMB_HC_CLASS "smbus_host_controller"
23260 +#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
23261 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
23262
23263 struct acpi_smb_hc {
23264 diff -urNp linux-2.6.32.41/drivers/acpi/sleep.c linux-2.6.32.41/drivers/acpi/sleep.c
23265 --- linux-2.6.32.41/drivers/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
23266 +++ linux-2.6.32.41/drivers/acpi/sleep.c 2011-04-17 15:56:46.000000000 -0400
23267 @@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(susp
23268 }
23269 }
23270
23271 -static struct platform_suspend_ops acpi_suspend_ops = {
23272 +static const struct platform_suspend_ops acpi_suspend_ops = {
23273 .valid = acpi_suspend_state_valid,
23274 .begin = acpi_suspend_begin,
23275 .prepare_late = acpi_pm_prepare,
23276 @@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspen
23277 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
23278 * been requested.
23279 */
23280 -static struct platform_suspend_ops acpi_suspend_ops_old = {
23281 +static const struct platform_suspend_ops acpi_suspend_ops_old = {
23282 .valid = acpi_suspend_state_valid,
23283 .begin = acpi_suspend_begin_old,
23284 .prepare_late = acpi_pm_disable_gpes,
23285 @@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
23286 acpi_enable_all_runtime_gpes();
23287 }
23288
23289 -static struct platform_hibernation_ops acpi_hibernation_ops = {
23290 +static const struct platform_hibernation_ops acpi_hibernation_ops = {
23291 .begin = acpi_hibernation_begin,
23292 .end = acpi_pm_end,
23293 .pre_snapshot = acpi_hibernation_pre_snapshot,
23294 @@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot
23295 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
23296 * been requested.
23297 */
23298 -static struct platform_hibernation_ops acpi_hibernation_ops_old = {
23299 +static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
23300 .begin = acpi_hibernation_begin_old,
23301 .end = acpi_pm_end,
23302 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
23303 diff -urNp linux-2.6.32.41/drivers/acpi/video.c linux-2.6.32.41/drivers/acpi/video.c
23304 --- linux-2.6.32.41/drivers/acpi/video.c 2011-03-27 14:31:47.000000000 -0400
23305 +++ linux-2.6.32.41/drivers/acpi/video.c 2011-04-17 15:56:46.000000000 -0400
23306 @@ -359,7 +359,7 @@ static int acpi_video_set_brightness(str
23307 vd->brightness->levels[request_level]);
23308 }
23309
23310 -static struct backlight_ops acpi_backlight_ops = {
23311 +static const struct backlight_ops acpi_backlight_ops = {
23312 .get_brightness = acpi_video_get_brightness,
23313 .update_status = acpi_video_set_brightness,
23314 };
23315 diff -urNp linux-2.6.32.41/drivers/ata/ahci.c linux-2.6.32.41/drivers/ata/ahci.c
23316 --- linux-2.6.32.41/drivers/ata/ahci.c 2011-03-27 14:31:47.000000000 -0400
23317 +++ linux-2.6.32.41/drivers/ata/ahci.c 2011-04-23 12:56:10.000000000 -0400
23318 @@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sh
23319 .sdev_attrs = ahci_sdev_attrs,
23320 };
23321
23322 -static struct ata_port_operations ahci_ops = {
23323 +static const struct ata_port_operations ahci_ops = {
23324 .inherits = &sata_pmp_port_ops,
23325
23326 .qc_defer = sata_pmp_qc_defer_cmd_switch,
23327 @@ -424,17 +424,17 @@ static struct ata_port_operations ahci_o
23328 .port_stop = ahci_port_stop,
23329 };
23330
23331 -static struct ata_port_operations ahci_vt8251_ops = {
23332 +static const struct ata_port_operations ahci_vt8251_ops = {
23333 .inherits = &ahci_ops,
23334 .hardreset = ahci_vt8251_hardreset,
23335 };
23336
23337 -static struct ata_port_operations ahci_p5wdh_ops = {
23338 +static const struct ata_port_operations ahci_p5wdh_ops = {
23339 .inherits = &ahci_ops,
23340 .hardreset = ahci_p5wdh_hardreset,
23341 };
23342
23343 -static struct ata_port_operations ahci_sb600_ops = {
23344 +static const struct ata_port_operations ahci_sb600_ops = {
23345 .inherits = &ahci_ops,
23346 .softreset = ahci_sb600_softreset,
23347 .pmp_softreset = ahci_sb600_softreset,
23348 diff -urNp linux-2.6.32.41/drivers/ata/ata_generic.c linux-2.6.32.41/drivers/ata/ata_generic.c
23349 --- linux-2.6.32.41/drivers/ata/ata_generic.c 2011-03-27 14:31:47.000000000 -0400
23350 +++ linux-2.6.32.41/drivers/ata/ata_generic.c 2011-04-17 15:56:46.000000000 -0400
23351 @@ -104,7 +104,7 @@ static struct scsi_host_template generic
23352 ATA_BMDMA_SHT(DRV_NAME),
23353 };
23354
23355 -static struct ata_port_operations generic_port_ops = {
23356 +static const struct ata_port_operations generic_port_ops = {
23357 .inherits = &ata_bmdma_port_ops,
23358 .cable_detect = ata_cable_unknown,
23359 .set_mode = generic_set_mode,
23360 diff -urNp linux-2.6.32.41/drivers/ata/ata_piix.c linux-2.6.32.41/drivers/ata/ata_piix.c
23361 --- linux-2.6.32.41/drivers/ata/ata_piix.c 2011-03-27 14:31:47.000000000 -0400
23362 +++ linux-2.6.32.41/drivers/ata/ata_piix.c 2011-04-23 12:56:10.000000000 -0400
23363 @@ -318,7 +318,7 @@ static struct scsi_host_template piix_sh
23364 ATA_BMDMA_SHT(DRV_NAME),
23365 };
23366
23367 -static struct ata_port_operations piix_pata_ops = {
23368 +static const struct ata_port_operations piix_pata_ops = {
23369 .inherits = &ata_bmdma32_port_ops,
23370 .cable_detect = ata_cable_40wire,
23371 .set_piomode = piix_set_piomode,
23372 @@ -326,22 +326,22 @@ static struct ata_port_operations piix_p
23373 .prereset = piix_pata_prereset,
23374 };
23375
23376 -static struct ata_port_operations piix_vmw_ops = {
23377 +static const struct ata_port_operations piix_vmw_ops = {
23378 .inherits = &piix_pata_ops,
23379 .bmdma_status = piix_vmw_bmdma_status,
23380 };
23381
23382 -static struct ata_port_operations ich_pata_ops = {
23383 +static const struct ata_port_operations ich_pata_ops = {
23384 .inherits = &piix_pata_ops,
23385 .cable_detect = ich_pata_cable_detect,
23386 .set_dmamode = ich_set_dmamode,
23387 };
23388
23389 -static struct ata_port_operations piix_sata_ops = {
23390 +static const struct ata_port_operations piix_sata_ops = {
23391 .inherits = &ata_bmdma_port_ops,
23392 };
23393
23394 -static struct ata_port_operations piix_sidpr_sata_ops = {
23395 +static const struct ata_port_operations piix_sidpr_sata_ops = {
23396 .inherits = &piix_sata_ops,
23397 .hardreset = sata_std_hardreset,
23398 .scr_read = piix_sidpr_scr_read,
23399 diff -urNp linux-2.6.32.41/drivers/ata/libata-acpi.c linux-2.6.32.41/drivers/ata/libata-acpi.c
23400 --- linux-2.6.32.41/drivers/ata/libata-acpi.c 2011-03-27 14:31:47.000000000 -0400
23401 +++ linux-2.6.32.41/drivers/ata/libata-acpi.c 2011-04-17 15:56:46.000000000 -0400
23402 @@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_han
23403 ata_acpi_uevent(dev->link->ap, dev, event);
23404 }
23405
23406 -static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
23407 +static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
23408 .handler = ata_acpi_dev_notify_dock,
23409 .uevent = ata_acpi_dev_uevent,
23410 };
23411
23412 -static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
23413 +static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
23414 .handler = ata_acpi_ap_notify_dock,
23415 .uevent = ata_acpi_ap_uevent,
23416 };
23417 diff -urNp linux-2.6.32.41/drivers/ata/libata-core.c linux-2.6.32.41/drivers/ata/libata-core.c
23418 --- linux-2.6.32.41/drivers/ata/libata-core.c 2011-03-27 14:31:47.000000000 -0400
23419 +++ linux-2.6.32.41/drivers/ata/libata-core.c 2011-04-23 12:56:10.000000000 -0400
23420 @@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *
23421 struct ata_port *ap;
23422 unsigned int tag;
23423
23424 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23425 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23426 ap = qc->ap;
23427
23428 qc->flags = 0;
23429 @@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued
23430 struct ata_port *ap;
23431 struct ata_link *link;
23432
23433 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23434 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23435 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
23436 ap = qc->ap;
23437 link = qc->dev->link;
23438 @@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device
23439 * LOCKING:
23440 * None.
23441 */
23442 -static void ata_finalize_port_ops(struct ata_port_operations *ops)
23443 +static void ata_finalize_port_ops(const struct ata_port_operations *ops)
23444 {
23445 static DEFINE_SPINLOCK(lock);
23446 const struct ata_port_operations *cur;
23447 @@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct
23448 return;
23449
23450 spin_lock(&lock);
23451 + pax_open_kernel();
23452
23453 for (cur = ops->inherits; cur; cur = cur->inherits) {
23454 void **inherit = (void **)cur;
23455 @@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct
23456 if (IS_ERR(*pp))
23457 *pp = NULL;
23458
23459 - ops->inherits = NULL;
23460 + ((struct ata_port_operations *)ops)->inherits = NULL;
23461
23462 + pax_close_kernel();
23463 spin_unlock(&lock);
23464 }
23465
23466 @@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host
23467 */
23468 /* KILLME - the only user left is ipr */
23469 void ata_host_init(struct ata_host *host, struct device *dev,
23470 - unsigned long flags, struct ata_port_operations *ops)
23471 + unsigned long flags, const struct ata_port_operations *ops)
23472 {
23473 spin_lock_init(&host->lock);
23474 host->dev = dev;
23475 @@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(stru
23476 /* truly dummy */
23477 }
23478
23479 -struct ata_port_operations ata_dummy_port_ops = {
23480 +const struct ata_port_operations ata_dummy_port_ops = {
23481 .qc_prep = ata_noop_qc_prep,
23482 .qc_issue = ata_dummy_qc_issue,
23483 .error_handler = ata_dummy_error_handler,
23484 diff -urNp linux-2.6.32.41/drivers/ata/libata-eh.c linux-2.6.32.41/drivers/ata/libata-eh.c
23485 --- linux-2.6.32.41/drivers/ata/libata-eh.c 2011-03-27 14:31:47.000000000 -0400
23486 +++ linux-2.6.32.41/drivers/ata/libata-eh.c 2011-05-16 21:46:57.000000000 -0400
23487 @@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
23488 {
23489 struct ata_link *link;
23490
23491 + pax_track_stack();
23492 +
23493 ata_for_each_link(link, ap, HOST_FIRST)
23494 ata_eh_link_report(link);
23495 }
23496 @@ -3590,7 +3592,7 @@ void ata_do_eh(struct ata_port *ap, ata_
23497 */
23498 void ata_std_error_handler(struct ata_port *ap)
23499 {
23500 - struct ata_port_operations *ops = ap->ops;
23501 + const struct ata_port_operations *ops = ap->ops;
23502 ata_reset_fn_t hardreset = ops->hardreset;
23503
23504 /* ignore built-in hardreset if SCR access is not available */
23505 diff -urNp linux-2.6.32.41/drivers/ata/libata-pmp.c linux-2.6.32.41/drivers/ata/libata-pmp.c
23506 --- linux-2.6.32.41/drivers/ata/libata-pmp.c 2011-03-27 14:31:47.000000000 -0400
23507 +++ linux-2.6.32.41/drivers/ata/libata-pmp.c 2011-04-17 15:56:46.000000000 -0400
23508 @@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(str
23509 */
23510 static int sata_pmp_eh_recover(struct ata_port *ap)
23511 {
23512 - struct ata_port_operations *ops = ap->ops;
23513 + const struct ata_port_operations *ops = ap->ops;
23514 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
23515 struct ata_link *pmp_link = &ap->link;
23516 struct ata_device *pmp_dev = pmp_link->device;
23517 diff -urNp linux-2.6.32.41/drivers/ata/pata_acpi.c linux-2.6.32.41/drivers/ata/pata_acpi.c
23518 --- linux-2.6.32.41/drivers/ata/pata_acpi.c 2011-03-27 14:31:47.000000000 -0400
23519 +++ linux-2.6.32.41/drivers/ata/pata_acpi.c 2011-04-17 15:56:46.000000000 -0400
23520 @@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_s
23521 ATA_BMDMA_SHT(DRV_NAME),
23522 };
23523
23524 -static struct ata_port_operations pacpi_ops = {
23525 +static const struct ata_port_operations pacpi_ops = {
23526 .inherits = &ata_bmdma_port_ops,
23527 .qc_issue = pacpi_qc_issue,
23528 .cable_detect = pacpi_cable_detect,
23529 diff -urNp linux-2.6.32.41/drivers/ata/pata_ali.c linux-2.6.32.41/drivers/ata/pata_ali.c
23530 --- linux-2.6.32.41/drivers/ata/pata_ali.c 2011-03-27 14:31:47.000000000 -0400
23531 +++ linux-2.6.32.41/drivers/ata/pata_ali.c 2011-04-17 15:56:46.000000000 -0400
23532 @@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht
23533 * Port operations for PIO only ALi
23534 */
23535
23536 -static struct ata_port_operations ali_early_port_ops = {
23537 +static const struct ata_port_operations ali_early_port_ops = {
23538 .inherits = &ata_sff_port_ops,
23539 .cable_detect = ata_cable_40wire,
23540 .set_piomode = ali_set_piomode,
23541 @@ -382,7 +382,7 @@ static const struct ata_port_operations
23542 * Port operations for DMA capable ALi without cable
23543 * detect
23544 */
23545 -static struct ata_port_operations ali_20_port_ops = {
23546 +static const struct ata_port_operations ali_20_port_ops = {
23547 .inherits = &ali_dma_base_ops,
23548 .cable_detect = ata_cable_40wire,
23549 .mode_filter = ali_20_filter,
23550 @@ -393,7 +393,7 @@ static struct ata_port_operations ali_20
23551 /*
23552 * Port operations for DMA capable ALi with cable detect
23553 */
23554 -static struct ata_port_operations ali_c2_port_ops = {
23555 +static const struct ata_port_operations ali_c2_port_ops = {
23556 .inherits = &ali_dma_base_ops,
23557 .check_atapi_dma = ali_check_atapi_dma,
23558 .cable_detect = ali_c2_cable_detect,
23559 @@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2
23560 /*
23561 * Port operations for DMA capable ALi with cable detect
23562 */
23563 -static struct ata_port_operations ali_c4_port_ops = {
23564 +static const struct ata_port_operations ali_c4_port_ops = {
23565 .inherits = &ali_dma_base_ops,
23566 .check_atapi_dma = ali_check_atapi_dma,
23567 .cable_detect = ali_c2_cable_detect,
23568 @@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4
23569 /*
23570 * Port operations for DMA capable ALi with cable detect and LBA48
23571 */
23572 -static struct ata_port_operations ali_c5_port_ops = {
23573 +static const struct ata_port_operations ali_c5_port_ops = {
23574 .inherits = &ali_dma_base_ops,
23575 .check_atapi_dma = ali_check_atapi_dma,
23576 .dev_config = ali_warn_atapi_dma,
23577 diff -urNp linux-2.6.32.41/drivers/ata/pata_amd.c linux-2.6.32.41/drivers/ata/pata_amd.c
23578 --- linux-2.6.32.41/drivers/ata/pata_amd.c 2011-03-27 14:31:47.000000000 -0400
23579 +++ linux-2.6.32.41/drivers/ata/pata_amd.c 2011-04-17 15:56:46.000000000 -0400
23580 @@ -397,28 +397,28 @@ static const struct ata_port_operations
23581 .prereset = amd_pre_reset,
23582 };
23583
23584 -static struct ata_port_operations amd33_port_ops = {
23585 +static const struct ata_port_operations amd33_port_ops = {
23586 .inherits = &amd_base_port_ops,
23587 .cable_detect = ata_cable_40wire,
23588 .set_piomode = amd33_set_piomode,
23589 .set_dmamode = amd33_set_dmamode,
23590 };
23591
23592 -static struct ata_port_operations amd66_port_ops = {
23593 +static const struct ata_port_operations amd66_port_ops = {
23594 .inherits = &amd_base_port_ops,
23595 .cable_detect = ata_cable_unknown,
23596 .set_piomode = amd66_set_piomode,
23597 .set_dmamode = amd66_set_dmamode,
23598 };
23599
23600 -static struct ata_port_operations amd100_port_ops = {
23601 +static const struct ata_port_operations amd100_port_ops = {
23602 .inherits = &amd_base_port_ops,
23603 .cable_detect = ata_cable_unknown,
23604 .set_piomode = amd100_set_piomode,
23605 .set_dmamode = amd100_set_dmamode,
23606 };
23607
23608 -static struct ata_port_operations amd133_port_ops = {
23609 +static const struct ata_port_operations amd133_port_ops = {
23610 .inherits = &amd_base_port_ops,
23611 .cable_detect = amd_cable_detect,
23612 .set_piomode = amd133_set_piomode,
23613 @@ -433,13 +433,13 @@ static const struct ata_port_operations
23614 .host_stop = nv_host_stop,
23615 };
23616
23617 -static struct ata_port_operations nv100_port_ops = {
23618 +static const struct ata_port_operations nv100_port_ops = {
23619 .inherits = &nv_base_port_ops,
23620 .set_piomode = nv100_set_piomode,
23621 .set_dmamode = nv100_set_dmamode,
23622 };
23623
23624 -static struct ata_port_operations nv133_port_ops = {
23625 +static const struct ata_port_operations nv133_port_ops = {
23626 .inherits = &nv_base_port_ops,
23627 .set_piomode = nv133_set_piomode,
23628 .set_dmamode = nv133_set_dmamode,
23629 diff -urNp linux-2.6.32.41/drivers/ata/pata_artop.c linux-2.6.32.41/drivers/ata/pata_artop.c
23630 --- linux-2.6.32.41/drivers/ata/pata_artop.c 2011-03-27 14:31:47.000000000 -0400
23631 +++ linux-2.6.32.41/drivers/ata/pata_artop.c 2011-04-17 15:56:46.000000000 -0400
23632 @@ -311,7 +311,7 @@ static struct scsi_host_template artop_s
23633 ATA_BMDMA_SHT(DRV_NAME),
23634 };
23635
23636 -static struct ata_port_operations artop6210_ops = {
23637 +static const struct ata_port_operations artop6210_ops = {
23638 .inherits = &ata_bmdma_port_ops,
23639 .cable_detect = ata_cable_40wire,
23640 .set_piomode = artop6210_set_piomode,
23641 @@ -320,7 +320,7 @@ static struct ata_port_operations artop6
23642 .qc_defer = artop6210_qc_defer,
23643 };
23644
23645 -static struct ata_port_operations artop6260_ops = {
23646 +static const struct ata_port_operations artop6260_ops = {
23647 .inherits = &ata_bmdma_port_ops,
23648 .cable_detect = artop6260_cable_detect,
23649 .set_piomode = artop6260_set_piomode,
23650 diff -urNp linux-2.6.32.41/drivers/ata/pata_at32.c linux-2.6.32.41/drivers/ata/pata_at32.c
23651 --- linux-2.6.32.41/drivers/ata/pata_at32.c 2011-03-27 14:31:47.000000000 -0400
23652 +++ linux-2.6.32.41/drivers/ata/pata_at32.c 2011-04-17 15:56:46.000000000 -0400
23653 @@ -172,7 +172,7 @@ static struct scsi_host_template at32_sh
23654 ATA_PIO_SHT(DRV_NAME),
23655 };
23656
23657 -static struct ata_port_operations at32_port_ops = {
23658 +static const struct ata_port_operations at32_port_ops = {
23659 .inherits = &ata_sff_port_ops,
23660 .cable_detect = ata_cable_40wire,
23661 .set_piomode = pata_at32_set_piomode,
23662 diff -urNp linux-2.6.32.41/drivers/ata/pata_at91.c linux-2.6.32.41/drivers/ata/pata_at91.c
23663 --- linux-2.6.32.41/drivers/ata/pata_at91.c 2011-03-27 14:31:47.000000000 -0400
23664 +++ linux-2.6.32.41/drivers/ata/pata_at91.c 2011-04-17 15:56:46.000000000 -0400
23665 @@ -195,7 +195,7 @@ static struct scsi_host_template pata_at
23666 ATA_PIO_SHT(DRV_NAME),
23667 };
23668
23669 -static struct ata_port_operations pata_at91_port_ops = {
23670 +static const struct ata_port_operations pata_at91_port_ops = {
23671 .inherits = &ata_sff_port_ops,
23672
23673 .sff_data_xfer = pata_at91_data_xfer_noirq,
23674 diff -urNp linux-2.6.32.41/drivers/ata/pata_atiixp.c linux-2.6.32.41/drivers/ata/pata_atiixp.c
23675 --- linux-2.6.32.41/drivers/ata/pata_atiixp.c 2011-03-27 14:31:47.000000000 -0400
23676 +++ linux-2.6.32.41/drivers/ata/pata_atiixp.c 2011-04-17 15:56:46.000000000 -0400
23677 @@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_
23678 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
23679 };
23680
23681 -static struct ata_port_operations atiixp_port_ops = {
23682 +static const struct ata_port_operations atiixp_port_ops = {
23683 .inherits = &ata_bmdma_port_ops,
23684
23685 .qc_prep = ata_sff_dumb_qc_prep,
23686 diff -urNp linux-2.6.32.41/drivers/ata/pata_atp867x.c linux-2.6.32.41/drivers/ata/pata_atp867x.c
23687 --- linux-2.6.32.41/drivers/ata/pata_atp867x.c 2011-03-27 14:31:47.000000000 -0400
23688 +++ linux-2.6.32.41/drivers/ata/pata_atp867x.c 2011-04-17 15:56:46.000000000 -0400
23689 @@ -274,7 +274,7 @@ static struct scsi_host_template atp867x
23690 ATA_BMDMA_SHT(DRV_NAME),
23691 };
23692
23693 -static struct ata_port_operations atp867x_ops = {
23694 +static const struct ata_port_operations atp867x_ops = {
23695 .inherits = &ata_bmdma_port_ops,
23696 .cable_detect = atp867x_cable_detect,
23697 .set_piomode = atp867x_set_piomode,
23698 diff -urNp linux-2.6.32.41/drivers/ata/pata_bf54x.c linux-2.6.32.41/drivers/ata/pata_bf54x.c
23699 --- linux-2.6.32.41/drivers/ata/pata_bf54x.c 2011-03-27 14:31:47.000000000 -0400
23700 +++ linux-2.6.32.41/drivers/ata/pata_bf54x.c 2011-04-17 15:56:46.000000000 -0400
23701 @@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sh
23702 .dma_boundary = ATA_DMA_BOUNDARY,
23703 };
23704
23705 -static struct ata_port_operations bfin_pata_ops = {
23706 +static const struct ata_port_operations bfin_pata_ops = {
23707 .inherits = &ata_sff_port_ops,
23708
23709 .set_piomode = bfin_set_piomode,
23710 diff -urNp linux-2.6.32.41/drivers/ata/pata_cmd640.c linux-2.6.32.41/drivers/ata/pata_cmd640.c
23711 --- linux-2.6.32.41/drivers/ata/pata_cmd640.c 2011-03-27 14:31:47.000000000 -0400
23712 +++ linux-2.6.32.41/drivers/ata/pata_cmd640.c 2011-04-17 15:56:46.000000000 -0400
23713 @@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_
23714 ATA_BMDMA_SHT(DRV_NAME),
23715 };
23716
23717 -static struct ata_port_operations cmd640_port_ops = {
23718 +static const struct ata_port_operations cmd640_port_ops = {
23719 .inherits = &ata_bmdma_port_ops,
23720 /* In theory xfer_noirq is not needed once we kill the prefetcher */
23721 .sff_data_xfer = ata_sff_data_xfer_noirq,
23722 diff -urNp linux-2.6.32.41/drivers/ata/pata_cmd64x.c linux-2.6.32.41/drivers/ata/pata_cmd64x.c
23723 --- linux-2.6.32.41/drivers/ata/pata_cmd64x.c 2011-03-27 14:31:47.000000000 -0400
23724 +++ linux-2.6.32.41/drivers/ata/pata_cmd64x.c 2011-04-17 15:56:46.000000000 -0400
23725 @@ -275,18 +275,18 @@ static const struct ata_port_operations
23726 .set_dmamode = cmd64x_set_dmamode,
23727 };
23728
23729 -static struct ata_port_operations cmd64x_port_ops = {
23730 +static const struct ata_port_operations cmd64x_port_ops = {
23731 .inherits = &cmd64x_base_ops,
23732 .cable_detect = ata_cable_40wire,
23733 };
23734
23735 -static struct ata_port_operations cmd646r1_port_ops = {
23736 +static const struct ata_port_operations cmd646r1_port_ops = {
23737 .inherits = &cmd64x_base_ops,
23738 .bmdma_stop = cmd646r1_bmdma_stop,
23739 .cable_detect = ata_cable_40wire,
23740 };
23741
23742 -static struct ata_port_operations cmd648_port_ops = {
23743 +static const struct ata_port_operations cmd648_port_ops = {
23744 .inherits = &cmd64x_base_ops,
23745 .bmdma_stop = cmd648_bmdma_stop,
23746 .cable_detect = cmd648_cable_detect,
23747 diff -urNp linux-2.6.32.41/drivers/ata/pata_cs5520.c linux-2.6.32.41/drivers/ata/pata_cs5520.c
23748 --- linux-2.6.32.41/drivers/ata/pata_cs5520.c 2011-03-27 14:31:47.000000000 -0400
23749 +++ linux-2.6.32.41/drivers/ata/pata_cs5520.c 2011-04-17 15:56:46.000000000 -0400
23750 @@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_
23751 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
23752 };
23753
23754 -static struct ata_port_operations cs5520_port_ops = {
23755 +static const struct ata_port_operations cs5520_port_ops = {
23756 .inherits = &ata_bmdma_port_ops,
23757 .qc_prep = ata_sff_dumb_qc_prep,
23758 .cable_detect = ata_cable_40wire,
23759 diff -urNp linux-2.6.32.41/drivers/ata/pata_cs5530.c linux-2.6.32.41/drivers/ata/pata_cs5530.c
23760 --- linux-2.6.32.41/drivers/ata/pata_cs5530.c 2011-03-27 14:31:47.000000000 -0400
23761 +++ linux-2.6.32.41/drivers/ata/pata_cs5530.c 2011-04-17 15:56:46.000000000 -0400
23762 @@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_
23763 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
23764 };
23765
23766 -static struct ata_port_operations cs5530_port_ops = {
23767 +static const struct ata_port_operations cs5530_port_ops = {
23768 .inherits = &ata_bmdma_port_ops,
23769
23770 .qc_prep = ata_sff_dumb_qc_prep,
23771 diff -urNp linux-2.6.32.41/drivers/ata/pata_cs5535.c linux-2.6.32.41/drivers/ata/pata_cs5535.c
23772 --- linux-2.6.32.41/drivers/ata/pata_cs5535.c 2011-03-27 14:31:47.000000000 -0400
23773 +++ linux-2.6.32.41/drivers/ata/pata_cs5535.c 2011-04-17 15:56:46.000000000 -0400
23774 @@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_
23775 ATA_BMDMA_SHT(DRV_NAME),
23776 };
23777
23778 -static struct ata_port_operations cs5535_port_ops = {
23779 +static const struct ata_port_operations cs5535_port_ops = {
23780 .inherits = &ata_bmdma_port_ops,
23781 .cable_detect = cs5535_cable_detect,
23782 .set_piomode = cs5535_set_piomode,
23783 diff -urNp linux-2.6.32.41/drivers/ata/pata_cs5536.c linux-2.6.32.41/drivers/ata/pata_cs5536.c
23784 --- linux-2.6.32.41/drivers/ata/pata_cs5536.c 2011-03-27 14:31:47.000000000 -0400
23785 +++ linux-2.6.32.41/drivers/ata/pata_cs5536.c 2011-04-17 15:56:46.000000000 -0400
23786 @@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_
23787 ATA_BMDMA_SHT(DRV_NAME),
23788 };
23789
23790 -static struct ata_port_operations cs5536_port_ops = {
23791 +static const struct ata_port_operations cs5536_port_ops = {
23792 .inherits = &ata_bmdma_port_ops,
23793 .cable_detect = cs5536_cable_detect,
23794 .set_piomode = cs5536_set_piomode,
23795 diff -urNp linux-2.6.32.41/drivers/ata/pata_cypress.c linux-2.6.32.41/drivers/ata/pata_cypress.c
23796 --- linux-2.6.32.41/drivers/ata/pata_cypress.c 2011-03-27 14:31:47.000000000 -0400
23797 +++ linux-2.6.32.41/drivers/ata/pata_cypress.c 2011-04-17 15:56:46.000000000 -0400
23798 @@ -113,7 +113,7 @@ static struct scsi_host_template cy82c69
23799 ATA_BMDMA_SHT(DRV_NAME),
23800 };
23801
23802 -static struct ata_port_operations cy82c693_port_ops = {
23803 +static const struct ata_port_operations cy82c693_port_ops = {
23804 .inherits = &ata_bmdma_port_ops,
23805 .cable_detect = ata_cable_40wire,
23806 .set_piomode = cy82c693_set_piomode,
23807 diff -urNp linux-2.6.32.41/drivers/ata/pata_efar.c linux-2.6.32.41/drivers/ata/pata_efar.c
23808 --- linux-2.6.32.41/drivers/ata/pata_efar.c 2011-03-27 14:31:47.000000000 -0400
23809 +++ linux-2.6.32.41/drivers/ata/pata_efar.c 2011-04-17 15:56:46.000000000 -0400
23810 @@ -222,7 +222,7 @@ static struct scsi_host_template efar_sh
23811 ATA_BMDMA_SHT(DRV_NAME),
23812 };
23813
23814 -static struct ata_port_operations efar_ops = {
23815 +static const struct ata_port_operations efar_ops = {
23816 .inherits = &ata_bmdma_port_ops,
23817 .cable_detect = efar_cable_detect,
23818 .set_piomode = efar_set_piomode,
23819 diff -urNp linux-2.6.32.41/drivers/ata/pata_hpt366.c linux-2.6.32.41/drivers/ata/pata_hpt366.c
23820 --- linux-2.6.32.41/drivers/ata/pata_hpt366.c 2011-03-27 14:31:47.000000000 -0400
23821 +++ linux-2.6.32.41/drivers/ata/pata_hpt366.c 2011-04-17 15:56:46.000000000 -0400
23822 @@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_
23823 * Configuration for HPT366/68
23824 */
23825
23826 -static struct ata_port_operations hpt366_port_ops = {
23827 +static const struct ata_port_operations hpt366_port_ops = {
23828 .inherits = &ata_bmdma_port_ops,
23829 .cable_detect = hpt36x_cable_detect,
23830 .mode_filter = hpt366_filter,
23831 diff -urNp linux-2.6.32.41/drivers/ata/pata_hpt37x.c linux-2.6.32.41/drivers/ata/pata_hpt37x.c
23832 --- linux-2.6.32.41/drivers/ata/pata_hpt37x.c 2011-03-27 14:31:47.000000000 -0400
23833 +++ linux-2.6.32.41/drivers/ata/pata_hpt37x.c 2011-04-17 15:56:46.000000000 -0400
23834 @@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_
23835 * Configuration for HPT370
23836 */
23837
23838 -static struct ata_port_operations hpt370_port_ops = {
23839 +static const struct ata_port_operations hpt370_port_ops = {
23840 .inherits = &ata_bmdma_port_ops,
23841
23842 .bmdma_stop = hpt370_bmdma_stop,
23843 @@ -591,7 +591,7 @@ static struct ata_port_operations hpt370
23844 * Configuration for HPT370A. Close to 370 but less filters
23845 */
23846
23847 -static struct ata_port_operations hpt370a_port_ops = {
23848 +static const struct ata_port_operations hpt370a_port_ops = {
23849 .inherits = &hpt370_port_ops,
23850 .mode_filter = hpt370a_filter,
23851 };
23852 @@ -601,7 +601,7 @@ static struct ata_port_operations hpt370
23853 * and DMA mode setting functionality.
23854 */
23855
23856 -static struct ata_port_operations hpt372_port_ops = {
23857 +static const struct ata_port_operations hpt372_port_ops = {
23858 .inherits = &ata_bmdma_port_ops,
23859
23860 .bmdma_stop = hpt37x_bmdma_stop,
23861 @@ -616,7 +616,7 @@ static struct ata_port_operations hpt372
23862 * but we have a different cable detection procedure for function 1.
23863 */
23864
23865 -static struct ata_port_operations hpt374_fn1_port_ops = {
23866 +static const struct ata_port_operations hpt374_fn1_port_ops = {
23867 .inherits = &hpt372_port_ops,
23868 .prereset = hpt374_fn1_pre_reset,
23869 };
23870 diff -urNp linux-2.6.32.41/drivers/ata/pata_hpt3x2n.c linux-2.6.32.41/drivers/ata/pata_hpt3x2n.c
23871 --- linux-2.6.32.41/drivers/ata/pata_hpt3x2n.c 2011-03-27 14:31:47.000000000 -0400
23872 +++ linux-2.6.32.41/drivers/ata/pata_hpt3x2n.c 2011-04-17 15:56:46.000000000 -0400
23873 @@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n
23874 * Configuration for HPT3x2n.
23875 */
23876
23877 -static struct ata_port_operations hpt3x2n_port_ops = {
23878 +static const struct ata_port_operations hpt3x2n_port_ops = {
23879 .inherits = &ata_bmdma_port_ops,
23880
23881 .bmdma_stop = hpt3x2n_bmdma_stop,
23882 diff -urNp linux-2.6.32.41/drivers/ata/pata_hpt3x3.c linux-2.6.32.41/drivers/ata/pata_hpt3x3.c
23883 --- linux-2.6.32.41/drivers/ata/pata_hpt3x3.c 2011-03-27 14:31:47.000000000 -0400
23884 +++ linux-2.6.32.41/drivers/ata/pata_hpt3x3.c 2011-04-17 15:56:46.000000000 -0400
23885 @@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_
23886 ATA_BMDMA_SHT(DRV_NAME),
23887 };
23888
23889 -static struct ata_port_operations hpt3x3_port_ops = {
23890 +static const struct ata_port_operations hpt3x3_port_ops = {
23891 .inherits = &ata_bmdma_port_ops,
23892 .cable_detect = ata_cable_40wire,
23893 .set_piomode = hpt3x3_set_piomode,
23894 diff -urNp linux-2.6.32.41/drivers/ata/pata_icside.c linux-2.6.32.41/drivers/ata/pata_icside.c
23895 --- linux-2.6.32.41/drivers/ata/pata_icside.c 2011-03-27 14:31:47.000000000 -0400
23896 +++ linux-2.6.32.41/drivers/ata/pata_icside.c 2011-04-17 15:56:46.000000000 -0400
23897 @@ -319,7 +319,7 @@ static void pata_icside_postreset(struct
23898 }
23899 }
23900
23901 -static struct ata_port_operations pata_icside_port_ops = {
23902 +static const struct ata_port_operations pata_icside_port_ops = {
23903 .inherits = &ata_sff_port_ops,
23904 /* no need to build any PRD tables for DMA */
23905 .qc_prep = ata_noop_qc_prep,
23906 diff -urNp linux-2.6.32.41/drivers/ata/pata_isapnp.c linux-2.6.32.41/drivers/ata/pata_isapnp.c
23907 --- linux-2.6.32.41/drivers/ata/pata_isapnp.c 2011-03-27 14:31:47.000000000 -0400
23908 +++ linux-2.6.32.41/drivers/ata/pata_isapnp.c 2011-04-17 15:56:46.000000000 -0400
23909 @@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_
23910 ATA_PIO_SHT(DRV_NAME),
23911 };
23912
23913 -static struct ata_port_operations isapnp_port_ops = {
23914 +static const struct ata_port_operations isapnp_port_ops = {
23915 .inherits = &ata_sff_port_ops,
23916 .cable_detect = ata_cable_40wire,
23917 };
23918
23919 -static struct ata_port_operations isapnp_noalt_port_ops = {
23920 +static const struct ata_port_operations isapnp_noalt_port_ops = {
23921 .inherits = &ata_sff_port_ops,
23922 .cable_detect = ata_cable_40wire,
23923 /* No altstatus so we don't want to use the lost interrupt poll */
23924 diff -urNp linux-2.6.32.41/drivers/ata/pata_it8213.c linux-2.6.32.41/drivers/ata/pata_it8213.c
23925 --- linux-2.6.32.41/drivers/ata/pata_it8213.c 2011-03-27 14:31:47.000000000 -0400
23926 +++ linux-2.6.32.41/drivers/ata/pata_it8213.c 2011-04-17 15:56:46.000000000 -0400
23927 @@ -234,7 +234,7 @@ static struct scsi_host_template it8213_
23928 };
23929
23930
23931 -static struct ata_port_operations it8213_ops = {
23932 +static const struct ata_port_operations it8213_ops = {
23933 .inherits = &ata_bmdma_port_ops,
23934 .cable_detect = it8213_cable_detect,
23935 .set_piomode = it8213_set_piomode,
23936 diff -urNp linux-2.6.32.41/drivers/ata/pata_it821x.c linux-2.6.32.41/drivers/ata/pata_it821x.c
23937 --- linux-2.6.32.41/drivers/ata/pata_it821x.c 2011-03-27 14:31:47.000000000 -0400
23938 +++ linux-2.6.32.41/drivers/ata/pata_it821x.c 2011-04-17 15:56:46.000000000 -0400
23939 @@ -800,7 +800,7 @@ static struct scsi_host_template it821x_
23940 ATA_BMDMA_SHT(DRV_NAME),
23941 };
23942
23943 -static struct ata_port_operations it821x_smart_port_ops = {
23944 +static const struct ata_port_operations it821x_smart_port_ops = {
23945 .inherits = &ata_bmdma_port_ops,
23946
23947 .check_atapi_dma= it821x_check_atapi_dma,
23948 @@ -814,7 +814,7 @@ static struct ata_port_operations it821x
23949 .port_start = it821x_port_start,
23950 };
23951
23952 -static struct ata_port_operations it821x_passthru_port_ops = {
23953 +static const struct ata_port_operations it821x_passthru_port_ops = {
23954 .inherits = &ata_bmdma_port_ops,
23955
23956 .check_atapi_dma= it821x_check_atapi_dma,
23957 @@ -830,7 +830,7 @@ static struct ata_port_operations it821x
23958 .port_start = it821x_port_start,
23959 };
23960
23961 -static struct ata_port_operations it821x_rdc_port_ops = {
23962 +static const struct ata_port_operations it821x_rdc_port_ops = {
23963 .inherits = &ata_bmdma_port_ops,
23964
23965 .check_atapi_dma= it821x_check_atapi_dma,
23966 diff -urNp linux-2.6.32.41/drivers/ata/pata_ixp4xx_cf.c linux-2.6.32.41/drivers/ata/pata_ixp4xx_cf.c
23967 --- linux-2.6.32.41/drivers/ata/pata_ixp4xx_cf.c 2011-03-27 14:31:47.000000000 -0400
23968 +++ linux-2.6.32.41/drivers/ata/pata_ixp4xx_cf.c 2011-04-17 15:56:46.000000000 -0400
23969 @@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_
23970 ATA_PIO_SHT(DRV_NAME),
23971 };
23972
23973 -static struct ata_port_operations ixp4xx_port_ops = {
23974 +static const struct ata_port_operations ixp4xx_port_ops = {
23975 .inherits = &ata_sff_port_ops,
23976 .sff_data_xfer = ixp4xx_mmio_data_xfer,
23977 .cable_detect = ata_cable_40wire,
23978 diff -urNp linux-2.6.32.41/drivers/ata/pata_jmicron.c linux-2.6.32.41/drivers/ata/pata_jmicron.c
23979 --- linux-2.6.32.41/drivers/ata/pata_jmicron.c 2011-03-27 14:31:47.000000000 -0400
23980 +++ linux-2.6.32.41/drivers/ata/pata_jmicron.c 2011-04-17 15:56:46.000000000 -0400
23981 @@ -111,7 +111,7 @@ static struct scsi_host_template jmicron
23982 ATA_BMDMA_SHT(DRV_NAME),
23983 };
23984
23985 -static struct ata_port_operations jmicron_ops = {
23986 +static const struct ata_port_operations jmicron_ops = {
23987 .inherits = &ata_bmdma_port_ops,
23988 .prereset = jmicron_pre_reset,
23989 };
23990 diff -urNp linux-2.6.32.41/drivers/ata/pata_legacy.c linux-2.6.32.41/drivers/ata/pata_legacy.c
23991 --- linux-2.6.32.41/drivers/ata/pata_legacy.c 2011-03-27 14:31:47.000000000 -0400
23992 +++ linux-2.6.32.41/drivers/ata/pata_legacy.c 2011-04-17 15:56:46.000000000 -0400
23993 @@ -106,7 +106,7 @@ struct legacy_probe {
23994
23995 struct legacy_controller {
23996 const char *name;
23997 - struct ata_port_operations *ops;
23998 + const struct ata_port_operations *ops;
23999 unsigned int pio_mask;
24000 unsigned int flags;
24001 unsigned int pflags;
24002 @@ -223,12 +223,12 @@ static const struct ata_port_operations
24003 * pio_mask as well.
24004 */
24005
24006 -static struct ata_port_operations simple_port_ops = {
24007 +static const struct ata_port_operations simple_port_ops = {
24008 .inherits = &legacy_base_port_ops,
24009 .sff_data_xfer = ata_sff_data_xfer_noirq,
24010 };
24011
24012 -static struct ata_port_operations legacy_port_ops = {
24013 +static const struct ata_port_operations legacy_port_ops = {
24014 .inherits = &legacy_base_port_ops,
24015 .sff_data_xfer = ata_sff_data_xfer_noirq,
24016 .set_mode = legacy_set_mode,
24017 @@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(st
24018 return buflen;
24019 }
24020
24021 -static struct ata_port_operations pdc20230_port_ops = {
24022 +static const struct ata_port_operations pdc20230_port_ops = {
24023 .inherits = &legacy_base_port_ops,
24024 .set_piomode = pdc20230_set_piomode,
24025 .sff_data_xfer = pdc_data_xfer_vlb,
24026 @@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct a
24027 ioread8(ap->ioaddr.status_addr);
24028 }
24029
24030 -static struct ata_port_operations ht6560a_port_ops = {
24031 +static const struct ata_port_operations ht6560a_port_ops = {
24032 .inherits = &legacy_base_port_ops,
24033 .set_piomode = ht6560a_set_piomode,
24034 };
24035 @@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct a
24036 ioread8(ap->ioaddr.status_addr);
24037 }
24038
24039 -static struct ata_port_operations ht6560b_port_ops = {
24040 +static const struct ata_port_operations ht6560b_port_ops = {
24041 .inherits = &legacy_base_port_ops,
24042 .set_piomode = ht6560b_set_piomode,
24043 };
24044 @@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(stru
24045 }
24046
24047
24048 -static struct ata_port_operations opti82c611a_port_ops = {
24049 +static const struct ata_port_operations opti82c611a_port_ops = {
24050 .inherits = &legacy_base_port_ops,
24051 .set_piomode = opti82c611a_set_piomode,
24052 };
24053 @@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(
24054 return ata_sff_qc_issue(qc);
24055 }
24056
24057 -static struct ata_port_operations opti82c46x_port_ops = {
24058 +static const struct ata_port_operations opti82c46x_port_ops = {
24059 .inherits = &legacy_base_port_ops,
24060 .set_piomode = opti82c46x_set_piomode,
24061 .qc_issue = opti82c46x_qc_issue,
24062 @@ -771,20 +771,20 @@ static int qdi_port(struct platform_devi
24063 return 0;
24064 }
24065
24066 -static struct ata_port_operations qdi6500_port_ops = {
24067 +static const struct ata_port_operations qdi6500_port_ops = {
24068 .inherits = &legacy_base_port_ops,
24069 .set_piomode = qdi6500_set_piomode,
24070 .qc_issue = qdi_qc_issue,
24071 .sff_data_xfer = vlb32_data_xfer,
24072 };
24073
24074 -static struct ata_port_operations qdi6580_port_ops = {
24075 +static const struct ata_port_operations qdi6580_port_ops = {
24076 .inherits = &legacy_base_port_ops,
24077 .set_piomode = qdi6580_set_piomode,
24078 .sff_data_xfer = vlb32_data_xfer,
24079 };
24080
24081 -static struct ata_port_operations qdi6580dp_port_ops = {
24082 +static const struct ata_port_operations qdi6580dp_port_ops = {
24083 .inherits = &legacy_base_port_ops,
24084 .set_piomode = qdi6580dp_set_piomode,
24085 .sff_data_xfer = vlb32_data_xfer,
24086 @@ -855,7 +855,7 @@ static int winbond_port(struct platform_
24087 return 0;
24088 }
24089
24090 -static struct ata_port_operations winbond_port_ops = {
24091 +static const struct ata_port_operations winbond_port_ops = {
24092 .inherits = &legacy_base_port_ops,
24093 .set_piomode = winbond_set_piomode,
24094 .sff_data_xfer = vlb32_data_xfer,
24095 @@ -978,7 +978,7 @@ static __init int legacy_init_one(struct
24096 int pio_modes = controller->pio_mask;
24097 unsigned long io = probe->port;
24098 u32 mask = (1 << probe->slot);
24099 - struct ata_port_operations *ops = controller->ops;
24100 + const struct ata_port_operations *ops = controller->ops;
24101 struct legacy_data *ld = &legacy_data[probe->slot];
24102 struct ata_host *host = NULL;
24103 struct ata_port *ap;
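The pata_legacy.c hunks show that constifying the per-chipset tables is not local to the table definitions: the ops member of struct legacy_controller and the local pointer in legacy_init_one() must gain the same qualifier, or assigning the address of a const table to them no longer compiles. Below is a minimal, self-contained sketch of that propagation; it assumes nothing beyond what the hunks show, and the example_* names are illustrative rather than the driver's own.

#include <linux/libata.h>

/* table fully initialised at compile time, so it can be const */
static const struct ata_port_operations example_port_ops = {
	.inherits	= &ata_sff_port_ops,
	.cable_detect	= ata_cable_40wire,
};

/* any member that stores the table's address needs the matching qualifier */
struct example_controller {
	const char *name;
	const struct ata_port_operations *ops;
};

static const struct example_controller example_controller = {
	.name	= "example",
	.ops	= &example_port_ops,
};

static int example_init_one(const struct example_controller *controller)
{
	/* the local pointer must also be pointer-to-const, as in legacy_init_one() */
	const struct ata_port_operations *ops = controller->ops;

	return ops ? 0 : -ENODEV;
}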
24104 diff -urNp linux-2.6.32.41/drivers/ata/pata_marvell.c linux-2.6.32.41/drivers/ata/pata_marvell.c
24105 --- linux-2.6.32.41/drivers/ata/pata_marvell.c 2011-03-27 14:31:47.000000000 -0400
24106 +++ linux-2.6.32.41/drivers/ata/pata_marvell.c 2011-04-17 15:56:46.000000000 -0400
24107 @@ -100,7 +100,7 @@ static struct scsi_host_template marvell
24108 ATA_BMDMA_SHT(DRV_NAME),
24109 };
24110
24111 -static struct ata_port_operations marvell_ops = {
24112 +static const struct ata_port_operations marvell_ops = {
24113 .inherits = &ata_bmdma_port_ops,
24114 .cable_detect = marvell_cable_detect,
24115 .prereset = marvell_pre_reset,
24116 diff -urNp linux-2.6.32.41/drivers/ata/pata_mpc52xx.c linux-2.6.32.41/drivers/ata/pata_mpc52xx.c
24117 --- linux-2.6.32.41/drivers/ata/pata_mpc52xx.c 2011-03-27 14:31:47.000000000 -0400
24118 +++ linux-2.6.32.41/drivers/ata/pata_mpc52xx.c 2011-04-17 15:56:46.000000000 -0400
24119 @@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx
24120 ATA_PIO_SHT(DRV_NAME),
24121 };
24122
24123 -static struct ata_port_operations mpc52xx_ata_port_ops = {
24124 +static const struct ata_port_operations mpc52xx_ata_port_ops = {
24125 .inherits = &ata_bmdma_port_ops,
24126 .sff_dev_select = mpc52xx_ata_dev_select,
24127 .set_piomode = mpc52xx_ata_set_piomode,
24128 diff -urNp linux-2.6.32.41/drivers/ata/pata_mpiix.c linux-2.6.32.41/drivers/ata/pata_mpiix.c
24129 --- linux-2.6.32.41/drivers/ata/pata_mpiix.c 2011-03-27 14:31:47.000000000 -0400
24130 +++ linux-2.6.32.41/drivers/ata/pata_mpiix.c 2011-04-17 15:56:46.000000000 -0400
24131 @@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_s
24132 ATA_PIO_SHT(DRV_NAME),
24133 };
24134
24135 -static struct ata_port_operations mpiix_port_ops = {
24136 +static const struct ata_port_operations mpiix_port_ops = {
24137 .inherits = &ata_sff_port_ops,
24138 .qc_issue = mpiix_qc_issue,
24139 .cable_detect = ata_cable_40wire,
24140 diff -urNp linux-2.6.32.41/drivers/ata/pata_netcell.c linux-2.6.32.41/drivers/ata/pata_netcell.c
24141 --- linux-2.6.32.41/drivers/ata/pata_netcell.c 2011-03-27 14:31:47.000000000 -0400
24142 +++ linux-2.6.32.41/drivers/ata/pata_netcell.c 2011-04-17 15:56:46.000000000 -0400
24143 @@ -34,7 +34,7 @@ static struct scsi_host_template netcell
24144 ATA_BMDMA_SHT(DRV_NAME),
24145 };
24146
24147 -static struct ata_port_operations netcell_ops = {
24148 +static const struct ata_port_operations netcell_ops = {
24149 .inherits = &ata_bmdma_port_ops,
24150 .cable_detect = ata_cable_80wire,
24151 .read_id = netcell_read_id,
24152 diff -urNp linux-2.6.32.41/drivers/ata/pata_ninja32.c linux-2.6.32.41/drivers/ata/pata_ninja32.c
24153 --- linux-2.6.32.41/drivers/ata/pata_ninja32.c 2011-03-27 14:31:47.000000000 -0400
24154 +++ linux-2.6.32.41/drivers/ata/pata_ninja32.c 2011-04-17 15:56:46.000000000 -0400
24155 @@ -81,7 +81,7 @@ static struct scsi_host_template ninja32
24156 ATA_BMDMA_SHT(DRV_NAME),
24157 };
24158
24159 -static struct ata_port_operations ninja32_port_ops = {
24160 +static const struct ata_port_operations ninja32_port_ops = {
24161 .inherits = &ata_bmdma_port_ops,
24162 .sff_dev_select = ninja32_dev_select,
24163 .cable_detect = ata_cable_40wire,
24164 diff -urNp linux-2.6.32.41/drivers/ata/pata_ns87410.c linux-2.6.32.41/drivers/ata/pata_ns87410.c
24165 --- linux-2.6.32.41/drivers/ata/pata_ns87410.c 2011-03-27 14:31:47.000000000 -0400
24166 +++ linux-2.6.32.41/drivers/ata/pata_ns87410.c 2011-04-17 15:56:46.000000000 -0400
24167 @@ -132,7 +132,7 @@ static struct scsi_host_template ns87410
24168 ATA_PIO_SHT(DRV_NAME),
24169 };
24170
24171 -static struct ata_port_operations ns87410_port_ops = {
24172 +static const struct ata_port_operations ns87410_port_ops = {
24173 .inherits = &ata_sff_port_ops,
24174 .qc_issue = ns87410_qc_issue,
24175 .cable_detect = ata_cable_40wire,
24176 diff -urNp linux-2.6.32.41/drivers/ata/pata_ns87415.c linux-2.6.32.41/drivers/ata/pata_ns87415.c
24177 --- linux-2.6.32.41/drivers/ata/pata_ns87415.c 2011-03-27 14:31:47.000000000 -0400
24178 +++ linux-2.6.32.41/drivers/ata/pata_ns87415.c 2011-04-17 15:56:46.000000000 -0400
24179 @@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct at
24180 }
24181 #endif /* 87560 SuperIO Support */
24182
24183 -static struct ata_port_operations ns87415_pata_ops = {
24184 +static const struct ata_port_operations ns87415_pata_ops = {
24185 .inherits = &ata_bmdma_port_ops,
24186
24187 .check_atapi_dma = ns87415_check_atapi_dma,
24188 @@ -313,7 +313,7 @@ static struct ata_port_operations ns8741
24189 };
24190
24191 #if defined(CONFIG_SUPERIO)
24192 -static struct ata_port_operations ns87560_pata_ops = {
24193 +static const struct ata_port_operations ns87560_pata_ops = {
24194 .inherits = &ns87415_pata_ops,
24195 .sff_tf_read = ns87560_tf_read,
24196 .sff_check_status = ns87560_check_status,
24197 diff -urNp linux-2.6.32.41/drivers/ata/pata_octeon_cf.c linux-2.6.32.41/drivers/ata/pata_octeon_cf.c
24198 --- linux-2.6.32.41/drivers/ata/pata_octeon_cf.c 2011-03-27 14:31:47.000000000 -0400
24199 +++ linux-2.6.32.41/drivers/ata/pata_octeon_cf.c 2011-04-17 15:56:46.000000000 -0400
24200 @@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(s
24201 return 0;
24202 }
24203
24204 +/* cannot be const */
24205 static struct ata_port_operations octeon_cf_ops = {
24206 .inherits = &ata_sff_port_ops,
24207 .check_atapi_dma = octeon_cf_check_atapi_dma,
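The lone /* cannot be const */ marker above is the exception to the pattern: the other ops tables in these hunks are fully determined at build time, whereas octeon_cf_ops is presumably written by the driver while it probes the hardware, so placing it in read-only memory would fault on that store. A small sketch of the distinction follows; the probe-time assignment and the example_* names are assumptions for illustration, not taken from the hunk.

#include <linux/libata.h>

/* the common case: nothing ever writes to the table, so it can be const */
static const struct ata_port_operations example_const_ops = {
	.inherits	= &ata_sff_port_ops,
	.cable_detect	= ata_cable_40wire,
};

/* cannot be const: a callback is only chosen once the hardware is known */
static struct ata_port_operations example_runtime_ops = {
	.inherits	= &ata_sff_port_ops,
};

static void example_probe(int wide_bus)
{
	/* this store is why the table must stay writable */
	example_runtime_ops.sff_data_xfer = wide_bus
		? ata_sff_data_xfer32
		: ata_sff_data_xfer_noirq;
}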
24208 diff -urNp linux-2.6.32.41/drivers/ata/pata_oldpiix.c linux-2.6.32.41/drivers/ata/pata_oldpiix.c
24209 --- linux-2.6.32.41/drivers/ata/pata_oldpiix.c 2011-03-27 14:31:47.000000000 -0400
24210 +++ linux-2.6.32.41/drivers/ata/pata_oldpiix.c 2011-04-17 15:56:46.000000000 -0400
24211 @@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix
24212 ATA_BMDMA_SHT(DRV_NAME),
24213 };
24214
24215 -static struct ata_port_operations oldpiix_pata_ops = {
24216 +static const struct ata_port_operations oldpiix_pata_ops = {
24217 .inherits = &ata_bmdma_port_ops,
24218 .qc_issue = oldpiix_qc_issue,
24219 .cable_detect = ata_cable_40wire,
24220 diff -urNp linux-2.6.32.41/drivers/ata/pata_opti.c linux-2.6.32.41/drivers/ata/pata_opti.c
24221 --- linux-2.6.32.41/drivers/ata/pata_opti.c 2011-03-27 14:31:47.000000000 -0400
24222 +++ linux-2.6.32.41/drivers/ata/pata_opti.c 2011-04-17 15:56:46.000000000 -0400
24223 @@ -152,7 +152,7 @@ static struct scsi_host_template opti_sh
24224 ATA_PIO_SHT(DRV_NAME),
24225 };
24226
24227 -static struct ata_port_operations opti_port_ops = {
24228 +static const struct ata_port_operations opti_port_ops = {
24229 .inherits = &ata_sff_port_ops,
24230 .cable_detect = ata_cable_40wire,
24231 .set_piomode = opti_set_piomode,
24232 diff -urNp linux-2.6.32.41/drivers/ata/pata_optidma.c linux-2.6.32.41/drivers/ata/pata_optidma.c
24233 --- linux-2.6.32.41/drivers/ata/pata_optidma.c 2011-03-27 14:31:47.000000000 -0400
24234 +++ linux-2.6.32.41/drivers/ata/pata_optidma.c 2011-04-17 15:56:46.000000000 -0400
24235 @@ -337,7 +337,7 @@ static struct scsi_host_template optidma
24236 ATA_BMDMA_SHT(DRV_NAME),
24237 };
24238
24239 -static struct ata_port_operations optidma_port_ops = {
24240 +static const struct ata_port_operations optidma_port_ops = {
24241 .inherits = &ata_bmdma_port_ops,
24242 .cable_detect = ata_cable_40wire,
24243 .set_piomode = optidma_set_pio_mode,
24244 @@ -346,7 +346,7 @@ static struct ata_port_operations optidm
24245 .prereset = optidma_pre_reset,
24246 };
24247
24248 -static struct ata_port_operations optiplus_port_ops = {
24249 +static const struct ata_port_operations optiplus_port_ops = {
24250 .inherits = &optidma_port_ops,
24251 .set_piomode = optiplus_set_pio_mode,
24252 .set_dmamode = optiplus_set_dma_mode,
24253 diff -urNp linux-2.6.32.41/drivers/ata/pata_palmld.c linux-2.6.32.41/drivers/ata/pata_palmld.c
24254 --- linux-2.6.32.41/drivers/ata/pata_palmld.c 2011-03-27 14:31:47.000000000 -0400
24255 +++ linux-2.6.32.41/drivers/ata/pata_palmld.c 2011-04-17 15:56:46.000000000 -0400
24256 @@ -37,7 +37,7 @@ static struct scsi_host_template palmld_
24257 ATA_PIO_SHT(DRV_NAME),
24258 };
24259
24260 -static struct ata_port_operations palmld_port_ops = {
24261 +static const struct ata_port_operations palmld_port_ops = {
24262 .inherits = &ata_sff_port_ops,
24263 .sff_data_xfer = ata_sff_data_xfer_noirq,
24264 .cable_detect = ata_cable_40wire,
24265 diff -urNp linux-2.6.32.41/drivers/ata/pata_pcmcia.c linux-2.6.32.41/drivers/ata/pata_pcmcia.c
24266 --- linux-2.6.32.41/drivers/ata/pata_pcmcia.c 2011-03-27 14:31:47.000000000 -0400
24267 +++ linux-2.6.32.41/drivers/ata/pata_pcmcia.c 2011-04-17 15:56:46.000000000 -0400
24268 @@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_
24269 ATA_PIO_SHT(DRV_NAME),
24270 };
24271
24272 -static struct ata_port_operations pcmcia_port_ops = {
24273 +static const struct ata_port_operations pcmcia_port_ops = {
24274 .inherits = &ata_sff_port_ops,
24275 .sff_data_xfer = ata_sff_data_xfer_noirq,
24276 .cable_detect = ata_cable_40wire,
24277 .set_mode = pcmcia_set_mode,
24278 };
24279
24280 -static struct ata_port_operations pcmcia_8bit_port_ops = {
24281 +static const struct ata_port_operations pcmcia_8bit_port_ops = {
24282 .inherits = &ata_sff_port_ops,
24283 .sff_data_xfer = ata_data_xfer_8bit,
24284 .cable_detect = ata_cable_40wire,
24285 @@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia
24286 unsigned long io_base, ctl_base;
24287 void __iomem *io_addr, *ctl_addr;
24288 int n_ports = 1;
24289 - struct ata_port_operations *ops = &pcmcia_port_ops;
24290 + const struct ata_port_operations *ops = &pcmcia_port_ops;
24291
24292 info = kzalloc(sizeof(*info), GFP_KERNEL);
24293 if (info == NULL)
24294 diff -urNp linux-2.6.32.41/drivers/ata/pata_pdc2027x.c linux-2.6.32.41/drivers/ata/pata_pdc2027x.c
24295 --- linux-2.6.32.41/drivers/ata/pata_pdc2027x.c 2011-03-27 14:31:47.000000000 -0400
24296 +++ linux-2.6.32.41/drivers/ata/pata_pdc2027x.c 2011-04-17 15:56:46.000000000 -0400
24297 @@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027
24298 ATA_BMDMA_SHT(DRV_NAME),
24299 };
24300
24301 -static struct ata_port_operations pdc2027x_pata100_ops = {
24302 +static const struct ata_port_operations pdc2027x_pata100_ops = {
24303 .inherits = &ata_bmdma_port_ops,
24304 .check_atapi_dma = pdc2027x_check_atapi_dma,
24305 .cable_detect = pdc2027x_cable_detect,
24306 .prereset = pdc2027x_prereset,
24307 };
24308
24309 -static struct ata_port_operations pdc2027x_pata133_ops = {
24310 +static const struct ata_port_operations pdc2027x_pata133_ops = {
24311 .inherits = &pdc2027x_pata100_ops,
24312 .mode_filter = pdc2027x_mode_filter,
24313 .set_piomode = pdc2027x_set_piomode,
24314 diff -urNp linux-2.6.32.41/drivers/ata/pata_pdc202xx_old.c linux-2.6.32.41/drivers/ata/pata_pdc202xx_old.c
24315 --- linux-2.6.32.41/drivers/ata/pata_pdc202xx_old.c 2011-03-27 14:31:47.000000000 -0400
24316 +++ linux-2.6.32.41/drivers/ata/pata_pdc202xx_old.c 2011-04-17 15:56:46.000000000 -0400
24317 @@ -274,7 +274,7 @@ static struct scsi_host_template pdc202x
24318 ATA_BMDMA_SHT(DRV_NAME),
24319 };
24320
24321 -static struct ata_port_operations pdc2024x_port_ops = {
24322 +static const struct ata_port_operations pdc2024x_port_ops = {
24323 .inherits = &ata_bmdma_port_ops,
24324
24325 .cable_detect = ata_cable_40wire,
24326 @@ -284,7 +284,7 @@ static struct ata_port_operations pdc202
24327 .sff_exec_command = pdc202xx_exec_command,
24328 };
24329
24330 -static struct ata_port_operations pdc2026x_port_ops = {
24331 +static const struct ata_port_operations pdc2026x_port_ops = {
24332 .inherits = &pdc2024x_port_ops,
24333
24334 .check_atapi_dma = pdc2026x_check_atapi_dma,
24335 diff -urNp linux-2.6.32.41/drivers/ata/pata_platform.c linux-2.6.32.41/drivers/ata/pata_platform.c
24336 --- linux-2.6.32.41/drivers/ata/pata_platform.c 2011-03-27 14:31:47.000000000 -0400
24337 +++ linux-2.6.32.41/drivers/ata/pata_platform.c 2011-04-17 15:56:46.000000000 -0400
24338 @@ -48,7 +48,7 @@ static struct scsi_host_template pata_pl
24339 ATA_PIO_SHT(DRV_NAME),
24340 };
24341
24342 -static struct ata_port_operations pata_platform_port_ops = {
24343 +static const struct ata_port_operations pata_platform_port_ops = {
24344 .inherits = &ata_sff_port_ops,
24345 .sff_data_xfer = ata_sff_data_xfer_noirq,
24346 .cable_detect = ata_cable_unknown,
24347 diff -urNp linux-2.6.32.41/drivers/ata/pata_qdi.c linux-2.6.32.41/drivers/ata/pata_qdi.c
24348 --- linux-2.6.32.41/drivers/ata/pata_qdi.c 2011-03-27 14:31:47.000000000 -0400
24349 +++ linux-2.6.32.41/drivers/ata/pata_qdi.c 2011-04-17 15:56:46.000000000 -0400
24350 @@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht
24351 ATA_PIO_SHT(DRV_NAME),
24352 };
24353
24354 -static struct ata_port_operations qdi6500_port_ops = {
24355 +static const struct ata_port_operations qdi6500_port_ops = {
24356 .inherits = &ata_sff_port_ops,
24357 .qc_issue = qdi_qc_issue,
24358 .sff_data_xfer = qdi_data_xfer,
24359 @@ -165,7 +165,7 @@ static struct ata_port_operations qdi650
24360 .set_piomode = qdi6500_set_piomode,
24361 };
24362
24363 -static struct ata_port_operations qdi6580_port_ops = {
24364 +static const struct ata_port_operations qdi6580_port_ops = {
24365 .inherits = &qdi6500_port_ops,
24366 .set_piomode = qdi6580_set_piomode,
24367 };
24368 diff -urNp linux-2.6.32.41/drivers/ata/pata_radisys.c linux-2.6.32.41/drivers/ata/pata_radisys.c
24369 --- linux-2.6.32.41/drivers/ata/pata_radisys.c 2011-03-27 14:31:47.000000000 -0400
24370 +++ linux-2.6.32.41/drivers/ata/pata_radisys.c 2011-04-17 15:56:46.000000000 -0400
24371 @@ -187,7 +187,7 @@ static struct scsi_host_template radisys
24372 ATA_BMDMA_SHT(DRV_NAME),
24373 };
24374
24375 -static struct ata_port_operations radisys_pata_ops = {
24376 +static const struct ata_port_operations radisys_pata_ops = {
24377 .inherits = &ata_bmdma_port_ops,
24378 .qc_issue = radisys_qc_issue,
24379 .cable_detect = ata_cable_unknown,
24380 diff -urNp linux-2.6.32.41/drivers/ata/pata_rb532_cf.c linux-2.6.32.41/drivers/ata/pata_rb532_cf.c
24381 --- linux-2.6.32.41/drivers/ata/pata_rb532_cf.c 2011-03-27 14:31:47.000000000 -0400
24382 +++ linux-2.6.32.41/drivers/ata/pata_rb532_cf.c 2011-04-17 15:56:46.000000000 -0400
24383 @@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handle
24384 return IRQ_HANDLED;
24385 }
24386
24387 -static struct ata_port_operations rb532_pata_port_ops = {
24388 +static const struct ata_port_operations rb532_pata_port_ops = {
24389 .inherits = &ata_sff_port_ops,
24390 .sff_data_xfer = ata_sff_data_xfer32,
24391 };
24392 diff -urNp linux-2.6.32.41/drivers/ata/pata_rdc.c linux-2.6.32.41/drivers/ata/pata_rdc.c
24393 --- linux-2.6.32.41/drivers/ata/pata_rdc.c 2011-03-27 14:31:47.000000000 -0400
24394 +++ linux-2.6.32.41/drivers/ata/pata_rdc.c 2011-04-17 15:56:46.000000000 -0400
24395 @@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_p
24396 pci_write_config_byte(dev, 0x48, udma_enable);
24397 }
24398
24399 -static struct ata_port_operations rdc_pata_ops = {
24400 +static const struct ata_port_operations rdc_pata_ops = {
24401 .inherits = &ata_bmdma32_port_ops,
24402 .cable_detect = rdc_pata_cable_detect,
24403 .set_piomode = rdc_set_piomode,
24404 diff -urNp linux-2.6.32.41/drivers/ata/pata_rz1000.c linux-2.6.32.41/drivers/ata/pata_rz1000.c
24405 --- linux-2.6.32.41/drivers/ata/pata_rz1000.c 2011-03-27 14:31:47.000000000 -0400
24406 +++ linux-2.6.32.41/drivers/ata/pata_rz1000.c 2011-04-17 15:56:46.000000000 -0400
24407 @@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_
24408 ATA_PIO_SHT(DRV_NAME),
24409 };
24410
24411 -static struct ata_port_operations rz1000_port_ops = {
24412 +static const struct ata_port_operations rz1000_port_ops = {
24413 .inherits = &ata_sff_port_ops,
24414 .cable_detect = ata_cable_40wire,
24415 .set_mode = rz1000_set_mode,
24416 diff -urNp linux-2.6.32.41/drivers/ata/pata_sc1200.c linux-2.6.32.41/drivers/ata/pata_sc1200.c
24417 --- linux-2.6.32.41/drivers/ata/pata_sc1200.c 2011-03-27 14:31:47.000000000 -0400
24418 +++ linux-2.6.32.41/drivers/ata/pata_sc1200.c 2011-04-17 15:56:46.000000000 -0400
24419 @@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_
24420 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24421 };
24422
24423 -static struct ata_port_operations sc1200_port_ops = {
24424 +static const struct ata_port_operations sc1200_port_ops = {
24425 .inherits = &ata_bmdma_port_ops,
24426 .qc_prep = ata_sff_dumb_qc_prep,
24427 .qc_issue = sc1200_qc_issue,
24428 diff -urNp linux-2.6.32.41/drivers/ata/pata_scc.c linux-2.6.32.41/drivers/ata/pata_scc.c
24429 --- linux-2.6.32.41/drivers/ata/pata_scc.c 2011-03-27 14:31:47.000000000 -0400
24430 +++ linux-2.6.32.41/drivers/ata/pata_scc.c 2011-04-17 15:56:46.000000000 -0400
24431 @@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht
24432 ATA_BMDMA_SHT(DRV_NAME),
24433 };
24434
24435 -static struct ata_port_operations scc_pata_ops = {
24436 +static const struct ata_port_operations scc_pata_ops = {
24437 .inherits = &ata_bmdma_port_ops,
24438
24439 .set_piomode = scc_set_piomode,
24440 diff -urNp linux-2.6.32.41/drivers/ata/pata_sch.c linux-2.6.32.41/drivers/ata/pata_sch.c
24441 --- linux-2.6.32.41/drivers/ata/pata_sch.c 2011-03-27 14:31:47.000000000 -0400
24442 +++ linux-2.6.32.41/drivers/ata/pata_sch.c 2011-04-17 15:56:46.000000000 -0400
24443 @@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht
24444 ATA_BMDMA_SHT(DRV_NAME),
24445 };
24446
24447 -static struct ata_port_operations sch_pata_ops = {
24448 +static const struct ata_port_operations sch_pata_ops = {
24449 .inherits = &ata_bmdma_port_ops,
24450 .cable_detect = ata_cable_unknown,
24451 .set_piomode = sch_set_piomode,
24452 diff -urNp linux-2.6.32.41/drivers/ata/pata_serverworks.c linux-2.6.32.41/drivers/ata/pata_serverworks.c
24453 --- linux-2.6.32.41/drivers/ata/pata_serverworks.c 2011-03-27 14:31:47.000000000 -0400
24454 +++ linux-2.6.32.41/drivers/ata/pata_serverworks.c 2011-04-17 15:56:46.000000000 -0400
24455 @@ -299,7 +299,7 @@ static struct scsi_host_template serverw
24456 ATA_BMDMA_SHT(DRV_NAME),
24457 };
24458
24459 -static struct ata_port_operations serverworks_osb4_port_ops = {
24460 +static const struct ata_port_operations serverworks_osb4_port_ops = {
24461 .inherits = &ata_bmdma_port_ops,
24462 .cable_detect = serverworks_cable_detect,
24463 .mode_filter = serverworks_osb4_filter,
24464 @@ -307,7 +307,7 @@ static struct ata_port_operations server
24465 .set_dmamode = serverworks_set_dmamode,
24466 };
24467
24468 -static struct ata_port_operations serverworks_csb_port_ops = {
24469 +static const struct ata_port_operations serverworks_csb_port_ops = {
24470 .inherits = &serverworks_osb4_port_ops,
24471 .mode_filter = serverworks_csb_filter,
24472 };
24473 diff -urNp linux-2.6.32.41/drivers/ata/pata_sil680.c linux-2.6.32.41/drivers/ata/pata_sil680.c
24474 --- linux-2.6.32.41/drivers/ata/pata_sil680.c 2011-03-27 14:31:47.000000000 -0400
24475 +++ linux-2.6.32.41/drivers/ata/pata_sil680.c 2011-04-17 15:56:46.000000000 -0400
24476 @@ -194,7 +194,7 @@ static struct scsi_host_template sil680_
24477 ATA_BMDMA_SHT(DRV_NAME),
24478 };
24479
24480 -static struct ata_port_operations sil680_port_ops = {
24481 +static const struct ata_port_operations sil680_port_ops = {
24482 .inherits = &ata_bmdma32_port_ops,
24483 .cable_detect = sil680_cable_detect,
24484 .set_piomode = sil680_set_piomode,
24485 diff -urNp linux-2.6.32.41/drivers/ata/pata_sis.c linux-2.6.32.41/drivers/ata/pata_sis.c
24486 --- linux-2.6.32.41/drivers/ata/pata_sis.c 2011-03-27 14:31:47.000000000 -0400
24487 +++ linux-2.6.32.41/drivers/ata/pata_sis.c 2011-04-17 15:56:46.000000000 -0400
24488 @@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht
24489 ATA_BMDMA_SHT(DRV_NAME),
24490 };
24491
24492 -static struct ata_port_operations sis_133_for_sata_ops = {
24493 +static const struct ata_port_operations sis_133_for_sata_ops = {
24494 .inherits = &ata_bmdma_port_ops,
24495 .set_piomode = sis_133_set_piomode,
24496 .set_dmamode = sis_133_set_dmamode,
24497 .cable_detect = sis_133_cable_detect,
24498 };
24499
24500 -static struct ata_port_operations sis_base_ops = {
24501 +static const struct ata_port_operations sis_base_ops = {
24502 .inherits = &ata_bmdma_port_ops,
24503 .prereset = sis_pre_reset,
24504 };
24505
24506 -static struct ata_port_operations sis_133_ops = {
24507 +static const struct ata_port_operations sis_133_ops = {
24508 .inherits = &sis_base_ops,
24509 .set_piomode = sis_133_set_piomode,
24510 .set_dmamode = sis_133_set_dmamode,
24511 .cable_detect = sis_133_cable_detect,
24512 };
24513
24514 -static struct ata_port_operations sis_133_early_ops = {
24515 +static const struct ata_port_operations sis_133_early_ops = {
24516 .inherits = &sis_base_ops,
24517 .set_piomode = sis_100_set_piomode,
24518 .set_dmamode = sis_133_early_set_dmamode,
24519 .cable_detect = sis_66_cable_detect,
24520 };
24521
24522 -static struct ata_port_operations sis_100_ops = {
24523 +static const struct ata_port_operations sis_100_ops = {
24524 .inherits = &sis_base_ops,
24525 .set_piomode = sis_100_set_piomode,
24526 .set_dmamode = sis_100_set_dmamode,
24527 .cable_detect = sis_66_cable_detect,
24528 };
24529
24530 -static struct ata_port_operations sis_66_ops = {
24531 +static const struct ata_port_operations sis_66_ops = {
24532 .inherits = &sis_base_ops,
24533 .set_piomode = sis_old_set_piomode,
24534 .set_dmamode = sis_66_set_dmamode,
24535 .cable_detect = sis_66_cable_detect,
24536 };
24537
24538 -static struct ata_port_operations sis_old_ops = {
24539 +static const struct ata_port_operations sis_old_ops = {
24540 .inherits = &sis_base_ops,
24541 .set_piomode = sis_old_set_piomode,
24542 .set_dmamode = sis_old_set_dmamode,
24543 diff -urNp linux-2.6.32.41/drivers/ata/pata_sl82c105.c linux-2.6.32.41/drivers/ata/pata_sl82c105.c
24544 --- linux-2.6.32.41/drivers/ata/pata_sl82c105.c 2011-03-27 14:31:47.000000000 -0400
24545 +++ linux-2.6.32.41/drivers/ata/pata_sl82c105.c 2011-04-17 15:56:46.000000000 -0400
24546 @@ -231,7 +231,7 @@ static struct scsi_host_template sl82c10
24547 ATA_BMDMA_SHT(DRV_NAME),
24548 };
24549
24550 -static struct ata_port_operations sl82c105_port_ops = {
24551 +static const struct ata_port_operations sl82c105_port_ops = {
24552 .inherits = &ata_bmdma_port_ops,
24553 .qc_defer = sl82c105_qc_defer,
24554 .bmdma_start = sl82c105_bmdma_start,
24555 diff -urNp linux-2.6.32.41/drivers/ata/pata_triflex.c linux-2.6.32.41/drivers/ata/pata_triflex.c
24556 --- linux-2.6.32.41/drivers/ata/pata_triflex.c 2011-03-27 14:31:47.000000000 -0400
24557 +++ linux-2.6.32.41/drivers/ata/pata_triflex.c 2011-04-17 15:56:46.000000000 -0400
24558 @@ -178,7 +178,7 @@ static struct scsi_host_template triflex
24559 ATA_BMDMA_SHT(DRV_NAME),
24560 };
24561
24562 -static struct ata_port_operations triflex_port_ops = {
24563 +static const struct ata_port_operations triflex_port_ops = {
24564 .inherits = &ata_bmdma_port_ops,
24565 .bmdma_start = triflex_bmdma_start,
24566 .bmdma_stop = triflex_bmdma_stop,
24567 diff -urNp linux-2.6.32.41/drivers/ata/pata_via.c linux-2.6.32.41/drivers/ata/pata_via.c
24568 --- linux-2.6.32.41/drivers/ata/pata_via.c 2011-03-27 14:31:47.000000000 -0400
24569 +++ linux-2.6.32.41/drivers/ata/pata_via.c 2011-04-17 15:56:46.000000000 -0400
24570 @@ -419,7 +419,7 @@ static struct scsi_host_template via_sht
24571 ATA_BMDMA_SHT(DRV_NAME),
24572 };
24573
24574 -static struct ata_port_operations via_port_ops = {
24575 +static const struct ata_port_operations via_port_ops = {
24576 .inherits = &ata_bmdma_port_ops,
24577 .cable_detect = via_cable_detect,
24578 .set_piomode = via_set_piomode,
24579 @@ -429,7 +429,7 @@ static struct ata_port_operations via_po
24580 .port_start = via_port_start,
24581 };
24582
24583 -static struct ata_port_operations via_port_ops_noirq = {
24584 +static const struct ata_port_operations via_port_ops_noirq = {
24585 .inherits = &via_port_ops,
24586 .sff_data_xfer = ata_sff_data_xfer_noirq,
24587 };
24588 diff -urNp linux-2.6.32.41/drivers/ata/pata_winbond.c linux-2.6.32.41/drivers/ata/pata_winbond.c
24589 --- linux-2.6.32.41/drivers/ata/pata_winbond.c 2011-03-27 14:31:47.000000000 -0400
24590 +++ linux-2.6.32.41/drivers/ata/pata_winbond.c 2011-04-17 15:56:46.000000000 -0400
24591 @@ -125,7 +125,7 @@ static struct scsi_host_template winbond
24592 ATA_PIO_SHT(DRV_NAME),
24593 };
24594
24595 -static struct ata_port_operations winbond_port_ops = {
24596 +static const struct ata_port_operations winbond_port_ops = {
24597 .inherits = &ata_sff_port_ops,
24598 .sff_data_xfer = winbond_data_xfer,
24599 .cable_detect = ata_cable_40wire,
24600 diff -urNp linux-2.6.32.41/drivers/ata/pdc_adma.c linux-2.6.32.41/drivers/ata/pdc_adma.c
24601 --- linux-2.6.32.41/drivers/ata/pdc_adma.c 2011-03-27 14:31:47.000000000 -0400
24602 +++ linux-2.6.32.41/drivers/ata/pdc_adma.c 2011-04-17 15:56:46.000000000 -0400
24603 @@ -145,7 +145,7 @@ static struct scsi_host_template adma_at
24604 .dma_boundary = ADMA_DMA_BOUNDARY,
24605 };
24606
24607 -static struct ata_port_operations adma_ata_ops = {
24608 +static const struct ata_port_operations adma_ata_ops = {
24609 .inherits = &ata_sff_port_ops,
24610
24611 .lost_interrupt = ATA_OP_NULL,
24612 diff -urNp linux-2.6.32.41/drivers/ata/sata_fsl.c linux-2.6.32.41/drivers/ata/sata_fsl.c
24613 --- linux-2.6.32.41/drivers/ata/sata_fsl.c 2011-03-27 14:31:47.000000000 -0400
24614 +++ linux-2.6.32.41/drivers/ata/sata_fsl.c 2011-04-17 15:56:46.000000000 -0400
24615 @@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fs
24616 .dma_boundary = ATA_DMA_BOUNDARY,
24617 };
24618
24619 -static struct ata_port_operations sata_fsl_ops = {
24620 +static const struct ata_port_operations sata_fsl_ops = {
24621 .inherits = &sata_pmp_port_ops,
24622
24623 .qc_defer = ata_std_qc_defer,
24624 diff -urNp linux-2.6.32.41/drivers/ata/sata_inic162x.c linux-2.6.32.41/drivers/ata/sata_inic162x.c
24625 --- linux-2.6.32.41/drivers/ata/sata_inic162x.c 2011-03-27 14:31:47.000000000 -0400
24626 +++ linux-2.6.32.41/drivers/ata/sata_inic162x.c 2011-04-17 15:56:46.000000000 -0400
24627 @@ -721,7 +721,7 @@ static int inic_port_start(struct ata_po
24628 return 0;
24629 }
24630
24631 -static struct ata_port_operations inic_port_ops = {
24632 +static const struct ata_port_operations inic_port_ops = {
24633 .inherits = &sata_port_ops,
24634
24635 .check_atapi_dma = inic_check_atapi_dma,
24636 diff -urNp linux-2.6.32.41/drivers/ata/sata_mv.c linux-2.6.32.41/drivers/ata/sata_mv.c
24637 --- linux-2.6.32.41/drivers/ata/sata_mv.c 2011-03-27 14:31:47.000000000 -0400
24638 +++ linux-2.6.32.41/drivers/ata/sata_mv.c 2011-04-17 15:56:46.000000000 -0400
24639 @@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht
24640 .dma_boundary = MV_DMA_BOUNDARY,
24641 };
24642
24643 -static struct ata_port_operations mv5_ops = {
24644 +static const struct ata_port_operations mv5_ops = {
24645 .inherits = &ata_sff_port_ops,
24646
24647 .lost_interrupt = ATA_OP_NULL,
24648 @@ -678,7 +678,7 @@ static struct ata_port_operations mv5_op
24649 .port_stop = mv_port_stop,
24650 };
24651
24652 -static struct ata_port_operations mv6_ops = {
24653 +static const struct ata_port_operations mv6_ops = {
24654 .inherits = &mv5_ops,
24655 .dev_config = mv6_dev_config,
24656 .scr_read = mv_scr_read,
24657 @@ -698,7 +698,7 @@ static struct ata_port_operations mv6_op
24658 .bmdma_status = mv_bmdma_status,
24659 };
24660
24661 -static struct ata_port_operations mv_iie_ops = {
24662 +static const struct ata_port_operations mv_iie_ops = {
24663 .inherits = &mv6_ops,
24664 .dev_config = ATA_OP_NULL,
24665 .qc_prep = mv_qc_prep_iie,
24666 diff -urNp linux-2.6.32.41/drivers/ata/sata_nv.c linux-2.6.32.41/drivers/ata/sata_nv.c
24667 --- linux-2.6.32.41/drivers/ata/sata_nv.c 2011-03-27 14:31:47.000000000 -0400
24668 +++ linux-2.6.32.41/drivers/ata/sata_nv.c 2011-04-17 15:56:46.000000000 -0400
24669 @@ -464,7 +464,7 @@ static struct scsi_host_template nv_swnc
24670 * cases. Define nv_hardreset() which only kicks in for post-boot
24671 * probing and use it for all variants.
24672 */
24673 -static struct ata_port_operations nv_generic_ops = {
24674 +static const struct ata_port_operations nv_generic_ops = {
24675 .inherits = &ata_bmdma_port_ops,
24676 .lost_interrupt = ATA_OP_NULL,
24677 .scr_read = nv_scr_read,
24678 @@ -472,20 +472,20 @@ static struct ata_port_operations nv_gen
24679 .hardreset = nv_hardreset,
24680 };
24681
24682 -static struct ata_port_operations nv_nf2_ops = {
24683 +static const struct ata_port_operations nv_nf2_ops = {
24684 .inherits = &nv_generic_ops,
24685 .freeze = nv_nf2_freeze,
24686 .thaw = nv_nf2_thaw,
24687 };
24688
24689 -static struct ata_port_operations nv_ck804_ops = {
24690 +static const struct ata_port_operations nv_ck804_ops = {
24691 .inherits = &nv_generic_ops,
24692 .freeze = nv_ck804_freeze,
24693 .thaw = nv_ck804_thaw,
24694 .host_stop = nv_ck804_host_stop,
24695 };
24696
24697 -static struct ata_port_operations nv_adma_ops = {
24698 +static const struct ata_port_operations nv_adma_ops = {
24699 .inherits = &nv_ck804_ops,
24700
24701 .check_atapi_dma = nv_adma_check_atapi_dma,
24702 @@ -509,7 +509,7 @@ static struct ata_port_operations nv_adm
24703 .host_stop = nv_adma_host_stop,
24704 };
24705
24706 -static struct ata_port_operations nv_swncq_ops = {
24707 +static const struct ata_port_operations nv_swncq_ops = {
24708 .inherits = &nv_generic_ops,
24709
24710 .qc_defer = ata_std_qc_defer,
24711 diff -urNp linux-2.6.32.41/drivers/ata/sata_promise.c linux-2.6.32.41/drivers/ata/sata_promise.c
24712 --- linux-2.6.32.41/drivers/ata/sata_promise.c 2011-03-27 14:31:47.000000000 -0400
24713 +++ linux-2.6.32.41/drivers/ata/sata_promise.c 2011-04-17 15:56:46.000000000 -0400
24714 @@ -195,7 +195,7 @@ static const struct ata_port_operations
24715 .error_handler = pdc_error_handler,
24716 };
24717
24718 -static struct ata_port_operations pdc_sata_ops = {
24719 +static const struct ata_port_operations pdc_sata_ops = {
24720 .inherits = &pdc_common_ops,
24721 .cable_detect = pdc_sata_cable_detect,
24722 .freeze = pdc_sata_freeze,
24723 @@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sa
24724
24725 /* First-generation chips need a more restrictive ->check_atapi_dma op,
24726 and ->freeze/thaw that ignore the hotplug controls. */
24727 -static struct ata_port_operations pdc_old_sata_ops = {
24728 +static const struct ata_port_operations pdc_old_sata_ops = {
24729 .inherits = &pdc_sata_ops,
24730 .freeze = pdc_freeze,
24731 .thaw = pdc_thaw,
24732 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
24733 };
24734
24735 -static struct ata_port_operations pdc_pata_ops = {
24736 +static const struct ata_port_operations pdc_pata_ops = {
24737 .inherits = &pdc_common_ops,
24738 .cable_detect = pdc_pata_cable_detect,
24739 .freeze = pdc_freeze,
24740 diff -urNp linux-2.6.32.41/drivers/ata/sata_qstor.c linux-2.6.32.41/drivers/ata/sata_qstor.c
24741 --- linux-2.6.32.41/drivers/ata/sata_qstor.c 2011-03-27 14:31:47.000000000 -0400
24742 +++ linux-2.6.32.41/drivers/ata/sata_qstor.c 2011-04-17 15:56:46.000000000 -0400
24743 @@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_
24744 .dma_boundary = QS_DMA_BOUNDARY,
24745 };
24746
24747 -static struct ata_port_operations qs_ata_ops = {
24748 +static const struct ata_port_operations qs_ata_ops = {
24749 .inherits = &ata_sff_port_ops,
24750
24751 .check_atapi_dma = qs_check_atapi_dma,
24752 diff -urNp linux-2.6.32.41/drivers/ata/sata_sil24.c linux-2.6.32.41/drivers/ata/sata_sil24.c
24753 --- linux-2.6.32.41/drivers/ata/sata_sil24.c 2011-03-27 14:31:47.000000000 -0400
24754 +++ linux-2.6.32.41/drivers/ata/sata_sil24.c 2011-04-17 15:56:46.000000000 -0400
24755 @@ -388,7 +388,7 @@ static struct scsi_host_template sil24_s
24756 .dma_boundary = ATA_DMA_BOUNDARY,
24757 };
24758
24759 -static struct ata_port_operations sil24_ops = {
24760 +static const struct ata_port_operations sil24_ops = {
24761 .inherits = &sata_pmp_port_ops,
24762
24763 .qc_defer = sil24_qc_defer,
24764 diff -urNp linux-2.6.32.41/drivers/ata/sata_sil.c linux-2.6.32.41/drivers/ata/sata_sil.c
24765 --- linux-2.6.32.41/drivers/ata/sata_sil.c 2011-03-27 14:31:47.000000000 -0400
24766 +++ linux-2.6.32.41/drivers/ata/sata_sil.c 2011-04-17 15:56:46.000000000 -0400
24767 @@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht
24768 .sg_tablesize = ATA_MAX_PRD
24769 };
24770
24771 -static struct ata_port_operations sil_ops = {
24772 +static const struct ata_port_operations sil_ops = {
24773 .inherits = &ata_bmdma32_port_ops,
24774 .dev_config = sil_dev_config,
24775 .set_mode = sil_set_mode,
24776 diff -urNp linux-2.6.32.41/drivers/ata/sata_sis.c linux-2.6.32.41/drivers/ata/sata_sis.c
24777 --- linux-2.6.32.41/drivers/ata/sata_sis.c 2011-03-27 14:31:47.000000000 -0400
24778 +++ linux-2.6.32.41/drivers/ata/sata_sis.c 2011-04-17 15:56:46.000000000 -0400
24779 @@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht
24780 ATA_BMDMA_SHT(DRV_NAME),
24781 };
24782
24783 -static struct ata_port_operations sis_ops = {
24784 +static const struct ata_port_operations sis_ops = {
24785 .inherits = &ata_bmdma_port_ops,
24786 .scr_read = sis_scr_read,
24787 .scr_write = sis_scr_write,
24788 diff -urNp linux-2.6.32.41/drivers/ata/sata_svw.c linux-2.6.32.41/drivers/ata/sata_svw.c
24789 --- linux-2.6.32.41/drivers/ata/sata_svw.c 2011-03-27 14:31:47.000000000 -0400
24790 +++ linux-2.6.32.41/drivers/ata/sata_svw.c 2011-04-17 15:56:46.000000000 -0400
24791 @@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata
24792 };
24793
24794
24795 -static struct ata_port_operations k2_sata_ops = {
24796 +static const struct ata_port_operations k2_sata_ops = {
24797 .inherits = &ata_bmdma_port_ops,
24798 .sff_tf_load = k2_sata_tf_load,
24799 .sff_tf_read = k2_sata_tf_read,
24800 diff -urNp linux-2.6.32.41/drivers/ata/sata_sx4.c linux-2.6.32.41/drivers/ata/sata_sx4.c
24801 --- linux-2.6.32.41/drivers/ata/sata_sx4.c 2011-03-27 14:31:47.000000000 -0400
24802 +++ linux-2.6.32.41/drivers/ata/sata_sx4.c 2011-04-17 15:56:46.000000000 -0400
24803 @@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sat
24804 };
24805
24806 /* TODO: inherit from base port_ops after converting to new EH */
24807 -static struct ata_port_operations pdc_20621_ops = {
24808 +static const struct ata_port_operations pdc_20621_ops = {
24809 .inherits = &ata_sff_port_ops,
24810
24811 .check_atapi_dma = pdc_check_atapi_dma,
24812 diff -urNp linux-2.6.32.41/drivers/ata/sata_uli.c linux-2.6.32.41/drivers/ata/sata_uli.c
24813 --- linux-2.6.32.41/drivers/ata/sata_uli.c 2011-03-27 14:31:47.000000000 -0400
24814 +++ linux-2.6.32.41/drivers/ata/sata_uli.c 2011-04-17 15:56:46.000000000 -0400
24815 @@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht
24816 ATA_BMDMA_SHT(DRV_NAME),
24817 };
24818
24819 -static struct ata_port_operations uli_ops = {
24820 +static const struct ata_port_operations uli_ops = {
24821 .inherits = &ata_bmdma_port_ops,
24822 .scr_read = uli_scr_read,
24823 .scr_write = uli_scr_write,
24824 diff -urNp linux-2.6.32.41/drivers/ata/sata_via.c linux-2.6.32.41/drivers/ata/sata_via.c
24825 --- linux-2.6.32.41/drivers/ata/sata_via.c 2011-05-10 22:12:01.000000000 -0400
24826 +++ linux-2.6.32.41/drivers/ata/sata_via.c 2011-05-10 22:15:08.000000000 -0400
24827 @@ -115,32 +115,32 @@ static struct scsi_host_template svia_sh
24828 ATA_BMDMA_SHT(DRV_NAME),
24829 };
24830
24831 -static struct ata_port_operations svia_base_ops = {
24832 +static const struct ata_port_operations svia_base_ops = {
24833 .inherits = &ata_bmdma_port_ops,
24834 .sff_tf_load = svia_tf_load,
24835 };
24836
24837 -static struct ata_port_operations vt6420_sata_ops = {
24838 +static const struct ata_port_operations vt6420_sata_ops = {
24839 .inherits = &svia_base_ops,
24840 .freeze = svia_noop_freeze,
24841 .prereset = vt6420_prereset,
24842 .bmdma_start = vt6420_bmdma_start,
24843 };
24844
24845 -static struct ata_port_operations vt6421_pata_ops = {
24846 +static const struct ata_port_operations vt6421_pata_ops = {
24847 .inherits = &svia_base_ops,
24848 .cable_detect = vt6421_pata_cable_detect,
24849 .set_piomode = vt6421_set_pio_mode,
24850 .set_dmamode = vt6421_set_dma_mode,
24851 };
24852
24853 -static struct ata_port_operations vt6421_sata_ops = {
24854 +static const struct ata_port_operations vt6421_sata_ops = {
24855 .inherits = &svia_base_ops,
24856 .scr_read = svia_scr_read,
24857 .scr_write = svia_scr_write,
24858 };
24859
24860 -static struct ata_port_operations vt8251_ops = {
24861 +static const struct ata_port_operations vt8251_ops = {
24862 .inherits = &svia_base_ops,
24863 .hardreset = sata_std_hardreset,
24864 .scr_read = vt8251_scr_read,
24865 diff -urNp linux-2.6.32.41/drivers/ata/sata_vsc.c linux-2.6.32.41/drivers/ata/sata_vsc.c
24866 --- linux-2.6.32.41/drivers/ata/sata_vsc.c 2011-03-27 14:31:47.000000000 -0400
24867 +++ linux-2.6.32.41/drivers/ata/sata_vsc.c 2011-04-17 15:56:46.000000000 -0400
24868 @@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sat
24869 };
24870
24871
24872 -static struct ata_port_operations vsc_sata_ops = {
24873 +static const struct ata_port_operations vsc_sata_ops = {
24874 .inherits = &ata_bmdma_port_ops,
24875 /* The IRQ handling is not quite standard SFF behaviour so we
24876 cannot use the default lost interrupt handler */
24877 diff -urNp linux-2.6.32.41/drivers/atm/adummy.c linux-2.6.32.41/drivers/atm/adummy.c
24878 --- linux-2.6.32.41/drivers/atm/adummy.c 2011-03-27 14:31:47.000000000 -0400
24879 +++ linux-2.6.32.41/drivers/atm/adummy.c 2011-04-17 15:56:46.000000000 -0400
24880 @@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct
24881 vcc->pop(vcc, skb);
24882 else
24883 dev_kfree_skb_any(skb);
24884 - atomic_inc(&vcc->stats->tx);
24885 + atomic_inc_unchecked(&vcc->stats->tx);
24886
24887 return 0;
24888 }
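Alongside the libata constification, the ATM drivers switch their per-VCC statistics from atomic_inc() to atomic_inc_unchecked(). The counters touched in these hunks (tx, rx, tx_err, rx_drop and rx_err on vcc->stats) are pure statistics, so letting them wrap is harmless; the unchecked variant lets the patch's overflow checking on ordinary atomic_t reference counts leave such counters alone. A standalone userspace-style sketch of the idea, assuming only C11 atomics; the kernel's real definitions differ.

#include <stdatomic.h>

/* statistics counter: silent wrap-around is acceptable */
typedef struct { atomic_ulong counter; } atomic_unchecked_t;

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	/* plain increment, no overflow trap: wrapping a stats counter is harmless */
	atomic_fetch_add_explicit(&v->counter, 1, memory_order_relaxed);
}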
24889 diff -urNp linux-2.6.32.41/drivers/atm/ambassador.c linux-2.6.32.41/drivers/atm/ambassador.c
24890 --- linux-2.6.32.41/drivers/atm/ambassador.c 2011-03-27 14:31:47.000000000 -0400
24891 +++ linux-2.6.32.41/drivers/atm/ambassador.c 2011-04-17 15:56:46.000000000 -0400
24892 @@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev,
24893 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
24894
24895 // VC layer stats
24896 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
24897 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
24898
24899 // free the descriptor
24900 kfree (tx_descr);
24901 @@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev,
24902 dump_skb ("<<<", vc, skb);
24903
24904 // VC layer stats
24905 - atomic_inc(&atm_vcc->stats->rx);
24906 + atomic_inc_unchecked(&atm_vcc->stats->rx);
24907 __net_timestamp(skb);
24908 // end of our responsability
24909 atm_vcc->push (atm_vcc, skb);
24910 @@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev,
24911 } else {
24912 PRINTK (KERN_INFO, "dropped over-size frame");
24913 // should we count this?
24914 - atomic_inc(&atm_vcc->stats->rx_drop);
24915 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
24916 }
24917
24918 } else {
24919 @@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * at
24920 }
24921
24922 if (check_area (skb->data, skb->len)) {
24923 - atomic_inc(&atm_vcc->stats->tx_err);
24924 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
24925 return -ENOMEM; // ?
24926 }
24927
24928 diff -urNp linux-2.6.32.41/drivers/atm/atmtcp.c linux-2.6.32.41/drivers/atm/atmtcp.c
24929 --- linux-2.6.32.41/drivers/atm/atmtcp.c 2011-03-27 14:31:47.000000000 -0400
24930 +++ linux-2.6.32.41/drivers/atm/atmtcp.c 2011-04-17 15:56:46.000000000 -0400
24931 @@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc
24932 if (vcc->pop) vcc->pop(vcc,skb);
24933 else dev_kfree_skb(skb);
24934 if (dev_data) return 0;
24935 - atomic_inc(&vcc->stats->tx_err);
24936 + atomic_inc_unchecked(&vcc->stats->tx_err);
24937 return -ENOLINK;
24938 }
24939 size = skb->len+sizeof(struct atmtcp_hdr);
24940 @@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc
24941 if (!new_skb) {
24942 if (vcc->pop) vcc->pop(vcc,skb);
24943 else dev_kfree_skb(skb);
24944 - atomic_inc(&vcc->stats->tx_err);
24945 + atomic_inc_unchecked(&vcc->stats->tx_err);
24946 return -ENOBUFS;
24947 }
24948 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
24949 @@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc
24950 if (vcc->pop) vcc->pop(vcc,skb);
24951 else dev_kfree_skb(skb);
24952 out_vcc->push(out_vcc,new_skb);
24953 - atomic_inc(&vcc->stats->tx);
24954 - atomic_inc(&out_vcc->stats->rx);
24955 + atomic_inc_unchecked(&vcc->stats->tx);
24956 + atomic_inc_unchecked(&out_vcc->stats->rx);
24957 return 0;
24958 }
24959
24960 @@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc
24961 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
24962 read_unlock(&vcc_sklist_lock);
24963 if (!out_vcc) {
24964 - atomic_inc(&vcc->stats->tx_err);
24965 + atomic_inc_unchecked(&vcc->stats->tx_err);
24966 goto done;
24967 }
24968 skb_pull(skb,sizeof(struct atmtcp_hdr));
24969 @@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc
24970 __net_timestamp(new_skb);
24971 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
24972 out_vcc->push(out_vcc,new_skb);
24973 - atomic_inc(&vcc->stats->tx);
24974 - atomic_inc(&out_vcc->stats->rx);
24975 + atomic_inc_unchecked(&vcc->stats->tx);
24976 + atomic_inc_unchecked(&out_vcc->stats->rx);
24977 done:
24978 if (vcc->pop) vcc->pop(vcc,skb);
24979 else dev_kfree_skb(skb);
24980 diff -urNp linux-2.6.32.41/drivers/atm/eni.c linux-2.6.32.41/drivers/atm/eni.c
24981 --- linux-2.6.32.41/drivers/atm/eni.c 2011-03-27 14:31:47.000000000 -0400
24982 +++ linux-2.6.32.41/drivers/atm/eni.c 2011-04-17 15:56:46.000000000 -0400
24983 @@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
24984 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
24985 vcc->dev->number);
24986 length = 0;
24987 - atomic_inc(&vcc->stats->rx_err);
24988 + atomic_inc_unchecked(&vcc->stats->rx_err);
24989 }
24990 else {
24991 length = ATM_CELL_SIZE-1; /* no HEC */
24992 @@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
24993 size);
24994 }
24995 eff = length = 0;
24996 - atomic_inc(&vcc->stats->rx_err);
24997 + atomic_inc_unchecked(&vcc->stats->rx_err);
24998 }
24999 else {
25000 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
25001 @@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25002 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
25003 vcc->dev->number,vcc->vci,length,size << 2,descr);
25004 length = eff = 0;
25005 - atomic_inc(&vcc->stats->rx_err);
25006 + atomic_inc_unchecked(&vcc->stats->rx_err);
25007 }
25008 }
25009 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
25010 @@ -770,7 +770,7 @@ rx_dequeued++;
25011 vcc->push(vcc,skb);
25012 pushed++;
25013 }
25014 - atomic_inc(&vcc->stats->rx);
25015 + atomic_inc_unchecked(&vcc->stats->rx);
25016 }
25017 wake_up(&eni_dev->rx_wait);
25018 }
25019 @@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *d
25020 PCI_DMA_TODEVICE);
25021 if (vcc->pop) vcc->pop(vcc,skb);
25022 else dev_kfree_skb_irq(skb);
25023 - atomic_inc(&vcc->stats->tx);
25024 + atomic_inc_unchecked(&vcc->stats->tx);
25025 wake_up(&eni_dev->tx_wait);
25026 dma_complete++;
25027 }
25028 diff -urNp linux-2.6.32.41/drivers/atm/firestream.c linux-2.6.32.41/drivers/atm/firestream.c
25029 --- linux-2.6.32.41/drivers/atm/firestream.c 2011-03-27 14:31:47.000000000 -0400
25030 +++ linux-2.6.32.41/drivers/atm/firestream.c 2011-04-17 15:56:46.000000000 -0400
25031 @@ -748,7 +748,7 @@ static void process_txdone_queue (struct
25032 }
25033 }
25034
25035 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25036 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25037
25038 fs_dprintk (FS_DEBUG_TXMEM, "i");
25039 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
25040 @@ -815,7 +815,7 @@ static void process_incoming (struct fs_
25041 #endif
25042 skb_put (skb, qe->p1 & 0xffff);
25043 ATM_SKB(skb)->vcc = atm_vcc;
25044 - atomic_inc(&atm_vcc->stats->rx);
25045 + atomic_inc_unchecked(&atm_vcc->stats->rx);
25046 __net_timestamp(skb);
25047 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
25048 atm_vcc->push (atm_vcc, skb);
25049 @@ -836,12 +836,12 @@ static void process_incoming (struct fs_
25050 kfree (pe);
25051 }
25052 if (atm_vcc)
25053 - atomic_inc(&atm_vcc->stats->rx_drop);
25054 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25055 break;
25056 case 0x1f: /* Reassembly abort: no buffers. */
25057 /* Silently increment error counter. */
25058 if (atm_vcc)
25059 - atomic_inc(&atm_vcc->stats->rx_drop);
25060 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25061 break;
25062 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
25063 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
25064 diff -urNp linux-2.6.32.41/drivers/atm/fore200e.c linux-2.6.32.41/drivers/atm/fore200e.c
25065 --- linux-2.6.32.41/drivers/atm/fore200e.c 2011-03-27 14:31:47.000000000 -0400
25066 +++ linux-2.6.32.41/drivers/atm/fore200e.c 2011-04-17 15:56:46.000000000 -0400
25067 @@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200
25068 #endif
25069 /* check error condition */
25070 if (*entry->status & STATUS_ERROR)
25071 - atomic_inc(&vcc->stats->tx_err);
25072 + atomic_inc_unchecked(&vcc->stats->tx_err);
25073 else
25074 - atomic_inc(&vcc->stats->tx);
25075 + atomic_inc_unchecked(&vcc->stats->tx);
25076 }
25077 }
25078
25079 @@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore2
25080 if (skb == NULL) {
25081 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
25082
25083 - atomic_inc(&vcc->stats->rx_drop);
25084 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25085 return -ENOMEM;
25086 }
25087
25088 @@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore2
25089
25090 dev_kfree_skb_any(skb);
25091
25092 - atomic_inc(&vcc->stats->rx_drop);
25093 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25094 return -ENOMEM;
25095 }
25096
25097 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25098
25099 vcc->push(vcc, skb);
25100 - atomic_inc(&vcc->stats->rx);
25101 + atomic_inc_unchecked(&vcc->stats->rx);
25102
25103 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25104
25105 @@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200
25106 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
25107 fore200e->atm_dev->number,
25108 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
25109 - atomic_inc(&vcc->stats->rx_err);
25110 + atomic_inc_unchecked(&vcc->stats->rx_err);
25111 }
25112 }
25113
25114 @@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struc
25115 goto retry_here;
25116 }
25117
25118 - atomic_inc(&vcc->stats->tx_err);
25119 + atomic_inc_unchecked(&vcc->stats->tx_err);
25120
25121 fore200e->tx_sat++;
25122 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
25123 diff -urNp linux-2.6.32.41/drivers/atm/he.c linux-2.6.32.41/drivers/atm/he.c
25124 --- linux-2.6.32.41/drivers/atm/he.c 2011-03-27 14:31:47.000000000 -0400
25125 +++ linux-2.6.32.41/drivers/atm/he.c 2011-04-17 15:56:46.000000000 -0400
25126 @@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, i
25127
25128 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
25129 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
25130 - atomic_inc(&vcc->stats->rx_drop);
25131 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25132 goto return_host_buffers;
25133 }
25134
25135 @@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, i
25136 RBRQ_LEN_ERR(he_dev->rbrq_head)
25137 ? "LEN_ERR" : "",
25138 vcc->vpi, vcc->vci);
25139 - atomic_inc(&vcc->stats->rx_err);
25140 + atomic_inc_unchecked(&vcc->stats->rx_err);
25141 goto return_host_buffers;
25142 }
25143
25144 @@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, i
25145 vcc->push(vcc, skb);
25146 spin_lock(&he_dev->global_lock);
25147
25148 - atomic_inc(&vcc->stats->rx);
25149 + atomic_inc_unchecked(&vcc->stats->rx);
25150
25151 return_host_buffers:
25152 ++pdus_assembled;
25153 @@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
25154 tpd->vcc->pop(tpd->vcc, tpd->skb);
25155 else
25156 dev_kfree_skb_any(tpd->skb);
25157 - atomic_inc(&tpd->vcc->stats->tx_err);
25158 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
25159 }
25160 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
25161 return;
25162 @@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25163 vcc->pop(vcc, skb);
25164 else
25165 dev_kfree_skb_any(skb);
25166 - atomic_inc(&vcc->stats->tx_err);
25167 + atomic_inc_unchecked(&vcc->stats->tx_err);
25168 return -EINVAL;
25169 }
25170
25171 @@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25172 vcc->pop(vcc, skb);
25173 else
25174 dev_kfree_skb_any(skb);
25175 - atomic_inc(&vcc->stats->tx_err);
25176 + atomic_inc_unchecked(&vcc->stats->tx_err);
25177 return -EINVAL;
25178 }
25179 #endif
25180 @@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25181 vcc->pop(vcc, skb);
25182 else
25183 dev_kfree_skb_any(skb);
25184 - atomic_inc(&vcc->stats->tx_err);
25185 + atomic_inc_unchecked(&vcc->stats->tx_err);
25186 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25187 return -ENOMEM;
25188 }
25189 @@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25190 vcc->pop(vcc, skb);
25191 else
25192 dev_kfree_skb_any(skb);
25193 - atomic_inc(&vcc->stats->tx_err);
25194 + atomic_inc_unchecked(&vcc->stats->tx_err);
25195 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25196 return -ENOMEM;
25197 }
25198 @@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25199 __enqueue_tpd(he_dev, tpd, cid);
25200 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25201
25202 - atomic_inc(&vcc->stats->tx);
25203 + atomic_inc_unchecked(&vcc->stats->tx);
25204
25205 return 0;
25206 }
25207 diff -urNp linux-2.6.32.41/drivers/atm/horizon.c linux-2.6.32.41/drivers/atm/horizon.c
25208 --- linux-2.6.32.41/drivers/atm/horizon.c 2011-03-27 14:31:47.000000000 -0400
25209 +++ linux-2.6.32.41/drivers/atm/horizon.c 2011-04-17 15:56:46.000000000 -0400
25210 @@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev,
25211 {
25212 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
25213 // VC layer stats
25214 - atomic_inc(&vcc->stats->rx);
25215 + atomic_inc_unchecked(&vcc->stats->rx);
25216 __net_timestamp(skb);
25217 // end of our responsability
25218 vcc->push (vcc, skb);
25219 @@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const
25220 dev->tx_iovec = NULL;
25221
25222 // VC layer stats
25223 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25224 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25225
25226 // free the skb
25227 hrz_kfree_skb (skb);
25228 diff -urNp linux-2.6.32.41/drivers/atm/idt77252.c linux-2.6.32.41/drivers/atm/idt77252.c
25229 --- linux-2.6.32.41/drivers/atm/idt77252.c 2011-03-27 14:31:47.000000000 -0400
25230 +++ linux-2.6.32.41/drivers/atm/idt77252.c 2011-04-17 15:56:46.000000000 -0400
25231 @@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, str
25232 else
25233 dev_kfree_skb(skb);
25234
25235 - atomic_inc(&vcc->stats->tx);
25236 + atomic_inc_unchecked(&vcc->stats->tx);
25237 }
25238
25239 atomic_dec(&scq->used);
25240 @@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, st
25241 if ((sb = dev_alloc_skb(64)) == NULL) {
25242 printk("%s: Can't allocate buffers for aal0.\n",
25243 card->name);
25244 - atomic_add(i, &vcc->stats->rx_drop);
25245 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
25246 break;
25247 }
25248 if (!atm_charge(vcc, sb->truesize)) {
25249 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
25250 card->name);
25251 - atomic_add(i - 1, &vcc->stats->rx_drop);
25252 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
25253 dev_kfree_skb(sb);
25254 break;
25255 }
25256 @@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, st
25257 ATM_SKB(sb)->vcc = vcc;
25258 __net_timestamp(sb);
25259 vcc->push(vcc, sb);
25260 - atomic_inc(&vcc->stats->rx);
25261 + atomic_inc_unchecked(&vcc->stats->rx);
25262
25263 cell += ATM_CELL_PAYLOAD;
25264 }
25265 @@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, st
25266 "(CDC: %08x)\n",
25267 card->name, len, rpp->len, readl(SAR_REG_CDC));
25268 recycle_rx_pool_skb(card, rpp);
25269 - atomic_inc(&vcc->stats->rx_err);
25270 + atomic_inc_unchecked(&vcc->stats->rx_err);
25271 return;
25272 }
25273 if (stat & SAR_RSQE_CRC) {
25274 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
25275 recycle_rx_pool_skb(card, rpp);
25276 - atomic_inc(&vcc->stats->rx_err);
25277 + atomic_inc_unchecked(&vcc->stats->rx_err);
25278 return;
25279 }
25280 if (skb_queue_len(&rpp->queue) > 1) {
25281 @@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, st
25282 RXPRINTK("%s: Can't alloc RX skb.\n",
25283 card->name);
25284 recycle_rx_pool_skb(card, rpp);
25285 - atomic_inc(&vcc->stats->rx_err);
25286 + atomic_inc_unchecked(&vcc->stats->rx_err);
25287 return;
25288 }
25289 if (!atm_charge(vcc, skb->truesize)) {
25290 @@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, st
25291 __net_timestamp(skb);
25292
25293 vcc->push(vcc, skb);
25294 - atomic_inc(&vcc->stats->rx);
25295 + atomic_inc_unchecked(&vcc->stats->rx);
25296
25297 return;
25298 }
25299 @@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, st
25300 __net_timestamp(skb);
25301
25302 vcc->push(vcc, skb);
25303 - atomic_inc(&vcc->stats->rx);
25304 + atomic_inc_unchecked(&vcc->stats->rx);
25305
25306 if (skb->truesize > SAR_FB_SIZE_3)
25307 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
25308 @@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
25309 if (vcc->qos.aal != ATM_AAL0) {
25310 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
25311 card->name, vpi, vci);
25312 - atomic_inc(&vcc->stats->rx_drop);
25313 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25314 goto drop;
25315 }
25316
25317 if ((sb = dev_alloc_skb(64)) == NULL) {
25318 printk("%s: Can't allocate buffers for AAL0.\n",
25319 card->name);
25320 - atomic_inc(&vcc->stats->rx_err);
25321 + atomic_inc_unchecked(&vcc->stats->rx_err);
25322 goto drop;
25323 }
25324
25325 @@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
25326 ATM_SKB(sb)->vcc = vcc;
25327 __net_timestamp(sb);
25328 vcc->push(vcc, sb);
25329 - atomic_inc(&vcc->stats->rx);
25330 + atomic_inc_unchecked(&vcc->stats->rx);
25331
25332 drop:
25333 skb_pull(queue, 64);
25334 @@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
25335
25336 if (vc == NULL) {
25337 printk("%s: NULL connection in send().\n", card->name);
25338 - atomic_inc(&vcc->stats->tx_err);
25339 + atomic_inc_unchecked(&vcc->stats->tx_err);
25340 dev_kfree_skb(skb);
25341 return -EINVAL;
25342 }
25343 if (!test_bit(VCF_TX, &vc->flags)) {
25344 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
25345 - atomic_inc(&vcc->stats->tx_err);
25346 + atomic_inc_unchecked(&vcc->stats->tx_err);
25347 dev_kfree_skb(skb);
25348 return -EINVAL;
25349 }
25350 @@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
25351 break;
25352 default:
25353 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
25354 - atomic_inc(&vcc->stats->tx_err);
25355 + atomic_inc_unchecked(&vcc->stats->tx_err);
25356 dev_kfree_skb(skb);
25357 return -EINVAL;
25358 }
25359
25360 if (skb_shinfo(skb)->nr_frags != 0) {
25361 printk("%s: No scatter-gather yet.\n", card->name);
25362 - atomic_inc(&vcc->stats->tx_err);
25363 + atomic_inc_unchecked(&vcc->stats->tx_err);
25364 dev_kfree_skb(skb);
25365 return -EINVAL;
25366 }
25367 @@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
25368
25369 err = queue_skb(card, vc, skb, oam);
25370 if (err) {
25371 - atomic_inc(&vcc->stats->tx_err);
25372 + atomic_inc_unchecked(&vcc->stats->tx_err);
25373 dev_kfree_skb(skb);
25374 return err;
25375 }
25376 @@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
25377 skb = dev_alloc_skb(64);
25378 if (!skb) {
25379 printk("%s: Out of memory in send_oam().\n", card->name);
25380 - atomic_inc(&vcc->stats->tx_err);
25381 + atomic_inc_unchecked(&vcc->stats->tx_err);
25382 return -ENOMEM;
25383 }
25384 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
25385 diff -urNp linux-2.6.32.41/drivers/atm/iphase.c linux-2.6.32.41/drivers/atm/iphase.c
25386 --- linux-2.6.32.41/drivers/atm/iphase.c 2011-03-27 14:31:47.000000000 -0400
25387 +++ linux-2.6.32.41/drivers/atm/iphase.c 2011-04-17 15:56:46.000000000 -0400
25388 @@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
25389 status = (u_short) (buf_desc_ptr->desc_mode);
25390 if (status & (RX_CER | RX_PTE | RX_OFL))
25391 {
25392 - atomic_inc(&vcc->stats->rx_err);
25393 + atomic_inc_unchecked(&vcc->stats->rx_err);
25394 IF_ERR(printk("IA: bad packet, dropping it");)
25395 if (status & RX_CER) {
25396 IF_ERR(printk(" cause: packet CRC error\n");)
25397 @@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
25398 len = dma_addr - buf_addr;
25399 if (len > iadev->rx_buf_sz) {
25400 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
25401 - atomic_inc(&vcc->stats->rx_err);
25402 + atomic_inc_unchecked(&vcc->stats->rx_err);
25403 goto out_free_desc;
25404 }
25405
25406 @@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *
25407 ia_vcc = INPH_IA_VCC(vcc);
25408 if (ia_vcc == NULL)
25409 {
25410 - atomic_inc(&vcc->stats->rx_err);
25411 + atomic_inc_unchecked(&vcc->stats->rx_err);
25412 dev_kfree_skb_any(skb);
25413 atm_return(vcc, atm_guess_pdu2truesize(len));
25414 goto INCR_DLE;
25415 @@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *
25416 if ((length > iadev->rx_buf_sz) || (length >
25417 (skb->len - sizeof(struct cpcs_trailer))))
25418 {
25419 - atomic_inc(&vcc->stats->rx_err);
25420 + atomic_inc_unchecked(&vcc->stats->rx_err);
25421 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
25422 length, skb->len);)
25423 dev_kfree_skb_any(skb);
25424 @@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *
25425
25426 IF_RX(printk("rx_dle_intr: skb push");)
25427 vcc->push(vcc,skb);
25428 - atomic_inc(&vcc->stats->rx);
25429 + atomic_inc_unchecked(&vcc->stats->rx);
25430 iadev->rx_pkt_cnt++;
25431 }
25432 INCR_DLE:
25433 @@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev,
25434 {
25435 struct k_sonet_stats *stats;
25436 stats = &PRIV(_ia_dev[board])->sonet_stats;
25437 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
25438 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
25439 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
25440 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
25441 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
25442 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
25443 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
25444 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
25445 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
25446 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
25447 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
25448 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
25449 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
25450 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
25451 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
25452 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
25453 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
25454 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
25455 }
25456 ia_cmds.status = 0;
25457 break;
25458 @@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
25459 if ((desc == 0) || (desc > iadev->num_tx_desc))
25460 {
25461 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
25462 - atomic_inc(&vcc->stats->tx);
25463 + atomic_inc_unchecked(&vcc->stats->tx);
25464 if (vcc->pop)
25465 vcc->pop(vcc, skb);
25466 else
25467 @@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
25468 ATM_DESC(skb) = vcc->vci;
25469 skb_queue_tail(&iadev->tx_dma_q, skb);
25470
25471 - atomic_inc(&vcc->stats->tx);
25472 + atomic_inc_unchecked(&vcc->stats->tx);
25473 iadev->tx_pkt_cnt++;
25474 /* Increment transaction counter */
25475 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
25476
25477 #if 0
25478 /* add flow control logic */
25479 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
25480 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
25481 if (iavcc->vc_desc_cnt > 10) {
25482 vcc->tx_quota = vcc->tx_quota * 3 / 4;
25483 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
25484 diff -urNp linux-2.6.32.41/drivers/atm/lanai.c linux-2.6.32.41/drivers/atm/lanai.c
25485 --- linux-2.6.32.41/drivers/atm/lanai.c 2011-03-27 14:31:47.000000000 -0400
25486 +++ linux-2.6.32.41/drivers/atm/lanai.c 2011-04-17 15:56:46.000000000 -0400
25487 @@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct l
25488 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
25489 lanai_endtx(lanai, lvcc);
25490 lanai_free_skb(lvcc->tx.atmvcc, skb);
25491 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
25492 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
25493 }
25494
25495 /* Try to fill the buffer - don't call unless there is backlog */
25496 @@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc
25497 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
25498 __net_timestamp(skb);
25499 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
25500 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
25501 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
25502 out:
25503 lvcc->rx.buf.ptr = end;
25504 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
25505 @@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_d
25506 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
25507 "vcc %d\n", lanai->number, (unsigned int) s, vci);
25508 lanai->stats.service_rxnotaal5++;
25509 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25510 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25511 return 0;
25512 }
25513 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
25514 @@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_d
25515 int bytes;
25516 read_unlock(&vcc_sklist_lock);
25517 DPRINTK("got trashed rx pdu on vci %d\n", vci);
25518 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25519 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25520 lvcc->stats.x.aal5.service_trash++;
25521 bytes = (SERVICE_GET_END(s) * 16) -
25522 (((unsigned long) lvcc->rx.buf.ptr) -
25523 @@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_d
25524 }
25525 if (s & SERVICE_STREAM) {
25526 read_unlock(&vcc_sklist_lock);
25527 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25528 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25529 lvcc->stats.x.aal5.service_stream++;
25530 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
25531 "PDU on VCI %d!\n", lanai->number, vci);
25532 @@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_d
25533 return 0;
25534 }
25535 DPRINTK("got rx crc error on vci %d\n", vci);
25536 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25537 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25538 lvcc->stats.x.aal5.service_rxcrc++;
25539 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
25540 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
25541 diff -urNp linux-2.6.32.41/drivers/atm/nicstar.c linux-2.6.32.41/drivers/atm/nicstar.c
25542 --- linux-2.6.32.41/drivers/atm/nicstar.c 2011-03-27 14:31:47.000000000 -0400
25543 +++ linux-2.6.32.41/drivers/atm/nicstar.c 2011-04-17 15:56:46.000000000 -0400
25544 @@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc,
25545 if ((vc = (vc_map *) vcc->dev_data) == NULL)
25546 {
25547 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
25548 - atomic_inc(&vcc->stats->tx_err);
25549 + atomic_inc_unchecked(&vcc->stats->tx_err);
25550 dev_kfree_skb_any(skb);
25551 return -EINVAL;
25552 }
25553 @@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc,
25554 if (!vc->tx)
25555 {
25556 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
25557 - atomic_inc(&vcc->stats->tx_err);
25558 + atomic_inc_unchecked(&vcc->stats->tx_err);
25559 dev_kfree_skb_any(skb);
25560 return -EINVAL;
25561 }
25562 @@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc,
25563 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
25564 {
25565 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
25566 - atomic_inc(&vcc->stats->tx_err);
25567 + atomic_inc_unchecked(&vcc->stats->tx_err);
25568 dev_kfree_skb_any(skb);
25569 return -EINVAL;
25570 }
25571 @@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc,
25572 if (skb_shinfo(skb)->nr_frags != 0)
25573 {
25574 printk("nicstar%d: No scatter-gather yet.\n", card->index);
25575 - atomic_inc(&vcc->stats->tx_err);
25576 + atomic_inc_unchecked(&vcc->stats->tx_err);
25577 dev_kfree_skb_any(skb);
25578 return -EINVAL;
25579 }
25580 @@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc,
25581
25582 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
25583 {
25584 - atomic_inc(&vcc->stats->tx_err);
25585 + atomic_inc_unchecked(&vcc->stats->tx_err);
25586 dev_kfree_skb_any(skb);
25587 return -EIO;
25588 }
25589 - atomic_inc(&vcc->stats->tx);
25590 + atomic_inc_unchecked(&vcc->stats->tx);
25591
25592 return 0;
25593 }
25594 @@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_
25595 {
25596 printk("nicstar%d: Can't allocate buffers for aal0.\n",
25597 card->index);
25598 - atomic_add(i,&vcc->stats->rx_drop);
25599 + atomic_add_unchecked(i,&vcc->stats->rx_drop);
25600 break;
25601 }
25602 if (!atm_charge(vcc, sb->truesize))
25603 {
25604 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
25605 card->index);
25606 - atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
25607 + atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
25608 dev_kfree_skb_any(sb);
25609 break;
25610 }
25611 @@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_
25612 ATM_SKB(sb)->vcc = vcc;
25613 __net_timestamp(sb);
25614 vcc->push(vcc, sb);
25615 - atomic_inc(&vcc->stats->rx);
25616 + atomic_inc_unchecked(&vcc->stats->rx);
25617 cell += ATM_CELL_PAYLOAD;
25618 }
25619
25620 @@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_
25621 if (iovb == NULL)
25622 {
25623 printk("nicstar%d: Out of iovec buffers.\n", card->index);
25624 - atomic_inc(&vcc->stats->rx_drop);
25625 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25626 recycle_rx_buf(card, skb);
25627 return;
25628 }
25629 @@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_
25630 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
25631 {
25632 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
25633 - atomic_inc(&vcc->stats->rx_err);
25634 + atomic_inc_unchecked(&vcc->stats->rx_err);
25635 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
25636 NS_SKB(iovb)->iovcnt = 0;
25637 iovb->len = 0;
25638 @@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_
25639 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
25640 card->index);
25641 which_list(card, skb);
25642 - atomic_inc(&vcc->stats->rx_err);
25643 + atomic_inc_unchecked(&vcc->stats->rx_err);
25644 recycle_rx_buf(card, skb);
25645 vc->rx_iov = NULL;
25646 recycle_iov_buf(card, iovb);
25647 @@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_
25648 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
25649 card->index);
25650 which_list(card, skb);
25651 - atomic_inc(&vcc->stats->rx_err);
25652 + atomic_inc_unchecked(&vcc->stats->rx_err);
25653 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
25654 NS_SKB(iovb)->iovcnt);
25655 vc->rx_iov = NULL;
25656 @@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_
25657 printk(" - PDU size mismatch.\n");
25658 else
25659 printk(".\n");
25660 - atomic_inc(&vcc->stats->rx_err);
25661 + atomic_inc_unchecked(&vcc->stats->rx_err);
25662 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
25663 NS_SKB(iovb)->iovcnt);
25664 vc->rx_iov = NULL;
25665 @@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_
25666 if (!atm_charge(vcc, skb->truesize))
25667 {
25668 push_rxbufs(card, skb);
25669 - atomic_inc(&vcc->stats->rx_drop);
25670 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25671 }
25672 else
25673 {
25674 @@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_
25675 ATM_SKB(skb)->vcc = vcc;
25676 __net_timestamp(skb);
25677 vcc->push(vcc, skb);
25678 - atomic_inc(&vcc->stats->rx);
25679 + atomic_inc_unchecked(&vcc->stats->rx);
25680 }
25681 }
25682 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
25683 @@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_
25684 if (!atm_charge(vcc, sb->truesize))
25685 {
25686 push_rxbufs(card, sb);
25687 - atomic_inc(&vcc->stats->rx_drop);
25688 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25689 }
25690 else
25691 {
25692 @@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_
25693 ATM_SKB(sb)->vcc = vcc;
25694 __net_timestamp(sb);
25695 vcc->push(vcc, sb);
25696 - atomic_inc(&vcc->stats->rx);
25697 + atomic_inc_unchecked(&vcc->stats->rx);
25698 }
25699
25700 push_rxbufs(card, skb);
25701 @@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_
25702 if (!atm_charge(vcc, skb->truesize))
25703 {
25704 push_rxbufs(card, skb);
25705 - atomic_inc(&vcc->stats->rx_drop);
25706 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25707 }
25708 else
25709 {
25710 @@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_
25711 ATM_SKB(skb)->vcc = vcc;
25712 __net_timestamp(skb);
25713 vcc->push(vcc, skb);
25714 - atomic_inc(&vcc->stats->rx);
25715 + atomic_inc_unchecked(&vcc->stats->rx);
25716 }
25717
25718 push_rxbufs(card, sb);
25719 @@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_
25720 if (hb == NULL)
25721 {
25722 printk("nicstar%d: Out of huge buffers.\n", card->index);
25723 - atomic_inc(&vcc->stats->rx_drop);
25724 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25725 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
25726 NS_SKB(iovb)->iovcnt);
25727 vc->rx_iov = NULL;
25728 @@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_
25729 }
25730 else
25731 dev_kfree_skb_any(hb);
25732 - atomic_inc(&vcc->stats->rx_drop);
25733 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25734 }
25735 else
25736 {
25737 @@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_
25738 #endif /* NS_USE_DESTRUCTORS */
25739 __net_timestamp(hb);
25740 vcc->push(vcc, hb);
25741 - atomic_inc(&vcc->stats->rx);
25742 + atomic_inc_unchecked(&vcc->stats->rx);
25743 }
25744 }
25745
25746 diff -urNp linux-2.6.32.41/drivers/atm/solos-pci.c linux-2.6.32.41/drivers/atm/solos-pci.c
25747 --- linux-2.6.32.41/drivers/atm/solos-pci.c 2011-04-17 17:00:52.000000000 -0400
25748 +++ linux-2.6.32.41/drivers/atm/solos-pci.c 2011-05-16 21:46:57.000000000 -0400
25749 @@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
25750 }
25751 atm_charge(vcc, skb->truesize);
25752 vcc->push(vcc, skb);
25753 - atomic_inc(&vcc->stats->rx);
25754 + atomic_inc_unchecked(&vcc->stats->rx);
25755 break;
25756
25757 case PKT_STATUS:
25758 @@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *
25759 char msg[500];
25760 char item[10];
25761
25762 + pax_track_stack();
25763 +
25764 len = buf->len;
25765 for (i = 0; i < len; i++){
25766 if(i % 8 == 0)
25767 @@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_car
25768 vcc = SKB_CB(oldskb)->vcc;
25769
25770 if (vcc) {
25771 - atomic_inc(&vcc->stats->tx);
25772 + atomic_inc_unchecked(&vcc->stats->tx);
25773 solos_pop(vcc, oldskb);
25774 } else
25775 dev_kfree_skb_irq(oldskb);
25776 diff -urNp linux-2.6.32.41/drivers/atm/suni.c linux-2.6.32.41/drivers/atm/suni.c
25777 --- linux-2.6.32.41/drivers/atm/suni.c 2011-03-27 14:31:47.000000000 -0400
25778 +++ linux-2.6.32.41/drivers/atm/suni.c 2011-04-17 15:56:46.000000000 -0400
25779 @@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
25780
25781
25782 #define ADD_LIMITED(s,v) \
25783 - atomic_add((v),&stats->s); \
25784 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
25785 + atomic_add_unchecked((v),&stats->s); \
25786 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
25787
25788
25789 static void suni_hz(unsigned long from_timer)
25790 diff -urNp linux-2.6.32.41/drivers/atm/uPD98402.c linux-2.6.32.41/drivers/atm/uPD98402.c
25791 --- linux-2.6.32.41/drivers/atm/uPD98402.c 2011-03-27 14:31:47.000000000 -0400
25792 +++ linux-2.6.32.41/drivers/atm/uPD98402.c 2011-04-17 15:56:46.000000000 -0400
25793 @@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *d
25794 struct sonet_stats tmp;
25795 int error = 0;
25796
25797 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
25798 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
25799 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
25800 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
25801 if (zero && !error) {
25802 @@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev
25803
25804
25805 #define ADD_LIMITED(s,v) \
25806 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
25807 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
25808 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
25809 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
25810 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
25811 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
25812
25813
25814 static void stat_event(struct atm_dev *dev)
25815 @@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev
25816 if (reason & uPD98402_INT_PFM) stat_event(dev);
25817 if (reason & uPD98402_INT_PCO) {
25818 (void) GET(PCOCR); /* clear interrupt cause */
25819 - atomic_add(GET(HECCT),
25820 + atomic_add_unchecked(GET(HECCT),
25821 &PRIV(dev)->sonet_stats.uncorr_hcs);
25822 }
25823 if ((reason & uPD98402_INT_RFO) &&
25824 @@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev
25825 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
25826 uPD98402_INT_LOS),PIMR); /* enable them */
25827 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
25828 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
25829 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
25830 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
25831 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
25832 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
25833 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
25834 return 0;
25835 }
25836
25837 diff -urNp linux-2.6.32.41/drivers/atm/zatm.c linux-2.6.32.41/drivers/atm/zatm.c
25838 --- linux-2.6.32.41/drivers/atm/zatm.c 2011-03-27 14:31:47.000000000 -0400
25839 +++ linux-2.6.32.41/drivers/atm/zatm.c 2011-04-17 15:56:46.000000000 -0400
25840 @@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
25841 }
25842 if (!size) {
25843 dev_kfree_skb_irq(skb);
25844 - if (vcc) atomic_inc(&vcc->stats->rx_err);
25845 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
25846 continue;
25847 }
25848 if (!atm_charge(vcc,skb->truesize)) {
25849 @@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
25850 skb->len = size;
25851 ATM_SKB(skb)->vcc = vcc;
25852 vcc->push(vcc,skb);
25853 - atomic_inc(&vcc->stats->rx);
25854 + atomic_inc_unchecked(&vcc->stats->rx);
25855 }
25856 zout(pos & 0xffff,MTA(mbx));
25857 #if 0 /* probably a stupid idea */
25858 @@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
25859 skb_queue_head(&zatm_vcc->backlog,skb);
25860 break;
25861 }
25862 - atomic_inc(&vcc->stats->tx);
25863 + atomic_inc_unchecked(&vcc->stats->tx);
25864 wake_up(&zatm_vcc->tx_wait);
25865 }
25866
25867 diff -urNp linux-2.6.32.41/drivers/base/bus.c linux-2.6.32.41/drivers/base/bus.c
25868 --- linux-2.6.32.41/drivers/base/bus.c 2011-03-27 14:31:47.000000000 -0400
25869 +++ linux-2.6.32.41/drivers/base/bus.c 2011-04-17 15:56:46.000000000 -0400
25870 @@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kob
25871 return ret;
25872 }
25873
25874 -static struct sysfs_ops driver_sysfs_ops = {
25875 +static const struct sysfs_ops driver_sysfs_ops = {
25876 .show = drv_attr_show,
25877 .store = drv_attr_store,
25878 };
25879 @@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kob
25880 return ret;
25881 }
25882
25883 -static struct sysfs_ops bus_sysfs_ops = {
25884 +static const struct sysfs_ops bus_sysfs_ops = {
25885 .show = bus_attr_show,
25886 .store = bus_attr_store,
25887 };
25888 @@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset
25889 return 0;
25890 }
25891
25892 -static struct kset_uevent_ops bus_uevent_ops = {
25893 +static const struct kset_uevent_ops bus_uevent_ops = {
25894 .filter = bus_uevent_filter,
25895 };
25896
25897 diff -urNp linux-2.6.32.41/drivers/base/class.c linux-2.6.32.41/drivers/base/class.c
25898 --- linux-2.6.32.41/drivers/base/class.c 2011-03-27 14:31:47.000000000 -0400
25899 +++ linux-2.6.32.41/drivers/base/class.c 2011-04-17 15:56:46.000000000 -0400
25900 @@ -63,7 +63,7 @@ static void class_release(struct kobject
25901 kfree(cp);
25902 }
25903
25904 -static struct sysfs_ops class_sysfs_ops = {
25905 +static const struct sysfs_ops class_sysfs_ops = {
25906 .show = class_attr_show,
25907 .store = class_attr_store,
25908 };
25909 diff -urNp linux-2.6.32.41/drivers/base/core.c linux-2.6.32.41/drivers/base/core.c
25910 --- linux-2.6.32.41/drivers/base/core.c 2011-03-27 14:31:47.000000000 -0400
25911 +++ linux-2.6.32.41/drivers/base/core.c 2011-04-17 15:56:46.000000000 -0400
25912 @@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kob
25913 return ret;
25914 }
25915
25916 -static struct sysfs_ops dev_sysfs_ops = {
25917 +static const struct sysfs_ops dev_sysfs_ops = {
25918 .show = dev_attr_show,
25919 .store = dev_attr_store,
25920 };
25921 @@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset,
25922 return retval;
25923 }
25924
25925 -static struct kset_uevent_ops device_uevent_ops = {
25926 +static const struct kset_uevent_ops device_uevent_ops = {
25927 .filter = dev_uevent_filter,
25928 .name = dev_uevent_name,
25929 .uevent = dev_uevent,
25930 diff -urNp linux-2.6.32.41/drivers/base/memory.c linux-2.6.32.41/drivers/base/memory.c
25931 --- linux-2.6.32.41/drivers/base/memory.c 2011-03-27 14:31:47.000000000 -0400
25932 +++ linux-2.6.32.41/drivers/base/memory.c 2011-04-17 15:56:46.000000000 -0400
25933 @@ -44,7 +44,7 @@ static int memory_uevent(struct kset *ks
25934 return retval;
25935 }
25936
25937 -static struct kset_uevent_ops memory_uevent_ops = {
25938 +static const struct kset_uevent_ops memory_uevent_ops = {
25939 .name = memory_uevent_name,
25940 .uevent = memory_uevent,
25941 };
25942 diff -urNp linux-2.6.32.41/drivers/base/sys.c linux-2.6.32.41/drivers/base/sys.c
25943 --- linux-2.6.32.41/drivers/base/sys.c 2011-03-27 14:31:47.000000000 -0400
25944 +++ linux-2.6.32.41/drivers/base/sys.c 2011-04-17 15:56:46.000000000 -0400
25945 @@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struc
25946 return -EIO;
25947 }
25948
25949 -static struct sysfs_ops sysfs_ops = {
25950 +static const struct sysfs_ops sysfs_ops = {
25951 .show = sysdev_show,
25952 .store = sysdev_store,
25953 };
25954 @@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct
25955 return -EIO;
25956 }
25957
25958 -static struct sysfs_ops sysfs_class_ops = {
25959 +static const struct sysfs_ops sysfs_class_ops = {
25960 .show = sysdev_class_show,
25961 .store = sysdev_class_store,
25962 };
25963 diff -urNp linux-2.6.32.41/drivers/block/cciss.c linux-2.6.32.41/drivers/block/cciss.c
25964 --- linux-2.6.32.41/drivers/block/cciss.c 2011-03-27 14:31:47.000000000 -0400
25965 +++ linux-2.6.32.41/drivers/block/cciss.c 2011-04-17 15:56:46.000000000 -0400
25966 @@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct
25967 int err;
25968 u32 cp;
25969
25970 + memset(&arg64, 0, sizeof(arg64));
25971 +
25972 err = 0;
25973 err |=
25974 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
25975 diff -urNp linux-2.6.32.41/drivers/block/cpqarray.c linux-2.6.32.41/drivers/block/cpqarray.c
25976 --- linux-2.6.32.41/drivers/block/cpqarray.c 2011-03-27 14:31:47.000000000 -0400
25977 +++ linux-2.6.32.41/drivers/block/cpqarray.c 2011-05-16 21:46:57.000000000 -0400
25978 @@ -896,6 +896,8 @@ static void do_ida_request(struct reques
25979 struct scatterlist tmp_sg[SG_MAX];
25980 int i, dir, seg;
25981
25982 + pax_track_stack();
25983 +
25984 if (blk_queue_plugged(q))
25985 goto startio;
25986
25987 diff -urNp linux-2.6.32.41/drivers/block/DAC960.c linux-2.6.32.41/drivers/block/DAC960.c
25988 --- linux-2.6.32.41/drivers/block/DAC960.c 2011-03-27 14:31:47.000000000 -0400
25989 +++ linux-2.6.32.41/drivers/block/DAC960.c 2011-05-16 21:46:57.000000000 -0400
25990 @@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfigur
25991 unsigned long flags;
25992 int Channel, TargetID;
25993
25994 + pax_track_stack();
25995 +
25996 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
25997 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
25998 sizeof(DAC960_SCSI_Inquiry_T) +
25999 diff -urNp linux-2.6.32.41/drivers/block/nbd.c linux-2.6.32.41/drivers/block/nbd.c
26000 --- linux-2.6.32.41/drivers/block/nbd.c 2011-03-27 14:31:47.000000000 -0400
26001 +++ linux-2.6.32.41/drivers/block/nbd.c 2011-05-16 21:46:57.000000000 -0400
26002 @@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *
26003 struct kvec iov;
26004 sigset_t blocked, oldset;
26005
26006 + pax_track_stack();
26007 +
26008 if (unlikely(!sock)) {
26009 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
26010 lo->disk->disk_name, (send ? "send" : "recv"));
26011 @@ -569,6 +571,8 @@ static void do_nbd_request(struct reques
26012 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
26013 unsigned int cmd, unsigned long arg)
26014 {
26015 + pax_track_stack();
26016 +
26017 switch (cmd) {
26018 case NBD_DISCONNECT: {
26019 struct request sreq;
26020 diff -urNp linux-2.6.32.41/drivers/block/pktcdvd.c linux-2.6.32.41/drivers/block/pktcdvd.c
26021 --- linux-2.6.32.41/drivers/block/pktcdvd.c 2011-03-27 14:31:47.000000000 -0400
26022 +++ linux-2.6.32.41/drivers/block/pktcdvd.c 2011-04-17 15:56:46.000000000 -0400
26023 @@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kob
26024 return len;
26025 }
26026
26027 -static struct sysfs_ops kobj_pkt_ops = {
26028 +static const struct sysfs_ops kobj_pkt_ops = {
26029 .show = kobj_pkt_show,
26030 .store = kobj_pkt_store
26031 };
26032 diff -urNp linux-2.6.32.41/drivers/char/agp/frontend.c linux-2.6.32.41/drivers/char/agp/frontend.c
26033 --- linux-2.6.32.41/drivers/char/agp/frontend.c 2011-03-27 14:31:47.000000000 -0400
26034 +++ linux-2.6.32.41/drivers/char/agp/frontend.c 2011-04-17 15:56:46.000000000 -0400
26035 @@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct ag
26036 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
26037 return -EFAULT;
26038
26039 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
26040 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
26041 return -EFAULT;
26042
26043 client = agp_find_client_by_pid(reserve.pid);
26044 diff -urNp linux-2.6.32.41/drivers/char/briq_panel.c linux-2.6.32.41/drivers/char/briq_panel.c
26045 --- linux-2.6.32.41/drivers/char/briq_panel.c 2011-03-27 14:31:47.000000000 -0400
26046 +++ linux-2.6.32.41/drivers/char/briq_panel.c 2011-04-18 19:48:57.000000000 -0400
26047 @@ -10,6 +10,7 @@
26048 #include <linux/types.h>
26049 #include <linux/errno.h>
26050 #include <linux/tty.h>
26051 +#include <linux/mutex.h>
26052 #include <linux/timer.h>
26053 #include <linux/kernel.h>
26054 #include <linux/wait.h>
26055 @@ -36,6 +37,7 @@ static int vfd_is_open;
26056 static unsigned char vfd[40];
26057 static int vfd_cursor;
26058 static unsigned char ledpb, led;
26059 +static DEFINE_MUTEX(vfd_mutex);
26060
26061 static void update_vfd(void)
26062 {
26063 @@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct f
26064 if (!vfd_is_open)
26065 return -EBUSY;
26066
26067 + mutex_lock(&vfd_mutex);
26068 for (;;) {
26069 char c;
26070 if (!indx)
26071 break;
26072 - if (get_user(c, buf))
26073 + if (get_user(c, buf)) {
26074 + mutex_unlock(&vfd_mutex);
26075 return -EFAULT;
26076 + }
26077 if (esc) {
26078 set_led(c);
26079 esc = 0;
26080 @@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct f
26081 buf++;
26082 }
26083 update_vfd();
26084 + mutex_unlock(&vfd_mutex);
26085
26086 return len;
26087 }
26088 diff -urNp linux-2.6.32.41/drivers/char/genrtc.c linux-2.6.32.41/drivers/char/genrtc.c
26089 --- linux-2.6.32.41/drivers/char/genrtc.c 2011-03-27 14:31:47.000000000 -0400
26090 +++ linux-2.6.32.41/drivers/char/genrtc.c 2011-04-18 19:45:42.000000000 -0400
26091 @@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *i
26092 switch (cmd) {
26093
26094 case RTC_PLL_GET:
26095 + memset(&pll, 0, sizeof(pll));
26096 if (get_rtc_pll(&pll))
26097 return -EINVAL;
26098 else
26099 diff -urNp linux-2.6.32.41/drivers/char/hpet.c linux-2.6.32.41/drivers/char/hpet.c
26100 --- linux-2.6.32.41/drivers/char/hpet.c 2011-03-27 14:31:47.000000000 -0400
26101 +++ linux-2.6.32.41/drivers/char/hpet.c 2011-04-23 12:56:11.000000000 -0400
26102 @@ -430,7 +430,7 @@ static int hpet_release(struct inode *in
26103 return 0;
26104 }
26105
26106 -static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
26107 +static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
26108
26109 static int
26110 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
26111 @@ -565,7 +565,7 @@ static inline unsigned long hpet_time_di
26112 }
26113
26114 static int
26115 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
26116 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
26117 {
26118 struct hpet_timer __iomem *timer;
26119 struct hpet __iomem *hpet;
26120 @@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp,
26121 {
26122 struct hpet_info info;
26123
26124 + memset(&info, 0, sizeof(info));
26125 +
26126 if (devp->hd_ireqfreq)
26127 info.hi_ireqfreq =
26128 hpet_time_div(hpetp, devp->hd_ireqfreq);
26129 - else
26130 - info.hi_ireqfreq = 0;
26131 info.hi_flags =
26132 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
26133 info.hi_hpet = hpetp->hp_which;
26134 diff -urNp linux-2.6.32.41/drivers/char/hvc_beat.c linux-2.6.32.41/drivers/char/hvc_beat.c
26135 --- linux-2.6.32.41/drivers/char/hvc_beat.c 2011-03-27 14:31:47.000000000 -0400
26136 +++ linux-2.6.32.41/drivers/char/hvc_beat.c 2011-04-17 15:56:46.000000000 -0400
26137 @@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t v
26138 return cnt;
26139 }
26140
26141 -static struct hv_ops hvc_beat_get_put_ops = {
26142 +static const struct hv_ops hvc_beat_get_put_ops = {
26143 .get_chars = hvc_beat_get_chars,
26144 .put_chars = hvc_beat_put_chars,
26145 };
26146 diff -urNp linux-2.6.32.41/drivers/char/hvc_console.c linux-2.6.32.41/drivers/char/hvc_console.c
26147 --- linux-2.6.32.41/drivers/char/hvc_console.c 2011-03-27 14:31:47.000000000 -0400
26148 +++ linux-2.6.32.41/drivers/char/hvc_console.c 2011-04-17 15:56:46.000000000 -0400
26149 @@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_ind
26150 * console interfaces but can still be used as a tty device. This has to be
26151 * static because kmalloc will not work during early console init.
26152 */
26153 -static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
26154 +static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
26155 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
26156 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
26157
26158 @@ -247,7 +247,7 @@ static void destroy_hvc_struct(struct kr
26159 * vty adapters do NOT get an hvc_instantiate() callback since they
26160 * appear after early console init.
26161 */
26162 -int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
26163 +int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
26164 {
26165 struct hvc_struct *hp;
26166
26167 @@ -756,7 +756,7 @@ static const struct tty_operations hvc_o
26168 };
26169
26170 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
26171 - struct hv_ops *ops, int outbuf_size)
26172 + const struct hv_ops *ops, int outbuf_size)
26173 {
26174 struct hvc_struct *hp;
26175 int i;
26176 diff -urNp linux-2.6.32.41/drivers/char/hvc_console.h linux-2.6.32.41/drivers/char/hvc_console.h
26177 --- linux-2.6.32.41/drivers/char/hvc_console.h 2011-03-27 14:31:47.000000000 -0400
26178 +++ linux-2.6.32.41/drivers/char/hvc_console.h 2011-04-17 15:56:46.000000000 -0400
26179 @@ -55,7 +55,7 @@ struct hvc_struct {
26180 int outbuf_size;
26181 int n_outbuf;
26182 uint32_t vtermno;
26183 - struct hv_ops *ops;
26184 + const struct hv_ops *ops;
26185 int irq_requested;
26186 int data;
26187 struct winsize ws;
26188 @@ -76,11 +76,11 @@ struct hv_ops {
26189 };
26190
26191 /* Register a vterm and a slot index for use as a console (console_init) */
26192 -extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
26193 +extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
26194
26195 /* register a vterm for hvc tty operation (module_init or hotplug add) */
26196 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
26197 - struct hv_ops *ops, int outbuf_size);
26198 + const struct hv_ops *ops, int outbuf_size);
26199 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
26200 extern int hvc_remove(struct hvc_struct *hp);
26201
26202 diff -urNp linux-2.6.32.41/drivers/char/hvc_iseries.c linux-2.6.32.41/drivers/char/hvc_iseries.c
26203 --- linux-2.6.32.41/drivers/char/hvc_iseries.c 2011-03-27 14:31:47.000000000 -0400
26204 +++ linux-2.6.32.41/drivers/char/hvc_iseries.c 2011-04-17 15:56:46.000000000 -0400
26205 @@ -197,7 +197,7 @@ done:
26206 return sent;
26207 }
26208
26209 -static struct hv_ops hvc_get_put_ops = {
26210 +static const struct hv_ops hvc_get_put_ops = {
26211 .get_chars = get_chars,
26212 .put_chars = put_chars,
26213 .notifier_add = notifier_add_irq,
26214 diff -urNp linux-2.6.32.41/drivers/char/hvc_iucv.c linux-2.6.32.41/drivers/char/hvc_iucv.c
26215 --- linux-2.6.32.41/drivers/char/hvc_iucv.c 2011-03-27 14:31:47.000000000 -0400
26216 +++ linux-2.6.32.41/drivers/char/hvc_iucv.c 2011-04-17 15:56:46.000000000 -0400
26217 @@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(stru
26218
26219
26220 /* HVC operations */
26221 -static struct hv_ops hvc_iucv_ops = {
26222 +static const struct hv_ops hvc_iucv_ops = {
26223 .get_chars = hvc_iucv_get_chars,
26224 .put_chars = hvc_iucv_put_chars,
26225 .notifier_add = hvc_iucv_notifier_add,
26226 diff -urNp linux-2.6.32.41/drivers/char/hvc_rtas.c linux-2.6.32.41/drivers/char/hvc_rtas.c
26227 --- linux-2.6.32.41/drivers/char/hvc_rtas.c 2011-03-27 14:31:47.000000000 -0400
26228 +++ linux-2.6.32.41/drivers/char/hvc_rtas.c 2011-04-17 15:56:46.000000000 -0400
26229 @@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_
26230 return i;
26231 }
26232
26233 -static struct hv_ops hvc_rtas_get_put_ops = {
26234 +static const struct hv_ops hvc_rtas_get_put_ops = {
26235 .get_chars = hvc_rtas_read_console,
26236 .put_chars = hvc_rtas_write_console,
26237 };
26238 diff -urNp linux-2.6.32.41/drivers/char/hvcs.c linux-2.6.32.41/drivers/char/hvcs.c
26239 --- linux-2.6.32.41/drivers/char/hvcs.c 2011-03-27 14:31:47.000000000 -0400
26240 +++ linux-2.6.32.41/drivers/char/hvcs.c 2011-04-17 15:56:46.000000000 -0400
26241 @@ -82,6 +82,7 @@
26242 #include <asm/hvcserver.h>
26243 #include <asm/uaccess.h>
26244 #include <asm/vio.h>
26245 +#include <asm/local.h>
26246
26247 /*
26248 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
26249 @@ -269,7 +270,7 @@ struct hvcs_struct {
26250 unsigned int index;
26251
26252 struct tty_struct *tty;
26253 - int open_count;
26254 + local_t open_count;
26255
26256 /*
26257 * Used to tell the driver kernel_thread what operations need to take
26258 @@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(st
26259
26260 spin_lock_irqsave(&hvcsd->lock, flags);
26261
26262 - if (hvcsd->open_count > 0) {
26263 + if (local_read(&hvcsd->open_count) > 0) {
26264 spin_unlock_irqrestore(&hvcsd->lock, flags);
26265 printk(KERN_INFO "HVCS: vterm state unchanged. "
26266 "The hvcs device node is still in use.\n");
26267 @@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *
26268 if ((retval = hvcs_partner_connect(hvcsd)))
26269 goto error_release;
26270
26271 - hvcsd->open_count = 1;
26272 + local_set(&hvcsd->open_count, 1);
26273 hvcsd->tty = tty;
26274 tty->driver_data = hvcsd;
26275
26276 @@ -1169,7 +1170,7 @@ fast_open:
26277
26278 spin_lock_irqsave(&hvcsd->lock, flags);
26279 kref_get(&hvcsd->kref);
26280 - hvcsd->open_count++;
26281 + local_inc(&hvcsd->open_count);
26282 hvcsd->todo_mask |= HVCS_SCHED_READ;
26283 spin_unlock_irqrestore(&hvcsd->lock, flags);
26284
26285 @@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct
26286 hvcsd = tty->driver_data;
26287
26288 spin_lock_irqsave(&hvcsd->lock, flags);
26289 - if (--hvcsd->open_count == 0) {
26290 + if (local_dec_and_test(&hvcsd->open_count)) {
26291
26292 vio_disable_interrupts(hvcsd->vdev);
26293
26294 @@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct
26295 free_irq(irq, hvcsd);
26296 kref_put(&hvcsd->kref, destroy_hvcs_struct);
26297 return;
26298 - } else if (hvcsd->open_count < 0) {
26299 + } else if (local_read(&hvcsd->open_count) < 0) {
26300 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
26301 " is missmanaged.\n",
26302 - hvcsd->vdev->unit_address, hvcsd->open_count);
26303 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
26304 }
26305
26306 spin_unlock_irqrestore(&hvcsd->lock, flags);
26307 @@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struc
26308
26309 spin_lock_irqsave(&hvcsd->lock, flags);
26310 /* Preserve this so that we know how many kref refs to put */
26311 - temp_open_count = hvcsd->open_count;
26312 + temp_open_count = local_read(&hvcsd->open_count);
26313
26314 /*
26315 * Don't kref put inside the spinlock because the destruction
26316 @@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struc
26317 hvcsd->tty->driver_data = NULL;
26318 hvcsd->tty = NULL;
26319
26320 - hvcsd->open_count = 0;
26321 + local_set(&hvcsd->open_count, 0);
26322
26323 /* This will drop any buffered data on the floor which is OK in a hangup
26324 * scenario. */
26325 @@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct
26326 * the middle of a write operation? This is a crummy place to do this
26327 * but we want to keep it all in the spinlock.
26328 */
26329 - if (hvcsd->open_count <= 0) {
26330 + if (local_read(&hvcsd->open_count) <= 0) {
26331 spin_unlock_irqrestore(&hvcsd->lock, flags);
26332 return -ENODEV;
26333 }
26334 @@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_st
26335 {
26336 struct hvcs_struct *hvcsd = tty->driver_data;
26337
26338 - if (!hvcsd || hvcsd->open_count <= 0)
26339 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
26340 return 0;
26341
26342 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
26343 diff -urNp linux-2.6.32.41/drivers/char/hvc_udbg.c linux-2.6.32.41/drivers/char/hvc_udbg.c
26344 --- linux-2.6.32.41/drivers/char/hvc_udbg.c 2011-03-27 14:31:47.000000000 -0400
26345 +++ linux-2.6.32.41/drivers/char/hvc_udbg.c 2011-04-17 15:56:46.000000000 -0400
26346 @@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno
26347 return i;
26348 }
26349
26350 -static struct hv_ops hvc_udbg_ops = {
26351 +static const struct hv_ops hvc_udbg_ops = {
26352 .get_chars = hvc_udbg_get,
26353 .put_chars = hvc_udbg_put,
26354 };
26355 diff -urNp linux-2.6.32.41/drivers/char/hvc_vio.c linux-2.6.32.41/drivers/char/hvc_vio.c
26356 --- linux-2.6.32.41/drivers/char/hvc_vio.c 2011-03-27 14:31:47.000000000 -0400
26357 +++ linux-2.6.32.41/drivers/char/hvc_vio.c 2011-04-17 15:56:46.000000000 -0400
26358 @@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t v
26359 return got;
26360 }
26361
26362 -static struct hv_ops hvc_get_put_ops = {
26363 +static const struct hv_ops hvc_get_put_ops = {
26364 .get_chars = filtered_get_chars,
26365 .put_chars = hvc_put_chars,
26366 .notifier_add = notifier_add_irq,
26367 diff -urNp linux-2.6.32.41/drivers/char/hvc_xen.c linux-2.6.32.41/drivers/char/hvc_xen.c
26368 --- linux-2.6.32.41/drivers/char/hvc_xen.c 2011-03-27 14:31:47.000000000 -0400
26369 +++ linux-2.6.32.41/drivers/char/hvc_xen.c 2011-04-17 15:56:46.000000000 -0400
26370 @@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno
26371 return recv;
26372 }
26373
26374 -static struct hv_ops hvc_ops = {
26375 +static const struct hv_ops hvc_ops = {
26376 .get_chars = read_console,
26377 .put_chars = write_console,
26378 .notifier_add = notifier_add_irq,
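
[editor's note] The three hvc hunks above (hvc_udbg, hvc_vio, hvc_xen) all make the same change: the struct hv_ops tables of get/put callbacks become const, so the function-pointer tables can be placed in read-only data instead of staying writable. A small userspace sketch of the idiom follows; the structure and function names are illustrative only.

#include <stdio.h>

struct hv_like_ops {
	int (*get_chars)(char *buf, int count);
	int (*put_chars)(const char *buf, int count);
};

static int demo_get(char *buf, int count)       { (void)buf; return count; }
static int demo_put(const char *buf, int count) { (void)buf; return count; }

/* const: the compiler may place the table in .rodata, so the function
 * pointers cannot be overwritten at run time. */
static const struct hv_like_ops demo_ops = {
	.get_chars = demo_get,
	.put_chars = demo_put,
};

int main(void)
{
	char buf[4];

	printf("put 4 -> %d\n", demo_ops.put_chars("test", 4));
	printf("get 4 -> %d\n", demo_ops.get_chars(buf, 4));
	return 0;
}
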
26379 diff -urNp linux-2.6.32.41/drivers/char/ipmi/ipmi_msghandler.c linux-2.6.32.41/drivers/char/ipmi/ipmi_msghandler.c
26380 --- linux-2.6.32.41/drivers/char/ipmi/ipmi_msghandler.c 2011-03-27 14:31:47.000000000 -0400
26381 +++ linux-2.6.32.41/drivers/char/ipmi/ipmi_msghandler.c 2011-05-16 21:46:57.000000000 -0400
26382 @@ -414,7 +414,7 @@ struct ipmi_smi {
26383 struct proc_dir_entry *proc_dir;
26384 char proc_dir_name[10];
26385
26386 - atomic_t stats[IPMI_NUM_STATS];
26387 + atomic_unchecked_t stats[IPMI_NUM_STATS];
26388
26389 /*
26390 * run_to_completion duplicate of smb_info, smi_info
26391 @@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
26392
26393
26394 #define ipmi_inc_stat(intf, stat) \
26395 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
26396 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
26397 #define ipmi_get_stat(intf, stat) \
26398 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
26399 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
26400
26401 static int is_lan_addr(struct ipmi_addr *addr)
26402 {
26403 @@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
26404 INIT_LIST_HEAD(&intf->cmd_rcvrs);
26405 init_waitqueue_head(&intf->waitq);
26406 for (i = 0; i < IPMI_NUM_STATS; i++)
26407 - atomic_set(&intf->stats[i], 0);
26408 + atomic_set_unchecked(&intf->stats[i], 0);
26409
26410 intf->proc_dir = NULL;
26411
26412 @@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
26413 struct ipmi_smi_msg smi_msg;
26414 struct ipmi_recv_msg recv_msg;
26415
26416 + pax_track_stack();
26417 +
26418 si = (struct ipmi_system_interface_addr *) &addr;
26419 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
26420 si->channel = IPMI_BMC_CHANNEL;
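
[editor's note] The IPMI hunks switch the per-interface statistics arrays from atomic_t to atomic_unchecked_t and use the matching *_unchecked accessors. atomic_unchecked_t is introduced elsewhere in this patch; as generally documented for PaX's REFCOUNT hardening, atomic_t arithmetic is instrumented to trap on overflow, so counters that may legitimately wrap (statistics rather than reference counts) are moved to the unchecked variant. A rough userspace illustration of the distinction, using C11 atomics (illustrative only, not the PaX implementation):

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

/* "checked": refuse to wrap past INT_MAX, roughly what an overflow-trapping
 * refcount wants.  "unchecked": plain add, acceptable for statistics. */
static int checked_inc(atomic_int *v)
{
	int old = atomic_load(v);

	do {
		if (old == INT_MAX)
			return -1;               /* would overflow: reject */
	} while (!atomic_compare_exchange_weak(v, &old, old + 1));
	return 0;
}

static void unchecked_inc(atomic_int *v)
{
	atomic_fetch_add(v, 1);                  /* may wrap silently */
}

int main(void)
{
	atomic_int refcount = INT_MAX;           /* must not wrap */
	atomic_int stat = INT_MAX;               /* allowed to wrap */

	printf("checked_inc at INT_MAX -> %d\n", checked_inc(&refcount));
	unchecked_inc(&stat);
	printf("stat after wrap: %d\n", atomic_load(&stat));
	return 0;
}

The ipmi_si_intf.c hunks that follow apply the same statistics-counter conversion.
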
26421 diff -urNp linux-2.6.32.41/drivers/char/ipmi/ipmi_si_intf.c linux-2.6.32.41/drivers/char/ipmi/ipmi_si_intf.c
26422 --- linux-2.6.32.41/drivers/char/ipmi/ipmi_si_intf.c 2011-03-27 14:31:47.000000000 -0400
26423 +++ linux-2.6.32.41/drivers/char/ipmi/ipmi_si_intf.c 2011-04-17 15:56:46.000000000 -0400
26424 @@ -277,7 +277,7 @@ struct smi_info {
26425 unsigned char slave_addr;
26426
26427 /* Counters and things for the proc filesystem. */
26428 - atomic_t stats[SI_NUM_STATS];
26429 + atomic_unchecked_t stats[SI_NUM_STATS];
26430
26431 struct task_struct *thread;
26432
26433 @@ -285,9 +285,9 @@ struct smi_info {
26434 };
26435
26436 #define smi_inc_stat(smi, stat) \
26437 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
26438 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
26439 #define smi_get_stat(smi, stat) \
26440 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
26441 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
26442
26443 #define SI_MAX_PARMS 4
26444
26445 @@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info
26446 atomic_set(&new_smi->req_events, 0);
26447 new_smi->run_to_completion = 0;
26448 for (i = 0; i < SI_NUM_STATS; i++)
26449 - atomic_set(&new_smi->stats[i], 0);
26450 + atomic_set_unchecked(&new_smi->stats[i], 0);
26451
26452 new_smi->interrupt_disabled = 0;
26453 atomic_set(&new_smi->stop_operation, 0);
26454 diff -urNp linux-2.6.32.41/drivers/char/istallion.c linux-2.6.32.41/drivers/char/istallion.c
26455 --- linux-2.6.32.41/drivers/char/istallion.c 2011-03-27 14:31:47.000000000 -0400
26456 +++ linux-2.6.32.41/drivers/char/istallion.c 2011-05-16 21:46:57.000000000 -0400
26457 @@ -187,7 +187,6 @@ static struct ktermios stli_deftermios
26458 * re-used for each stats call.
26459 */
26460 static comstats_t stli_comstats;
26461 -static combrd_t stli_brdstats;
26462 static struct asystats stli_cdkstats;
26463
26464 /*****************************************************************************/
26465 @@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __u
26466 {
26467 struct stlibrd *brdp;
26468 unsigned int i;
26469 + combrd_t stli_brdstats;
26470
26471 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
26472 return -EFAULT;
26473 @@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stl
26474 struct stliport stli_dummyport;
26475 struct stliport *portp;
26476
26477 + pax_track_stack();
26478 +
26479 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
26480 return -EFAULT;
26481 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
26482 @@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stli
26483 struct stlibrd stli_dummybrd;
26484 struct stlibrd *brdp;
26485
26486 + pax_track_stack();
26487 +
26488 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
26489 return -EFAULT;
26490 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
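
[editor's note] In the istallion hunks the file-scope static combrd_t buffer becomes a local variable of stli_getbrdstats(), so concurrent ioctls no longer fill in the same shared object, and pax_track_stack() (a PaX helper this patch inserts into functions with large stack frames) accounts for the bigger footprint. A simplified userspace sketch of the shared-static versus stack-local difference; names are illustrative.

#include <stdio.h>
#include <string.h>

struct brd_stats {
	int nrbrds;
	int state;
};

/* was: a file-scope "static struct brd_stats stats;" shared by every caller */
static int get_brd_stats(const struct brd_stats *request, struct brd_stats *out)
{
	struct brd_stats local;                    /* per-call, on the stack */

	memcpy(&local, request, sizeof(local));    /* copy_from_user() stand-in */
	local.state = 1;                           /* fill in driver-side data */
	memcpy(out, &local, sizeof(local));        /* copy_to_user() stand-in */
	return 0;
}

int main(void)
{
	struct brd_stats req = { .nrbrds = 2, .state = 0 }, rsp;

	get_brd_stats(&req, &rsp);
	printf("boards=%d state=%d\n", rsp.nrbrds, rsp.state);
	return 0;
}
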
26491 diff -urNp linux-2.6.32.41/drivers/char/Kconfig linux-2.6.32.41/drivers/char/Kconfig
26492 --- linux-2.6.32.41/drivers/char/Kconfig 2011-03-27 14:31:47.000000000 -0400
26493 +++ linux-2.6.32.41/drivers/char/Kconfig 2011-04-18 19:20:15.000000000 -0400
26494 @@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
26495
26496 config DEVKMEM
26497 bool "/dev/kmem virtual device support"
26498 - default y
26499 + default n
26500 + depends on !GRKERNSEC_KMEM
26501 help
26502 Say Y here if you want to support the /dev/kmem device. The
26503 /dev/kmem device is rarely used, but can be used for certain
26504 @@ -1114,6 +1115,7 @@ config DEVPORT
26505 bool
26506 depends on !M68K
26507 depends on ISA || PCI
26508 + depends on !GRKERNSEC_KMEM
26509 default y
26510
26511 source "drivers/s390/char/Kconfig"
26512 diff -urNp linux-2.6.32.41/drivers/char/keyboard.c linux-2.6.32.41/drivers/char/keyboard.c
26513 --- linux-2.6.32.41/drivers/char/keyboard.c 2011-03-27 14:31:47.000000000 -0400
26514 +++ linux-2.6.32.41/drivers/char/keyboard.c 2011-04-17 15:56:46.000000000 -0400
26515 @@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, u
26516 kbd->kbdmode == VC_MEDIUMRAW) &&
26517 value != KVAL(K_SAK))
26518 return; /* SAK is allowed even in raw mode */
26519 +
26520 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
26521 + {
26522 + void *func = fn_handler[value];
26523 + if (func == fn_show_state || func == fn_show_ptregs ||
26524 + func == fn_show_mem)
26525 + return;
26526 + }
26527 +#endif
26528 +
26529 fn_handler[value](vc);
26530 }
26531
26532 @@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_
26533 .evbit = { BIT_MASK(EV_SND) },
26534 },
26535
26536 - { }, /* Terminating entry */
26537 + { 0 }, /* Terminating entry */
26538 };
26539
26540 MODULE_DEVICE_TABLE(input, kbd_ids);
26541 diff -urNp linux-2.6.32.41/drivers/char/mem.c linux-2.6.32.41/drivers/char/mem.c
26542 --- linux-2.6.32.41/drivers/char/mem.c 2011-03-27 14:31:47.000000000 -0400
26543 +++ linux-2.6.32.41/drivers/char/mem.c 2011-04-17 15:56:46.000000000 -0400
26544 @@ -18,6 +18,7 @@
26545 #include <linux/raw.h>
26546 #include <linux/tty.h>
26547 #include <linux/capability.h>
26548 +#include <linux/security.h>
26549 #include <linux/ptrace.h>
26550 #include <linux/device.h>
26551 #include <linux/highmem.h>
26552 @@ -35,6 +36,10 @@
26553 # include <linux/efi.h>
26554 #endif
26555
26556 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
26557 +extern struct file_operations grsec_fops;
26558 +#endif
26559 +
26560 static inline unsigned long size_inside_page(unsigned long start,
26561 unsigned long size)
26562 {
26563 @@ -102,9 +107,13 @@ static inline int range_is_allowed(unsig
26564
26565 while (cursor < to) {
26566 if (!devmem_is_allowed(pfn)) {
26567 +#ifdef CONFIG_GRKERNSEC_KMEM
26568 + gr_handle_mem_readwrite(from, to);
26569 +#else
26570 printk(KERN_INFO
26571 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
26572 current->comm, from, to);
26573 +#endif
26574 return 0;
26575 }
26576 cursor += PAGE_SIZE;
26577 @@ -112,6 +121,11 @@ static inline int range_is_allowed(unsig
26578 }
26579 return 1;
26580 }
26581 +#elif defined(CONFIG_GRKERNSEC_KMEM)
26582 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
26583 +{
26584 + return 0;
26585 +}
26586 #else
26587 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
26588 {
26589 @@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * fi
26590 #endif
26591
26592 while (count > 0) {
26593 + char *temp;
26594 +
26595 /*
26596 * Handle first page in case it's not aligned
26597 */
26598 @@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * fi
26599 if (!ptr)
26600 return -EFAULT;
26601
26602 - if (copy_to_user(buf, ptr, sz)) {
26603 +#ifdef CONFIG_PAX_USERCOPY
26604 + temp = kmalloc(sz, GFP_KERNEL);
26605 + if (!temp) {
26606 + unxlate_dev_mem_ptr(p, ptr);
26607 + return -ENOMEM;
26608 + }
26609 + memcpy(temp, ptr, sz);
26610 +#else
26611 + temp = ptr;
26612 +#endif
26613 +
26614 + if (copy_to_user(buf, temp, sz)) {
26615 +
26616 +#ifdef CONFIG_PAX_USERCOPY
26617 + kfree(temp);
26618 +#endif
26619 +
26620 unxlate_dev_mem_ptr(p, ptr);
26621 return -EFAULT;
26622 }
26623
26624 +#ifdef CONFIG_PAX_USERCOPY
26625 + kfree(temp);
26626 +#endif
26627 +
26628 unxlate_dev_mem_ptr(p, ptr);
26629
26630 buf += sz;
26631 @@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *fi
26632 size_t count, loff_t *ppos)
26633 {
26634 unsigned long p = *ppos;
26635 - ssize_t low_count, read, sz;
26636 + ssize_t low_count, read, sz, err = 0;
26637 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
26638 - int err = 0;
26639
26640 read = 0;
26641 if (p < (unsigned long) high_memory) {
26642 @@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *fi
26643 }
26644 #endif
26645 while (low_count > 0) {
26646 + char *temp;
26647 +
26648 sz = size_inside_page(p, low_count);
26649
26650 /*
26651 @@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *fi
26652 */
26653 kbuf = xlate_dev_kmem_ptr((char *)p);
26654
26655 - if (copy_to_user(buf, kbuf, sz))
26656 +#ifdef CONFIG_PAX_USERCOPY
26657 + temp = kmalloc(sz, GFP_KERNEL);
26658 + if (!temp)
26659 + return -ENOMEM;
26660 + memcpy(temp, kbuf, sz);
26661 +#else
26662 + temp = kbuf;
26663 +#endif
26664 +
26665 + err = copy_to_user(buf, temp, sz);
26666 +
26667 +#ifdef CONFIG_PAX_USERCOPY
26668 + kfree(temp);
26669 +#endif
26670 +
26671 + if (err)
26672 return -EFAULT;
26673 buf += sz;
26674 p += sz;
26675 @@ -889,6 +941,9 @@ static const struct memdev {
26676 #ifdef CONFIG_CRASH_DUMP
26677 [12] = { "oldmem", 0, &oldmem_fops, NULL },
26678 #endif
26679 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
26680 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
26681 +#endif
26682 };
26683
26684 static int memory_open(struct inode *inode, struct file *filp)
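
[editor's note] The read_mem()/read_kmem() hunks above stage the data in a freshly kmalloc()ed buffer when PAX_USERCOPY is enabled and only then copy_to_user() from it, presumably so the usercopy checks see a heap object of exactly the requested size rather than an arbitrary mapping. A userspace sketch of the bounce-buffer pattern; the names and copy helpers are stand-ins, not the kernel API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int bounce_copy(char *dst, const char *src, size_t sz)
{
	char *temp = malloc(sz);          /* ~ kmalloc(sz, GFP_KERNEL) */

	if (!temp)
		return -1;                /* ~ -ENOMEM */
	memcpy(temp, src, sz);            /* stage the data */
	memcpy(dst, temp, sz);            /* ~ copy_to_user(buf, temp, sz) */
	free(temp);                       /* ~ kfree(temp) */
	return 0;
}

int main(void)
{
	const char src[] = "device memory contents";
	char dst[sizeof(src)];

	if (bounce_copy(dst, src, sizeof(src)) == 0)
		printf("%s\n", dst);
	return 0;
}
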
26685 diff -urNp linux-2.6.32.41/drivers/char/pcmcia/ipwireless/tty.c linux-2.6.32.41/drivers/char/pcmcia/ipwireless/tty.c
26686 --- linux-2.6.32.41/drivers/char/pcmcia/ipwireless/tty.c 2011-03-27 14:31:47.000000000 -0400
26687 +++ linux-2.6.32.41/drivers/char/pcmcia/ipwireless/tty.c 2011-04-17 15:56:46.000000000 -0400
26688 @@ -29,6 +29,7 @@
26689 #include <linux/tty_driver.h>
26690 #include <linux/tty_flip.h>
26691 #include <linux/uaccess.h>
26692 +#include <asm/local.h>
26693
26694 #include "tty.h"
26695 #include "network.h"
26696 @@ -51,7 +52,7 @@ struct ipw_tty {
26697 int tty_type;
26698 struct ipw_network *network;
26699 struct tty_struct *linux_tty;
26700 - int open_count;
26701 + local_t open_count;
26702 unsigned int control_lines;
26703 struct mutex ipw_tty_mutex;
26704 int tx_bytes_queued;
26705 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
26706 mutex_unlock(&tty->ipw_tty_mutex);
26707 return -ENODEV;
26708 }
26709 - if (tty->open_count == 0)
26710 + if (local_read(&tty->open_count) == 0)
26711 tty->tx_bytes_queued = 0;
26712
26713 - tty->open_count++;
26714 + local_inc(&tty->open_count);
26715
26716 tty->linux_tty = linux_tty;
26717 linux_tty->driver_data = tty;
26718 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
26719
26720 static void do_ipw_close(struct ipw_tty *tty)
26721 {
26722 - tty->open_count--;
26723 -
26724 - if (tty->open_count == 0) {
26725 + if (local_dec_return(&tty->open_count) == 0) {
26726 struct tty_struct *linux_tty = tty->linux_tty;
26727
26728 if (linux_tty != NULL) {
26729 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
26730 return;
26731
26732 mutex_lock(&tty->ipw_tty_mutex);
26733 - if (tty->open_count == 0) {
26734 + if (local_read(&tty->open_count) == 0) {
26735 mutex_unlock(&tty->ipw_tty_mutex);
26736 return;
26737 }
26738 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
26739 return;
26740 }
26741
26742 - if (!tty->open_count) {
26743 + if (!local_read(&tty->open_count)) {
26744 mutex_unlock(&tty->ipw_tty_mutex);
26745 return;
26746 }
26747 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
26748 return -ENODEV;
26749
26750 mutex_lock(&tty->ipw_tty_mutex);
26751 - if (!tty->open_count) {
26752 + if (!local_read(&tty->open_count)) {
26753 mutex_unlock(&tty->ipw_tty_mutex);
26754 return -EINVAL;
26755 }
26756 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
26757 if (!tty)
26758 return -ENODEV;
26759
26760 - if (!tty->open_count)
26761 + if (!local_read(&tty->open_count))
26762 return -EINVAL;
26763
26764 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
26765 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
26766 if (!tty)
26767 return 0;
26768
26769 - if (!tty->open_count)
26770 + if (!local_read(&tty->open_count))
26771 return 0;
26772
26773 return tty->tx_bytes_queued;
26774 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
26775 if (!tty)
26776 return -ENODEV;
26777
26778 - if (!tty->open_count)
26779 + if (!local_read(&tty->open_count))
26780 return -EINVAL;
26781
26782 return get_control_lines(tty);
26783 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
26784 if (!tty)
26785 return -ENODEV;
26786
26787 - if (!tty->open_count)
26788 + if (!local_read(&tty->open_count))
26789 return -EINVAL;
26790
26791 return set_control_lines(tty, set, clear);
26792 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
26793 if (!tty)
26794 return -ENODEV;
26795
26796 - if (!tty->open_count)
26797 + if (!local_read(&tty->open_count))
26798 return -EINVAL;
26799
26800 /* FIXME: Exactly how is the tty object locked here .. */
26801 @@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty
26802 against a parallel ioctl etc */
26803 mutex_lock(&ttyj->ipw_tty_mutex);
26804 }
26805 - while (ttyj->open_count)
26806 + while (local_read(&ttyj->open_count))
26807 do_ipw_close(ttyj);
26808 ipwireless_disassociate_network_ttys(network,
26809 ttyj->channel_idx);
26810 diff -urNp linux-2.6.32.41/drivers/char/pty.c linux-2.6.32.41/drivers/char/pty.c
26811 --- linux-2.6.32.41/drivers/char/pty.c 2011-03-27 14:31:47.000000000 -0400
26812 +++ linux-2.6.32.41/drivers/char/pty.c 2011-04-17 15:56:46.000000000 -0400
26813 @@ -682,7 +682,18 @@ static int ptmx_open(struct inode *inode
26814 return ret;
26815 }
26816
26817 -static struct file_operations ptmx_fops;
26818 +static const struct file_operations ptmx_fops = {
26819 + .llseek = no_llseek,
26820 + .read = tty_read,
26821 + .write = tty_write,
26822 + .poll = tty_poll,
26823 + .unlocked_ioctl = tty_ioctl,
26824 + .compat_ioctl = tty_compat_ioctl,
26825 + .open = ptmx_open,
26826 + .release = tty_release,
26827 + .fasync = tty_fasync,
26828 +};
26829 +
26830
26831 static void __init unix98_pty_init(void)
26832 {
26833 @@ -736,9 +747,6 @@ static void __init unix98_pty_init(void)
26834 register_sysctl_table(pty_root_table);
26835
26836 /* Now create the /dev/ptmx special device */
26837 - tty_default_fops(&ptmx_fops);
26838 - ptmx_fops.open = ptmx_open;
26839 -
26840 cdev_init(&ptmx_cdev, &ptmx_fops);
26841 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
26842 register_chrdev_region(MKDEV(TTYAUX_MAJOR, 2), 1, "/dev/ptmx") < 0)
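
[editor's note] Here ptmx_fops stops being a writable structure that unix98_pty_init() filled from tty_default_fops() and then patched; it is now written out once as a const initializer (the tty_io.c hunks further down export tty_read(), tty_write(), tty_poll(), tty_ioctl(), tty_compat_ioctl(), tty_release() and tty_fasync() to make that possible). A tiny userspace sketch of trading runtime copy-and-patch for a const table; the types and names are illustrative.

#include <stdio.h>

struct fops_like {
	int (*open)(void);
	int (*release)(void);
};

static int ptmx_like_open(void)  { return 1; }
static int generic_release(void) { return 0; }

/* before: a writable "struct fops_like ptmx_fops;" was copied from a template
 * at init time and then had .open patched; now every member is named once and
 * the table can live in read-only data. */
static const struct fops_like ptmx_fops = {
	.open    = ptmx_like_open,
	.release = generic_release,
};

int main(void)
{
	printf("open -> %d, release -> %d\n", ptmx_fops.open(), ptmx_fops.release());
	return 0;
}
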
26843 diff -urNp linux-2.6.32.41/drivers/char/random.c linux-2.6.32.41/drivers/char/random.c
26844 --- linux-2.6.32.41/drivers/char/random.c 2011-03-27 14:31:47.000000000 -0400
26845 +++ linux-2.6.32.41/drivers/char/random.c 2011-04-17 15:56:46.000000000 -0400
26846 @@ -254,8 +254,13 @@
26847 /*
26848 * Configuration information
26849 */
26850 +#ifdef CONFIG_GRKERNSEC_RANDNET
26851 +#define INPUT_POOL_WORDS 512
26852 +#define OUTPUT_POOL_WORDS 128
26853 +#else
26854 #define INPUT_POOL_WORDS 128
26855 #define OUTPUT_POOL_WORDS 32
26856 +#endif
26857 #define SEC_XFER_SIZE 512
26858
26859 /*
26860 @@ -292,10 +297,17 @@ static struct poolinfo {
26861 int poolwords;
26862 int tap1, tap2, tap3, tap4, tap5;
26863 } poolinfo_table[] = {
26864 +#ifdef CONFIG_GRKERNSEC_RANDNET
26865 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
26866 + { 512, 411, 308, 208, 104, 1 },
26867 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
26868 + { 128, 103, 76, 51, 25, 1 },
26869 +#else
26870 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
26871 { 128, 103, 76, 51, 25, 1 },
26872 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
26873 { 32, 26, 20, 14, 7, 1 },
26874 +#endif
26875 #if 0
26876 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
26877 { 2048, 1638, 1231, 819, 411, 1 },
26878 @@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
26879 #include <linux/sysctl.h>
26880
26881 static int min_read_thresh = 8, min_write_thresh;
26882 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
26883 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
26884 static int max_write_thresh = INPUT_POOL_WORDS * 32;
26885 static char sysctl_bootid[16];
26886
26887 diff -urNp linux-2.6.32.41/drivers/char/rocket.c linux-2.6.32.41/drivers/char/rocket.c
26888 --- linux-2.6.32.41/drivers/char/rocket.c 2011-03-27 14:31:47.000000000 -0400
26889 +++ linux-2.6.32.41/drivers/char/rocket.c 2011-05-16 21:46:57.000000000 -0400
26890 @@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info
26891 struct rocket_ports tmp;
26892 int board;
26893
26894 + pax_track_stack();
26895 +
26896 if (!retports)
26897 return -EFAULT;
26898 memset(&tmp, 0, sizeof (tmp));
26899 diff -urNp linux-2.6.32.41/drivers/char/sonypi.c linux-2.6.32.41/drivers/char/sonypi.c
26900 --- linux-2.6.32.41/drivers/char/sonypi.c 2011-03-27 14:31:47.000000000 -0400
26901 +++ linux-2.6.32.41/drivers/char/sonypi.c 2011-04-17 15:56:46.000000000 -0400
26902 @@ -55,6 +55,7 @@
26903 #include <asm/uaccess.h>
26904 #include <asm/io.h>
26905 #include <asm/system.h>
26906 +#include <asm/local.h>
26907
26908 #include <linux/sonypi.h>
26909
26910 @@ -491,7 +492,7 @@ static struct sonypi_device {
26911 spinlock_t fifo_lock;
26912 wait_queue_head_t fifo_proc_list;
26913 struct fasync_struct *fifo_async;
26914 - int open_count;
26915 + local_t open_count;
26916 int model;
26917 struct input_dev *input_jog_dev;
26918 struct input_dev *input_key_dev;
26919 @@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, st
26920 static int sonypi_misc_release(struct inode *inode, struct file *file)
26921 {
26922 mutex_lock(&sonypi_device.lock);
26923 - sonypi_device.open_count--;
26924 + local_dec(&sonypi_device.open_count);
26925 mutex_unlock(&sonypi_device.lock);
26926 return 0;
26927 }
26928 @@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode
26929 lock_kernel();
26930 mutex_lock(&sonypi_device.lock);
26931 /* Flush input queue on first open */
26932 - if (!sonypi_device.open_count)
26933 + if (!local_read(&sonypi_device.open_count))
26934 kfifo_reset(sonypi_device.fifo);
26935 - sonypi_device.open_count++;
26936 + local_inc(&sonypi_device.open_count);
26937 mutex_unlock(&sonypi_device.lock);
26938 unlock_kernel();
26939 return 0;
26940 diff -urNp linux-2.6.32.41/drivers/char/stallion.c linux-2.6.32.41/drivers/char/stallion.c
26941 --- linux-2.6.32.41/drivers/char/stallion.c 2011-03-27 14:31:47.000000000 -0400
26942 +++ linux-2.6.32.41/drivers/char/stallion.c 2011-05-16 21:46:57.000000000 -0400
26943 @@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlp
26944 struct stlport stl_dummyport;
26945 struct stlport *portp;
26946
26947 + pax_track_stack();
26948 +
26949 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
26950 return -EFAULT;
26951 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
26952 diff -urNp linux-2.6.32.41/drivers/char/tpm/tpm_bios.c linux-2.6.32.41/drivers/char/tpm/tpm_bios.c
26953 --- linux-2.6.32.41/drivers/char/tpm/tpm_bios.c 2011-03-27 14:31:47.000000000 -0400
26954 +++ linux-2.6.32.41/drivers/char/tpm/tpm_bios.c 2011-04-17 15:56:46.000000000 -0400
26955 @@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start
26956 event = addr;
26957
26958 if ((event->event_type == 0 && event->event_size == 0) ||
26959 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
26960 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
26961 return NULL;
26962
26963 return addr;
26964 @@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(
26965 return NULL;
26966
26967 if ((event->event_type == 0 && event->event_size == 0) ||
26968 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
26969 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
26970 return NULL;
26971
26972 (*pos)++;
26973 @@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_
26974 int i;
26975
26976 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
26977 - seq_putc(m, data[i]);
26978 + if (!seq_putc(m, data[i]))
26979 + return -EFAULT;
26980
26981 return 0;
26982 }
26983 @@ -409,6 +410,11 @@ static int read_log(struct tpm_bios_log
26984 log->bios_event_log_end = log->bios_event_log + len;
26985
26986 virt = acpi_os_map_memory(start, len);
26987 + if (!virt) {
26988 + kfree(log->bios_event_log);
26989 + log->bios_event_log = NULL;
26990 + return -EFAULT;
26991 + }
26992
26993 memcpy(log->bios_event_log, virt, len);
26994
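
[editor's note] The tpm_bios hunks rewrite the event-size validation so the untrusted event_size is compared against the space left in the log (limit - addr - sizeof(struct tcpa_event)) instead of being added to the cursor first, which avoids the arithmetic wrap that let an oversized value slip past the old check; they also start checking the acpi_os_map_memory() result. A small userspace model of the overflow-safe form; HDR_SIZE and the function are illustrative.

#include <stdint.h>
#include <stdio.h>

#define HDR_SIZE 16u    /* stands in for sizeof(struct tcpa_event) */

/* caller guarantees addr + HDR_SIZE <= limit */
static int event_fits(uintptr_t addr, uintptr_t limit, uint32_t event_size)
{
	/* old form: addr + HDR_SIZE + event_size >= limit  -- the sum can wrap */
	return event_size < limit - addr - HDR_SIZE;
}

int main(void)
{
	uintptr_t base = 0x1000, limit = 0x2000;

	printf("small event fits: %d\n", event_fits(base, limit, 32));
	printf("huge event fits:  %d\n", event_fits(base, limit, UINT32_MAX));
	return 0;
}

With the rewritten check the huge length is rejected because it is compared against the remaining space, which cannot overflow.
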
26995 diff -urNp linux-2.6.32.41/drivers/char/tpm/tpm.c linux-2.6.32.41/drivers/char/tpm/tpm.c
26996 --- linux-2.6.32.41/drivers/char/tpm/tpm.c 2011-04-17 17:00:52.000000000 -0400
26997 +++ linux-2.6.32.41/drivers/char/tpm/tpm.c 2011-05-16 21:46:57.000000000 -0400
26998 @@ -402,7 +402,7 @@ static ssize_t tpm_transmit(struct tpm_c
26999 chip->vendor.req_complete_val)
27000 goto out_recv;
27001
27002 - if ((status == chip->vendor.req_canceled)) {
27003 + if (status == chip->vendor.req_canceled) {
27004 dev_err(chip->dev, "Operation Canceled\n");
27005 rc = -ECANCELED;
27006 goto out;
27007 @@ -821,6 +821,8 @@ ssize_t tpm_show_pubek(struct device *de
27008
27009 struct tpm_chip *chip = dev_get_drvdata(dev);
27010
27011 + pax_track_stack();
27012 +
27013 tpm_cmd.header.in = tpm_readpubek_header;
27014 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
27015 "attempting to read the PUBEK");
27016 diff -urNp linux-2.6.32.41/drivers/char/tty_io.c linux-2.6.32.41/drivers/char/tty_io.c
27017 --- linux-2.6.32.41/drivers/char/tty_io.c 2011-03-27 14:31:47.000000000 -0400
27018 +++ linux-2.6.32.41/drivers/char/tty_io.c 2011-04-17 15:56:46.000000000 -0400
27019 @@ -136,21 +136,10 @@ LIST_HEAD(tty_drivers); /* linked list
27020 DEFINE_MUTEX(tty_mutex);
27021 EXPORT_SYMBOL(tty_mutex);
27022
27023 -static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
27024 -static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
27025 ssize_t redirected_tty_write(struct file *, const char __user *,
27026 size_t, loff_t *);
27027 -static unsigned int tty_poll(struct file *, poll_table *);
27028 static int tty_open(struct inode *, struct file *);
27029 -static int tty_release(struct inode *, struct file *);
27030 long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
27031 -#ifdef CONFIG_COMPAT
27032 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
27033 - unsigned long arg);
27034 -#else
27035 -#define tty_compat_ioctl NULL
27036 -#endif
27037 -static int tty_fasync(int fd, struct file *filp, int on);
27038 static void release_tty(struct tty_struct *tty, int idx);
27039 static void __proc_set_tty(struct task_struct *tsk, struct tty_struct *tty);
27040 static void proc_set_tty(struct task_struct *tsk, struct tty_struct *tty);
27041 @@ -870,7 +859,7 @@ EXPORT_SYMBOL(start_tty);
27042 * read calls may be outstanding in parallel.
27043 */
27044
27045 -static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
27046 +ssize_t tty_read(struct file *file, char __user *buf, size_t count,
27047 loff_t *ppos)
27048 {
27049 int i;
27050 @@ -898,6 +887,8 @@ static ssize_t tty_read(struct file *fil
27051 return i;
27052 }
27053
27054 +EXPORT_SYMBOL(tty_read);
27055 +
27056 void tty_write_unlock(struct tty_struct *tty)
27057 {
27058 mutex_unlock(&tty->atomic_write_lock);
27059 @@ -1045,7 +1036,7 @@ void tty_write_message(struct tty_struct
27060 * write method will not be invoked in parallel for each device.
27061 */
27062
27063 -static ssize_t tty_write(struct file *file, const char __user *buf,
27064 +ssize_t tty_write(struct file *file, const char __user *buf,
27065 size_t count, loff_t *ppos)
27066 {
27067 struct tty_struct *tty;
27068 @@ -1072,6 +1063,8 @@ static ssize_t tty_write(struct file *fi
27069 return ret;
27070 }
27071
27072 +EXPORT_SYMBOL(tty_write);
27073 +
27074 ssize_t redirected_tty_write(struct file *file, const char __user *buf,
27075 size_t count, loff_t *ppos)
27076 {
27077 @@ -1867,7 +1860,7 @@ static int tty_open(struct inode *inode,
27078 * Takes bkl. See tty_release_dev
27079 */
27080
27081 -static int tty_release(struct inode *inode, struct file *filp)
27082 +int tty_release(struct inode *inode, struct file *filp)
27083 {
27084 lock_kernel();
27085 tty_release_dev(filp);
27086 @@ -1875,6 +1868,8 @@ static int tty_release(struct inode *ino
27087 return 0;
27088 }
27089
27090 +EXPORT_SYMBOL(tty_release);
27091 +
27092 /**
27093 * tty_poll - check tty status
27094 * @filp: file being polled
27095 @@ -1887,7 +1882,7 @@ static int tty_release(struct inode *ino
27096 * may be re-entered freely by other callers.
27097 */
27098
27099 -static unsigned int tty_poll(struct file *filp, poll_table *wait)
27100 +unsigned int tty_poll(struct file *filp, poll_table *wait)
27101 {
27102 struct tty_struct *tty;
27103 struct tty_ldisc *ld;
27104 @@ -1904,7 +1899,9 @@ static unsigned int tty_poll(struct file
27105 return ret;
27106 }
27107
27108 -static int tty_fasync(int fd, struct file *filp, int on)
27109 +EXPORT_SYMBOL(tty_poll);
27110 +
27111 +int tty_fasync(int fd, struct file *filp, int on)
27112 {
27113 struct tty_struct *tty;
27114 unsigned long flags;
27115 @@ -1948,6 +1945,8 @@ out:
27116 return retval;
27117 }
27118
27119 +EXPORT_SYMBOL(tty_fasync);
27120 +
27121 /**
27122 * tiocsti - fake input character
27123 * @tty: tty to fake input into
27124 @@ -2582,8 +2581,10 @@ long tty_ioctl(struct file *file, unsign
27125 return retval;
27126 }
27127
27128 +EXPORT_SYMBOL(tty_ioctl);
27129 +
27130 #ifdef CONFIG_COMPAT
27131 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
27132 +long tty_compat_ioctl(struct file *file, unsigned int cmd,
27133 unsigned long arg)
27134 {
27135 struct inode *inode = file->f_dentry->d_inode;
27136 @@ -2607,6 +2608,8 @@ static long tty_compat_ioctl(struct file
27137
27138 return retval;
27139 }
27140 +
27141 +EXPORT_SYMBOL(tty_compat_ioctl);
27142 #endif
27143
27144 /*
27145 @@ -3050,11 +3053,6 @@ struct tty_struct *get_current_tty(void)
27146 }
27147 EXPORT_SYMBOL_GPL(get_current_tty);
27148
27149 -void tty_default_fops(struct file_operations *fops)
27150 -{
27151 - *fops = tty_fops;
27152 -}
27153 -
27154 /*
27155 * Initialize the console device. This is called *early*, so
27156 * we can't necessarily depend on lots of kernel help here.
27157 diff -urNp linux-2.6.32.41/drivers/char/tty_ldisc.c linux-2.6.32.41/drivers/char/tty_ldisc.c
27158 --- linux-2.6.32.41/drivers/char/tty_ldisc.c 2011-03-27 14:31:47.000000000 -0400
27159 +++ linux-2.6.32.41/drivers/char/tty_ldisc.c 2011-04-17 15:56:46.000000000 -0400
27160 @@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
27161 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
27162 struct tty_ldisc_ops *ldo = ld->ops;
27163
27164 - ldo->refcount--;
27165 + atomic_dec(&ldo->refcount);
27166 module_put(ldo->owner);
27167 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
27168
27169 @@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
27170 spin_lock_irqsave(&tty_ldisc_lock, flags);
27171 tty_ldiscs[disc] = new_ldisc;
27172 new_ldisc->num = disc;
27173 - new_ldisc->refcount = 0;
27174 + atomic_set(&new_ldisc->refcount, 0);
27175 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
27176
27177 return ret;
27178 @@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
27179 return -EINVAL;
27180
27181 spin_lock_irqsave(&tty_ldisc_lock, flags);
27182 - if (tty_ldiscs[disc]->refcount)
27183 + if (atomic_read(&tty_ldiscs[disc]->refcount))
27184 ret = -EBUSY;
27185 else
27186 tty_ldiscs[disc] = NULL;
27187 @@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
27188 if (ldops) {
27189 ret = ERR_PTR(-EAGAIN);
27190 if (try_module_get(ldops->owner)) {
27191 - ldops->refcount++;
27192 + atomic_inc(&ldops->refcount);
27193 ret = ldops;
27194 }
27195 }
27196 @@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
27197 unsigned long flags;
27198
27199 spin_lock_irqsave(&tty_ldisc_lock, flags);
27200 - ldops->refcount--;
27201 + atomic_dec(&ldops->refcount);
27202 module_put(ldops->owner);
27203 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
27204 }
27205 diff -urNp linux-2.6.32.41/drivers/char/virtio_console.c linux-2.6.32.41/drivers/char/virtio_console.c
27206 --- linux-2.6.32.41/drivers/char/virtio_console.c 2011-03-27 14:31:47.000000000 -0400
27207 +++ linux-2.6.32.41/drivers/char/virtio_console.c 2011-04-17 15:56:46.000000000 -0400
27208 @@ -44,6 +44,7 @@ static unsigned int in_len;
27209 static char *in, *inbuf;
27210
27211 /* The operations for our console. */
27212 +/* cannot be const */
27213 static struct hv_ops virtio_cons;
27214
27215 /* The hvc device */
27216 diff -urNp linux-2.6.32.41/drivers/char/vt.c linux-2.6.32.41/drivers/char/vt.c
27217 --- linux-2.6.32.41/drivers/char/vt.c 2011-03-27 14:31:47.000000000 -0400
27218 +++ linux-2.6.32.41/drivers/char/vt.c 2011-04-17 15:56:46.000000000 -0400
27219 @@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
27220
27221 static void notify_write(struct vc_data *vc, unsigned int unicode)
27222 {
27223 - struct vt_notifier_param param = { .vc = vc, unicode = unicode };
27224 + struct vt_notifier_param param = { .vc = vc, .c = unicode };
27225 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
27226 }
27227
27228 diff -urNp linux-2.6.32.41/drivers/char/vt_ioctl.c linux-2.6.32.41/drivers/char/vt_ioctl.c
27229 --- linux-2.6.32.41/drivers/char/vt_ioctl.c 2011-03-27 14:31:47.000000000 -0400
27230 +++ linux-2.6.32.41/drivers/char/vt_ioctl.c 2011-04-17 15:56:46.000000000 -0400
27231 @@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
27232 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
27233 return -EFAULT;
27234
27235 - if (!capable(CAP_SYS_TTY_CONFIG))
27236 - perm = 0;
27237 -
27238 switch (cmd) {
27239 case KDGKBENT:
27240 key_map = key_maps[s];
27241 @@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __
27242 val = (i ? K_HOLE : K_NOSUCHMAP);
27243 return put_user(val, &user_kbe->kb_value);
27244 case KDSKBENT:
27245 + if (!capable(CAP_SYS_TTY_CONFIG))
27246 + perm = 0;
27247 +
27248 if (!perm)
27249 return -EPERM;
27250 +
27251 if (!i && v == K_NOSUCHMAP) {
27252 /* deallocate map */
27253 key_map = key_maps[s];
27254 @@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
27255 int i, j, k;
27256 int ret;
27257
27258 - if (!capable(CAP_SYS_TTY_CONFIG))
27259 - perm = 0;
27260 -
27261 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
27262 if (!kbs) {
27263 ret = -ENOMEM;
27264 @@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
27265 kfree(kbs);
27266 return ((p && *p) ? -EOVERFLOW : 0);
27267 case KDSKBSENT:
27268 + if (!capable(CAP_SYS_TTY_CONFIG))
27269 + perm = 0;
27270 +
27271 if (!perm) {
27272 ret = -EPERM;
27273 goto reterr;
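
[editor's note] In the vt_ioctl hunks the capable(CAP_SYS_TTY_CONFIG) test moves from the top of do_kdsk_ioctl()/do_kdgkb_ioctl() into the KDSKBENT/KDSKBSENT branches, so only the set paths consult the capability and plain keymap reads never trigger the check. A reduced userspace sketch of gating just the write path; the command names and types are illustrative.

#include <stdbool.h>
#include <stdio.h>

enum kb_cmd { KB_GET, KB_SET };

static int kb_ioctl(enum kb_cmd cmd, bool has_cap, int *map, int value)
{
	switch (cmd) {
	case KB_GET:
		return *map;            /* read path: no capability needed */
	case KB_SET:
		if (!has_cap)
			return -1;      /* ~ -EPERM: write needs the capability */
		*map = value;
		return 0;
	}
	return -1;
}

int main(void)
{
	int map = 7;

	printf("get unprivileged: %d\n", kb_ioctl(KB_GET, false, &map, 0));
	printf("set unprivileged: %d\n", kb_ioctl(KB_SET, false, &map, 9));
	printf("set privileged:   %d\n", kb_ioctl(KB_SET, true, &map, 9));
	return 0;
}
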
27274 diff -urNp linux-2.6.32.41/drivers/cpufreq/cpufreq.c linux-2.6.32.41/drivers/cpufreq/cpufreq.c
27275 --- linux-2.6.32.41/drivers/cpufreq/cpufreq.c 2011-03-27 14:31:47.000000000 -0400
27276 +++ linux-2.6.32.41/drivers/cpufreq/cpufreq.c 2011-04-17 15:56:46.000000000 -0400
27277 @@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct
27278 complete(&policy->kobj_unregister);
27279 }
27280
27281 -static struct sysfs_ops sysfs_ops = {
27282 +static const struct sysfs_ops sysfs_ops = {
27283 .show = show,
27284 .store = store,
27285 };
27286 diff -urNp linux-2.6.32.41/drivers/cpuidle/sysfs.c linux-2.6.32.41/drivers/cpuidle/sysfs.c
27287 --- linux-2.6.32.41/drivers/cpuidle/sysfs.c 2011-03-27 14:31:47.000000000 -0400
27288 +++ linux-2.6.32.41/drivers/cpuidle/sysfs.c 2011-04-17 15:56:46.000000000 -0400
27289 @@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobj
27290 return ret;
27291 }
27292
27293 -static struct sysfs_ops cpuidle_sysfs_ops = {
27294 +static const struct sysfs_ops cpuidle_sysfs_ops = {
27295 .show = cpuidle_show,
27296 .store = cpuidle_store,
27297 };
27298 @@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct
27299 return ret;
27300 }
27301
27302 -static struct sysfs_ops cpuidle_state_sysfs_ops = {
27303 +static const struct sysfs_ops cpuidle_state_sysfs_ops = {
27304 .show = cpuidle_state_show,
27305 };
27306
27307 @@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpui
27308 .release = cpuidle_state_sysfs_release,
27309 };
27310
27311 -static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
27312 +static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
27313 {
27314 kobject_put(&device->kobjs[i]->kobj);
27315 wait_for_completion(&device->kobjs[i]->kobj_unregister);
27316 diff -urNp linux-2.6.32.41/drivers/crypto/hifn_795x.c linux-2.6.32.41/drivers/crypto/hifn_795x.c
27317 --- linux-2.6.32.41/drivers/crypto/hifn_795x.c 2011-03-27 14:31:47.000000000 -0400
27318 +++ linux-2.6.32.41/drivers/crypto/hifn_795x.c 2011-05-16 21:46:57.000000000 -0400
27319 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
27320 0xCA, 0x34, 0x2B, 0x2E};
27321 struct scatterlist sg;
27322
27323 + pax_track_stack();
27324 +
27325 memset(src, 0, sizeof(src));
27326 memset(ctx.key, 0, sizeof(ctx.key));
27327
27328 diff -urNp linux-2.6.32.41/drivers/crypto/padlock-aes.c linux-2.6.32.41/drivers/crypto/padlock-aes.c
27329 --- linux-2.6.32.41/drivers/crypto/padlock-aes.c 2011-03-27 14:31:47.000000000 -0400
27330 +++ linux-2.6.32.41/drivers/crypto/padlock-aes.c 2011-05-16 21:46:57.000000000 -0400
27331 @@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm
27332 struct crypto_aes_ctx gen_aes;
27333 int cpu;
27334
27335 + pax_track_stack();
27336 +
27337 if (key_len % 8) {
27338 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
27339 return -EINVAL;
27340 diff -urNp linux-2.6.32.41/drivers/dma/ioat/dma.c linux-2.6.32.41/drivers/dma/ioat/dma.c
27341 --- linux-2.6.32.41/drivers/dma/ioat/dma.c 2011-03-27 14:31:47.000000000 -0400
27342 +++ linux-2.6.32.41/drivers/dma/ioat/dma.c 2011-04-17 15:56:46.000000000 -0400
27343 @@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, str
27344 return entry->show(&chan->common, page);
27345 }
27346
27347 -struct sysfs_ops ioat_sysfs_ops = {
27348 +const struct sysfs_ops ioat_sysfs_ops = {
27349 .show = ioat_attr_show,
27350 };
27351
27352 diff -urNp linux-2.6.32.41/drivers/dma/ioat/dma.h linux-2.6.32.41/drivers/dma/ioat/dma.h
27353 --- linux-2.6.32.41/drivers/dma/ioat/dma.h 2011-03-27 14:31:47.000000000 -0400
27354 +++ linux-2.6.32.41/drivers/dma/ioat/dma.h 2011-04-17 15:56:46.000000000 -0400
27355 @@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_c
27356 unsigned long *phys_complete);
27357 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
27358 void ioat_kobject_del(struct ioatdma_device *device);
27359 -extern struct sysfs_ops ioat_sysfs_ops;
27360 +extern const struct sysfs_ops ioat_sysfs_ops;
27361 extern struct ioat_sysfs_entry ioat_version_attr;
27362 extern struct ioat_sysfs_entry ioat_cap_attr;
27363 #endif /* IOATDMA_H */
27364 diff -urNp linux-2.6.32.41/drivers/edac/edac_device_sysfs.c linux-2.6.32.41/drivers/edac/edac_device_sysfs.c
27365 --- linux-2.6.32.41/drivers/edac/edac_device_sysfs.c 2011-03-27 14:31:47.000000000 -0400
27366 +++ linux-2.6.32.41/drivers/edac/edac_device_sysfs.c 2011-04-17 15:56:46.000000000 -0400
27367 @@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(s
27368 }
27369
27370 /* edac_dev file operations for an 'ctl_info' */
27371 -static struct sysfs_ops device_ctl_info_ops = {
27372 +static const struct sysfs_ops device_ctl_info_ops = {
27373 .show = edac_dev_ctl_info_show,
27374 .store = edac_dev_ctl_info_store
27375 };
27376 @@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(s
27377 }
27378
27379 /* edac_dev file operations for an 'instance' */
27380 -static struct sysfs_ops device_instance_ops = {
27381 +static const struct sysfs_ops device_instance_ops = {
27382 .show = edac_dev_instance_show,
27383 .store = edac_dev_instance_store
27384 };
27385 @@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(stru
27386 }
27387
27388 /* edac_dev file operations for a 'block' */
27389 -static struct sysfs_ops device_block_ops = {
27390 +static const struct sysfs_ops device_block_ops = {
27391 .show = edac_dev_block_show,
27392 .store = edac_dev_block_store
27393 };
27394 diff -urNp linux-2.6.32.41/drivers/edac/edac_mc_sysfs.c linux-2.6.32.41/drivers/edac/edac_mc_sysfs.c
27395 --- linux-2.6.32.41/drivers/edac/edac_mc_sysfs.c 2011-03-27 14:31:47.000000000 -0400
27396 +++ linux-2.6.32.41/drivers/edac/edac_mc_sysfs.c 2011-04-17 15:56:46.000000000 -0400
27397 @@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kob
27398 return -EIO;
27399 }
27400
27401 -static struct sysfs_ops csrowfs_ops = {
27402 +static const struct sysfs_ops csrowfs_ops = {
27403 .show = csrowdev_show,
27404 .store = csrowdev_store
27405 };
27406 @@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobje
27407 }
27408
27409 /* Intermediate show/store table */
27410 -static struct sysfs_ops mci_ops = {
27411 +static const struct sysfs_ops mci_ops = {
27412 .show = mcidev_show,
27413 .store = mcidev_store
27414 };
27415 diff -urNp linux-2.6.32.41/drivers/edac/edac_pci_sysfs.c linux-2.6.32.41/drivers/edac/edac_pci_sysfs.c
27416 --- linux-2.6.32.41/drivers/edac/edac_pci_sysfs.c 2011-03-27 14:31:47.000000000 -0400
27417 +++ linux-2.6.32.41/drivers/edac/edac_pci_sysfs.c 2011-05-04 17:56:20.000000000 -0400
27418 @@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log
27419 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
27420 static int edac_pci_poll_msec = 1000; /* one second workq period */
27421
27422 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
27423 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
27424 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
27425 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
27426
27427 static struct kobject *edac_pci_top_main_kobj;
27428 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
27429 @@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(s
27430 }
27431
27432 /* fs_ops table */
27433 -static struct sysfs_ops pci_instance_ops = {
27434 +static const struct sysfs_ops pci_instance_ops = {
27435 .show = edac_pci_instance_show,
27436 .store = edac_pci_instance_store
27437 };
27438 @@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct
27439 return -EIO;
27440 }
27441
27442 -static struct sysfs_ops edac_pci_sysfs_ops = {
27443 +static const struct sysfs_ops edac_pci_sysfs_ops = {
27444 .show = edac_pci_dev_show,
27445 .store = edac_pci_dev_store
27446 };
27447 @@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(str
27448 edac_printk(KERN_CRIT, EDAC_PCI,
27449 "Signaled System Error on %s\n",
27450 pci_name(dev));
27451 - atomic_inc(&pci_nonparity_count);
27452 + atomic_inc_unchecked(&pci_nonparity_count);
27453 }
27454
27455 if (status & (PCI_STATUS_PARITY)) {
27456 @@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(str
27457 "Master Data Parity Error on %s\n",
27458 pci_name(dev));
27459
27460 - atomic_inc(&pci_parity_count);
27461 + atomic_inc_unchecked(&pci_parity_count);
27462 }
27463
27464 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27465 @@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(str
27466 "Detected Parity Error on %s\n",
27467 pci_name(dev));
27468
27469 - atomic_inc(&pci_parity_count);
27470 + atomic_inc_unchecked(&pci_parity_count);
27471 }
27472 }
27473
27474 @@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(str
27475 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
27476 "Signaled System Error on %s\n",
27477 pci_name(dev));
27478 - atomic_inc(&pci_nonparity_count);
27479 + atomic_inc_unchecked(&pci_nonparity_count);
27480 }
27481
27482 if (status & (PCI_STATUS_PARITY)) {
27483 @@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(str
27484 "Master Data Parity Error on "
27485 "%s\n", pci_name(dev));
27486
27487 - atomic_inc(&pci_parity_count);
27488 + atomic_inc_unchecked(&pci_parity_count);
27489 }
27490
27491 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27492 @@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(str
27493 "Detected Parity Error on %s\n",
27494 pci_name(dev));
27495
27496 - atomic_inc(&pci_parity_count);
27497 + atomic_inc_unchecked(&pci_parity_count);
27498 }
27499 }
27500 }
27501 @@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
27502 if (!check_pci_errors)
27503 return;
27504
27505 - before_count = atomic_read(&pci_parity_count);
27506 + before_count = atomic_read_unchecked(&pci_parity_count);
27507
27508 /* scan all PCI devices looking for a Parity Error on devices and
27509 * bridges.
27510 @@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
27511 /* Only if operator has selected panic on PCI Error */
27512 if (edac_pci_get_panic_on_pe()) {
27513 /* If the count is different 'after' from 'before' */
27514 - if (before_count != atomic_read(&pci_parity_count))
27515 + if (before_count != atomic_read_unchecked(&pci_parity_count))
27516 panic("EDAC: PCI Parity Error");
27517 }
27518 }
27519 diff -urNp linux-2.6.32.41/drivers/firewire/core-cdev.c linux-2.6.32.41/drivers/firewire/core-cdev.c
27520 --- linux-2.6.32.41/drivers/firewire/core-cdev.c 2011-03-27 14:31:47.000000000 -0400
27521 +++ linux-2.6.32.41/drivers/firewire/core-cdev.c 2011-04-17 15:56:46.000000000 -0400
27522 @@ -1141,8 +1141,7 @@ static int init_iso_resource(struct clie
27523 int ret;
27524
27525 if ((request->channels == 0 && request->bandwidth == 0) ||
27526 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
27527 - request->bandwidth < 0)
27528 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
27529 return -EINVAL;
27530
27531 r = kmalloc(sizeof(*r), GFP_KERNEL);
27532 diff -urNp linux-2.6.32.41/drivers/firewire/core-transaction.c linux-2.6.32.41/drivers/firewire/core-transaction.c
27533 --- linux-2.6.32.41/drivers/firewire/core-transaction.c 2011-03-27 14:31:47.000000000 -0400
27534 +++ linux-2.6.32.41/drivers/firewire/core-transaction.c 2011-05-16 21:46:57.000000000 -0400
27535 @@ -36,6 +36,7 @@
27536 #include <linux/string.h>
27537 #include <linux/timer.h>
27538 #include <linux/types.h>
27539 +#include <linux/sched.h>
27540
27541 #include <asm/byteorder.h>
27542
27543 @@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *c
27544 struct transaction_callback_data d;
27545 struct fw_transaction t;
27546
27547 + pax_track_stack();
27548 +
27549 init_completion(&d.done);
27550 d.payload = payload;
27551 fw_send_request(card, &t, tcode, destination_id, generation, speed,
27552 diff -urNp linux-2.6.32.41/drivers/firmware/dmi_scan.c linux-2.6.32.41/drivers/firmware/dmi_scan.c
27553 --- linux-2.6.32.41/drivers/firmware/dmi_scan.c 2011-03-27 14:31:47.000000000 -0400
27554 +++ linux-2.6.32.41/drivers/firmware/dmi_scan.c 2011-04-17 15:56:46.000000000 -0400
27555 @@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
27556 }
27557 }
27558 else {
27559 - /*
27560 - * no iounmap() for that ioremap(); it would be a no-op, but
27561 - * it's so early in setup that sucker gets confused into doing
27562 - * what it shouldn't if we actually call it.
27563 - */
27564 p = dmi_ioremap(0xF0000, 0x10000);
27565 if (p == NULL)
27566 goto error;
27567 diff -urNp linux-2.6.32.41/drivers/firmware/edd.c linux-2.6.32.41/drivers/firmware/edd.c
27568 --- linux-2.6.32.41/drivers/firmware/edd.c 2011-03-27 14:31:47.000000000 -0400
27569 +++ linux-2.6.32.41/drivers/firmware/edd.c 2011-04-17 15:56:46.000000000 -0400
27570 @@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, str
27571 return ret;
27572 }
27573
27574 -static struct sysfs_ops edd_attr_ops = {
27575 +static const struct sysfs_ops edd_attr_ops = {
27576 .show = edd_attr_show,
27577 };
27578
27579 diff -urNp linux-2.6.32.41/drivers/firmware/efivars.c linux-2.6.32.41/drivers/firmware/efivars.c
27580 --- linux-2.6.32.41/drivers/firmware/efivars.c 2011-03-27 14:31:47.000000000 -0400
27581 +++ linux-2.6.32.41/drivers/firmware/efivars.c 2011-04-17 15:56:46.000000000 -0400
27582 @@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct
27583 return ret;
27584 }
27585
27586 -static struct sysfs_ops efivar_attr_ops = {
27587 +static const struct sysfs_ops efivar_attr_ops = {
27588 .show = efivar_attr_show,
27589 .store = efivar_attr_store,
27590 };
27591 diff -urNp linux-2.6.32.41/drivers/firmware/iscsi_ibft.c linux-2.6.32.41/drivers/firmware/iscsi_ibft.c
27592 --- linux-2.6.32.41/drivers/firmware/iscsi_ibft.c 2011-03-27 14:31:47.000000000 -0400
27593 +++ linux-2.6.32.41/drivers/firmware/iscsi_ibft.c 2011-04-17 15:56:46.000000000 -0400
27594 @@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struc
27595 return ret;
27596 }
27597
27598 -static struct sysfs_ops ibft_attr_ops = {
27599 +static const struct sysfs_ops ibft_attr_ops = {
27600 .show = ibft_show_attribute,
27601 };
27602
27603 diff -urNp linux-2.6.32.41/drivers/firmware/memmap.c linux-2.6.32.41/drivers/firmware/memmap.c
27604 --- linux-2.6.32.41/drivers/firmware/memmap.c 2011-03-27 14:31:47.000000000 -0400
27605 +++ linux-2.6.32.41/drivers/firmware/memmap.c 2011-04-17 15:56:46.000000000 -0400
27606 @@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
27607 NULL
27608 };
27609
27610 -static struct sysfs_ops memmap_attr_ops = {
27611 +static const struct sysfs_ops memmap_attr_ops = {
27612 .show = memmap_attr_show,
27613 };
27614
27615 diff -urNp linux-2.6.32.41/drivers/gpio/vr41xx_giu.c linux-2.6.32.41/drivers/gpio/vr41xx_giu.c
27616 --- linux-2.6.32.41/drivers/gpio/vr41xx_giu.c 2011-03-27 14:31:47.000000000 -0400
27617 +++ linux-2.6.32.41/drivers/gpio/vr41xx_giu.c 2011-05-04 17:56:28.000000000 -0400
27618 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
27619 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
27620 maskl, pendl, maskh, pendh);
27621
27622 - atomic_inc(&irq_err_count);
27623 + atomic_inc_unchecked(&irq_err_count);
27624
27625 return -EINVAL;
27626 }
27627 diff -urNp linux-2.6.32.41/drivers/gpu/drm/drm_crtc_helper.c linux-2.6.32.41/drivers/gpu/drm/drm_crtc_helper.c
27628 --- linux-2.6.32.41/drivers/gpu/drm/drm_crtc_helper.c 2011-03-27 14:31:47.000000000 -0400
27629 +++ linux-2.6.32.41/drivers/gpu/drm/drm_crtc_helper.c 2011-05-16 21:46:57.000000000 -0400
27630 @@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct d
27631 struct drm_crtc *tmp;
27632 int crtc_mask = 1;
27633
27634 - WARN(!crtc, "checking null crtc?");
27635 + BUG_ON(!crtc);
27636
27637 dev = crtc->dev;
27638
27639 @@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm
27640
27641 adjusted_mode = drm_mode_duplicate(dev, mode);
27642
27643 + pax_track_stack();
27644 +
27645 crtc->enabled = drm_helper_crtc_in_use(crtc);
27646
27647 if (!crtc->enabled)
27648 diff -urNp linux-2.6.32.41/drivers/gpu/drm/drm_drv.c linux-2.6.32.41/drivers/gpu/drm/drm_drv.c
27649 --- linux-2.6.32.41/drivers/gpu/drm/drm_drv.c 2011-03-27 14:31:47.000000000 -0400
27650 +++ linux-2.6.32.41/drivers/gpu/drm/drm_drv.c 2011-04-17 15:56:46.000000000 -0400
27651 @@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struc
27652 char *kdata = NULL;
27653
27654 atomic_inc(&dev->ioctl_count);
27655 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
27656 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
27657 ++file_priv->ioctl_count;
27658
27659 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
27660 diff -urNp linux-2.6.32.41/drivers/gpu/drm/drm_fops.c linux-2.6.32.41/drivers/gpu/drm/drm_fops.c
27661 --- linux-2.6.32.41/drivers/gpu/drm/drm_fops.c 2011-03-27 14:31:47.000000000 -0400
27662 +++ linux-2.6.32.41/drivers/gpu/drm/drm_fops.c 2011-04-17 15:56:46.000000000 -0400
27663 @@ -66,7 +66,7 @@ static int drm_setup(struct drm_device *
27664 }
27665
27666 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
27667 - atomic_set(&dev->counts[i], 0);
27668 + atomic_set_unchecked(&dev->counts[i], 0);
27669
27670 dev->sigdata.lock = NULL;
27671
27672 @@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct
27673
27674 retcode = drm_open_helper(inode, filp, dev);
27675 if (!retcode) {
27676 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
27677 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
27678 spin_lock(&dev->count_lock);
27679 - if (!dev->open_count++) {
27680 + if (local_inc_return(&dev->open_count) == 1) {
27681 spin_unlock(&dev->count_lock);
27682 retcode = drm_setup(dev);
27683 goto out;
27684 @@ -435,7 +435,7 @@ int drm_release(struct inode *inode, str
27685
27686 lock_kernel();
27687
27688 - DRM_DEBUG("open_count = %d\n", dev->open_count);
27689 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
27690
27691 if (dev->driver->preclose)
27692 dev->driver->preclose(dev, file_priv);
27693 @@ -447,7 +447,7 @@ int drm_release(struct inode *inode, str
27694 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
27695 task_pid_nr(current),
27696 (long)old_encode_dev(file_priv->minor->device),
27697 - dev->open_count);
27698 + local_read(&dev->open_count));
27699
27700 /* if the master has gone away we can't do anything with the lock */
27701 if (file_priv->minor->master)
27702 @@ -524,9 +524,9 @@ int drm_release(struct inode *inode, str
27703 * End inline drm_release
27704 */
27705
27706 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
27707 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
27708 spin_lock(&dev->count_lock);
27709 - if (!--dev->open_count) {
27710 + if (local_dec_and_test(&dev->open_count)) {
27711 if (atomic_read(&dev->ioctl_count)) {
27712 DRM_ERROR("Device busy: %d\n",
27713 atomic_read(&dev->ioctl_count));
27714 diff -urNp linux-2.6.32.41/drivers/gpu/drm/drm_gem.c linux-2.6.32.41/drivers/gpu/drm/drm_gem.c
27715 --- linux-2.6.32.41/drivers/gpu/drm/drm_gem.c 2011-03-27 14:31:47.000000000 -0400
27716 +++ linux-2.6.32.41/drivers/gpu/drm/drm_gem.c 2011-04-17 15:56:46.000000000 -0400
27717 @@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
27718 spin_lock_init(&dev->object_name_lock);
27719 idr_init(&dev->object_name_idr);
27720 atomic_set(&dev->object_count, 0);
27721 - atomic_set(&dev->object_memory, 0);
27722 + atomic_set_unchecked(&dev->object_memory, 0);
27723 atomic_set(&dev->pin_count, 0);
27724 - atomic_set(&dev->pin_memory, 0);
27725 + atomic_set_unchecked(&dev->pin_memory, 0);
27726 atomic_set(&dev->gtt_count, 0);
27727 - atomic_set(&dev->gtt_memory, 0);
27728 + atomic_set_unchecked(&dev->gtt_memory, 0);
27729
27730 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
27731 if (!mm) {
27732 @@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *
27733 goto fput;
27734 }
27735 atomic_inc(&dev->object_count);
27736 - atomic_add(obj->size, &dev->object_memory);
27737 + atomic_add_unchecked(obj->size, &dev->object_memory);
27738 return obj;
27739 fput:
27740 fput(obj->filp);
27741 @@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
27742
27743 fput(obj->filp);
27744 atomic_dec(&dev->object_count);
27745 - atomic_sub(obj->size, &dev->object_memory);
27746 + atomic_sub_unchecked(obj->size, &dev->object_memory);
27747 kfree(obj);
27748 }
27749 EXPORT_SYMBOL(drm_gem_object_free);
27750 diff -urNp linux-2.6.32.41/drivers/gpu/drm/drm_info.c linux-2.6.32.41/drivers/gpu/drm/drm_info.c
27751 --- linux-2.6.32.41/drivers/gpu/drm/drm_info.c 2011-03-27 14:31:47.000000000 -0400
27752 +++ linux-2.6.32.41/drivers/gpu/drm/drm_info.c 2011-04-17 15:56:46.000000000 -0400
27753 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
27754 struct drm_local_map *map;
27755 struct drm_map_list *r_list;
27756
27757 - /* Hardcoded from _DRM_FRAME_BUFFER,
27758 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
27759 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
27760 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
27761 + static const char * const types[] = {
27762 + [_DRM_FRAME_BUFFER] = "FB",
27763 + [_DRM_REGISTERS] = "REG",
27764 + [_DRM_SHM] = "SHM",
27765 + [_DRM_AGP] = "AGP",
27766 + [_DRM_SCATTER_GATHER] = "SG",
27767 + [_DRM_CONSISTENT] = "PCI",
27768 + [_DRM_GEM] = "GEM" };
27769 const char *type;
27770 int i;
27771
27772 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
27773 map = r_list->map;
27774 if (!map)
27775 continue;
27776 - if (map->type < 0 || map->type > 5)
27777 + if (map->type >= ARRAY_SIZE(types))
27778 type = "??";
27779 else
27780 type = types[map->type];
27781 @@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file
27782 struct drm_device *dev = node->minor->dev;
27783
27784 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
27785 - seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
27786 + seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
27787 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
27788 - seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
27789 - seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
27790 + seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
27791 + seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
27792 seq_printf(m, "%d gtt total\n", dev->gtt_total);
27793 return 0;
27794 }
27795 @@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, voi
27796 mutex_lock(&dev->struct_mutex);
27797 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
27798 atomic_read(&dev->vma_count),
27799 +#ifdef CONFIG_GRKERNSEC_HIDESYM
27800 + NULL, 0);
27801 +#else
27802 high_memory, (u64)virt_to_phys(high_memory));
27803 +#endif
27804
27805 list_for_each_entry(pt, &dev->vmalist, head) {
27806 vma = pt->vma;
27807 @@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, voi
27808 continue;
27809 seq_printf(m,
27810 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
27811 - pt->pid, vma->vm_start, vma->vm_end,
27812 + pt->pid,
27813 +#ifdef CONFIG_GRKERNSEC_HIDESYM
27814 + 0, 0,
27815 +#else
27816 + vma->vm_start, vma->vm_end,
27817 +#endif
27818 vma->vm_flags & VM_READ ? 'r' : '-',
27819 vma->vm_flags & VM_WRITE ? 'w' : '-',
27820 vma->vm_flags & VM_EXEC ? 'x' : '-',
27821 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
27822 vma->vm_flags & VM_LOCKED ? 'l' : '-',
27823 vma->vm_flags & VM_IO ? 'i' : '-',
27824 +#ifdef CONFIG_GRKERNSEC_HIDESYM
27825 + 0);
27826 +#else
27827 vma->vm_pgoff);
27828 +#endif
27829
27830 #if defined(__i386__)
27831 pgprot = pgprot_val(vma->vm_page_prot);
27832 diff -urNp linux-2.6.32.41/drivers/gpu/drm/drm_ioctl.c linux-2.6.32.41/drivers/gpu/drm/drm_ioctl.c
27833 --- linux-2.6.32.41/drivers/gpu/drm/drm_ioctl.c 2011-03-27 14:31:47.000000000 -0400
27834 +++ linux-2.6.32.41/drivers/gpu/drm/drm_ioctl.c 2011-04-17 15:56:46.000000000 -0400
27835 @@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev,
27836 stats->data[i].value =
27837 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
27838 else
27839 - stats->data[i].value = atomic_read(&dev->counts[i]);
27840 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
27841 stats->data[i].type = dev->types[i];
27842 }
27843
27844 diff -urNp linux-2.6.32.41/drivers/gpu/drm/drm_lock.c linux-2.6.32.41/drivers/gpu/drm/drm_lock.c
27845 --- linux-2.6.32.41/drivers/gpu/drm/drm_lock.c 2011-03-27 14:31:47.000000000 -0400
27846 +++ linux-2.6.32.41/drivers/gpu/drm/drm_lock.c 2011-04-17 15:56:46.000000000 -0400
27847 @@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, voi
27848 if (drm_lock_take(&master->lock, lock->context)) {
27849 master->lock.file_priv = file_priv;
27850 master->lock.lock_time = jiffies;
27851 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
27852 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
27853 break; /* Got lock */
27854 }
27855
27856 @@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, v
27857 return -EINVAL;
27858 }
27859
27860 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
27861 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
27862
27863 /* kernel_context_switch isn't used by any of the x86 drm
27864 * modules but is required by the Sparc driver.
27865 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i810/i810_dma.c linux-2.6.32.41/drivers/gpu/drm/i810/i810_dma.c
27866 --- linux-2.6.32.41/drivers/gpu/drm/i810/i810_dma.c 2011-03-27 14:31:47.000000000 -0400
27867 +++ linux-2.6.32.41/drivers/gpu/drm/i810/i810_dma.c 2011-04-17 15:56:46.000000000 -0400
27868 @@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_de
27869 dma->buflist[vertex->idx],
27870 vertex->discard, vertex->used);
27871
27872 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
27873 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
27874 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
27875 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
27876 sarea_priv->last_enqueue = dev_priv->counter - 1;
27877 sarea_priv->last_dispatch = (int)hw_status[5];
27878
27879 @@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device
27880 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
27881 mc->last_render);
27882
27883 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
27884 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
27885 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
27886 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
27887 sarea_priv->last_enqueue = dev_priv->counter - 1;
27888 sarea_priv->last_dispatch = (int)hw_status[5];
27889
27890 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i810/i810_drv.h linux-2.6.32.41/drivers/gpu/drm/i810/i810_drv.h
27891 --- linux-2.6.32.41/drivers/gpu/drm/i810/i810_drv.h 2011-03-27 14:31:47.000000000 -0400
27892 +++ linux-2.6.32.41/drivers/gpu/drm/i810/i810_drv.h 2011-05-04 17:56:28.000000000 -0400
27893 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
27894 int page_flipping;
27895
27896 wait_queue_head_t irq_queue;
27897 - atomic_t irq_received;
27898 - atomic_t irq_emitted;
27899 + atomic_unchecked_t irq_received;
27900 + atomic_unchecked_t irq_emitted;
27901
27902 int front_offset;
27903 } drm_i810_private_t;
27904 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i830/i830_drv.h linux-2.6.32.41/drivers/gpu/drm/i830/i830_drv.h
27905 --- linux-2.6.32.41/drivers/gpu/drm/i830/i830_drv.h 2011-03-27 14:31:47.000000000 -0400
27906 +++ linux-2.6.32.41/drivers/gpu/drm/i830/i830_drv.h 2011-05-04 17:56:28.000000000 -0400
27907 @@ -115,8 +115,8 @@ typedef struct drm_i830_private {
27908 int page_flipping;
27909
27910 wait_queue_head_t irq_queue;
27911 - atomic_t irq_received;
27912 - atomic_t irq_emitted;
27913 + atomic_unchecked_t irq_received;
27914 + atomic_unchecked_t irq_emitted;
27915
27916 int use_mi_batchbuffer_start;
27917
27918 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i830/i830_irq.c linux-2.6.32.41/drivers/gpu/drm/i830/i830_irq.c
27919 --- linux-2.6.32.41/drivers/gpu/drm/i830/i830_irq.c 2011-03-27 14:31:47.000000000 -0400
27920 +++ linux-2.6.32.41/drivers/gpu/drm/i830/i830_irq.c 2011-05-04 17:56:28.000000000 -0400
27921 @@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_
27922
27923 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
27924
27925 - atomic_inc(&dev_priv->irq_received);
27926 + atomic_inc_unchecked(&dev_priv->irq_received);
27927 wake_up_interruptible(&dev_priv->irq_queue);
27928
27929 return IRQ_HANDLED;
27930 @@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_devi
27931
27932 DRM_DEBUG("%s\n", __func__);
27933
27934 - atomic_inc(&dev_priv->irq_emitted);
27935 + atomic_inc_unchecked(&dev_priv->irq_emitted);
27936
27937 BEGIN_LP_RING(2);
27938 OUT_RING(0);
27939 OUT_RING(GFX_OP_USER_INTERRUPT);
27940 ADVANCE_LP_RING();
27941
27942 - return atomic_read(&dev_priv->irq_emitted);
27943 + return atomic_read_unchecked(&dev_priv->irq_emitted);
27944 }
27945
27946 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
27947 @@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_devi
27948
27949 DRM_DEBUG("%s\n", __func__);
27950
27951 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
27952 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
27953 return 0;
27954
27955 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
27956 @@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_devi
27957
27958 for (;;) {
27959 __set_current_state(TASK_INTERRUPTIBLE);
27960 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
27961 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
27962 break;
27963 if ((signed)(end - jiffies) <= 0) {
27964 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
27965 @@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct d
27966 I830_WRITE16(I830REG_HWSTAM, 0xffff);
27967 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
27968 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
27969 - atomic_set(&dev_priv->irq_received, 0);
27970 - atomic_set(&dev_priv->irq_emitted, 0);
27971 + atomic_set_unchecked(&dev_priv->irq_received, 0);
27972 + atomic_set_unchecked(&dev_priv->irq_emitted, 0);
27973 init_waitqueue_head(&dev_priv->irq_queue);
27974 }
27975
27976 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ch7017.c linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ch7017.c
27977 --- linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ch7017.c 2011-03-27 14:31:47.000000000 -0400
27978 +++ linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ch7017.c 2011-04-17 15:56:46.000000000 -0400
27979 @@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_
27980 }
27981 }
27982
27983 -struct intel_dvo_dev_ops ch7017_ops = {
27984 +const struct intel_dvo_dev_ops ch7017_ops = {
27985 .init = ch7017_init,
27986 .detect = ch7017_detect,
27987 .mode_valid = ch7017_mode_valid,
27988 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ch7xxx.c linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ch7xxx.c
27989 --- linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-03-27 14:31:47.000000000 -0400
27990 +++ linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-04-17 15:56:46.000000000 -0400
27991 @@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_
27992 }
27993 }
27994
27995 -struct intel_dvo_dev_ops ch7xxx_ops = {
27996 +const struct intel_dvo_dev_ops ch7xxx_ops = {
27997 .init = ch7xxx_init,
27998 .detect = ch7xxx_detect,
27999 .mode_valid = ch7xxx_mode_valid,
28000 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/dvo.h linux-2.6.32.41/drivers/gpu/drm/i915/dvo.h
28001 --- linux-2.6.32.41/drivers/gpu/drm/i915/dvo.h 2011-03-27 14:31:47.000000000 -0400
28002 +++ linux-2.6.32.41/drivers/gpu/drm/i915/dvo.h 2011-04-17 15:56:46.000000000 -0400
28003 @@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
28004 *
28005 * \return singly-linked list of modes or NULL if no modes found.
28006 */
28007 - struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
28008 + struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
28009
28010 /**
28011 * Clean up driver-specific bits of the output
28012 */
28013 - void (*destroy) (struct intel_dvo_device *dvo);
28014 + void (* const destroy) (struct intel_dvo_device *dvo);
28015
28016 /**
28017 * Debugging hook to dump device registers to log file
28018 */
28019 - void (*dump_regs)(struct intel_dvo_device *dvo);
28020 + void (* const dump_regs)(struct intel_dvo_device *dvo);
28021 };
28022
28023 -extern struct intel_dvo_dev_ops sil164_ops;
28024 -extern struct intel_dvo_dev_ops ch7xxx_ops;
28025 -extern struct intel_dvo_dev_ops ivch_ops;
28026 -extern struct intel_dvo_dev_ops tfp410_ops;
28027 -extern struct intel_dvo_dev_ops ch7017_ops;
28028 +extern const struct intel_dvo_dev_ops sil164_ops;
28029 +extern const struct intel_dvo_dev_ops ch7xxx_ops;
28030 +extern const struct intel_dvo_dev_ops ivch_ops;
28031 +extern const struct intel_dvo_dev_ops tfp410_ops;
28032 +extern const struct intel_dvo_dev_ops ch7017_ops;
28033
28034 #endif /* _INTEL_DVO_H */
28035 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ivch.c linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ivch.c
28036 --- linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ivch.c 2011-03-27 14:31:47.000000000 -0400
28037 +++ linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ivch.c 2011-04-17 15:56:46.000000000 -0400
28038 @@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dv
28039 }
28040 }
28041
28042 -struct intel_dvo_dev_ops ivch_ops= {
28043 +const struct intel_dvo_dev_ops ivch_ops= {
28044 .init = ivch_init,
28045 .dpms = ivch_dpms,
28046 .save = ivch_save,
28047 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/dvo_sil164.c linux-2.6.32.41/drivers/gpu/drm/i915/dvo_sil164.c
28048 --- linux-2.6.32.41/drivers/gpu/drm/i915/dvo_sil164.c 2011-03-27 14:31:47.000000000 -0400
28049 +++ linux-2.6.32.41/drivers/gpu/drm/i915/dvo_sil164.c 2011-04-17 15:56:46.000000000 -0400
28050 @@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_
28051 }
28052 }
28053
28054 -struct intel_dvo_dev_ops sil164_ops = {
28055 +const struct intel_dvo_dev_ops sil164_ops = {
28056 .init = sil164_init,
28057 .detect = sil164_detect,
28058 .mode_valid = sil164_mode_valid,
28059 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/dvo_tfp410.c linux-2.6.32.41/drivers/gpu/drm/i915/dvo_tfp410.c
28060 --- linux-2.6.32.41/drivers/gpu/drm/i915/dvo_tfp410.c 2011-03-27 14:31:47.000000000 -0400
28061 +++ linux-2.6.32.41/drivers/gpu/drm/i915/dvo_tfp410.c 2011-04-17 15:56:46.000000000 -0400
28062 @@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_
28063 }
28064 }
28065
28066 -struct intel_dvo_dev_ops tfp410_ops = {
28067 +const struct intel_dvo_dev_ops tfp410_ops = {
28068 .init = tfp410_init,
28069 .detect = tfp410_detect,
28070 .mode_valid = tfp410_mode_valid,
28071 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/i915_debugfs.c linux-2.6.32.41/drivers/gpu/drm/i915/i915_debugfs.c
28072 --- linux-2.6.32.41/drivers/gpu/drm/i915/i915_debugfs.c 2011-03-27 14:31:47.000000000 -0400
28073 +++ linux-2.6.32.41/drivers/gpu/drm/i915/i915_debugfs.c 2011-05-04 17:56:28.000000000 -0400
28074 @@ -192,7 +192,7 @@ static int i915_interrupt_info(struct se
28075 I915_READ(GTIMR));
28076 }
28077 seq_printf(m, "Interrupts received: %d\n",
28078 - atomic_read(&dev_priv->irq_received));
28079 + atomic_read_unchecked(&dev_priv->irq_received));
28080 if (dev_priv->hw_status_page != NULL) {
28081 seq_printf(m, "Current sequence: %d\n",
28082 i915_get_gem_seqno(dev));
28083 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/i915_drv.c linux-2.6.32.41/drivers/gpu/drm/i915/i915_drv.c
28084 --- linux-2.6.32.41/drivers/gpu/drm/i915/i915_drv.c 2011-03-27 14:31:47.000000000 -0400
28085 +++ linux-2.6.32.41/drivers/gpu/drm/i915/i915_drv.c 2011-04-17 15:56:46.000000000 -0400
28086 @@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
28087 return i915_resume(dev);
28088 }
28089
28090 -static struct vm_operations_struct i915_gem_vm_ops = {
28091 +static const struct vm_operations_struct i915_gem_vm_ops = {
28092 .fault = i915_gem_fault,
28093 .open = drm_gem_vm_open,
28094 .close = drm_gem_vm_close,
28095 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/i915_drv.h linux-2.6.32.41/drivers/gpu/drm/i915/i915_drv.h
28096 --- linux-2.6.32.41/drivers/gpu/drm/i915/i915_drv.h 2011-03-27 14:31:47.000000000 -0400
28097 +++ linux-2.6.32.41/drivers/gpu/drm/i915/i915_drv.h 2011-05-04 17:56:28.000000000 -0400
28098 @@ -197,7 +197,7 @@ typedef struct drm_i915_private {
28099 int page_flipping;
28100
28101 wait_queue_head_t irq_queue;
28102 - atomic_t irq_received;
28103 + atomic_unchecked_t irq_received;
28104 /** Protects user_irq_refcount and irq_mask_reg */
28105 spinlock_t user_irq_lock;
28106 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
28107 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/i915_gem.c linux-2.6.32.41/drivers/gpu/drm/i915/i915_gem.c
28108 --- linux-2.6.32.41/drivers/gpu/drm/i915/i915_gem.c 2011-03-27 14:31:47.000000000 -0400
28109 +++ linux-2.6.32.41/drivers/gpu/drm/i915/i915_gem.c 2011-04-17 15:56:46.000000000 -0400
28110 @@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_d
28111
28112 args->aper_size = dev->gtt_total;
28113 args->aper_available_size = (args->aper_size -
28114 - atomic_read(&dev->pin_memory));
28115 + atomic_read_unchecked(&dev->pin_memory));
28116
28117 return 0;
28118 }
28119 @@ -492,6 +492,11 @@ i915_gem_pread_ioctl(struct drm_device *
28120 return -EINVAL;
28121 }
28122
28123 + if (!access_ok(VERIFY_WRITE, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
28124 + drm_gem_object_unreference(obj);
28125 + return -EFAULT;
28126 + }
28127 +
28128 if (i915_gem_object_needs_bit17_swizzle(obj)) {
28129 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
28130 } else {
28131 @@ -965,6 +970,11 @@ i915_gem_pwrite_ioctl(struct drm_device
28132 return -EINVAL;
28133 }
28134
28135 + if (!access_ok(VERIFY_READ, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
28136 + drm_gem_object_unreference(obj);
28137 + return -EFAULT;
28138 + }
28139 +
28140 /* We can only do the GTT pwrite on untiled buffers, as otherwise
28141 * it would end up going through the fenced access, and we'll get
28142 * different detiling behavior between reading and writing.
28143 @@ -2054,7 +2064,7 @@ i915_gem_object_unbind(struct drm_gem_ob
28144
28145 if (obj_priv->gtt_space) {
28146 atomic_dec(&dev->gtt_count);
28147 - atomic_sub(obj->size, &dev->gtt_memory);
28148 + atomic_sub_unchecked(obj->size, &dev->gtt_memory);
28149
28150 drm_mm_put_block(obj_priv->gtt_space);
28151 obj_priv->gtt_space = NULL;
28152 @@ -2697,7 +2707,7 @@ i915_gem_object_bind_to_gtt(struct drm_g
28153 goto search_free;
28154 }
28155 atomic_inc(&dev->gtt_count);
28156 - atomic_add(obj->size, &dev->gtt_memory);
28157 + atomic_add_unchecked(obj->size, &dev->gtt_memory);
28158
28159 /* Assert that the object is not currently in any GPU domain. As it
28160 * wasn't in the GTT, there shouldn't be any way it could have been in
28161 @@ -3751,9 +3761,9 @@ i915_gem_execbuffer(struct drm_device *d
28162 "%d/%d gtt bytes\n",
28163 atomic_read(&dev->object_count),
28164 atomic_read(&dev->pin_count),
28165 - atomic_read(&dev->object_memory),
28166 - atomic_read(&dev->pin_memory),
28167 - atomic_read(&dev->gtt_memory),
28168 + atomic_read_unchecked(&dev->object_memory),
28169 + atomic_read_unchecked(&dev->pin_memory),
28170 + atomic_read_unchecked(&dev->gtt_memory),
28171 dev->gtt_total);
28172 }
28173 goto err;
28174 @@ -3985,7 +3995,7 @@ i915_gem_object_pin(struct drm_gem_objec
28175 */
28176 if (obj_priv->pin_count == 1) {
28177 atomic_inc(&dev->pin_count);
28178 - atomic_add(obj->size, &dev->pin_memory);
28179 + atomic_add_unchecked(obj->size, &dev->pin_memory);
28180 if (!obj_priv->active &&
28181 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
28182 !list_empty(&obj_priv->list))
28183 @@ -4018,7 +4028,7 @@ i915_gem_object_unpin(struct drm_gem_obj
28184 list_move_tail(&obj_priv->list,
28185 &dev_priv->mm.inactive_list);
28186 atomic_dec(&dev->pin_count);
28187 - atomic_sub(obj->size, &dev->pin_memory);
28188 + atomic_sub_unchecked(obj->size, &dev->pin_memory);
28189 }
28190 i915_verify_inactive(dev, __FILE__, __LINE__);
28191 }
28192 diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/i915_irq.c linux-2.6.32.41/drivers/gpu/drm/i915/i915_irq.c
28193 --- linux-2.6.32.41/drivers/gpu/drm/i915/i915_irq.c 2011-03-27 14:31:47.000000000 -0400
28194 +++ linux-2.6.32.41/drivers/gpu/drm/i915/i915_irq.c 2011-05-04 17:56:28.000000000 -0400
28195 @@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_
28196 int irq_received;
28197 int ret = IRQ_NONE;
28198
28199 - atomic_inc(&dev_priv->irq_received);
28200 + atomic_inc_unchecked(&dev_priv->irq_received);
28201
28202 if (IS_IGDNG(dev))
28203 return igdng_irq_handler(dev);
28204 @@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct d
28205 {
28206 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28207
28208 - atomic_set(&dev_priv->irq_received, 0);
28209 + atomic_set_unchecked(&dev_priv->irq_received, 0);
28210
28211 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28212 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28213 diff -urNp linux-2.6.32.41/drivers/gpu/drm/mga/mga_drv.h linux-2.6.32.41/drivers/gpu/drm/mga/mga_drv.h
28214 --- linux-2.6.32.41/drivers/gpu/drm/mga/mga_drv.h 2011-03-27 14:31:47.000000000 -0400
28215 +++ linux-2.6.32.41/drivers/gpu/drm/mga/mga_drv.h 2011-05-04 17:56:28.000000000 -0400
28216 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
28217 u32 clear_cmd;
28218 u32 maccess;
28219
28220 - atomic_t vbl_received; /**< Number of vblanks received. */
28221 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
28222 wait_queue_head_t fence_queue;
28223 - atomic_t last_fence_retired;
28224 + atomic_unchecked_t last_fence_retired;
28225 u32 next_fence_to_post;
28226
28227 unsigned int fb_cpp;
28228 diff -urNp linux-2.6.32.41/drivers/gpu/drm/mga/mga_irq.c linux-2.6.32.41/drivers/gpu/drm/mga/mga_irq.c
28229 --- linux-2.6.32.41/drivers/gpu/drm/mga/mga_irq.c 2011-03-27 14:31:47.000000000 -0400
28230 +++ linux-2.6.32.41/drivers/gpu/drm/mga/mga_irq.c 2011-05-04 17:56:28.000000000 -0400
28231 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
28232 if (crtc != 0)
28233 return 0;
28234
28235 - return atomic_read(&dev_priv->vbl_received);
28236 + return atomic_read_unchecked(&dev_priv->vbl_received);
28237 }
28238
28239
28240 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
28241 /* VBLANK interrupt */
28242 if (status & MGA_VLINEPEN) {
28243 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
28244 - atomic_inc(&dev_priv->vbl_received);
28245 + atomic_inc_unchecked(&dev_priv->vbl_received);
28246 drm_handle_vblank(dev, 0);
28247 handled = 1;
28248 }
28249 @@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
28250 MGA_WRITE(MGA_PRIMEND, prim_end);
28251 }
28252
28253 - atomic_inc(&dev_priv->last_fence_retired);
28254 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
28255 DRM_WAKEUP(&dev_priv->fence_queue);
28256 handled = 1;
28257 }
28258 @@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_dev
28259 * using fences.
28260 */
28261 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
28262 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
28263 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
28264 - *sequence) <= (1 << 23)));
28265
28266 *sequence = cur_fence;
28267 diff -urNp linux-2.6.32.41/drivers/gpu/drm/r128/r128_cce.c linux-2.6.32.41/drivers/gpu/drm/r128/r128_cce.c
28268 --- linux-2.6.32.41/drivers/gpu/drm/r128/r128_cce.c 2011-03-27 14:31:47.000000000 -0400
28269 +++ linux-2.6.32.41/drivers/gpu/drm/r128/r128_cce.c 2011-05-04 17:56:28.000000000 -0400
28270 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
28271
28272 /* GH: Simple idle check.
28273 */
28274 - atomic_set(&dev_priv->idle_count, 0);
28275 + atomic_set_unchecked(&dev_priv->idle_count, 0);
28276
28277 /* We don't support anything other than bus-mastering ring mode,
28278 * but the ring can be in either AGP or PCI space for the ring
28279 diff -urNp linux-2.6.32.41/drivers/gpu/drm/r128/r128_drv.h linux-2.6.32.41/drivers/gpu/drm/r128/r128_drv.h
28280 --- linux-2.6.32.41/drivers/gpu/drm/r128/r128_drv.h 2011-03-27 14:31:47.000000000 -0400
28281 +++ linux-2.6.32.41/drivers/gpu/drm/r128/r128_drv.h 2011-05-04 17:56:28.000000000 -0400
28282 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
28283 int is_pci;
28284 unsigned long cce_buffers_offset;
28285
28286 - atomic_t idle_count;
28287 + atomic_unchecked_t idle_count;
28288
28289 int page_flipping;
28290 int current_page;
28291 u32 crtc_offset;
28292 u32 crtc_offset_cntl;
28293
28294 - atomic_t vbl_received;
28295 + atomic_unchecked_t vbl_received;
28296
28297 u32 color_fmt;
28298 unsigned int front_offset;
28299 diff -urNp linux-2.6.32.41/drivers/gpu/drm/r128/r128_irq.c linux-2.6.32.41/drivers/gpu/drm/r128/r128_irq.c
28300 --- linux-2.6.32.41/drivers/gpu/drm/r128/r128_irq.c 2011-03-27 14:31:47.000000000 -0400
28301 +++ linux-2.6.32.41/drivers/gpu/drm/r128/r128_irq.c 2011-05-04 17:56:28.000000000 -0400
28302 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
28303 if (crtc != 0)
28304 return 0;
28305
28306 - return atomic_read(&dev_priv->vbl_received);
28307 + return atomic_read_unchecked(&dev_priv->vbl_received);
28308 }
28309
28310 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28311 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
28312 /* VBLANK interrupt */
28313 if (status & R128_CRTC_VBLANK_INT) {
28314 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
28315 - atomic_inc(&dev_priv->vbl_received);
28316 + atomic_inc_unchecked(&dev_priv->vbl_received);
28317 drm_handle_vblank(dev, 0);
28318 return IRQ_HANDLED;
28319 }
28320 diff -urNp linux-2.6.32.41/drivers/gpu/drm/r128/r128_state.c linux-2.6.32.41/drivers/gpu/drm/r128/r128_state.c
28321 --- linux-2.6.32.41/drivers/gpu/drm/r128/r128_state.c 2011-03-27 14:31:47.000000000 -0400
28322 +++ linux-2.6.32.41/drivers/gpu/drm/r128/r128_state.c 2011-05-04 17:56:28.000000000 -0400
28323 @@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_priv
28324
28325 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
28326 {
28327 - if (atomic_read(&dev_priv->idle_count) == 0) {
28328 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
28329 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
28330 } else {
28331 - atomic_set(&dev_priv->idle_count, 0);
28332 + atomic_set_unchecked(&dev_priv->idle_count, 0);
28333 }
28334 }
28335
28336 diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/atom.c linux-2.6.32.41/drivers/gpu/drm/radeon/atom.c
28337 --- linux-2.6.32.41/drivers/gpu/drm/radeon/atom.c 2011-05-10 22:12:01.000000000 -0400
28338 +++ linux-2.6.32.41/drivers/gpu/drm/radeon/atom.c 2011-05-16 21:46:57.000000000 -0400
28339 @@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct c
28340 char name[512];
28341 int i;
28342
28343 + pax_track_stack();
28344 +
28345 ctx->card = card;
28346 ctx->bios = bios;
28347
28348 diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/mkregtable.c linux-2.6.32.41/drivers/gpu/drm/radeon/mkregtable.c
28349 --- linux-2.6.32.41/drivers/gpu/drm/radeon/mkregtable.c 2011-03-27 14:31:47.000000000 -0400
28350 +++ linux-2.6.32.41/drivers/gpu/drm/radeon/mkregtable.c 2011-04-17 15:56:46.000000000 -0400
28351 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
28352 regex_t mask_rex;
28353 regmatch_t match[4];
28354 char buf[1024];
28355 - size_t end;
28356 + long end;
28357 int len;
28358 int done = 0;
28359 int r;
28360 unsigned o;
28361 struct offset *offset;
28362 char last_reg_s[10];
28363 - int last_reg;
28364 + unsigned long last_reg;
28365
28366 if (regcomp
28367 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
28368 diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_atombios.c linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_atombios.c
28369 --- linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_atombios.c 2011-03-27 14:31:47.000000000 -0400
28370 +++ linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_atombios.c 2011-05-16 21:46:57.000000000 -0400
28371 @@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from
28372 bool linkb;
28373 struct radeon_i2c_bus_rec ddc_bus;
28374
28375 + pax_track_stack();
28376 +
28377 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
28378
28379 if (data_offset == 0)
28380 @@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_o
28381 }
28382 }
28383
28384 -struct bios_connector {
28385 +static struct bios_connector {
28386 bool valid;
28387 uint16_t line_mux;
28388 uint16_t devices;
28389 int connector_type;
28390 struct radeon_i2c_bus_rec ddc_bus;
28391 -};
28392 +} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
28393
28394 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
28395 drm_device
28396 @@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from
28397 uint8_t dac;
28398 union atom_supported_devices *supported_devices;
28399 int i, j;
28400 - struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
28401
28402 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
28403
28404 diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_display.c linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_display.c
28405 --- linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_display.c 2011-03-27 14:31:47.000000000 -0400
28406 +++ linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_display.c 2011-04-17 15:56:46.000000000 -0400
28407 @@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pl
28408
28409 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
28410 error = freq - current_freq;
28411 - error = error < 0 ? 0xffffffff : error;
28412 + error = (int32_t)error < 0 ? 0xffffffff : error;
28413 } else
28414 error = abs(current_freq - freq);
28415 vco_diff = abs(vco - best_vco);
28416 diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_drv.h linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_drv.h
28417 --- linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_drv.h 2011-03-27 14:31:47.000000000 -0400
28418 +++ linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_drv.h 2011-05-04 17:56:28.000000000 -0400
28419 @@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
28420
28421 /* SW interrupt */
28422 wait_queue_head_t swi_queue;
28423 - atomic_t swi_emitted;
28424 + atomic_unchecked_t swi_emitted;
28425 int vblank_crtc;
28426 uint32_t irq_enable_reg;
28427 uint32_t r500_disp_irq_reg;
28428 diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_fence.c linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_fence.c
28429 --- linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_fence.c 2011-03-27 14:31:47.000000000 -0400
28430 +++ linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_fence.c 2011-05-04 17:56:28.000000000 -0400
28431 @@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_devi
28432 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
28433 return 0;
28434 }
28435 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
28436 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
28437 if (!rdev->cp.ready) {
28438 /* FIXME: cp is not running assume everythings is done right
28439 * away
28440 @@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct rade
28441 return r;
28442 }
28443 WREG32(rdev->fence_drv.scratch_reg, 0);
28444 - atomic_set(&rdev->fence_drv.seq, 0);
28445 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
28446 INIT_LIST_HEAD(&rdev->fence_drv.created);
28447 INIT_LIST_HEAD(&rdev->fence_drv.emited);
28448 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
28449 diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/radeon.h linux-2.6.32.41/drivers/gpu/drm/radeon/radeon.h
28450 --- linux-2.6.32.41/drivers/gpu/drm/radeon/radeon.h 2011-03-27 14:31:47.000000000 -0400
28451 +++ linux-2.6.32.41/drivers/gpu/drm/radeon/radeon.h 2011-05-04 17:56:28.000000000 -0400
28452 @@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device
28453 */
28454 struct radeon_fence_driver {
28455 uint32_t scratch_reg;
28456 - atomic_t seq;
28457 + atomic_unchecked_t seq;
28458 uint32_t last_seq;
28459 unsigned long count_timeout;
28460 wait_queue_head_t queue;
28461 diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_ioc32.c linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_ioc32.c
28462 --- linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-03-27 14:31:47.000000000 -0400
28463 +++ linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-04-23 13:57:24.000000000 -0400
28464 @@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(str
28465 request = compat_alloc_user_space(sizeof(*request));
28466 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
28467 || __put_user(req32.param, &request->param)
28468 - || __put_user((void __user *)(unsigned long)req32.value,
28469 + || __put_user((unsigned long)req32.value,
28470 &request->value))
28471 return -EFAULT;
28472
28473 diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_irq.c linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_irq.c
28474 --- linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_irq.c 2011-03-27 14:31:47.000000000 -0400
28475 +++ linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_irq.c 2011-05-04 17:56:28.000000000 -0400
28476 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
28477 unsigned int ret;
28478 RING_LOCALS;
28479
28480 - atomic_inc(&dev_priv->swi_emitted);
28481 - ret = atomic_read(&dev_priv->swi_emitted);
28482 + atomic_inc_unchecked(&dev_priv->swi_emitted);
28483 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
28484
28485 BEGIN_RING(4);
28486 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
28487 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
28488 drm_radeon_private_t *dev_priv =
28489 (drm_radeon_private_t *) dev->dev_private;
28490
28491 - atomic_set(&dev_priv->swi_emitted, 0);
28492 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
28493 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
28494
28495 dev->max_vblank_count = 0x001fffff;
28496 diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_state.c linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_state.c
28497 --- linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_state.c 2011-03-27 14:31:47.000000000 -0400
28498 +++ linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_state.c 2011-04-17 15:56:46.000000000 -0400
28499 @@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm
28500 {
28501 drm_radeon_private_t *dev_priv = dev->dev_private;
28502 drm_radeon_getparam_t *param = data;
28503 - int value;
28504 + int value = 0;
28505
28506 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
28507
28508 diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_ttm.c linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_ttm.c
28509 --- linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_ttm.c 2011-03-27 14:31:47.000000000 -0400
28510 +++ linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_ttm.c 2011-04-17 15:56:46.000000000 -0400
28511 @@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_devic
28512 DRM_INFO("radeon: ttm finalized\n");
28513 }
28514
28515 -static struct vm_operations_struct radeon_ttm_vm_ops;
28516 -static const struct vm_operations_struct *ttm_vm_ops = NULL;
28517 -
28518 -static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
28519 -{
28520 - struct ttm_buffer_object *bo;
28521 - int r;
28522 -
28523 - bo = (struct ttm_buffer_object *)vma->vm_private_data;
28524 - if (bo == NULL) {
28525 - return VM_FAULT_NOPAGE;
28526 - }
28527 - r = ttm_vm_ops->fault(vma, vmf);
28528 - return r;
28529 -}
28530 -
28531 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
28532 {
28533 struct drm_file *file_priv;
28534 struct radeon_device *rdev;
28535 - int r;
28536
28537 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
28538 return drm_mmap(filp, vma);
28539 @@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struc
28540
28541 file_priv = (struct drm_file *)filp->private_data;
28542 rdev = file_priv->minor->dev->dev_private;
28543 - if (rdev == NULL) {
28544 + if (!rdev)
28545 return -EINVAL;
28546 - }
28547 - r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
28548 - if (unlikely(r != 0)) {
28549 - return r;
28550 - }
28551 - if (unlikely(ttm_vm_ops == NULL)) {
28552 - ttm_vm_ops = vma->vm_ops;
28553 - radeon_ttm_vm_ops = *ttm_vm_ops;
28554 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
28555 - }
28556 - vma->vm_ops = &radeon_ttm_vm_ops;
28557 - return 0;
28558 + return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
28559 }
28560
28561
28562 diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/rs690.c linux-2.6.32.41/drivers/gpu/drm/radeon/rs690.c
28563 --- linux-2.6.32.41/drivers/gpu/drm/radeon/rs690.c 2011-03-27 14:31:47.000000000 -0400
28564 +++ linux-2.6.32.41/drivers/gpu/drm/radeon/rs690.c 2011-04-17 15:56:46.000000000 -0400
28565 @@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct
28566 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
28567 rdev->pm.sideport_bandwidth.full)
28568 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
28569 - read_delay_latency.full = rfixed_const(370 * 800 * 1000);
28570 + read_delay_latency.full = rfixed_const(800 * 1000);
28571 read_delay_latency.full = rfixed_div(read_delay_latency,
28572 rdev->pm.igp_sideport_mclk);
28573 + a.full = rfixed_const(370);
28574 + read_delay_latency.full = rfixed_mul(read_delay_latency, a);
28575 } else {
28576 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
28577 rdev->pm.k8_bandwidth.full)
28578 diff -urNp linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_bo.c linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_bo.c
28579 --- linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_bo.c 2011-03-27 14:31:47.000000000 -0400
28580 +++ linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_bo.c 2011-04-23 12:56:11.000000000 -0400
28581 @@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_a
28582 NULL
28583 };
28584
28585 -static struct sysfs_ops ttm_bo_global_ops = {
28586 +static const struct sysfs_ops ttm_bo_global_ops = {
28587 .show = &ttm_bo_global_show
28588 };
28589
28590 diff -urNp linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_bo_vm.c linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_bo_vm.c
28591 --- linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-03-27 14:31:47.000000000 -0400
28592 +++ linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-04-17 15:56:46.000000000 -0400
28593 @@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_are
28594 {
28595 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
28596 vma->vm_private_data;
28597 - struct ttm_bo_device *bdev = bo->bdev;
28598 + struct ttm_bo_device *bdev;
28599 unsigned long bus_base;
28600 unsigned long bus_offset;
28601 unsigned long bus_size;
28602 @@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_are
28603 unsigned long address = (unsigned long)vmf->virtual_address;
28604 int retval = VM_FAULT_NOPAGE;
28605
28606 + if (!bo)
28607 + return VM_FAULT_NOPAGE;
28608 + bdev = bo->bdev;
28609 +
28610 /*
28611 * Work around locking order reversal in fault / nopfn
28612 * between mmap_sem and bo_reserve: Perform a trylock operation
28613 diff -urNp linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_global.c linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_global.c
28614 --- linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_global.c 2011-03-27 14:31:47.000000000 -0400
28615 +++ linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_global.c 2011-04-17 15:56:46.000000000 -0400
28616 @@ -36,7 +36,7 @@
28617 struct ttm_global_item {
28618 struct mutex mutex;
28619 void *object;
28620 - int refcount;
28621 + atomic_t refcount;
28622 };
28623
28624 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
28625 @@ -49,7 +49,7 @@ void ttm_global_init(void)
28626 struct ttm_global_item *item = &glob[i];
28627 mutex_init(&item->mutex);
28628 item->object = NULL;
28629 - item->refcount = 0;
28630 + atomic_set(&item->refcount, 0);
28631 }
28632 }
28633
28634 @@ -59,7 +59,7 @@ void ttm_global_release(void)
28635 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
28636 struct ttm_global_item *item = &glob[i];
28637 BUG_ON(item->object != NULL);
28638 - BUG_ON(item->refcount != 0);
28639 + BUG_ON(atomic_read(&item->refcount) != 0);
28640 }
28641 }
28642
28643 @@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_globa
28644 void *object;
28645
28646 mutex_lock(&item->mutex);
28647 - if (item->refcount == 0) {
28648 + if (atomic_read(&item->refcount) == 0) {
28649 item->object = kzalloc(ref->size, GFP_KERNEL);
28650 if (unlikely(item->object == NULL)) {
28651 ret = -ENOMEM;
28652 @@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_globa
28653 goto out_err;
28654
28655 }
28656 - ++item->refcount;
28657 + atomic_inc(&item->refcount);
28658 ref->object = item->object;
28659 object = item->object;
28660 mutex_unlock(&item->mutex);
28661 @@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_gl
28662 struct ttm_global_item *item = &glob[ref->global_type];
28663
28664 mutex_lock(&item->mutex);
28665 - BUG_ON(item->refcount == 0);
28666 + BUG_ON(atomic_read(&item->refcount) == 0);
28667 BUG_ON(ref->object != item->object);
28668 - if (--item->refcount == 0) {
28669 + if (atomic_dec_and_test(&item->refcount)) {
28670 ref->release(ref);
28671 item->object = NULL;
28672 }
28673 diff -urNp linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_memory.c linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_memory.c
28674 --- linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_memory.c 2011-03-27 14:31:47.000000000 -0400
28675 +++ linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_memory.c 2011-04-17 15:56:46.000000000 -0400
28676 @@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_at
28677 NULL
28678 };
28679
28680 -static struct sysfs_ops ttm_mem_zone_ops = {
28681 +static const struct sysfs_ops ttm_mem_zone_ops = {
28682 .show = &ttm_mem_zone_show,
28683 .store = &ttm_mem_zone_store
28684 };
28685 diff -urNp linux-2.6.32.41/drivers/gpu/drm/via/via_drv.h linux-2.6.32.41/drivers/gpu/drm/via/via_drv.h
28686 --- linux-2.6.32.41/drivers/gpu/drm/via/via_drv.h 2011-03-27 14:31:47.000000000 -0400
28687 +++ linux-2.6.32.41/drivers/gpu/drm/via/via_drv.h 2011-05-04 17:56:28.000000000 -0400
28688 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
28689 typedef uint32_t maskarray_t[5];
28690
28691 typedef struct drm_via_irq {
28692 - atomic_t irq_received;
28693 + atomic_unchecked_t irq_received;
28694 uint32_t pending_mask;
28695 uint32_t enable_mask;
28696 wait_queue_head_t irq_queue;
28697 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
28698 struct timeval last_vblank;
28699 int last_vblank_valid;
28700 unsigned usec_per_vblank;
28701 - atomic_t vbl_received;
28702 + atomic_unchecked_t vbl_received;
28703 drm_via_state_t hc_state;
28704 char pci_buf[VIA_PCI_BUF_SIZE];
28705 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
28706 diff -urNp linux-2.6.32.41/drivers/gpu/drm/via/via_irq.c linux-2.6.32.41/drivers/gpu/drm/via/via_irq.c
28707 --- linux-2.6.32.41/drivers/gpu/drm/via/via_irq.c 2011-03-27 14:31:47.000000000 -0400
28708 +++ linux-2.6.32.41/drivers/gpu/drm/via/via_irq.c 2011-05-04 17:56:28.000000000 -0400
28709 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
28710 if (crtc != 0)
28711 return 0;
28712
28713 - return atomic_read(&dev_priv->vbl_received);
28714 + return atomic_read_unchecked(&dev_priv->vbl_received);
28715 }
28716
28717 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28718 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
28719
28720 status = VIA_READ(VIA_REG_INTERRUPT);
28721 if (status & VIA_IRQ_VBLANK_PENDING) {
28722 - atomic_inc(&dev_priv->vbl_received);
28723 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
28724 + atomic_inc_unchecked(&dev_priv->vbl_received);
28725 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
28726 do_gettimeofday(&cur_vblank);
28727 if (dev_priv->last_vblank_valid) {
28728 dev_priv->usec_per_vblank =
28729 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
28730 dev_priv->last_vblank = cur_vblank;
28731 dev_priv->last_vblank_valid = 1;
28732 }
28733 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
28734 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
28735 DRM_DEBUG("US per vblank is: %u\n",
28736 dev_priv->usec_per_vblank);
28737 }
28738 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
28739
28740 for (i = 0; i < dev_priv->num_irqs; ++i) {
28741 if (status & cur_irq->pending_mask) {
28742 - atomic_inc(&cur_irq->irq_received);
28743 + atomic_inc_unchecked(&cur_irq->irq_received);
28744 DRM_WAKEUP(&cur_irq->irq_queue);
28745 handled = 1;
28746 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
28747 @@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device *
28748 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
28749 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
28750 masks[irq][4]));
28751 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
28752 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
28753 } else {
28754 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
28755 (((cur_irq_sequence =
28756 - atomic_read(&cur_irq->irq_received)) -
28757 + atomic_read_unchecked(&cur_irq->irq_received)) -
28758 *sequence) <= (1 << 23)));
28759 }
28760 *sequence = cur_irq_sequence;
28761 @@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct dr
28762 }
28763
28764 for (i = 0; i < dev_priv->num_irqs; ++i) {
28765 - atomic_set(&cur_irq->irq_received, 0);
28766 + atomic_set_unchecked(&cur_irq->irq_received, 0);
28767 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
28768 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
28769 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
28770 @@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev,
28771 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
28772 case VIA_IRQ_RELATIVE:
28773 irqwait->request.sequence +=
28774 - atomic_read(&cur_irq->irq_received);
28775 + atomic_read_unchecked(&cur_irq->irq_received);
28776 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
28777 case VIA_IRQ_ABSOLUTE:
28778 break;
28779 diff -urNp linux-2.6.32.41/drivers/hid/hid-core.c linux-2.6.32.41/drivers/hid/hid-core.c
28780 --- linux-2.6.32.41/drivers/hid/hid-core.c 2011-05-10 22:12:01.000000000 -0400
28781 +++ linux-2.6.32.41/drivers/hid/hid-core.c 2011-05-10 22:12:32.000000000 -0400
28782 @@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device
28783
28784 int hid_add_device(struct hid_device *hdev)
28785 {
28786 - static atomic_t id = ATOMIC_INIT(0);
28787 + static atomic_unchecked_t id = ATOMIC_INIT(0);
28788 int ret;
28789
28790 if (WARN_ON(hdev->status & HID_STAT_ADDED))
28791 @@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hd
28792 /* XXX hack, any other cleaner solution after the driver core
28793 * is converted to allow more than 20 bytes as the device name? */
28794 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
28795 - hdev->vendor, hdev->product, atomic_inc_return(&id));
28796 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
28797
28798 ret = device_add(&hdev->dev);
28799 if (!ret)
28800 diff -urNp linux-2.6.32.41/drivers/hid/usbhid/hiddev.c linux-2.6.32.41/drivers/hid/usbhid/hiddev.c
28801 --- linux-2.6.32.41/drivers/hid/usbhid/hiddev.c 2011-03-27 14:31:47.000000000 -0400
28802 +++ linux-2.6.32.41/drivers/hid/usbhid/hiddev.c 2011-04-17 15:56:46.000000000 -0400
28803 @@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *fi
28804 return put_user(HID_VERSION, (int __user *)arg);
28805
28806 case HIDIOCAPPLICATION:
28807 - if (arg < 0 || arg >= hid->maxapplication)
28808 + if (arg >= hid->maxapplication)
28809 return -EINVAL;
28810
28811 for (i = 0; i < hid->maxcollection; i++)
28812 diff -urNp linux-2.6.32.41/drivers/hwmon/lis3lv02d.c linux-2.6.32.41/drivers/hwmon/lis3lv02d.c
28813 --- linux-2.6.32.41/drivers/hwmon/lis3lv02d.c 2011-03-27 14:31:47.000000000 -0400
28814 +++ linux-2.6.32.41/drivers/hwmon/lis3lv02d.c 2011-05-04 17:56:28.000000000 -0400
28815 @@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(in
28816 * the lid is closed. This leads to interrupts as soon as a little move
28817 * is done.
28818 */
28819 - atomic_inc(&lis3_dev.count);
28820 + atomic_inc_unchecked(&lis3_dev.count);
28821
28822 wake_up_interruptible(&lis3_dev.misc_wait);
28823 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
28824 @@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct in
28825 if (test_and_set_bit(0, &lis3_dev.misc_opened))
28826 return -EBUSY; /* already open */
28827
28828 - atomic_set(&lis3_dev.count, 0);
28829 + atomic_set_unchecked(&lis3_dev.count, 0);
28830
28831 /*
28832 * The sensor can generate interrupts for free-fall and direction
28833 @@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struc
28834 add_wait_queue(&lis3_dev.misc_wait, &wait);
28835 while (true) {
28836 set_current_state(TASK_INTERRUPTIBLE);
28837 - data = atomic_xchg(&lis3_dev.count, 0);
28838 + data = atomic_xchg_unchecked(&lis3_dev.count, 0);
28839 if (data)
28840 break;
28841
28842 @@ -244,7 +244,7 @@ out:
28843 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
28844 {
28845 poll_wait(file, &lis3_dev.misc_wait, wait);
28846 - if (atomic_read(&lis3_dev.count))
28847 + if (atomic_read_unchecked(&lis3_dev.count))
28848 return POLLIN | POLLRDNORM;
28849 return 0;
28850 }
28851 diff -urNp linux-2.6.32.41/drivers/hwmon/lis3lv02d.h linux-2.6.32.41/drivers/hwmon/lis3lv02d.h
28852 --- linux-2.6.32.41/drivers/hwmon/lis3lv02d.h 2011-03-27 14:31:47.000000000 -0400
28853 +++ linux-2.6.32.41/drivers/hwmon/lis3lv02d.h 2011-05-04 17:56:28.000000000 -0400
28854 @@ -201,7 +201,7 @@ struct lis3lv02d {
28855
28856 struct input_polled_dev *idev; /* input device */
28857 struct platform_device *pdev; /* platform device */
28858 - atomic_t count; /* interrupt count after last read */
28859 + atomic_unchecked_t count; /* interrupt count after last read */
28860 int xcalib; /* calibrated null value for x */
28861 int ycalib; /* calibrated null value for y */
28862 int zcalib; /* calibrated null value for z */
28863 diff -urNp linux-2.6.32.41/drivers/hwmon/sht15.c linux-2.6.32.41/drivers/hwmon/sht15.c
28864 --- linux-2.6.32.41/drivers/hwmon/sht15.c 2011-03-27 14:31:47.000000000 -0400
28865 +++ linux-2.6.32.41/drivers/hwmon/sht15.c 2011-05-04 17:56:28.000000000 -0400
28866 @@ -112,7 +112,7 @@ struct sht15_data {
28867 int supply_uV;
28868 int supply_uV_valid;
28869 struct work_struct update_supply_work;
28870 - atomic_t interrupt_handled;
28871 + atomic_unchecked_t interrupt_handled;
28872 };
28873
28874 /**
28875 @@ -245,13 +245,13 @@ static inline int sht15_update_single_va
28876 return ret;
28877
28878 gpio_direction_input(data->pdata->gpio_data);
28879 - atomic_set(&data->interrupt_handled, 0);
28880 + atomic_set_unchecked(&data->interrupt_handled, 0);
28881
28882 enable_irq(gpio_to_irq(data->pdata->gpio_data));
28883 if (gpio_get_value(data->pdata->gpio_data) == 0) {
28884 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
28885 /* Only relevant if the interrupt hasn't occured. */
28886 - if (!atomic_read(&data->interrupt_handled))
28887 + if (!atomic_read_unchecked(&data->interrupt_handled))
28888 schedule_work(&data->read_work);
28889 }
28890 ret = wait_event_timeout(data->wait_queue,
28891 @@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired
28892 struct sht15_data *data = d;
28893 /* First disable the interrupt */
28894 disable_irq_nosync(irq);
28895 - atomic_inc(&data->interrupt_handled);
28896 + atomic_inc_unchecked(&data->interrupt_handled);
28897 /* Then schedule a reading work struct */
28898 if (data->flag != SHT15_READING_NOTHING)
28899 schedule_work(&data->read_work);
28900 @@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct wo
28901 here as could have gone low in meantime so verify
28902 it hasn't!
28903 */
28904 - atomic_set(&data->interrupt_handled, 0);
28905 + atomic_set_unchecked(&data->interrupt_handled, 0);
28906 enable_irq(gpio_to_irq(data->pdata->gpio_data));
28907 /* If still not occured or another handler has been scheduled */
28908 if (gpio_get_value(data->pdata->gpio_data)
28909 - || atomic_read(&data->interrupt_handled))
28910 + || atomic_read_unchecked(&data->interrupt_handled))
28911 return;
28912 }
28913 /* Read the data back from the device */
28914 diff -urNp linux-2.6.32.41/drivers/hwmon/w83791d.c linux-2.6.32.41/drivers/hwmon/w83791d.c
28915 --- linux-2.6.32.41/drivers/hwmon/w83791d.c 2011-03-27 14:31:47.000000000 -0400
28916 +++ linux-2.6.32.41/drivers/hwmon/w83791d.c 2011-04-17 15:56:46.000000000 -0400
28917 @@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_cli
28918 struct i2c_board_info *info);
28919 static int w83791d_remove(struct i2c_client *client);
28920
28921 -static int w83791d_read(struct i2c_client *client, u8 register);
28922 -static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
28923 +static int w83791d_read(struct i2c_client *client, u8 reg);
28924 +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
28925 static struct w83791d_data *w83791d_update_device(struct device *dev);
28926
28927 #ifdef DEBUG
28928 diff -urNp linux-2.6.32.41/drivers/ide/ide-cd.c linux-2.6.32.41/drivers/ide/ide-cd.c
28929 --- linux-2.6.32.41/drivers/ide/ide-cd.c 2011-03-27 14:31:47.000000000 -0400
28930 +++ linux-2.6.32.41/drivers/ide/ide-cd.c 2011-04-17 15:56:46.000000000 -0400
28931 @@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_
28932 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
28933 if ((unsigned long)buf & alignment
28934 || blk_rq_bytes(rq) & q->dma_pad_mask
28935 - || object_is_on_stack(buf))
28936 + || object_starts_on_stack(buf))
28937 drive->dma = 0;
28938 }
28939 }
28940 diff -urNp linux-2.6.32.41/drivers/ide/ide-floppy.c linux-2.6.32.41/drivers/ide/ide-floppy.c
28941 --- linux-2.6.32.41/drivers/ide/ide-floppy.c 2011-03-27 14:31:47.000000000 -0400
28942 +++ linux-2.6.32.41/drivers/ide/ide-floppy.c 2011-05-16 21:46:57.000000000 -0400
28943 @@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_d
28944 u8 pc_buf[256], header_len, desc_cnt;
28945 int i, rc = 1, blocks, length;
28946
28947 + pax_track_stack();
28948 +
28949 ide_debug_log(IDE_DBG_FUNC, "enter");
28950
28951 drive->bios_cyl = 0;
28952 diff -urNp linux-2.6.32.41/drivers/ide/setup-pci.c linux-2.6.32.41/drivers/ide/setup-pci.c
28953 --- linux-2.6.32.41/drivers/ide/setup-pci.c 2011-03-27 14:31:47.000000000 -0400
28954 +++ linux-2.6.32.41/drivers/ide/setup-pci.c 2011-05-16 21:46:57.000000000 -0400
28955 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
28956 int ret, i, n_ports = dev2 ? 4 : 2;
28957 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
28958
28959 + pax_track_stack();
28960 +
28961 for (i = 0; i < n_ports / 2; i++) {
28962 ret = ide_setup_pci_controller(pdev[i], d, !i);
28963 if (ret < 0)
28964 diff -urNp linux-2.6.32.41/drivers/ieee1394/dv1394.c linux-2.6.32.41/drivers/ieee1394/dv1394.c
28965 --- linux-2.6.32.41/drivers/ieee1394/dv1394.c 2011-03-27 14:31:47.000000000 -0400
28966 +++ linux-2.6.32.41/drivers/ieee1394/dv1394.c 2011-04-23 12:56:11.000000000 -0400
28967 @@ -739,7 +739,7 @@ static void frame_prepare(struct video_c
28968 based upon DIF section and sequence
28969 */
28970
28971 -static void inline
28972 +static inline void
28973 frame_put_packet (struct frame *f, struct packet *p)
28974 {
28975 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
28976 diff -urNp linux-2.6.32.41/drivers/ieee1394/hosts.c linux-2.6.32.41/drivers/ieee1394/hosts.c
28977 --- linux-2.6.32.41/drivers/ieee1394/hosts.c 2011-03-27 14:31:47.000000000 -0400
28978 +++ linux-2.6.32.41/drivers/ieee1394/hosts.c 2011-04-17 15:56:46.000000000 -0400
28979 @@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso
28980 }
28981
28982 static struct hpsb_host_driver dummy_driver = {
28983 + .name = "dummy",
28984 .transmit_packet = dummy_transmit_packet,
28985 .devctl = dummy_devctl,
28986 .isoctl = dummy_isoctl
28987 diff -urNp linux-2.6.32.41/drivers/ieee1394/init_ohci1394_dma.c linux-2.6.32.41/drivers/ieee1394/init_ohci1394_dma.c
28988 --- linux-2.6.32.41/drivers/ieee1394/init_ohci1394_dma.c 2011-03-27 14:31:47.000000000 -0400
28989 +++ linux-2.6.32.41/drivers/ieee1394/init_ohci1394_dma.c 2011-04-17 15:56:46.000000000 -0400
28990 @@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_con
28991 for (func = 0; func < 8; func++) {
28992 u32 class = read_pci_config(num,slot,func,
28993 PCI_CLASS_REVISION);
28994 - if ((class == 0xffffffff))
28995 + if (class == 0xffffffff)
28996 continue; /* No device at this func */
28997
28998 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
28999 diff -urNp linux-2.6.32.41/drivers/ieee1394/ohci1394.c linux-2.6.32.41/drivers/ieee1394/ohci1394.c
29000 --- linux-2.6.32.41/drivers/ieee1394/ohci1394.c 2011-03-27 14:31:47.000000000 -0400
29001 +++ linux-2.6.32.41/drivers/ieee1394/ohci1394.c 2011-04-23 12:56:11.000000000 -0400
29002 @@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_
29003 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
29004
29005 /* Module Parameters */
29006 -static int phys_dma = 1;
29007 +static int phys_dma;
29008 module_param(phys_dma, int, 0444);
29009 -MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
29010 +MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
29011
29012 static void dma_trm_tasklet(unsigned long data);
29013 static void dma_trm_reset(struct dma_trm_ctx *d);
29014 diff -urNp linux-2.6.32.41/drivers/ieee1394/sbp2.c linux-2.6.32.41/drivers/ieee1394/sbp2.c
29015 --- linux-2.6.32.41/drivers/ieee1394/sbp2.c 2011-03-27 14:31:47.000000000 -0400
29016 +++ linux-2.6.32.41/drivers/ieee1394/sbp2.c 2011-04-23 12:56:11.000000000 -0400
29017 @@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 prot
29018 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
29019 MODULE_LICENSE("GPL");
29020
29021 -static int sbp2_module_init(void)
29022 +static int __init sbp2_module_init(void)
29023 {
29024 int ret;
29025
29026 diff -urNp linux-2.6.32.41/drivers/infiniband/core/cm.c linux-2.6.32.41/drivers/infiniband/core/cm.c
29027 --- linux-2.6.32.41/drivers/infiniband/core/cm.c 2011-03-27 14:31:47.000000000 -0400
29028 +++ linux-2.6.32.41/drivers/infiniband/core/cm.c 2011-04-17 15:56:46.000000000 -0400
29029 @@ -112,7 +112,7 @@ static char const counter_group_names[CM
29030
29031 struct cm_counter_group {
29032 struct kobject obj;
29033 - atomic_long_t counter[CM_ATTR_COUNT];
29034 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
29035 };
29036
29037 struct cm_counter_attribute {
29038 @@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm
29039 struct ib_mad_send_buf *msg = NULL;
29040 int ret;
29041
29042 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29043 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29044 counter[CM_REQ_COUNTER]);
29045
29046 /* Quick state check to discard duplicate REQs. */
29047 @@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm
29048 if (!cm_id_priv)
29049 return;
29050
29051 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29052 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29053 counter[CM_REP_COUNTER]);
29054 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
29055 if (ret)
29056 @@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work
29057 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
29058 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
29059 spin_unlock_irq(&cm_id_priv->lock);
29060 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29061 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29062 counter[CM_RTU_COUNTER]);
29063 goto out;
29064 }
29065 @@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_wor
29066 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
29067 dreq_msg->local_comm_id);
29068 if (!cm_id_priv) {
29069 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29070 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29071 counter[CM_DREQ_COUNTER]);
29072 cm_issue_drep(work->port, work->mad_recv_wc);
29073 return -EINVAL;
29074 @@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_wor
29075 case IB_CM_MRA_REP_RCVD:
29076 break;
29077 case IB_CM_TIMEWAIT:
29078 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29079 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29080 counter[CM_DREQ_COUNTER]);
29081 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29082 goto unlock;
29083 @@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_wor
29084 cm_free_msg(msg);
29085 goto deref;
29086 case IB_CM_DREQ_RCVD:
29087 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29088 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29089 counter[CM_DREQ_COUNTER]);
29090 goto unlock;
29091 default:
29092 @@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work
29093 ib_modify_mad(cm_id_priv->av.port->mad_agent,
29094 cm_id_priv->msg, timeout)) {
29095 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
29096 - atomic_long_inc(&work->port->
29097 + atomic_long_inc_unchecked(&work->port->
29098 counter_group[CM_RECV_DUPLICATES].
29099 counter[CM_MRA_COUNTER]);
29100 goto out;
29101 @@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work
29102 break;
29103 case IB_CM_MRA_REQ_RCVD:
29104 case IB_CM_MRA_REP_RCVD:
29105 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29106 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29107 counter[CM_MRA_COUNTER]);
29108 /* fall through */
29109 default:
29110 @@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work
29111 case IB_CM_LAP_IDLE:
29112 break;
29113 case IB_CM_MRA_LAP_SENT:
29114 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29115 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29116 counter[CM_LAP_COUNTER]);
29117 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29118 goto unlock;
29119 @@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work
29120 cm_free_msg(msg);
29121 goto deref;
29122 case IB_CM_LAP_RCVD:
29123 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29124 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29125 counter[CM_LAP_COUNTER]);
29126 goto unlock;
29127 default:
29128 @@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm
29129 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
29130 if (cur_cm_id_priv) {
29131 spin_unlock_irq(&cm.lock);
29132 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29133 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29134 counter[CM_SIDR_REQ_COUNTER]);
29135 goto out; /* Duplicate message. */
29136 }
29137 @@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_ma
29138 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
29139 msg->retries = 1;
29140
29141 - atomic_long_add(1 + msg->retries,
29142 + atomic_long_add_unchecked(1 + msg->retries,
29143 &port->counter_group[CM_XMIT].counter[attr_index]);
29144 if (msg->retries)
29145 - atomic_long_add(msg->retries,
29146 + atomic_long_add_unchecked(msg->retries,
29147 &port->counter_group[CM_XMIT_RETRIES].
29148 counter[attr_index]);
29149
29150 @@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_ma
29151 }
29152
29153 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
29154 - atomic_long_inc(&port->counter_group[CM_RECV].
29155 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
29156 counter[attr_id - CM_ATTR_ID_OFFSET]);
29157
29158 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
29159 @@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct ko
29160 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
29161
29162 return sprintf(buf, "%ld\n",
29163 - atomic_long_read(&group->counter[cm_attr->index]));
29164 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
29165 }
29166
29167 -static struct sysfs_ops cm_counter_ops = {
29168 +static const struct sysfs_ops cm_counter_ops = {
29169 .show = cm_show_counter
29170 };
29171
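The cm.c hunks above convert the per-port duplicate/transmit statistics from atomic_long_t to atomic_long_unchecked_t and switch every accessor to the matching *_unchecked form. Under PaX REFCOUNT the ordinary atomic types detect signed overflow (via architecture-specific checks not shown in this patch) so reference-count wraps cannot be turned into use-after-free; counters that are purely statistical and harmless to wrap are moved to the unchecked variants so they keep plain wrapping semantics and never trip the overflow handler. The following is only a userspace sketch of that distinction -- the type and function names (checked_counter, unchecked_counter, checked_inc, unchecked_inc) are illustrative, not PaX's implementation:

    #include <limits.h>
    #include <stdio.h>

    typedef struct { unsigned long counter; } checked_counter;
    typedef struct { unsigned long counter; } unchecked_counter;

    static void checked_inc(checked_counter *v)
    {
        unsigned long next;

        if (__builtin_add_overflow(v->counter, 1UL, &next)) {
            /* a hardened refcount would trap or log here; saturate instead */
            v->counter = ULONG_MAX;
            return;
        }
        v->counter = next;
    }

    static void unchecked_inc(unchecked_counter *v)
    {
        v->counter++;   /* plain wrapping arithmetic, as before the patch */
    }

    int main(void)
    {
        checked_counter refs = { ULONG_MAX };
        unchecked_counter stats = { ULONG_MAX };

        checked_inc(&refs);    /* pinned at ULONG_MAX instead of wrapping */
        unchecked_inc(&stats); /* wraps to 0, harmless for a statistic */

        printf("refs=%lu stats=%lu\n", refs.counter, stats.counter);
        return 0;
    }

The same conversion recurs throughout the nes, dm and md hunks below for counters that are only read back for sysfs/ethtool reporting.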
29172 diff -urNp linux-2.6.32.41/drivers/infiniband/core/fmr_pool.c linux-2.6.32.41/drivers/infiniband/core/fmr_pool.c
29173 --- linux-2.6.32.41/drivers/infiniband/core/fmr_pool.c 2011-03-27 14:31:47.000000000 -0400
29174 +++ linux-2.6.32.41/drivers/infiniband/core/fmr_pool.c 2011-05-04 17:56:28.000000000 -0400
29175 @@ -97,8 +97,8 @@ struct ib_fmr_pool {
29176
29177 struct task_struct *thread;
29178
29179 - atomic_t req_ser;
29180 - atomic_t flush_ser;
29181 + atomic_unchecked_t req_ser;
29182 + atomic_unchecked_t flush_ser;
29183
29184 wait_queue_head_t force_wait;
29185 };
29186 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
29187 struct ib_fmr_pool *pool = pool_ptr;
29188
29189 do {
29190 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
29191 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
29192 ib_fmr_batch_release(pool);
29193
29194 - atomic_inc(&pool->flush_ser);
29195 + atomic_inc_unchecked(&pool->flush_ser);
29196 wake_up_interruptible(&pool->force_wait);
29197
29198 if (pool->flush_function)
29199 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
29200 }
29201
29202 set_current_state(TASK_INTERRUPTIBLE);
29203 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
29204 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
29205 !kthread_should_stop())
29206 schedule();
29207 __set_current_state(TASK_RUNNING);
29208 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
29209 pool->dirty_watermark = params->dirty_watermark;
29210 pool->dirty_len = 0;
29211 spin_lock_init(&pool->pool_lock);
29212 - atomic_set(&pool->req_ser, 0);
29213 - atomic_set(&pool->flush_ser, 0);
29214 + atomic_set_unchecked(&pool->req_ser, 0);
29215 + atomic_set_unchecked(&pool->flush_ser, 0);
29216 init_waitqueue_head(&pool->force_wait);
29217
29218 pool->thread = kthread_run(ib_fmr_cleanup_thread,
29219 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
29220 }
29221 spin_unlock_irq(&pool->pool_lock);
29222
29223 - serial = atomic_inc_return(&pool->req_ser);
29224 + serial = atomic_inc_return_unchecked(&pool->req_ser);
29225 wake_up_process(pool->thread);
29226
29227 if (wait_event_interruptible(pool->force_wait,
29228 - atomic_read(&pool->flush_ser) - serial >= 0))
29229 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
29230 return -EINTR;
29231
29232 return 0;
29233 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
29234 } else {
29235 list_add_tail(&fmr->list, &pool->dirty_list);
29236 if (++pool->dirty_len >= pool->dirty_watermark) {
29237 - atomic_inc(&pool->req_ser);
29238 + atomic_inc_unchecked(&pool->req_ser);
29239 wake_up_process(pool->thread);
29240 }
29241 }
29242 diff -urNp linux-2.6.32.41/drivers/infiniband/core/sysfs.c linux-2.6.32.41/drivers/infiniband/core/sysfs.c
29243 --- linux-2.6.32.41/drivers/infiniband/core/sysfs.c 2011-03-27 14:31:47.000000000 -0400
29244 +++ linux-2.6.32.41/drivers/infiniband/core/sysfs.c 2011-04-17 15:56:46.000000000 -0400
29245 @@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kob
29246 return port_attr->show(p, port_attr, buf);
29247 }
29248
29249 -static struct sysfs_ops port_sysfs_ops = {
29250 +static const struct sysfs_ops port_sysfs_ops = {
29251 .show = port_attr_show
29252 };
29253
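Here, as with cm_counter_ops above and the dm, backlight and suspend ops tables further down, a struct holding nothing but function pointers gains a const qualifier. Constified ops tables are placed in a read-only section, so a kernel-memory write primitive cannot redirect the callbacks. A minimal standalone sketch of the idiom, with illustrative names (demo_sysfs_ops, demo_show) rather than the kernel's:

    #include <stdio.h>

    struct demo_sysfs_ops {
        int (*show)(char *buf, unsigned long len);
    };

    static int demo_show(char *buf, unsigned long len)
    {
        return snprintf(buf, len, "demo\n");
    }

    /* const: the whole callback table is read-only after link time */
    static const struct demo_sysfs_ops demo_ops = {
        .show = demo_show,
    };

    int main(void)
    {
        char buf[16];

        demo_ops.show(buf, sizeof(buf));
        fputs(buf, stdout);
        return 0;
    }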
29254 diff -urNp linux-2.6.32.41/drivers/infiniband/core/uverbs_marshall.c linux-2.6.32.41/drivers/infiniband/core/uverbs_marshall.c
29255 --- linux-2.6.32.41/drivers/infiniband/core/uverbs_marshall.c 2011-03-27 14:31:47.000000000 -0400
29256 +++ linux-2.6.32.41/drivers/infiniband/core/uverbs_marshall.c 2011-04-17 15:56:46.000000000 -0400
29257 @@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_u
29258 dst->grh.sgid_index = src->grh.sgid_index;
29259 dst->grh.hop_limit = src->grh.hop_limit;
29260 dst->grh.traffic_class = src->grh.traffic_class;
29261 + memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
29262 dst->dlid = src->dlid;
29263 dst->sl = src->sl;
29264 dst->src_path_bits = src->src_path_bits;
29265 dst->static_rate = src->static_rate;
29266 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
29267 dst->port_num = src->port_num;
29268 + dst->reserved = 0;
29269 }
29270 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
29271
29272 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
29273 struct ib_qp_attr *src)
29274 {
29275 + dst->qp_state = src->qp_state;
29276 dst->cur_qp_state = src->cur_qp_state;
29277 dst->path_mtu = src->path_mtu;
29278 dst->path_mig_state = src->path_mig_state;
29279 @@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_u
29280 dst->rnr_retry = src->rnr_retry;
29281 dst->alt_port_num = src->alt_port_num;
29282 dst->alt_timeout = src->alt_timeout;
29283 + memset(dst->reserved, 0, sizeof(dst->reserved));
29284 }
29285 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
29286
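The uverbs_marshall.c hunks zero the reserved and padding members (grh.reserved, dst->reserved) and copy qp_state explicitly, so the structures handed back to user space never carry leftover kernel stack or heap bytes. A small sketch of the infoleak-avoidance idiom; the structure and field names (demo_uabi_attr, dlid, sl, reserved) are stand-ins, not the real ib_uverbs layout:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct demo_uabi_attr {
        uint32_t dlid;
        uint8_t  sl;
        uint8_t  reserved[3];   /* padding visible to user space */
    };

    static void fill_uabi_attr(struct demo_uabi_attr *dst, uint32_t dlid, uint8_t sl)
    {
        memset(dst, 0, sizeof(*dst));   /* clears reserved[] and any padding */
        dst->dlid = dlid;
        dst->sl = sl;
    }

    int main(void)
    {
        struct demo_uabi_attr a;

        fill_uabi_attr(&a, 42, 3);
        printf("dlid=%u sl=%u reserved=%u %u %u\n",
               a.dlid, a.sl, a.reserved[0], a.reserved[1], a.reserved[2]);
        return 0;
    }

The mISDN socket.c hunks below apply the same treatment by memset()ing struct mISDN_devinfo before filling it.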
29287 diff -urNp linux-2.6.32.41/drivers/infiniband/hw/ipath/ipath_fs.c linux-2.6.32.41/drivers/infiniband/hw/ipath/ipath_fs.c
29288 --- linux-2.6.32.41/drivers/infiniband/hw/ipath/ipath_fs.c 2011-03-27 14:31:47.000000000 -0400
29289 +++ linux-2.6.32.41/drivers/infiniband/hw/ipath/ipath_fs.c 2011-05-16 21:46:57.000000000 -0400
29290 @@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(stru
29291 struct infinipath_counters counters;
29292 struct ipath_devdata *dd;
29293
29294 + pax_track_stack();
29295 +
29296 dd = file->f_path.dentry->d_inode->i_private;
29297 dd->ipath_f_read_counters(dd, &counters);
29298
29299 diff -urNp linux-2.6.32.41/drivers/infiniband/hw/nes/nes.c linux-2.6.32.41/drivers/infiniband/hw/nes/nes.c
29300 --- linux-2.6.32.41/drivers/infiniband/hw/nes/nes.c 2011-03-27 14:31:47.000000000 -0400
29301 +++ linux-2.6.32.41/drivers/infiniband/hw/nes/nes.c 2011-05-04 17:56:28.000000000 -0400
29302 @@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
29303 LIST_HEAD(nes_adapter_list);
29304 static LIST_HEAD(nes_dev_list);
29305
29306 -atomic_t qps_destroyed;
29307 +atomic_unchecked_t qps_destroyed;
29308
29309 static unsigned int ee_flsh_adapter;
29310 static unsigned int sysfs_nonidx_addr;
29311 @@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(str
29312 struct nes_adapter *nesadapter = nesdev->nesadapter;
29313 u32 qp_id;
29314
29315 - atomic_inc(&qps_destroyed);
29316 + atomic_inc_unchecked(&qps_destroyed);
29317
29318 /* Free the control structures */
29319
29320 diff -urNp linux-2.6.32.41/drivers/infiniband/hw/nes/nes_cm.c linux-2.6.32.41/drivers/infiniband/hw/nes/nes_cm.c
29321 --- linux-2.6.32.41/drivers/infiniband/hw/nes/nes_cm.c 2011-03-27 14:31:47.000000000 -0400
29322 +++ linux-2.6.32.41/drivers/infiniband/hw/nes/nes_cm.c 2011-05-04 17:56:28.000000000 -0400
29323 @@ -69,11 +69,11 @@ u32 cm_packets_received;
29324 u32 cm_listens_created;
29325 u32 cm_listens_destroyed;
29326 u32 cm_backlog_drops;
29327 -atomic_t cm_loopbacks;
29328 -atomic_t cm_nodes_created;
29329 -atomic_t cm_nodes_destroyed;
29330 -atomic_t cm_accel_dropped_pkts;
29331 -atomic_t cm_resets_recvd;
29332 +atomic_unchecked_t cm_loopbacks;
29333 +atomic_unchecked_t cm_nodes_created;
29334 +atomic_unchecked_t cm_nodes_destroyed;
29335 +atomic_unchecked_t cm_accel_dropped_pkts;
29336 +atomic_unchecked_t cm_resets_recvd;
29337
29338 static inline int mini_cm_accelerated(struct nes_cm_core *,
29339 struct nes_cm_node *);
29340 @@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
29341
29342 static struct nes_cm_core *g_cm_core;
29343
29344 -atomic_t cm_connects;
29345 -atomic_t cm_accepts;
29346 -atomic_t cm_disconnects;
29347 -atomic_t cm_closes;
29348 -atomic_t cm_connecteds;
29349 -atomic_t cm_connect_reqs;
29350 -atomic_t cm_rejects;
29351 +atomic_unchecked_t cm_connects;
29352 +atomic_unchecked_t cm_accepts;
29353 +atomic_unchecked_t cm_disconnects;
29354 +atomic_unchecked_t cm_closes;
29355 +atomic_unchecked_t cm_connecteds;
29356 +atomic_unchecked_t cm_connect_reqs;
29357 +atomic_unchecked_t cm_rejects;
29358
29359
29360 /**
29361 @@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(
29362 cm_node->rem_mac);
29363
29364 add_hte_node(cm_core, cm_node);
29365 - atomic_inc(&cm_nodes_created);
29366 + atomic_inc_unchecked(&cm_nodes_created);
29367
29368 return cm_node;
29369 }
29370 @@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm
29371 }
29372
29373 atomic_dec(&cm_core->node_cnt);
29374 - atomic_inc(&cm_nodes_destroyed);
29375 + atomic_inc_unchecked(&cm_nodes_destroyed);
29376 nesqp = cm_node->nesqp;
29377 if (nesqp) {
29378 nesqp->cm_node = NULL;
29379 @@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm
29380
29381 static void drop_packet(struct sk_buff *skb)
29382 {
29383 - atomic_inc(&cm_accel_dropped_pkts);
29384 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
29385 dev_kfree_skb_any(skb);
29386 }
29387
29388 @@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm
29389
29390 int reset = 0; /* whether to send reset in case of err.. */
29391 int passive_state;
29392 - atomic_inc(&cm_resets_recvd);
29393 + atomic_inc_unchecked(&cm_resets_recvd);
29394 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
29395 " refcnt=%d\n", cm_node, cm_node->state,
29396 atomic_read(&cm_node->ref_count));
29397 @@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_conne
29398 rem_ref_cm_node(cm_node->cm_core, cm_node);
29399 return NULL;
29400 }
29401 - atomic_inc(&cm_loopbacks);
29402 + atomic_inc_unchecked(&cm_loopbacks);
29403 loopbackremotenode->loopbackpartner = cm_node;
29404 loopbackremotenode->tcp_cntxt.rcv_wscale =
29405 NES_CM_DEFAULT_RCV_WND_SCALE;
29406 @@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_c
29407 add_ref_cm_node(cm_node);
29408 } else if (cm_node->state == NES_CM_STATE_TSA) {
29409 rem_ref_cm_node(cm_core, cm_node);
29410 - atomic_inc(&cm_accel_dropped_pkts);
29411 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
29412 dev_kfree_skb_any(skb);
29413 break;
29414 }
29415 @@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct ne
29416
29417 if ((cm_id) && (cm_id->event_handler)) {
29418 if (issue_disconn) {
29419 - atomic_inc(&cm_disconnects);
29420 + atomic_inc_unchecked(&cm_disconnects);
29421 cm_event.event = IW_CM_EVENT_DISCONNECT;
29422 cm_event.status = disconn_status;
29423 cm_event.local_addr = cm_id->local_addr;
29424 @@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct ne
29425 }
29426
29427 if (issue_close) {
29428 - atomic_inc(&cm_closes);
29429 + atomic_inc_unchecked(&cm_closes);
29430 nes_disconnect(nesqp, 1);
29431
29432 cm_id->provider_data = nesqp;
29433 @@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
29434
29435 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
29436 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
29437 - atomic_inc(&cm_accepts);
29438 + atomic_inc_unchecked(&cm_accepts);
29439
29440 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
29441 atomic_read(&nesvnic->netdev->refcnt));
29442 @@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
29443
29444 struct nes_cm_core *cm_core;
29445
29446 - atomic_inc(&cm_rejects);
29447 + atomic_inc_unchecked(&cm_rejects);
29448 cm_node = (struct nes_cm_node *) cm_id->provider_data;
29449 loopback = cm_node->loopbackpartner;
29450 cm_core = cm_node->cm_core;
29451 @@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id,
29452 ntohl(cm_id->local_addr.sin_addr.s_addr),
29453 ntohs(cm_id->local_addr.sin_port));
29454
29455 - atomic_inc(&cm_connects);
29456 + atomic_inc_unchecked(&cm_connects);
29457 nesqp->active_conn = 1;
29458
29459 /* cache the cm_id in the qp */
29460 @@ -3195,7 +3195,7 @@ static void cm_event_connected(struct ne
29461 if (nesqp->destroyed) {
29462 return;
29463 }
29464 - atomic_inc(&cm_connecteds);
29465 + atomic_inc_unchecked(&cm_connecteds);
29466 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
29467 " local port 0x%04X. jiffies = %lu.\n",
29468 nesqp->hwqp.qp_id,
29469 @@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm
29470
29471 ret = cm_id->event_handler(cm_id, &cm_event);
29472 cm_id->add_ref(cm_id);
29473 - atomic_inc(&cm_closes);
29474 + atomic_inc_unchecked(&cm_closes);
29475 cm_event.event = IW_CM_EVENT_CLOSE;
29476 cm_event.status = IW_CM_EVENT_STATUS_OK;
29477 cm_event.provider_data = cm_id->provider_data;
29478 @@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_
29479 return;
29480 cm_id = cm_node->cm_id;
29481
29482 - atomic_inc(&cm_connect_reqs);
29483 + atomic_inc_unchecked(&cm_connect_reqs);
29484 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
29485 cm_node, cm_id, jiffies);
29486
29487 @@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct n
29488 return;
29489 cm_id = cm_node->cm_id;
29490
29491 - atomic_inc(&cm_connect_reqs);
29492 + atomic_inc_unchecked(&cm_connect_reqs);
29493 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
29494 cm_node, cm_id, jiffies);
29495
29496 diff -urNp linux-2.6.32.41/drivers/infiniband/hw/nes/nes.h linux-2.6.32.41/drivers/infiniband/hw/nes/nes.h
29497 --- linux-2.6.32.41/drivers/infiniband/hw/nes/nes.h 2011-03-27 14:31:47.000000000 -0400
29498 +++ linux-2.6.32.41/drivers/infiniband/hw/nes/nes.h 2011-05-04 17:56:28.000000000 -0400
29499 @@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
29500 extern unsigned int wqm_quanta;
29501 extern struct list_head nes_adapter_list;
29502
29503 -extern atomic_t cm_connects;
29504 -extern atomic_t cm_accepts;
29505 -extern atomic_t cm_disconnects;
29506 -extern atomic_t cm_closes;
29507 -extern atomic_t cm_connecteds;
29508 -extern atomic_t cm_connect_reqs;
29509 -extern atomic_t cm_rejects;
29510 -extern atomic_t mod_qp_timouts;
29511 -extern atomic_t qps_created;
29512 -extern atomic_t qps_destroyed;
29513 -extern atomic_t sw_qps_destroyed;
29514 +extern atomic_unchecked_t cm_connects;
29515 +extern atomic_unchecked_t cm_accepts;
29516 +extern atomic_unchecked_t cm_disconnects;
29517 +extern atomic_unchecked_t cm_closes;
29518 +extern atomic_unchecked_t cm_connecteds;
29519 +extern atomic_unchecked_t cm_connect_reqs;
29520 +extern atomic_unchecked_t cm_rejects;
29521 +extern atomic_unchecked_t mod_qp_timouts;
29522 +extern atomic_unchecked_t qps_created;
29523 +extern atomic_unchecked_t qps_destroyed;
29524 +extern atomic_unchecked_t sw_qps_destroyed;
29525 extern u32 mh_detected;
29526 extern u32 mh_pauses_sent;
29527 extern u32 cm_packets_sent;
29528 @@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
29529 extern u32 cm_listens_created;
29530 extern u32 cm_listens_destroyed;
29531 extern u32 cm_backlog_drops;
29532 -extern atomic_t cm_loopbacks;
29533 -extern atomic_t cm_nodes_created;
29534 -extern atomic_t cm_nodes_destroyed;
29535 -extern atomic_t cm_accel_dropped_pkts;
29536 -extern atomic_t cm_resets_recvd;
29537 +extern atomic_unchecked_t cm_loopbacks;
29538 +extern atomic_unchecked_t cm_nodes_created;
29539 +extern atomic_unchecked_t cm_nodes_destroyed;
29540 +extern atomic_unchecked_t cm_accel_dropped_pkts;
29541 +extern atomic_unchecked_t cm_resets_recvd;
29542
29543 extern u32 int_mod_timer_init;
29544 extern u32 int_mod_cq_depth_256;
29545 diff -urNp linux-2.6.32.41/drivers/infiniband/hw/nes/nes_nic.c linux-2.6.32.41/drivers/infiniband/hw/nes/nes_nic.c
29546 --- linux-2.6.32.41/drivers/infiniband/hw/nes/nes_nic.c 2011-03-27 14:31:47.000000000 -0400
29547 +++ linux-2.6.32.41/drivers/infiniband/hw/nes/nes_nic.c 2011-05-04 17:56:28.000000000 -0400
29548 @@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats
29549 target_stat_values[++index] = mh_detected;
29550 target_stat_values[++index] = mh_pauses_sent;
29551 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
29552 - target_stat_values[++index] = atomic_read(&cm_connects);
29553 - target_stat_values[++index] = atomic_read(&cm_accepts);
29554 - target_stat_values[++index] = atomic_read(&cm_disconnects);
29555 - target_stat_values[++index] = atomic_read(&cm_connecteds);
29556 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
29557 - target_stat_values[++index] = atomic_read(&cm_rejects);
29558 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
29559 - target_stat_values[++index] = atomic_read(&qps_created);
29560 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
29561 - target_stat_values[++index] = atomic_read(&qps_destroyed);
29562 - target_stat_values[++index] = atomic_read(&cm_closes);
29563 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
29564 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
29565 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
29566 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
29567 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
29568 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
29569 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
29570 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
29571 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
29572 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
29573 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
29574 target_stat_values[++index] = cm_packets_sent;
29575 target_stat_values[++index] = cm_packets_bounced;
29576 target_stat_values[++index] = cm_packets_created;
29577 @@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats
29578 target_stat_values[++index] = cm_listens_created;
29579 target_stat_values[++index] = cm_listens_destroyed;
29580 target_stat_values[++index] = cm_backlog_drops;
29581 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
29582 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
29583 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
29584 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
29585 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
29586 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
29587 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
29588 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
29589 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
29590 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
29591 target_stat_values[++index] = int_mod_timer_init;
29592 target_stat_values[++index] = int_mod_cq_depth_1;
29593 target_stat_values[++index] = int_mod_cq_depth_4;
29594 diff -urNp linux-2.6.32.41/drivers/infiniband/hw/nes/nes_verbs.c linux-2.6.32.41/drivers/infiniband/hw/nes/nes_verbs.c
29595 --- linux-2.6.32.41/drivers/infiniband/hw/nes/nes_verbs.c 2011-03-27 14:31:47.000000000 -0400
29596 +++ linux-2.6.32.41/drivers/infiniband/hw/nes/nes_verbs.c 2011-05-04 17:56:28.000000000 -0400
29597 @@ -45,9 +45,9 @@
29598
29599 #include <rdma/ib_umem.h>
29600
29601 -atomic_t mod_qp_timouts;
29602 -atomic_t qps_created;
29603 -atomic_t sw_qps_destroyed;
29604 +atomic_unchecked_t mod_qp_timouts;
29605 +atomic_unchecked_t qps_created;
29606 +atomic_unchecked_t sw_qps_destroyed;
29607
29608 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
29609
29610 @@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struc
29611 if (init_attr->create_flags)
29612 return ERR_PTR(-EINVAL);
29613
29614 - atomic_inc(&qps_created);
29615 + atomic_inc_unchecked(&qps_created);
29616 switch (init_attr->qp_type) {
29617 case IB_QPT_RC:
29618 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
29619 @@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *
29620 struct iw_cm_event cm_event;
29621 int ret;
29622
29623 - atomic_inc(&sw_qps_destroyed);
29624 + atomic_inc_unchecked(&sw_qps_destroyed);
29625 nesqp->destroyed = 1;
29626
29627 /* Blow away the connection if it exists. */
29628 diff -urNp linux-2.6.32.41/drivers/input/gameport/gameport.c linux-2.6.32.41/drivers/input/gameport/gameport.c
29629 --- linux-2.6.32.41/drivers/input/gameport/gameport.c 2011-03-27 14:31:47.000000000 -0400
29630 +++ linux-2.6.32.41/drivers/input/gameport/gameport.c 2011-05-04 17:56:28.000000000 -0400
29631 @@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
29632 */
29633 static void gameport_init_port(struct gameport *gameport)
29634 {
29635 - static atomic_t gameport_no = ATOMIC_INIT(0);
29636 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
29637
29638 __module_get(THIS_MODULE);
29639
29640 mutex_init(&gameport->drv_mutex);
29641 device_initialize(&gameport->dev);
29642 - dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
29643 + dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
29644 gameport->dev.bus = &gameport_bus;
29645 gameport->dev.release = gameport_release_port;
29646 if (gameport->parent)
29647 diff -urNp linux-2.6.32.41/drivers/input/input.c linux-2.6.32.41/drivers/input/input.c
29648 --- linux-2.6.32.41/drivers/input/input.c 2011-03-27 14:31:47.000000000 -0400
29649 +++ linux-2.6.32.41/drivers/input/input.c 2011-05-04 17:56:28.000000000 -0400
29650 @@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
29651 */
29652 int input_register_device(struct input_dev *dev)
29653 {
29654 - static atomic_t input_no = ATOMIC_INIT(0);
29655 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
29656 struct input_handler *handler;
29657 const char *path;
29658 int error;
29659 @@ -1585,7 +1585,7 @@ int input_register_device(struct input_d
29660 dev->setkeycode = input_default_setkeycode;
29661
29662 dev_set_name(&dev->dev, "input%ld",
29663 - (unsigned long) atomic_inc_return(&input_no) - 1);
29664 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
29665
29666 error = device_add(&dev->dev);
29667 if (error)
29668 diff -urNp linux-2.6.32.41/drivers/input/joystick/sidewinder.c linux-2.6.32.41/drivers/input/joystick/sidewinder.c
29669 --- linux-2.6.32.41/drivers/input/joystick/sidewinder.c 2011-03-27 14:31:47.000000000 -0400
29670 +++ linux-2.6.32.41/drivers/input/joystick/sidewinder.c 2011-05-18 20:09:36.000000000 -0400
29671 @@ -30,6 +30,7 @@
29672 #include <linux/kernel.h>
29673 #include <linux/module.h>
29674 #include <linux/slab.h>
29675 +#include <linux/sched.h>
29676 #include <linux/init.h>
29677 #include <linux/input.h>
29678 #include <linux/gameport.h>
29679 @@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
29680 unsigned char buf[SW_LENGTH];
29681 int i;
29682
29683 + pax_track_stack();
29684 +
29685 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
29686
29687 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
29688 diff -urNp linux-2.6.32.41/drivers/input/joystick/xpad.c linux-2.6.32.41/drivers/input/joystick/xpad.c
29689 --- linux-2.6.32.41/drivers/input/joystick/xpad.c 2011-03-27 14:31:47.000000000 -0400
29690 +++ linux-2.6.32.41/drivers/input/joystick/xpad.c 2011-05-04 17:56:28.000000000 -0400
29691 @@ -621,7 +621,7 @@ static void xpad_led_set(struct led_clas
29692
29693 static int xpad_led_probe(struct usb_xpad *xpad)
29694 {
29695 - static atomic_t led_seq = ATOMIC_INIT(0);
29696 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
29697 long led_no;
29698 struct xpad_led *led;
29699 struct led_classdev *led_cdev;
29700 @@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpa
29701 if (!led)
29702 return -ENOMEM;
29703
29704 - led_no = (long)atomic_inc_return(&led_seq) - 1;
29705 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
29706
29707 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
29708 led->xpad = xpad;
29709 diff -urNp linux-2.6.32.41/drivers/input/serio/serio.c linux-2.6.32.41/drivers/input/serio/serio.c
29710 --- linux-2.6.32.41/drivers/input/serio/serio.c 2011-03-27 14:31:47.000000000 -0400
29711 +++ linux-2.6.32.41/drivers/input/serio/serio.c 2011-05-04 17:56:28.000000000 -0400
29712 @@ -527,7 +527,7 @@ static void serio_release_port(struct de
29713 */
29714 static void serio_init_port(struct serio *serio)
29715 {
29716 - static atomic_t serio_no = ATOMIC_INIT(0);
29717 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
29718
29719 __module_get(THIS_MODULE);
29720
29721 @@ -536,7 +536,7 @@ static void serio_init_port(struct serio
29722 mutex_init(&serio->drv_mutex);
29723 device_initialize(&serio->dev);
29724 dev_set_name(&serio->dev, "serio%ld",
29725 - (long)atomic_inc_return(&serio_no) - 1);
29726 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
29727 serio->dev.bus = &serio_bus;
29728 serio->dev.release = serio_release_port;
29729 if (serio->parent) {
29730 diff -urNp linux-2.6.32.41/drivers/isdn/gigaset/common.c linux-2.6.32.41/drivers/isdn/gigaset/common.c
29731 --- linux-2.6.32.41/drivers/isdn/gigaset/common.c 2011-03-27 14:31:47.000000000 -0400
29732 +++ linux-2.6.32.41/drivers/isdn/gigaset/common.c 2011-04-17 15:56:46.000000000 -0400
29733 @@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct
29734 cs->commands_pending = 0;
29735 cs->cur_at_seq = 0;
29736 cs->gotfwver = -1;
29737 - cs->open_count = 0;
29738 + local_set(&cs->open_count, 0);
29739 cs->dev = NULL;
29740 cs->tty = NULL;
29741 cs->tty_dev = NULL;
29742 diff -urNp linux-2.6.32.41/drivers/isdn/gigaset/gigaset.h linux-2.6.32.41/drivers/isdn/gigaset/gigaset.h
29743 --- linux-2.6.32.41/drivers/isdn/gigaset/gigaset.h 2011-03-27 14:31:47.000000000 -0400
29744 +++ linux-2.6.32.41/drivers/isdn/gigaset/gigaset.h 2011-04-17 15:56:46.000000000 -0400
29745 @@ -34,6 +34,7 @@
29746 #include <linux/tty_driver.h>
29747 #include <linux/list.h>
29748 #include <asm/atomic.h>
29749 +#include <asm/local.h>
29750
29751 #define GIG_VERSION {0,5,0,0}
29752 #define GIG_COMPAT {0,4,0,0}
29753 @@ -446,7 +447,7 @@ struct cardstate {
29754 spinlock_t cmdlock;
29755 unsigned curlen, cmdbytes;
29756
29757 - unsigned open_count;
29758 + local_t open_count;
29759 struct tty_struct *tty;
29760 struct tasklet_struct if_wake_tasklet;
29761 unsigned control_state;
29762 diff -urNp linux-2.6.32.41/drivers/isdn/gigaset/interface.c linux-2.6.32.41/drivers/isdn/gigaset/interface.c
29763 --- linux-2.6.32.41/drivers/isdn/gigaset/interface.c 2011-03-27 14:31:47.000000000 -0400
29764 +++ linux-2.6.32.41/drivers/isdn/gigaset/interface.c 2011-04-17 15:56:46.000000000 -0400
29765 @@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tt
29766 return -ERESTARTSYS; // FIXME -EINTR?
29767 tty->driver_data = cs;
29768
29769 - ++cs->open_count;
29770 -
29771 - if (cs->open_count == 1) {
29772 + if (local_inc_return(&cs->open_count) == 1) {
29773 spin_lock_irqsave(&cs->lock, flags);
29774 cs->tty = tty;
29775 spin_unlock_irqrestore(&cs->lock, flags);
29776 @@ -195,10 +193,10 @@ static void if_close(struct tty_struct *
29777
29778 if (!cs->connected)
29779 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
29780 - else if (!cs->open_count)
29781 + else if (!local_read(&cs->open_count))
29782 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29783 else {
29784 - if (!--cs->open_count) {
29785 + if (!local_dec_return(&cs->open_count)) {
29786 spin_lock_irqsave(&cs->lock, flags);
29787 cs->tty = NULL;
29788 spin_unlock_irqrestore(&cs->lock, flags);
29789 @@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *t
29790 if (!cs->connected) {
29791 gig_dbg(DEBUG_IF, "not connected");
29792 retval = -ENODEV;
29793 - } else if (!cs->open_count)
29794 + } else if (!local_read(&cs->open_count))
29795 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29796 else {
29797 retval = 0;
29798 @@ -361,7 +359,7 @@ static int if_write(struct tty_struct *t
29799 if (!cs->connected) {
29800 gig_dbg(DEBUG_IF, "not connected");
29801 retval = -ENODEV;
29802 - } else if (!cs->open_count)
29803 + } else if (!local_read(&cs->open_count))
29804 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29805 else if (cs->mstate != MS_LOCKED) {
29806 dev_warn(cs->dev, "can't write to unlocked device\n");
29807 @@ -395,7 +393,7 @@ static int if_write_room(struct tty_stru
29808 if (!cs->connected) {
29809 gig_dbg(DEBUG_IF, "not connected");
29810 retval = -ENODEV;
29811 - } else if (!cs->open_count)
29812 + } else if (!local_read(&cs->open_count))
29813 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29814 else if (cs->mstate != MS_LOCKED) {
29815 dev_warn(cs->dev, "can't write to unlocked device\n");
29816 @@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty
29817
29818 if (!cs->connected)
29819 gig_dbg(DEBUG_IF, "not connected");
29820 - else if (!cs->open_count)
29821 + else if (!local_read(&cs->open_count))
29822 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29823 else if (cs->mstate != MS_LOCKED)
29824 dev_warn(cs->dev, "can't write to unlocked device\n");
29825 @@ -453,7 +451,7 @@ static void if_throttle(struct tty_struc
29826
29827 if (!cs->connected)
29828 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
29829 - else if (!cs->open_count)
29830 + else if (!local_read(&cs->open_count))
29831 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29832 else {
29833 //FIXME
29834 @@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_str
29835
29836 if (!cs->connected)
29837 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
29838 - else if (!cs->open_count)
29839 + else if (!local_read(&cs->open_count))
29840 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29841 else {
29842 //FIXME
29843 @@ -510,7 +508,7 @@ static void if_set_termios(struct tty_st
29844 goto out;
29845 }
29846
29847 - if (!cs->open_count) {
29848 + if (!local_read(&cs->open_count)) {
29849 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29850 goto out;
29851 }
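The gigaset hunks replace the plain "unsigned open_count" with a local_t and use local_inc_return()/local_dec_return()/local_read(), so the open, close and ioctl paths all see a consistently updated count instead of bare ++/--. The sketch below only mirrors the counting logic in userspace with C11 atomics (local_t itself is a kernel per-CPU primitive), and it assumes open/close are serialized by the driver's own locking, as in the original code:

    #include <stdatomic.h>
    #include <stdio.h>

    /* zero-initialized by static storage duration */
    static atomic_long open_count;

    static void if_open_demo(void)
    {
        if (atomic_fetch_add(&open_count, 1) + 1 == 1)
            puts("first open: attach tty, set up state");
    }

    static void if_close_demo(void)
    {
        if (atomic_load(&open_count) == 0) {
            puts("warning: device not opened");
            return;
        }
        if (atomic_fetch_sub(&open_count, 1) == 1)
            puts("last close: detach tty, flush buffers");
    }

    int main(void)
    {
        if_open_demo();
        if_open_demo();
        if_close_demo();
        if_close_demo();
        if_close_demo();   /* one close too many: warns instead of going negative */
        return 0;
    }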
29852 diff -urNp linux-2.6.32.41/drivers/isdn/hardware/avm/b1.c linux-2.6.32.41/drivers/isdn/hardware/avm/b1.c
29853 --- linux-2.6.32.41/drivers/isdn/hardware/avm/b1.c 2011-03-27 14:31:47.000000000 -0400
29854 +++ linux-2.6.32.41/drivers/isdn/hardware/avm/b1.c 2011-04-17 15:56:46.000000000 -0400
29855 @@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capilo
29856 }
29857 if (left) {
29858 if (t4file->user) {
29859 - if (copy_from_user(buf, dp, left))
29860 + if (left > sizeof buf || copy_from_user(buf, dp, left))
29861 return -EFAULT;
29862 } else {
29863 memcpy(buf, dp, left);
29864 @@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capilo
29865 }
29866 if (left) {
29867 if (config->user) {
29868 - if (copy_from_user(buf, dp, left))
29869 + if (left > sizeof buf || copy_from_user(buf, dp, left))
29870 return -EFAULT;
29871 } else {
29872 memcpy(buf, dp, left);
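b1_load_t4file() and b1_load_config() copy a caller-controlled "left" bytes into a fixed on-stack buffer; the patch rejects requests larger than the destination before calling copy_from_user(), and the icn driver below receives the same guard for its msg buffer. A standalone sketch of the pattern, where a plain memcpy stands in for copy_from_user() and T4_BLOCK/load_block are illustrative names:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    #define T4_BLOCK 256   /* stand-in for the driver's fixed buffer size */

    static int load_block(const char *src, size_t left)
    {
        char buf[T4_BLOCK];

        if (left > sizeof(buf))         /* the check the patch adds */
            return -EFAULT;
        memcpy(buf, src, left);         /* kernel code would copy_from_user() */
        /* ... hand buf to the hardware ... */
        return 0;
    }

    int main(void)
    {
        char src[512] = { 0 };

        printf("small copy: %d\n", load_block(src, 64));    /* 0 */
        printf("oversized : %d\n", load_block(src, 300));   /* -EFAULT */
        return 0;
    }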
29873 diff -urNp linux-2.6.32.41/drivers/isdn/hardware/eicon/capidtmf.c linux-2.6.32.41/drivers/isdn/hardware/eicon/capidtmf.c
29874 --- linux-2.6.32.41/drivers/isdn/hardware/eicon/capidtmf.c 2011-03-27 14:31:47.000000000 -0400
29875 +++ linux-2.6.32.41/drivers/isdn/hardware/eicon/capidtmf.c 2011-05-16 21:46:57.000000000 -0400
29876 @@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
29877 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
29878 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
29879
29880 + pax_track_stack();
29881
29882 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
29883 {
29884 diff -urNp linux-2.6.32.41/drivers/isdn/hardware/eicon/capifunc.c linux-2.6.32.41/drivers/isdn/hardware/eicon/capifunc.c
29885 --- linux-2.6.32.41/drivers/isdn/hardware/eicon/capifunc.c 2011-03-27 14:31:47.000000000 -0400
29886 +++ linux-2.6.32.41/drivers/isdn/hardware/eicon/capifunc.c 2011-05-16 21:46:57.000000000 -0400
29887 @@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
29888 IDI_SYNC_REQ req;
29889 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29890
29891 + pax_track_stack();
29892 +
29893 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29894
29895 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29896 diff -urNp linux-2.6.32.41/drivers/isdn/hardware/eicon/diddfunc.c linux-2.6.32.41/drivers/isdn/hardware/eicon/diddfunc.c
29897 --- linux-2.6.32.41/drivers/isdn/hardware/eicon/diddfunc.c 2011-03-27 14:31:47.000000000 -0400
29898 +++ linux-2.6.32.41/drivers/isdn/hardware/eicon/diddfunc.c 2011-05-16 21:46:57.000000000 -0400
29899 @@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
29900 IDI_SYNC_REQ req;
29901 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29902
29903 + pax_track_stack();
29904 +
29905 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29906
29907 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29908 diff -urNp linux-2.6.32.41/drivers/isdn/hardware/eicon/divasfunc.c linux-2.6.32.41/drivers/isdn/hardware/eicon/divasfunc.c
29909 --- linux-2.6.32.41/drivers/isdn/hardware/eicon/divasfunc.c 2011-03-27 14:31:47.000000000 -0400
29910 +++ linux-2.6.32.41/drivers/isdn/hardware/eicon/divasfunc.c 2011-05-16 21:46:57.000000000 -0400
29911 @@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_di
29912 IDI_SYNC_REQ req;
29913 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29914
29915 + pax_track_stack();
29916 +
29917 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29918
29919 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29920 diff -urNp linux-2.6.32.41/drivers/isdn/hardware/eicon/idifunc.c linux-2.6.32.41/drivers/isdn/hardware/eicon/idifunc.c
29921 --- linux-2.6.32.41/drivers/isdn/hardware/eicon/idifunc.c 2011-03-27 14:31:47.000000000 -0400
29922 +++ linux-2.6.32.41/drivers/isdn/hardware/eicon/idifunc.c 2011-05-16 21:46:57.000000000 -0400
29923 @@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
29924 IDI_SYNC_REQ req;
29925 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29926
29927 + pax_track_stack();
29928 +
29929 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29930
29931 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29932 diff -urNp linux-2.6.32.41/drivers/isdn/hardware/eicon/message.c linux-2.6.32.41/drivers/isdn/hardware/eicon/message.c
29933 --- linux-2.6.32.41/drivers/isdn/hardware/eicon/message.c 2011-03-27 14:31:47.000000000 -0400
29934 +++ linux-2.6.32.41/drivers/isdn/hardware/eicon/message.c 2011-05-16 21:46:57.000000000 -0400
29935 @@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
29936 dword d;
29937 word w;
29938
29939 + pax_track_stack();
29940 +
29941 a = plci->adapter;
29942 Id = ((word)plci->Id<<8)|a->Id;
29943 PUT_WORD(&SS_Ind[4],0x0000);
29944 @@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE
29945 word j, n, w;
29946 dword d;
29947
29948 + pax_track_stack();
29949 +
29950
29951 for(i=0;i<8;i++) bp_parms[i].length = 0;
29952 for(i=0;i<2;i++) global_config[i].length = 0;
29953 @@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARS
29954 const byte llc3[] = {4,3,2,2,6,6,0};
29955 const byte header[] = {0,2,3,3,0,0,0};
29956
29957 + pax_track_stack();
29958 +
29959 for(i=0;i<8;i++) bp_parms[i].length = 0;
29960 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
29961 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
29962 @@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI
29963 word appl_number_group_type[MAX_APPL];
29964 PLCI *auxplci;
29965
29966 + pax_track_stack();
29967 +
29968 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
29969
29970 if(!a->group_optimization_enabled)
29971 diff -urNp linux-2.6.32.41/drivers/isdn/hardware/eicon/mntfunc.c linux-2.6.32.41/drivers/isdn/hardware/eicon/mntfunc.c
29972 --- linux-2.6.32.41/drivers/isdn/hardware/eicon/mntfunc.c 2011-03-27 14:31:47.000000000 -0400
29973 +++ linux-2.6.32.41/drivers/isdn/hardware/eicon/mntfunc.c 2011-05-16 21:46:57.000000000 -0400
29974 @@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
29975 IDI_SYNC_REQ req;
29976 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29977
29978 + pax_track_stack();
29979 +
29980 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29981
29982 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29983 diff -urNp linux-2.6.32.41/drivers/isdn/i4l/isdn_common.c linux-2.6.32.41/drivers/isdn/i4l/isdn_common.c
29984 --- linux-2.6.32.41/drivers/isdn/i4l/isdn_common.c 2011-03-27 14:31:47.000000000 -0400
29985 +++ linux-2.6.32.41/drivers/isdn/i4l/isdn_common.c 2011-05-16 21:46:57.000000000 -0400
29986 @@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct f
29987 } iocpar;
29988 void __user *argp = (void __user *)arg;
29989
29990 + pax_track_stack();
29991 +
29992 #define name iocpar.name
29993 #define bname iocpar.bname
29994 #define iocts iocpar.iocts
29995 diff -urNp linux-2.6.32.41/drivers/isdn/icn/icn.c linux-2.6.32.41/drivers/isdn/icn/icn.c
29996 --- linux-2.6.32.41/drivers/isdn/icn/icn.c 2011-03-27 14:31:47.000000000 -0400
29997 +++ linux-2.6.32.41/drivers/isdn/icn/icn.c 2011-04-17 15:56:46.000000000 -0400
29998 @@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len
29999 if (count > len)
30000 count = len;
30001 if (user) {
30002 - if (copy_from_user(msg, buf, count))
30003 + if (count > sizeof msg || copy_from_user(msg, buf, count))
30004 return -EFAULT;
30005 } else
30006 memcpy(msg, buf, count);
30007 diff -urNp linux-2.6.32.41/drivers/isdn/mISDN/socket.c linux-2.6.32.41/drivers/isdn/mISDN/socket.c
30008 --- linux-2.6.32.41/drivers/isdn/mISDN/socket.c 2011-03-27 14:31:47.000000000 -0400
30009 +++ linux-2.6.32.41/drivers/isdn/mISDN/socket.c 2011-04-17 15:56:46.000000000 -0400
30010 @@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, uns
30011 if (dev) {
30012 struct mISDN_devinfo di;
30013
30014 + memset(&di, 0, sizeof(di));
30015 di.id = dev->id;
30016 di.Dprotocols = dev->Dprotocols;
30017 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
30018 @@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, uns
30019 if (dev) {
30020 struct mISDN_devinfo di;
30021
30022 + memset(&di, 0, sizeof(di));
30023 di.id = dev->id;
30024 di.Dprotocols = dev->Dprotocols;
30025 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
30026 diff -urNp linux-2.6.32.41/drivers/isdn/sc/interrupt.c linux-2.6.32.41/drivers/isdn/sc/interrupt.c
30027 --- linux-2.6.32.41/drivers/isdn/sc/interrupt.c 2011-03-27 14:31:47.000000000 -0400
30028 +++ linux-2.6.32.41/drivers/isdn/sc/interrupt.c 2011-04-17 15:56:46.000000000 -0400
30029 @@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy,
30030 }
30031 else if(callid>=0x0000 && callid<=0x7FFF)
30032 {
30033 + int len;
30034 +
30035 pr_debug("%s: Got Incoming Call\n",
30036 sc_adapter[card]->devicename);
30037 - strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
30038 - strcpy(setup.eazmsn,
30039 - sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
30040 + len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
30041 + sizeof(setup.phone));
30042 + if (len >= sizeof(setup.phone))
30043 + continue;
30044 + len = strlcpy(setup.eazmsn,
30045 + sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
30046 + sizeof(setup.eazmsn));
30047 + if (len >= sizeof(setup.eazmsn))
30048 + continue;
30049 setup.si1 = 7;
30050 setup.si2 = 0;
30051 setup.plan = 0;
30052 @@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy,
30053 * Handle a GetMyNumber Rsp
30054 */
30055 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
30056 - strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
30057 + strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
30058 + rcvmsg.msg_data.byte_array,
30059 + sizeof(rcvmsg.msg_data.byte_array));
30060 continue;
30061 }
30062
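interrupt_handler() used strcpy() to pull the caller's number and the stored DN into fixed-size fields of the setup structure; the patch switches to strlcpy() bounded by the destination and drops the event when the source would not fit. A self-contained sketch of the bounded-copy-with-truncation-check pattern; demo_strlcpy and record_phone are local helpers defined only so the example builds outside the kernel:

    #include <stdio.h>
    #include <string.h>

    /* minimal strlcpy-style helper: returns strlen(src), copies at most
     * size-1 bytes and always NUL-terminates (size > 0 assumed) */
    static size_t demo_strlcpy(char *dst, const char *src, size_t size)
    {
        size_t len = strlen(src);

        if (size) {
            size_t n = len >= size ? size - 1 : len;

            memcpy(dst, src, n);
            dst[n] = '\0';
        }
        return len;
    }

    static int record_phone(char *phone_field, size_t field_size, const char *src)
    {
        size_t len = demo_strlcpy(phone_field, src, field_size);

        if (len >= field_size)          /* source did not fit: drop the event */
            return -1;
        return 0;
    }

    int main(void)
    {
        char phone[8];

        printf("fits    : %d \"%s\"\n",
               record_phone(phone, sizeof(phone), "5551212"), phone);
        printf("too long: %d \"%s\"\n",
               record_phone(phone, sizeof(phone), "15555551212"), phone);
        return 0;
    }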
30063 diff -urNp linux-2.6.32.41/drivers/lguest/core.c linux-2.6.32.41/drivers/lguest/core.c
30064 --- linux-2.6.32.41/drivers/lguest/core.c 2011-03-27 14:31:47.000000000 -0400
30065 +++ linux-2.6.32.41/drivers/lguest/core.c 2011-04-17 15:56:46.000000000 -0400
30066 @@ -91,9 +91,17 @@ static __init int map_switcher(void)
30067 * it's worked so far. The end address needs +1 because __get_vm_area
30068 * allocates an extra guard page, so we need space for that.
30069 */
30070 +
30071 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
30072 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30073 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
30074 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30075 +#else
30076 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30077 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
30078 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30079 +#endif
30080 +
30081 if (!switcher_vma) {
30082 err = -ENOMEM;
30083 printk("lguest: could not map switcher pages high\n");
30084 @@ -118,7 +126,7 @@ static __init int map_switcher(void)
30085 * Now the Switcher is mapped at the right address, we can't fail!
30086 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
30087 */
30088 - memcpy(switcher_vma->addr, start_switcher_text,
30089 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
30090 end_switcher_text - start_switcher_text);
30091
30092 printk(KERN_INFO "lguest: mapped switcher at %p\n",
30093 diff -urNp linux-2.6.32.41/drivers/lguest/x86/core.c linux-2.6.32.41/drivers/lguest/x86/core.c
30094 --- linux-2.6.32.41/drivers/lguest/x86/core.c 2011-03-27 14:31:47.000000000 -0400
30095 +++ linux-2.6.32.41/drivers/lguest/x86/core.c 2011-04-17 15:56:46.000000000 -0400
30096 @@ -59,7 +59,7 @@ static struct {
30097 /* Offset from where switcher.S was compiled to where we've copied it */
30098 static unsigned long switcher_offset(void)
30099 {
30100 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
30101 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
30102 }
30103
30104 /* This cpu's struct lguest_pages. */
30105 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
30106 * These copies are pretty cheap, so we do them unconditionally: */
30107 /* Save the current Host top-level page directory.
30108 */
30109 +
30110 +#ifdef CONFIG_PAX_PER_CPU_PGD
30111 + pages->state.host_cr3 = read_cr3();
30112 +#else
30113 pages->state.host_cr3 = __pa(current->mm->pgd);
30114 +#endif
30115 +
30116 /*
30117 * Set up the Guest's page tables to see this CPU's pages (and no
30118 * other CPU's pages).
30119 @@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
30120 * compiled-in switcher code and the high-mapped copy we just made.
30121 */
30122 for (i = 0; i < IDT_ENTRIES; i++)
30123 - default_idt_entries[i] += switcher_offset();
30124 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
30125
30126 /*
30127 * Set up the Switcher's per-cpu areas.
30128 @@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
30129 * it will be undisturbed when we switch. To change %cs and jump we
30130 * need this structure to feed to Intel's "lcall" instruction.
30131 */
30132 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
30133 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
30134 lguest_entry.segment = LGUEST_CS;
30135
30136 /*
30137 diff -urNp linux-2.6.32.41/drivers/lguest/x86/switcher_32.S linux-2.6.32.41/drivers/lguest/x86/switcher_32.S
30138 --- linux-2.6.32.41/drivers/lguest/x86/switcher_32.S 2011-03-27 14:31:47.000000000 -0400
30139 +++ linux-2.6.32.41/drivers/lguest/x86/switcher_32.S 2011-04-17 15:56:46.000000000 -0400
30140 @@ -87,6 +87,7 @@
30141 #include <asm/page.h>
30142 #include <asm/segment.h>
30143 #include <asm/lguest.h>
30144 +#include <asm/processor-flags.h>
30145
30146 // We mark the start of the code to copy
30147 // It's placed in .text tho it's never run here
30148 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
30149 // Changes type when we load it: damn Intel!
30150 // For after we switch over our page tables
30151 // That entry will be read-only: we'd crash.
30152 +
30153 +#ifdef CONFIG_PAX_KERNEXEC
30154 + mov %cr0, %edx
30155 + xor $X86_CR0_WP, %edx
30156 + mov %edx, %cr0
30157 +#endif
30158 +
30159 movl $(GDT_ENTRY_TSS*8), %edx
30160 ltr %dx
30161
30162 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
30163 // Let's clear it again for our return.
30164 // The GDT descriptor of the Host
30165 // Points to the table after two "size" bytes
30166 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
30167 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
30168 // Clear "used" from type field (byte 5, bit 2)
30169 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
30170 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
30171 +
30172 +#ifdef CONFIG_PAX_KERNEXEC
30173 + mov %cr0, %eax
30174 + xor $X86_CR0_WP, %eax
30175 + mov %eax, %cr0
30176 +#endif
30177
30178 // Once our page table's switched, the Guest is live!
30179 // The Host fades as we run this final step.
30180 @@ -295,13 +309,12 @@ deliver_to_host:
30181 // I consulted gcc, and it gave
30182 // These instructions, which I gladly credit:
30183 leal (%edx,%ebx,8), %eax
30184 - movzwl (%eax),%edx
30185 - movl 4(%eax), %eax
30186 - xorw %ax, %ax
30187 - orl %eax, %edx
30188 + movl 4(%eax), %edx
30189 + movw (%eax), %dx
30190 // Now the address of the handler's in %edx
30191 // We call it now: its "iret" drops us home.
30192 - jmp *%edx
30193 + ljmp $__KERNEL_CS, $1f
30194 +1: jmp *%edx
30195
30196 // Every interrupt can come to us here
30197 // But we must truly tell each apart.
30198 diff -urNp linux-2.6.32.41/drivers/macintosh/via-pmu-backlight.c linux-2.6.32.41/drivers/macintosh/via-pmu-backlight.c
30199 --- linux-2.6.32.41/drivers/macintosh/via-pmu-backlight.c 2011-03-27 14:31:47.000000000 -0400
30200 +++ linux-2.6.32.41/drivers/macintosh/via-pmu-backlight.c 2011-04-17 15:56:46.000000000 -0400
30201 @@ -15,7 +15,7 @@
30202
30203 #define MAX_PMU_LEVEL 0xFF
30204
30205 -static struct backlight_ops pmu_backlight_data;
30206 +static const struct backlight_ops pmu_backlight_data;
30207 static DEFINE_SPINLOCK(pmu_backlight_lock);
30208 static int sleeping, uses_pmu_bl;
30209 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
30210 @@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(
30211 return bd->props.brightness;
30212 }
30213
30214 -static struct backlight_ops pmu_backlight_data = {
30215 +static const struct backlight_ops pmu_backlight_data = {
30216 .get_brightness = pmu_backlight_get_brightness,
30217 .update_status = pmu_backlight_update_status,
30218
30219 diff -urNp linux-2.6.32.41/drivers/macintosh/via-pmu.c linux-2.6.32.41/drivers/macintosh/via-pmu.c
30220 --- linux-2.6.32.41/drivers/macintosh/via-pmu.c 2011-03-27 14:31:47.000000000 -0400
30221 +++ linux-2.6.32.41/drivers/macintosh/via-pmu.c 2011-04-17 15:56:46.000000000 -0400
30222 @@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state
30223 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
30224 }
30225
30226 -static struct platform_suspend_ops pmu_pm_ops = {
30227 +static const struct platform_suspend_ops pmu_pm_ops = {
30228 .enter = powerbook_sleep,
30229 .valid = pmu_sleep_valid,
30230 };
30231 diff -urNp linux-2.6.32.41/drivers/md/dm.c linux-2.6.32.41/drivers/md/dm.c
30232 --- linux-2.6.32.41/drivers/md/dm.c 2011-03-27 14:31:47.000000000 -0400
30233 +++ linux-2.6.32.41/drivers/md/dm.c 2011-05-04 17:56:28.000000000 -0400
30234 @@ -163,9 +163,9 @@ struct mapped_device {
30235 /*
30236 * Event handling.
30237 */
30238 - atomic_t event_nr;
30239 + atomic_unchecked_t event_nr;
30240 wait_queue_head_t eventq;
30241 - atomic_t uevent_seq;
30242 + atomic_unchecked_t uevent_seq;
30243 struct list_head uevent_list;
30244 spinlock_t uevent_lock; /* Protect access to uevent_list */
30245
30246 @@ -1770,8 +1770,8 @@ static struct mapped_device *alloc_dev(i
30247 rwlock_init(&md->map_lock);
30248 atomic_set(&md->holders, 1);
30249 atomic_set(&md->open_count, 0);
30250 - atomic_set(&md->event_nr, 0);
30251 - atomic_set(&md->uevent_seq, 0);
30252 + atomic_set_unchecked(&md->event_nr, 0);
30253 + atomic_set_unchecked(&md->uevent_seq, 0);
30254 INIT_LIST_HEAD(&md->uevent_list);
30255 spin_lock_init(&md->uevent_lock);
30256
30257 @@ -1921,7 +1921,7 @@ static void event_callback(void *context
30258
30259 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
30260
30261 - atomic_inc(&md->event_nr);
30262 + atomic_inc_unchecked(&md->event_nr);
30263 wake_up(&md->eventq);
30264 }
30265
30266 @@ -2556,18 +2556,18 @@ void dm_kobject_uevent(struct mapped_dev
30267
30268 uint32_t dm_next_uevent_seq(struct mapped_device *md)
30269 {
30270 - return atomic_add_return(1, &md->uevent_seq);
30271 + return atomic_add_return_unchecked(1, &md->uevent_seq);
30272 }
30273
30274 uint32_t dm_get_event_nr(struct mapped_device *md)
30275 {
30276 - return atomic_read(&md->event_nr);
30277 + return atomic_read_unchecked(&md->event_nr);
30278 }
30279
30280 int dm_wait_event(struct mapped_device *md, int event_nr)
30281 {
30282 return wait_event_interruptible(md->eventq,
30283 - (event_nr != atomic_read(&md->event_nr)));
30284 + (event_nr != atomic_read_unchecked(&md->event_nr)));
30285 }
30286
30287 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
30288 diff -urNp linux-2.6.32.41/drivers/md/dm-ioctl.c linux-2.6.32.41/drivers/md/dm-ioctl.c
30289 --- linux-2.6.32.41/drivers/md/dm-ioctl.c 2011-03-27 14:31:47.000000000 -0400
30290 +++ linux-2.6.32.41/drivers/md/dm-ioctl.c 2011-04-17 15:56:46.000000000 -0400
30291 @@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, str
30292 cmd == DM_LIST_VERSIONS_CMD)
30293 return 0;
30294
30295 - if ((cmd == DM_DEV_CREATE_CMD)) {
30296 + if (cmd == DM_DEV_CREATE_CMD) {
30297 if (!*param->name) {
30298 DMWARN("name not supplied when creating device");
30299 return -EINVAL;
30300 diff -urNp linux-2.6.32.41/drivers/md/dm-raid1.c linux-2.6.32.41/drivers/md/dm-raid1.c
30301 --- linux-2.6.32.41/drivers/md/dm-raid1.c 2011-03-27 14:31:47.000000000 -0400
30302 +++ linux-2.6.32.41/drivers/md/dm-raid1.c 2011-05-04 17:56:28.000000000 -0400
30303 @@ -41,7 +41,7 @@ enum dm_raid1_error {
30304
30305 struct mirror {
30306 struct mirror_set *ms;
30307 - atomic_t error_count;
30308 + atomic_unchecked_t error_count;
30309 unsigned long error_type;
30310 struct dm_dev *dev;
30311 sector_t offset;
30312 @@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m
30313 * simple way to tell if a device has encountered
30314 * errors.
30315 */
30316 - atomic_inc(&m->error_count);
30317 + atomic_inc_unchecked(&m->error_count);
30318
30319 if (test_and_set_bit(error_type, &m->error_type))
30320 return;
30321 @@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m
30322 }
30323
30324 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
30325 - if (!atomic_read(&new->error_count)) {
30326 + if (!atomic_read_unchecked(&new->error_count)) {
30327 set_default_mirror(new);
30328 break;
30329 }
30330 @@ -363,7 +363,7 @@ static struct mirror *choose_mirror(stru
30331 struct mirror *m = get_default_mirror(ms);
30332
30333 do {
30334 - if (likely(!atomic_read(&m->error_count)))
30335 + if (likely(!atomic_read_unchecked(&m->error_count)))
30336 return m;
30337
30338 if (m-- == ms->mirror)
30339 @@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
30340 {
30341 struct mirror *default_mirror = get_default_mirror(m->ms);
30342
30343 - return !atomic_read(&default_mirror->error_count);
30344 + return !atomic_read_unchecked(&default_mirror->error_count);
30345 }
30346
30347 static int mirror_available(struct mirror_set *ms, struct bio *bio)
30348 @@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *
30349 */
30350 if (likely(region_in_sync(ms, region, 1)))
30351 m = choose_mirror(ms, bio->bi_sector);
30352 - else if (m && atomic_read(&m->error_count))
30353 + else if (m && atomic_read_unchecked(&m->error_count))
30354 m = NULL;
30355
30356 if (likely(m))
30357 @@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set
30358 }
30359
30360 ms->mirror[mirror].ms = ms;
30361 - atomic_set(&(ms->mirror[mirror].error_count), 0);
30362 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
30363 ms->mirror[mirror].error_type = 0;
30364 ms->mirror[mirror].offset = offset;
30365
30366 @@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_targ
30367 */
30368 static char device_status_char(struct mirror *m)
30369 {
30370 - if (!atomic_read(&(m->error_count)))
30371 + if (!atomic_read_unchecked(&(m->error_count)))
30372 return 'A';
30373
30374 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
30375 diff -urNp linux-2.6.32.41/drivers/md/dm-stripe.c linux-2.6.32.41/drivers/md/dm-stripe.c
30376 --- linux-2.6.32.41/drivers/md/dm-stripe.c 2011-03-27 14:31:47.000000000 -0400
30377 +++ linux-2.6.32.41/drivers/md/dm-stripe.c 2011-05-04 17:56:28.000000000 -0400
30378 @@ -20,7 +20,7 @@ struct stripe {
30379 struct dm_dev *dev;
30380 sector_t physical_start;
30381
30382 - atomic_t error_count;
30383 + atomic_unchecked_t error_count;
30384 };
30385
30386 struct stripe_c {
30387 @@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *
30388 kfree(sc);
30389 return r;
30390 }
30391 - atomic_set(&(sc->stripe[i].error_count), 0);
30392 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
30393 }
30394
30395 ti->private = sc;
30396 @@ -257,7 +257,7 @@ static int stripe_status(struct dm_targe
30397 DMEMIT("%d ", sc->stripes);
30398 for (i = 0; i < sc->stripes; i++) {
30399 DMEMIT("%s ", sc->stripe[i].dev->name);
30400 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
30401 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
30402 'D' : 'A';
30403 }
30404 buffer[i] = '\0';
30405 @@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_targe
30406 */
30407 for (i = 0; i < sc->stripes; i++)
30408 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
30409 - atomic_inc(&(sc->stripe[i].error_count));
30410 - if (atomic_read(&(sc->stripe[i].error_count)) <
30411 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
30412 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
30413 DM_IO_ERROR_THRESHOLD)
30414 queue_work(kstriped, &sc->kstriped_ws);
30415 }
30416 diff -urNp linux-2.6.32.41/drivers/md/dm-sysfs.c linux-2.6.32.41/drivers/md/dm-sysfs.c
30417 --- linux-2.6.32.41/drivers/md/dm-sysfs.c 2011-03-27 14:31:47.000000000 -0400
30418 +++ linux-2.6.32.41/drivers/md/dm-sysfs.c 2011-04-17 15:56:46.000000000 -0400
30419 @@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
30420 NULL,
30421 };
30422
30423 -static struct sysfs_ops dm_sysfs_ops = {
30424 +static const struct sysfs_ops dm_sysfs_ops = {
30425 .show = dm_attr_show,
30426 };
30427
30428 diff -urNp linux-2.6.32.41/drivers/md/dm-table.c linux-2.6.32.41/drivers/md/dm-table.c
30429 --- linux-2.6.32.41/drivers/md/dm-table.c 2011-03-27 14:31:47.000000000 -0400
30430 +++ linux-2.6.32.41/drivers/md/dm-table.c 2011-04-17 15:56:46.000000000 -0400
30431 @@ -359,7 +359,7 @@ static int device_area_is_invalid(struct
30432 if (!dev_size)
30433 return 0;
30434
30435 - if ((start >= dev_size) || (start + len > dev_size)) {
30436 + if ((start >= dev_size) || (len > dev_size - start)) {
30437 DMWARN("%s: %s too small for target: "
30438 "start=%llu, len=%llu, dev_size=%llu",
30439 dm_device_name(ti->table->md), bdevname(bdev, b),
30440 diff -urNp linux-2.6.32.41/drivers/md/md.c linux-2.6.32.41/drivers/md/md.c
30441 --- linux-2.6.32.41/drivers/md/md.c 2011-03-27 14:31:47.000000000 -0400
30442 +++ linux-2.6.32.41/drivers/md/md.c 2011-05-04 17:56:20.000000000 -0400
30443 @@ -153,10 +153,10 @@ static int start_readonly;
30444 * start build, activate spare
30445 */
30446 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
30447 -static atomic_t md_event_count;
30448 +static atomic_unchecked_t md_event_count;
30449 void md_new_event(mddev_t *mddev)
30450 {
30451 - atomic_inc(&md_event_count);
30452 + atomic_inc_unchecked(&md_event_count);
30453 wake_up(&md_event_waiters);
30454 }
30455 EXPORT_SYMBOL_GPL(md_new_event);
30456 @@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
30457 */
30458 static void md_new_event_inintr(mddev_t *mddev)
30459 {
30460 - atomic_inc(&md_event_count);
30461 + atomic_inc_unchecked(&md_event_count);
30462 wake_up(&md_event_waiters);
30463 }
30464
30465 @@ -1218,7 +1218,7 @@ static int super_1_load(mdk_rdev_t *rdev
30466
30467 rdev->preferred_minor = 0xffff;
30468 rdev->data_offset = le64_to_cpu(sb->data_offset);
30469 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
30470 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
30471
30472 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
30473 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
30474 @@ -1392,7 +1392,7 @@ static void super_1_sync(mddev_t *mddev,
30475 else
30476 sb->resync_offset = cpu_to_le64(0);
30477
30478 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
30479 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
30480
30481 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
30482 sb->size = cpu_to_le64(mddev->dev_sectors);
30483 @@ -2214,7 +2214,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
30484 static ssize_t
30485 errors_show(mdk_rdev_t *rdev, char *page)
30486 {
30487 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
30488 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
30489 }
30490
30491 static ssize_t
30492 @@ -2223,7 +2223,7 @@ errors_store(mdk_rdev_t *rdev, const cha
30493 char *e;
30494 unsigned long n = simple_strtoul(buf, &e, 10);
30495 if (*buf && (*e == 0 || *e == '\n')) {
30496 - atomic_set(&rdev->corrected_errors, n);
30497 + atomic_set_unchecked(&rdev->corrected_errors, n);
30498 return len;
30499 }
30500 return -EINVAL;
30501 @@ -2517,7 +2517,7 @@ static void rdev_free(struct kobject *ko
30502 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
30503 kfree(rdev);
30504 }
30505 -static struct sysfs_ops rdev_sysfs_ops = {
30506 +static const struct sysfs_ops rdev_sysfs_ops = {
30507 .show = rdev_attr_show,
30508 .store = rdev_attr_store,
30509 };
30510 @@ -2566,8 +2566,8 @@ static mdk_rdev_t *md_import_device(dev_
30511 rdev->data_offset = 0;
30512 rdev->sb_events = 0;
30513 atomic_set(&rdev->nr_pending, 0);
30514 - atomic_set(&rdev->read_errors, 0);
30515 - atomic_set(&rdev->corrected_errors, 0);
30516 + atomic_set_unchecked(&rdev->read_errors, 0);
30517 + atomic_set_unchecked(&rdev->corrected_errors, 0);
30518
30519 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
30520 if (!size) {
30521 @@ -3887,7 +3887,7 @@ static void md_free(struct kobject *ko)
30522 kfree(mddev);
30523 }
30524
30525 -static struct sysfs_ops md_sysfs_ops = {
30526 +static const struct sysfs_ops md_sysfs_ops = {
30527 .show = md_attr_show,
30528 .store = md_attr_store,
30529 };
30530 @@ -4474,7 +4474,8 @@ out:
30531 err = 0;
30532 blk_integrity_unregister(disk);
30533 md_new_event(mddev);
30534 - sysfs_notify_dirent(mddev->sysfs_state);
30535 + if (mddev->sysfs_state)
30536 + sysfs_notify_dirent(mddev->sysfs_state);
30537 return err;
30538 }
30539
30540 @@ -5954,7 +5955,7 @@ static int md_seq_show(struct seq_file *
30541
30542 spin_unlock(&pers_lock);
30543 seq_printf(seq, "\n");
30544 - mi->event = atomic_read(&md_event_count);
30545 + mi->event = atomic_read_unchecked(&md_event_count);
30546 return 0;
30547 }
30548 if (v == (void*)2) {
30549 @@ -6043,7 +6044,7 @@ static int md_seq_show(struct seq_file *
30550 chunk_kb ? "KB" : "B");
30551 if (bitmap->file) {
30552 seq_printf(seq, ", file: ");
30553 - seq_path(seq, &bitmap->file->f_path, " \t\n");
30554 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
30555 }
30556
30557 seq_printf(seq, "\n");
30558 @@ -6077,7 +6078,7 @@ static int md_seq_open(struct inode *ino
30559 else {
30560 struct seq_file *p = file->private_data;
30561 p->private = mi;
30562 - mi->event = atomic_read(&md_event_count);
30563 + mi->event = atomic_read_unchecked(&md_event_count);
30564 }
30565 return error;
30566 }
30567 @@ -6093,7 +6094,7 @@ static unsigned int mdstat_poll(struct f
30568 /* always allow read */
30569 mask = POLLIN | POLLRDNORM;
30570
30571 - if (mi->event != atomic_read(&md_event_count))
30572 + if (mi->event != atomic_read_unchecked(&md_event_count))
30573 mask |= POLLERR | POLLPRI;
30574 return mask;
30575 }
30576 @@ -6137,7 +6138,7 @@ static int is_mddev_idle(mddev_t *mddev,
30577 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
30578 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
30579 (int)part_stat_read(&disk->part0, sectors[1]) -
30580 - atomic_read(&disk->sync_io);
30581 + atomic_read_unchecked(&disk->sync_io);
30582 /* sync IO will cause sync_io to increase before the disk_stats
30583 * as sync_io is counted when a request starts, and
30584 * disk_stats is counted when it completes.
30585 diff -urNp linux-2.6.32.41/drivers/md/md.h linux-2.6.32.41/drivers/md/md.h
30586 --- linux-2.6.32.41/drivers/md/md.h 2011-03-27 14:31:47.000000000 -0400
30587 +++ linux-2.6.32.41/drivers/md/md.h 2011-05-04 17:56:20.000000000 -0400
30588 @@ -94,10 +94,10 @@ struct mdk_rdev_s
30589 * only maintained for arrays that
30590 * support hot removal
30591 */
30592 - atomic_t read_errors; /* number of consecutive read errors that
30593 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
30594 * we have tried to ignore.
30595 */
30596 - atomic_t corrected_errors; /* number of corrected read errors,
30597 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
30598 * for reporting to userspace and storing
30599 * in superblock.
30600 */
30601 @@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_
30602
30603 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
30604 {
30605 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
30606 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
30607 }
30608
30609 struct mdk_personality
30610 diff -urNp linux-2.6.32.41/drivers/md/raid10.c linux-2.6.32.41/drivers/md/raid10.c
30611 --- linux-2.6.32.41/drivers/md/raid10.c 2011-03-27 14:31:47.000000000 -0400
30612 +++ linux-2.6.32.41/drivers/md/raid10.c 2011-05-04 17:56:28.000000000 -0400
30613 @@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bi
30614 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
30615 set_bit(R10BIO_Uptodate, &r10_bio->state);
30616 else {
30617 - atomic_add(r10_bio->sectors,
30618 + atomic_add_unchecked(r10_bio->sectors,
30619 &conf->mirrors[d].rdev->corrected_errors);
30620 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
30621 md_error(r10_bio->mddev,
30622 @@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
30623 test_bit(In_sync, &rdev->flags)) {
30624 atomic_inc(&rdev->nr_pending);
30625 rcu_read_unlock();
30626 - atomic_add(s, &rdev->corrected_errors);
30627 + atomic_add_unchecked(s, &rdev->corrected_errors);
30628 if (sync_page_io(rdev->bdev,
30629 r10_bio->devs[sl].addr +
30630 sect + rdev->data_offset,
30631 diff -urNp linux-2.6.32.41/drivers/md/raid1.c linux-2.6.32.41/drivers/md/raid1.c
30632 --- linux-2.6.32.41/drivers/md/raid1.c 2011-03-27 14:31:47.000000000 -0400
30633 +++ linux-2.6.32.41/drivers/md/raid1.c 2011-05-04 17:56:28.000000000 -0400
30634 @@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *
30635 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
30636 continue;
30637 rdev = conf->mirrors[d].rdev;
30638 - atomic_add(s, &rdev->corrected_errors);
30639 + atomic_add_unchecked(s, &rdev->corrected_errors);
30640 if (sync_page_io(rdev->bdev,
30641 sect + rdev->data_offset,
30642 s<<9,
30643 @@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf,
30644 /* Well, this device is dead */
30645 md_error(mddev, rdev);
30646 else {
30647 - atomic_add(s, &rdev->corrected_errors);
30648 + atomic_add_unchecked(s, &rdev->corrected_errors);
30649 printk(KERN_INFO
30650 "raid1:%s: read error corrected "
30651 "(%d sectors at %llu on %s)\n",
30652 diff -urNp linux-2.6.32.41/drivers/md/raid5.c linux-2.6.32.41/drivers/md/raid5.c
30653 --- linux-2.6.32.41/drivers/md/raid5.c 2011-03-27 14:31:47.000000000 -0400
30654 +++ linux-2.6.32.41/drivers/md/raid5.c 2011-05-16 21:46:57.000000000 -0400
30655 @@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_hea
30656 bi->bi_next = NULL;
30657 if (rw == WRITE &&
30658 test_bit(R5_ReWrite, &sh->dev[i].flags))
30659 - atomic_add(STRIPE_SECTORS,
30660 + atomic_add_unchecked(STRIPE_SECTORS,
30661 &rdev->corrected_errors);
30662 generic_make_request(bi);
30663 } else {
30664 @@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struc
30665 clear_bit(R5_ReadError, &sh->dev[i].flags);
30666 clear_bit(R5_ReWrite, &sh->dev[i].flags);
30667 }
30668 - if (atomic_read(&conf->disks[i].rdev->read_errors))
30669 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
30670 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
30671 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
30672 } else {
30673 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
30674 int retry = 0;
30675 rdev = conf->disks[i].rdev;
30676
30677 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
30678 - atomic_inc(&rdev->read_errors);
30679 + atomic_inc_unchecked(&rdev->read_errors);
30680 if (conf->mddev->degraded >= conf->max_degraded)
30681 printk_rl(KERN_WARNING
30682 "raid5:%s: read error not correctable "
30683 @@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struc
30684 (unsigned long long)(sh->sector
30685 + rdev->data_offset),
30686 bdn);
30687 - else if (atomic_read(&rdev->read_errors)
30688 + else if (atomic_read_unchecked(&rdev->read_errors)
30689 > conf->max_nr_stripes)
30690 printk(KERN_WARNING
30691 "raid5:%s: Too many read errors, failing device %s.\n",
30692 @@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct s
30693 sector_t r_sector;
30694 struct stripe_head sh2;
30695
30696 + pax_track_stack();
30697
30698 chunk_offset = sector_div(new_sector, sectors_per_chunk);
30699 stripe = new_sector;
30700 diff -urNp linux-2.6.32.41/drivers/media/common/saa7146_hlp.c linux-2.6.32.41/drivers/media/common/saa7146_hlp.c
30701 --- linux-2.6.32.41/drivers/media/common/saa7146_hlp.c 2011-03-27 14:31:47.000000000 -0400
30702 +++ linux-2.6.32.41/drivers/media/common/saa7146_hlp.c 2011-05-16 21:46:57.000000000 -0400
30703 @@ -353,6 +353,8 @@ static void calculate_clipping_registers
30704
30705 int x[32], y[32], w[32], h[32];
30706
30707 + pax_track_stack();
30708 +
30709 /* clear out memory */
30710 memset(&line_list[0], 0x00, sizeof(u32)*32);
30711 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
30712 diff -urNp linux-2.6.32.41/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-2.6.32.41/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
30713 --- linux-2.6.32.41/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-03-27 14:31:47.000000000 -0400
30714 +++ linux-2.6.32.41/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-05-16 21:46:57.000000000 -0400
30715 @@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
30716 u8 buf[HOST_LINK_BUF_SIZE];
30717 int i;
30718
30719 + pax_track_stack();
30720 +
30721 dprintk("%s\n", __func__);
30722
30723 /* check if we have space for a link buf in the rx_buffer */
30724 @@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
30725 unsigned long timeout;
30726 int written;
30727
30728 + pax_track_stack();
30729 +
30730 dprintk("%s\n", __func__);
30731
30732 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
30733 diff -urNp linux-2.6.32.41/drivers/media/dvb/dvb-core/dvbdev.c linux-2.6.32.41/drivers/media/dvb/dvb-core/dvbdev.c
30734 --- linux-2.6.32.41/drivers/media/dvb/dvb-core/dvbdev.c 2011-03-27 14:31:47.000000000 -0400
30735 +++ linux-2.6.32.41/drivers/media/dvb/dvb-core/dvbdev.c 2011-04-17 15:56:46.000000000 -0400
30736 @@ -191,6 +191,7 @@ int dvb_register_device(struct dvb_adapt
30737 const struct dvb_device *template, void *priv, int type)
30738 {
30739 struct dvb_device *dvbdev;
30740 + /* cannot be const */
30741 struct file_operations *dvbdevfops;
30742 struct device *clsdev;
30743 int minor;
30744 diff -urNp linux-2.6.32.41/drivers/media/dvb/dvb-usb/dib0700_core.c linux-2.6.32.41/drivers/media/dvb/dvb-usb/dib0700_core.c
30745 --- linux-2.6.32.41/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-03-27 14:31:47.000000000 -0400
30746 +++ linux-2.6.32.41/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-05-16 21:46:57.000000000 -0400
30747 @@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb
30748
30749 u8 buf[260];
30750
30751 + pax_track_stack();
30752 +
30753 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
30754 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
30755
30756 diff -urNp linux-2.6.32.41/drivers/media/dvb/frontends/or51211.c linux-2.6.32.41/drivers/media/dvb/frontends/or51211.c
30757 --- linux-2.6.32.41/drivers/media/dvb/frontends/or51211.c 2011-03-27 14:31:47.000000000 -0400
30758 +++ linux-2.6.32.41/drivers/media/dvb/frontends/or51211.c 2011-05-16 21:46:57.000000000 -0400
30759 @@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
30760 u8 tudata[585];
30761 int i;
30762
30763 + pax_track_stack();
30764 +
30765 dprintk("Firmware is %zd bytes\n",fw->size);
30766
30767 /* Get eprom data */
30768 diff -urNp linux-2.6.32.41/drivers/media/radio/radio-cadet.c linux-2.6.32.41/drivers/media/radio/radio-cadet.c
30769 --- linux-2.6.32.41/drivers/media/radio/radio-cadet.c 2011-03-27 14:31:47.000000000 -0400
30770 +++ linux-2.6.32.41/drivers/media/radio/radio-cadet.c 2011-04-17 15:56:46.000000000 -0400
30771 @@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *f
30772 while (i < count && dev->rdsin != dev->rdsout)
30773 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
30774
30775 - if (copy_to_user(data, readbuf, i))
30776 + if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
30777 return -EFAULT;
30778 return i;
30779 }
30780 diff -urNp linux-2.6.32.41/drivers/media/video/cx18/cx18-driver.c linux-2.6.32.41/drivers/media/video/cx18/cx18-driver.c
30781 --- linux-2.6.32.41/drivers/media/video/cx18/cx18-driver.c 2011-03-27 14:31:47.000000000 -0400
30782 +++ linux-2.6.32.41/drivers/media/video/cx18/cx18-driver.c 2011-05-16 21:46:57.000000000 -0400
30783 @@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl
30784
30785 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
30786
30787 -static atomic_t cx18_instance = ATOMIC_INIT(0);
30788 +static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
30789
30790 /* Parameter declarations */
30791 static int cardtype[CX18_MAX_CARDS];
30792 @@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
30793 struct i2c_client c;
30794 u8 eedata[256];
30795
30796 + pax_track_stack();
30797 +
30798 memset(&c, 0, sizeof(c));
30799 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
30800 c.adapter = &cx->i2c_adap[0];
30801 @@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct p
30802 struct cx18 *cx;
30803
30804 /* FIXME - module parameter arrays constrain max instances */
30805 - i = atomic_inc_return(&cx18_instance) - 1;
30806 + i = atomic_inc_return_unchecked(&cx18_instance) - 1;
30807 if (i >= CX18_MAX_CARDS) {
30808 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
30809 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
30810 diff -urNp linux-2.6.32.41/drivers/media/video/ivtv/ivtv-driver.c linux-2.6.32.41/drivers/media/video/ivtv/ivtv-driver.c
30811 --- linux-2.6.32.41/drivers/media/video/ivtv/ivtv-driver.c 2011-03-27 14:31:47.000000000 -0400
30812 +++ linux-2.6.32.41/drivers/media/video/ivtv/ivtv-driver.c 2011-05-04 17:56:28.000000000 -0400
30813 @@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl
30814 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
30815
30816 /* ivtv instance counter */
30817 -static atomic_t ivtv_instance = ATOMIC_INIT(0);
30818 +static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
30819
30820 /* Parameter declarations */
30821 static int cardtype[IVTV_MAX_CARDS];
30822 diff -urNp linux-2.6.32.41/drivers/media/video/omap24xxcam.c linux-2.6.32.41/drivers/media/video/omap24xxcam.c
30823 --- linux-2.6.32.41/drivers/media/video/omap24xxcam.c 2011-03-27 14:31:47.000000000 -0400
30824 +++ linux-2.6.32.41/drivers/media/video/omap24xxcam.c 2011-05-04 17:56:28.000000000 -0400
30825 @@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(str
30826 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
30827
30828 do_gettimeofday(&vb->ts);
30829 - vb->field_count = atomic_add_return(2, &fh->field_count);
30830 + vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
30831 if (csr & csr_error) {
30832 vb->state = VIDEOBUF_ERROR;
30833 if (!atomic_read(&fh->cam->in_reset)) {
30834 diff -urNp linux-2.6.32.41/drivers/media/video/omap24xxcam.h linux-2.6.32.41/drivers/media/video/omap24xxcam.h
30835 --- linux-2.6.32.41/drivers/media/video/omap24xxcam.h 2011-03-27 14:31:47.000000000 -0400
30836 +++ linux-2.6.32.41/drivers/media/video/omap24xxcam.h 2011-05-04 17:56:28.000000000 -0400
30837 @@ -533,7 +533,7 @@ struct omap24xxcam_fh {
30838 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
30839 struct videobuf_queue vbq;
30840 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
30841 - atomic_t field_count; /* field counter for videobuf_buffer */
30842 + atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
30843 /* accessing cam here doesn't need serialisation: it's constant */
30844 struct omap24xxcam_device *cam;
30845 };
30846 diff -urNp linux-2.6.32.41/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-2.6.32.41/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
30847 --- linux-2.6.32.41/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-03-27 14:31:47.000000000 -0400
30848 +++ linux-2.6.32.41/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-05-16 21:46:57.000000000 -0400
30849 @@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
30850 u8 *eeprom;
30851 struct tveeprom tvdata;
30852
30853 + pax_track_stack();
30854 +
30855 memset(&tvdata,0,sizeof(tvdata));
30856
30857 eeprom = pvr2_eeprom_fetch(hdw);
30858 diff -urNp linux-2.6.32.41/drivers/media/video/saa7134/saa6752hs.c linux-2.6.32.41/drivers/media/video/saa7134/saa6752hs.c
30859 --- linux-2.6.32.41/drivers/media/video/saa7134/saa6752hs.c 2011-03-27 14:31:47.000000000 -0400
30860 +++ linux-2.6.32.41/drivers/media/video/saa7134/saa6752hs.c 2011-05-16 21:46:57.000000000 -0400
30861 @@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_su
30862 unsigned char localPAT[256];
30863 unsigned char localPMT[256];
30864
30865 + pax_track_stack();
30866 +
30867 /* Set video format - must be done first as it resets other settings */
30868 set_reg8(client, 0x41, h->video_format);
30869
30870 diff -urNp linux-2.6.32.41/drivers/media/video/saa7164/saa7164-cmd.c linux-2.6.32.41/drivers/media/video/saa7164/saa7164-cmd.c
30871 --- linux-2.6.32.41/drivers/media/video/saa7164/saa7164-cmd.c 2011-03-27 14:31:47.000000000 -0400
30872 +++ linux-2.6.32.41/drivers/media/video/saa7164/saa7164-cmd.c 2011-05-16 21:46:57.000000000 -0400
30873 @@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_d
30874 wait_queue_head_t *q = 0;
30875 dprintk(DBGLVL_CMD, "%s()\n", __func__);
30876
30877 + pax_track_stack();
30878 +
30879 /* While any outstand message on the bus exists... */
30880 do {
30881
30882 @@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
30883 u8 tmp[512];
30884 dprintk(DBGLVL_CMD, "%s()\n", __func__);
30885
30886 + pax_track_stack();
30887 +
30888 while (loop) {
30889
30890 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
30891 diff -urNp linux-2.6.32.41/drivers/media/video/usbvideo/konicawc.c linux-2.6.32.41/drivers/media/video/usbvideo/konicawc.c
30892 --- linux-2.6.32.41/drivers/media/video/usbvideo/konicawc.c 2011-03-27 14:31:47.000000000 -0400
30893 +++ linux-2.6.32.41/drivers/media/video/usbvideo/konicawc.c 2011-04-17 15:56:46.000000000 -0400
30894 @@ -225,7 +225,7 @@ static void konicawc_register_input(stru
30895 int error;
30896
30897 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
30898 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
30899 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
30900
30901 cam->input = input_dev = input_allocate_device();
30902 if (!input_dev) {
30903 diff -urNp linux-2.6.32.41/drivers/media/video/usbvideo/quickcam_messenger.c linux-2.6.32.41/drivers/media/video/usbvideo/quickcam_messenger.c
30904 --- linux-2.6.32.41/drivers/media/video/usbvideo/quickcam_messenger.c 2011-03-27 14:31:47.000000000 -0400
30905 +++ linux-2.6.32.41/drivers/media/video/usbvideo/quickcam_messenger.c 2011-04-17 15:56:46.000000000 -0400
30906 @@ -89,7 +89,7 @@ static void qcm_register_input(struct qc
30907 int error;
30908
30909 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
30910 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
30911 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
30912
30913 cam->input = input_dev = input_allocate_device();
30914 if (!input_dev) {
30915 diff -urNp linux-2.6.32.41/drivers/media/video/usbvision/usbvision-core.c linux-2.6.32.41/drivers/media/video/usbvision/usbvision-core.c
30916 --- linux-2.6.32.41/drivers/media/video/usbvision/usbvision-core.c 2011-03-27 14:31:47.000000000 -0400
30917 +++ linux-2.6.32.41/drivers/media/video/usbvision/usbvision-core.c 2011-05-16 21:46:57.000000000 -0400
30918 @@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_c
30919 unsigned char rv, gv, bv;
30920 static unsigned char *Y, *U, *V;
30921
30922 + pax_track_stack();
30923 +
30924 frame = usbvision->curFrame;
30925 imageSize = frame->frmwidth * frame->frmheight;
30926 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
30927 diff -urNp linux-2.6.32.41/drivers/media/video/v4l2-device.c linux-2.6.32.41/drivers/media/video/v4l2-device.c
30928 --- linux-2.6.32.41/drivers/media/video/v4l2-device.c 2011-03-27 14:31:47.000000000 -0400
30929 +++ linux-2.6.32.41/drivers/media/video/v4l2-device.c 2011-05-04 17:56:28.000000000 -0400
30930 @@ -50,9 +50,9 @@ int v4l2_device_register(struct device *
30931 EXPORT_SYMBOL_GPL(v4l2_device_register);
30932
30933 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
30934 - atomic_t *instance)
30935 + atomic_unchecked_t *instance)
30936 {
30937 - int num = atomic_inc_return(instance) - 1;
30938 + int num = atomic_inc_return_unchecked(instance) - 1;
30939 int len = strlen(basename);
30940
30941 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
30942 diff -urNp linux-2.6.32.41/drivers/media/video/videobuf-dma-sg.c linux-2.6.32.41/drivers/media/video/videobuf-dma-sg.c
30943 --- linux-2.6.32.41/drivers/media/video/videobuf-dma-sg.c 2011-03-27 14:31:47.000000000 -0400
30944 +++ linux-2.6.32.41/drivers/media/video/videobuf-dma-sg.c 2011-05-16 21:46:57.000000000 -0400
30945 @@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
30946 {
30947 struct videobuf_queue q;
30948
30949 + pax_track_stack();
30950 +
30951 /* Required to make generic handler to call __videobuf_alloc */
30952 q.int_ops = &sg_ops;
30953
30954 diff -urNp linux-2.6.32.41/drivers/message/fusion/mptbase.c linux-2.6.32.41/drivers/message/fusion/mptbase.c
30955 --- linux-2.6.32.41/drivers/message/fusion/mptbase.c 2011-03-27 14:31:47.000000000 -0400
30956 +++ linux-2.6.32.41/drivers/message/fusion/mptbase.c 2011-04-17 15:56:46.000000000 -0400
30957 @@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **s
30958 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
30959 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
30960
30961 +#ifdef CONFIG_GRKERNSEC_HIDESYM
30962 + len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
30963 + NULL, NULL);
30964 +#else
30965 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
30966 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
30967 +#endif
30968 +
30969 /*
30970 * Rounding UP to nearest 4-kB boundary here...
30971 */
30972 diff -urNp linux-2.6.32.41/drivers/message/fusion/mptsas.c linux-2.6.32.41/drivers/message/fusion/mptsas.c
30973 --- linux-2.6.32.41/drivers/message/fusion/mptsas.c 2011-03-27 14:31:47.000000000 -0400
30974 +++ linux-2.6.32.41/drivers/message/fusion/mptsas.c 2011-04-17 15:56:46.000000000 -0400
30975 @@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devin
30976 return 0;
30977 }
30978
30979 +static inline void
30980 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
30981 +{
30982 + if (phy_info->port_details) {
30983 + phy_info->port_details->rphy = rphy;
30984 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
30985 + ioc->name, rphy));
30986 + }
30987 +
30988 + if (rphy) {
30989 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
30990 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
30991 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
30992 + ioc->name, rphy, rphy->dev.release));
30993 + }
30994 +}
30995 +
30996 /* no mutex */
30997 static void
30998 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
30999 @@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
31000 return NULL;
31001 }
31002
31003 -static inline void
31004 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31005 -{
31006 - if (phy_info->port_details) {
31007 - phy_info->port_details->rphy = rphy;
31008 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31009 - ioc->name, rphy));
31010 - }
31011 -
31012 - if (rphy) {
31013 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31014 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31015 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31016 - ioc->name, rphy, rphy->dev.release));
31017 - }
31018 -}
31019 -
31020 static inline struct sas_port *
31021 mptsas_get_port(struct mptsas_phyinfo *phy_info)
31022 {
31023 diff -urNp linux-2.6.32.41/drivers/message/fusion/mptscsih.c linux-2.6.32.41/drivers/message/fusion/mptscsih.c
31024 --- linux-2.6.32.41/drivers/message/fusion/mptscsih.c 2011-03-27 14:31:47.000000000 -0400
31025 +++ linux-2.6.32.41/drivers/message/fusion/mptscsih.c 2011-04-17 15:56:46.000000000 -0400
31026 @@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
31027
31028 h = shost_priv(SChost);
31029
31030 - if (h) {
31031 - if (h->info_kbuf == NULL)
31032 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31033 - return h->info_kbuf;
31034 - h->info_kbuf[0] = '\0';
31035 + if (!h)
31036 + return NULL;
31037
31038 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31039 - h->info_kbuf[size-1] = '\0';
31040 - }
31041 + if (h->info_kbuf == NULL)
31042 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31043 + return h->info_kbuf;
31044 + h->info_kbuf[0] = '\0';
31045 +
31046 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31047 + h->info_kbuf[size-1] = '\0';
31048
31049 return h->info_kbuf;
31050 }
31051 diff -urNp linux-2.6.32.41/drivers/message/i2o/i2o_config.c linux-2.6.32.41/drivers/message/i2o/i2o_config.c
31052 --- linux-2.6.32.41/drivers/message/i2o/i2o_config.c 2011-03-27 14:31:47.000000000 -0400
31053 +++ linux-2.6.32.41/drivers/message/i2o/i2o_config.c 2011-05-16 21:46:57.000000000 -0400
31054 @@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned lon
31055 struct i2o_message *msg;
31056 unsigned int iop;
31057
31058 + pax_track_stack();
31059 +
31060 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
31061 return -EFAULT;
31062
31063 diff -urNp linux-2.6.32.41/drivers/message/i2o/i2o_proc.c linux-2.6.32.41/drivers/message/i2o/i2o_proc.c
31064 --- linux-2.6.32.41/drivers/message/i2o/i2o_proc.c 2011-03-27 14:31:47.000000000 -0400
31065 +++ linux-2.6.32.41/drivers/message/i2o/i2o_proc.c 2011-04-17 15:56:46.000000000 -0400
31066 @@ -259,13 +259,6 @@ static char *scsi_devices[] = {
31067 "Array Controller Device"
31068 };
31069
31070 -static char *chtostr(u8 * chars, int n)
31071 -{
31072 - char tmp[256];
31073 - tmp[0] = 0;
31074 - return strncat(tmp, (char *)chars, n);
31075 -}
31076 -
31077 static int i2o_report_query_status(struct seq_file *seq, int block_status,
31078 char *group)
31079 {
31080 @@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct
31081
31082 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
31083 seq_printf(seq, "%-#8x", ddm_table.module_id);
31084 - seq_printf(seq, "%-29s",
31085 - chtostr(ddm_table.module_name_version, 28));
31086 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
31087 seq_printf(seq, "%9d ", ddm_table.data_size);
31088 seq_printf(seq, "%8d", ddm_table.code_size);
31089
31090 @@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(s
31091
31092 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
31093 seq_printf(seq, "%-#8x", dst->module_id);
31094 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
31095 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
31096 + seq_printf(seq, "%-.28s", dst->module_name_version);
31097 + seq_printf(seq, "%-.8s", dst->date);
31098 seq_printf(seq, "%8d ", dst->module_size);
31099 seq_printf(seq, "%8d ", dst->mpb_size);
31100 seq_printf(seq, "0x%04x", dst->module_flags);
31101 @@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(str
31102 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
31103 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
31104 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
31105 - seq_printf(seq, "Vendor info : %s\n",
31106 - chtostr((u8 *) (work32 + 2), 16));
31107 - seq_printf(seq, "Product info : %s\n",
31108 - chtostr((u8 *) (work32 + 6), 16));
31109 - seq_printf(seq, "Description : %s\n",
31110 - chtostr((u8 *) (work32 + 10), 16));
31111 - seq_printf(seq, "Product rev. : %s\n",
31112 - chtostr((u8 *) (work32 + 14), 8));
31113 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
31114 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
31115 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
31116 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
31117
31118 seq_printf(seq, "Serial number : ");
31119 print_serial_number(seq, (u8 *) (work32 + 16),
31120 @@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(str
31121 }
31122
31123 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
31124 - seq_printf(seq, "Module name : %s\n",
31125 - chtostr(result.module_name, 24));
31126 - seq_printf(seq, "Module revision : %s\n",
31127 - chtostr(result.module_rev, 8));
31128 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
31129 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
31130
31131 seq_printf(seq, "Serial number : ");
31132 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
31133 @@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq
31134 return 0;
31135 }
31136
31137 - seq_printf(seq, "Device name : %s\n",
31138 - chtostr(result.device_name, 64));
31139 - seq_printf(seq, "Service name : %s\n",
31140 - chtostr(result.service_name, 64));
31141 - seq_printf(seq, "Physical name : %s\n",
31142 - chtostr(result.physical_location, 64));
31143 - seq_printf(seq, "Instance number : %s\n",
31144 - chtostr(result.instance_number, 4));
31145 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
31146 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
31147 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
31148 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
31149
31150 return 0;
31151 }
31152 diff -urNp linux-2.6.32.41/drivers/message/i2o/iop.c linux-2.6.32.41/drivers/message/i2o/iop.c
31153 --- linux-2.6.32.41/drivers/message/i2o/iop.c 2011-03-27 14:31:47.000000000 -0400
31154 +++ linux-2.6.32.41/drivers/message/i2o/iop.c 2011-05-04 17:56:28.000000000 -0400
31155 @@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
31156
31157 spin_lock_irqsave(&c->context_list_lock, flags);
31158
31159 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
31160 - atomic_inc(&c->context_list_counter);
31161 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
31162 + atomic_inc_unchecked(&c->context_list_counter);
31163
31164 - entry->context = atomic_read(&c->context_list_counter);
31165 + entry->context = atomic_read_unchecked(&c->context_list_counter);
31166
31167 list_add(&entry->list, &c->context_list);
31168
31169 @@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(voi
31170
31171 #if BITS_PER_LONG == 64
31172 spin_lock_init(&c->context_list_lock);
31173 - atomic_set(&c->context_list_counter, 0);
31174 + atomic_set_unchecked(&c->context_list_counter, 0);
31175 INIT_LIST_HEAD(&c->context_list);
31176 #endif
31177
31178 diff -urNp linux-2.6.32.41/drivers/mfd/wm8350-i2c.c linux-2.6.32.41/drivers/mfd/wm8350-i2c.c
31179 --- linux-2.6.32.41/drivers/mfd/wm8350-i2c.c 2011-03-27 14:31:47.000000000 -0400
31180 +++ linux-2.6.32.41/drivers/mfd/wm8350-i2c.c 2011-05-16 21:46:57.000000000 -0400
31181 @@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struc
31182 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
31183 int ret;
31184
31185 + pax_track_stack();
31186 +
31187 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
31188 return -EINVAL;
31189
31190 diff -urNp linux-2.6.32.41/drivers/misc/kgdbts.c linux-2.6.32.41/drivers/misc/kgdbts.c
31191 --- linux-2.6.32.41/drivers/misc/kgdbts.c 2011-03-27 14:31:47.000000000 -0400
31192 +++ linux-2.6.32.41/drivers/misc/kgdbts.c 2011-04-17 15:56:46.000000000 -0400
31193 @@ -118,7 +118,7 @@
31194 } while (0)
31195 #define MAX_CONFIG_LEN 40
31196
31197 -static struct kgdb_io kgdbts_io_ops;
31198 +static const struct kgdb_io kgdbts_io_ops;
31199 static char get_buf[BUFMAX];
31200 static int get_buf_cnt;
31201 static char put_buf[BUFMAX];
31202 @@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void
31203 module_put(THIS_MODULE);
31204 }
31205
31206 -static struct kgdb_io kgdbts_io_ops = {
31207 +static const struct kgdb_io kgdbts_io_ops = {
31208 .name = "kgdbts",
31209 .read_char = kgdbts_get_char,
31210 .write_char = kgdbts_put_char,
31211 diff -urNp linux-2.6.32.41/drivers/misc/sgi-gru/gruhandles.c linux-2.6.32.41/drivers/misc/sgi-gru/gruhandles.c
31212 --- linux-2.6.32.41/drivers/misc/sgi-gru/gruhandles.c 2011-03-27 14:31:47.000000000 -0400
31213 +++ linux-2.6.32.41/drivers/misc/sgi-gru/gruhandles.c 2011-04-17 15:56:46.000000000 -0400
31214 @@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistic
31215
31216 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
31217 {
31218 - atomic_long_inc(&mcs_op_statistics[op].count);
31219 - atomic_long_add(clks, &mcs_op_statistics[op].total);
31220 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
31221 + atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
31222 if (mcs_op_statistics[op].max < clks)
31223 mcs_op_statistics[op].max = clks;
31224 }
31225 diff -urNp linux-2.6.32.41/drivers/misc/sgi-gru/gruprocfs.c linux-2.6.32.41/drivers/misc/sgi-gru/gruprocfs.c
31226 --- linux-2.6.32.41/drivers/misc/sgi-gru/gruprocfs.c 2011-03-27 14:31:47.000000000 -0400
31227 +++ linux-2.6.32.41/drivers/misc/sgi-gru/gruprocfs.c 2011-04-17 15:56:46.000000000 -0400
31228 @@ -32,9 +32,9 @@
31229
31230 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
31231
31232 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
31233 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
31234 {
31235 - unsigned long val = atomic_long_read(v);
31236 + unsigned long val = atomic_long_read_unchecked(v);
31237
31238 if (val)
31239 seq_printf(s, "%16lu %s\n", val, id);
31240 @@ -136,8 +136,8 @@ static int mcs_statistics_show(struct se
31241 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
31242
31243 for (op = 0; op < mcsop_last; op++) {
31244 - count = atomic_long_read(&mcs_op_statistics[op].count);
31245 - total = atomic_long_read(&mcs_op_statistics[op].total);
31246 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
31247 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
31248 max = mcs_op_statistics[op].max;
31249 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
31250 count ? total / count : 0, max);
31251 diff -urNp linux-2.6.32.41/drivers/misc/sgi-gru/grutables.h linux-2.6.32.41/drivers/misc/sgi-gru/grutables.h
31252 --- linux-2.6.32.41/drivers/misc/sgi-gru/grutables.h 2011-03-27 14:31:47.000000000 -0400
31253 +++ linux-2.6.32.41/drivers/misc/sgi-gru/grutables.h 2011-04-17 15:56:46.000000000 -0400
31254 @@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
31255 * GRU statistics.
31256 */
31257 struct gru_stats_s {
31258 - atomic_long_t vdata_alloc;
31259 - atomic_long_t vdata_free;
31260 - atomic_long_t gts_alloc;
31261 - atomic_long_t gts_free;
31262 - atomic_long_t vdata_double_alloc;
31263 - atomic_long_t gts_double_allocate;
31264 - atomic_long_t assign_context;
31265 - atomic_long_t assign_context_failed;
31266 - atomic_long_t free_context;
31267 - atomic_long_t load_user_context;
31268 - atomic_long_t load_kernel_context;
31269 - atomic_long_t lock_kernel_context;
31270 - atomic_long_t unlock_kernel_context;
31271 - atomic_long_t steal_user_context;
31272 - atomic_long_t steal_kernel_context;
31273 - atomic_long_t steal_context_failed;
31274 - atomic_long_t nopfn;
31275 - atomic_long_t break_cow;
31276 - atomic_long_t asid_new;
31277 - atomic_long_t asid_next;
31278 - atomic_long_t asid_wrap;
31279 - atomic_long_t asid_reuse;
31280 - atomic_long_t intr;
31281 - atomic_long_t intr_mm_lock_failed;
31282 - atomic_long_t call_os;
31283 - atomic_long_t call_os_offnode_reference;
31284 - atomic_long_t call_os_check_for_bug;
31285 - atomic_long_t call_os_wait_queue;
31286 - atomic_long_t user_flush_tlb;
31287 - atomic_long_t user_unload_context;
31288 - atomic_long_t user_exception;
31289 - atomic_long_t set_context_option;
31290 - atomic_long_t migrate_check;
31291 - atomic_long_t migrated_retarget;
31292 - atomic_long_t migrated_unload;
31293 - atomic_long_t migrated_unload_delay;
31294 - atomic_long_t migrated_nopfn_retarget;
31295 - atomic_long_t migrated_nopfn_unload;
31296 - atomic_long_t tlb_dropin;
31297 - atomic_long_t tlb_dropin_fail_no_asid;
31298 - atomic_long_t tlb_dropin_fail_upm;
31299 - atomic_long_t tlb_dropin_fail_invalid;
31300 - atomic_long_t tlb_dropin_fail_range_active;
31301 - atomic_long_t tlb_dropin_fail_idle;
31302 - atomic_long_t tlb_dropin_fail_fmm;
31303 - atomic_long_t tlb_dropin_fail_no_exception;
31304 - atomic_long_t tlb_dropin_fail_no_exception_war;
31305 - atomic_long_t tfh_stale_on_fault;
31306 - atomic_long_t mmu_invalidate_range;
31307 - atomic_long_t mmu_invalidate_page;
31308 - atomic_long_t mmu_clear_flush_young;
31309 - atomic_long_t flush_tlb;
31310 - atomic_long_t flush_tlb_gru;
31311 - atomic_long_t flush_tlb_gru_tgh;
31312 - atomic_long_t flush_tlb_gru_zero_asid;
31313 -
31314 - atomic_long_t copy_gpa;
31315 -
31316 - atomic_long_t mesq_receive;
31317 - atomic_long_t mesq_receive_none;
31318 - atomic_long_t mesq_send;
31319 - atomic_long_t mesq_send_failed;
31320 - atomic_long_t mesq_noop;
31321 - atomic_long_t mesq_send_unexpected_error;
31322 - atomic_long_t mesq_send_lb_overflow;
31323 - atomic_long_t mesq_send_qlimit_reached;
31324 - atomic_long_t mesq_send_amo_nacked;
31325 - atomic_long_t mesq_send_put_nacked;
31326 - atomic_long_t mesq_qf_not_full;
31327 - atomic_long_t mesq_qf_locked;
31328 - atomic_long_t mesq_qf_noop_not_full;
31329 - atomic_long_t mesq_qf_switch_head_failed;
31330 - atomic_long_t mesq_qf_unexpected_error;
31331 - atomic_long_t mesq_noop_unexpected_error;
31332 - atomic_long_t mesq_noop_lb_overflow;
31333 - atomic_long_t mesq_noop_qlimit_reached;
31334 - atomic_long_t mesq_noop_amo_nacked;
31335 - atomic_long_t mesq_noop_put_nacked;
31336 + atomic_long_unchecked_t vdata_alloc;
31337 + atomic_long_unchecked_t vdata_free;
31338 + atomic_long_unchecked_t gts_alloc;
31339 + atomic_long_unchecked_t gts_free;
31340 + atomic_long_unchecked_t vdata_double_alloc;
31341 + atomic_long_unchecked_t gts_double_allocate;
31342 + atomic_long_unchecked_t assign_context;
31343 + atomic_long_unchecked_t assign_context_failed;
31344 + atomic_long_unchecked_t free_context;
31345 + atomic_long_unchecked_t load_user_context;
31346 + atomic_long_unchecked_t load_kernel_context;
31347 + atomic_long_unchecked_t lock_kernel_context;
31348 + atomic_long_unchecked_t unlock_kernel_context;
31349 + atomic_long_unchecked_t steal_user_context;
31350 + atomic_long_unchecked_t steal_kernel_context;
31351 + atomic_long_unchecked_t steal_context_failed;
31352 + atomic_long_unchecked_t nopfn;
31353 + atomic_long_unchecked_t break_cow;
31354 + atomic_long_unchecked_t asid_new;
31355 + atomic_long_unchecked_t asid_next;
31356 + atomic_long_unchecked_t asid_wrap;
31357 + atomic_long_unchecked_t asid_reuse;
31358 + atomic_long_unchecked_t intr;
31359 + atomic_long_unchecked_t intr_mm_lock_failed;
31360 + atomic_long_unchecked_t call_os;
31361 + atomic_long_unchecked_t call_os_offnode_reference;
31362 + atomic_long_unchecked_t call_os_check_for_bug;
31363 + atomic_long_unchecked_t call_os_wait_queue;
31364 + atomic_long_unchecked_t user_flush_tlb;
31365 + atomic_long_unchecked_t user_unload_context;
31366 + atomic_long_unchecked_t user_exception;
31367 + atomic_long_unchecked_t set_context_option;
31368 + atomic_long_unchecked_t migrate_check;
31369 + atomic_long_unchecked_t migrated_retarget;
31370 + atomic_long_unchecked_t migrated_unload;
31371 + atomic_long_unchecked_t migrated_unload_delay;
31372 + atomic_long_unchecked_t migrated_nopfn_retarget;
31373 + atomic_long_unchecked_t migrated_nopfn_unload;
31374 + atomic_long_unchecked_t tlb_dropin;
31375 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
31376 + atomic_long_unchecked_t tlb_dropin_fail_upm;
31377 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
31378 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
31379 + atomic_long_unchecked_t tlb_dropin_fail_idle;
31380 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
31381 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
31382 + atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
31383 + atomic_long_unchecked_t tfh_stale_on_fault;
31384 + atomic_long_unchecked_t mmu_invalidate_range;
31385 + atomic_long_unchecked_t mmu_invalidate_page;
31386 + atomic_long_unchecked_t mmu_clear_flush_young;
31387 + atomic_long_unchecked_t flush_tlb;
31388 + atomic_long_unchecked_t flush_tlb_gru;
31389 + atomic_long_unchecked_t flush_tlb_gru_tgh;
31390 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
31391 +
31392 + atomic_long_unchecked_t copy_gpa;
31393 +
31394 + atomic_long_unchecked_t mesq_receive;
31395 + atomic_long_unchecked_t mesq_receive_none;
31396 + atomic_long_unchecked_t mesq_send;
31397 + atomic_long_unchecked_t mesq_send_failed;
31398 + atomic_long_unchecked_t mesq_noop;
31399 + atomic_long_unchecked_t mesq_send_unexpected_error;
31400 + atomic_long_unchecked_t mesq_send_lb_overflow;
31401 + atomic_long_unchecked_t mesq_send_qlimit_reached;
31402 + atomic_long_unchecked_t mesq_send_amo_nacked;
31403 + atomic_long_unchecked_t mesq_send_put_nacked;
31404 + atomic_long_unchecked_t mesq_qf_not_full;
31405 + atomic_long_unchecked_t mesq_qf_locked;
31406 + atomic_long_unchecked_t mesq_qf_noop_not_full;
31407 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
31408 + atomic_long_unchecked_t mesq_qf_unexpected_error;
31409 + atomic_long_unchecked_t mesq_noop_unexpected_error;
31410 + atomic_long_unchecked_t mesq_noop_lb_overflow;
31411 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
31412 + atomic_long_unchecked_t mesq_noop_amo_nacked;
31413 + atomic_long_unchecked_t mesq_noop_put_nacked;
31414
31415 };
31416
31417 @@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start
31418 cchop_deallocate, tghop_invalidate, mcsop_last};
31419
31420 struct mcs_op_statistic {
31421 - atomic_long_t count;
31422 - atomic_long_t total;
31423 + atomic_long_unchecked_t count;
31424 + atomic_long_unchecked_t total;
31425 unsigned long max;
31426 };
31427
31428 @@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_st
31429
31430 #define STAT(id) do { \
31431 if (gru_options & OPT_STATS) \
31432 - atomic_long_inc(&gru_stats.id); \
31433 + atomic_long_inc_unchecked(&gru_stats.id); \
31434 } while (0)
31435
31436 #ifdef CONFIG_SGI_GRU_DEBUG
31437 diff -urNp linux-2.6.32.41/drivers/mtd/chips/cfi_cmdset_0001.c linux-2.6.32.41/drivers/mtd/chips/cfi_cmdset_0001.c
31438 --- linux-2.6.32.41/drivers/mtd/chips/cfi_cmdset_0001.c 2011-03-27 14:31:47.000000000 -0400
31439 +++ linux-2.6.32.41/drivers/mtd/chips/cfi_cmdset_0001.c 2011-05-16 21:46:57.000000000 -0400
31440 @@ -743,6 +743,8 @@ static int chip_ready (struct map_info *
31441 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
31442 unsigned long timeo = jiffies + HZ;
31443
31444 + pax_track_stack();
31445 +
31446 /* Prevent setting state FL_SYNCING for chip in suspended state. */
31447 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
31448 goto sleep;
31449 @@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(stru
31450 unsigned long initial_adr;
31451 int initial_len = len;
31452
31453 + pax_track_stack();
31454 +
31455 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
31456 adr += chip->start;
31457 initial_adr = adr;
31458 @@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(st
31459 int retries = 3;
31460 int ret;
31461
31462 + pax_track_stack();
31463 +
31464 adr += chip->start;
31465
31466 retry:
31467 diff -urNp linux-2.6.32.41/drivers/mtd/chips/cfi_cmdset_0020.c linux-2.6.32.41/drivers/mtd/chips/cfi_cmdset_0020.c
31468 --- linux-2.6.32.41/drivers/mtd/chips/cfi_cmdset_0020.c 2011-03-27 14:31:47.000000000 -0400
31469 +++ linux-2.6.32.41/drivers/mtd/chips/cfi_cmdset_0020.c 2011-05-16 21:46:57.000000000 -0400
31470 @@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
31471 unsigned long cmd_addr;
31472 struct cfi_private *cfi = map->fldrv_priv;
31473
31474 + pax_track_stack();
31475 +
31476 adr += chip->start;
31477
31478 /* Ensure cmd read/writes are aligned. */
31479 @@ -428,6 +430,8 @@ static inline int do_write_buffer(struct
31480 DECLARE_WAITQUEUE(wait, current);
31481 int wbufsize, z;
31482
31483 + pax_track_stack();
31484 +
31485 /* M58LW064A requires bus alignment for buffer wriets -- saw */
31486 if (adr & (map_bankwidth(map)-1))
31487 return -EINVAL;
31488 @@ -742,6 +746,8 @@ static inline int do_erase_oneblock(stru
31489 DECLARE_WAITQUEUE(wait, current);
31490 int ret = 0;
31491
31492 + pax_track_stack();
31493 +
31494 adr += chip->start;
31495
31496 /* Let's determine this according to the interleave only once */
31497 @@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struc
31498 unsigned long timeo = jiffies + HZ;
31499 DECLARE_WAITQUEUE(wait, current);
31500
31501 + pax_track_stack();
31502 +
31503 adr += chip->start;
31504
31505 /* Let's determine this according to the interleave only once */
31506 @@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(str
31507 unsigned long timeo = jiffies + HZ;
31508 DECLARE_WAITQUEUE(wait, current);
31509
31510 + pax_track_stack();
31511 +
31512 adr += chip->start;
31513
31514 /* Let's determine this according to the interleave only once */
31515 diff -urNp linux-2.6.32.41/drivers/mtd/devices/doc2000.c linux-2.6.32.41/drivers/mtd/devices/doc2000.c
31516 --- linux-2.6.32.41/drivers/mtd/devices/doc2000.c 2011-03-27 14:31:47.000000000 -0400
31517 +++ linux-2.6.32.41/drivers/mtd/devices/doc2000.c 2011-04-17 15:56:46.000000000 -0400
31518 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
31519
31520 /* The ECC will not be calculated correctly if less than 512 is written */
31521 /* DBB-
31522 - if (len != 0x200 && eccbuf)
31523 + if (len != 0x200)
31524 printk(KERN_WARNING
31525 "ECC needs a full sector write (adr: %lx size %lx)\n",
31526 (long) to, (long) len);
31527 diff -urNp linux-2.6.32.41/drivers/mtd/devices/doc2001.c linux-2.6.32.41/drivers/mtd/devices/doc2001.c
31528 --- linux-2.6.32.41/drivers/mtd/devices/doc2001.c 2011-03-27 14:31:47.000000000 -0400
31529 +++ linux-2.6.32.41/drivers/mtd/devices/doc2001.c 2011-04-17 15:56:46.000000000 -0400
31530 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
31531 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
31532
31533 /* Don't allow read past end of device */
31534 - if (from >= this->totlen)
31535 + if (from >= this->totlen || !len)
31536 return -EINVAL;
31537
31538 /* Don't allow a single read to cross a 512-byte block boundary */
31539 diff -urNp linux-2.6.32.41/drivers/mtd/ftl.c linux-2.6.32.41/drivers/mtd/ftl.c
31540 --- linux-2.6.32.41/drivers/mtd/ftl.c 2011-03-27 14:31:47.000000000 -0400
31541 +++ linux-2.6.32.41/drivers/mtd/ftl.c 2011-05-16 21:46:57.000000000 -0400
31542 @@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
31543 loff_t offset;
31544 uint16_t srcunitswap = cpu_to_le16(srcunit);
31545
31546 + pax_track_stack();
31547 +
31548 eun = &part->EUNInfo[srcunit];
31549 xfer = &part->XferInfo[xferunit];
31550 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
31551 diff -urNp linux-2.6.32.41/drivers/mtd/inftlcore.c linux-2.6.32.41/drivers/mtd/inftlcore.c
31552 --- linux-2.6.32.41/drivers/mtd/inftlcore.c 2011-03-27 14:31:47.000000000 -0400
31553 +++ linux-2.6.32.41/drivers/mtd/inftlcore.c 2011-05-16 21:46:57.000000000 -0400
31554 @@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLr
31555 struct inftl_oob oob;
31556 size_t retlen;
31557
31558 + pax_track_stack();
31559 +
31560 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
31561 "pending=%d)\n", inftl, thisVUC, pendingblock);
31562
31563 diff -urNp linux-2.6.32.41/drivers/mtd/inftlmount.c linux-2.6.32.41/drivers/mtd/inftlmount.c
31564 --- linux-2.6.32.41/drivers/mtd/inftlmount.c 2011-03-27 14:31:47.000000000 -0400
31565 +++ linux-2.6.32.41/drivers/mtd/inftlmount.c 2011-05-16 21:46:57.000000000 -0400
31566 @@ -54,6 +54,8 @@ static int find_boot_record(struct INFTL
31567 struct INFTLPartition *ip;
31568 size_t retlen;
31569
31570 + pax_track_stack();
31571 +
31572 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
31573
31574 /*
31575 diff -urNp linux-2.6.32.41/drivers/mtd/lpddr/qinfo_probe.c linux-2.6.32.41/drivers/mtd/lpddr/qinfo_probe.c
31576 --- linux-2.6.32.41/drivers/mtd/lpddr/qinfo_probe.c 2011-03-27 14:31:47.000000000 -0400
31577 +++ linux-2.6.32.41/drivers/mtd/lpddr/qinfo_probe.c 2011-05-16 21:46:57.000000000 -0400
31578 @@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
31579 {
31580 map_word pfow_val[4];
31581
31582 + pax_track_stack();
31583 +
31584 /* Check identification string */
31585 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
31586 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
31587 diff -urNp linux-2.6.32.41/drivers/mtd/mtdchar.c linux-2.6.32.41/drivers/mtd/mtdchar.c
31588 --- linux-2.6.32.41/drivers/mtd/mtdchar.c 2011-03-27 14:31:47.000000000 -0400
31589 +++ linux-2.6.32.41/drivers/mtd/mtdchar.c 2011-05-16 21:46:57.000000000 -0400
31590 @@ -460,6 +460,8 @@ static int mtd_ioctl(struct inode *inode
31591 u_long size;
31592 struct mtd_info_user info;
31593
31594 + pax_track_stack();
31595 +
31596 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
31597
31598 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
31599 diff -urNp linux-2.6.32.41/drivers/mtd/nftlcore.c linux-2.6.32.41/drivers/mtd/nftlcore.c
31600 --- linux-2.6.32.41/drivers/mtd/nftlcore.c 2011-03-27 14:31:47.000000000 -0400
31601 +++ linux-2.6.32.41/drivers/mtd/nftlcore.c 2011-05-16 21:46:57.000000000 -0400
31602 @@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLre
31603 int inplace = 1;
31604 size_t retlen;
31605
31606 + pax_track_stack();
31607 +
31608 memset(BlockMap, 0xff, sizeof(BlockMap));
31609 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
31610
31611 diff -urNp linux-2.6.32.41/drivers/mtd/nftlmount.c linux-2.6.32.41/drivers/mtd/nftlmount.c
31612 --- linux-2.6.32.41/drivers/mtd/nftlmount.c 2011-03-27 14:31:47.000000000 -0400
31613 +++ linux-2.6.32.41/drivers/mtd/nftlmount.c 2011-05-18 20:09:37.000000000 -0400
31614 @@ -23,6 +23,7 @@
31615 #include <asm/errno.h>
31616 #include <linux/delay.h>
31617 #include <linux/slab.h>
31618 +#include <linux/sched.h>
31619 #include <linux/mtd/mtd.h>
31620 #include <linux/mtd/nand.h>
31621 #include <linux/mtd/nftl.h>
31622 @@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLr
31623 struct mtd_info *mtd = nftl->mbd.mtd;
31624 unsigned int i;
31625
31626 + pax_track_stack();
31627 +
31628 /* Assume logical EraseSize == physical erasesize for starting the scan.
31629 We'll sort it out later if we find a MediaHeader which says otherwise */
31630 /* Actually, we won't. The new DiskOnChip driver has already scanned
31631 diff -urNp linux-2.6.32.41/drivers/mtd/ubi/build.c linux-2.6.32.41/drivers/mtd/ubi/build.c
31632 --- linux-2.6.32.41/drivers/mtd/ubi/build.c 2011-03-27 14:31:47.000000000 -0400
31633 +++ linux-2.6.32.41/drivers/mtd/ubi/build.c 2011-04-17 15:56:46.000000000 -0400
31634 @@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
31635 static int __init bytes_str_to_int(const char *str)
31636 {
31637 char *endp;
31638 - unsigned long result;
31639 + unsigned long result, scale = 1;
31640
31641 result = simple_strtoul(str, &endp, 0);
31642 if (str == endp || result >= INT_MAX) {
31643 @@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const
31644
31645 switch (*endp) {
31646 case 'G':
31647 - result *= 1024;
31648 + scale *= 1024;
31649 case 'M':
31650 - result *= 1024;
31651 + scale *= 1024;
31652 case 'K':
31653 - result *= 1024;
31654 + scale *= 1024;
31655 if (endp[1] == 'i' && endp[2] == 'B')
31656 endp += 2;
31657 case '\0':
31658 @@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const
31659 return -EINVAL;
31660 }
31661
31662 - return result;
31663 + if ((intoverflow_t)result*scale >= INT_MAX) {
31664 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
31665 + str);
31666 + return -EINVAL;
31667 + }
31668 +
31669 + return result*scale;
31670 }
31671
31672 /**
31673 diff -urNp linux-2.6.32.41/drivers/net/bnx2.c linux-2.6.32.41/drivers/net/bnx2.c
31674 --- linux-2.6.32.41/drivers/net/bnx2.c 2011-03-27 14:31:47.000000000 -0400
31675 +++ linux-2.6.32.41/drivers/net/bnx2.c 2011-05-16 21:46:57.000000000 -0400
31676 @@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
31677 int rc = 0;
31678 u32 magic, csum;
31679
31680 + pax_track_stack();
31681 +
31682 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
31683 goto test_nvram_done;
31684
31685 diff -urNp linux-2.6.32.41/drivers/net/cxgb3/t3_hw.c linux-2.6.32.41/drivers/net/cxgb3/t3_hw.c
31686 --- linux-2.6.32.41/drivers/net/cxgb3/t3_hw.c 2011-03-27 14:31:47.000000000 -0400
31687 +++ linux-2.6.32.41/drivers/net/cxgb3/t3_hw.c 2011-05-16 21:46:57.000000000 -0400
31688 @@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter
31689 int i, addr, ret;
31690 struct t3_vpd vpd;
31691
31692 + pax_track_stack();
31693 +
31694 /*
31695 * Card information is normally at VPD_BASE but some early cards had
31696 * it at 0.
31697 diff -urNp linux-2.6.32.41/drivers/net/e1000e/82571.c linux-2.6.32.41/drivers/net/e1000e/82571.c
31698 --- linux-2.6.32.41/drivers/net/e1000e/82571.c 2011-03-27 14:31:47.000000000 -0400
31699 +++ linux-2.6.32.41/drivers/net/e1000e/82571.c 2011-04-17 15:56:46.000000000 -0400
31700 @@ -212,6 +212,7 @@ static s32 e1000_init_mac_params_82571(s
31701 {
31702 struct e1000_hw *hw = &adapter->hw;
31703 struct e1000_mac_info *mac = &hw->mac;
31704 + /* cannot be const */
31705 struct e1000_mac_operations *func = &mac->ops;
31706 u32 swsm = 0;
31707 u32 swsm2 = 0;
31708 @@ -1656,7 +1657,7 @@ static void e1000_clear_hw_cntrs_82571(s
31709 temp = er32(ICRXDMTC);
31710 }
31711
31712 -static struct e1000_mac_operations e82571_mac_ops = {
31713 +static const struct e1000_mac_operations e82571_mac_ops = {
31714 /* .check_mng_mode: mac type dependent */
31715 /* .check_for_link: media type dependent */
31716 .id_led_init = e1000e_id_led_init,
31717 @@ -1674,7 +1675,7 @@ static struct e1000_mac_operations e8257
31718 .setup_led = e1000e_setup_led_generic,
31719 };
31720
31721 -static struct e1000_phy_operations e82_phy_ops_igp = {
31722 +static const struct e1000_phy_operations e82_phy_ops_igp = {
31723 .acquire_phy = e1000_get_hw_semaphore_82571,
31724 .check_reset_block = e1000e_check_reset_block_generic,
31725 .commit_phy = NULL,
31726 @@ -1691,7 +1692,7 @@ static struct e1000_phy_operations e82_p
31727 .cfg_on_link_up = NULL,
31728 };
31729
31730 -static struct e1000_phy_operations e82_phy_ops_m88 = {
31731 +static const struct e1000_phy_operations e82_phy_ops_m88 = {
31732 .acquire_phy = e1000_get_hw_semaphore_82571,
31733 .check_reset_block = e1000e_check_reset_block_generic,
31734 .commit_phy = e1000e_phy_sw_reset,
31735 @@ -1708,7 +1709,7 @@ static struct e1000_phy_operations e82_p
31736 .cfg_on_link_up = NULL,
31737 };
31738
31739 -static struct e1000_phy_operations e82_phy_ops_bm = {
31740 +static const struct e1000_phy_operations e82_phy_ops_bm = {
31741 .acquire_phy = e1000_get_hw_semaphore_82571,
31742 .check_reset_block = e1000e_check_reset_block_generic,
31743 .commit_phy = e1000e_phy_sw_reset,
31744 @@ -1725,7 +1726,7 @@ static struct e1000_phy_operations e82_p
31745 .cfg_on_link_up = NULL,
31746 };
31747
31748 -static struct e1000_nvm_operations e82571_nvm_ops = {
31749 +static const struct e1000_nvm_operations e82571_nvm_ops = {
31750 .acquire_nvm = e1000_acquire_nvm_82571,
31751 .read_nvm = e1000e_read_nvm_eerd,
31752 .release_nvm = e1000_release_nvm_82571,
31753 diff -urNp linux-2.6.32.41/drivers/net/e1000e/e1000.h linux-2.6.32.41/drivers/net/e1000e/e1000.h
31754 --- linux-2.6.32.41/drivers/net/e1000e/e1000.h 2011-03-27 14:31:47.000000000 -0400
31755 +++ linux-2.6.32.41/drivers/net/e1000e/e1000.h 2011-04-17 15:56:46.000000000 -0400
31756 @@ -375,9 +375,9 @@ struct e1000_info {
31757 u32 pba;
31758 u32 max_hw_frame_size;
31759 s32 (*get_variants)(struct e1000_adapter *);
31760 - struct e1000_mac_operations *mac_ops;
31761 - struct e1000_phy_operations *phy_ops;
31762 - struct e1000_nvm_operations *nvm_ops;
31763 + const struct e1000_mac_operations *mac_ops;
31764 + const struct e1000_phy_operations *phy_ops;
31765 + const struct e1000_nvm_operations *nvm_ops;
31766 };
31767
31768 /* hardware capability, feature, and workaround flags */
31769 diff -urNp linux-2.6.32.41/drivers/net/e1000e/es2lan.c linux-2.6.32.41/drivers/net/e1000e/es2lan.c
31770 --- linux-2.6.32.41/drivers/net/e1000e/es2lan.c 2011-03-27 14:31:47.000000000 -0400
31771 +++ linux-2.6.32.41/drivers/net/e1000e/es2lan.c 2011-04-17 15:56:46.000000000 -0400
31772 @@ -207,6 +207,7 @@ static s32 e1000_init_mac_params_80003es
31773 {
31774 struct e1000_hw *hw = &adapter->hw;
31775 struct e1000_mac_info *mac = &hw->mac;
31776 + /* cannot be const */
31777 struct e1000_mac_operations *func = &mac->ops;
31778
31779 /* Set media type */
31780 @@ -1365,7 +1366,7 @@ static void e1000_clear_hw_cntrs_80003es
31781 temp = er32(ICRXDMTC);
31782 }
31783
31784 -static struct e1000_mac_operations es2_mac_ops = {
31785 +static const struct e1000_mac_operations es2_mac_ops = {
31786 .id_led_init = e1000e_id_led_init,
31787 .check_mng_mode = e1000e_check_mng_mode_generic,
31788 /* check_for_link dependent on media type */
31789 @@ -1383,7 +1384,7 @@ static struct e1000_mac_operations es2_m
31790 .setup_led = e1000e_setup_led_generic,
31791 };
31792
31793 -static struct e1000_phy_operations es2_phy_ops = {
31794 +static const struct e1000_phy_operations es2_phy_ops = {
31795 .acquire_phy = e1000_acquire_phy_80003es2lan,
31796 .check_reset_block = e1000e_check_reset_block_generic,
31797 .commit_phy = e1000e_phy_sw_reset,
31798 @@ -1400,7 +1401,7 @@ static struct e1000_phy_operations es2_p
31799 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
31800 };
31801
31802 -static struct e1000_nvm_operations es2_nvm_ops = {
31803 +static const struct e1000_nvm_operations es2_nvm_ops = {
31804 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
31805 .read_nvm = e1000e_read_nvm_eerd,
31806 .release_nvm = e1000_release_nvm_80003es2lan,
31807 diff -urNp linux-2.6.32.41/drivers/net/e1000e/hw.h linux-2.6.32.41/drivers/net/e1000e/hw.h
31808 --- linux-2.6.32.41/drivers/net/e1000e/hw.h 2011-03-27 14:31:47.000000000 -0400
31809 +++ linux-2.6.32.41/drivers/net/e1000e/hw.h 2011-04-17 15:56:46.000000000 -0400
31810 @@ -756,34 +756,34 @@ struct e1000_mac_operations {
31811
31812 /* Function pointers for the PHY. */
31813 struct e1000_phy_operations {
31814 - s32 (*acquire_phy)(struct e1000_hw *);
31815 - s32 (*check_polarity)(struct e1000_hw *);
31816 - s32 (*check_reset_block)(struct e1000_hw *);
31817 - s32 (*commit_phy)(struct e1000_hw *);
31818 - s32 (*force_speed_duplex)(struct e1000_hw *);
31819 - s32 (*get_cfg_done)(struct e1000_hw *hw);
31820 - s32 (*get_cable_length)(struct e1000_hw *);
31821 - s32 (*get_phy_info)(struct e1000_hw *);
31822 - s32 (*read_phy_reg)(struct e1000_hw *, u32, u16 *);
31823 - s32 (*read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
31824 - void (*release_phy)(struct e1000_hw *);
31825 - s32 (*reset_phy)(struct e1000_hw *);
31826 - s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
31827 - s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
31828 - s32 (*write_phy_reg)(struct e1000_hw *, u32, u16);
31829 - s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
31830 - s32 (*cfg_on_link_up)(struct e1000_hw *);
31831 + s32 (* acquire_phy)(struct e1000_hw *);
31832 + s32 (* check_polarity)(struct e1000_hw *);
31833 + s32 (* check_reset_block)(struct e1000_hw *);
31834 + s32 (* commit_phy)(struct e1000_hw *);
31835 + s32 (* force_speed_duplex)(struct e1000_hw *);
31836 + s32 (* get_cfg_done)(struct e1000_hw *hw);
31837 + s32 (* get_cable_length)(struct e1000_hw *);
31838 + s32 (* get_phy_info)(struct e1000_hw *);
31839 + s32 (* read_phy_reg)(struct e1000_hw *, u32, u16 *);
31840 + s32 (* read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
31841 + void (* release_phy)(struct e1000_hw *);
31842 + s32 (* reset_phy)(struct e1000_hw *);
31843 + s32 (* set_d0_lplu_state)(struct e1000_hw *, bool);
31844 + s32 (* set_d3_lplu_state)(struct e1000_hw *, bool);
31845 + s32 (* write_phy_reg)(struct e1000_hw *, u32, u16);
31846 + s32 (* write_phy_reg_locked)(struct e1000_hw *, u32, u16);
31847 + s32 (* cfg_on_link_up)(struct e1000_hw *);
31848 };
31849
31850 /* Function pointers for the NVM. */
31851 struct e1000_nvm_operations {
31852 - s32 (*acquire_nvm)(struct e1000_hw *);
31853 - s32 (*read_nvm)(struct e1000_hw *, u16, u16, u16 *);
31854 - void (*release_nvm)(struct e1000_hw *);
31855 - s32 (*update_nvm)(struct e1000_hw *);
31856 - s32 (*valid_led_default)(struct e1000_hw *, u16 *);
31857 - s32 (*validate_nvm)(struct e1000_hw *);
31858 - s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
31859 + s32 (* const acquire_nvm)(struct e1000_hw *);
31860 + s32 (* const read_nvm)(struct e1000_hw *, u16, u16, u16 *);
31861 + void (* const release_nvm)(struct e1000_hw *);
31862 + s32 (* const update_nvm)(struct e1000_hw *);
31863 + s32 (* const valid_led_default)(struct e1000_hw *, u16 *);
31864 + s32 (* const validate_nvm)(struct e1000_hw *);
31865 + s32 (* const write_nvm)(struct e1000_hw *, u16, u16, u16 *);
31866 };
31867
31868 struct e1000_mac_info {
31869 diff -urNp linux-2.6.32.41/drivers/net/e1000e/ich8lan.c linux-2.6.32.41/drivers/net/e1000e/ich8lan.c
31870 --- linux-2.6.32.41/drivers/net/e1000e/ich8lan.c 2011-05-10 22:12:01.000000000 -0400
31871 +++ linux-2.6.32.41/drivers/net/e1000e/ich8lan.c 2011-05-10 22:12:32.000000000 -0400
31872 @@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan
31873 }
31874 }
31875
31876 -static struct e1000_mac_operations ich8_mac_ops = {
31877 +static const struct e1000_mac_operations ich8_mac_ops = {
31878 .id_led_init = e1000e_id_led_init,
31879 .check_mng_mode = e1000_check_mng_mode_ich8lan,
31880 .check_for_link = e1000_check_for_copper_link_ich8lan,
31881 @@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_
31882 /* id_led_init dependent on mac type */
31883 };
31884
31885 -static struct e1000_phy_operations ich8_phy_ops = {
31886 +static const struct e1000_phy_operations ich8_phy_ops = {
31887 .acquire_phy = e1000_acquire_swflag_ich8lan,
31888 .check_reset_block = e1000_check_reset_block_ich8lan,
31889 .commit_phy = NULL,
31890 @@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_
31891 .write_phy_reg = e1000e_write_phy_reg_igp,
31892 };
31893
31894 -static struct e1000_nvm_operations ich8_nvm_ops = {
31895 +static const struct e1000_nvm_operations ich8_nvm_ops = {
31896 .acquire_nvm = e1000_acquire_nvm_ich8lan,
31897 .read_nvm = e1000_read_nvm_ich8lan,
31898 .release_nvm = e1000_release_nvm_ich8lan,
31899 diff -urNp linux-2.6.32.41/drivers/net/hamradio/6pack.c linux-2.6.32.41/drivers/net/hamradio/6pack.c
31900 --- linux-2.6.32.41/drivers/net/hamradio/6pack.c 2011-03-27 14:31:47.000000000 -0400
31901 +++ linux-2.6.32.41/drivers/net/hamradio/6pack.c 2011-05-16 21:46:57.000000000 -0400
31902 @@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct t
31903 unsigned char buf[512];
31904 int count1;
31905
31906 + pax_track_stack();
31907 +
31908 if (!count)
31909 return;
31910
31911 diff -urNp linux-2.6.32.41/drivers/net/ibmveth.c linux-2.6.32.41/drivers/net/ibmveth.c
31912 --- linux-2.6.32.41/drivers/net/ibmveth.c 2011-03-27 14:31:47.000000000 -0400
31913 +++ linux-2.6.32.41/drivers/net/ibmveth.c 2011-04-17 15:56:46.000000000 -0400
31914 @@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attr
31915 NULL,
31916 };
31917
31918 -static struct sysfs_ops veth_pool_ops = {
31919 +static const struct sysfs_ops veth_pool_ops = {
31920 .show = veth_pool_show,
31921 .store = veth_pool_store,
31922 };
31923 diff -urNp linux-2.6.32.41/drivers/net/igb/e1000_82575.c linux-2.6.32.41/drivers/net/igb/e1000_82575.c
31924 --- linux-2.6.32.41/drivers/net/igb/e1000_82575.c 2011-03-27 14:31:47.000000000 -0400
31925 +++ linux-2.6.32.41/drivers/net/igb/e1000_82575.c 2011-04-17 15:56:46.000000000 -0400
31926 @@ -1410,7 +1410,7 @@ void igb_vmdq_set_replication_pf(struct
31927 wr32(E1000_VT_CTL, vt_ctl);
31928 }
31929
31930 -static struct e1000_mac_operations e1000_mac_ops_82575 = {
31931 +static const struct e1000_mac_operations e1000_mac_ops_82575 = {
31932 .reset_hw = igb_reset_hw_82575,
31933 .init_hw = igb_init_hw_82575,
31934 .check_for_link = igb_check_for_link_82575,
31935 @@ -1419,13 +1419,13 @@ static struct e1000_mac_operations e1000
31936 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
31937 };
31938
31939 -static struct e1000_phy_operations e1000_phy_ops_82575 = {
31940 +static const struct e1000_phy_operations e1000_phy_ops_82575 = {
31941 .acquire = igb_acquire_phy_82575,
31942 .get_cfg_done = igb_get_cfg_done_82575,
31943 .release = igb_release_phy_82575,
31944 };
31945
31946 -static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
31947 +static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
31948 .acquire = igb_acquire_nvm_82575,
31949 .read = igb_read_nvm_eerd,
31950 .release = igb_release_nvm_82575,
31951 diff -urNp linux-2.6.32.41/drivers/net/igb/e1000_hw.h linux-2.6.32.41/drivers/net/igb/e1000_hw.h
31952 --- linux-2.6.32.41/drivers/net/igb/e1000_hw.h 2011-03-27 14:31:47.000000000 -0400
31953 +++ linux-2.6.32.41/drivers/net/igb/e1000_hw.h 2011-04-17 15:56:46.000000000 -0400
31954 @@ -305,17 +305,17 @@ struct e1000_phy_operations {
31955 };
31956
31957 struct e1000_nvm_operations {
31958 - s32 (*acquire)(struct e1000_hw *);
31959 - s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
31960 - void (*release)(struct e1000_hw *);
31961 - s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
31962 + s32 (* const acquire)(struct e1000_hw *);
31963 + s32 (* const read)(struct e1000_hw *, u16, u16, u16 *);
31964 + void (* const release)(struct e1000_hw *);
31965 + s32 (* const write)(struct e1000_hw *, u16, u16, u16 *);
31966 };
31967
31968 struct e1000_info {
31969 s32 (*get_invariants)(struct e1000_hw *);
31970 - struct e1000_mac_operations *mac_ops;
31971 - struct e1000_phy_operations *phy_ops;
31972 - struct e1000_nvm_operations *nvm_ops;
31973 + const struct e1000_mac_operations *mac_ops;
31974 + const struct e1000_phy_operations *phy_ops;
31975 + const struct e1000_nvm_operations *nvm_ops;
31976 };
31977
31978 extern const struct e1000_info e1000_82575_info;
31979 diff -urNp linux-2.6.32.41/drivers/net/iseries_veth.c linux-2.6.32.41/drivers/net/iseries_veth.c
31980 --- linux-2.6.32.41/drivers/net/iseries_veth.c 2011-03-27 14:31:47.000000000 -0400
31981 +++ linux-2.6.32.41/drivers/net/iseries_veth.c 2011-04-17 15:56:46.000000000 -0400
31982 @@ -384,7 +384,7 @@ static struct attribute *veth_cnx_defaul
31983 NULL
31984 };
31985
31986 -static struct sysfs_ops veth_cnx_sysfs_ops = {
31987 +static const struct sysfs_ops veth_cnx_sysfs_ops = {
31988 .show = veth_cnx_attribute_show
31989 };
31990
31991 @@ -441,7 +441,7 @@ static struct attribute *veth_port_defau
31992 NULL
31993 };
31994
31995 -static struct sysfs_ops veth_port_sysfs_ops = {
31996 +static const struct sysfs_ops veth_port_sysfs_ops = {
31997 .show = veth_port_attribute_show
31998 };
31999
32000 diff -urNp linux-2.6.32.41/drivers/net/ixgb/ixgb_main.c linux-2.6.32.41/drivers/net/ixgb/ixgb_main.c
32001 --- linux-2.6.32.41/drivers/net/ixgb/ixgb_main.c 2011-03-27 14:31:47.000000000 -0400
32002 +++ linux-2.6.32.41/drivers/net/ixgb/ixgb_main.c 2011-05-16 21:46:57.000000000 -0400
32003 @@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev
32004 u32 rctl;
32005 int i;
32006
32007 + pax_track_stack();
32008 +
32009 /* Check for Promiscuous and All Multicast modes */
32010
32011 rctl = IXGB_READ_REG(hw, RCTL);
32012 diff -urNp linux-2.6.32.41/drivers/net/ixgb/ixgb_param.c linux-2.6.32.41/drivers/net/ixgb/ixgb_param.c
32013 --- linux-2.6.32.41/drivers/net/ixgb/ixgb_param.c 2011-03-27 14:31:47.000000000 -0400
32014 +++ linux-2.6.32.41/drivers/net/ixgb/ixgb_param.c 2011-05-16 21:46:57.000000000 -0400
32015 @@ -260,6 +260,9 @@ void __devinit
32016 ixgb_check_options(struct ixgb_adapter *adapter)
32017 {
32018 int bd = adapter->bd_number;
32019 +
32020 + pax_track_stack();
32021 +
32022 if (bd >= IXGB_MAX_NIC) {
32023 printk(KERN_NOTICE
32024 "Warning: no configuration for board #%i\n", bd);
32025 diff -urNp linux-2.6.32.41/drivers/net/mlx4/main.c linux-2.6.32.41/drivers/net/mlx4/main.c
32026 --- linux-2.6.32.41/drivers/net/mlx4/main.c 2011-03-27 14:31:47.000000000 -0400
32027 +++ linux-2.6.32.41/drivers/net/mlx4/main.c 2011-05-18 20:09:37.000000000 -0400
32028 @@ -38,6 +38,7 @@
32029 #include <linux/errno.h>
32030 #include <linux/pci.h>
32031 #include <linux/dma-mapping.h>
32032 +#include <linux/sched.h>
32033
32034 #include <linux/mlx4/device.h>
32035 #include <linux/mlx4/doorbell.h>
32036 @@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev
32037 u64 icm_size;
32038 int err;
32039
32040 + pax_track_stack();
32041 +
32042 err = mlx4_QUERY_FW(dev);
32043 if (err) {
32044 if (err == -EACCES)
32045 diff -urNp linux-2.6.32.41/drivers/net/niu.c linux-2.6.32.41/drivers/net/niu.c
32046 --- linux-2.6.32.41/drivers/net/niu.c 2011-05-10 22:12:01.000000000 -0400
32047 +++ linux-2.6.32.41/drivers/net/niu.c 2011-05-16 21:46:57.000000000 -0400
32048 @@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struc
32049 int i, num_irqs, err;
32050 u8 first_ldg;
32051
32052 + pax_track_stack();
32053 +
32054 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
32055 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
32056 ldg_num_map[i] = first_ldg + i;
32057 diff -urNp linux-2.6.32.41/drivers/net/pcnet32.c linux-2.6.32.41/drivers/net/pcnet32.c
32058 --- linux-2.6.32.41/drivers/net/pcnet32.c 2011-03-27 14:31:47.000000000 -0400
32059 +++ linux-2.6.32.41/drivers/net/pcnet32.c 2011-04-17 15:56:46.000000000 -0400
32060 @@ -79,7 +79,7 @@ static int cards_found;
32061 /*
32062 * VLB I/O addresses
32063 */
32064 -static unsigned int pcnet32_portlist[] __initdata =
32065 +static unsigned int pcnet32_portlist[] __devinitdata =
32066 { 0x300, 0x320, 0x340, 0x360, 0 };
32067
32068 static int pcnet32_debug = 0;
32069 diff -urNp linux-2.6.32.41/drivers/net/tg3.h linux-2.6.32.41/drivers/net/tg3.h
32070 --- linux-2.6.32.41/drivers/net/tg3.h 2011-03-27 14:31:47.000000000 -0400
32071 +++ linux-2.6.32.41/drivers/net/tg3.h 2011-04-17 15:56:46.000000000 -0400
32072 @@ -95,6 +95,7 @@
32073 #define CHIPREV_ID_5750_A0 0x4000
32074 #define CHIPREV_ID_5750_A1 0x4001
32075 #define CHIPREV_ID_5750_A3 0x4003
32076 +#define CHIPREV_ID_5750_C1 0x4201
32077 #define CHIPREV_ID_5750_C2 0x4202
32078 #define CHIPREV_ID_5752_A0_HW 0x5000
32079 #define CHIPREV_ID_5752_A0 0x6000
32080 diff -urNp linux-2.6.32.41/drivers/net/tulip/de2104x.c linux-2.6.32.41/drivers/net/tulip/de2104x.c
32081 --- linux-2.6.32.41/drivers/net/tulip/de2104x.c 2011-03-27 14:31:47.000000000 -0400
32082 +++ linux-2.6.32.41/drivers/net/tulip/de2104x.c 2011-05-16 21:46:57.000000000 -0400
32083 @@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_i
32084 struct de_srom_info_leaf *il;
32085 void *bufp;
32086
32087 + pax_track_stack();
32088 +
32089 /* download entire eeprom */
32090 for (i = 0; i < DE_EEPROM_WORDS; i++)
32091 ((__le16 *)ee_data)[i] =
32092 diff -urNp linux-2.6.32.41/drivers/net/tulip/de4x5.c linux-2.6.32.41/drivers/net/tulip/de4x5.c
32093 --- linux-2.6.32.41/drivers/net/tulip/de4x5.c 2011-03-27 14:31:47.000000000 -0400
32094 +++ linux-2.6.32.41/drivers/net/tulip/de4x5.c 2011-04-17 15:56:46.000000000 -0400
32095 @@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, stru
32096 for (i=0; i<ETH_ALEN; i++) {
32097 tmp.addr[i] = dev->dev_addr[i];
32098 }
32099 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32100 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32101 break;
32102
32103 case DE4X5_SET_HWADDR: /* Set the hardware address */
32104 @@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, stru
32105 spin_lock_irqsave(&lp->lock, flags);
32106 memcpy(&statbuf, &lp->pktStats, ioc->len);
32107 spin_unlock_irqrestore(&lp->lock, flags);
32108 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
32109 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
32110 return -EFAULT;
32111 break;
32112 }
32113 diff -urNp linux-2.6.32.41/drivers/net/usb/hso.c linux-2.6.32.41/drivers/net/usb/hso.c
32114 --- linux-2.6.32.41/drivers/net/usb/hso.c 2011-03-27 14:31:47.000000000 -0400
32115 +++ linux-2.6.32.41/drivers/net/usb/hso.c 2011-04-17 15:56:46.000000000 -0400
32116 @@ -71,7 +71,7 @@
32117 #include <asm/byteorder.h>
32118 #include <linux/serial_core.h>
32119 #include <linux/serial.h>
32120 -
32121 +#include <asm/local.h>
32122
32123 #define DRIVER_VERSION "1.2"
32124 #define MOD_AUTHOR "Option Wireless"
32125 @@ -258,7 +258,7 @@ struct hso_serial {
32126
32127 /* from usb_serial_port */
32128 struct tty_struct *tty;
32129 - int open_count;
32130 + local_t open_count;
32131 spinlock_t serial_lock;
32132
32133 int (*write_data) (struct hso_serial *serial);
32134 @@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_
32135 struct urb *urb;
32136
32137 urb = serial->rx_urb[0];
32138 - if (serial->open_count > 0) {
32139 + if (local_read(&serial->open_count) > 0) {
32140 count = put_rxbuf_data(urb, serial);
32141 if (count == -1)
32142 return;
32143 @@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_cal
32144 DUMP1(urb->transfer_buffer, urb->actual_length);
32145
32146 /* Anyone listening? */
32147 - if (serial->open_count == 0)
32148 + if (local_read(&serial->open_count) == 0)
32149 return;
32150
32151 if (status == 0) {
32152 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
32153 spin_unlock_irq(&serial->serial_lock);
32154
32155 /* check for port already opened, if not set the termios */
32156 - serial->open_count++;
32157 - if (serial->open_count == 1) {
32158 + if (local_inc_return(&serial->open_count) == 1) {
32159 tty->low_latency = 1;
32160 serial->rx_state = RX_IDLE;
32161 /* Force default termio settings */
32162 @@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_st
32163 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
32164 if (result) {
32165 hso_stop_serial_device(serial->parent);
32166 - serial->open_count--;
32167 + local_dec(&serial->open_count);
32168 kref_put(&serial->parent->ref, hso_serial_ref_free);
32169 }
32170 } else {
32171 @@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_
32172
32173 /* reset the rts and dtr */
32174 /* do the actual close */
32175 - serial->open_count--;
32176 + local_dec(&serial->open_count);
32177
32178 - if (serial->open_count <= 0) {
32179 - serial->open_count = 0;
32180 + if (local_read(&serial->open_count) <= 0) {
32181 + local_set(&serial->open_count, 0);
32182 spin_lock_irq(&serial->serial_lock);
32183 if (serial->tty == tty) {
32184 serial->tty->driver_data = NULL;
32185 @@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struc
32186
32187 /* the actual setup */
32188 spin_lock_irqsave(&serial->serial_lock, flags);
32189 - if (serial->open_count)
32190 + if (local_read(&serial->open_count))
32191 _hso_serial_set_termios(tty, old);
32192 else
32193 tty->termios = old;
32194 @@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interfa
32195 /* Start all serial ports */
32196 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
32197 if (serial_table[i] && (serial_table[i]->interface == iface)) {
32198 - if (dev2ser(serial_table[i])->open_count) {
32199 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
32200 result =
32201 hso_start_serial_device(serial_table[i], GFP_NOIO);
32202 hso_kick_transmit(dev2ser(serial_table[i]));
32203 diff -urNp linux-2.6.32.41/drivers/net/vxge/vxge-main.c linux-2.6.32.41/drivers/net/vxge/vxge-main.c
32204 --- linux-2.6.32.41/drivers/net/vxge/vxge-main.c 2011-03-27 14:31:47.000000000 -0400
32205 +++ linux-2.6.32.41/drivers/net/vxge/vxge-main.c 2011-05-16 21:46:57.000000000 -0400
32206 @@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_T
32207 struct sk_buff *completed[NR_SKB_COMPLETED];
32208 int more;
32209
32210 + pax_track_stack();
32211 +
32212 do {
32213 more = 0;
32214 skb_ptr = completed;
32215 @@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_conf
32216 u8 mtable[256] = {0}; /* CPU to vpath mapping */
32217 int index;
32218
32219 + pax_track_stack();
32220 +
32221 /*
32222 * Filling
32223 * - itable with bucket numbers
32224 diff -urNp linux-2.6.32.41/drivers/net/wan/cycx_x25.c linux-2.6.32.41/drivers/net/wan/cycx_x25.c
32225 --- linux-2.6.32.41/drivers/net/wan/cycx_x25.c 2011-03-27 14:31:47.000000000 -0400
32226 +++ linux-2.6.32.41/drivers/net/wan/cycx_x25.c 2011-05-16 21:46:57.000000000 -0400
32227 @@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned
32228 unsigned char hex[1024],
32229 * phex = hex;
32230
32231 + pax_track_stack();
32232 +
32233 if (len >= (sizeof(hex) / 2))
32234 len = (sizeof(hex) / 2) - 1;
32235
32236 diff -urNp linux-2.6.32.41/drivers/net/wimax/i2400m/usb-fw.c linux-2.6.32.41/drivers/net/wimax/i2400m/usb-fw.c
32237 --- linux-2.6.32.41/drivers/net/wimax/i2400m/usb-fw.c 2011-03-27 14:31:47.000000000 -0400
32238 +++ linux-2.6.32.41/drivers/net/wimax/i2400m/usb-fw.c 2011-05-16 21:46:57.000000000 -0400
32239 @@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
32240 int do_autopm = 1;
32241 DECLARE_COMPLETION_ONSTACK(notif_completion);
32242
32243 + pax_track_stack();
32244 +
32245 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
32246 i2400m, ack, ack_size);
32247 BUG_ON(_ack == i2400m->bm_ack_buf);
32248 diff -urNp linux-2.6.32.41/drivers/net/wireless/airo.c linux-2.6.32.41/drivers/net/wireless/airo.c
32249 --- linux-2.6.32.41/drivers/net/wireless/airo.c 2011-03-27 14:31:47.000000000 -0400
32250 +++ linux-2.6.32.41/drivers/net/wireless/airo.c 2011-05-16 21:46:57.000000000 -0400
32251 @@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
32252 BSSListElement * loop_net;
32253 BSSListElement * tmp_net;
32254
32255 + pax_track_stack();
32256 +
32257 /* Blow away current list of scan results */
32258 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
32259 list_move_tail (&loop_net->list, &ai->network_free_list);
32260 @@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *
32261 WepKeyRid wkr;
32262 int rc;
32263
32264 + pax_track_stack();
32265 +
32266 memset( &mySsid, 0, sizeof( mySsid ) );
32267 kfree (ai->flash);
32268 ai->flash = NULL;
32269 @@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct i
32270 __le32 *vals = stats.vals;
32271 int len;
32272
32273 + pax_track_stack();
32274 +
32275 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
32276 return -ENOMEM;
32277 data = (struct proc_data *)file->private_data;
32278 @@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct ino
32279 /* If doLoseSync is not 1, we won't do a Lose Sync */
32280 int doLoseSync = -1;
32281
32282 + pax_track_stack();
32283 +
32284 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
32285 return -ENOMEM;
32286 data = (struct proc_data *)file->private_data;
32287 @@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_de
32288 int i;
32289 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
32290
32291 + pax_track_stack();
32292 +
32293 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
32294 if (!qual)
32295 return -ENOMEM;
32296 @@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(str
32297 CapabilityRid cap_rid;
32298 __le32 *vals = stats_rid.vals;
32299
32300 + pax_track_stack();
32301 +
32302 /* Get stats out of the card */
32303 clear_bit(JOB_WSTATS, &local->jobs);
32304 if (local->power.event) {
32305 diff -urNp linux-2.6.32.41/drivers/net/wireless/ath/ath5k/debug.c linux-2.6.32.41/drivers/net/wireless/ath/ath5k/debug.c
32306 --- linux-2.6.32.41/drivers/net/wireless/ath/ath5k/debug.c 2011-03-27 14:31:47.000000000 -0400
32307 +++ linux-2.6.32.41/drivers/net/wireless/ath/ath5k/debug.c 2011-05-16 21:46:57.000000000 -0400
32308 @@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct f
32309 unsigned int v;
32310 u64 tsf;
32311
32312 + pax_track_stack();
32313 +
32314 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
32315 len += snprintf(buf+len, sizeof(buf)-len,
32316 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
32317 @@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct fi
32318 unsigned int len = 0;
32319 unsigned int i;
32320
32321 + pax_track_stack();
32322 +
32323 len += snprintf(buf+len, sizeof(buf)-len,
32324 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
32325
32326 diff -urNp linux-2.6.32.41/drivers/net/wireless/ath/ath9k/debug.c linux-2.6.32.41/drivers/net/wireless/ath/ath9k/debug.c
32327 --- linux-2.6.32.41/drivers/net/wireless/ath/ath9k/debug.c 2011-03-27 14:31:47.000000000 -0400
32328 +++ linux-2.6.32.41/drivers/net/wireless/ath/ath9k/debug.c 2011-05-16 21:46:57.000000000 -0400
32329 @@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struc
32330 char buf[512];
32331 unsigned int len = 0;
32332
32333 + pax_track_stack();
32334 +
32335 len += snprintf(buf + len, sizeof(buf) - len,
32336 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
32337 len += snprintf(buf + len, sizeof(buf) - len,
32338 @@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct fi
32339 int i;
32340 u8 addr[ETH_ALEN];
32341
32342 + pax_track_stack();
32343 +
32344 len += snprintf(buf + len, sizeof(buf) - len,
32345 "primary: %s (%s chan=%d ht=%d)\n",
32346 wiphy_name(sc->pri_wiphy->hw->wiphy),
32347 diff -urNp linux-2.6.32.41/drivers/net/wireless/b43/debugfs.c linux-2.6.32.41/drivers/net/wireless/b43/debugfs.c
32348 --- linux-2.6.32.41/drivers/net/wireless/b43/debugfs.c 2011-03-27 14:31:47.000000000 -0400
32349 +++ linux-2.6.32.41/drivers/net/wireless/b43/debugfs.c 2011-04-17 15:56:46.000000000 -0400
32350 @@ -43,7 +43,7 @@ static struct dentry *rootdir;
32351 struct b43_debugfs_fops {
32352 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
32353 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
32354 - struct file_operations fops;
32355 + const struct file_operations fops;
32356 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
32357 size_t file_struct_offset;
32358 };
32359 diff -urNp linux-2.6.32.41/drivers/net/wireless/b43legacy/debugfs.c linux-2.6.32.41/drivers/net/wireless/b43legacy/debugfs.c
32360 --- linux-2.6.32.41/drivers/net/wireless/b43legacy/debugfs.c 2011-03-27 14:31:47.000000000 -0400
32361 +++ linux-2.6.32.41/drivers/net/wireless/b43legacy/debugfs.c 2011-04-17 15:56:46.000000000 -0400
32362 @@ -44,7 +44,7 @@ static struct dentry *rootdir;
32363 struct b43legacy_debugfs_fops {
32364 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
32365 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
32366 - struct file_operations fops;
32367 + const struct file_operations fops;
32368 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
32369 size_t file_struct_offset;
32370 /* Take wl->irq_lock before calling read/write? */
32371 diff -urNp linux-2.6.32.41/drivers/net/wireless/ipw2x00/ipw2100.c linux-2.6.32.41/drivers/net/wireless/ipw2x00/ipw2100.c
32372 --- linux-2.6.32.41/drivers/net/wireless/ipw2x00/ipw2100.c 2011-03-27 14:31:47.000000000 -0400
32373 +++ linux-2.6.32.41/drivers/net/wireless/ipw2x00/ipw2100.c 2011-05-16 21:46:57.000000000 -0400
32374 @@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2
32375 int err;
32376 DECLARE_SSID_BUF(ssid);
32377
32378 + pax_track_stack();
32379 +
32380 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
32381
32382 if (ssid_len)
32383 @@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw210
32384 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
32385 int err;
32386
32387 + pax_track_stack();
32388 +
32389 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
32390 idx, keylen, len);
32391
32392 diff -urNp linux-2.6.32.41/drivers/net/wireless/ipw2x00/libipw_rx.c linux-2.6.32.41/drivers/net/wireless/ipw2x00/libipw_rx.c
32393 --- linux-2.6.32.41/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-03-27 14:31:47.000000000 -0400
32394 +++ linux-2.6.32.41/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-05-16 21:46:57.000000000 -0400
32395 @@ -1566,6 +1566,8 @@ static void libipw_process_probe_respons
32396 unsigned long flags;
32397 DECLARE_SSID_BUF(ssid);
32398
32399 + pax_track_stack();
32400 +
32401 LIBIPW_DEBUG_SCAN("'%s' (%pM"
32402 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
32403 print_ssid(ssid, info_element->data, info_element->len),
32404 diff -urNp linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-1000.c linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-1000.c
32405 --- linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-03-27 14:31:47.000000000 -0400
32406 +++ linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-04-17 15:56:46.000000000 -0400
32407 @@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib =
32408 },
32409 };
32410
32411 -static struct iwl_ops iwl1000_ops = {
32412 +static const struct iwl_ops iwl1000_ops = {
32413 .ucode = &iwl5000_ucode,
32414 .lib = &iwl1000_lib,
32415 .hcmd = &iwl5000_hcmd,
32416 diff -urNp linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-3945.c linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-3945.c
32417 --- linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-03-27 14:31:47.000000000 -0400
32418 +++ linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-04-17 15:56:46.000000000 -0400
32419 @@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945
32420 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
32421 };
32422
32423 -static struct iwl_ops iwl3945_ops = {
32424 +static const struct iwl_ops iwl3945_ops = {
32425 .ucode = &iwl3945_ucode,
32426 .lib = &iwl3945_lib,
32427 .hcmd = &iwl3945_hcmd,
32428 diff -urNp linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-4965.c linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-4965.c
32429 --- linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-03-27 14:31:47.000000000 -0400
32430 +++ linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-04-17 15:56:46.000000000 -0400
32431 @@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib =
32432 },
32433 };
32434
32435 -static struct iwl_ops iwl4965_ops = {
32436 +static const struct iwl_ops iwl4965_ops = {
32437 .ucode = &iwl4965_ucode,
32438 .lib = &iwl4965_lib,
32439 .hcmd = &iwl4965_hcmd,
32440 diff -urNp linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-5000.c linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-5000.c
32441 --- linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-05-10 22:12:01.000000000 -0400
32442 +++ linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-05-10 22:12:32.000000000 -0400
32443 @@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib =
32444 },
32445 };
32446
32447 -struct iwl_ops iwl5000_ops = {
32448 +const struct iwl_ops iwl5000_ops = {
32449 .ucode = &iwl5000_ucode,
32450 .lib = &iwl5000_lib,
32451 .hcmd = &iwl5000_hcmd,
32452 .utils = &iwl5000_hcmd_utils,
32453 };
32454
32455 -static struct iwl_ops iwl5150_ops = {
32456 +static const struct iwl_ops iwl5150_ops = {
32457 .ucode = &iwl5000_ucode,
32458 .lib = &iwl5150_lib,
32459 .hcmd = &iwl5000_hcmd,
32460 diff -urNp linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-6000.c linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-6000.c
32461 --- linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-03-27 14:31:47.000000000 -0400
32462 +++ linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-04-17 15:56:46.000000000 -0400
32463 @@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000
32464 .calc_rssi = iwl5000_calc_rssi,
32465 };
32466
32467 -static struct iwl_ops iwl6000_ops = {
32468 +static const struct iwl_ops iwl6000_ops = {
32469 .ucode = &iwl5000_ucode,
32470 .lib = &iwl6000_lib,
32471 .hcmd = &iwl5000_hcmd,
32472 diff -urNp linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
32473 --- linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-03-27 14:31:47.000000000 -0400
32474 +++ linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-05-16 21:46:57.000000000 -0400
32475 @@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, s
32476 u8 active_index = 0;
32477 s32 tpt = 0;
32478
32479 + pax_track_stack();
32480 +
32481 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
32482
32483 if (!ieee80211_is_data(hdr->frame_control) ||
32484 @@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_
32485 u8 valid_tx_ant = 0;
32486 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
32487
32488 + pax_track_stack();
32489 +
32490 /* Override starting rate (index 0) if needed for debug purposes */
32491 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
32492
32493 diff -urNp linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-debugfs.c
32494 --- linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-03-27 14:31:47.000000000 -0400
32495 +++ linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-05-16 21:46:57.000000000 -0400
32496 @@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(str
32497 int pos = 0;
32498 const size_t bufsz = sizeof(buf);
32499
32500 + pax_track_stack();
32501 +
32502 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
32503 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
32504 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
32505 @@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
32506 const size_t bufsz = sizeof(buf);
32507 ssize_t ret;
32508
32509 + pax_track_stack();
32510 +
32511 for (i = 0; i < AC_NUM; i++) {
32512 pos += scnprintf(buf + pos, bufsz - pos,
32513 "\tcw_min\tcw_max\taifsn\ttxop\n");
32514 diff -urNp linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-debug.h linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-debug.h
32515 --- linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-03-27 14:31:47.000000000 -0400
32516 +++ linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-04-17 15:56:46.000000000 -0400
32517 @@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_pri
32518 #endif
32519
32520 #else
32521 -#define IWL_DEBUG(__priv, level, fmt, args...)
32522 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
32523 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
32524 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
32525 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
32526 void *p, u32 len)
32527 {}
32528 diff -urNp linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-dev.h linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-dev.h
32529 --- linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-03-27 14:31:47.000000000 -0400
32530 +++ linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-04-17 15:56:46.000000000 -0400
32531 @@ -68,7 +68,7 @@ struct iwl_tx_queue;
32532
32533 /* shared structures from iwl-5000.c */
32534 extern struct iwl_mod_params iwl50_mod_params;
32535 -extern struct iwl_ops iwl5000_ops;
32536 +extern const struct iwl_ops iwl5000_ops;
32537 extern struct iwl_ucode_ops iwl5000_ucode;
32538 extern struct iwl_lib_ops iwl5000_lib;
32539 extern struct iwl_hcmd_ops iwl5000_hcmd;
32540 diff -urNp linux-2.6.32.41/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-2.6.32.41/drivers/net/wireless/iwmc3200wifi/debugfs.c
32541 --- linux-2.6.32.41/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-03-27 14:31:47.000000000 -0400
32542 +++ linux-2.6.32.41/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-05-16 21:46:57.000000000 -0400
32543 @@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
32544 int buf_len = 512;
32545 size_t len = 0;
32546
32547 + pax_track_stack();
32548 +
32549 if (*ppos != 0)
32550 return 0;
32551 if (count < sizeof(buf))
32552 diff -urNp linux-2.6.32.41/drivers/net/wireless/libertas/debugfs.c linux-2.6.32.41/drivers/net/wireless/libertas/debugfs.c
32553 --- linux-2.6.32.41/drivers/net/wireless/libertas/debugfs.c 2011-03-27 14:31:47.000000000 -0400
32554 +++ linux-2.6.32.41/drivers/net/wireless/libertas/debugfs.c 2011-04-17 15:56:46.000000000 -0400
32555 @@ -708,7 +708,7 @@ out_unlock:
32556 struct lbs_debugfs_files {
32557 const char *name;
32558 int perm;
32559 - struct file_operations fops;
32560 + const struct file_operations fops;
32561 };
32562
32563 static const struct lbs_debugfs_files debugfs_files[] = {
32564 diff -urNp linux-2.6.32.41/drivers/net/wireless/rndis_wlan.c linux-2.6.32.41/drivers/net/wireless/rndis_wlan.c
32565 --- linux-2.6.32.41/drivers/net/wireless/rndis_wlan.c 2011-03-27 14:31:47.000000000 -0400
32566 +++ linux-2.6.32.41/drivers/net/wireless/rndis_wlan.c 2011-04-17 15:56:46.000000000 -0400
32567 @@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbn
32568
32569 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
32570
32571 - if (rts_threshold < 0 || rts_threshold > 2347)
32572 + if (rts_threshold > 2347)
32573 rts_threshold = 2347;
32574
32575 tmp = cpu_to_le32(rts_threshold);
32576 diff -urNp linux-2.6.32.41/drivers/oprofile/buffer_sync.c linux-2.6.32.41/drivers/oprofile/buffer_sync.c
32577 --- linux-2.6.32.41/drivers/oprofile/buffer_sync.c 2011-03-27 14:31:47.000000000 -0400
32578 +++ linux-2.6.32.41/drivers/oprofile/buffer_sync.c 2011-04-17 15:56:46.000000000 -0400
32579 @@ -341,7 +341,7 @@ static void add_data(struct op_entry *en
32580 if (cookie == NO_COOKIE)
32581 offset = pc;
32582 if (cookie == INVALID_COOKIE) {
32583 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
32584 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
32585 offset = pc;
32586 }
32587 if (cookie != last_cookie) {
32588 @@ -385,14 +385,14 @@ add_sample(struct mm_struct *mm, struct
32589 /* add userspace sample */
32590
32591 if (!mm) {
32592 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
32593 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
32594 return 0;
32595 }
32596
32597 cookie = lookup_dcookie(mm, s->eip, &offset);
32598
32599 if (cookie == INVALID_COOKIE) {
32600 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
32601 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
32602 return 0;
32603 }
32604
32605 @@ -561,7 +561,7 @@ void sync_buffer(int cpu)
32606 /* ignore backtraces if failed to add a sample */
32607 if (state == sb_bt_start) {
32608 state = sb_bt_ignore;
32609 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
32610 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
32611 }
32612 }
32613 release_mm(mm);
32614 diff -urNp linux-2.6.32.41/drivers/oprofile/event_buffer.c linux-2.6.32.41/drivers/oprofile/event_buffer.c
32615 --- linux-2.6.32.41/drivers/oprofile/event_buffer.c 2011-03-27 14:31:47.000000000 -0400
32616 +++ linux-2.6.32.41/drivers/oprofile/event_buffer.c 2011-04-17 15:56:46.000000000 -0400
32617 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
32618 }
32619
32620 if (buffer_pos == buffer_size) {
32621 - atomic_inc(&oprofile_stats.event_lost_overflow);
32622 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
32623 return;
32624 }
32625
32626 diff -urNp linux-2.6.32.41/drivers/oprofile/oprof.c linux-2.6.32.41/drivers/oprofile/oprof.c
32627 --- linux-2.6.32.41/drivers/oprofile/oprof.c 2011-03-27 14:31:47.000000000 -0400
32628 +++ linux-2.6.32.41/drivers/oprofile/oprof.c 2011-04-17 15:56:46.000000000 -0400
32629 @@ -110,7 +110,7 @@ static void switch_worker(struct work_st
32630 if (oprofile_ops.switch_events())
32631 return;
32632
32633 - atomic_inc(&oprofile_stats.multiplex_counter);
32634 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
32635 start_switch_worker();
32636 }
32637
32638 diff -urNp linux-2.6.32.41/drivers/oprofile/oprofilefs.c linux-2.6.32.41/drivers/oprofile/oprofilefs.c
32639 --- linux-2.6.32.41/drivers/oprofile/oprofilefs.c 2011-03-27 14:31:47.000000000 -0400
32640 +++ linux-2.6.32.41/drivers/oprofile/oprofilefs.c 2011-04-17 15:56:46.000000000 -0400
32641 @@ -187,7 +187,7 @@ static const struct file_operations atom
32642
32643
32644 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
32645 - char const *name, atomic_t *val)
32646 + char const *name, atomic_unchecked_t *val)
32647 {
32648 struct dentry *d = __oprofilefs_create_file(sb, root, name,
32649 &atomic_ro_fops, 0444);
32650 diff -urNp linux-2.6.32.41/drivers/oprofile/oprofile_stats.c linux-2.6.32.41/drivers/oprofile/oprofile_stats.c
32651 --- linux-2.6.32.41/drivers/oprofile/oprofile_stats.c 2011-03-27 14:31:47.000000000 -0400
32652 +++ linux-2.6.32.41/drivers/oprofile/oprofile_stats.c 2011-04-17 15:56:46.000000000 -0400
32653 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
32654 cpu_buf->sample_invalid_eip = 0;
32655 }
32656
32657 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
32658 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
32659 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
32660 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
32661 - atomic_set(&oprofile_stats.multiplex_counter, 0);
32662 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
32663 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
32664 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
32665 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
32666 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
32667 }
32668
32669
32670 diff -urNp linux-2.6.32.41/drivers/oprofile/oprofile_stats.h linux-2.6.32.41/drivers/oprofile/oprofile_stats.h
32671 --- linux-2.6.32.41/drivers/oprofile/oprofile_stats.h 2011-03-27 14:31:47.000000000 -0400
32672 +++ linux-2.6.32.41/drivers/oprofile/oprofile_stats.h 2011-04-17 15:56:46.000000000 -0400
32673 @@ -13,11 +13,11 @@
32674 #include <asm/atomic.h>
32675
32676 struct oprofile_stat_struct {
32677 - atomic_t sample_lost_no_mm;
32678 - atomic_t sample_lost_no_mapping;
32679 - atomic_t bt_lost_no_mapping;
32680 - atomic_t event_lost_overflow;
32681 - atomic_t multiplex_counter;
32682 + atomic_unchecked_t sample_lost_no_mm;
32683 + atomic_unchecked_t sample_lost_no_mapping;
32684 + atomic_unchecked_t bt_lost_no_mapping;
32685 + atomic_unchecked_t event_lost_overflow;
32686 + atomic_unchecked_t multiplex_counter;
32687 };
32688
32689 extern struct oprofile_stat_struct oprofile_stats;
32690 diff -urNp linux-2.6.32.41/drivers/parisc/pdc_stable.c linux-2.6.32.41/drivers/parisc/pdc_stable.c
32691 --- linux-2.6.32.41/drivers/parisc/pdc_stable.c 2011-03-27 14:31:47.000000000 -0400
32692 +++ linux-2.6.32.41/drivers/parisc/pdc_stable.c 2011-04-17 15:56:46.000000000 -0400
32693 @@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj
32694 return ret;
32695 }
32696
32697 -static struct sysfs_ops pdcspath_attr_ops = {
32698 +static const struct sysfs_ops pdcspath_attr_ops = {
32699 .show = pdcspath_attr_show,
32700 .store = pdcspath_attr_store,
32701 };
32702 diff -urNp linux-2.6.32.41/drivers/parport/procfs.c linux-2.6.32.41/drivers/parport/procfs.c
32703 --- linux-2.6.32.41/drivers/parport/procfs.c 2011-03-27 14:31:47.000000000 -0400
32704 +++ linux-2.6.32.41/drivers/parport/procfs.c 2011-04-17 15:56:46.000000000 -0400
32705 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
32706
32707 *ppos += len;
32708
32709 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
32710 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
32711 }
32712
32713 #ifdef CONFIG_PARPORT_1284
32714 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
32715
32716 *ppos += len;
32717
32718 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
32719 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
32720 }
32721 #endif /* IEEE1284.3 support. */
32722
32723 diff -urNp linux-2.6.32.41/drivers/pci/hotplug/acpiphp_glue.c linux-2.6.32.41/drivers/pci/hotplug/acpiphp_glue.c
32724 --- linux-2.6.32.41/drivers/pci/hotplug/acpiphp_glue.c 2011-03-27 14:31:47.000000000 -0400
32725 +++ linux-2.6.32.41/drivers/pci/hotplug/acpiphp_glue.c 2011-04-17 15:56:46.000000000 -0400
32726 @@ -111,7 +111,7 @@ static int post_dock_fixups(struct notif
32727 }
32728
32729
32730 -static struct acpi_dock_ops acpiphp_dock_ops = {
32731 +static const struct acpi_dock_ops acpiphp_dock_ops = {
32732 .handler = handle_hotplug_event_func,
32733 };
32734
32735 diff -urNp linux-2.6.32.41/drivers/pci/hotplug/cpqphp_nvram.c linux-2.6.32.41/drivers/pci/hotplug/cpqphp_nvram.c
32736 --- linux-2.6.32.41/drivers/pci/hotplug/cpqphp_nvram.c 2011-03-27 14:31:47.000000000 -0400
32737 +++ linux-2.6.32.41/drivers/pci/hotplug/cpqphp_nvram.c 2011-04-17 15:56:46.000000000 -0400
32738 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
32739
32740 void compaq_nvram_init (void __iomem *rom_start)
32741 {
32742 +
32743 +#ifndef CONFIG_PAX_KERNEXEC
32744 if (rom_start) {
32745 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
32746 }
32747 +#endif
32748 +
32749 dbg("int15 entry = %p\n", compaq_int15_entry_point);
32750
32751 /* initialize our int15 lock */
32752 diff -urNp linux-2.6.32.41/drivers/pci/hotplug/fakephp.c linux-2.6.32.41/drivers/pci/hotplug/fakephp.c
32753 --- linux-2.6.32.41/drivers/pci/hotplug/fakephp.c 2011-03-27 14:31:47.000000000 -0400
32754 +++ linux-2.6.32.41/drivers/pci/hotplug/fakephp.c 2011-04-17 15:56:46.000000000 -0400
32755 @@ -73,7 +73,7 @@ static void legacy_release(struct kobjec
32756 }
32757
32758 static struct kobj_type legacy_ktype = {
32759 - .sysfs_ops = &(struct sysfs_ops){
32760 + .sysfs_ops = &(const struct sysfs_ops){
32761 .store = legacy_store, .show = legacy_show
32762 },
32763 .release = &legacy_release,
32764 diff -urNp linux-2.6.32.41/drivers/pci/intel-iommu.c linux-2.6.32.41/drivers/pci/intel-iommu.c
32765 --- linux-2.6.32.41/drivers/pci/intel-iommu.c 2011-05-10 22:12:01.000000000 -0400
32766 +++ linux-2.6.32.41/drivers/pci/intel-iommu.c 2011-05-10 22:12:33.000000000 -0400
32767 @@ -2643,7 +2643,7 @@ error:
32768 return 0;
32769 }
32770
32771 -static dma_addr_t intel_map_page(struct device *dev, struct page *page,
32772 +dma_addr_t intel_map_page(struct device *dev, struct page *page,
32773 unsigned long offset, size_t size,
32774 enum dma_data_direction dir,
32775 struct dma_attrs *attrs)
32776 @@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain
32777 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
32778 }
32779
32780 -static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
32781 +void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
32782 size_t size, enum dma_data_direction dir,
32783 struct dma_attrs *attrs)
32784 {
32785 @@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct devi
32786 }
32787 }
32788
32789 -static void *intel_alloc_coherent(struct device *hwdev, size_t size,
32790 +void *intel_alloc_coherent(struct device *hwdev, size_t size,
32791 dma_addr_t *dma_handle, gfp_t flags)
32792 {
32793 void *vaddr;
32794 @@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct
32795 return NULL;
32796 }
32797
32798 -static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
32799 +void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
32800 dma_addr_t dma_handle)
32801 {
32802 int order;
32803 @@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct d
32804 free_pages((unsigned long)vaddr, order);
32805 }
32806
32807 -static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
32808 +void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
32809 int nelems, enum dma_data_direction dir,
32810 struct dma_attrs *attrs)
32811 {
32812 @@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(str
32813 return nelems;
32814 }
32815
32816 -static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
32817 +int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
32818 enum dma_data_direction dir, struct dma_attrs *attrs)
32819 {
32820 int i;
32821 @@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *h
32822 return nelems;
32823 }
32824
32825 -static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
32826 +int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
32827 {
32828 return !dma_addr;
32829 }
32830
32831 -struct dma_map_ops intel_dma_ops = {
32832 +const struct dma_map_ops intel_dma_ops = {
32833 .alloc_coherent = intel_alloc_coherent,
32834 .free_coherent = intel_free_coherent,
32835 .map_sg = intel_map_sg,
32836 diff -urNp linux-2.6.32.41/drivers/pci/pcie/aspm.c linux-2.6.32.41/drivers/pci/pcie/aspm.c
32837 --- linux-2.6.32.41/drivers/pci/pcie/aspm.c 2011-03-27 14:31:47.000000000 -0400
32838 +++ linux-2.6.32.41/drivers/pci/pcie/aspm.c 2011-04-17 15:56:46.000000000 -0400
32839 @@ -27,9 +27,9 @@
32840 #define MODULE_PARAM_PREFIX "pcie_aspm."
32841
32842 /* Note: those are not register definitions */
32843 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
32844 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
32845 -#define ASPM_STATE_L1 (4) /* L1 state */
32846 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
32847 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
32848 +#define ASPM_STATE_L1 (4U) /* L1 state */
32849 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
32850 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
32851
32852 diff -urNp linux-2.6.32.41/drivers/pci/probe.c linux-2.6.32.41/drivers/pci/probe.c
32853 --- linux-2.6.32.41/drivers/pci/probe.c 2011-03-27 14:31:47.000000000 -0400
32854 +++ linux-2.6.32.41/drivers/pci/probe.c 2011-04-17 15:56:46.000000000 -0400
32855 @@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(
32856 return ret;
32857 }
32858
32859 -static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
32860 +static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
32861 struct device_attribute *attr,
32862 char *buf)
32863 {
32864 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
32865 }
32866
32867 -static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
32868 +static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
32869 struct device_attribute *attr,
32870 char *buf)
32871 {
32872 diff -urNp linux-2.6.32.41/drivers/pci/proc.c linux-2.6.32.41/drivers/pci/proc.c
32873 --- linux-2.6.32.41/drivers/pci/proc.c 2011-03-27 14:31:47.000000000 -0400
32874 +++ linux-2.6.32.41/drivers/pci/proc.c 2011-04-17 15:56:46.000000000 -0400
32875 @@ -480,7 +480,16 @@ static const struct file_operations proc
32876 static int __init pci_proc_init(void)
32877 {
32878 struct pci_dev *dev = NULL;
32879 +
32880 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
32881 +#ifdef CONFIG_GRKERNSEC_PROC_USER
32882 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
32883 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
32884 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
32885 +#endif
32886 +#else
32887 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
32888 +#endif
32889 proc_create("devices", 0, proc_bus_pci_dir,
32890 &proc_bus_pci_dev_operations);
32891 proc_initialized = 1;
32892 diff -urNp linux-2.6.32.41/drivers/pci/slot.c linux-2.6.32.41/drivers/pci/slot.c
32893 --- linux-2.6.32.41/drivers/pci/slot.c 2011-03-27 14:31:47.000000000 -0400
32894 +++ linux-2.6.32.41/drivers/pci/slot.c 2011-04-17 15:56:46.000000000 -0400
32895 @@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struc
32896 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
32897 }
32898
32899 -static struct sysfs_ops pci_slot_sysfs_ops = {
32900 +static const struct sysfs_ops pci_slot_sysfs_ops = {
32901 .show = pci_slot_attr_show,
32902 .store = pci_slot_attr_store,
32903 };
32904 diff -urNp linux-2.6.32.41/drivers/pcmcia/pcmcia_ioctl.c linux-2.6.32.41/drivers/pcmcia/pcmcia_ioctl.c
32905 --- linux-2.6.32.41/drivers/pcmcia/pcmcia_ioctl.c 2011-03-27 14:31:47.000000000 -0400
32906 +++ linux-2.6.32.41/drivers/pcmcia/pcmcia_ioctl.c 2011-04-17 15:56:46.000000000 -0400
32907 @@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode
32908 return -EFAULT;
32909 }
32910 }
32911 - buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
32912 + buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
32913 if (!buf)
32914 return -ENOMEM;
32915
32916 diff -urNp linux-2.6.32.41/drivers/platform/x86/acer-wmi.c linux-2.6.32.41/drivers/platform/x86/acer-wmi.c
32917 --- linux-2.6.32.41/drivers/platform/x86/acer-wmi.c 2011-03-27 14:31:47.000000000 -0400
32918 +++ linux-2.6.32.41/drivers/platform/x86/acer-wmi.c 2011-04-17 15:56:46.000000000 -0400
32919 @@ -918,7 +918,7 @@ static int update_bl_status(struct backl
32920 return 0;
32921 }
32922
32923 -static struct backlight_ops acer_bl_ops = {
32924 +static const struct backlight_ops acer_bl_ops = {
32925 .get_brightness = read_brightness,
32926 .update_status = update_bl_status,
32927 };
32928 diff -urNp linux-2.6.32.41/drivers/platform/x86/asus_acpi.c linux-2.6.32.41/drivers/platform/x86/asus_acpi.c
32929 --- linux-2.6.32.41/drivers/platform/x86/asus_acpi.c 2011-03-27 14:31:47.000000000 -0400
32930 +++ linux-2.6.32.41/drivers/platform/x86/asus_acpi.c 2011-04-17 15:56:46.000000000 -0400
32931 @@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_
32932 return 0;
32933 }
32934
32935 -static struct backlight_ops asus_backlight_data = {
32936 +static const struct backlight_ops asus_backlight_data = {
32937 .get_brightness = read_brightness,
32938 .update_status = set_brightness_status,
32939 };
32940 diff -urNp linux-2.6.32.41/drivers/platform/x86/asus-laptop.c linux-2.6.32.41/drivers/platform/x86/asus-laptop.c
32941 --- linux-2.6.32.41/drivers/platform/x86/asus-laptop.c 2011-03-27 14:31:47.000000000 -0400
32942 +++ linux-2.6.32.41/drivers/platform/x86/asus-laptop.c 2011-04-17 15:56:46.000000000 -0400
32943 @@ -250,7 +250,7 @@ static struct backlight_device *asus_bac
32944 */
32945 static int read_brightness(struct backlight_device *bd);
32946 static int update_bl_status(struct backlight_device *bd);
32947 -static struct backlight_ops asusbl_ops = {
32948 +static const struct backlight_ops asusbl_ops = {
32949 .get_brightness = read_brightness,
32950 .update_status = update_bl_status,
32951 };
32952 diff -urNp linux-2.6.32.41/drivers/platform/x86/compal-laptop.c linux-2.6.32.41/drivers/platform/x86/compal-laptop.c
32953 --- linux-2.6.32.41/drivers/platform/x86/compal-laptop.c 2011-03-27 14:31:47.000000000 -0400
32954 +++ linux-2.6.32.41/drivers/platform/x86/compal-laptop.c 2011-04-17 15:56:46.000000000 -0400
32955 @@ -163,7 +163,7 @@ static int bl_update_status(struct backl
32956 return set_lcd_level(b->props.brightness);
32957 }
32958
32959 -static struct backlight_ops compalbl_ops = {
32960 +static const struct backlight_ops compalbl_ops = {
32961 .get_brightness = bl_get_brightness,
32962 .update_status = bl_update_status,
32963 };
32964 diff -urNp linux-2.6.32.41/drivers/platform/x86/dell-laptop.c linux-2.6.32.41/drivers/platform/x86/dell-laptop.c
32965 --- linux-2.6.32.41/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:01.000000000 -0400
32966 +++ linux-2.6.32.41/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:33.000000000 -0400
32967 @@ -318,7 +318,7 @@ static int dell_get_intensity(struct bac
32968 return buffer.output[1];
32969 }
32970
32971 -static struct backlight_ops dell_ops = {
32972 +static const struct backlight_ops dell_ops = {
32973 .get_brightness = dell_get_intensity,
32974 .update_status = dell_send_intensity,
32975 };
32976 diff -urNp linux-2.6.32.41/drivers/platform/x86/eeepc-laptop.c linux-2.6.32.41/drivers/platform/x86/eeepc-laptop.c
32977 --- linux-2.6.32.41/drivers/platform/x86/eeepc-laptop.c 2011-03-27 14:31:47.000000000 -0400
32978 +++ linux-2.6.32.41/drivers/platform/x86/eeepc-laptop.c 2011-04-17 15:56:46.000000000 -0400
32979 @@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device
32980 */
32981 static int read_brightness(struct backlight_device *bd);
32982 static int update_bl_status(struct backlight_device *bd);
32983 -static struct backlight_ops eeepcbl_ops = {
32984 +static const struct backlight_ops eeepcbl_ops = {
32985 .get_brightness = read_brightness,
32986 .update_status = update_bl_status,
32987 };
32988 diff -urNp linux-2.6.32.41/drivers/platform/x86/fujitsu-laptop.c linux-2.6.32.41/drivers/platform/x86/fujitsu-laptop.c
32989 --- linux-2.6.32.41/drivers/platform/x86/fujitsu-laptop.c 2011-03-27 14:31:47.000000000 -0400
32990 +++ linux-2.6.32.41/drivers/platform/x86/fujitsu-laptop.c 2011-04-17 15:56:46.000000000 -0400
32991 @@ -436,7 +436,7 @@ static int bl_update_status(struct backl
32992 return ret;
32993 }
32994
32995 -static struct backlight_ops fujitsubl_ops = {
32996 +static const struct backlight_ops fujitsubl_ops = {
32997 .get_brightness = bl_get_brightness,
32998 .update_status = bl_update_status,
32999 };
33000 diff -urNp linux-2.6.32.41/drivers/platform/x86/msi-laptop.c linux-2.6.32.41/drivers/platform/x86/msi-laptop.c
33001 --- linux-2.6.32.41/drivers/platform/x86/msi-laptop.c 2011-03-27 14:31:47.000000000 -0400
33002 +++ linux-2.6.32.41/drivers/platform/x86/msi-laptop.c 2011-04-17 15:56:46.000000000 -0400
33003 @@ -161,7 +161,7 @@ static int bl_update_status(struct backl
33004 return set_lcd_level(b->props.brightness);
33005 }
33006
33007 -static struct backlight_ops msibl_ops = {
33008 +static const struct backlight_ops msibl_ops = {
33009 .get_brightness = bl_get_brightness,
33010 .update_status = bl_update_status,
33011 };
33012 diff -urNp linux-2.6.32.41/drivers/platform/x86/panasonic-laptop.c linux-2.6.32.41/drivers/platform/x86/panasonic-laptop.c
33013 --- linux-2.6.32.41/drivers/platform/x86/panasonic-laptop.c 2011-03-27 14:31:47.000000000 -0400
33014 +++ linux-2.6.32.41/drivers/platform/x86/panasonic-laptop.c 2011-04-17 15:56:46.000000000 -0400
33015 @@ -352,7 +352,7 @@ static int bl_set_status(struct backligh
33016 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
33017 }
33018
33019 -static struct backlight_ops pcc_backlight_ops = {
33020 +static const struct backlight_ops pcc_backlight_ops = {
33021 .get_brightness = bl_get,
33022 .update_status = bl_set_status,
33023 };
33024 diff -urNp linux-2.6.32.41/drivers/platform/x86/sony-laptop.c linux-2.6.32.41/drivers/platform/x86/sony-laptop.c
33025 --- linux-2.6.32.41/drivers/platform/x86/sony-laptop.c 2011-03-27 14:31:47.000000000 -0400
33026 +++ linux-2.6.32.41/drivers/platform/x86/sony-laptop.c 2011-04-17 15:56:46.000000000 -0400
33027 @@ -850,7 +850,7 @@ static int sony_backlight_get_brightness
33028 }
33029
33030 static struct backlight_device *sony_backlight_device;
33031 -static struct backlight_ops sony_backlight_ops = {
33032 +static const struct backlight_ops sony_backlight_ops = {
33033 .update_status = sony_backlight_update_status,
33034 .get_brightness = sony_backlight_get_brightness,
33035 };
33036 diff -urNp linux-2.6.32.41/drivers/platform/x86/thinkpad_acpi.c linux-2.6.32.41/drivers/platform/x86/thinkpad_acpi.c
33037 --- linux-2.6.32.41/drivers/platform/x86/thinkpad_acpi.c 2011-03-27 14:31:47.000000000 -0400
33038 +++ linux-2.6.32.41/drivers/platform/x86/thinkpad_acpi.c 2011-04-17 15:56:46.000000000 -0400
33039 @@ -6122,7 +6122,7 @@ static void tpacpi_brightness_notify_cha
33040 BACKLIGHT_UPDATE_HOTKEY);
33041 }
33042
33043 -static struct backlight_ops ibm_backlight_data = {
33044 +static const struct backlight_ops ibm_backlight_data = {
33045 .get_brightness = brightness_get,
33046 .update_status = brightness_update_status,
33047 };
33048 diff -urNp linux-2.6.32.41/drivers/platform/x86/toshiba_acpi.c linux-2.6.32.41/drivers/platform/x86/toshiba_acpi.c
33049 --- linux-2.6.32.41/drivers/platform/x86/toshiba_acpi.c 2011-03-27 14:31:47.000000000 -0400
33050 +++ linux-2.6.32.41/drivers/platform/x86/toshiba_acpi.c 2011-04-17 15:56:46.000000000 -0400
33051 @@ -671,7 +671,7 @@ static acpi_status remove_device(void)
33052 return AE_OK;
33053 }
33054
33055 -static struct backlight_ops toshiba_backlight_data = {
33056 +static const struct backlight_ops toshiba_backlight_data = {
33057 .get_brightness = get_lcd,
33058 .update_status = set_lcd_status,
33059 };
33060 diff -urNp linux-2.6.32.41/drivers/pnp/pnpbios/bioscalls.c linux-2.6.32.41/drivers/pnp/pnpbios/bioscalls.c
33061 --- linux-2.6.32.41/drivers/pnp/pnpbios/bioscalls.c 2011-03-27 14:31:47.000000000 -0400
33062 +++ linux-2.6.32.41/drivers/pnp/pnpbios/bioscalls.c 2011-04-17 15:56:46.000000000 -0400
33063 @@ -60,7 +60,7 @@ do { \
33064 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
33065 } while(0)
33066
33067 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
33068 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
33069 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
33070
33071 /*
33072 @@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func
33073
33074 cpu = get_cpu();
33075 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
33076 +
33077 + pax_open_kernel();
33078 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
33079 + pax_close_kernel();
33080
33081 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
33082 spin_lock_irqsave(&pnp_bios_lock, flags);
33083 @@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func
33084 :"memory");
33085 spin_unlock_irqrestore(&pnp_bios_lock, flags);
33086
33087 + pax_open_kernel();
33088 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
33089 + pax_close_kernel();
33090 +
33091 put_cpu();
33092
33093 /* If we get here and this is set then the PnP BIOS faulted on us. */
33094 @@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 n
33095 return status;
33096 }
33097
33098 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
33099 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
33100 {
33101 int i;
33102
33103 @@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_i
33104 pnp_bios_callpoint.offset = header->fields.pm16offset;
33105 pnp_bios_callpoint.segment = PNP_CS16;
33106
33107 + pax_open_kernel();
33108 +
33109 for_each_possible_cpu(i) {
33110 struct desc_struct *gdt = get_cpu_gdt_table(i);
33111 if (!gdt)
33112 @@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_i
33113 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
33114 (unsigned long)__va(header->fields.pm16dseg));
33115 }
33116 +
33117 + pax_close_kernel();
33118 }
33119 diff -urNp linux-2.6.32.41/drivers/pnp/resource.c linux-2.6.32.41/drivers/pnp/resource.c
33120 --- linux-2.6.32.41/drivers/pnp/resource.c 2011-03-27 14:31:47.000000000 -0400
33121 +++ linux-2.6.32.41/drivers/pnp/resource.c 2011-04-17 15:56:46.000000000 -0400
33122 @@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
33123 return 1;
33124
33125 /* check if the resource is valid */
33126 - if (*irq < 0 || *irq > 15)
33127 + if (*irq > 15)
33128 return 0;
33129
33130 /* check if the resource is reserved */
33131 @@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
33132 return 1;
33133
33134 /* check if the resource is valid */
33135 - if (*dma < 0 || *dma == 4 || *dma > 7)
33136 + if (*dma == 4 || *dma > 7)
33137 return 0;
33138
33139 /* check if the resource is reserved */
33140 diff -urNp linux-2.6.32.41/drivers/rtc/rtc-dev.c linux-2.6.32.41/drivers/rtc/rtc-dev.c
33141 --- linux-2.6.32.41/drivers/rtc/rtc-dev.c 2011-03-27 14:31:47.000000000 -0400
33142 +++ linux-2.6.32.41/drivers/rtc/rtc-dev.c 2011-04-17 15:56:46.000000000 -0400
33143 @@ -14,6 +14,7 @@
33144 #include <linux/module.h>
33145 #include <linux/rtc.h>
33146 #include <linux/sched.h>
33147 +#include <linux/grsecurity.h>
33148 #include "rtc-core.h"
33149
33150 static dev_t rtc_devt;
33151 @@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *f
33152 if (copy_from_user(&tm, uarg, sizeof(tm)))
33153 return -EFAULT;
33154
33155 + gr_log_timechange();
33156 +
33157 return rtc_set_time(rtc, &tm);
33158
33159 case RTC_PIE_ON:
33160 diff -urNp linux-2.6.32.41/drivers/s390/cio/qdio_perf.c linux-2.6.32.41/drivers/s390/cio/qdio_perf.c
33161 --- linux-2.6.32.41/drivers/s390/cio/qdio_perf.c 2011-03-27 14:31:47.000000000 -0400
33162 +++ linux-2.6.32.41/drivers/s390/cio/qdio_perf.c 2011-04-17 15:56:46.000000000 -0400
33163 @@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_
33164 static int qdio_perf_proc_show(struct seq_file *m, void *v)
33165 {
33166 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
33167 - (long)atomic_long_read(&perf_stats.qdio_int));
33168 + (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
33169 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
33170 - (long)atomic_long_read(&perf_stats.pci_int));
33171 + (long)atomic_long_read_unchecked(&perf_stats.pci_int));
33172 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
33173 - (long)atomic_long_read(&perf_stats.thin_int));
33174 + (long)atomic_long_read_unchecked(&perf_stats.thin_int));
33175 seq_printf(m, "\n");
33176 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
33177 - (long)atomic_long_read(&perf_stats.tasklet_inbound));
33178 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
33179 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
33180 - (long)atomic_long_read(&perf_stats.tasklet_outbound));
33181 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
33182 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
33183 - (long)atomic_long_read(&perf_stats.tasklet_thinint),
33184 - (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
33185 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
33186 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
33187 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
33188 - (long)atomic_long_read(&perf_stats.thinint_inbound),
33189 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
33190 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
33191 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
33192 seq_printf(m, "\n");
33193 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
33194 - (long)atomic_long_read(&perf_stats.siga_in));
33195 + (long)atomic_long_read_unchecked(&perf_stats.siga_in));
33196 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
33197 - (long)atomic_long_read(&perf_stats.siga_out));
33198 + (long)atomic_long_read_unchecked(&perf_stats.siga_out));
33199 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
33200 - (long)atomic_long_read(&perf_stats.siga_sync));
33201 + (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
33202 seq_printf(m, "\n");
33203 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
33204 - (long)atomic_long_read(&perf_stats.inbound_handler));
33205 + (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
33206 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
33207 - (long)atomic_long_read(&perf_stats.outbound_handler));
33208 + (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
33209 seq_printf(m, "\n");
33210 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
33211 - (long)atomic_long_read(&perf_stats.fast_requeue));
33212 + (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
33213 seq_printf(m, "Number of outbound target full condition\t: %li\n",
33214 - (long)atomic_long_read(&perf_stats.outbound_target_full));
33215 + (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
33216 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
33217 - (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
33218 + (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
33219 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
33220 - (long)atomic_long_read(&perf_stats.debug_stop_polling));
33221 + (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
33222 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
33223 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
33224 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
33225 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
33226 - (long)atomic_long_read(&perf_stats.debug_eqbs_all),
33227 - (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
33228 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
33229 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
33230 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
33231 - (long)atomic_long_read(&perf_stats.debug_sqbs_all),
33232 - (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
33233 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
33234 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
33235 seq_printf(m, "\n");
33236 return 0;
33237 }
33238 diff -urNp linux-2.6.32.41/drivers/s390/cio/qdio_perf.h linux-2.6.32.41/drivers/s390/cio/qdio_perf.h
33239 --- linux-2.6.32.41/drivers/s390/cio/qdio_perf.h 2011-03-27 14:31:47.000000000 -0400
33240 +++ linux-2.6.32.41/drivers/s390/cio/qdio_perf.h 2011-04-17 15:56:46.000000000 -0400
33241 @@ -13,46 +13,46 @@
33242
33243 struct qdio_perf_stats {
33244 /* interrupt handler calls */
33245 - atomic_long_t qdio_int;
33246 - atomic_long_t pci_int;
33247 - atomic_long_t thin_int;
33248 + atomic_long_unchecked_t qdio_int;
33249 + atomic_long_unchecked_t pci_int;
33250 + atomic_long_unchecked_t thin_int;
33251
33252 /* tasklet runs */
33253 - atomic_long_t tasklet_inbound;
33254 - atomic_long_t tasklet_outbound;
33255 - atomic_long_t tasklet_thinint;
33256 - atomic_long_t tasklet_thinint_loop;
33257 - atomic_long_t thinint_inbound;
33258 - atomic_long_t thinint_inbound_loop;
33259 - atomic_long_t thinint_inbound_loop2;
33260 + atomic_long_unchecked_t tasklet_inbound;
33261 + atomic_long_unchecked_t tasklet_outbound;
33262 + atomic_long_unchecked_t tasklet_thinint;
33263 + atomic_long_unchecked_t tasklet_thinint_loop;
33264 + atomic_long_unchecked_t thinint_inbound;
33265 + atomic_long_unchecked_t thinint_inbound_loop;
33266 + atomic_long_unchecked_t thinint_inbound_loop2;
33267
33268 /* signal adapter calls */
33269 - atomic_long_t siga_out;
33270 - atomic_long_t siga_in;
33271 - atomic_long_t siga_sync;
33272 + atomic_long_unchecked_t siga_out;
33273 + atomic_long_unchecked_t siga_in;
33274 + atomic_long_unchecked_t siga_sync;
33275
33276 /* misc */
33277 - atomic_long_t inbound_handler;
33278 - atomic_long_t outbound_handler;
33279 - atomic_long_t fast_requeue;
33280 - atomic_long_t outbound_target_full;
33281 + atomic_long_unchecked_t inbound_handler;
33282 + atomic_long_unchecked_t outbound_handler;
33283 + atomic_long_unchecked_t fast_requeue;
33284 + atomic_long_unchecked_t outbound_target_full;
33285
33286 /* for debugging */
33287 - atomic_long_t debug_tl_out_timer;
33288 - atomic_long_t debug_stop_polling;
33289 - atomic_long_t debug_eqbs_all;
33290 - atomic_long_t debug_eqbs_incomplete;
33291 - atomic_long_t debug_sqbs_all;
33292 - atomic_long_t debug_sqbs_incomplete;
33293 + atomic_long_unchecked_t debug_tl_out_timer;
33294 + atomic_long_unchecked_t debug_stop_polling;
33295 + atomic_long_unchecked_t debug_eqbs_all;
33296 + atomic_long_unchecked_t debug_eqbs_incomplete;
33297 + atomic_long_unchecked_t debug_sqbs_all;
33298 + atomic_long_unchecked_t debug_sqbs_incomplete;
33299 };
33300
33301 extern struct qdio_perf_stats perf_stats;
33302 extern int qdio_performance_stats;
33303
33304 -static inline void qdio_perf_stat_inc(atomic_long_t *count)
33305 +static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
33306 {
33307 if (qdio_performance_stats)
33308 - atomic_long_inc(count);
33309 + atomic_long_inc_unchecked(count);
33310 }
33311
33312 int qdio_setup_perf_stats(void);
33313 diff -urNp linux-2.6.32.41/drivers/scsi/aacraid/commctrl.c linux-2.6.32.41/drivers/scsi/aacraid/commctrl.c
33314 --- linux-2.6.32.41/drivers/scsi/aacraid/commctrl.c 2011-03-27 14:31:47.000000000 -0400
33315 +++ linux-2.6.32.41/drivers/scsi/aacraid/commctrl.c 2011-05-16 21:46:57.000000000 -0400
33316 @@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_d
33317 u32 actual_fibsize64, actual_fibsize = 0;
33318 int i;
33319
33320 + pax_track_stack();
33321
33322 if (dev->in_reset) {
33323 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
33324 diff -urNp linux-2.6.32.41/drivers/scsi/aic94xx/aic94xx_init.c linux-2.6.32.41/drivers/scsi/aic94xx/aic94xx_init.c
33325 --- linux-2.6.32.41/drivers/scsi/aic94xx/aic94xx_init.c 2011-03-27 14:31:47.000000000 -0400
33326 +++ linux-2.6.32.41/drivers/scsi/aic94xx/aic94xx_init.c 2011-04-17 15:56:46.000000000 -0400
33327 @@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(stru
33328 flash_error_table[i].reason);
33329 }
33330
33331 -static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
33332 +static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
33333 asd_show_update_bios, asd_store_update_bios);
33334
33335 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
33336 diff -urNp linux-2.6.32.41/drivers/scsi/BusLogic.c linux-2.6.32.41/drivers/scsi/BusLogic.c
33337 --- linux-2.6.32.41/drivers/scsi/BusLogic.c 2011-03-27 14:31:47.000000000 -0400
33338 +++ linux-2.6.32.41/drivers/scsi/BusLogic.c 2011-05-16 21:46:57.000000000 -0400
33339 @@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFla
33340 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
33341 *PrototypeHostAdapter)
33342 {
33343 + pax_track_stack();
33344 +
33345 /*
33346 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
33347 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
33348 diff -urNp linux-2.6.32.41/drivers/scsi/dpt_i2o.c linux-2.6.32.41/drivers/scsi/dpt_i2o.c
33349 --- linux-2.6.32.41/drivers/scsi/dpt_i2o.c 2011-03-27 14:31:47.000000000 -0400
33350 +++ linux-2.6.32.41/drivers/scsi/dpt_i2o.c 2011-05-16 21:46:57.000000000 -0400
33351 @@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* p
33352 dma_addr_t addr;
33353 ulong flags = 0;
33354
33355 + pax_track_stack();
33356 +
33357 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
33358 // get user msg size in u32s
33359 if(get_user(size, &user_msg[0])){
33360 @@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
33361 s32 rcode;
33362 dma_addr_t addr;
33363
33364 + pax_track_stack();
33365 +
33366 memset(msg, 0 , sizeof(msg));
33367 len = scsi_bufflen(cmd);
33368 direction = 0x00000000;
33369 diff -urNp linux-2.6.32.41/drivers/scsi/eata.c linux-2.6.32.41/drivers/scsi/eata.c
33370 --- linux-2.6.32.41/drivers/scsi/eata.c 2011-03-27 14:31:47.000000000 -0400
33371 +++ linux-2.6.32.41/drivers/scsi/eata.c 2011-05-16 21:46:57.000000000 -0400
33372 @@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
33373 struct hostdata *ha;
33374 char name[16];
33375
33376 + pax_track_stack();
33377 +
33378 sprintf(name, "%s%d", driver_name, j);
33379
33380 if (!request_region(port_base, REGION_SIZE, driver_name)) {
33381 diff -urNp linux-2.6.32.41/drivers/scsi/fcoe/libfcoe.c linux-2.6.32.41/drivers/scsi/fcoe/libfcoe.c
33382 --- linux-2.6.32.41/drivers/scsi/fcoe/libfcoe.c 2011-03-27 14:31:47.000000000 -0400
33383 +++ linux-2.6.32.41/drivers/scsi/fcoe/libfcoe.c 2011-05-16 21:46:57.000000000 -0400
33384 @@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fc
33385 size_t rlen;
33386 size_t dlen;
33387
33388 + pax_track_stack();
33389 +
33390 fiph = (struct fip_header *)skb->data;
33391 sub = fiph->fip_subcode;
33392 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
33393 diff -urNp linux-2.6.32.41/drivers/scsi/gdth.c linux-2.6.32.41/drivers/scsi/gdth.c
33394 --- linux-2.6.32.41/drivers/scsi/gdth.c 2011-03-27 14:31:47.000000000 -0400
33395 +++ linux-2.6.32.41/drivers/scsi/gdth.c 2011-05-16 21:46:57.000000000 -0400
33396 @@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
33397 ulong flags;
33398 gdth_ha_str *ha;
33399
33400 + pax_track_stack();
33401 +
33402 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
33403 return -EFAULT;
33404 ha = gdth_find_ha(ldrv.ionode);
33405 @@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg
33406 gdth_ha_str *ha;
33407 int rval;
33408
33409 + pax_track_stack();
33410 +
33411 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
33412 res.number >= MAX_HDRIVES)
33413 return -EFAULT;
33414 @@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg,
33415 gdth_ha_str *ha;
33416 int rval;
33417
33418 + pax_track_stack();
33419 +
33420 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
33421 return -EFAULT;
33422 ha = gdth_find_ha(gen.ionode);
33423 @@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
33424 int i;
33425 gdth_cmd_str gdtcmd;
33426 char cmnd[MAX_COMMAND_SIZE];
33427 +
33428 + pax_track_stack();
33429 +
33430 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
33431
33432 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
33433 diff -urNp linux-2.6.32.41/drivers/scsi/gdth_proc.c linux-2.6.32.41/drivers/scsi/gdth_proc.c
33434 --- linux-2.6.32.41/drivers/scsi/gdth_proc.c 2011-03-27 14:31:47.000000000 -0400
33435 +++ linux-2.6.32.41/drivers/scsi/gdth_proc.c 2011-05-16 21:46:57.000000000 -0400
33436 @@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi
33437 ulong64 paddr;
33438
33439 char cmnd[MAX_COMMAND_SIZE];
33440 +
33441 + pax_track_stack();
33442 +
33443 memset(cmnd, 0xff, 12);
33444 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
33445
33446 @@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,ch
33447 gdth_hget_str *phg;
33448 char cmnd[MAX_COMMAND_SIZE];
33449
33450 + pax_track_stack();
33451 +
33452 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
33453 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
33454 if (!gdtcmd || !estr)
33455 diff -urNp linux-2.6.32.41/drivers/scsi/hosts.c linux-2.6.32.41/drivers/scsi/hosts.c
33456 --- linux-2.6.32.41/drivers/scsi/hosts.c 2011-03-27 14:31:47.000000000 -0400
33457 +++ linux-2.6.32.41/drivers/scsi/hosts.c 2011-05-04 17:56:28.000000000 -0400
33458 @@ -40,7 +40,7 @@
33459 #include "scsi_logging.h"
33460
33461
33462 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
33463 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
33464
33465
33466 static void scsi_host_cls_release(struct device *dev)
33467 @@ -344,7 +344,7 @@ struct Scsi_Host *scsi_host_alloc(struct
33468 * subtract one because we increment first then return, but we need to
33469 * know what the next host number was before increment
33470 */
33471 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
33472 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
33473 shost->dma_channel = 0xff;
33474
33475 /* These three are default values which can be overridden */
33476 diff -urNp linux-2.6.32.41/drivers/scsi/ipr.c linux-2.6.32.41/drivers/scsi/ipr.c
33477 --- linux-2.6.32.41/drivers/scsi/ipr.c 2011-03-27 14:31:47.000000000 -0400
33478 +++ linux-2.6.32.41/drivers/scsi/ipr.c 2011-04-17 15:56:46.000000000 -0400
33479 @@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_q
33480 return true;
33481 }
33482
33483 -static struct ata_port_operations ipr_sata_ops = {
33484 +static const struct ata_port_operations ipr_sata_ops = {
33485 .phy_reset = ipr_ata_phy_reset,
33486 .hardreset = ipr_sata_reset,
33487 .post_internal_cmd = ipr_ata_post_internal,
33488 diff -urNp linux-2.6.32.41/drivers/scsi/libfc/fc_exch.c linux-2.6.32.41/drivers/scsi/libfc/fc_exch.c
33489 --- linux-2.6.32.41/drivers/scsi/libfc/fc_exch.c 2011-03-27 14:31:47.000000000 -0400
33490 +++ linux-2.6.32.41/drivers/scsi/libfc/fc_exch.c 2011-04-17 15:56:46.000000000 -0400
33491 @@ -86,12 +86,12 @@ struct fc_exch_mgr {
33492 * all together if not used XXX
33493 */
33494 struct {
33495 - atomic_t no_free_exch;
33496 - atomic_t no_free_exch_xid;
33497 - atomic_t xid_not_found;
33498 - atomic_t xid_busy;
33499 - atomic_t seq_not_found;
33500 - atomic_t non_bls_resp;
33501 + atomic_unchecked_t no_free_exch;
33502 + atomic_unchecked_t no_free_exch_xid;
33503 + atomic_unchecked_t xid_not_found;
33504 + atomic_unchecked_t xid_busy;
33505 + atomic_unchecked_t seq_not_found;
33506 + atomic_unchecked_t non_bls_resp;
33507 } stats;
33508 };
33509 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
33510 @@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(
33511 /* allocate memory for exchange */
33512 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
33513 if (!ep) {
33514 - atomic_inc(&mp->stats.no_free_exch);
33515 + atomic_inc_unchecked(&mp->stats.no_free_exch);
33516 goto out;
33517 }
33518 memset(ep, 0, sizeof(*ep));
33519 @@ -557,7 +557,7 @@ out:
33520 return ep;
33521 err:
33522 spin_unlock_bh(&pool->lock);
33523 - atomic_inc(&mp->stats.no_free_exch_xid);
33524 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
33525 mempool_free(ep, mp->ep_pool);
33526 return NULL;
33527 }
33528 @@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33529 xid = ntohs(fh->fh_ox_id); /* we originated exch */
33530 ep = fc_exch_find(mp, xid);
33531 if (!ep) {
33532 - atomic_inc(&mp->stats.xid_not_found);
33533 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33534 reject = FC_RJT_OX_ID;
33535 goto out;
33536 }
33537 @@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33538 ep = fc_exch_find(mp, xid);
33539 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
33540 if (ep) {
33541 - atomic_inc(&mp->stats.xid_busy);
33542 + atomic_inc_unchecked(&mp->stats.xid_busy);
33543 reject = FC_RJT_RX_ID;
33544 goto rel;
33545 }
33546 @@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33547 }
33548 xid = ep->xid; /* get our XID */
33549 } else if (!ep) {
33550 - atomic_inc(&mp->stats.xid_not_found);
33551 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33552 reject = FC_RJT_RX_ID; /* XID not found */
33553 goto out;
33554 }
33555 @@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33556 } else {
33557 sp = &ep->seq;
33558 if (sp->id != fh->fh_seq_id) {
33559 - atomic_inc(&mp->stats.seq_not_found);
33560 + atomic_inc_unchecked(&mp->stats.seq_not_found);
33561 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
33562 goto rel;
33563 }
33564 @@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct
33565
33566 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
33567 if (!ep) {
33568 - atomic_inc(&mp->stats.xid_not_found);
33569 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33570 goto out;
33571 }
33572 if (ep->esb_stat & ESB_ST_COMPLETE) {
33573 - atomic_inc(&mp->stats.xid_not_found);
33574 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33575 goto out;
33576 }
33577 if (ep->rxid == FC_XID_UNKNOWN)
33578 ep->rxid = ntohs(fh->fh_rx_id);
33579 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
33580 - atomic_inc(&mp->stats.xid_not_found);
33581 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33582 goto rel;
33583 }
33584 if (ep->did != ntoh24(fh->fh_s_id) &&
33585 ep->did != FC_FID_FLOGI) {
33586 - atomic_inc(&mp->stats.xid_not_found);
33587 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33588 goto rel;
33589 }
33590 sof = fr_sof(fp);
33591 @@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct
33592 } else {
33593 sp = &ep->seq;
33594 if (sp->id != fh->fh_seq_id) {
33595 - atomic_inc(&mp->stats.seq_not_found);
33596 + atomic_inc_unchecked(&mp->stats.seq_not_found);
33597 goto rel;
33598 }
33599 }
33600 @@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_
33601 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
33602
33603 if (!sp)
33604 - atomic_inc(&mp->stats.xid_not_found);
33605 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33606 else
33607 - atomic_inc(&mp->stats.non_bls_resp);
33608 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
33609
33610 fc_frame_free(fp);
33611 }
33612 diff -urNp linux-2.6.32.41/drivers/scsi/libsas/sas_ata.c linux-2.6.32.41/drivers/scsi/libsas/sas_ata.c
33613 --- linux-2.6.32.41/drivers/scsi/libsas/sas_ata.c 2011-03-27 14:31:47.000000000 -0400
33614 +++ linux-2.6.32.41/drivers/scsi/libsas/sas_ata.c 2011-04-23 12:56:11.000000000 -0400
33615 @@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_l
33616 }
33617 }
33618
33619 -static struct ata_port_operations sas_sata_ops = {
33620 +static const struct ata_port_operations sas_sata_ops = {
33621 .phy_reset = sas_ata_phy_reset,
33622 .post_internal_cmd = sas_ata_post_internal,
33623 .qc_defer = ata_std_qc_defer,
33624 diff -urNp linux-2.6.32.41/drivers/scsi/lpfc/lpfc_debugfs.c linux-2.6.32.41/drivers/scsi/lpfc/lpfc_debugfs.c
33625 --- linux-2.6.32.41/drivers/scsi/lpfc/lpfc_debugfs.c 2011-03-27 14:31:47.000000000 -0400
33626 +++ linux-2.6.32.41/drivers/scsi/lpfc/lpfc_debugfs.c 2011-05-16 21:46:57.000000000 -0400
33627 @@ -124,7 +124,7 @@ struct lpfc_debug {
33628 int len;
33629 };
33630
33631 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
33632 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
33633 static unsigned long lpfc_debugfs_start_time = 0L;
33634
33635 /**
33636 @@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
33637 lpfc_debugfs_enable = 0;
33638
33639 len = 0;
33640 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
33641 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
33642 (lpfc_debugfs_max_disc_trc - 1);
33643 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
33644 dtp = vport->disc_trc + i;
33645 @@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
33646 lpfc_debugfs_enable = 0;
33647
33648 len = 0;
33649 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
33650 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
33651 (lpfc_debugfs_max_slow_ring_trc - 1);
33652 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
33653 dtp = phba->slow_ring_trc + i;
33654 @@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
33655 uint32_t *ptr;
33656 char buffer[1024];
33657
33658 + pax_track_stack();
33659 +
33660 off = 0;
33661 spin_lock_irq(&phba->hbalock);
33662
33663 @@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
33664 !vport || !vport->disc_trc)
33665 return;
33666
33667 - index = atomic_inc_return(&vport->disc_trc_cnt) &
33668 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
33669 (lpfc_debugfs_max_disc_trc - 1);
33670 dtp = vport->disc_trc + index;
33671 dtp->fmt = fmt;
33672 dtp->data1 = data1;
33673 dtp->data2 = data2;
33674 dtp->data3 = data3;
33675 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
33676 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
33677 dtp->jif = jiffies;
33678 #endif
33679 return;
33680 @@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
33681 !phba || !phba->slow_ring_trc)
33682 return;
33683
33684 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
33685 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
33686 (lpfc_debugfs_max_slow_ring_trc - 1);
33687 dtp = phba->slow_ring_trc + index;
33688 dtp->fmt = fmt;
33689 dtp->data1 = data1;
33690 dtp->data2 = data2;
33691 dtp->data3 = data3;
33692 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
33693 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
33694 dtp->jif = jiffies;
33695 #endif
33696 return;
33697 @@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
33698 "slow_ring buffer\n");
33699 goto debug_failed;
33700 }
33701 - atomic_set(&phba->slow_ring_trc_cnt, 0);
33702 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
33703 memset(phba->slow_ring_trc, 0,
33704 (sizeof(struct lpfc_debugfs_trc) *
33705 lpfc_debugfs_max_slow_ring_trc));
33706 @@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
33707 "buffer\n");
33708 goto debug_failed;
33709 }
33710 - atomic_set(&vport->disc_trc_cnt, 0);
33711 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
33712
33713 snprintf(name, sizeof(name), "discovery_trace");
33714 vport->debug_disc_trc =
33715 diff -urNp linux-2.6.32.41/drivers/scsi/lpfc/lpfc.h linux-2.6.32.41/drivers/scsi/lpfc/lpfc.h
33716 --- linux-2.6.32.41/drivers/scsi/lpfc/lpfc.h 2011-03-27 14:31:47.000000000 -0400
33717 +++ linux-2.6.32.41/drivers/scsi/lpfc/lpfc.h 2011-05-04 17:56:28.000000000 -0400
33718 @@ -400,7 +400,7 @@ struct lpfc_vport {
33719 struct dentry *debug_nodelist;
33720 struct dentry *vport_debugfs_root;
33721 struct lpfc_debugfs_trc *disc_trc;
33722 - atomic_t disc_trc_cnt;
33723 + atomic_unchecked_t disc_trc_cnt;
33724 #endif
33725 uint8_t stat_data_enabled;
33726 uint8_t stat_data_blocked;
33727 @@ -725,8 +725,8 @@ struct lpfc_hba {
33728 struct timer_list fabric_block_timer;
33729 unsigned long bit_flags;
33730 #define FABRIC_COMANDS_BLOCKED 0
33731 - atomic_t num_rsrc_err;
33732 - atomic_t num_cmd_success;
33733 + atomic_unchecked_t num_rsrc_err;
33734 + atomic_unchecked_t num_cmd_success;
33735 unsigned long last_rsrc_error_time;
33736 unsigned long last_ramp_down_time;
33737 unsigned long last_ramp_up_time;
33738 @@ -740,7 +740,7 @@ struct lpfc_hba {
33739 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
33740 struct dentry *debug_slow_ring_trc;
33741 struct lpfc_debugfs_trc *slow_ring_trc;
33742 - atomic_t slow_ring_trc_cnt;
33743 + atomic_unchecked_t slow_ring_trc_cnt;
33744 #endif
33745
33746 /* Used for deferred freeing of ELS data buffers */
33747 diff -urNp linux-2.6.32.41/drivers/scsi/lpfc/lpfc_scsi.c linux-2.6.32.41/drivers/scsi/lpfc/lpfc_scsi.c
33748 --- linux-2.6.32.41/drivers/scsi/lpfc/lpfc_scsi.c 2011-03-27 14:31:47.000000000 -0400
33749 +++ linux-2.6.32.41/drivers/scsi/lpfc/lpfc_scsi.c 2011-05-04 17:56:28.000000000 -0400
33750 @@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
33751 uint32_t evt_posted;
33752
33753 spin_lock_irqsave(&phba->hbalock, flags);
33754 - atomic_inc(&phba->num_rsrc_err);
33755 + atomic_inc_unchecked(&phba->num_rsrc_err);
33756 phba->last_rsrc_error_time = jiffies;
33757
33758 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
33759 @@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
33760 unsigned long flags;
33761 struct lpfc_hba *phba = vport->phba;
33762 uint32_t evt_posted;
33763 - atomic_inc(&phba->num_cmd_success);
33764 + atomic_inc_unchecked(&phba->num_cmd_success);
33765
33766 if (vport->cfg_lun_queue_depth <= queue_depth)
33767 return;
33768 @@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
33769 int i;
33770 struct lpfc_rport_data *rdata;
33771
33772 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
33773 - num_cmd_success = atomic_read(&phba->num_cmd_success);
33774 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
33775 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
33776
33777 vports = lpfc_create_vport_work_array(phba);
33778 if (vports != NULL)
33779 @@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
33780 }
33781 }
33782 lpfc_destroy_vport_work_array(phba, vports);
33783 - atomic_set(&phba->num_rsrc_err, 0);
33784 - atomic_set(&phba->num_cmd_success, 0);
33785 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
33786 + atomic_set_unchecked(&phba->num_cmd_success, 0);
33787 }
33788
33789 /**
33790 @@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
33791 }
33792 }
33793 lpfc_destroy_vport_work_array(phba, vports);
33794 - atomic_set(&phba->num_rsrc_err, 0);
33795 - atomic_set(&phba->num_cmd_success, 0);
33796 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
33797 + atomic_set_unchecked(&phba->num_cmd_success, 0);
33798 }
33799
33800 /**
33801 diff -urNp linux-2.6.32.41/drivers/scsi/megaraid/megaraid_mbox.c linux-2.6.32.41/drivers/scsi/megaraid/megaraid_mbox.c
33802 --- linux-2.6.32.41/drivers/scsi/megaraid/megaraid_mbox.c 2011-03-27 14:31:47.000000000 -0400
33803 +++ linux-2.6.32.41/drivers/scsi/megaraid/megaraid_mbox.c 2011-05-16 21:46:57.000000000 -0400
33804 @@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
33805 int rval;
33806 int i;
33807
33808 + pax_track_stack();
33809 +
33810 // Allocate memory for the base list of scb for management module.
33811 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
33812
33813 diff -urNp linux-2.6.32.41/drivers/scsi/osd/osd_initiator.c linux-2.6.32.41/drivers/scsi/osd/osd_initiator.c
33814 --- linux-2.6.32.41/drivers/scsi/osd/osd_initiator.c 2011-03-27 14:31:47.000000000 -0400
33815 +++ linux-2.6.32.41/drivers/scsi/osd/osd_initiator.c 2011-05-16 21:46:57.000000000 -0400
33816 @@ -94,6 +94,8 @@ static int _osd_print_system_info(struct
33817 int nelem = ARRAY_SIZE(get_attrs), a = 0;
33818 int ret;
33819
33820 + pax_track_stack();
33821 +
33822 or = osd_start_request(od, GFP_KERNEL);
33823 if (!or)
33824 return -ENOMEM;
33825 diff -urNp linux-2.6.32.41/drivers/scsi/pmcraid.c linux-2.6.32.41/drivers/scsi/pmcraid.c
33826 --- linux-2.6.32.41/drivers/scsi/pmcraid.c 2011-05-10 22:12:01.000000000 -0400
33827 +++ linux-2.6.32.41/drivers/scsi/pmcraid.c 2011-05-10 22:12:33.000000000 -0400
33828 @@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct sc
33829 res->scsi_dev = scsi_dev;
33830 scsi_dev->hostdata = res;
33831 res->change_detected = 0;
33832 - atomic_set(&res->read_failures, 0);
33833 - atomic_set(&res->write_failures, 0);
33834 + atomic_set_unchecked(&res->read_failures, 0);
33835 + atomic_set_unchecked(&res->write_failures, 0);
33836 rc = 0;
33837 }
33838 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
33839 @@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct
33840
33841 /* If this was a SCSI read/write command keep count of errors */
33842 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
33843 - atomic_inc(&res->read_failures);
33844 + atomic_inc_unchecked(&res->read_failures);
33845 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
33846 - atomic_inc(&res->write_failures);
33847 + atomic_inc_unchecked(&res->write_failures);
33848
33849 if (!RES_IS_GSCSI(res->cfg_entry) &&
33850 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
33851 @@ -4113,7 +4113,7 @@ static void pmcraid_worker_function(stru
33852
33853 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
33854 /* add resources only after host is added into system */
33855 - if (!atomic_read(&pinstance->expose_resources))
33856 + if (!atomic_read_unchecked(&pinstance->expose_resources))
33857 return;
33858
33859 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
33860 @@ -4847,7 +4847,7 @@ static int __devinit pmcraid_init_instan
33861 init_waitqueue_head(&pinstance->reset_wait_q);
33862
33863 atomic_set(&pinstance->outstanding_cmds, 0);
33864 - atomic_set(&pinstance->expose_resources, 0);
33865 + atomic_set_unchecked(&pinstance->expose_resources, 0);
33866
33867 INIT_LIST_HEAD(&pinstance->free_res_q);
33868 INIT_LIST_HEAD(&pinstance->used_res_q);
33869 @@ -5499,7 +5499,7 @@ static int __devinit pmcraid_probe(
33870 /* Schedule worker thread to handle CCN and take care of adding and
33871 * removing devices to OS
33872 */
33873 - atomic_set(&pinstance->expose_resources, 1);
33874 + atomic_set_unchecked(&pinstance->expose_resources, 1);
33875 schedule_work(&pinstance->worker_q);
33876 return rc;
33877
33878 diff -urNp linux-2.6.32.41/drivers/scsi/pmcraid.h linux-2.6.32.41/drivers/scsi/pmcraid.h
33879 --- linux-2.6.32.41/drivers/scsi/pmcraid.h 2011-03-27 14:31:47.000000000 -0400
33880 +++ linux-2.6.32.41/drivers/scsi/pmcraid.h 2011-05-04 17:56:28.000000000 -0400
33881 @@ -690,7 +690,7 @@ struct pmcraid_instance {
33882 atomic_t outstanding_cmds;
33883
33884 /* should add/delete resources to mid-layer now ?*/
33885 - atomic_t expose_resources;
33886 + atomic_unchecked_t expose_resources;
33887
33888 /* Tasklet to handle deferred processing */
33889 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
33890 @@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
33891 struct list_head queue; /* link to "to be exposed" resources */
33892 struct pmcraid_config_table_entry cfg_entry;
33893 struct scsi_device *scsi_dev; /* Link scsi_device structure */
33894 - atomic_t read_failures; /* count of failed READ commands */
33895 - atomic_t write_failures; /* count of failed WRITE commands */
33896 + atomic_unchecked_t read_failures; /* count of failed READ commands */
33897 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
33898
33899 /* To indicate add/delete/modify during CCN */
33900 u8 change_detected;
33901 diff -urNp linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_def.h linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_def.h
33902 --- linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_def.h 2011-03-27 14:31:47.000000000 -0400
33903 +++ linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_def.h 2011-05-04 17:56:28.000000000 -0400
33904 @@ -240,7 +240,7 @@ struct ddb_entry {
33905 atomic_t retry_relogin_timer; /* Min Time between relogins
33906 * (4000 only) */
33907 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
33908 - atomic_t relogin_retry_count; /* Num of times relogin has been
33909 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
33910 * retried */
33911
33912 uint16_t port;
33913 diff -urNp linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_init.c linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_init.c
33914 --- linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_init.c 2011-03-27 14:31:47.000000000 -0400
33915 +++ linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_init.c 2011-05-04 17:56:28.000000000 -0400
33916 @@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_
33917 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
33918 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
33919 atomic_set(&ddb_entry->relogin_timer, 0);
33920 - atomic_set(&ddb_entry->relogin_retry_count, 0);
33921 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
33922 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
33923 list_add_tail(&ddb_entry->list, &ha->ddb_list);
33924 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
33925 @@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct s
33926 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
33927 atomic_set(&ddb_entry->port_down_timer,
33928 ha->port_down_retry_count);
33929 - atomic_set(&ddb_entry->relogin_retry_count, 0);
33930 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
33931 atomic_set(&ddb_entry->relogin_timer, 0);
33932 clear_bit(DF_RELOGIN, &ddb_entry->flags);
33933 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
33934 diff -urNp linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_os.c linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_os.c
33935 --- linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_os.c 2011-03-27 14:31:47.000000000 -0400
33936 +++ linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_os.c 2011-05-04 17:56:28.000000000 -0400
33937 @@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_ql
33938 ddb_entry->fw_ddb_device_state ==
33939 DDB_DS_SESSION_FAILED) {
33940 /* Reset retry relogin timer */
33941 - atomic_inc(&ddb_entry->relogin_retry_count);
33942 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
33943 DEBUG2(printk("scsi%ld: index[%d] relogin"
33944 " timed out-retrying"
33945 " relogin (%d)\n",
33946 ha->host_no,
33947 ddb_entry->fw_ddb_index,
33948 - atomic_read(&ddb_entry->
33949 + atomic_read_unchecked(&ddb_entry->
33950 relogin_retry_count))
33951 );
33952 start_dpc++;
33953 diff -urNp linux-2.6.32.41/drivers/scsi/scsi.c linux-2.6.32.41/drivers/scsi/scsi.c
33954 --- linux-2.6.32.41/drivers/scsi/scsi.c 2011-03-27 14:31:47.000000000 -0400
33955 +++ linux-2.6.32.41/drivers/scsi/scsi.c 2011-05-04 17:56:28.000000000 -0400
33956 @@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
33957 unsigned long timeout;
33958 int rtn = 0;
33959
33960 - atomic_inc(&cmd->device->iorequest_cnt);
33961 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
33962
33963 /* check if the device is still usable */
33964 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
33965 diff -urNp linux-2.6.32.41/drivers/scsi/scsi_debug.c linux-2.6.32.41/drivers/scsi/scsi_debug.c
33966 --- linux-2.6.32.41/drivers/scsi/scsi_debug.c 2011-03-27 14:31:47.000000000 -0400
33967 +++ linux-2.6.32.41/drivers/scsi/scsi_debug.c 2011-05-16 21:46:57.000000000 -0400
33968 @@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_
33969 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
33970 unsigned char *cmd = (unsigned char *)scp->cmnd;
33971
33972 + pax_track_stack();
33973 +
33974 if ((errsts = check_readiness(scp, 1, devip)))
33975 return errsts;
33976 memset(arr, 0, sizeof(arr));
33977 @@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cm
33978 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
33979 unsigned char *cmd = (unsigned char *)scp->cmnd;
33980
33981 + pax_track_stack();
33982 +
33983 if ((errsts = check_readiness(scp, 1, devip)))
33984 return errsts;
33985 memset(arr, 0, sizeof(arr));
33986 diff -urNp linux-2.6.32.41/drivers/scsi/scsi_lib.c linux-2.6.32.41/drivers/scsi/scsi_lib.c
33987 --- linux-2.6.32.41/drivers/scsi/scsi_lib.c 2011-05-10 22:12:01.000000000 -0400
33988 +++ linux-2.6.32.41/drivers/scsi/scsi_lib.c 2011-05-10 22:12:33.000000000 -0400
33989 @@ -1384,7 +1384,7 @@ static void scsi_kill_request(struct req
33990
33991 scsi_init_cmd_errh(cmd);
33992 cmd->result = DID_NO_CONNECT << 16;
33993 - atomic_inc(&cmd->device->iorequest_cnt);
33994 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
33995
33996 /*
33997 * SCSI request completion path will do scsi_device_unbusy(),
33998 @@ -1415,9 +1415,9 @@ static void scsi_softirq_done(struct req
33999 */
34000 cmd->serial_number = 0;
34001
34002 - atomic_inc(&cmd->device->iodone_cnt);
34003 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
34004 if (cmd->result)
34005 - atomic_inc(&cmd->device->ioerr_cnt);
34006 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
34007
34008 disposition = scsi_decide_disposition(cmd);
34009 if (disposition != SUCCESS &&
34010 diff -urNp linux-2.6.32.41/drivers/scsi/scsi_sysfs.c linux-2.6.32.41/drivers/scsi/scsi_sysfs.c
34011 --- linux-2.6.32.41/drivers/scsi/scsi_sysfs.c 2011-05-10 22:12:01.000000000 -0400
34012 +++ linux-2.6.32.41/drivers/scsi/scsi_sysfs.c 2011-05-10 22:12:33.000000000 -0400
34013 @@ -661,7 +661,7 @@ show_iostat_##field(struct device *dev,
34014 char *buf) \
34015 { \
34016 struct scsi_device *sdev = to_scsi_device(dev); \
34017 - unsigned long long count = atomic_read(&sdev->field); \
34018 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
34019 return snprintf(buf, 20, "0x%llx\n", count); \
34020 } \
34021 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
34022 diff -urNp linux-2.6.32.41/drivers/scsi/scsi_transport_fc.c linux-2.6.32.41/drivers/scsi/scsi_transport_fc.c
34023 --- linux-2.6.32.41/drivers/scsi/scsi_transport_fc.c 2011-03-27 14:31:47.000000000 -0400
34024 +++ linux-2.6.32.41/drivers/scsi/scsi_transport_fc.c 2011-05-04 17:56:28.000000000 -0400
34025 @@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
34026 * Netlink Infrastructure
34027 */
34028
34029 -static atomic_t fc_event_seq;
34030 +static atomic_unchecked_t fc_event_seq;
34031
34032 /**
34033 * fc_get_event_number - Obtain the next sequential FC event number
34034 @@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
34035 u32
34036 fc_get_event_number(void)
34037 {
34038 - return atomic_add_return(1, &fc_event_seq);
34039 + return atomic_add_return_unchecked(1, &fc_event_seq);
34040 }
34041 EXPORT_SYMBOL(fc_get_event_number);
34042
34043 @@ -641,7 +641,7 @@ static __init int fc_transport_init(void
34044 {
34045 int error;
34046
34047 - atomic_set(&fc_event_seq, 0);
34048 + atomic_set_unchecked(&fc_event_seq, 0);
34049
34050 error = transport_class_register(&fc_host_class);
34051 if (error)
34052 diff -urNp linux-2.6.32.41/drivers/scsi/scsi_transport_iscsi.c linux-2.6.32.41/drivers/scsi/scsi_transport_iscsi.c
34053 --- linux-2.6.32.41/drivers/scsi/scsi_transport_iscsi.c 2011-03-27 14:31:47.000000000 -0400
34054 +++ linux-2.6.32.41/drivers/scsi/scsi_transport_iscsi.c 2011-05-04 17:56:28.000000000 -0400
34055 @@ -81,7 +81,7 @@ struct iscsi_internal {
34056 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
34057 };
34058
34059 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
34060 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
34061 static struct workqueue_struct *iscsi_eh_timer_workq;
34062
34063 /*
34064 @@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_s
34065 int err;
34066
34067 ihost = shost->shost_data;
34068 - session->sid = atomic_add_return(1, &iscsi_session_nr);
34069 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
34070
34071 if (id == ISCSI_MAX_TARGET) {
34072 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
34073 @@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(v
34074 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
34075 ISCSI_TRANSPORT_VERSION);
34076
34077 - atomic_set(&iscsi_session_nr, 0);
34078 + atomic_set_unchecked(&iscsi_session_nr, 0);
34079
34080 err = class_register(&iscsi_transport_class);
34081 if (err)
34082 diff -urNp linux-2.6.32.41/drivers/scsi/scsi_transport_srp.c linux-2.6.32.41/drivers/scsi/scsi_transport_srp.c
34083 --- linux-2.6.32.41/drivers/scsi/scsi_transport_srp.c 2011-03-27 14:31:47.000000000 -0400
34084 +++ linux-2.6.32.41/drivers/scsi/scsi_transport_srp.c 2011-05-04 17:56:28.000000000 -0400
34085 @@ -33,7 +33,7 @@
34086 #include "scsi_transport_srp_internal.h"
34087
34088 struct srp_host_attrs {
34089 - atomic_t next_port_id;
34090 + atomic_unchecked_t next_port_id;
34091 };
34092 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
34093
34094 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
34095 struct Scsi_Host *shost = dev_to_shost(dev);
34096 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
34097
34098 - atomic_set(&srp_host->next_port_id, 0);
34099 + atomic_set_unchecked(&srp_host->next_port_id, 0);
34100 return 0;
34101 }
34102
34103 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
34104 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
34105 rport->roles = ids->roles;
34106
34107 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
34108 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
34109 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
34110
34111 transport_setup_device(&rport->dev);
34112 diff -urNp linux-2.6.32.41/drivers/scsi/sg.c linux-2.6.32.41/drivers/scsi/sg.c
34113 --- linux-2.6.32.41/drivers/scsi/sg.c 2011-03-27 14:31:47.000000000 -0400
34114 +++ linux-2.6.32.41/drivers/scsi/sg.c 2011-04-17 15:56:46.000000000 -0400
34115 @@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
34116 const struct file_operations * fops;
34117 };
34118
34119 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
34120 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
34121 {"allow_dio", &adio_fops},
34122 {"debug", &debug_fops},
34123 {"def_reserved_size", &dressz_fops},
34124 @@ -2307,7 +2307,7 @@ sg_proc_init(void)
34125 {
34126 int k, mask;
34127 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
34128 - struct sg_proc_leaf * leaf;
34129 + const struct sg_proc_leaf * leaf;
34130
34131 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
34132 if (!sg_proc_sgp)
34133 diff -urNp linux-2.6.32.41/drivers/scsi/sym53c8xx_2/sym_glue.c linux-2.6.32.41/drivers/scsi/sym53c8xx_2/sym_glue.c
34134 --- linux-2.6.32.41/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-03-27 14:31:47.000000000 -0400
34135 +++ linux-2.6.32.41/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-05-16 21:46:57.000000000 -0400
34136 @@ -1754,6 +1754,8 @@ static int __devinit sym2_probe(struct p
34137 int do_iounmap = 0;
34138 int do_disable_device = 1;
34139
34140 + pax_track_stack();
34141 +
34142 memset(&sym_dev, 0, sizeof(sym_dev));
34143 memset(&nvram, 0, sizeof(nvram));
34144 sym_dev.pdev = pdev;
34145 diff -urNp linux-2.6.32.41/drivers/serial/kgdboc.c linux-2.6.32.41/drivers/serial/kgdboc.c
34146 --- linux-2.6.32.41/drivers/serial/kgdboc.c 2011-03-27 14:31:47.000000000 -0400
34147 +++ linux-2.6.32.41/drivers/serial/kgdboc.c 2011-04-17 15:56:46.000000000 -0400
34148 @@ -18,7 +18,7 @@
34149
34150 #define MAX_CONFIG_LEN 40
34151
34152 -static struct kgdb_io kgdboc_io_ops;
34153 +static const struct kgdb_io kgdboc_io_ops;
34154
34155 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
34156 static int configured = -1;
34157 @@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void
34158 module_put(THIS_MODULE);
34159 }
34160
34161 -static struct kgdb_io kgdboc_io_ops = {
34162 +static const struct kgdb_io kgdboc_io_ops = {
34163 .name = "kgdboc",
34164 .read_char = kgdboc_get_char,
34165 .write_char = kgdboc_put_char,
34166 diff -urNp linux-2.6.32.41/drivers/spi/spi.c linux-2.6.32.41/drivers/spi/spi.c
34167 --- linux-2.6.32.41/drivers/spi/spi.c 2011-03-27 14:31:47.000000000 -0400
34168 +++ linux-2.6.32.41/drivers/spi/spi.c 2011-05-04 17:56:28.000000000 -0400
34169 @@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, str
34170 EXPORT_SYMBOL_GPL(spi_sync);
34171
34172 /* portable code must never pass more than 32 bytes */
34173 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
34174 +#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
34175
34176 static u8 *buf;
34177
34178 diff -urNp linux-2.6.32.41/drivers/staging/android/binder.c linux-2.6.32.41/drivers/staging/android/binder.c
34179 --- linux-2.6.32.41/drivers/staging/android/binder.c 2011-03-27 14:31:47.000000000 -0400
34180 +++ linux-2.6.32.41/drivers/staging/android/binder.c 2011-04-17 15:56:46.000000000 -0400
34181 @@ -2756,7 +2756,7 @@ static void binder_vma_close(struct vm_a
34182 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
34183 }
34184
34185 -static struct vm_operations_struct binder_vm_ops = {
34186 +static const struct vm_operations_struct binder_vm_ops = {
34187 .open = binder_vma_open,
34188 .close = binder_vma_close,
34189 };
34190 diff -urNp linux-2.6.32.41/drivers/staging/b3dfg/b3dfg.c linux-2.6.32.41/drivers/staging/b3dfg/b3dfg.c
34191 --- linux-2.6.32.41/drivers/staging/b3dfg/b3dfg.c 2011-03-27 14:31:47.000000000 -0400
34192 +++ linux-2.6.32.41/drivers/staging/b3dfg/b3dfg.c 2011-04-17 15:56:46.000000000 -0400
34193 @@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_are
34194 return VM_FAULT_NOPAGE;
34195 }
34196
34197 -static struct vm_operations_struct b3dfg_vm_ops = {
34198 +static const struct vm_operations_struct b3dfg_vm_ops = {
34199 .fault = b3dfg_vma_fault,
34200 };
34201
34202 @@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp,
34203 return r;
34204 }
34205
34206 -static struct file_operations b3dfg_fops = {
34207 +static const struct file_operations b3dfg_fops = {
34208 .owner = THIS_MODULE,
34209 .open = b3dfg_open,
34210 .release = b3dfg_release,
34211 diff -urNp linux-2.6.32.41/drivers/staging/comedi/comedi_fops.c linux-2.6.32.41/drivers/staging/comedi/comedi_fops.c
34212 --- linux-2.6.32.41/drivers/staging/comedi/comedi_fops.c 2011-03-27 14:31:47.000000000 -0400
34213 +++ linux-2.6.32.41/drivers/staging/comedi/comedi_fops.c 2011-04-17 15:56:46.000000000 -0400
34214 @@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct
34215 mutex_unlock(&dev->mutex);
34216 }
34217
34218 -static struct vm_operations_struct comedi_vm_ops = {
34219 +static const struct vm_operations_struct comedi_vm_ops = {
34220 .close = comedi_unmap,
34221 };
34222
34223 diff -urNp linux-2.6.32.41/drivers/staging/dream/qdsp5/adsp_driver.c linux-2.6.32.41/drivers/staging/dream/qdsp5/adsp_driver.c
34224 --- linux-2.6.32.41/drivers/staging/dream/qdsp5/adsp_driver.c 2011-03-27 14:31:47.000000000 -0400
34225 +++ linux-2.6.32.41/drivers/staging/dream/qdsp5/adsp_driver.c 2011-04-17 15:56:46.000000000 -0400
34226 @@ -576,7 +576,7 @@ static struct adsp_device *inode_to_devi
34227 static dev_t adsp_devno;
34228 static struct class *adsp_class;
34229
34230 -static struct file_operations adsp_fops = {
34231 +static const struct file_operations adsp_fops = {
34232 .owner = THIS_MODULE,
34233 .open = adsp_open,
34234 .unlocked_ioctl = adsp_ioctl,
34235 diff -urNp linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_aac.c linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_aac.c
34236 --- linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_aac.c 2011-03-27 14:31:47.000000000 -0400
34237 +++ linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_aac.c 2011-04-17 15:56:46.000000000 -0400
34238 @@ -1022,7 +1022,7 @@ done:
34239 return rc;
34240 }
34241
34242 -static struct file_operations audio_aac_fops = {
34243 +static const struct file_operations audio_aac_fops = {
34244 .owner = THIS_MODULE,
34245 .open = audio_open,
34246 .release = audio_release,
34247 diff -urNp linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_amrnb.c linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_amrnb.c
34248 --- linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-03-27 14:31:47.000000000 -0400
34249 +++ linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-04-17 15:56:46.000000000 -0400
34250 @@ -833,7 +833,7 @@ done:
34251 return rc;
34252 }
34253
34254 -static struct file_operations audio_amrnb_fops = {
34255 +static const struct file_operations audio_amrnb_fops = {
34256 .owner = THIS_MODULE,
34257 .open = audamrnb_open,
34258 .release = audamrnb_release,
34259 diff -urNp linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_evrc.c linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_evrc.c
34260 --- linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_evrc.c 2011-03-27 14:31:47.000000000 -0400
34261 +++ linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_evrc.c 2011-04-17 15:56:46.000000000 -0400
34262 @@ -805,7 +805,7 @@ dma_fail:
34263 return rc;
34264 }
34265
34266 -static struct file_operations audio_evrc_fops = {
34267 +static const struct file_operations audio_evrc_fops = {
34268 .owner = THIS_MODULE,
34269 .open = audevrc_open,
34270 .release = audevrc_release,
34271 diff -urNp linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_in.c linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_in.c
34272 --- linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_in.c 2011-03-27 14:31:47.000000000 -0400
34273 +++ linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_in.c 2011-04-17 15:56:46.000000000 -0400
34274 @@ -913,7 +913,7 @@ static int audpre_open(struct inode *ino
34275 return 0;
34276 }
34277
34278 -static struct file_operations audio_fops = {
34279 +static const struct file_operations audio_fops = {
34280 .owner = THIS_MODULE,
34281 .open = audio_in_open,
34282 .release = audio_in_release,
34283 @@ -922,7 +922,7 @@ static struct file_operations audio_fops
34284 .unlocked_ioctl = audio_in_ioctl,
34285 };
34286
34287 -static struct file_operations audpre_fops = {
34288 +static const struct file_operations audpre_fops = {
34289 .owner = THIS_MODULE,
34290 .open = audpre_open,
34291 .unlocked_ioctl = audpre_ioctl,
34292 diff -urNp linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_mp3.c linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_mp3.c
34293 --- linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_mp3.c 2011-03-27 14:31:47.000000000 -0400
34294 +++ linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_mp3.c 2011-04-17 15:56:46.000000000 -0400
34295 @@ -941,7 +941,7 @@ done:
34296 return rc;
34297 }
34298
34299 -static struct file_operations audio_mp3_fops = {
34300 +static const struct file_operations audio_mp3_fops = {
34301 .owner = THIS_MODULE,
34302 .open = audio_open,
34303 .release = audio_release,
34304 diff -urNp linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_out.c linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_out.c
34305 --- linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_out.c 2011-03-27 14:31:47.000000000 -0400
34306 +++ linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_out.c 2011-04-17 15:56:46.000000000 -0400
34307 @@ -810,7 +810,7 @@ static int audpp_open(struct inode *inod
34308 return 0;
34309 }
34310
34311 -static struct file_operations audio_fops = {
34312 +static const struct file_operations audio_fops = {
34313 .owner = THIS_MODULE,
34314 .open = audio_open,
34315 .release = audio_release,
34316 @@ -819,7 +819,7 @@ static struct file_operations audio_fops
34317 .unlocked_ioctl = audio_ioctl,
34318 };
34319
34320 -static struct file_operations audpp_fops = {
34321 +static const struct file_operations audpp_fops = {
34322 .owner = THIS_MODULE,
34323 .open = audpp_open,
34324 .unlocked_ioctl = audpp_ioctl,
34325 diff -urNp linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_qcelp.c linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_qcelp.c
34326 --- linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-03-27 14:31:47.000000000 -0400
34327 +++ linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-04-17 15:56:46.000000000 -0400
34328 @@ -816,7 +816,7 @@ err:
34329 return rc;
34330 }
34331
34332 -static struct file_operations audio_qcelp_fops = {
34333 +static const struct file_operations audio_qcelp_fops = {
34334 .owner = THIS_MODULE,
34335 .open = audqcelp_open,
34336 .release = audqcelp_release,
34337 diff -urNp linux-2.6.32.41/drivers/staging/dream/qdsp5/snd.c linux-2.6.32.41/drivers/staging/dream/qdsp5/snd.c
34338 --- linux-2.6.32.41/drivers/staging/dream/qdsp5/snd.c 2011-03-27 14:31:47.000000000 -0400
34339 +++ linux-2.6.32.41/drivers/staging/dream/qdsp5/snd.c 2011-04-17 15:56:46.000000000 -0400
34340 @@ -242,7 +242,7 @@ err:
34341 return rc;
34342 }
34343
34344 -static struct file_operations snd_fops = {
34345 +static const struct file_operations snd_fops = {
34346 .owner = THIS_MODULE,
34347 .open = snd_open,
34348 .release = snd_release,
34349 diff -urNp linux-2.6.32.41/drivers/staging/dream/smd/smd_qmi.c linux-2.6.32.41/drivers/staging/dream/smd/smd_qmi.c
34350 --- linux-2.6.32.41/drivers/staging/dream/smd/smd_qmi.c 2011-03-27 14:31:47.000000000 -0400
34351 +++ linux-2.6.32.41/drivers/staging/dream/smd/smd_qmi.c 2011-04-17 15:56:46.000000000 -0400
34352 @@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip,
34353 return 0;
34354 }
34355
34356 -static struct file_operations qmi_fops = {
34357 +static const struct file_operations qmi_fops = {
34358 .owner = THIS_MODULE,
34359 .read = qmi_read,
34360 .write = qmi_write,
34361 diff -urNp linux-2.6.32.41/drivers/staging/dream/smd/smd_rpcrouter_device.c linux-2.6.32.41/drivers/staging/dream/smd/smd_rpcrouter_device.c
34362 --- linux-2.6.32.41/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-03-27 14:31:47.000000000 -0400
34363 +++ linux-2.6.32.41/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-04-17 15:56:46.000000000 -0400
34364 @@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file
34365 return rc;
34366 }
34367
34368 -static struct file_operations rpcrouter_server_fops = {
34369 +static const struct file_operations rpcrouter_server_fops = {
34370 .owner = THIS_MODULE,
34371 .open = rpcrouter_open,
34372 .release = rpcrouter_release,
34373 @@ -224,7 +224,7 @@ static struct file_operations rpcrouter_
34374 .unlocked_ioctl = rpcrouter_ioctl,
34375 };
34376
34377 -static struct file_operations rpcrouter_router_fops = {
34378 +static const struct file_operations rpcrouter_router_fops = {
34379 .owner = THIS_MODULE,
34380 .open = rpcrouter_open,
34381 .release = rpcrouter_release,
34382 diff -urNp linux-2.6.32.41/drivers/staging/dst/dcore.c linux-2.6.32.41/drivers/staging/dst/dcore.c
34383 --- linux-2.6.32.41/drivers/staging/dst/dcore.c 2011-03-27 14:31:47.000000000 -0400
34384 +++ linux-2.6.32.41/drivers/staging/dst/dcore.c 2011-04-17 15:56:46.000000000 -0400
34385 @@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendi
34386 return 0;
34387 }
34388
34389 -static struct block_device_operations dst_blk_ops = {
34390 +static const struct block_device_operations dst_blk_ops = {
34391 .open = dst_bdev_open,
34392 .release = dst_bdev_release,
34393 .owner = THIS_MODULE,
34394 @@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(s
34395 n->size = ctl->size;
34396
34397 atomic_set(&n->refcnt, 1);
34398 - atomic_long_set(&n->gen, 0);
34399 + atomic_long_set_unchecked(&n->gen, 0);
34400 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
34401
34402 err = dst_node_sysfs_init(n);
34403 diff -urNp linux-2.6.32.41/drivers/staging/dst/trans.c linux-2.6.32.41/drivers/staging/dst/trans.c
34404 --- linux-2.6.32.41/drivers/staging/dst/trans.c 2011-03-27 14:31:47.000000000 -0400
34405 +++ linux-2.6.32.41/drivers/staging/dst/trans.c 2011-04-17 15:56:46.000000000 -0400
34406 @@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n,
34407 t->error = 0;
34408 t->retries = 0;
34409 atomic_set(&t->refcnt, 1);
34410 - t->gen = atomic_long_inc_return(&n->gen);
34411 + t->gen = atomic_long_inc_return_unchecked(&n->gen);
34412
34413 t->enc = bio_data_dir(bio);
34414 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
34415 diff -urNp linux-2.6.32.41/drivers/staging/et131x/et1310_tx.c linux-2.6.32.41/drivers/staging/et131x/et1310_tx.c
34416 --- linux-2.6.32.41/drivers/staging/et131x/et1310_tx.c 2011-03-27 14:31:47.000000000 -0400
34417 +++ linux-2.6.32.41/drivers/staging/et131x/et1310_tx.c 2011-05-04 17:56:28.000000000 -0400
34418 @@ -710,11 +710,11 @@ inline void et131x_free_send_packet(stru
34419 struct net_device_stats *stats = &etdev->net_stats;
34420
34421 if (pMpTcb->Flags & fMP_DEST_BROAD)
34422 - atomic_inc(&etdev->Stats.brdcstxmt);
34423 + atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
34424 else if (pMpTcb->Flags & fMP_DEST_MULTI)
34425 - atomic_inc(&etdev->Stats.multixmt);
34426 + atomic_inc_unchecked(&etdev->Stats.multixmt);
34427 else
34428 - atomic_inc(&etdev->Stats.unixmt);
34429 + atomic_inc_unchecked(&etdev->Stats.unixmt);
34430
34431 if (pMpTcb->Packet) {
34432 stats->tx_bytes += pMpTcb->Packet->len;
34433 diff -urNp linux-2.6.32.41/drivers/staging/et131x/et131x_adapter.h linux-2.6.32.41/drivers/staging/et131x/et131x_adapter.h
34434 --- linux-2.6.32.41/drivers/staging/et131x/et131x_adapter.h 2011-03-27 14:31:47.000000000 -0400
34435 +++ linux-2.6.32.41/drivers/staging/et131x/et131x_adapter.h 2011-05-04 17:56:28.000000000 -0400
34436 @@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
34437 * operations
34438 */
34439 u32 unircv; /* # multicast packets received */
34440 - atomic_t unixmt; /* # multicast packets for Tx */
34441 + atomic_unchecked_t unixmt; /* # multicast packets for Tx */
34442 u32 multircv; /* # multicast packets received */
34443 - atomic_t multixmt; /* # multicast packets for Tx */
34444 + atomic_unchecked_t multixmt; /* # multicast packets for Tx */
34445 u32 brdcstrcv; /* # broadcast packets received */
34446 - atomic_t brdcstxmt; /* # broadcast packets for Tx */
34447 + atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
34448 u32 norcvbuf; /* # Rx packets discarded */
34449 u32 noxmtbuf; /* # Tx packets discarded */
34450
34451 diff -urNp linux-2.6.32.41/drivers/staging/go7007/go7007-v4l2.c linux-2.6.32.41/drivers/staging/go7007/go7007-v4l2.c
34452 --- linux-2.6.32.41/drivers/staging/go7007/go7007-v4l2.c 2011-03-27 14:31:47.000000000 -0400
34453 +++ linux-2.6.32.41/drivers/staging/go7007/go7007-v4l2.c 2011-04-17 15:56:46.000000000 -0400
34454 @@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_are
34455 return 0;
34456 }
34457
34458 -static struct vm_operations_struct go7007_vm_ops = {
34459 +static const struct vm_operations_struct go7007_vm_ops = {
34460 .open = go7007_vm_open,
34461 .close = go7007_vm_close,
34462 .fault = go7007_vm_fault,
34463 diff -urNp linux-2.6.32.41/drivers/staging/hv/blkvsc_drv.c linux-2.6.32.41/drivers/staging/hv/blkvsc_drv.c
34464 --- linux-2.6.32.41/drivers/staging/hv/blkvsc_drv.c 2011-03-27 14:31:47.000000000 -0400
34465 +++ linux-2.6.32.41/drivers/staging/hv/blkvsc_drv.c 2011-04-17 15:56:46.000000000 -0400
34466 @@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKV
34467 /* The one and only one */
34468 static struct blkvsc_driver_context g_blkvsc_drv;
34469
34470 -static struct block_device_operations block_ops = {
34471 +static const struct block_device_operations block_ops = {
34472 .owner = THIS_MODULE,
34473 .open = blkvsc_open,
34474 .release = blkvsc_release,
34475 diff -urNp linux-2.6.32.41/drivers/staging/hv/Channel.c linux-2.6.32.41/drivers/staging/hv/Channel.c
34476 --- linux-2.6.32.41/drivers/staging/hv/Channel.c 2011-04-17 17:00:52.000000000 -0400
34477 +++ linux-2.6.32.41/drivers/staging/hv/Channel.c 2011-05-04 17:56:28.000000000 -0400
34478 @@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vm
34479
34480 DPRINT_ENTER(VMBUS);
34481
34482 - nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
34483 - atomic_inc(&gVmbusConnection.NextGpadlHandle);
34484 + nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
34485 + atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
34486
34487 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
34488 ASSERT(msgInfo != NULL);
34489 diff -urNp linux-2.6.32.41/drivers/staging/hv/Hv.c linux-2.6.32.41/drivers/staging/hv/Hv.c
34490 --- linux-2.6.32.41/drivers/staging/hv/Hv.c 2011-03-27 14:31:47.000000000 -0400
34491 +++ linux-2.6.32.41/drivers/staging/hv/Hv.c 2011-04-17 15:56:46.000000000 -0400
34492 @@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, vo
34493 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
34494 u32 outputAddressHi = outputAddress >> 32;
34495 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
34496 - volatile void *hypercallPage = gHvContext.HypercallPage;
34497 + volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
34498
34499 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
34500 Control, Input, Output);
34501 diff -urNp linux-2.6.32.41/drivers/staging/hv/vmbus_drv.c linux-2.6.32.41/drivers/staging/hv/vmbus_drv.c
34502 --- linux-2.6.32.41/drivers/staging/hv/vmbus_drv.c 2011-03-27 14:31:47.000000000 -0400
34503 +++ linux-2.6.32.41/drivers/staging/hv/vmbus_drv.c 2011-05-04 17:56:28.000000000 -0400
34504 @@ -532,7 +532,7 @@ static int vmbus_child_device_register(s
34505 to_device_context(root_device_obj);
34506 struct device_context *child_device_ctx =
34507 to_device_context(child_device_obj);
34508 - static atomic_t device_num = ATOMIC_INIT(0);
34509 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
34510
34511 DPRINT_ENTER(VMBUS_DRV);
34512
34513 @@ -541,7 +541,7 @@ static int vmbus_child_device_register(s
34514
34515 /* Set the device name. Otherwise, device_register() will fail. */
34516 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
34517 - atomic_inc_return(&device_num));
34518 + atomic_inc_return_unchecked(&device_num));
34519
34520 /* The new device belongs to this bus */
34521 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
34522 diff -urNp linux-2.6.32.41/drivers/staging/hv/VmbusPrivate.h linux-2.6.32.41/drivers/staging/hv/VmbusPrivate.h
34523 --- linux-2.6.32.41/drivers/staging/hv/VmbusPrivate.h 2011-04-17 17:00:52.000000000 -0400
34524 +++ linux-2.6.32.41/drivers/staging/hv/VmbusPrivate.h 2011-05-04 17:56:28.000000000 -0400
34525 @@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
34526 struct VMBUS_CONNECTION {
34527 enum VMBUS_CONNECT_STATE ConnectState;
34528
34529 - atomic_t NextGpadlHandle;
34530 + atomic_unchecked_t NextGpadlHandle;
34531
34532 /*
34533 * Represents channel interrupts. Each bit position represents a
34534 diff -urNp linux-2.6.32.41/drivers/staging/octeon/ethernet.c linux-2.6.32.41/drivers/staging/octeon/ethernet.c
34535 --- linux-2.6.32.41/drivers/staging/octeon/ethernet.c 2011-03-27 14:31:47.000000000 -0400
34536 +++ linux-2.6.32.41/drivers/staging/octeon/ethernet.c 2011-05-04 17:56:28.000000000 -0400
34537 @@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_
34538 * since the RX tasklet also increments it.
34539 */
34540 #ifdef CONFIG_64BIT
34541 - atomic64_add(rx_status.dropped_packets,
34542 - (atomic64_t *)&priv->stats.rx_dropped);
34543 + atomic64_add_unchecked(rx_status.dropped_packets,
34544 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
34545 #else
34546 - atomic_add(rx_status.dropped_packets,
34547 - (atomic_t *)&priv->stats.rx_dropped);
34548 + atomic_add_unchecked(rx_status.dropped_packets,
34549 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
34550 #endif
34551 }
34552
34553 diff -urNp linux-2.6.32.41/drivers/staging/octeon/ethernet-rx.c linux-2.6.32.41/drivers/staging/octeon/ethernet-rx.c
34554 --- linux-2.6.32.41/drivers/staging/octeon/ethernet-rx.c 2011-03-27 14:31:47.000000000 -0400
34555 +++ linux-2.6.32.41/drivers/staging/octeon/ethernet-rx.c 2011-05-04 17:56:28.000000000 -0400
34556 @@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long un
34557 /* Increment RX stats for virtual ports */
34558 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
34559 #ifdef CONFIG_64BIT
34560 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
34561 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
34562 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
34563 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
34564 #else
34565 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
34566 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
34567 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
34568 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
34569 #endif
34570 }
34571 netif_receive_skb(skb);
34572 @@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long un
34573 dev->name);
34574 */
34575 #ifdef CONFIG_64BIT
34576 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
34577 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
34578 #else
34579 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
34580 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
34581 #endif
34582 dev_kfree_skb_irq(skb);
34583 }
34584 diff -urNp linux-2.6.32.41/drivers/staging/panel/panel.c linux-2.6.32.41/drivers/staging/panel/panel.c
34585 --- linux-2.6.32.41/drivers/staging/panel/panel.c 2011-03-27 14:31:47.000000000 -0400
34586 +++ linux-2.6.32.41/drivers/staging/panel/panel.c 2011-04-17 15:56:46.000000000 -0400
34587 @@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *ino
34588 return 0;
34589 }
34590
34591 -static struct file_operations lcd_fops = {
34592 +static const struct file_operations lcd_fops = {
34593 .write = lcd_write,
34594 .open = lcd_open,
34595 .release = lcd_release,
34596 @@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *
34597 return 0;
34598 }
34599
34600 -static struct file_operations keypad_fops = {
34601 +static const struct file_operations keypad_fops = {
34602 .read = keypad_read, /* read */
34603 .open = keypad_open, /* open */
34604 .release = keypad_release, /* close */
34605 diff -urNp linux-2.6.32.41/drivers/staging/phison/phison.c linux-2.6.32.41/drivers/staging/phison/phison.c
34606 --- linux-2.6.32.41/drivers/staging/phison/phison.c 2011-03-27 14:31:47.000000000 -0400
34607 +++ linux-2.6.32.41/drivers/staging/phison/phison.c 2011-04-17 15:56:46.000000000 -0400
34608 @@ -43,7 +43,7 @@ static struct scsi_host_template phison_
34609 ATA_BMDMA_SHT(DRV_NAME),
34610 };
34611
34612 -static struct ata_port_operations phison_ops = {
34613 +static const struct ata_port_operations phison_ops = {
34614 .inherits = &ata_bmdma_port_ops,
34615 .prereset = phison_pre_reset,
34616 };
34617 diff -urNp linux-2.6.32.41/drivers/staging/poch/poch.c linux-2.6.32.41/drivers/staging/poch/poch.c
34618 --- linux-2.6.32.41/drivers/staging/poch/poch.c 2011-03-27 14:31:47.000000000 -0400
34619 +++ linux-2.6.32.41/drivers/staging/poch/poch.c 2011-04-17 15:56:46.000000000 -0400
34620 @@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inod
34621 return 0;
34622 }
34623
34624 -static struct file_operations poch_fops = {
34625 +static const struct file_operations poch_fops = {
34626 .owner = THIS_MODULE,
34627 .open = poch_open,
34628 .release = poch_release,
34629 diff -urNp linux-2.6.32.41/drivers/staging/pohmelfs/inode.c linux-2.6.32.41/drivers/staging/pohmelfs/inode.c
34630 --- linux-2.6.32.41/drivers/staging/pohmelfs/inode.c 2011-03-27 14:31:47.000000000 -0400
34631 +++ linux-2.6.32.41/drivers/staging/pohmelfs/inode.c 2011-05-04 17:56:20.000000000 -0400
34632 @@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct su
34633 mutex_init(&psb->mcache_lock);
34634 psb->mcache_root = RB_ROOT;
34635 psb->mcache_timeout = msecs_to_jiffies(5000);
34636 - atomic_long_set(&psb->mcache_gen, 0);
34637 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
34638
34639 psb->trans_max_pages = 100;
34640
34641 @@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct su
34642 INIT_LIST_HEAD(&psb->crypto_ready_list);
34643 INIT_LIST_HEAD(&psb->crypto_active_list);
34644
34645 - atomic_set(&psb->trans_gen, 1);
34646 + atomic_set_unchecked(&psb->trans_gen, 1);
34647 atomic_long_set(&psb->total_inodes, 0);
34648
34649 mutex_init(&psb->state_lock);
34650 diff -urNp linux-2.6.32.41/drivers/staging/pohmelfs/mcache.c linux-2.6.32.41/drivers/staging/pohmelfs/mcache.c
34651 --- linux-2.6.32.41/drivers/staging/pohmelfs/mcache.c 2011-03-27 14:31:47.000000000 -0400
34652 +++ linux-2.6.32.41/drivers/staging/pohmelfs/mcache.c 2011-04-17 15:56:46.000000000 -0400
34653 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
34654 m->data = data;
34655 m->start = start;
34656 m->size = size;
34657 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
34658 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
34659
34660 mutex_lock(&psb->mcache_lock);
34661 err = pohmelfs_mcache_insert(psb, m);
34662 diff -urNp linux-2.6.32.41/drivers/staging/pohmelfs/netfs.h linux-2.6.32.41/drivers/staging/pohmelfs/netfs.h
34663 --- linux-2.6.32.41/drivers/staging/pohmelfs/netfs.h 2011-03-27 14:31:47.000000000 -0400
34664 +++ linux-2.6.32.41/drivers/staging/pohmelfs/netfs.h 2011-05-04 17:56:20.000000000 -0400
34665 @@ -570,14 +570,14 @@ struct pohmelfs_config;
34666 struct pohmelfs_sb {
34667 struct rb_root mcache_root;
34668 struct mutex mcache_lock;
34669 - atomic_long_t mcache_gen;
34670 + atomic_long_unchecked_t mcache_gen;
34671 unsigned long mcache_timeout;
34672
34673 unsigned int idx;
34674
34675 unsigned int trans_retries;
34676
34677 - atomic_t trans_gen;
34678 + atomic_unchecked_t trans_gen;
34679
34680 unsigned int crypto_attached_size;
34681 unsigned int crypto_align_size;
34682 diff -urNp linux-2.6.32.41/drivers/staging/pohmelfs/trans.c linux-2.6.32.41/drivers/staging/pohmelfs/trans.c
34683 --- linux-2.6.32.41/drivers/staging/pohmelfs/trans.c 2011-03-27 14:31:47.000000000 -0400
34684 +++ linux-2.6.32.41/drivers/staging/pohmelfs/trans.c 2011-05-04 17:56:28.000000000 -0400
34685 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
34686 int err;
34687 struct netfs_cmd *cmd = t->iovec.iov_base;
34688
34689 - t->gen = atomic_inc_return(&psb->trans_gen);
34690 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
34691
34692 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
34693 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
34694 diff -urNp linux-2.6.32.41/drivers/staging/sep/sep_driver.c linux-2.6.32.41/drivers/staging/sep/sep_driver.c
34695 --- linux-2.6.32.41/drivers/staging/sep/sep_driver.c 2011-03-27 14:31:47.000000000 -0400
34696 +++ linux-2.6.32.41/drivers/staging/sep/sep_driver.c 2011-04-17 15:56:46.000000000 -0400
34697 @@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver
34698 static dev_t sep_devno;
34699
34700 /* the files operations structure of the driver */
34701 -static struct file_operations sep_file_operations = {
34702 +static const struct file_operations sep_file_operations = {
34703 .owner = THIS_MODULE,
34704 .ioctl = sep_ioctl,
34705 .poll = sep_poll,
34706 diff -urNp linux-2.6.32.41/drivers/staging/usbip/vhci.h linux-2.6.32.41/drivers/staging/usbip/vhci.h
34707 --- linux-2.6.32.41/drivers/staging/usbip/vhci.h 2011-03-27 14:31:47.000000000 -0400
34708 +++ linux-2.6.32.41/drivers/staging/usbip/vhci.h 2011-05-04 17:56:28.000000000 -0400
34709 @@ -92,7 +92,7 @@ struct vhci_hcd {
34710 unsigned resuming:1;
34711 unsigned long re_timeout;
34712
34713 - atomic_t seqnum;
34714 + atomic_unchecked_t seqnum;
34715
34716 /*
34717 * NOTE:
34718 diff -urNp linux-2.6.32.41/drivers/staging/usbip/vhci_hcd.c linux-2.6.32.41/drivers/staging/usbip/vhci_hcd.c
34719 --- linux-2.6.32.41/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:01.000000000 -0400
34720 +++ linux-2.6.32.41/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:33.000000000 -0400
34721 @@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
34722 return;
34723 }
34724
34725 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
34726 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
34727 if (priv->seqnum == 0xffff)
34728 usbip_uinfo("seqnum max\n");
34729
34730 @@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_h
34731 return -ENOMEM;
34732 }
34733
34734 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
34735 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
34736 if (unlink->seqnum == 0xffff)
34737 usbip_uinfo("seqnum max\n");
34738
34739 @@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hc
34740 vdev->rhport = rhport;
34741 }
34742
34743 - atomic_set(&vhci->seqnum, 0);
34744 + atomic_set_unchecked(&vhci->seqnum, 0);
34745 spin_lock_init(&vhci->lock);
34746
34747
34748 diff -urNp linux-2.6.32.41/drivers/staging/usbip/vhci_rx.c linux-2.6.32.41/drivers/staging/usbip/vhci_rx.c
34749 --- linux-2.6.32.41/drivers/staging/usbip/vhci_rx.c 2011-04-17 17:00:52.000000000 -0400
34750 +++ linux-2.6.32.41/drivers/staging/usbip/vhci_rx.c 2011-05-04 17:56:28.000000000 -0400
34751 @@ -78,7 +78,7 @@ static void vhci_recv_ret_submit(struct
34752 usbip_uerr("cannot find a urb of seqnum %u\n",
34753 pdu->base.seqnum);
34754 usbip_uinfo("max seqnum %d\n",
34755 - atomic_read(&the_controller->seqnum));
34756 + atomic_read_unchecked(&the_controller->seqnum));
34757 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
34758 return;
34759 }
34760 diff -urNp linux-2.6.32.41/drivers/staging/vme/devices/vme_user.c linux-2.6.32.41/drivers/staging/vme/devices/vme_user.c
34761 --- linux-2.6.32.41/drivers/staging/vme/devices/vme_user.c 2011-03-27 14:31:47.000000000 -0400
34762 +++ linux-2.6.32.41/drivers/staging/vme/devices/vme_user.c 2011-04-17 15:56:46.000000000 -0400
34763 @@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *
34764 static int __init vme_user_probe(struct device *, int, int);
34765 static int __exit vme_user_remove(struct device *, int, int);
34766
34767 -static struct file_operations vme_user_fops = {
34768 +static const struct file_operations vme_user_fops = {
34769 .open = vme_user_open,
34770 .release = vme_user_release,
34771 .read = vme_user_read,
34772 diff -urNp linux-2.6.32.41/drivers/telephony/ixj.c linux-2.6.32.41/drivers/telephony/ixj.c
34773 --- linux-2.6.32.41/drivers/telephony/ixj.c 2011-03-27 14:31:47.000000000 -0400
34774 +++ linux-2.6.32.41/drivers/telephony/ixj.c 2011-05-16 21:46:57.000000000 -0400
34775 @@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
34776 bool mContinue;
34777 char *pIn, *pOut;
34778
34779 + pax_track_stack();
34780 +
34781 if (!SCI_Prepare(j))
34782 return 0;
34783
34784 diff -urNp linux-2.6.32.41/drivers/uio/uio.c linux-2.6.32.41/drivers/uio/uio.c
34785 --- linux-2.6.32.41/drivers/uio/uio.c 2011-03-27 14:31:47.000000000 -0400
34786 +++ linux-2.6.32.41/drivers/uio/uio.c 2011-05-04 17:56:20.000000000 -0400
34787 @@ -23,6 +23,7 @@
34788 #include <linux/string.h>
34789 #include <linux/kobject.h>
34790 #include <linux/uio_driver.h>
34791 +#include <asm/local.h>
34792
34793 #define UIO_MAX_DEVICES 255
34794
34795 @@ -30,10 +31,10 @@ struct uio_device {
34796 struct module *owner;
34797 struct device *dev;
34798 int minor;
34799 - atomic_t event;
34800 + atomic_unchecked_t event;
34801 struct fasync_struct *async_queue;
34802 wait_queue_head_t wait;
34803 - int vma_count;
34804 + local_t vma_count;
34805 struct uio_info *info;
34806 struct kobject *map_dir;
34807 struct kobject *portio_dir;
34808 @@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobj
34809 return entry->show(mem, buf);
34810 }
34811
34812 -static struct sysfs_ops map_sysfs_ops = {
34813 +static const struct sysfs_ops map_sysfs_ops = {
34814 .show = map_type_show,
34815 };
34816
34817 @@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct k
34818 return entry->show(port, buf);
34819 }
34820
34821 -static struct sysfs_ops portio_sysfs_ops = {
34822 +static const struct sysfs_ops portio_sysfs_ops = {
34823 .show = portio_type_show,
34824 };
34825
34826 @@ -255,7 +256,7 @@ static ssize_t show_event(struct device
34827 struct uio_device *idev = dev_get_drvdata(dev);
34828 if (idev)
34829 return sprintf(buf, "%u\n",
34830 - (unsigned int)atomic_read(&idev->event));
34831 + (unsigned int)atomic_read_unchecked(&idev->event));
34832 else
34833 return -ENODEV;
34834 }
34835 @@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *i
34836 {
34837 struct uio_device *idev = info->uio_dev;
34838
34839 - atomic_inc(&idev->event);
34840 + atomic_inc_unchecked(&idev->event);
34841 wake_up_interruptible(&idev->wait);
34842 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
34843 }
34844 @@ -477,7 +478,7 @@ static int uio_open(struct inode *inode,
34845 }
34846
34847 listener->dev = idev;
34848 - listener->event_count = atomic_read(&idev->event);
34849 + listener->event_count = atomic_read_unchecked(&idev->event);
34850 filep->private_data = listener;
34851
34852 if (idev->info->open) {
34853 @@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file
34854 return -EIO;
34855
34856 poll_wait(filep, &idev->wait, wait);
34857 - if (listener->event_count != atomic_read(&idev->event))
34858 + if (listener->event_count != atomic_read_unchecked(&idev->event))
34859 return POLLIN | POLLRDNORM;
34860 return 0;
34861 }
34862 @@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *fil
34863 do {
34864 set_current_state(TASK_INTERRUPTIBLE);
34865
34866 - event_count = atomic_read(&idev->event);
34867 + event_count = atomic_read_unchecked(&idev->event);
34868 if (event_count != listener->event_count) {
34869 if (copy_to_user(buf, &event_count, count))
34870 retval = -EFAULT;
34871 @@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_
34872 static void uio_vma_open(struct vm_area_struct *vma)
34873 {
34874 struct uio_device *idev = vma->vm_private_data;
34875 - idev->vma_count++;
34876 + local_inc(&idev->vma_count);
34877 }
34878
34879 static void uio_vma_close(struct vm_area_struct *vma)
34880 {
34881 struct uio_device *idev = vma->vm_private_data;
34882 - idev->vma_count--;
34883 + local_dec(&idev->vma_count);
34884 }
34885
34886 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
34887 @@ -840,7 +841,7 @@ int __uio_register_device(struct module
34888 idev->owner = owner;
34889 idev->info = info;
34890 init_waitqueue_head(&idev->wait);
34891 - atomic_set(&idev->event, 0);
34892 + atomic_set_unchecked(&idev->event, 0);
34893
34894 ret = uio_get_minor(idev);
34895 if (ret)
34896 diff -urNp linux-2.6.32.41/drivers/usb/atm/usbatm.c linux-2.6.32.41/drivers/usb/atm/usbatm.c
34897 --- linux-2.6.32.41/drivers/usb/atm/usbatm.c 2011-03-27 14:31:47.000000000 -0400
34898 +++ linux-2.6.32.41/drivers/usb/atm/usbatm.c 2011-04-17 15:56:46.000000000 -0400
34899 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(stru
34900 if (printk_ratelimit())
34901 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
34902 __func__, vpi, vci);
34903 - atomic_inc(&vcc->stats->rx_err);
34904 + atomic_inc_unchecked(&vcc->stats->rx_err);
34905 return;
34906 }
34907
34908 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(stru
34909 if (length > ATM_MAX_AAL5_PDU) {
34910 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
34911 __func__, length, vcc);
34912 - atomic_inc(&vcc->stats->rx_err);
34913 + atomic_inc_unchecked(&vcc->stats->rx_err);
34914 goto out;
34915 }
34916
34917 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(stru
34918 if (sarb->len < pdu_length) {
34919 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
34920 __func__, pdu_length, sarb->len, vcc);
34921 - atomic_inc(&vcc->stats->rx_err);
34922 + atomic_inc_unchecked(&vcc->stats->rx_err);
34923 goto out;
34924 }
34925
34926 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
34927 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
34928 __func__, vcc);
34929 - atomic_inc(&vcc->stats->rx_err);
34930 + atomic_inc_unchecked(&vcc->stats->rx_err);
34931 goto out;
34932 }
34933
34934 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(stru
34935 if (printk_ratelimit())
34936 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
34937 __func__, length);
34938 - atomic_inc(&vcc->stats->rx_drop);
34939 + atomic_inc_unchecked(&vcc->stats->rx_drop);
34940 goto out;
34941 }
34942
34943 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(stru
34944
34945 vcc->push(vcc, skb);
34946
34947 - atomic_inc(&vcc->stats->rx);
34948 + atomic_inc_unchecked(&vcc->stats->rx);
34949 out:
34950 skb_trim(sarb, 0);
34951 }
34952 @@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned l
34953 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
34954
34955 usbatm_pop(vcc, skb);
34956 - atomic_inc(&vcc->stats->tx);
34957 + atomic_inc_unchecked(&vcc->stats->tx);
34958
34959 skb = skb_dequeue(&instance->sndqueue);
34960 }
34961 @@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct a
34962 if (!left--)
34963 return sprintf(page,
34964 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
34965 - atomic_read(&atm_dev->stats.aal5.tx),
34966 - atomic_read(&atm_dev->stats.aal5.tx_err),
34967 - atomic_read(&atm_dev->stats.aal5.rx),
34968 - atomic_read(&atm_dev->stats.aal5.rx_err),
34969 - atomic_read(&atm_dev->stats.aal5.rx_drop));
34970 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
34971 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
34972 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
34973 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
34974 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
34975
34976 if (!left--) {
34977 if (instance->disconnected)
34978 diff -urNp linux-2.6.32.41/drivers/usb/class/cdc-wdm.c linux-2.6.32.41/drivers/usb/class/cdc-wdm.c
34979 --- linux-2.6.32.41/drivers/usb/class/cdc-wdm.c 2011-03-27 14:31:47.000000000 -0400
34980 +++ linux-2.6.32.41/drivers/usb/class/cdc-wdm.c 2011-04-17 15:56:46.000000000 -0400
34981 @@ -314,7 +314,7 @@ static ssize_t wdm_write
34982 if (r < 0)
34983 goto outnp;
34984
34985 - if (!file->f_flags && O_NONBLOCK)
34986 + if (!(file->f_flags & O_NONBLOCK))
34987 r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
34988 &desc->flags));
34989 else
34990 diff -urNp linux-2.6.32.41/drivers/usb/core/hcd.c linux-2.6.32.41/drivers/usb/core/hcd.c
34991 --- linux-2.6.32.41/drivers/usb/core/hcd.c 2011-03-27 14:31:47.000000000 -0400
34992 +++ linux-2.6.32.41/drivers/usb/core/hcd.c 2011-04-17 15:56:46.000000000 -0400
34993 @@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutd
34994
34995 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
34996
34997 -struct usb_mon_operations *mon_ops;
34998 +const struct usb_mon_operations *mon_ops;
34999
35000 /*
35001 * The registration is unlocked.
35002 @@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
35003 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
35004 */
35005
35006 -int usb_mon_register (struct usb_mon_operations *ops)
35007 +int usb_mon_register (const struct usb_mon_operations *ops)
35008 {
35009
35010 if (mon_ops)
35011 diff -urNp linux-2.6.32.41/drivers/usb/core/hcd.h linux-2.6.32.41/drivers/usb/core/hcd.h
35012 --- linux-2.6.32.41/drivers/usb/core/hcd.h 2011-03-27 14:31:47.000000000 -0400
35013 +++ linux-2.6.32.41/drivers/usb/core/hcd.h 2011-04-17 15:56:46.000000000 -0400
35014 @@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) {
35015 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
35016
35017 struct usb_mon_operations {
35018 - void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
35019 - void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
35020 - void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
35021 + void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
35022 + void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
35023 + void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
35024 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
35025 };
35026
35027 -extern struct usb_mon_operations *mon_ops;
35028 +extern const struct usb_mon_operations *mon_ops;
35029
35030 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
35031 {
35032 @@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(s
35033 (*mon_ops->urb_complete)(bus, urb, status);
35034 }
35035
35036 -int usb_mon_register(struct usb_mon_operations *ops);
35037 +int usb_mon_register(const struct usb_mon_operations *ops);
35038 void usb_mon_deregister(void);
35039
35040 #else
35041 diff -urNp linux-2.6.32.41/drivers/usb/core/message.c linux-2.6.32.41/drivers/usb/core/message.c
35042 --- linux-2.6.32.41/drivers/usb/core/message.c 2011-03-27 14:31:47.000000000 -0400
35043 +++ linux-2.6.32.41/drivers/usb/core/message.c 2011-04-17 15:56:46.000000000 -0400
35044 @@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device
35045 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
35046 if (buf) {
35047 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
35048 - if (len > 0) {
35049 - smallbuf = kmalloc(++len, GFP_NOIO);
35050 + if (len++ > 0) {
35051 + smallbuf = kmalloc(len, GFP_NOIO);
35052 if (!smallbuf)
35053 return buf;
35054 memcpy(smallbuf, buf, len);
35055 diff -urNp linux-2.6.32.41/drivers/usb/misc/appledisplay.c linux-2.6.32.41/drivers/usb/misc/appledisplay.c
35056 --- linux-2.6.32.41/drivers/usb/misc/appledisplay.c 2011-03-27 14:31:47.000000000 -0400
35057 +++ linux-2.6.32.41/drivers/usb/misc/appledisplay.c 2011-04-17 15:56:46.000000000 -0400
35058 @@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightnes
35059 return pdata->msgdata[1];
35060 }
35061
35062 -static struct backlight_ops appledisplay_bl_data = {
35063 +static const struct backlight_ops appledisplay_bl_data = {
35064 .get_brightness = appledisplay_bl_get_brightness,
35065 .update_status = appledisplay_bl_update_status,
35066 };
35067 diff -urNp linux-2.6.32.41/drivers/usb/mon/mon_main.c linux-2.6.32.41/drivers/usb/mon/mon_main.c
35068 --- linux-2.6.32.41/drivers/usb/mon/mon_main.c 2011-03-27 14:31:47.000000000 -0400
35069 +++ linux-2.6.32.41/drivers/usb/mon/mon_main.c 2011-04-17 15:56:46.000000000 -0400
35070 @@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
35071 /*
35072 * Ops
35073 */
35074 -static struct usb_mon_operations mon_ops_0 = {
35075 +static const struct usb_mon_operations mon_ops_0 = {
35076 .urb_submit = mon_submit,
35077 .urb_submit_error = mon_submit_error,
35078 .urb_complete = mon_complete,
35079 diff -urNp linux-2.6.32.41/drivers/usb/wusbcore/wa-hc.h linux-2.6.32.41/drivers/usb/wusbcore/wa-hc.h
35080 --- linux-2.6.32.41/drivers/usb/wusbcore/wa-hc.h 2011-03-27 14:31:47.000000000 -0400
35081 +++ linux-2.6.32.41/drivers/usb/wusbcore/wa-hc.h 2011-05-04 17:56:28.000000000 -0400
35082 @@ -192,7 +192,7 @@ struct wahc {
35083 struct list_head xfer_delayed_list;
35084 spinlock_t xfer_list_lock;
35085 struct work_struct xfer_work;
35086 - atomic_t xfer_id_count;
35087 + atomic_unchecked_t xfer_id_count;
35088 };
35089
35090
35091 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
35092 INIT_LIST_HEAD(&wa->xfer_delayed_list);
35093 spin_lock_init(&wa->xfer_list_lock);
35094 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
35095 - atomic_set(&wa->xfer_id_count, 1);
35096 + atomic_set_unchecked(&wa->xfer_id_count, 1);
35097 }
35098
35099 /**
35100 diff -urNp linux-2.6.32.41/drivers/usb/wusbcore/wa-xfer.c linux-2.6.32.41/drivers/usb/wusbcore/wa-xfer.c
35101 --- linux-2.6.32.41/drivers/usb/wusbcore/wa-xfer.c 2011-03-27 14:31:47.000000000 -0400
35102 +++ linux-2.6.32.41/drivers/usb/wusbcore/wa-xfer.c 2011-05-04 17:56:28.000000000 -0400
35103 @@ -293,7 +293,7 @@ out:
35104 */
35105 static void wa_xfer_id_init(struct wa_xfer *xfer)
35106 {
35107 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
35108 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
35109 }
35110
35111 /*
35112 diff -urNp linux-2.6.32.41/drivers/uwb/wlp/messages.c linux-2.6.32.41/drivers/uwb/wlp/messages.c
35113 --- linux-2.6.32.41/drivers/uwb/wlp/messages.c 2011-03-27 14:31:47.000000000 -0400
35114 +++ linux-2.6.32.41/drivers/uwb/wlp/messages.c 2011-04-17 15:56:46.000000000 -0400
35115 @@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct
35116 size_t len = skb->len;
35117 size_t used;
35118 ssize_t result;
35119 - struct wlp_nonce enonce, rnonce;
35120 + struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
35121 enum wlp_assc_error assc_err;
35122 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
35123 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
35124 diff -urNp linux-2.6.32.41/drivers/uwb/wlp/sysfs.c linux-2.6.32.41/drivers/uwb/wlp/sysfs.c
35125 --- linux-2.6.32.41/drivers/uwb/wlp/sysfs.c 2011-03-27 14:31:47.000000000 -0400
35126 +++ linux-2.6.32.41/drivers/uwb/wlp/sysfs.c 2011-04-17 15:56:46.000000000 -0400
35127 @@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobjec
35128 return ret;
35129 }
35130
35131 -static
35132 -struct sysfs_ops wss_sysfs_ops = {
35133 +static const struct sysfs_ops wss_sysfs_ops = {
35134 .show = wlp_wss_attr_show,
35135 .store = wlp_wss_attr_store,
35136 };
35137 diff -urNp linux-2.6.32.41/drivers/video/atmel_lcdfb.c linux-2.6.32.41/drivers/video/atmel_lcdfb.c
35138 --- linux-2.6.32.41/drivers/video/atmel_lcdfb.c 2011-03-27 14:31:47.000000000 -0400
35139 +++ linux-2.6.32.41/drivers/video/atmel_lcdfb.c 2011-04-17 15:56:46.000000000 -0400
35140 @@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struc
35141 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
35142 }
35143
35144 -static struct backlight_ops atmel_lcdc_bl_ops = {
35145 +static const struct backlight_ops atmel_lcdc_bl_ops = {
35146 .update_status = atmel_bl_update_status,
35147 .get_brightness = atmel_bl_get_brightness,
35148 };
35149 diff -urNp linux-2.6.32.41/drivers/video/aty/aty128fb.c linux-2.6.32.41/drivers/video/aty/aty128fb.c
35150 --- linux-2.6.32.41/drivers/video/aty/aty128fb.c 2011-03-27 14:31:47.000000000 -0400
35151 +++ linux-2.6.32.41/drivers/video/aty/aty128fb.c 2011-04-17 15:56:46.000000000 -0400
35152 @@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(stru
35153 return bd->props.brightness;
35154 }
35155
35156 -static struct backlight_ops aty128_bl_data = {
35157 +static const struct backlight_ops aty128_bl_data = {
35158 .get_brightness = aty128_bl_get_brightness,
35159 .update_status = aty128_bl_update_status,
35160 };
35161 diff -urNp linux-2.6.32.41/drivers/video/aty/atyfb_base.c linux-2.6.32.41/drivers/video/aty/atyfb_base.c
35162 --- linux-2.6.32.41/drivers/video/aty/atyfb_base.c 2011-03-27 14:31:47.000000000 -0400
35163 +++ linux-2.6.32.41/drivers/video/aty/atyfb_base.c 2011-04-17 15:56:46.000000000 -0400
35164 @@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct
35165 return bd->props.brightness;
35166 }
35167
35168 -static struct backlight_ops aty_bl_data = {
35169 +static const struct backlight_ops aty_bl_data = {
35170 .get_brightness = aty_bl_get_brightness,
35171 .update_status = aty_bl_update_status,
35172 };
35173 diff -urNp linux-2.6.32.41/drivers/video/aty/radeon_backlight.c linux-2.6.32.41/drivers/video/aty/radeon_backlight.c
35174 --- linux-2.6.32.41/drivers/video/aty/radeon_backlight.c 2011-03-27 14:31:47.000000000 -0400
35175 +++ linux-2.6.32.41/drivers/video/aty/radeon_backlight.c 2011-04-17 15:56:46.000000000 -0400
35176 @@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(stru
35177 return bd->props.brightness;
35178 }
35179
35180 -static struct backlight_ops radeon_bl_data = {
35181 +static const struct backlight_ops radeon_bl_data = {
35182 .get_brightness = radeon_bl_get_brightness,
35183 .update_status = radeon_bl_update_status,
35184 };
35185 diff -urNp linux-2.6.32.41/drivers/video/backlight/adp5520_bl.c linux-2.6.32.41/drivers/video/backlight/adp5520_bl.c
35186 --- linux-2.6.32.41/drivers/video/backlight/adp5520_bl.c 2011-03-27 14:31:47.000000000 -0400
35187 +++ linux-2.6.32.41/drivers/video/backlight/adp5520_bl.c 2011-04-17 15:56:46.000000000 -0400
35188 @@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(str
35189 return error ? data->current_brightness : reg_val;
35190 }
35191
35192 -static struct backlight_ops adp5520_bl_ops = {
35193 +static const struct backlight_ops adp5520_bl_ops = {
35194 .update_status = adp5520_bl_update_status,
35195 .get_brightness = adp5520_bl_get_brightness,
35196 };
35197 diff -urNp linux-2.6.32.41/drivers/video/backlight/adx_bl.c linux-2.6.32.41/drivers/video/backlight/adx_bl.c
35198 --- linux-2.6.32.41/drivers/video/backlight/adx_bl.c 2011-03-27 14:31:47.000000000 -0400
35199 +++ linux-2.6.32.41/drivers/video/backlight/adx_bl.c 2011-04-17 15:56:46.000000000 -0400
35200 @@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct
35201 return 1;
35202 }
35203
35204 -static struct backlight_ops adx_backlight_ops = {
35205 +static const struct backlight_ops adx_backlight_ops = {
35206 .options = 0,
35207 .update_status = adx_backlight_update_status,
35208 .get_brightness = adx_backlight_get_brightness,
35209 diff -urNp linux-2.6.32.41/drivers/video/backlight/atmel-pwm-bl.c linux-2.6.32.41/drivers/video/backlight/atmel-pwm-bl.c
35210 --- linux-2.6.32.41/drivers/video/backlight/atmel-pwm-bl.c 2011-03-27 14:31:47.000000000 -0400
35211 +++ linux-2.6.32.41/drivers/video/backlight/atmel-pwm-bl.c 2011-04-17 15:56:46.000000000 -0400
35212 @@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct
35213 return pwm_channel_enable(&pwmbl->pwmc);
35214 }
35215
35216 -static struct backlight_ops atmel_pwm_bl_ops = {
35217 +static const struct backlight_ops atmel_pwm_bl_ops = {
35218 .get_brightness = atmel_pwm_bl_get_intensity,
35219 .update_status = atmel_pwm_bl_set_intensity,
35220 };
35221 diff -urNp linux-2.6.32.41/drivers/video/backlight/backlight.c linux-2.6.32.41/drivers/video/backlight/backlight.c
35222 --- linux-2.6.32.41/drivers/video/backlight/backlight.c 2011-03-27 14:31:47.000000000 -0400
35223 +++ linux-2.6.32.41/drivers/video/backlight/backlight.c 2011-04-17 15:56:46.000000000 -0400
35224 @@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
35225 * ERR_PTR() or a pointer to the newly allocated device.
35226 */
35227 struct backlight_device *backlight_device_register(const char *name,
35228 - struct device *parent, void *devdata, struct backlight_ops *ops)
35229 + struct device *parent, void *devdata, const struct backlight_ops *ops)
35230 {
35231 struct backlight_device *new_bd;
35232 int rc;
35233 diff -urNp linux-2.6.32.41/drivers/video/backlight/corgi_lcd.c linux-2.6.32.41/drivers/video/backlight/corgi_lcd.c
35234 --- linux-2.6.32.41/drivers/video/backlight/corgi_lcd.c 2011-03-27 14:31:47.000000000 -0400
35235 +++ linux-2.6.32.41/drivers/video/backlight/corgi_lcd.c 2011-04-17 15:56:46.000000000 -0400
35236 @@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit
35237 }
35238 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
35239
35240 -static struct backlight_ops corgi_bl_ops = {
35241 +static const struct backlight_ops corgi_bl_ops = {
35242 .get_brightness = corgi_bl_get_intensity,
35243 .update_status = corgi_bl_update_status,
35244 };
35245 diff -urNp linux-2.6.32.41/drivers/video/backlight/cr_bllcd.c linux-2.6.32.41/drivers/video/backlight/cr_bllcd.c
35246 --- linux-2.6.32.41/drivers/video/backlight/cr_bllcd.c 2011-03-27 14:31:47.000000000 -0400
35247 +++ linux-2.6.32.41/drivers/video/backlight/cr_bllcd.c 2011-04-17 15:56:46.000000000 -0400
35248 @@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(st
35249 return intensity;
35250 }
35251
35252 -static struct backlight_ops cr_backlight_ops = {
35253 +static const struct backlight_ops cr_backlight_ops = {
35254 .get_brightness = cr_backlight_get_intensity,
35255 .update_status = cr_backlight_set_intensity,
35256 };
35257 diff -urNp linux-2.6.32.41/drivers/video/backlight/da903x_bl.c linux-2.6.32.41/drivers/video/backlight/da903x_bl.c
35258 --- linux-2.6.32.41/drivers/video/backlight/da903x_bl.c 2011-03-27 14:31:47.000000000 -0400
35259 +++ linux-2.6.32.41/drivers/video/backlight/da903x_bl.c 2011-04-17 15:56:46.000000000 -0400
35260 @@ -94,7 +94,7 @@ static int da903x_backlight_get_brightne
35261 return data->current_brightness;
35262 }
35263
35264 -static struct backlight_ops da903x_backlight_ops = {
35265 +static const struct backlight_ops da903x_backlight_ops = {
35266 .update_status = da903x_backlight_update_status,
35267 .get_brightness = da903x_backlight_get_brightness,
35268 };
35269 diff -urNp linux-2.6.32.41/drivers/video/backlight/generic_bl.c linux-2.6.32.41/drivers/video/backlight/generic_bl.c
35270 --- linux-2.6.32.41/drivers/video/backlight/generic_bl.c 2011-03-27 14:31:47.000000000 -0400
35271 +++ linux-2.6.32.41/drivers/video/backlight/generic_bl.c 2011-04-17 15:56:46.000000000 -0400
35272 @@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
35273 }
35274 EXPORT_SYMBOL(corgibl_limit_intensity);
35275
35276 -static struct backlight_ops genericbl_ops = {
35277 +static const struct backlight_ops genericbl_ops = {
35278 .options = BL_CORE_SUSPENDRESUME,
35279 .get_brightness = genericbl_get_intensity,
35280 .update_status = genericbl_send_intensity,
35281 diff -urNp linux-2.6.32.41/drivers/video/backlight/hp680_bl.c linux-2.6.32.41/drivers/video/backlight/hp680_bl.c
35282 --- linux-2.6.32.41/drivers/video/backlight/hp680_bl.c 2011-03-27 14:31:47.000000000 -0400
35283 +++ linux-2.6.32.41/drivers/video/backlight/hp680_bl.c 2011-04-17 15:56:46.000000000 -0400
35284 @@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct
35285 return current_intensity;
35286 }
35287
35288 -static struct backlight_ops hp680bl_ops = {
35289 +static const struct backlight_ops hp680bl_ops = {
35290 .get_brightness = hp680bl_get_intensity,
35291 .update_status = hp680bl_set_intensity,
35292 };
35293 diff -urNp linux-2.6.32.41/drivers/video/backlight/jornada720_bl.c linux-2.6.32.41/drivers/video/backlight/jornada720_bl.c
35294 --- linux-2.6.32.41/drivers/video/backlight/jornada720_bl.c 2011-03-27 14:31:47.000000000 -0400
35295 +++ linux-2.6.32.41/drivers/video/backlight/jornada720_bl.c 2011-04-17 15:56:46.000000000 -0400
35296 @@ -93,7 +93,7 @@ out:
35297 return ret;
35298 }
35299
35300 -static struct backlight_ops jornada_bl_ops = {
35301 +static const struct backlight_ops jornada_bl_ops = {
35302 .get_brightness = jornada_bl_get_brightness,
35303 .update_status = jornada_bl_update_status,
35304 .options = BL_CORE_SUSPENDRESUME,
35305 diff -urNp linux-2.6.32.41/drivers/video/backlight/kb3886_bl.c linux-2.6.32.41/drivers/video/backlight/kb3886_bl.c
35306 --- linux-2.6.32.41/drivers/video/backlight/kb3886_bl.c 2011-03-27 14:31:47.000000000 -0400
35307 +++ linux-2.6.32.41/drivers/video/backlight/kb3886_bl.c 2011-04-17 15:56:46.000000000 -0400
35308 @@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct
35309 return kb3886bl_intensity;
35310 }
35311
35312 -static struct backlight_ops kb3886bl_ops = {
35313 +static const struct backlight_ops kb3886bl_ops = {
35314 .get_brightness = kb3886bl_get_intensity,
35315 .update_status = kb3886bl_send_intensity,
35316 };
35317 diff -urNp linux-2.6.32.41/drivers/video/backlight/locomolcd.c linux-2.6.32.41/drivers/video/backlight/locomolcd.c
35318 --- linux-2.6.32.41/drivers/video/backlight/locomolcd.c 2011-03-27 14:31:47.000000000 -0400
35319 +++ linux-2.6.32.41/drivers/video/backlight/locomolcd.c 2011-04-17 15:56:46.000000000 -0400
35320 @@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struc
35321 return current_intensity;
35322 }
35323
35324 -static struct backlight_ops locomobl_data = {
35325 +static const struct backlight_ops locomobl_data = {
35326 .get_brightness = locomolcd_get_intensity,
35327 .update_status = locomolcd_set_intensity,
35328 };
35329 diff -urNp linux-2.6.32.41/drivers/video/backlight/mbp_nvidia_bl.c linux-2.6.32.41/drivers/video/backlight/mbp_nvidia_bl.c
35330 --- linux-2.6.32.41/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:01.000000000 -0400
35331 +++ linux-2.6.32.41/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:33.000000000 -0400
35332 @@ -33,7 +33,7 @@ struct dmi_match_data {
35333 unsigned long iostart;
35334 unsigned long iolen;
35335 /* Backlight operations structure. */
35336 - struct backlight_ops backlight_ops;
35337 + const struct backlight_ops backlight_ops;
35338 };
35339
35340 /* Module parameters. */
35341 diff -urNp linux-2.6.32.41/drivers/video/backlight/omap1_bl.c linux-2.6.32.41/drivers/video/backlight/omap1_bl.c
35342 --- linux-2.6.32.41/drivers/video/backlight/omap1_bl.c 2011-03-27 14:31:47.000000000 -0400
35343 +++ linux-2.6.32.41/drivers/video/backlight/omap1_bl.c 2011-04-17 15:56:46.000000000 -0400
35344 @@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct b
35345 return bl->current_intensity;
35346 }
35347
35348 -static struct backlight_ops omapbl_ops = {
35349 +static const struct backlight_ops omapbl_ops = {
35350 .get_brightness = omapbl_get_intensity,
35351 .update_status = omapbl_update_status,
35352 };
35353 diff -urNp linux-2.6.32.41/drivers/video/backlight/progear_bl.c linux-2.6.32.41/drivers/video/backlight/progear_bl.c
35354 --- linux-2.6.32.41/drivers/video/backlight/progear_bl.c 2011-03-27 14:31:47.000000000 -0400
35355 +++ linux-2.6.32.41/drivers/video/backlight/progear_bl.c 2011-04-17 15:56:46.000000000 -0400
35356 @@ -54,7 +54,7 @@ static int progearbl_get_intensity(struc
35357 return intensity - HW_LEVEL_MIN;
35358 }
35359
35360 -static struct backlight_ops progearbl_ops = {
35361 +static const struct backlight_ops progearbl_ops = {
35362 .get_brightness = progearbl_get_intensity,
35363 .update_status = progearbl_set_intensity,
35364 };
35365 diff -urNp linux-2.6.32.41/drivers/video/backlight/pwm_bl.c linux-2.6.32.41/drivers/video/backlight/pwm_bl.c
35366 --- linux-2.6.32.41/drivers/video/backlight/pwm_bl.c 2011-03-27 14:31:47.000000000 -0400
35367 +++ linux-2.6.32.41/drivers/video/backlight/pwm_bl.c 2011-04-17 15:56:46.000000000 -0400
35368 @@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(
35369 return bl->props.brightness;
35370 }
35371
35372 -static struct backlight_ops pwm_backlight_ops = {
35373 +static const struct backlight_ops pwm_backlight_ops = {
35374 .update_status = pwm_backlight_update_status,
35375 .get_brightness = pwm_backlight_get_brightness,
35376 };
35377 diff -urNp linux-2.6.32.41/drivers/video/backlight/tosa_bl.c linux-2.6.32.41/drivers/video/backlight/tosa_bl.c
35378 --- linux-2.6.32.41/drivers/video/backlight/tosa_bl.c 2011-03-27 14:31:47.000000000 -0400
35379 +++ linux-2.6.32.41/drivers/video/backlight/tosa_bl.c 2011-04-17 15:56:46.000000000 -0400
35380 @@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct
35381 return props->brightness;
35382 }
35383
35384 -static struct backlight_ops bl_ops = {
35385 +static const struct backlight_ops bl_ops = {
35386 .get_brightness = tosa_bl_get_brightness,
35387 .update_status = tosa_bl_update_status,
35388 };
35389 diff -urNp linux-2.6.32.41/drivers/video/backlight/wm831x_bl.c linux-2.6.32.41/drivers/video/backlight/wm831x_bl.c
35390 --- linux-2.6.32.41/drivers/video/backlight/wm831x_bl.c 2011-03-27 14:31:47.000000000 -0400
35391 +++ linux-2.6.32.41/drivers/video/backlight/wm831x_bl.c 2011-04-17 15:56:46.000000000 -0400
35392 @@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightne
35393 return data->current_brightness;
35394 }
35395
35396 -static struct backlight_ops wm831x_backlight_ops = {
35397 +static const struct backlight_ops wm831x_backlight_ops = {
35398 .options = BL_CORE_SUSPENDRESUME,
35399 .update_status = wm831x_backlight_update_status,
35400 .get_brightness = wm831x_backlight_get_brightness,
35401 diff -urNp linux-2.6.32.41/drivers/video/bf54x-lq043fb.c linux-2.6.32.41/drivers/video/bf54x-lq043fb.c
35402 --- linux-2.6.32.41/drivers/video/bf54x-lq043fb.c 2011-03-27 14:31:47.000000000 -0400
35403 +++ linux-2.6.32.41/drivers/video/bf54x-lq043fb.c 2011-04-17 15:56:46.000000000 -0400
35404 @@ -463,7 +463,7 @@ static int bl_get_brightness(struct back
35405 return 0;
35406 }
35407
35408 -static struct backlight_ops bfin_lq043fb_bl_ops = {
35409 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
35410 .get_brightness = bl_get_brightness,
35411 };
35412
35413 diff -urNp linux-2.6.32.41/drivers/video/bfin-t350mcqb-fb.c linux-2.6.32.41/drivers/video/bfin-t350mcqb-fb.c
35414 --- linux-2.6.32.41/drivers/video/bfin-t350mcqb-fb.c 2011-03-27 14:31:47.000000000 -0400
35415 +++ linux-2.6.32.41/drivers/video/bfin-t350mcqb-fb.c 2011-04-17 15:56:46.000000000 -0400
35416 @@ -381,7 +381,7 @@ static int bl_get_brightness(struct back
35417 return 0;
35418 }
35419
35420 -static struct backlight_ops bfin_lq043fb_bl_ops = {
35421 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
35422 .get_brightness = bl_get_brightness,
35423 };
35424
35425 diff -urNp linux-2.6.32.41/drivers/video/fbcmap.c linux-2.6.32.41/drivers/video/fbcmap.c
35426 --- linux-2.6.32.41/drivers/video/fbcmap.c 2011-03-27 14:31:47.000000000 -0400
35427 +++ linux-2.6.32.41/drivers/video/fbcmap.c 2011-04-17 15:56:46.000000000 -0400
35428 @@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user
35429 rc = -ENODEV;
35430 goto out;
35431 }
35432 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
35433 - !info->fbops->fb_setcmap)) {
35434 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
35435 rc = -EINVAL;
35436 goto out1;
35437 }
35438 diff -urNp linux-2.6.32.41/drivers/video/fbmem.c linux-2.6.32.41/drivers/video/fbmem.c
35439 --- linux-2.6.32.41/drivers/video/fbmem.c 2011-03-27 14:31:47.000000000 -0400
35440 +++ linux-2.6.32.41/drivers/video/fbmem.c 2011-05-16 21:46:57.000000000 -0400
35441 @@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_in
35442 image->dx += image->width + 8;
35443 }
35444 } else if (rotate == FB_ROTATE_UD) {
35445 - for (x = 0; x < num && image->dx >= 0; x++) {
35446 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
35447 info->fbops->fb_imageblit(info, image);
35448 image->dx -= image->width + 8;
35449 }
35450 @@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_in
35451 image->dy += image->height + 8;
35452 }
35453 } else if (rotate == FB_ROTATE_CCW) {
35454 - for (x = 0; x < num && image->dy >= 0; x++) {
35455 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
35456 info->fbops->fb_imageblit(info, image);
35457 image->dy -= image->height + 8;
35458 }
35459 @@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct
35460 int flags = info->flags;
35461 int ret = 0;
35462
35463 + pax_track_stack();
35464 +
35465 if (var->activate & FB_ACTIVATE_INV_MODE) {
35466 struct fb_videomode mode1, mode2;
35467
35468 @@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *
35469 void __user *argp = (void __user *)arg;
35470 long ret = 0;
35471
35472 + pax_track_stack();
35473 +
35474 switch (cmd) {
35475 case FBIOGET_VSCREENINFO:
35476 if (!lock_fb_info(info))
35477 @@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *
35478 return -EFAULT;
35479 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
35480 return -EINVAL;
35481 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
35482 + if (con2fb.framebuffer >= FB_MAX)
35483 return -EINVAL;
35484 if (!registered_fb[con2fb.framebuffer])
35485 request_module("fb%d", con2fb.framebuffer);
35486 diff -urNp linux-2.6.32.41/drivers/video/i810/i810_accel.c linux-2.6.32.41/drivers/video/i810/i810_accel.c
35487 --- linux-2.6.32.41/drivers/video/i810/i810_accel.c 2011-03-27 14:31:47.000000000 -0400
35488 +++ linux-2.6.32.41/drivers/video/i810/i810_accel.c 2011-04-17 15:56:46.000000000 -0400
35489 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct
35490 }
35491 }
35492 printk("ringbuffer lockup!!!\n");
35493 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
35494 i810_report_error(mmio);
35495 par->dev_flags |= LOCKUP;
35496 info->pixmap.scan_align = 1;
35497 diff -urNp linux-2.6.32.41/drivers/video/nvidia/nv_backlight.c linux-2.6.32.41/drivers/video/nvidia/nv_backlight.c
35498 --- linux-2.6.32.41/drivers/video/nvidia/nv_backlight.c 2011-03-27 14:31:47.000000000 -0400
35499 +++ linux-2.6.32.41/drivers/video/nvidia/nv_backlight.c 2011-04-17 15:56:46.000000000 -0400
35500 @@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(stru
35501 return bd->props.brightness;
35502 }
35503
35504 -static struct backlight_ops nvidia_bl_ops = {
35505 +static const struct backlight_ops nvidia_bl_ops = {
35506 .get_brightness = nvidia_bl_get_brightness,
35507 .update_status = nvidia_bl_update_status,
35508 };
35509 diff -urNp linux-2.6.32.41/drivers/video/riva/fbdev.c linux-2.6.32.41/drivers/video/riva/fbdev.c
35510 --- linux-2.6.32.41/drivers/video/riva/fbdev.c 2011-03-27 14:31:47.000000000 -0400
35511 +++ linux-2.6.32.41/drivers/video/riva/fbdev.c 2011-04-17 15:56:46.000000000 -0400
35512 @@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct
35513 return bd->props.brightness;
35514 }
35515
35516 -static struct backlight_ops riva_bl_ops = {
35517 +static const struct backlight_ops riva_bl_ops = {
35518 .get_brightness = riva_bl_get_brightness,
35519 .update_status = riva_bl_update_status,
35520 };
35521 diff -urNp linux-2.6.32.41/drivers/video/uvesafb.c linux-2.6.32.41/drivers/video/uvesafb.c
35522 --- linux-2.6.32.41/drivers/video/uvesafb.c 2011-03-27 14:31:47.000000000 -0400
35523 +++ linux-2.6.32.41/drivers/video/uvesafb.c 2011-04-17 15:56:46.000000000 -0400
35524 @@ -18,6 +18,7 @@
35525 #include <linux/fb.h>
35526 #include <linux/io.h>
35527 #include <linux/mutex.h>
35528 +#include <linux/moduleloader.h>
35529 #include <video/edid.h>
35530 #include <video/uvesafb.h>
35531 #ifdef CONFIG_X86
35532 @@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
35533 NULL,
35534 };
35535
35536 - return call_usermodehelper(v86d_path, argv, envp, 1);
35537 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
35538 }
35539
35540 /*
35541 @@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(
35542 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
35543 par->pmi_setpal = par->ypan = 0;
35544 } else {
35545 +
35546 +#ifdef CONFIG_PAX_KERNEXEC
35547 +#ifdef CONFIG_MODULES
35548 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
35549 +#endif
35550 + if (!par->pmi_code) {
35551 + par->pmi_setpal = par->ypan = 0;
35552 + return 0;
35553 + }
35554 +#endif
35555 +
35556 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
35557 + task->t.regs.edi);
35558 +
35559 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35560 + pax_open_kernel();
35561 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
35562 + pax_close_kernel();
35563 +
35564 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
35565 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
35566 +#else
35567 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
35568 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
35569 +#endif
35570 +
35571 printk(KERN_INFO "uvesafb: protected mode interface info at "
35572 "%04x:%04x\n",
35573 (u16)task->t.regs.es, (u16)task->t.regs.edi);
35574 @@ -1799,6 +1822,11 @@ out:
35575 if (par->vbe_modes)
35576 kfree(par->vbe_modes);
35577
35578 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35579 + if (par->pmi_code)
35580 + module_free_exec(NULL, par->pmi_code);
35581 +#endif
35582 +
35583 framebuffer_release(info);
35584 return err;
35585 }
35586 @@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platfor
35587 kfree(par->vbe_state_orig);
35588 if (par->vbe_state_saved)
35589 kfree(par->vbe_state_saved);
35590 +
35591 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35592 + if (par->pmi_code)
35593 + module_free_exec(NULL, par->pmi_code);
35594 +#endif
35595 +
35596 }
35597
35598 framebuffer_release(info);
35599 diff -urNp linux-2.6.32.41/drivers/video/vesafb.c linux-2.6.32.41/drivers/video/vesafb.c
35600 --- linux-2.6.32.41/drivers/video/vesafb.c 2011-03-27 14:31:47.000000000 -0400
35601 +++ linux-2.6.32.41/drivers/video/vesafb.c 2011-04-17 15:56:46.000000000 -0400
35602 @@ -9,6 +9,7 @@
35603 */
35604
35605 #include <linux/module.h>
35606 +#include <linux/moduleloader.h>
35607 #include <linux/kernel.h>
35608 #include <linux/errno.h>
35609 #include <linux/string.h>
35610 @@ -53,8 +54,8 @@ static int vram_remap __initdata; /*
35611 static int vram_total __initdata; /* Set total amount of memory */
35612 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
35613 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
35614 -static void (*pmi_start)(void) __read_mostly;
35615 -static void (*pmi_pal) (void) __read_mostly;
35616 +static void (*pmi_start)(void) __read_only;
35617 +static void (*pmi_pal) (void) __read_only;
35618 static int depth __read_mostly;
35619 static int vga_compat __read_mostly;
35620 /* --------------------------------------------------------------------- */
35621 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
35622 unsigned int size_vmode;
35623 unsigned int size_remap;
35624 unsigned int size_total;
35625 + void *pmi_code = NULL;
35626
35627 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
35628 return -ENODEV;
35629 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
35630 size_remap = size_total;
35631 vesafb_fix.smem_len = size_remap;
35632
35633 -#ifndef __i386__
35634 - screen_info.vesapm_seg = 0;
35635 -#endif
35636 -
35637 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
35638 printk(KERN_WARNING
35639 "vesafb: cannot reserve video memory at 0x%lx\n",
35640 @@ -315,9 +313,21 @@ static int __init vesafb_probe(struct pl
35641 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
35642 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
35643
35644 +#ifdef __i386__
35645 +
35646 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35647 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
35648 + if (!pmi_code)
35649 +#elif !defined(CONFIG_PAX_KERNEXEC)
35650 + if (0)
35651 +#endif
35652 +
35653 +#endif
35654 + screen_info.vesapm_seg = 0;
35655 +
35656 if (screen_info.vesapm_seg) {
35657 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
35658 - screen_info.vesapm_seg,screen_info.vesapm_off);
35659 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
35660 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
35661 }
35662
35663 if (screen_info.vesapm_seg < 0xc000)
35664 @@ -325,9 +335,25 @@ static int __init vesafb_probe(struct pl
35665
35666 if (ypan || pmi_setpal) {
35667 unsigned short *pmi_base;
35668 - pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
35669 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
35670 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
35671 +
35672 + pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
35673 +
35674 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35675 + pax_open_kernel();
35676 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
35677 +#else
35678 + pmi_code = pmi_base;
35679 +#endif
35680 +
35681 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
35682 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
35683 +
35684 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35685 + pmi_start = ktva_ktla(pmi_start);
35686 + pmi_pal = ktva_ktla(pmi_pal);
35687 + pax_close_kernel();
35688 +#endif
35689 +
35690 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
35691 if (pmi_base[3]) {
35692 printk(KERN_INFO "vesafb: pmi: ports = ");
35693 @@ -469,6 +495,11 @@ static int __init vesafb_probe(struct pl
35694 info->node, info->fix.id);
35695 return 0;
35696 err:
35697 +
35698 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35699 + module_free_exec(NULL, pmi_code);
35700 +#endif
35701 +
35702 if (info->screen_base)
35703 iounmap(info->screen_base);
35704 framebuffer_release(info);
35705 diff -urNp linux-2.6.32.41/drivers/xen/sys-hypervisor.c linux-2.6.32.41/drivers/xen/sys-hypervisor.c
35706 --- linux-2.6.32.41/drivers/xen/sys-hypervisor.c 2011-03-27 14:31:47.000000000 -0400
35707 +++ linux-2.6.32.41/drivers/xen/sys-hypervisor.c 2011-04-17 15:56:46.000000000 -0400
35708 @@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct ko
35709 return 0;
35710 }
35711
35712 -static struct sysfs_ops hyp_sysfs_ops = {
35713 +static const struct sysfs_ops hyp_sysfs_ops = {
35714 .show = hyp_sysfs_show,
35715 .store = hyp_sysfs_store,
35716 };
35717 diff -urNp linux-2.6.32.41/fs/9p/vfs_inode.c linux-2.6.32.41/fs/9p/vfs_inode.c
35718 --- linux-2.6.32.41/fs/9p/vfs_inode.c 2011-03-27 14:31:47.000000000 -0400
35719 +++ linux-2.6.32.41/fs/9p/vfs_inode.c 2011-04-17 15:56:46.000000000 -0400
35720 @@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct
35721 static void
35722 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
35723 {
35724 - char *s = nd_get_link(nd);
35725 + const char *s = nd_get_link(nd);
35726
35727 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
35728 IS_ERR(s) ? "<error>" : s);
35729 diff -urNp linux-2.6.32.41/fs/aio.c linux-2.6.32.41/fs/aio.c
35730 --- linux-2.6.32.41/fs/aio.c 2011-03-27 14:31:47.000000000 -0400
35731 +++ linux-2.6.32.41/fs/aio.c 2011-06-04 20:40:21.000000000 -0400
35732 @@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx
35733 size += sizeof(struct io_event) * nr_events;
35734 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
35735
35736 - if (nr_pages < 0)
35737 + if (nr_pages <= 0)
35738 return -EINVAL;
35739
35740 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
35741 @@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ct
35742 struct aio_timeout to;
35743 int retry = 0;
35744
35745 + pax_track_stack();
35746 +
35747 /* needed to zero any padding within an entry (there shouldn't be
35748 * any, but C is fun!
35749 */
35750 @@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *i
35751 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
35752 {
35753 ssize_t ret;
35754 + struct iovec iovstack;
35755
35756 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
35757 kiocb->ki_nbytes, 1,
35758 - &kiocb->ki_inline_vec, &kiocb->ki_iovec);
35759 + &iovstack, &kiocb->ki_iovec);
35760 if (ret < 0)
35761 goto out;
35762
35763 + if (kiocb->ki_iovec == &iovstack) {
35764 + kiocb->ki_inline_vec = iovstack;
35765 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
35766 + }
35767 kiocb->ki_nr_segs = kiocb->ki_nbytes;
35768 kiocb->ki_cur_seg = 0;
35769 /* ki_nbytes/left now reflect bytes instead of segs */
35770 diff -urNp linux-2.6.32.41/fs/attr.c linux-2.6.32.41/fs/attr.c
35771 --- linux-2.6.32.41/fs/attr.c 2011-03-27 14:31:47.000000000 -0400
35772 +++ linux-2.6.32.41/fs/attr.c 2011-04-17 15:56:46.000000000 -0400
35773 @@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode
35774 unsigned long limit;
35775
35776 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
35777 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
35778 if (limit != RLIM_INFINITY && offset > limit)
35779 goto out_sig;
35780 if (offset > inode->i_sb->s_maxbytes)
35781 diff -urNp linux-2.6.32.41/fs/autofs/root.c linux-2.6.32.41/fs/autofs/root.c
35782 --- linux-2.6.32.41/fs/autofs/root.c 2011-03-27 14:31:47.000000000 -0400
35783 +++ linux-2.6.32.41/fs/autofs/root.c 2011-04-17 15:56:46.000000000 -0400
35784 @@ -299,7 +299,8 @@ static int autofs_root_symlink(struct in
35785 set_bit(n,sbi->symlink_bitmap);
35786 sl = &sbi->symlink[n];
35787 sl->len = strlen(symname);
35788 - sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
35789 + slsize = sl->len+1;
35790 + sl->data = kmalloc(slsize, GFP_KERNEL);
35791 if (!sl->data) {
35792 clear_bit(n,sbi->symlink_bitmap);
35793 unlock_kernel();
35794 diff -urNp linux-2.6.32.41/fs/autofs4/symlink.c linux-2.6.32.41/fs/autofs4/symlink.c
35795 --- linux-2.6.32.41/fs/autofs4/symlink.c 2011-03-27 14:31:47.000000000 -0400
35796 +++ linux-2.6.32.41/fs/autofs4/symlink.c 2011-04-17 15:56:46.000000000 -0400
35797 @@ -15,7 +15,7 @@
35798 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
35799 {
35800 struct autofs_info *ino = autofs4_dentry_ino(dentry);
35801 - nd_set_link(nd, (char *)ino->u.symlink);
35802 + nd_set_link(nd, ino->u.symlink);
35803 return NULL;
35804 }
35805
35806 diff -urNp linux-2.6.32.41/fs/befs/linuxvfs.c linux-2.6.32.41/fs/befs/linuxvfs.c
35807 --- linux-2.6.32.41/fs/befs/linuxvfs.c 2011-03-27 14:31:47.000000000 -0400
35808 +++ linux-2.6.32.41/fs/befs/linuxvfs.c 2011-04-17 15:56:46.000000000 -0400
35809 @@ -493,7 +493,7 @@ static void befs_put_link(struct dentry
35810 {
35811 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
35812 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
35813 - char *link = nd_get_link(nd);
35814 + const char *link = nd_get_link(nd);
35815 if (!IS_ERR(link))
35816 kfree(link);
35817 }
35818 diff -urNp linux-2.6.32.41/fs/binfmt_aout.c linux-2.6.32.41/fs/binfmt_aout.c
35819 --- linux-2.6.32.41/fs/binfmt_aout.c 2011-03-27 14:31:47.000000000 -0400
35820 +++ linux-2.6.32.41/fs/binfmt_aout.c 2011-04-17 15:56:46.000000000 -0400
35821 @@ -16,6 +16,7 @@
35822 #include <linux/string.h>
35823 #include <linux/fs.h>
35824 #include <linux/file.h>
35825 +#include <linux/security.h>
35826 #include <linux/stat.h>
35827 #include <linux/fcntl.h>
35828 #include <linux/ptrace.h>
35829 @@ -102,6 +103,8 @@ static int aout_core_dump(long signr, st
35830 #endif
35831 # define START_STACK(u) (u.start_stack)
35832
35833 + memset(&dump, 0, sizeof(dump));
35834 +
35835 fs = get_fs();
35836 set_fs(KERNEL_DS);
35837 has_dumped = 1;
35838 @@ -113,10 +116,12 @@ static int aout_core_dump(long signr, st
35839
35840 /* If the size of the dump file exceeds the rlimit, then see what would happen
35841 if we wrote the stack, but not the data area. */
35842 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
35843 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
35844 dump.u_dsize = 0;
35845
35846 /* Make sure we have enough room to write the stack and data areas. */
35847 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
35848 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
35849 dump.u_ssize = 0;
35850
35851 @@ -146,9 +151,7 @@ static int aout_core_dump(long signr, st
35852 dump_size = dump.u_ssize << PAGE_SHIFT;
35853 DUMP_WRITE(dump_start,dump_size);
35854 }
35855 -/* Finally dump the task struct. Not be used by gdb, but could be useful */
35856 - set_fs(KERNEL_DS);
35857 - DUMP_WRITE(current,sizeof(*current));
35858 +/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
35859 end_coredump:
35860 set_fs(fs);
35861 return has_dumped;
35862 @@ -249,6 +252,8 @@ static int load_aout_binary(struct linux
35863 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
35864 if (rlim >= RLIM_INFINITY)
35865 rlim = ~0;
35866 +
35867 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
35868 if (ex.a_data + ex.a_bss > rlim)
35869 return -ENOMEM;
35870
35871 @@ -277,6 +282,27 @@ static int load_aout_binary(struct linux
35872 install_exec_creds(bprm);
35873 current->flags &= ~PF_FORKNOEXEC;
35874
35875 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
35876 + current->mm->pax_flags = 0UL;
35877 +#endif
35878 +
35879 +#ifdef CONFIG_PAX_PAGEEXEC
35880 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
35881 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
35882 +
35883 +#ifdef CONFIG_PAX_EMUTRAMP
35884 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
35885 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
35886 +#endif
35887 +
35888 +#ifdef CONFIG_PAX_MPROTECT
35889 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
35890 + current->mm->pax_flags |= MF_PAX_MPROTECT;
35891 +#endif
35892 +
35893 + }
35894 +#endif
35895 +
35896 if (N_MAGIC(ex) == OMAGIC) {
35897 unsigned long text_addr, map_size;
35898 loff_t pos;
35899 @@ -349,7 +375,7 @@ static int load_aout_binary(struct linux
35900
35901 down_write(&current->mm->mmap_sem);
35902 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
35903 - PROT_READ | PROT_WRITE | PROT_EXEC,
35904 + PROT_READ | PROT_WRITE,
35905 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
35906 fd_offset + ex.a_text);
35907 up_write(&current->mm->mmap_sem);
35908 diff -urNp linux-2.6.32.41/fs/binfmt_elf.c linux-2.6.32.41/fs/binfmt_elf.c
35909 --- linux-2.6.32.41/fs/binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
35910 +++ linux-2.6.32.41/fs/binfmt_elf.c 2011-05-16 21:46:57.000000000 -0400
35911 @@ -50,6 +50,10 @@ static int elf_core_dump(long signr, str
35912 #define elf_core_dump NULL
35913 #endif
35914
35915 +#ifdef CONFIG_PAX_MPROTECT
35916 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
35917 +#endif
35918 +
35919 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
35920 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
35921 #else
35922 @@ -69,6 +73,11 @@ static struct linux_binfmt elf_format =
35923 .load_binary = load_elf_binary,
35924 .load_shlib = load_elf_library,
35925 .core_dump = elf_core_dump,
35926 +
35927 +#ifdef CONFIG_PAX_MPROTECT
35928 + .handle_mprotect= elf_handle_mprotect,
35929 +#endif
35930 +
35931 .min_coredump = ELF_EXEC_PAGESIZE,
35932 .hasvdso = 1
35933 };
35934 @@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
35935
35936 static int set_brk(unsigned long start, unsigned long end)
35937 {
35938 + unsigned long e = end;
35939 +
35940 start = ELF_PAGEALIGN(start);
35941 end = ELF_PAGEALIGN(end);
35942 if (end > start) {
35943 @@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
35944 if (BAD_ADDR(addr))
35945 return addr;
35946 }
35947 - current->mm->start_brk = current->mm->brk = end;
35948 + current->mm->start_brk = current->mm->brk = e;
35949 return 0;
35950 }
35951
35952 @@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
35953 elf_addr_t __user *u_rand_bytes;
35954 const char *k_platform = ELF_PLATFORM;
35955 const char *k_base_platform = ELF_BASE_PLATFORM;
35956 - unsigned char k_rand_bytes[16];
35957 + u32 k_rand_bytes[4];
35958 int items;
35959 elf_addr_t *elf_info;
35960 int ei_index = 0;
35961 const struct cred *cred = current_cred();
35962 struct vm_area_struct *vma;
35963 + unsigned long saved_auxv[AT_VECTOR_SIZE];
35964 +
35965 + pax_track_stack();
35966
35967 /*
35968 * In some cases (e.g. Hyper-Threading), we want to avoid L1
35969 @@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
35970 * Generate 16 random bytes for userspace PRNG seeding.
35971 */
35972 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
35973 - u_rand_bytes = (elf_addr_t __user *)
35974 - STACK_ALLOC(p, sizeof(k_rand_bytes));
35975 + srandom32(k_rand_bytes[0] ^ random32());
35976 + srandom32(k_rand_bytes[1] ^ random32());
35977 + srandom32(k_rand_bytes[2] ^ random32());
35978 + srandom32(k_rand_bytes[3] ^ random32());
35979 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
35980 + u_rand_bytes = (elf_addr_t __user *) p;
35981 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
35982 return -EFAULT;
35983
35984 @@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
35985 return -EFAULT;
35986 current->mm->env_end = p;
35987
35988 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
35989 +
35990 /* Put the elf_info on the stack in the right place. */
35991 sp = (elf_addr_t __user *)envp + 1;
35992 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
35993 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
35994 return -EFAULT;
35995 return 0;
35996 }
35997 @@ -385,10 +405,10 @@ static unsigned long load_elf_interp(str
35998 {
35999 struct elf_phdr *elf_phdata;
36000 struct elf_phdr *eppnt;
36001 - unsigned long load_addr = 0;
36002 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
36003 int load_addr_set = 0;
36004 unsigned long last_bss = 0, elf_bss = 0;
36005 - unsigned long error = ~0UL;
36006 + unsigned long error = -EINVAL;
36007 unsigned long total_size;
36008 int retval, i, size;
36009
36010 @@ -434,6 +454,11 @@ static unsigned long load_elf_interp(str
36011 goto out_close;
36012 }
36013
36014 +#ifdef CONFIG_PAX_SEGMEXEC
36015 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
36016 + pax_task_size = SEGMEXEC_TASK_SIZE;
36017 +#endif
36018 +
36019 eppnt = elf_phdata;
36020 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
36021 if (eppnt->p_type == PT_LOAD) {
36022 @@ -477,8 +502,8 @@ static unsigned long load_elf_interp(str
36023 k = load_addr + eppnt->p_vaddr;
36024 if (BAD_ADDR(k) ||
36025 eppnt->p_filesz > eppnt->p_memsz ||
36026 - eppnt->p_memsz > TASK_SIZE ||
36027 - TASK_SIZE - eppnt->p_memsz < k) {
36028 + eppnt->p_memsz > pax_task_size ||
36029 + pax_task_size - eppnt->p_memsz < k) {
36030 error = -ENOMEM;
36031 goto out_close;
36032 }
36033 @@ -532,6 +557,194 @@ out:
36034 return error;
36035 }
36036
36037 +#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
36038 +static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
36039 +{
36040 + unsigned long pax_flags = 0UL;
36041 +
36042 +#ifdef CONFIG_PAX_PAGEEXEC
36043 + if (elf_phdata->p_flags & PF_PAGEEXEC)
36044 + pax_flags |= MF_PAX_PAGEEXEC;
36045 +#endif
36046 +
36047 +#ifdef CONFIG_PAX_SEGMEXEC
36048 + if (elf_phdata->p_flags & PF_SEGMEXEC)
36049 + pax_flags |= MF_PAX_SEGMEXEC;
36050 +#endif
36051 +
36052 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36053 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36054 + if (nx_enabled)
36055 + pax_flags &= ~MF_PAX_SEGMEXEC;
36056 + else
36057 + pax_flags &= ~MF_PAX_PAGEEXEC;
36058 + }
36059 +#endif
36060 +
36061 +#ifdef CONFIG_PAX_EMUTRAMP
36062 + if (elf_phdata->p_flags & PF_EMUTRAMP)
36063 + pax_flags |= MF_PAX_EMUTRAMP;
36064 +#endif
36065 +
36066 +#ifdef CONFIG_PAX_MPROTECT
36067 + if (elf_phdata->p_flags & PF_MPROTECT)
36068 + pax_flags |= MF_PAX_MPROTECT;
36069 +#endif
36070 +
36071 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
36072 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
36073 + pax_flags |= MF_PAX_RANDMMAP;
36074 +#endif
36075 +
36076 + return pax_flags;
36077 +}
36078 +#endif
36079 +
36080 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
36081 +static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
36082 +{
36083 + unsigned long pax_flags = 0UL;
36084 +
36085 +#ifdef CONFIG_PAX_PAGEEXEC
36086 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
36087 + pax_flags |= MF_PAX_PAGEEXEC;
36088 +#endif
36089 +
36090 +#ifdef CONFIG_PAX_SEGMEXEC
36091 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
36092 + pax_flags |= MF_PAX_SEGMEXEC;
36093 +#endif
36094 +
36095 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36096 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36097 + if (nx_enabled)
36098 + pax_flags &= ~MF_PAX_SEGMEXEC;
36099 + else
36100 + pax_flags &= ~MF_PAX_PAGEEXEC;
36101 + }
36102 +#endif
36103 +
36104 +#ifdef CONFIG_PAX_EMUTRAMP
36105 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
36106 + pax_flags |= MF_PAX_EMUTRAMP;
36107 +#endif
36108 +
36109 +#ifdef CONFIG_PAX_MPROTECT
36110 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
36111 + pax_flags |= MF_PAX_MPROTECT;
36112 +#endif
36113 +
36114 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
36115 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
36116 + pax_flags |= MF_PAX_RANDMMAP;
36117 +#endif
36118 +
36119 + return pax_flags;
36120 +}
36121 +#endif
36122 +
36123 +#ifdef CONFIG_PAX_EI_PAX
36124 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
36125 +{
36126 + unsigned long pax_flags = 0UL;
36127 +
36128 +#ifdef CONFIG_PAX_PAGEEXEC
36129 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
36130 + pax_flags |= MF_PAX_PAGEEXEC;
36131 +#endif
36132 +
36133 +#ifdef CONFIG_PAX_SEGMEXEC
36134 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
36135 + pax_flags |= MF_PAX_SEGMEXEC;
36136 +#endif
36137 +
36138 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36139 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36140 + if (nx_enabled)
36141 + pax_flags &= ~MF_PAX_SEGMEXEC;
36142 + else
36143 + pax_flags &= ~MF_PAX_PAGEEXEC;
36144 + }
36145 +#endif
36146 +
36147 +#ifdef CONFIG_PAX_EMUTRAMP
36148 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
36149 + pax_flags |= MF_PAX_EMUTRAMP;
36150 +#endif
36151 +
36152 +#ifdef CONFIG_PAX_MPROTECT
36153 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
36154 + pax_flags |= MF_PAX_MPROTECT;
36155 +#endif
36156 +
36157 +#ifdef CONFIG_PAX_ASLR
36158 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
36159 + pax_flags |= MF_PAX_RANDMMAP;
36160 +#endif
36161 +
36162 + return pax_flags;
36163 +}
36164 +#endif
36165 +
36166 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
36167 +static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
36168 +{
36169 + unsigned long pax_flags = 0UL;
36170 +
36171 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
36172 + unsigned long i;
36173 + int found_flags = 0;
36174 +#endif
36175 +
36176 +#ifdef CONFIG_PAX_EI_PAX
36177 + pax_flags = pax_parse_ei_pax(elf_ex);
36178 +#endif
36179 +
36180 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
36181 + for (i = 0UL; i < elf_ex->e_phnum; i++)
36182 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
36183 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
36184 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
36185 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
36186 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
36187 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
36188 + return -EINVAL;
36189 +
36190 +#ifdef CONFIG_PAX_SOFTMODE
36191 + if (pax_softmode)
36192 + pax_flags = pax_parse_softmode(&elf_phdata[i]);
36193 + else
36194 +#endif
36195 +
36196 + pax_flags = pax_parse_hardmode(&elf_phdata[i]);
36197 + found_flags = 1;
36198 + break;
36199 + }
36200 +#endif
36201 +
36202 +#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
36203 + if (found_flags == 0) {
36204 + struct elf_phdr phdr;
36205 + memset(&phdr, 0, sizeof(phdr));
36206 + phdr.p_flags = PF_NOEMUTRAMP;
36207 +#ifdef CONFIG_PAX_SOFTMODE
36208 + if (pax_softmode)
36209 + pax_flags = pax_parse_softmode(&phdr);
36210 + else
36211 +#endif
36212 + pax_flags = pax_parse_hardmode(&phdr);
36213 + }
36214 +#endif
36215 +
36216 +
36217 + if (0 > pax_check_flags(&pax_flags))
36218 + return -EINVAL;
36219 +
36220 + current->mm->pax_flags = pax_flags;
36221 + return 0;
36222 +}
36223 +#endif
36224 +
36225 /*
36226 * These are the functions used to load ELF style executables and shared
36227 * libraries. There is no binary dependent code anywhere else.
36228 @@ -548,6 +761,11 @@ static unsigned long randomize_stack_top
36229 {
36230 unsigned int random_variable = 0;
36231
36232 +#ifdef CONFIG_PAX_RANDUSTACK
36233 + if (randomize_va_space)
36234 + return stack_top - current->mm->delta_stack;
36235 +#endif
36236 +
36237 if ((current->flags & PF_RANDOMIZE) &&
36238 !(current->personality & ADDR_NO_RANDOMIZE)) {
36239 random_variable = get_random_int() & STACK_RND_MASK;
36240 @@ -566,7 +784,7 @@ static int load_elf_binary(struct linux_
36241 unsigned long load_addr = 0, load_bias = 0;
36242 int load_addr_set = 0;
36243 char * elf_interpreter = NULL;
36244 - unsigned long error;
36245 + unsigned long error = 0;
36246 struct elf_phdr *elf_ppnt, *elf_phdata;
36247 unsigned long elf_bss, elf_brk;
36248 int retval, i;
36249 @@ -576,11 +794,11 @@ static int load_elf_binary(struct linux_
36250 unsigned long start_code, end_code, start_data, end_data;
36251 unsigned long reloc_func_desc = 0;
36252 int executable_stack = EXSTACK_DEFAULT;
36253 - unsigned long def_flags = 0;
36254 struct {
36255 struct elfhdr elf_ex;
36256 struct elfhdr interp_elf_ex;
36257 } *loc;
36258 + unsigned long pax_task_size = TASK_SIZE;
36259
36260 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
36261 if (!loc) {
36262 @@ -718,11 +936,80 @@ static int load_elf_binary(struct linux_
36263
36264 /* OK, This is the point of no return */
36265 current->flags &= ~PF_FORKNOEXEC;
36266 - current->mm->def_flags = def_flags;
36267 +
36268 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
36269 + current->mm->pax_flags = 0UL;
36270 +#endif
36271 +
36272 +#ifdef CONFIG_PAX_DLRESOLVE
36273 + current->mm->call_dl_resolve = 0UL;
36274 +#endif
36275 +
36276 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
36277 + current->mm->call_syscall = 0UL;
36278 +#endif
36279 +
36280 +#ifdef CONFIG_PAX_ASLR
36281 + current->mm->delta_mmap = 0UL;
36282 + current->mm->delta_stack = 0UL;
36283 +#endif
36284 +
36285 + current->mm->def_flags = 0;
36286 +
36287 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
36288 + if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
36289 + send_sig(SIGKILL, current, 0);
36290 + goto out_free_dentry;
36291 + }
36292 +#endif
36293 +
36294 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
36295 + pax_set_initial_flags(bprm);
36296 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
36297 + if (pax_set_initial_flags_func)
36298 + (pax_set_initial_flags_func)(bprm);
36299 +#endif
36300 +
36301 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
36302 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
36303 + current->mm->context.user_cs_limit = PAGE_SIZE;
36304 + current->mm->def_flags |= VM_PAGEEXEC;
36305 + }
36306 +#endif
36307 +
36308 +#ifdef CONFIG_PAX_SEGMEXEC
36309 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
36310 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
36311 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
36312 + pax_task_size = SEGMEXEC_TASK_SIZE;
36313 + }
36314 +#endif
36315 +
36316 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
36317 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36318 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
36319 + put_cpu();
36320 + }
36321 +#endif
36322
36323 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
36324 may depend on the personality. */
36325 SET_PERSONALITY(loc->elf_ex);
36326 +
36327 +#ifdef CONFIG_PAX_ASLR
36328 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
36329 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
36330 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
36331 + }
36332 +#endif
36333 +
36334 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
36335 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36336 + executable_stack = EXSTACK_DISABLE_X;
36337 + current->personality &= ~READ_IMPLIES_EXEC;
36338 + } else
36339 +#endif
36340 +
36341 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
36342 current->personality |= READ_IMPLIES_EXEC;
36343
36344 @@ -804,6 +1091,20 @@ static int load_elf_binary(struct linux_
36345 #else
36346 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
36347 #endif
36348 +
36349 +#ifdef CONFIG_PAX_RANDMMAP
36350 + /* PaX: randomize base address at the default exe base if requested */
36351 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
36352 +#ifdef CONFIG_SPARC64
36353 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
36354 +#else
36355 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
36356 +#endif
36357 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
36358 + elf_flags |= MAP_FIXED;
36359 + }
36360 +#endif
36361 +
36362 }
36363
36364 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
36365 @@ -836,9 +1137,9 @@ static int load_elf_binary(struct linux_
36366 * allowed task size. Note that p_filesz must always be
36367 * <= p_memsz so it is only necessary to check p_memsz.
36368 */
36369 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
36370 - elf_ppnt->p_memsz > TASK_SIZE ||
36371 - TASK_SIZE - elf_ppnt->p_memsz < k) {
36372 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
36373 + elf_ppnt->p_memsz > pax_task_size ||
36374 + pax_task_size - elf_ppnt->p_memsz < k) {
36375 /* set_brk can never work. Avoid overflows. */
36376 send_sig(SIGKILL, current, 0);
36377 retval = -EINVAL;
36378 @@ -866,6 +1167,11 @@ static int load_elf_binary(struct linux_
36379 start_data += load_bias;
36380 end_data += load_bias;
36381
36382 +#ifdef CONFIG_PAX_RANDMMAP
36383 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
36384 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
36385 +#endif
36386 +
36387 /* Calling set_brk effectively mmaps the pages that we need
36388 * for the bss and break sections. We must do this before
36389 * mapping in the interpreter, to make sure it doesn't wind
36390 @@ -877,9 +1183,11 @@ static int load_elf_binary(struct linux_
36391 goto out_free_dentry;
36392 }
36393 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
36394 - send_sig(SIGSEGV, current, 0);
36395 - retval = -EFAULT; /* Nobody gets to see this, but.. */
36396 - goto out_free_dentry;
36397 + /*
36398 + * This bss-zeroing can fail if the ELF
36399 + * file specifies odd protections. So
36400 + * we don't check the return value
36401 + */
36402 }
36403
36404 if (elf_interpreter) {
36405 @@ -1112,8 +1420,10 @@ static int dump_seek(struct file *file,
36406 unsigned long n = off;
36407 if (n > PAGE_SIZE)
36408 n = PAGE_SIZE;
36409 - if (!dump_write(file, buf, n))
36410 + if (!dump_write(file, buf, n)) {
36411 + free_page((unsigned long)buf);
36412 return 0;
36413 + }
36414 off -= n;
36415 }
36416 free_page((unsigned long)buf);
36417 @@ -1125,7 +1435,7 @@ static int dump_seek(struct file *file,
36418 * Decide what to dump of a segment, part, all or none.
36419 */
36420 static unsigned long vma_dump_size(struct vm_area_struct *vma,
36421 - unsigned long mm_flags)
36422 + unsigned long mm_flags, long signr)
36423 {
36424 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
36425
36426 @@ -1159,7 +1469,7 @@ static unsigned long vma_dump_size(struc
36427 if (vma->vm_file == NULL)
36428 return 0;
36429
36430 - if (FILTER(MAPPED_PRIVATE))
36431 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
36432 goto whole;
36433
36434 /*
36435 @@ -1255,8 +1565,11 @@ static int writenote(struct memelfnote *
36436 #undef DUMP_WRITE
36437
36438 #define DUMP_WRITE(addr, nr) \
36439 + do { \
36440 + gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
36441 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
36442 - goto end_coredump;
36443 + goto end_coredump; \
36444 + } while (0);
36445
36446 static void fill_elf_header(struct elfhdr *elf, int segs,
36447 u16 machine, u32 flags, u8 osabi)
36448 @@ -1385,9 +1698,9 @@ static void fill_auxv_note(struct memelf
36449 {
36450 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
36451 int i = 0;
36452 - do
36453 + do {
36454 i += 2;
36455 - while (auxv[i - 2] != AT_NULL);
36456 + } while (auxv[i - 2] != AT_NULL);
36457 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
36458 }
36459
36460 @@ -1973,7 +2286,7 @@ static int elf_core_dump(long signr, str
36461 phdr.p_offset = offset;
36462 phdr.p_vaddr = vma->vm_start;
36463 phdr.p_paddr = 0;
36464 - phdr.p_filesz = vma_dump_size(vma, mm_flags);
36465 + phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
36466 phdr.p_memsz = vma->vm_end - vma->vm_start;
36467 offset += phdr.p_filesz;
36468 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
36469 @@ -2006,7 +2319,7 @@ static int elf_core_dump(long signr, str
36470 unsigned long addr;
36471 unsigned long end;
36472
36473 - end = vma->vm_start + vma_dump_size(vma, mm_flags);
36474 + end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
36475
36476 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
36477 struct page *page;
36478 @@ -2015,6 +2328,7 @@ static int elf_core_dump(long signr, str
36479 page = get_dump_page(addr);
36480 if (page) {
36481 void *kaddr = kmap(page);
36482 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
36483 stop = ((size += PAGE_SIZE) > limit) ||
36484 !dump_write(file, kaddr, PAGE_SIZE);
36485 kunmap(page);
36486 @@ -2042,6 +2356,97 @@ out:
36487
36488 #endif /* USE_ELF_CORE_DUMP */
36489
36490 +#ifdef CONFIG_PAX_MPROTECT
36491 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
36492 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
36493 + * we'll remove VM_MAYWRITE for good on RELRO segments.
36494 + *
36495 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
36496 + * basis because we want to allow the common case and not the special ones.
36497 + */
36498 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
36499 +{
36500 + struct elfhdr elf_h;
36501 + struct elf_phdr elf_p;
36502 + unsigned long i;
36503 + unsigned long oldflags;
36504 + bool is_textrel_rw, is_textrel_rx, is_relro;
36505 +
36506 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
36507 + return;
36508 +
36509 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
36510 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
36511 +
36512 +#ifdef CONFIG_PAX_ELFRELOCS
36513 + /* possible TEXTREL */
36514 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
36515 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
36516 +#else
36517 + is_textrel_rw = false;
36518 + is_textrel_rx = false;
36519 +#endif
36520 +
36521 + /* possible RELRO */
36522 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
36523 +
36524 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
36525 + return;
36526 +
36527 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
36528 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
36529 +
36530 +#ifdef CONFIG_PAX_ETEXECRELOCS
36531 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
36532 +#else
36533 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
36534 +#endif
36535 +
36536 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
36537 + !elf_check_arch(&elf_h) ||
36538 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
36539 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
36540 + return;
36541 +
36542 + for (i = 0UL; i < elf_h.e_phnum; i++) {
36543 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
36544 + return;
36545 + switch (elf_p.p_type) {
36546 + case PT_DYNAMIC:
36547 + if (!is_textrel_rw && !is_textrel_rx)
36548 + continue;
36549 + i = 0UL;
36550 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
36551 + elf_dyn dyn;
36552 +
36553 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
36554 + return;
36555 + if (dyn.d_tag == DT_NULL)
36556 + return;
36557 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
36558 + gr_log_textrel(vma);
36559 + if (is_textrel_rw)
36560 + vma->vm_flags |= VM_MAYWRITE;
36561 + else
36562 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
36563 + vma->vm_flags &= ~VM_MAYWRITE;
36564 + return;
36565 + }
36566 + i++;
36567 + }
36568 + return;
36569 +
36570 + case PT_GNU_RELRO:
36571 + if (!is_relro)
36572 + continue;
36573 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
36574 + vma->vm_flags &= ~VM_MAYWRITE;
36575 + return;
36576 + }
36577 + }
36578 +}
36579 +#endif
36580 +
36581 static int __init init_elf_binfmt(void)
36582 {
36583 return register_binfmt(&elf_format);
36584 diff -urNp linux-2.6.32.41/fs/binfmt_flat.c linux-2.6.32.41/fs/binfmt_flat.c
36585 --- linux-2.6.32.41/fs/binfmt_flat.c 2011-03-27 14:31:47.000000000 -0400
36586 +++ linux-2.6.32.41/fs/binfmt_flat.c 2011-04-17 15:56:46.000000000 -0400
36587 @@ -564,7 +564,9 @@ static int load_flat_file(struct linux_b
36588 realdatastart = (unsigned long) -ENOMEM;
36589 printk("Unable to allocate RAM for process data, errno %d\n",
36590 (int)-realdatastart);
36591 + down_write(&current->mm->mmap_sem);
36592 do_munmap(current->mm, textpos, text_len);
36593 + up_write(&current->mm->mmap_sem);
36594 ret = realdatastart;
36595 goto err;
36596 }
36597 @@ -588,8 +590,10 @@ static int load_flat_file(struct linux_b
36598 }
36599 if (IS_ERR_VALUE(result)) {
36600 printk("Unable to read data+bss, errno %d\n", (int)-result);
36601 + down_write(&current->mm->mmap_sem);
36602 do_munmap(current->mm, textpos, text_len);
36603 do_munmap(current->mm, realdatastart, data_len + extra);
36604 + up_write(&current->mm->mmap_sem);
36605 ret = result;
36606 goto err;
36607 }
36608 @@ -658,8 +662,10 @@ static int load_flat_file(struct linux_b
36609 }
36610 if (IS_ERR_VALUE(result)) {
36611 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
36612 + down_write(&current->mm->mmap_sem);
36613 do_munmap(current->mm, textpos, text_len + data_len + extra +
36614 MAX_SHARED_LIBS * sizeof(unsigned long));
36615 + up_write(&current->mm->mmap_sem);
36616 ret = result;
36617 goto err;
36618 }
36619 diff -urNp linux-2.6.32.41/fs/bio.c linux-2.6.32.41/fs/bio.c
36620 --- linux-2.6.32.41/fs/bio.c 2011-03-27 14:31:47.000000000 -0400
36621 +++ linux-2.6.32.41/fs/bio.c 2011-04-17 15:56:46.000000000 -0400
36622 @@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_cr
36623
36624 i = 0;
36625 while (i < bio_slab_nr) {
36626 - struct bio_slab *bslab = &bio_slabs[i];
36627 + bslab = &bio_slabs[i];
36628
36629 if (!bslab->slab && entry == -1)
36630 entry = i;
36631 @@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct b
36632 const int read = bio_data_dir(bio) == READ;
36633 struct bio_map_data *bmd = bio->bi_private;
36634 int i;
36635 - char *p = bmd->sgvecs[0].iov_base;
36636 + char *p = (__force char *)bmd->sgvecs[0].iov_base;
36637
36638 __bio_for_each_segment(bvec, bio, i, 0) {
36639 char *addr = page_address(bvec->bv_page);
36640 diff -urNp linux-2.6.32.41/fs/block_dev.c linux-2.6.32.41/fs/block_dev.c
36641 --- linux-2.6.32.41/fs/block_dev.c 2011-03-27 14:31:47.000000000 -0400
36642 +++ linux-2.6.32.41/fs/block_dev.c 2011-04-17 15:56:46.000000000 -0400
36643 @@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev,
36644 else if (bdev->bd_contains == bdev)
36645 res = 0; /* is a whole device which isn't held */
36646
36647 - else if (bdev->bd_contains->bd_holder == bd_claim)
36648 + else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
36649 res = 0; /* is a partition of a device that is being partitioned */
36650 else if (bdev->bd_contains->bd_holder != NULL)
36651 res = -EBUSY; /* is a partition of a held device */
36652 diff -urNp linux-2.6.32.41/fs/btrfs/ctree.c linux-2.6.32.41/fs/btrfs/ctree.c
36653 --- linux-2.6.32.41/fs/btrfs/ctree.c 2011-03-27 14:31:47.000000000 -0400
36654 +++ linux-2.6.32.41/fs/btrfs/ctree.c 2011-04-17 15:56:46.000000000 -0400
36655 @@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(st
36656 free_extent_buffer(buf);
36657 add_root_to_dirty_list(root);
36658 } else {
36659 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
36660 - parent_start = parent->start;
36661 - else
36662 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
36663 + if (parent)
36664 + parent_start = parent->start;
36665 + else
36666 + parent_start = 0;
36667 + } else
36668 parent_start = 0;
36669
36670 WARN_ON(trans->transid != btrfs_header_generation(parent));
36671 @@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_tran
36672
36673 ret = 0;
36674 if (slot == 0) {
36675 - struct btrfs_disk_key disk_key;
36676 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
36677 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
36678 }
36679 diff -urNp linux-2.6.32.41/fs/btrfs/disk-io.c linux-2.6.32.41/fs/btrfs/disk-io.c
36680 --- linux-2.6.32.41/fs/btrfs/disk-io.c 2011-04-17 17:00:52.000000000 -0400
36681 +++ linux-2.6.32.41/fs/btrfs/disk-io.c 2011-04-17 17:03:11.000000000 -0400
36682 @@ -39,7 +39,7 @@
36683 #include "tree-log.h"
36684 #include "free-space-cache.h"
36685
36686 -static struct extent_io_ops btree_extent_io_ops;
36687 +static const struct extent_io_ops btree_extent_io_ops;
36688 static void end_workqueue_fn(struct btrfs_work *work);
36689 static void free_fs_root(struct btrfs_root *root);
36690
36691 @@ -2607,7 +2607,7 @@ out:
36692 return 0;
36693 }
36694
36695 -static struct extent_io_ops btree_extent_io_ops = {
36696 +static const struct extent_io_ops btree_extent_io_ops = {
36697 .write_cache_pages_lock_hook = btree_lock_page_hook,
36698 .readpage_end_io_hook = btree_readpage_end_io_hook,
36699 .submit_bio_hook = btree_submit_bio_hook,
36700 diff -urNp linux-2.6.32.41/fs/btrfs/extent_io.h linux-2.6.32.41/fs/btrfs/extent_io.h
36701 --- linux-2.6.32.41/fs/btrfs/extent_io.h 2011-03-27 14:31:47.000000000 -0400
36702 +++ linux-2.6.32.41/fs/btrfs/extent_io.h 2011-04-17 15:56:46.000000000 -0400
36703 @@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(s
36704 struct bio *bio, int mirror_num,
36705 unsigned long bio_flags);
36706 struct extent_io_ops {
36707 - int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
36708 + int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
36709 u64 start, u64 end, int *page_started,
36710 unsigned long *nr_written);
36711 - int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
36712 - int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
36713 + int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
36714 + int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
36715 extent_submit_bio_hook_t *submit_bio_hook;
36716 - int (*merge_bio_hook)(struct page *page, unsigned long offset,
36717 + int (* const merge_bio_hook)(struct page *page, unsigned long offset,
36718 size_t size, struct bio *bio,
36719 unsigned long bio_flags);
36720 - int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
36721 - int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
36722 + int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
36723 + int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
36724 u64 start, u64 end,
36725 struct extent_state *state);
36726 - int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
36727 + int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
36728 u64 start, u64 end,
36729 struct extent_state *state);
36730 - int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
36731 + int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
36732 struct extent_state *state);
36733 - int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
36734 + int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
36735 struct extent_state *state, int uptodate);
36736 - int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
36737 + int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
36738 unsigned long old, unsigned long bits);
36739 - int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
36740 + int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
36741 unsigned long bits);
36742 - int (*merge_extent_hook)(struct inode *inode,
36743 + int (* const merge_extent_hook)(struct inode *inode,
36744 struct extent_state *new,
36745 struct extent_state *other);
36746 - int (*split_extent_hook)(struct inode *inode,
36747 + int (* const split_extent_hook)(struct inode *inode,
36748 struct extent_state *orig, u64 split);
36749 - int (*write_cache_pages_lock_hook)(struct page *page);
36750 + int (* const write_cache_pages_lock_hook)(struct page *page);
36751 };
36752
36753 struct extent_io_tree {
36754 @@ -88,7 +88,7 @@ struct extent_io_tree {
36755 u64 dirty_bytes;
36756 spinlock_t lock;
36757 spinlock_t buffer_lock;
36758 - struct extent_io_ops *ops;
36759 + const struct extent_io_ops *ops;
36760 };
36761
36762 struct extent_state {
36763 diff -urNp linux-2.6.32.41/fs/btrfs/extent-tree.c linux-2.6.32.41/fs/btrfs/extent-tree.c
36764 --- linux-2.6.32.41/fs/btrfs/extent-tree.c 2011-03-27 14:31:47.000000000 -0400
36765 +++ linux-2.6.32.41/fs/btrfs/extent-tree.c 2011-06-12 06:39:08.000000000 -0400
36766 @@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(
36767 u64 group_start = group->key.objectid;
36768 new_extents = kmalloc(sizeof(*new_extents),
36769 GFP_NOFS);
36770 + if (!new_extents) {
36771 + ret = -ENOMEM;
36772 + goto out;
36773 + }
36774 nr_extents = 1;
36775 ret = get_new_locations(reloc_inode,
36776 extent_key,
36777 diff -urNp linux-2.6.32.41/fs/btrfs/free-space-cache.c linux-2.6.32.41/fs/btrfs/free-space-cache.c
36778 --- linux-2.6.32.41/fs/btrfs/free-space-cache.c 2011-03-27 14:31:47.000000000 -0400
36779 +++ linux-2.6.32.41/fs/btrfs/free-space-cache.c 2011-04-17 15:56:46.000000000 -0400
36780 @@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
36781
36782 while(1) {
36783 if (entry->bytes < bytes || entry->offset < min_start) {
36784 - struct rb_node *node;
36785 -
36786 node = rb_next(&entry->offset_index);
36787 if (!node)
36788 break;
36789 @@ -1226,7 +1224,7 @@ again:
36790 */
36791 while (entry->bitmap || found_bitmap ||
36792 (!entry->bitmap && entry->bytes < min_bytes)) {
36793 - struct rb_node *node = rb_next(&entry->offset_index);
36794 + node = rb_next(&entry->offset_index);
36795
36796 if (entry->bitmap && entry->bytes > bytes + empty_size) {
36797 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
36798 diff -urNp linux-2.6.32.41/fs/btrfs/inode.c linux-2.6.32.41/fs/btrfs/inode.c
36799 --- linux-2.6.32.41/fs/btrfs/inode.c 2011-03-27 14:31:47.000000000 -0400
36800 +++ linux-2.6.32.41/fs/btrfs/inode.c 2011-06-12 06:39:58.000000000 -0400
36801 @@ -63,7 +63,7 @@ static const struct inode_operations btr
36802 static const struct address_space_operations btrfs_aops;
36803 static const struct address_space_operations btrfs_symlink_aops;
36804 static const struct file_operations btrfs_dir_file_operations;
36805 -static struct extent_io_ops btrfs_extent_io_ops;
36806 +static const struct extent_io_ops btrfs_extent_io_ops;
36807
36808 static struct kmem_cache *btrfs_inode_cachep;
36809 struct kmem_cache *btrfs_trans_handle_cachep;
36810 @@ -925,6 +925,7 @@ static int cow_file_range_async(struct i
36811 1, 0, NULL, GFP_NOFS);
36812 while (start < end) {
36813 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
36814 + BUG_ON(!async_cow);
36815 async_cow->inode = inode;
36816 async_cow->root = root;
36817 async_cow->locked_page = locked_page;
36818 @@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(st
36819 inline_size = btrfs_file_extent_inline_item_len(leaf,
36820 btrfs_item_nr(leaf, path->slots[0]));
36821 tmp = kmalloc(inline_size, GFP_NOFS);
36822 + if (!tmp)
36823 + return -ENOMEM;
36824 ptr = btrfs_file_extent_inline_start(item);
36825
36826 read_extent_buffer(leaf, tmp, ptr, inline_size);
36827 @@ -5410,7 +5413,7 @@ fail:
36828 return -ENOMEM;
36829 }
36830
36831 -static int btrfs_getattr(struct vfsmount *mnt,
36832 +int btrfs_getattr(struct vfsmount *mnt,
36833 struct dentry *dentry, struct kstat *stat)
36834 {
36835 struct inode *inode = dentry->d_inode;
36836 @@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount
36837 return 0;
36838 }
36839
36840 +EXPORT_SYMBOL(btrfs_getattr);
36841 +
36842 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
36843 +{
36844 + return BTRFS_I(inode)->root->anon_super.s_dev;
36845 +}
36846 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
36847 +
36848 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
36849 struct inode *new_dir, struct dentry *new_dentry)
36850 {
36851 @@ -5972,7 +5983,7 @@ static const struct file_operations btrf
36852 .fsync = btrfs_sync_file,
36853 };
36854
36855 -static struct extent_io_ops btrfs_extent_io_ops = {
36856 +static const struct extent_io_ops btrfs_extent_io_ops = {
36857 .fill_delalloc = run_delalloc_range,
36858 .submit_bio_hook = btrfs_submit_bio_hook,
36859 .merge_bio_hook = btrfs_merge_bio_hook,
36860 diff -urNp linux-2.6.32.41/fs/btrfs/relocation.c linux-2.6.32.41/fs/btrfs/relocation.c
36861 --- linux-2.6.32.41/fs/btrfs/relocation.c 2011-03-27 14:31:47.000000000 -0400
36862 +++ linux-2.6.32.41/fs/btrfs/relocation.c 2011-04-17 15:56:46.000000000 -0400
36863 @@ -884,7 +884,7 @@ static int __update_reloc_root(struct bt
36864 }
36865 spin_unlock(&rc->reloc_root_tree.lock);
36866
36867 - BUG_ON((struct btrfs_root *)node->data != root);
36868 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
36869
36870 if (!del) {
36871 spin_lock(&rc->reloc_root_tree.lock);
36872 diff -urNp linux-2.6.32.41/fs/btrfs/sysfs.c linux-2.6.32.41/fs/btrfs/sysfs.c
36873 --- linux-2.6.32.41/fs/btrfs/sysfs.c 2011-03-27 14:31:47.000000000 -0400
36874 +++ linux-2.6.32.41/fs/btrfs/sysfs.c 2011-04-17 15:56:46.000000000 -0400
36875 @@ -164,12 +164,12 @@ static void btrfs_root_release(struct ko
36876 complete(&root->kobj_unregister);
36877 }
36878
36879 -static struct sysfs_ops btrfs_super_attr_ops = {
36880 +static const struct sysfs_ops btrfs_super_attr_ops = {
36881 .show = btrfs_super_attr_show,
36882 .store = btrfs_super_attr_store,
36883 };
36884
36885 -static struct sysfs_ops btrfs_root_attr_ops = {
36886 +static const struct sysfs_ops btrfs_root_attr_ops = {
36887 .show = btrfs_root_attr_show,
36888 .store = btrfs_root_attr_store,
36889 };
36890 diff -urNp linux-2.6.32.41/fs/buffer.c linux-2.6.32.41/fs/buffer.c
36891 --- linux-2.6.32.41/fs/buffer.c 2011-03-27 14:31:47.000000000 -0400
36892 +++ linux-2.6.32.41/fs/buffer.c 2011-04-17 15:56:46.000000000 -0400
36893 @@ -25,6 +25,7 @@
36894 #include <linux/percpu.h>
36895 #include <linux/slab.h>
36896 #include <linux/capability.h>
36897 +#include <linux/security.h>
36898 #include <linux/blkdev.h>
36899 #include <linux/file.h>
36900 #include <linux/quotaops.h>
36901 diff -urNp linux-2.6.32.41/fs/cachefiles/bind.c linux-2.6.32.41/fs/cachefiles/bind.c
36902 --- linux-2.6.32.41/fs/cachefiles/bind.c 2011-03-27 14:31:47.000000000 -0400
36903 +++ linux-2.6.32.41/fs/cachefiles/bind.c 2011-04-17 15:56:46.000000000 -0400
36904 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
36905 args);
36906
36907 /* start by checking things over */
36908 - ASSERT(cache->fstop_percent >= 0 &&
36909 - cache->fstop_percent < cache->fcull_percent &&
36910 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
36911 cache->fcull_percent < cache->frun_percent &&
36912 cache->frun_percent < 100);
36913
36914 - ASSERT(cache->bstop_percent >= 0 &&
36915 - cache->bstop_percent < cache->bcull_percent &&
36916 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
36917 cache->bcull_percent < cache->brun_percent &&
36918 cache->brun_percent < 100);
36919
36920 diff -urNp linux-2.6.32.41/fs/cachefiles/daemon.c linux-2.6.32.41/fs/cachefiles/daemon.c
36921 --- linux-2.6.32.41/fs/cachefiles/daemon.c 2011-03-27 14:31:47.000000000 -0400
36922 +++ linux-2.6.32.41/fs/cachefiles/daemon.c 2011-04-17 15:56:46.000000000 -0400
36923 @@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(s
36924 if (test_bit(CACHEFILES_DEAD, &cache->flags))
36925 return -EIO;
36926
36927 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
36928 + if (datalen > PAGE_SIZE - 1)
36929 return -EOPNOTSUPP;
36930
36931 /* drag the command string into the kernel so we can parse it */
36932 @@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struc
36933 if (args[0] != '%' || args[1] != '\0')
36934 return -EINVAL;
36935
36936 - if (fstop < 0 || fstop >= cache->fcull_percent)
36937 + if (fstop >= cache->fcull_percent)
36938 return cachefiles_daemon_range_error(cache, args);
36939
36940 cache->fstop_percent = fstop;
36941 @@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struc
36942 if (args[0] != '%' || args[1] != '\0')
36943 return -EINVAL;
36944
36945 - if (bstop < 0 || bstop >= cache->bcull_percent)
36946 + if (bstop >= cache->bcull_percent)
36947 return cachefiles_daemon_range_error(cache, args);
36948
36949 cache->bstop_percent = bstop;
36950 diff -urNp linux-2.6.32.41/fs/cachefiles/internal.h linux-2.6.32.41/fs/cachefiles/internal.h
36951 --- linux-2.6.32.41/fs/cachefiles/internal.h 2011-03-27 14:31:47.000000000 -0400
36952 +++ linux-2.6.32.41/fs/cachefiles/internal.h 2011-05-04 17:56:28.000000000 -0400
36953 @@ -56,7 +56,7 @@ struct cachefiles_cache {
36954 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
36955 struct rb_root active_nodes; /* active nodes (can't be culled) */
36956 rwlock_t active_lock; /* lock for active_nodes */
36957 - atomic_t gravecounter; /* graveyard uniquifier */
36958 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
36959 unsigned frun_percent; /* when to stop culling (% files) */
36960 unsigned fcull_percent; /* when to start culling (% files) */
36961 unsigned fstop_percent; /* when to stop allocating (% files) */
36962 @@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struc
36963 * proc.c
36964 */
36965 #ifdef CONFIG_CACHEFILES_HISTOGRAM
36966 -extern atomic_t cachefiles_lookup_histogram[HZ];
36967 -extern atomic_t cachefiles_mkdir_histogram[HZ];
36968 -extern atomic_t cachefiles_create_histogram[HZ];
36969 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
36970 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
36971 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
36972
36973 extern int __init cachefiles_proc_init(void);
36974 extern void cachefiles_proc_cleanup(void);
36975 static inline
36976 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
36977 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
36978 {
36979 unsigned long jif = jiffies - start_jif;
36980 if (jif >= HZ)
36981 jif = HZ - 1;
36982 - atomic_inc(&histogram[jif]);
36983 + atomic_inc_unchecked(&histogram[jif]);
36984 }
36985
36986 #else
36987 diff -urNp linux-2.6.32.41/fs/cachefiles/namei.c linux-2.6.32.41/fs/cachefiles/namei.c
36988 --- linux-2.6.32.41/fs/cachefiles/namei.c 2011-03-27 14:31:47.000000000 -0400
36989 +++ linux-2.6.32.41/fs/cachefiles/namei.c 2011-05-04 17:56:28.000000000 -0400
36990 @@ -250,7 +250,7 @@ try_again:
36991 /* first step is to make up a grave dentry in the graveyard */
36992 sprintf(nbuffer, "%08x%08x",
36993 (uint32_t) get_seconds(),
36994 - (uint32_t) atomic_inc_return(&cache->gravecounter));
36995 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
36996
36997 /* do the multiway lock magic */
36998 trap = lock_rename(cache->graveyard, dir);
36999 diff -urNp linux-2.6.32.41/fs/cachefiles/proc.c linux-2.6.32.41/fs/cachefiles/proc.c
37000 --- linux-2.6.32.41/fs/cachefiles/proc.c 2011-03-27 14:31:47.000000000 -0400
37001 +++ linux-2.6.32.41/fs/cachefiles/proc.c 2011-05-04 17:56:28.000000000 -0400
37002 @@ -14,9 +14,9 @@
37003 #include <linux/seq_file.h>
37004 #include "internal.h"
37005
37006 -atomic_t cachefiles_lookup_histogram[HZ];
37007 -atomic_t cachefiles_mkdir_histogram[HZ];
37008 -atomic_t cachefiles_create_histogram[HZ];
37009 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
37010 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
37011 +atomic_unchecked_t cachefiles_create_histogram[HZ];
37012
37013 /*
37014 * display the latency histogram
37015 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
37016 return 0;
37017 default:
37018 index = (unsigned long) v - 3;
37019 - x = atomic_read(&cachefiles_lookup_histogram[index]);
37020 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
37021 - z = atomic_read(&cachefiles_create_histogram[index]);
37022 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
37023 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
37024 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
37025 if (x == 0 && y == 0 && z == 0)
37026 return 0;
37027
37028 diff -urNp linux-2.6.32.41/fs/cachefiles/rdwr.c linux-2.6.32.41/fs/cachefiles/rdwr.c
37029 --- linux-2.6.32.41/fs/cachefiles/rdwr.c 2011-03-27 14:31:47.000000000 -0400
37030 +++ linux-2.6.32.41/fs/cachefiles/rdwr.c 2011-04-17 15:56:46.000000000 -0400
37031 @@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache
37032 old_fs = get_fs();
37033 set_fs(KERNEL_DS);
37034 ret = file->f_op->write(
37035 - file, (const void __user *) data, len, &pos);
37036 + file, (__force const void __user *) data, len, &pos);
37037 set_fs(old_fs);
37038 kunmap(page);
37039 if (ret != len)
37040 diff -urNp linux-2.6.32.41/fs/cifs/cifs_debug.c linux-2.6.32.41/fs/cifs/cifs_debug.c
37041 --- linux-2.6.32.41/fs/cifs/cifs_debug.c 2011-03-27 14:31:47.000000000 -0400
37042 +++ linux-2.6.32.41/fs/cifs/cifs_debug.c 2011-05-04 17:56:28.000000000 -0400
37043 @@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(str
37044 tcon = list_entry(tmp3,
37045 struct cifsTconInfo,
37046 tcon_list);
37047 - atomic_set(&tcon->num_smbs_sent, 0);
37048 - atomic_set(&tcon->num_writes, 0);
37049 - atomic_set(&tcon->num_reads, 0);
37050 - atomic_set(&tcon->num_oplock_brks, 0);
37051 - atomic_set(&tcon->num_opens, 0);
37052 - atomic_set(&tcon->num_posixopens, 0);
37053 - atomic_set(&tcon->num_posixmkdirs, 0);
37054 - atomic_set(&tcon->num_closes, 0);
37055 - atomic_set(&tcon->num_deletes, 0);
37056 - atomic_set(&tcon->num_mkdirs, 0);
37057 - atomic_set(&tcon->num_rmdirs, 0);
37058 - atomic_set(&tcon->num_renames, 0);
37059 - atomic_set(&tcon->num_t2renames, 0);
37060 - atomic_set(&tcon->num_ffirst, 0);
37061 - atomic_set(&tcon->num_fnext, 0);
37062 - atomic_set(&tcon->num_fclose, 0);
37063 - atomic_set(&tcon->num_hardlinks, 0);
37064 - atomic_set(&tcon->num_symlinks, 0);
37065 - atomic_set(&tcon->num_locks, 0);
37066 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
37067 + atomic_set_unchecked(&tcon->num_writes, 0);
37068 + atomic_set_unchecked(&tcon->num_reads, 0);
37069 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
37070 + atomic_set_unchecked(&tcon->num_opens, 0);
37071 + atomic_set_unchecked(&tcon->num_posixopens, 0);
37072 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
37073 + atomic_set_unchecked(&tcon->num_closes, 0);
37074 + atomic_set_unchecked(&tcon->num_deletes, 0);
37075 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
37076 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
37077 + atomic_set_unchecked(&tcon->num_renames, 0);
37078 + atomic_set_unchecked(&tcon->num_t2renames, 0);
37079 + atomic_set_unchecked(&tcon->num_ffirst, 0);
37080 + atomic_set_unchecked(&tcon->num_fnext, 0);
37081 + atomic_set_unchecked(&tcon->num_fclose, 0);
37082 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
37083 + atomic_set_unchecked(&tcon->num_symlinks, 0);
37084 + atomic_set_unchecked(&tcon->num_locks, 0);
37085 }
37086 }
37087 }
37088 @@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct s
37089 if (tcon->need_reconnect)
37090 seq_puts(m, "\tDISCONNECTED ");
37091 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
37092 - atomic_read(&tcon->num_smbs_sent),
37093 - atomic_read(&tcon->num_oplock_brks));
37094 + atomic_read_unchecked(&tcon->num_smbs_sent),
37095 + atomic_read_unchecked(&tcon->num_oplock_brks));
37096 seq_printf(m, "\nReads: %d Bytes: %lld",
37097 - atomic_read(&tcon->num_reads),
37098 + atomic_read_unchecked(&tcon->num_reads),
37099 (long long)(tcon->bytes_read));
37100 seq_printf(m, "\nWrites: %d Bytes: %lld",
37101 - atomic_read(&tcon->num_writes),
37102 + atomic_read_unchecked(&tcon->num_writes),
37103 (long long)(tcon->bytes_written));
37104 seq_printf(m, "\nFlushes: %d",
37105 - atomic_read(&tcon->num_flushes));
37106 + atomic_read_unchecked(&tcon->num_flushes));
37107 seq_printf(m, "\nLocks: %d HardLinks: %d "
37108 "Symlinks: %d",
37109 - atomic_read(&tcon->num_locks),
37110 - atomic_read(&tcon->num_hardlinks),
37111 - atomic_read(&tcon->num_symlinks));
37112 + atomic_read_unchecked(&tcon->num_locks),
37113 + atomic_read_unchecked(&tcon->num_hardlinks),
37114 + atomic_read_unchecked(&tcon->num_symlinks));
37115 seq_printf(m, "\nOpens: %d Closes: %d "
37116 "Deletes: %d",
37117 - atomic_read(&tcon->num_opens),
37118 - atomic_read(&tcon->num_closes),
37119 - atomic_read(&tcon->num_deletes));
37120 + atomic_read_unchecked(&tcon->num_opens),
37121 + atomic_read_unchecked(&tcon->num_closes),
37122 + atomic_read_unchecked(&tcon->num_deletes));
37123 seq_printf(m, "\nPosix Opens: %d "
37124 "Posix Mkdirs: %d",
37125 - atomic_read(&tcon->num_posixopens),
37126 - atomic_read(&tcon->num_posixmkdirs));
37127 + atomic_read_unchecked(&tcon->num_posixopens),
37128 + atomic_read_unchecked(&tcon->num_posixmkdirs));
37129 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
37130 - atomic_read(&tcon->num_mkdirs),
37131 - atomic_read(&tcon->num_rmdirs));
37132 + atomic_read_unchecked(&tcon->num_mkdirs),
37133 + atomic_read_unchecked(&tcon->num_rmdirs));
37134 seq_printf(m, "\nRenames: %d T2 Renames %d",
37135 - atomic_read(&tcon->num_renames),
37136 - atomic_read(&tcon->num_t2renames));
37137 + atomic_read_unchecked(&tcon->num_renames),
37138 + atomic_read_unchecked(&tcon->num_t2renames));
37139 seq_printf(m, "\nFindFirst: %d FNext %d "
37140 "FClose %d",
37141 - atomic_read(&tcon->num_ffirst),
37142 - atomic_read(&tcon->num_fnext),
37143 - atomic_read(&tcon->num_fclose));
37144 + atomic_read_unchecked(&tcon->num_ffirst),
37145 + atomic_read_unchecked(&tcon->num_fnext),
37146 + atomic_read_unchecked(&tcon->num_fclose));
37147 }
37148 }
37149 }
37150 diff -urNp linux-2.6.32.41/fs/cifs/cifsglob.h linux-2.6.32.41/fs/cifs/cifsglob.h
37151 --- linux-2.6.32.41/fs/cifs/cifsglob.h 2011-03-27 14:31:47.000000000 -0400
37152 +++ linux-2.6.32.41/fs/cifs/cifsglob.h 2011-05-04 17:56:28.000000000 -0400
37153 @@ -252,28 +252,28 @@ struct cifsTconInfo {
37154 __u16 Flags; /* optional support bits */
37155 enum statusEnum tidStatus;
37156 #ifdef CONFIG_CIFS_STATS
37157 - atomic_t num_smbs_sent;
37158 - atomic_t num_writes;
37159 - atomic_t num_reads;
37160 - atomic_t num_flushes;
37161 - atomic_t num_oplock_brks;
37162 - atomic_t num_opens;
37163 - atomic_t num_closes;
37164 - atomic_t num_deletes;
37165 - atomic_t num_mkdirs;
37166 - atomic_t num_posixopens;
37167 - atomic_t num_posixmkdirs;
37168 - atomic_t num_rmdirs;
37169 - atomic_t num_renames;
37170 - atomic_t num_t2renames;
37171 - atomic_t num_ffirst;
37172 - atomic_t num_fnext;
37173 - atomic_t num_fclose;
37174 - atomic_t num_hardlinks;
37175 - atomic_t num_symlinks;
37176 - atomic_t num_locks;
37177 - atomic_t num_acl_get;
37178 - atomic_t num_acl_set;
37179 + atomic_unchecked_t num_smbs_sent;
37180 + atomic_unchecked_t num_writes;
37181 + atomic_unchecked_t num_reads;
37182 + atomic_unchecked_t num_flushes;
37183 + atomic_unchecked_t num_oplock_brks;
37184 + atomic_unchecked_t num_opens;
37185 + atomic_unchecked_t num_closes;
37186 + atomic_unchecked_t num_deletes;
37187 + atomic_unchecked_t num_mkdirs;
37188 + atomic_unchecked_t num_posixopens;
37189 + atomic_unchecked_t num_posixmkdirs;
37190 + atomic_unchecked_t num_rmdirs;
37191 + atomic_unchecked_t num_renames;
37192 + atomic_unchecked_t num_t2renames;
37193 + atomic_unchecked_t num_ffirst;
37194 + atomic_unchecked_t num_fnext;
37195 + atomic_unchecked_t num_fclose;
37196 + atomic_unchecked_t num_hardlinks;
37197 + atomic_unchecked_t num_symlinks;
37198 + atomic_unchecked_t num_locks;
37199 + atomic_unchecked_t num_acl_get;
37200 + atomic_unchecked_t num_acl_set;
37201 #ifdef CONFIG_CIFS_STATS2
37202 unsigned long long time_writes;
37203 unsigned long long time_reads;
37204 @@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const st
37205 }
37206
37207 #ifdef CONFIG_CIFS_STATS
37208 -#define cifs_stats_inc atomic_inc
37209 +#define cifs_stats_inc atomic_inc_unchecked
37210
37211 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
37212 unsigned int bytes)
37213 diff -urNp linux-2.6.32.41/fs/cifs/link.c linux-2.6.32.41/fs/cifs/link.c
37214 --- linux-2.6.32.41/fs/cifs/link.c 2011-03-27 14:31:47.000000000 -0400
37215 +++ linux-2.6.32.41/fs/cifs/link.c 2011-04-17 15:56:46.000000000 -0400
37216 @@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct
37217
37218 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
37219 {
37220 - char *p = nd_get_link(nd);
37221 + const char *p = nd_get_link(nd);
37222 if (!IS_ERR(p))
37223 kfree(p);
37224 }
37225 diff -urNp linux-2.6.32.41/fs/coda/cache.c linux-2.6.32.41/fs/coda/cache.c
37226 --- linux-2.6.32.41/fs/coda/cache.c 2011-03-27 14:31:47.000000000 -0400
37227 +++ linux-2.6.32.41/fs/coda/cache.c 2011-05-04 17:56:28.000000000 -0400
37228 @@ -24,14 +24,14 @@
37229 #include <linux/coda_fs_i.h>
37230 #include <linux/coda_cache.h>
37231
37232 -static atomic_t permission_epoch = ATOMIC_INIT(0);
37233 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
37234
37235 /* replace or extend an acl cache hit */
37236 void coda_cache_enter(struct inode *inode, int mask)
37237 {
37238 struct coda_inode_info *cii = ITOC(inode);
37239
37240 - cii->c_cached_epoch = atomic_read(&permission_epoch);
37241 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
37242 if (cii->c_uid != current_fsuid()) {
37243 cii->c_uid = current_fsuid();
37244 cii->c_cached_perm = mask;
37245 @@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inod
37246 void coda_cache_clear_inode(struct inode *inode)
37247 {
37248 struct coda_inode_info *cii = ITOC(inode);
37249 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
37250 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
37251 }
37252
37253 /* remove all acl caches */
37254 void coda_cache_clear_all(struct super_block *sb)
37255 {
37256 - atomic_inc(&permission_epoch);
37257 + atomic_inc_unchecked(&permission_epoch);
37258 }
37259
37260
37261 @@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode
37262
37263 hit = (mask & cii->c_cached_perm) == mask &&
37264 cii->c_uid == current_fsuid() &&
37265 - cii->c_cached_epoch == atomic_read(&permission_epoch);
37266 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
37267
37268 return hit;
37269 }
37270 diff -urNp linux-2.6.32.41/fs/compat_binfmt_elf.c linux-2.6.32.41/fs/compat_binfmt_elf.c
37271 --- linux-2.6.32.41/fs/compat_binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
37272 +++ linux-2.6.32.41/fs/compat_binfmt_elf.c 2011-04-17 15:56:46.000000000 -0400
37273 @@ -29,10 +29,12 @@
37274 #undef elfhdr
37275 #undef elf_phdr
37276 #undef elf_note
37277 +#undef elf_dyn
37278 #undef elf_addr_t
37279 #define elfhdr elf32_hdr
37280 #define elf_phdr elf32_phdr
37281 #define elf_note elf32_note
37282 +#define elf_dyn Elf32_Dyn
37283 #define elf_addr_t Elf32_Addr
37284
37285 /*
37286 diff -urNp linux-2.6.32.41/fs/compat.c linux-2.6.32.41/fs/compat.c
37287 --- linux-2.6.32.41/fs/compat.c 2011-04-17 17:00:52.000000000 -0400
37288 +++ linux-2.6.32.41/fs/compat.c 2011-05-16 21:46:57.000000000 -0400
37289 @@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
37290
37291 struct compat_readdir_callback {
37292 struct compat_old_linux_dirent __user *dirent;
37293 + struct file * file;
37294 int result;
37295 };
37296
37297 @@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf
37298 buf->result = -EOVERFLOW;
37299 return -EOVERFLOW;
37300 }
37301 +
37302 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
37303 + return 0;
37304 +
37305 buf->result++;
37306 dirent = buf->dirent;
37307 if (!access_ok(VERIFY_WRITE, dirent,
37308 @@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(u
37309
37310 buf.result = 0;
37311 buf.dirent = dirent;
37312 + buf.file = file;
37313
37314 error = vfs_readdir(file, compat_fillonedir, &buf);
37315 if (buf.result)
37316 @@ -899,6 +905,7 @@ struct compat_linux_dirent {
37317 struct compat_getdents_callback {
37318 struct compat_linux_dirent __user *current_dir;
37319 struct compat_linux_dirent __user *previous;
37320 + struct file * file;
37321 int count;
37322 int error;
37323 };
37324 @@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, c
37325 buf->error = -EOVERFLOW;
37326 return -EOVERFLOW;
37327 }
37328 +
37329 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
37330 + return 0;
37331 +
37332 dirent = buf->previous;
37333 if (dirent) {
37334 if (__put_user(offset, &dirent->d_off))
37335 @@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsi
37336 buf.previous = NULL;
37337 buf.count = count;
37338 buf.error = 0;
37339 + buf.file = file;
37340
37341 error = vfs_readdir(file, compat_filldir, &buf);
37342 if (error >= 0)
37343 @@ -987,6 +999,7 @@ out:
37344 struct compat_getdents_callback64 {
37345 struct linux_dirent64 __user *current_dir;
37346 struct linux_dirent64 __user *previous;
37347 + struct file * file;
37348 int count;
37349 int error;
37350 };
37351 @@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf
37352 buf->error = -EINVAL; /* only used if we fail.. */
37353 if (reclen > buf->count)
37354 return -EINVAL;
37355 +
37356 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
37357 + return 0;
37358 +
37359 dirent = buf->previous;
37360
37361 if (dirent) {
37362 @@ -1054,6 +1071,7 @@ asmlinkage long compat_sys_getdents64(un
37363 buf.previous = NULL;
37364 buf.count = count;
37365 buf.error = 0;
37366 + buf.file = file;
37367
37368 error = vfs_readdir(file, compat_filldir64, &buf);
37369 if (error >= 0)
37370 @@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(in
37371 * verify all the pointers
37372 */
37373 ret = -EINVAL;
37374 - if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
37375 + if (nr_segs > UIO_MAXIOV)
37376 goto out;
37377 if (!file->f_op)
37378 goto out;
37379 @@ -1463,6 +1481,11 @@ int compat_do_execve(char * filename,
37380 compat_uptr_t __user *envp,
37381 struct pt_regs * regs)
37382 {
37383 +#ifdef CONFIG_GRKERNSEC
37384 + struct file *old_exec_file;
37385 + struct acl_subject_label *old_acl;
37386 + struct rlimit old_rlim[RLIM_NLIMITS];
37387 +#endif
37388 struct linux_binprm *bprm;
37389 struct file *file;
37390 struct files_struct *displaced;
37391 @@ -1499,6 +1522,19 @@ int compat_do_execve(char * filename,
37392 bprm->filename = filename;
37393 bprm->interp = filename;
37394
37395 + if (gr_process_user_ban()) {
37396 + retval = -EPERM;
37397 + goto out_file;
37398 + }
37399 +
37400 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
37401 + retval = -EAGAIN;
37402 + if (gr_handle_nproc())
37403 + goto out_file;
37404 + retval = -EACCES;
37405 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
37406 + goto out_file;
37407 +
37408 retval = bprm_mm_init(bprm);
37409 if (retval)
37410 goto out_file;
37411 @@ -1528,9 +1564,40 @@ int compat_do_execve(char * filename,
37412 if (retval < 0)
37413 goto out;
37414
37415 + if (!gr_tpe_allow(file)) {
37416 + retval = -EACCES;
37417 + goto out;
37418 + }
37419 +
37420 + if (gr_check_crash_exec(file)) {
37421 + retval = -EACCES;
37422 + goto out;
37423 + }
37424 +
37425 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
37426 +
37427 + gr_handle_exec_args_compat(bprm, argv);
37428 +
37429 +#ifdef CONFIG_GRKERNSEC
37430 + old_acl = current->acl;
37431 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
37432 + old_exec_file = current->exec_file;
37433 + get_file(file);
37434 + current->exec_file = file;
37435 +#endif
37436 +
37437 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
37438 + bprm->unsafe & LSM_UNSAFE_SHARE);
37439 + if (retval < 0)
37440 + goto out_fail;
37441 +
37442 retval = search_binary_handler(bprm, regs);
37443 if (retval < 0)
37444 - goto out;
37445 + goto out_fail;
37446 +#ifdef CONFIG_GRKERNSEC
37447 + if (old_exec_file)
37448 + fput(old_exec_file);
37449 +#endif
37450
37451 /* execve succeeded */
37452 current->fs->in_exec = 0;
37453 @@ -1541,6 +1608,14 @@ int compat_do_execve(char * filename,
37454 put_files_struct(displaced);
37455 return retval;
37456
37457 +out_fail:
37458 +#ifdef CONFIG_GRKERNSEC
37459 + current->acl = old_acl;
37460 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
37461 + fput(current->exec_file);
37462 + current->exec_file = old_exec_file;
37463 +#endif
37464 +
37465 out:
37466 if (bprm->mm) {
37467 acct_arg_size(bprm, 0);
37468 @@ -1711,6 +1786,8 @@ int compat_core_sys_select(int n, compat
37469 struct fdtable *fdt;
37470 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
37471
37472 + pax_track_stack();
37473 +
37474 if (n < 0)
37475 goto out_nofds;
37476
37477 diff -urNp linux-2.6.32.41/fs/compat_ioctl.c linux-2.6.32.41/fs/compat_ioctl.c
37478 --- linux-2.6.32.41/fs/compat_ioctl.c 2011-03-27 14:31:47.000000000 -0400
37479 +++ linux-2.6.32.41/fs/compat_ioctl.c 2011-04-23 12:56:11.000000000 -0400
37480 @@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsi
37481 up = (struct compat_video_spu_palette __user *) arg;
37482 err = get_user(palp, &up->palette);
37483 err |= get_user(length, &up->length);
37484 + if (err)
37485 + return -EFAULT;
37486
37487 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
37488 err = put_user(compat_ptr(palp), &up_native->palette);
37489 diff -urNp linux-2.6.32.41/fs/configfs/dir.c linux-2.6.32.41/fs/configfs/dir.c
37490 --- linux-2.6.32.41/fs/configfs/dir.c 2011-03-27 14:31:47.000000000 -0400
37491 +++ linux-2.6.32.41/fs/configfs/dir.c 2011-05-11 18:25:15.000000000 -0400
37492 @@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file
37493 }
37494 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
37495 struct configfs_dirent *next;
37496 - const char * name;
37497 + const unsigned char * name;
37498 + char d_name[sizeof(next->s_dentry->d_iname)];
37499 int len;
37500
37501 next = list_entry(p, struct configfs_dirent,
37502 @@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file
37503 continue;
37504
37505 name = configfs_get_name(next);
37506 - len = strlen(name);
37507 + if (next->s_dentry && name == next->s_dentry->d_iname) {
37508 + len = next->s_dentry->d_name.len;
37509 + memcpy(d_name, name, len);
37510 + name = d_name;
37511 + } else
37512 + len = strlen(name);
37513 if (next->s_dentry)
37514 ino = next->s_dentry->d_inode->i_ino;
37515 else
37516 diff -urNp linux-2.6.32.41/fs/dcache.c linux-2.6.32.41/fs/dcache.c
37517 --- linux-2.6.32.41/fs/dcache.c 2011-03-27 14:31:47.000000000 -0400
37518 +++ linux-2.6.32.41/fs/dcache.c 2011-04-23 13:32:21.000000000 -0400
37519 @@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
37520
37521 static struct kmem_cache *dentry_cache __read_mostly;
37522
37523 -#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
37524 -
37525 /*
37526 * This is the single most critical data structure when it comes
37527 * to the dcache: the hashtable for lookups. Somebody should try
37528 @@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned lon
37529 mempages -= reserve;
37530
37531 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
37532 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
37533 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
37534
37535 dcache_init();
37536 inode_init();
37537 diff -urNp linux-2.6.32.41/fs/dlm/lockspace.c linux-2.6.32.41/fs/dlm/lockspace.c
37538 --- linux-2.6.32.41/fs/dlm/lockspace.c 2011-03-27 14:31:47.000000000 -0400
37539 +++ linux-2.6.32.41/fs/dlm/lockspace.c 2011-04-17 15:56:46.000000000 -0400
37540 @@ -148,7 +148,7 @@ static void lockspace_kobj_release(struc
37541 kfree(ls);
37542 }
37543
37544 -static struct sysfs_ops dlm_attr_ops = {
37545 +static const struct sysfs_ops dlm_attr_ops = {
37546 .show = dlm_attr_show,
37547 .store = dlm_attr_store,
37548 };
37549 diff -urNp linux-2.6.32.41/fs/ecryptfs/inode.c linux-2.6.32.41/fs/ecryptfs/inode.c
37550 --- linux-2.6.32.41/fs/ecryptfs/inode.c 2011-03-27 14:31:47.000000000 -0400
37551 +++ linux-2.6.32.41/fs/ecryptfs/inode.c 2011-04-17 15:56:46.000000000 -0400
37552 @@ -660,7 +660,7 @@ static int ecryptfs_readlink_lower(struc
37553 old_fs = get_fs();
37554 set_fs(get_ds());
37555 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
37556 - (char __user *)lower_buf,
37557 + (__force char __user *)lower_buf,
37558 lower_bufsiz);
37559 set_fs(old_fs);
37560 if (rc < 0)
37561 @@ -706,7 +706,7 @@ static void *ecryptfs_follow_link(struct
37562 }
37563 old_fs = get_fs();
37564 set_fs(get_ds());
37565 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
37566 + rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
37567 set_fs(old_fs);
37568 if (rc < 0)
37569 goto out_free;
37570 diff -urNp linux-2.6.32.41/fs/exec.c linux-2.6.32.41/fs/exec.c
37571 --- linux-2.6.32.41/fs/exec.c 2011-04-17 17:00:52.000000000 -0400
37572 +++ linux-2.6.32.41/fs/exec.c 2011-06-04 20:41:36.000000000 -0400
37573 @@ -56,12 +56,24 @@
37574 #include <linux/fsnotify.h>
37575 #include <linux/fs_struct.h>
37576 #include <linux/pipe_fs_i.h>
37577 +#include <linux/random.h>
37578 +#include <linux/seq_file.h>
37579 +
37580 +#ifdef CONFIG_PAX_REFCOUNT
37581 +#include <linux/kallsyms.h>
37582 +#include <linux/kdebug.h>
37583 +#endif
37584
37585 #include <asm/uaccess.h>
37586 #include <asm/mmu_context.h>
37587 #include <asm/tlb.h>
37588 #include "internal.h"
37589
37590 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
37591 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
37592 +EXPORT_SYMBOL(pax_set_initial_flags_func);
37593 +#endif
37594 +
37595 int core_uses_pid;
37596 char core_pattern[CORENAME_MAX_SIZE] = "core";
37597 unsigned int core_pipe_limit;
37598 @@ -115,7 +127,7 @@ SYSCALL_DEFINE1(uselib, const char __use
37599 goto out;
37600
37601 file = do_filp_open(AT_FDCWD, tmp,
37602 - O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
37603 + O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
37604 MAY_READ | MAY_EXEC | MAY_OPEN);
37605 putname(tmp);
37606 error = PTR_ERR(file);
37607 @@ -178,18 +190,10 @@ struct page *get_arg_page(struct linux_b
37608 int write)
37609 {
37610 struct page *page;
37611 - int ret;
37612
37613 -#ifdef CONFIG_STACK_GROWSUP
37614 - if (write) {
37615 - ret = expand_stack_downwards(bprm->vma, pos);
37616 - if (ret < 0)
37617 - return NULL;
37618 - }
37619 -#endif
37620 - ret = get_user_pages(current, bprm->mm, pos,
37621 - 1, write, 1, &page, NULL);
37622 - if (ret <= 0)
37623 + if (0 > expand_stack_downwards(bprm->vma, pos))
37624 + return NULL;
37625 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
37626 return NULL;
37627
37628 if (write) {
37629 @@ -263,6 +267,11 @@ static int __bprm_mm_init(struct linux_b
37630 vma->vm_end = STACK_TOP_MAX;
37631 vma->vm_start = vma->vm_end - PAGE_SIZE;
37632 vma->vm_flags = VM_STACK_FLAGS;
37633 +
37634 +#ifdef CONFIG_PAX_SEGMEXEC
37635 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
37636 +#endif
37637 +
37638 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
37639
37640 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
37641 @@ -276,6 +285,12 @@ static int __bprm_mm_init(struct linux_b
37642 mm->stack_vm = mm->total_vm = 1;
37643 up_write(&mm->mmap_sem);
37644 bprm->p = vma->vm_end - sizeof(void *);
37645 +
37646 +#ifdef CONFIG_PAX_RANDUSTACK
37647 + if (randomize_va_space)
37648 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
37649 +#endif
37650 +
37651 return 0;
37652 err:
37653 up_write(&mm->mmap_sem);
37654 @@ -510,7 +525,7 @@ int copy_strings_kernel(int argc,char **
37655 int r;
37656 mm_segment_t oldfs = get_fs();
37657 set_fs(KERNEL_DS);
37658 - r = copy_strings(argc, (char __user * __user *)argv, bprm);
37659 + r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
37660 set_fs(oldfs);
37661 return r;
37662 }
37663 @@ -540,7 +555,8 @@ static int shift_arg_pages(struct vm_are
37664 unsigned long new_end = old_end - shift;
37665 struct mmu_gather *tlb;
37666
37667 - BUG_ON(new_start > new_end);
37668 + if (new_start >= new_end || new_start < mmap_min_addr)
37669 + return -ENOMEM;
37670
37671 /*
37672 * ensure there are no vmas between where we want to go
37673 @@ -549,6 +565,10 @@ static int shift_arg_pages(struct vm_are
37674 if (vma != find_vma(mm, new_start))
37675 return -EFAULT;
37676
37677 +#ifdef CONFIG_PAX_SEGMEXEC
37678 + BUG_ON(pax_find_mirror_vma(vma));
37679 +#endif
37680 +
37681 /*
37682 * cover the whole range: [new_start, old_end)
37683 */
37684 @@ -630,10 +650,6 @@ int setup_arg_pages(struct linux_binprm
37685 stack_top = arch_align_stack(stack_top);
37686 stack_top = PAGE_ALIGN(stack_top);
37687
37688 - if (unlikely(stack_top < mmap_min_addr) ||
37689 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
37690 - return -ENOMEM;
37691 -
37692 stack_shift = vma->vm_end - stack_top;
37693
37694 bprm->p -= stack_shift;
37695 @@ -645,6 +661,14 @@ int setup_arg_pages(struct linux_binprm
37696 bprm->exec -= stack_shift;
37697
37698 down_write(&mm->mmap_sem);
37699 +
37700 + /* Move stack pages down in memory. */
37701 + if (stack_shift) {
37702 + ret = shift_arg_pages(vma, stack_shift);
37703 + if (ret)
37704 + goto out_unlock;
37705 + }
37706 +
37707 vm_flags = VM_STACK_FLAGS;
37708
37709 /*
37710 @@ -658,19 +682,24 @@ int setup_arg_pages(struct linux_binprm
37711 vm_flags &= ~VM_EXEC;
37712 vm_flags |= mm->def_flags;
37713
37714 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
37715 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
37716 + vm_flags &= ~VM_EXEC;
37717 +
37718 +#ifdef CONFIG_PAX_MPROTECT
37719 + if (mm->pax_flags & MF_PAX_MPROTECT)
37720 + vm_flags &= ~VM_MAYEXEC;
37721 +#endif
37722 +
37723 + }
37724 +#endif
37725 +
37726 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
37727 vm_flags);
37728 if (ret)
37729 goto out_unlock;
37730 BUG_ON(prev != vma);
37731
37732 - /* Move stack pages down in memory. */
37733 - if (stack_shift) {
37734 - ret = shift_arg_pages(vma, stack_shift);
37735 - if (ret)
37736 - goto out_unlock;
37737 - }
37738 -
37739 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
37740 stack_size = vma->vm_end - vma->vm_start;
37741 /*
37742 @@ -707,7 +736,7 @@ struct file *open_exec(const char *name)
37743 int err;
37744
37745 file = do_filp_open(AT_FDCWD, name,
37746 - O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
37747 + O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
37748 MAY_EXEC | MAY_OPEN);
37749 if (IS_ERR(file))
37750 goto out;
37751 @@ -744,7 +773,7 @@ int kernel_read(struct file *file, loff_
37752 old_fs = get_fs();
37753 set_fs(get_ds());
37754 /* The cast to a user pointer is valid due to the set_fs() */
37755 - result = vfs_read(file, (void __user *)addr, count, &pos);
37756 + result = vfs_read(file, (__force void __user *)addr, count, &pos);
37757 set_fs(old_fs);
37758 return result;
37759 }
37760 @@ -1151,7 +1180,7 @@ int check_unsafe_exec(struct linux_binpr
37761 }
37762 rcu_read_unlock();
37763
37764 - if (p->fs->users > n_fs) {
37765 + if (atomic_read(&p->fs->users) > n_fs) {
37766 bprm->unsafe |= LSM_UNSAFE_SHARE;
37767 } else {
37768 res = -EAGAIN;
37769 @@ -1350,6 +1379,11 @@ int do_execve(char * filename,
37770 char __user *__user *envp,
37771 struct pt_regs * regs)
37772 {
37773 +#ifdef CONFIG_GRKERNSEC
37774 + struct file *old_exec_file;
37775 + struct acl_subject_label *old_acl;
37776 + struct rlimit old_rlim[RLIM_NLIMITS];
37777 +#endif
37778 struct linux_binprm *bprm;
37779 struct file *file;
37780 struct files_struct *displaced;
37781 @@ -1386,6 +1420,23 @@ int do_execve(char * filename,
37782 bprm->filename = filename;
37783 bprm->interp = filename;
37784
37785 + if (gr_process_user_ban()) {
37786 + retval = -EPERM;
37787 + goto out_file;
37788 + }
37789 +
37790 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
37791 +
37792 + if (gr_handle_nproc()) {
37793 + retval = -EAGAIN;
37794 + goto out_file;
37795 + }
37796 +
37797 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
37798 + retval = -EACCES;
37799 + goto out_file;
37800 + }
37801 +
37802 retval = bprm_mm_init(bprm);
37803 if (retval)
37804 goto out_file;
37805 @@ -1415,10 +1466,41 @@ int do_execve(char * filename,
37806 if (retval < 0)
37807 goto out;
37808
37809 + if (!gr_tpe_allow(file)) {
37810 + retval = -EACCES;
37811 + goto out;
37812 + }
37813 +
37814 + if (gr_check_crash_exec(file)) {
37815 + retval = -EACCES;
37816 + goto out;
37817 + }
37818 +
37819 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
37820 +
37821 + gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
37822 +
37823 +#ifdef CONFIG_GRKERNSEC
37824 + old_acl = current->acl;
37825 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
37826 + old_exec_file = current->exec_file;
37827 + get_file(file);
37828 + current->exec_file = file;
37829 +#endif
37830 +
37831 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
37832 + bprm->unsafe & LSM_UNSAFE_SHARE);
37833 + if (retval < 0)
37834 + goto out_fail;
37835 +
37836 current->flags &= ~PF_KTHREAD;
37837 retval = search_binary_handler(bprm,regs);
37838 if (retval < 0)
37839 - goto out;
37840 + goto out_fail;
37841 +#ifdef CONFIG_GRKERNSEC
37842 + if (old_exec_file)
37843 + fput(old_exec_file);
37844 +#endif
37845
37846 /* execve succeeded */
37847 current->fs->in_exec = 0;
37848 @@ -1429,6 +1511,14 @@ int do_execve(char * filename,
37849 put_files_struct(displaced);
37850 return retval;
37851
37852 +out_fail:
37853 +#ifdef CONFIG_GRKERNSEC
37854 + current->acl = old_acl;
37855 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
37856 + fput(current->exec_file);
37857 + current->exec_file = old_exec_file;
37858 +#endif
37859 +
37860 out:
37861 if (bprm->mm) {
37862 acct_arg_size(bprm, 0);
37863 @@ -1594,6 +1684,220 @@ out:
37864 return ispipe;
37865 }
37866
37867 +int pax_check_flags(unsigned long *flags)
37868 +{
37869 + int retval = 0;
37870 +
37871 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
37872 + if (*flags & MF_PAX_SEGMEXEC)
37873 + {
37874 + *flags &= ~MF_PAX_SEGMEXEC;
37875 + retval = -EINVAL;
37876 + }
37877 +#endif
37878 +
37879 + if ((*flags & MF_PAX_PAGEEXEC)
37880 +
37881 +#ifdef CONFIG_PAX_PAGEEXEC
37882 + && (*flags & MF_PAX_SEGMEXEC)
37883 +#endif
37884 +
37885 + )
37886 + {
37887 + *flags &= ~MF_PAX_PAGEEXEC;
37888 + retval = -EINVAL;
37889 + }
37890 +
37891 + if ((*flags & MF_PAX_MPROTECT)
37892 +
37893 +#ifdef CONFIG_PAX_MPROTECT
37894 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
37895 +#endif
37896 +
37897 + )
37898 + {
37899 + *flags &= ~MF_PAX_MPROTECT;
37900 + retval = -EINVAL;
37901 + }
37902 +
37903 + if ((*flags & MF_PAX_EMUTRAMP)
37904 +
37905 +#ifdef CONFIG_PAX_EMUTRAMP
37906 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
37907 +#endif
37908 +
37909 + )
37910 + {
37911 + *flags &= ~MF_PAX_EMUTRAMP;
37912 + retval = -EINVAL;
37913 + }
37914 +
37915 + return retval;
37916 +}
37917 +
37918 +EXPORT_SYMBOL(pax_check_flags);
37919 +
37920 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
37921 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
37922 +{
37923 + struct task_struct *tsk = current;
37924 + struct mm_struct *mm = current->mm;
37925 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
37926 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
37927 + char *path_exec = NULL;
37928 + char *path_fault = NULL;
37929 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
37930 +
37931 + if (buffer_exec && buffer_fault) {
37932 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
37933 +
37934 + down_read(&mm->mmap_sem);
37935 + vma = mm->mmap;
37936 + while (vma && (!vma_exec || !vma_fault)) {
37937 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
37938 + vma_exec = vma;
37939 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
37940 + vma_fault = vma;
37941 + vma = vma->vm_next;
37942 + }
37943 + if (vma_exec) {
37944 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
37945 + if (IS_ERR(path_exec))
37946 + path_exec = "<path too long>";
37947 + else {
37948 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
37949 + if (path_exec) {
37950 + *path_exec = 0;
37951 + path_exec = buffer_exec;
37952 + } else
37953 + path_exec = "<path too long>";
37954 + }
37955 + }
37956 + if (vma_fault) {
37957 + start = vma_fault->vm_start;
37958 + end = vma_fault->vm_end;
37959 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
37960 + if (vma_fault->vm_file) {
37961 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
37962 + if (IS_ERR(path_fault))
37963 + path_fault = "<path too long>";
37964 + else {
37965 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
37966 + if (path_fault) {
37967 + *path_fault = 0;
37968 + path_fault = buffer_fault;
37969 + } else
37970 + path_fault = "<path too long>";
37971 + }
37972 + } else
37973 + path_fault = "<anonymous mapping>";
37974 + }
37975 + up_read(&mm->mmap_sem);
37976 + }
37977 + if (tsk->signal->curr_ip)
37978 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
37979 + else
37980 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
37981 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
37982 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
37983 + task_uid(tsk), task_euid(tsk), pc, sp);
37984 + free_page((unsigned long)buffer_exec);
37985 + free_page((unsigned long)buffer_fault);
37986 + pax_report_insns(pc, sp);
37987 + do_coredump(SIGKILL, SIGKILL, regs);
37988 +}
37989 +#endif
37990 +
37991 +#ifdef CONFIG_PAX_REFCOUNT
37992 +void pax_report_refcount_overflow(struct pt_regs *regs)
37993 +{
37994 + if (current->signal->curr_ip)
37995 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
37996 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
37997 + else
37998 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
37999 + current->comm, task_pid_nr(current), current_uid(), current_euid());
38000 + print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
38001 + show_regs(regs);
38002 + force_sig_specific(SIGKILL, current);
38003 +}
38004 +#endif
38005 +
38006 +#ifdef CONFIG_PAX_USERCOPY
38007 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
38008 +int object_is_on_stack(const void *obj, unsigned long len)
38009 +{
38010 + const void * const stack = task_stack_page(current);
38011 + const void * const stackend = stack + THREAD_SIZE;
38012 +
38013 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
38014 + const void *frame = NULL;
38015 + const void *oldframe;
38016 +#endif
38017 +
38018 + if (obj + len < obj)
38019 + return -1;
38020 +
38021 + if (obj + len <= stack || stackend <= obj)
38022 + return 0;
38023 +
38024 + if (obj < stack || stackend < obj + len)
38025 + return -1;
38026 +
38027 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
38028 + oldframe = __builtin_frame_address(1);
38029 + if (oldframe)
38030 + frame = __builtin_frame_address(2);
38031 + /*
38032 + low ----------------------------------------------> high
38033 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
38034 + ^----------------^
38035 + allow copies only within here
38036 + */
38037 + while (stack <= frame && frame < stackend) {
38038 + /* if obj + len extends past the last frame, this
38039 + check won't pass and the next frame will be 0,
38040 + causing us to bail out and correctly report
38041 + the copy as invalid
38042 + */
38043 + if (obj + len <= frame)
38044 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
38045 + oldframe = frame;
38046 + frame = *(const void * const *)frame;
38047 + }
38048 + return -1;
38049 +#else
38050 + return 1;
38051 +#endif
38052 +}
38053 +
38054 +
38055 +void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
38056 +{
38057 + if (current->signal->curr_ip)
38058 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
38059 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
38060 + else
38061 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
38062 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
38063 +
38064 + dump_stack();
38065 + gr_handle_kernel_exploit();
38066 + do_group_exit(SIGKILL);
38067 +}
38068 +#endif
38069 +
38070 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
38071 +void pax_track_stack(void)
38072 +{
38073 + unsigned long sp = (unsigned long)&sp;
38074 + if (sp < current_thread_info()->lowest_stack &&
38075 + sp > (unsigned long)task_stack_page(current))
38076 + current_thread_info()->lowest_stack = sp;
38077 +}
38078 +EXPORT_SYMBOL(pax_track_stack);
38079 +#endif
38080 +
38081 static int zap_process(struct task_struct *start)
38082 {
38083 struct task_struct *t;
38084 @@ -1796,17 +2100,17 @@ static void wait_for_dump_helpers(struct
38085 pipe = file->f_path.dentry->d_inode->i_pipe;
38086
38087 pipe_lock(pipe);
38088 - pipe->readers++;
38089 - pipe->writers--;
38090 + atomic_inc(&pipe->readers);
38091 + atomic_dec(&pipe->writers);
38092
38093 - while ((pipe->readers > 1) && (!signal_pending(current))) {
38094 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
38095 wake_up_interruptible_sync(&pipe->wait);
38096 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
38097 pipe_wait(pipe);
38098 }
38099
38100 - pipe->readers--;
38101 - pipe->writers++;
38102 + atomic_dec(&pipe->readers);
38103 + atomic_inc(&pipe->writers);
38104 pipe_unlock(pipe);
38105
38106 }
38107 @@ -1829,10 +2133,13 @@ void do_coredump(long signr, int exit_co
38108 char **helper_argv = NULL;
38109 int helper_argc = 0;
38110 int dump_count = 0;
38111 - static atomic_t core_dump_count = ATOMIC_INIT(0);
38112 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
38113
38114 audit_core_dumps(signr);
38115
38116 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
38117 + gr_handle_brute_attach(current, mm->flags);
38118 +
38119 binfmt = mm->binfmt;
38120 if (!binfmt || !binfmt->core_dump)
38121 goto fail;
38122 @@ -1877,6 +2184,8 @@ void do_coredump(long signr, int exit_co
38123 */
38124 clear_thread_flag(TIF_SIGPENDING);
38125
38126 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
38127 +
38128 /*
38129 * lock_kernel() because format_corename() is controlled by sysctl, which
38130 * uses lock_kernel()
38131 @@ -1911,7 +2220,7 @@ void do_coredump(long signr, int exit_co
38132 goto fail_unlock;
38133 }
38134
38135 - dump_count = atomic_inc_return(&core_dump_count);
38136 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
38137 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
38138 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
38139 task_tgid_vnr(current), current->comm);
38140 @@ -1975,7 +2284,7 @@ close_fail:
38141 filp_close(file, NULL);
38142 fail_dropcount:
38143 if (dump_count)
38144 - atomic_dec(&core_dump_count);
38145 + atomic_dec_unchecked(&core_dump_count);
38146 fail_unlock:
38147 if (helper_argv)
38148 argv_free(helper_argv);
38149 diff -urNp linux-2.6.32.41/fs/ext2/balloc.c linux-2.6.32.41/fs/ext2/balloc.c
38150 --- linux-2.6.32.41/fs/ext2/balloc.c 2011-03-27 14:31:47.000000000 -0400
38151 +++ linux-2.6.32.41/fs/ext2/balloc.c 2011-04-17 15:56:46.000000000 -0400
38152 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
38153
38154 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
38155 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
38156 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
38157 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
38158 sbi->s_resuid != current_fsuid() &&
38159 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
38160 return 0;
38161 diff -urNp linux-2.6.32.41/fs/ext3/balloc.c linux-2.6.32.41/fs/ext3/balloc.c
38162 --- linux-2.6.32.41/fs/ext3/balloc.c 2011-03-27 14:31:47.000000000 -0400
38163 +++ linux-2.6.32.41/fs/ext3/balloc.c 2011-04-17 15:56:46.000000000 -0400
38164 @@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct e
38165
38166 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
38167 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
38168 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
38169 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
38170 sbi->s_resuid != current_fsuid() &&
38171 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
38172 return 0;
38173 diff -urNp linux-2.6.32.41/fs/ext4/balloc.c linux-2.6.32.41/fs/ext4/balloc.c
38174 --- linux-2.6.32.41/fs/ext4/balloc.c 2011-03-27 14:31:47.000000000 -0400
38175 +++ linux-2.6.32.41/fs/ext4/balloc.c 2011-04-17 15:56:46.000000000 -0400
38176 @@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_
38177 /* Hm, nope. Are (enough) root reserved blocks available? */
38178 if (sbi->s_resuid == current_fsuid() ||
38179 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
38180 - capable(CAP_SYS_RESOURCE)) {
38181 + capable_nolog(CAP_SYS_RESOURCE)) {
38182 if (free_blocks >= (nblocks + dirty_blocks))
38183 return 1;
38184 }
38185 diff -urNp linux-2.6.32.41/fs/ext4/ext4.h linux-2.6.32.41/fs/ext4/ext4.h
38186 --- linux-2.6.32.41/fs/ext4/ext4.h 2011-03-27 14:31:47.000000000 -0400
38187 +++ linux-2.6.32.41/fs/ext4/ext4.h 2011-04-17 15:56:46.000000000 -0400
38188 @@ -1078,19 +1078,19 @@ struct ext4_sb_info {
38189
38190 /* stats for buddy allocator */
38191 spinlock_t s_mb_pa_lock;
38192 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
38193 - atomic_t s_bal_success; /* we found long enough chunks */
38194 - atomic_t s_bal_allocated; /* in blocks */
38195 - atomic_t s_bal_ex_scanned; /* total extents scanned */
38196 - atomic_t s_bal_goals; /* goal hits */
38197 - atomic_t s_bal_breaks; /* too long searches */
38198 - atomic_t s_bal_2orders; /* 2^order hits */
38199 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
38200 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
38201 + atomic_unchecked_t s_bal_allocated; /* in blocks */
38202 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
38203 + atomic_unchecked_t s_bal_goals; /* goal hits */
38204 + atomic_unchecked_t s_bal_breaks; /* too long searches */
38205 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
38206 spinlock_t s_bal_lock;
38207 unsigned long s_mb_buddies_generated;
38208 unsigned long long s_mb_generation_time;
38209 - atomic_t s_mb_lost_chunks;
38210 - atomic_t s_mb_preallocated;
38211 - atomic_t s_mb_discarded;
38212 + atomic_unchecked_t s_mb_lost_chunks;
38213 + atomic_unchecked_t s_mb_preallocated;
38214 + atomic_unchecked_t s_mb_discarded;
38215 atomic_t s_lock_busy;
38216
38217 /* locality groups */
38218 diff -urNp linux-2.6.32.41/fs/ext4/mballoc.c linux-2.6.32.41/fs/ext4/mballoc.c
38219 --- linux-2.6.32.41/fs/ext4/mballoc.c 2011-03-27 14:31:47.000000000 -0400
38220 +++ linux-2.6.32.41/fs/ext4/mballoc.c 2011-05-16 21:46:57.000000000 -0400
38221 @@ -1753,7 +1753,7 @@ void ext4_mb_simple_scan_group(struct ex
38222 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
38223
38224 if (EXT4_SB(sb)->s_mb_stats)
38225 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
38226 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
38227
38228 break;
38229 }
38230 @@ -2129,7 +2129,7 @@ repeat:
38231 ac->ac_status = AC_STATUS_CONTINUE;
38232 ac->ac_flags |= EXT4_MB_HINT_FIRST;
38233 cr = 3;
38234 - atomic_inc(&sbi->s_mb_lost_chunks);
38235 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
38236 goto repeat;
38237 }
38238 }
38239 @@ -2172,6 +2172,8 @@ static int ext4_mb_seq_groups_show(struc
38240 ext4_grpblk_t counters[16];
38241 } sg;
38242
38243 + pax_track_stack();
38244 +
38245 group--;
38246 if (group == 0)
38247 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
38248 @@ -2532,25 +2534,25 @@ int ext4_mb_release(struct super_block *
38249 if (sbi->s_mb_stats) {
38250 printk(KERN_INFO
38251 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
38252 - atomic_read(&sbi->s_bal_allocated),
38253 - atomic_read(&sbi->s_bal_reqs),
38254 - atomic_read(&sbi->s_bal_success));
38255 + atomic_read_unchecked(&sbi->s_bal_allocated),
38256 + atomic_read_unchecked(&sbi->s_bal_reqs),
38257 + atomic_read_unchecked(&sbi->s_bal_success));
38258 printk(KERN_INFO
38259 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
38260 "%u 2^N hits, %u breaks, %u lost\n",
38261 - atomic_read(&sbi->s_bal_ex_scanned),
38262 - atomic_read(&sbi->s_bal_goals),
38263 - atomic_read(&sbi->s_bal_2orders),
38264 - atomic_read(&sbi->s_bal_breaks),
38265 - atomic_read(&sbi->s_mb_lost_chunks));
38266 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
38267 + atomic_read_unchecked(&sbi->s_bal_goals),
38268 + atomic_read_unchecked(&sbi->s_bal_2orders),
38269 + atomic_read_unchecked(&sbi->s_bal_breaks),
38270 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
38271 printk(KERN_INFO
38272 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
38273 sbi->s_mb_buddies_generated++,
38274 sbi->s_mb_generation_time);
38275 printk(KERN_INFO
38276 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
38277 - atomic_read(&sbi->s_mb_preallocated),
38278 - atomic_read(&sbi->s_mb_discarded));
38279 + atomic_read_unchecked(&sbi->s_mb_preallocated),
38280 + atomic_read_unchecked(&sbi->s_mb_discarded));
38281 }
38282
38283 free_percpu(sbi->s_locality_groups);
38284 @@ -3032,16 +3034,16 @@ static void ext4_mb_collect_stats(struct
38285 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
38286
38287 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
38288 - atomic_inc(&sbi->s_bal_reqs);
38289 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
38290 + atomic_inc_unchecked(&sbi->s_bal_reqs);
38291 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
38292 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
38293 - atomic_inc(&sbi->s_bal_success);
38294 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
38295 + atomic_inc_unchecked(&sbi->s_bal_success);
38296 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
38297 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
38298 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
38299 - atomic_inc(&sbi->s_bal_goals);
38300 + atomic_inc_unchecked(&sbi->s_bal_goals);
38301 if (ac->ac_found > sbi->s_mb_max_to_scan)
38302 - atomic_inc(&sbi->s_bal_breaks);
38303 + atomic_inc_unchecked(&sbi->s_bal_breaks);
38304 }
38305
38306 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
38307 @@ -3441,7 +3443,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
38308 trace_ext4_mb_new_inode_pa(ac, pa);
38309
38310 ext4_mb_use_inode_pa(ac, pa);
38311 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
38312 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
38313
38314 ei = EXT4_I(ac->ac_inode);
38315 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
38316 @@ -3501,7 +3503,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
38317 trace_ext4_mb_new_group_pa(ac, pa);
38318
38319 ext4_mb_use_group_pa(ac, pa);
38320 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
38321 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
38322
38323 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
38324 lg = ac->ac_lg;
38325 @@ -3605,7 +3607,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
38326 * from the bitmap and continue.
38327 */
38328 }
38329 - atomic_add(free, &sbi->s_mb_discarded);
38330 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
38331
38332 return err;
38333 }
38334 @@ -3624,7 +3626,7 @@ ext4_mb_release_group_pa(struct ext4_bud
38335 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
38336 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
38337 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
38338 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
38339 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
38340
38341 if (ac) {
38342 ac->ac_sb = sb;
38343 diff -urNp linux-2.6.32.41/fs/ext4/super.c linux-2.6.32.41/fs/ext4/super.c
38344 --- linux-2.6.32.41/fs/ext4/super.c 2011-03-27 14:31:47.000000000 -0400
38345 +++ linux-2.6.32.41/fs/ext4/super.c 2011-04-17 15:56:46.000000000 -0400
38346 @@ -2287,7 +2287,7 @@ static void ext4_sb_release(struct kobje
38347 }
38348
38349
38350 -static struct sysfs_ops ext4_attr_ops = {
38351 +static const struct sysfs_ops ext4_attr_ops = {
38352 .show = ext4_attr_show,
38353 .store = ext4_attr_store,
38354 };
38355 diff -urNp linux-2.6.32.41/fs/fcntl.c linux-2.6.32.41/fs/fcntl.c
38356 --- linux-2.6.32.41/fs/fcntl.c 2011-03-27 14:31:47.000000000 -0400
38357 +++ linux-2.6.32.41/fs/fcntl.c 2011-04-17 15:56:46.000000000 -0400
38358 @@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct
38359 if (err)
38360 return err;
38361
38362 + if (gr_handle_chroot_fowner(pid, type))
38363 + return -ENOENT;
38364 + if (gr_check_protected_task_fowner(pid, type))
38365 + return -EACCES;
38366 +
38367 f_modown(filp, pid, type, force);
38368 return 0;
38369 }
38370 @@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned in
38371 switch (cmd) {
38372 case F_DUPFD:
38373 case F_DUPFD_CLOEXEC:
38374 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
38375 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
38376 break;
38377 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
38378 diff -urNp linux-2.6.32.41/fs/fifo.c linux-2.6.32.41/fs/fifo.c
38379 --- linux-2.6.32.41/fs/fifo.c 2011-03-27 14:31:47.000000000 -0400
38380 +++ linux-2.6.32.41/fs/fifo.c 2011-04-17 15:56:46.000000000 -0400
38381 @@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode
38382 */
38383 filp->f_op = &read_pipefifo_fops;
38384 pipe->r_counter++;
38385 - if (pipe->readers++ == 0)
38386 + if (atomic_inc_return(&pipe->readers) == 1)
38387 wake_up_partner(inode);
38388
38389 - if (!pipe->writers) {
38390 + if (!atomic_read(&pipe->writers)) {
38391 if ((filp->f_flags & O_NONBLOCK)) {
38392 /* suppress POLLHUP until we have
38393 * seen a writer */
38394 @@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode
38395 * errno=ENXIO when there is no process reading the FIFO.
38396 */
38397 ret = -ENXIO;
38398 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
38399 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
38400 goto err;
38401
38402 filp->f_op = &write_pipefifo_fops;
38403 pipe->w_counter++;
38404 - if (!pipe->writers++)
38405 + if (atomic_inc_return(&pipe->writers) == 1)
38406 wake_up_partner(inode);
38407
38408 - if (!pipe->readers) {
38409 + if (!atomic_read(&pipe->readers)) {
38410 wait_for_partner(inode, &pipe->r_counter);
38411 if (signal_pending(current))
38412 goto err_wr;
38413 @@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode
38414 */
38415 filp->f_op = &rdwr_pipefifo_fops;
38416
38417 - pipe->readers++;
38418 - pipe->writers++;
38419 + atomic_inc(&pipe->readers);
38420 + atomic_inc(&pipe->writers);
38421 pipe->r_counter++;
38422 pipe->w_counter++;
38423 - if (pipe->readers == 1 || pipe->writers == 1)
38424 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
38425 wake_up_partner(inode);
38426 break;
38427
38428 @@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode
38429 return 0;
38430
38431 err_rd:
38432 - if (!--pipe->readers)
38433 + if (atomic_dec_and_test(&pipe->readers))
38434 wake_up_interruptible(&pipe->wait);
38435 ret = -ERESTARTSYS;
38436 goto err;
38437
38438 err_wr:
38439 - if (!--pipe->writers)
38440 + if (atomic_dec_and_test(&pipe->writers))
38441 wake_up_interruptible(&pipe->wait);
38442 ret = -ERESTARTSYS;
38443 goto err;
38444
38445 err:
38446 - if (!pipe->readers && !pipe->writers)
38447 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
38448 free_pipe_info(inode);
38449
38450 err_nocleanup:
38451 diff -urNp linux-2.6.32.41/fs/file.c linux-2.6.32.41/fs/file.c
38452 --- linux-2.6.32.41/fs/file.c 2011-03-27 14:31:47.000000000 -0400
38453 +++ linux-2.6.32.41/fs/file.c 2011-04-17 15:56:46.000000000 -0400
38454 @@ -14,6 +14,7 @@
38455 #include <linux/slab.h>
38456 #include <linux/vmalloc.h>
38457 #include <linux/file.h>
38458 +#include <linux/security.h>
38459 #include <linux/fdtable.h>
38460 #include <linux/bitops.h>
38461 #include <linux/interrupt.h>
38462 @@ -257,6 +258,8 @@ int expand_files(struct files_struct *fi
38463 * N.B. For clone tasks sharing a files structure, this test
38464 * will limit the total number of files that can be opened.
38465 */
38466 +
38467 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
38468 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
38469 return -EMFILE;
38470
38471 diff -urNp linux-2.6.32.41/fs/filesystems.c linux-2.6.32.41/fs/filesystems.c
38472 --- linux-2.6.32.41/fs/filesystems.c 2011-03-27 14:31:47.000000000 -0400
38473 +++ linux-2.6.32.41/fs/filesystems.c 2011-04-17 15:56:46.000000000 -0400
38474 @@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(con
38475 int len = dot ? dot - name : strlen(name);
38476
38477 fs = __get_fs_type(name, len);
38478 +
38479 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
38480 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
38481 +#else
38482 if (!fs && (request_module("%.*s", len, name) == 0))
38483 +#endif
38484 fs = __get_fs_type(name, len);
38485
38486 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
38487 diff -urNp linux-2.6.32.41/fs/fscache/cookie.c linux-2.6.32.41/fs/fscache/cookie.c
38488 --- linux-2.6.32.41/fs/fscache/cookie.c 2011-03-27 14:31:47.000000000 -0400
38489 +++ linux-2.6.32.41/fs/fscache/cookie.c 2011-05-04 17:56:28.000000000 -0400
38490 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
38491 parent ? (char *) parent->def->name : "<no-parent>",
38492 def->name, netfs_data);
38493
38494 - fscache_stat(&fscache_n_acquires);
38495 + fscache_stat_unchecked(&fscache_n_acquires);
38496
38497 /* if there's no parent cookie, then we don't create one here either */
38498 if (!parent) {
38499 - fscache_stat(&fscache_n_acquires_null);
38500 + fscache_stat_unchecked(&fscache_n_acquires_null);
38501 _leave(" [no parent]");
38502 return NULL;
38503 }
38504 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
38505 /* allocate and initialise a cookie */
38506 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
38507 if (!cookie) {
38508 - fscache_stat(&fscache_n_acquires_oom);
38509 + fscache_stat_unchecked(&fscache_n_acquires_oom);
38510 _leave(" [ENOMEM]");
38511 return NULL;
38512 }
38513 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
38514
38515 switch (cookie->def->type) {
38516 case FSCACHE_COOKIE_TYPE_INDEX:
38517 - fscache_stat(&fscache_n_cookie_index);
38518 + fscache_stat_unchecked(&fscache_n_cookie_index);
38519 break;
38520 case FSCACHE_COOKIE_TYPE_DATAFILE:
38521 - fscache_stat(&fscache_n_cookie_data);
38522 + fscache_stat_unchecked(&fscache_n_cookie_data);
38523 break;
38524 default:
38525 - fscache_stat(&fscache_n_cookie_special);
38526 + fscache_stat_unchecked(&fscache_n_cookie_special);
38527 break;
38528 }
38529
38530 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
38531 if (fscache_acquire_non_index_cookie(cookie) < 0) {
38532 atomic_dec(&parent->n_children);
38533 __fscache_cookie_put(cookie);
38534 - fscache_stat(&fscache_n_acquires_nobufs);
38535 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
38536 _leave(" = NULL");
38537 return NULL;
38538 }
38539 }
38540
38541 - fscache_stat(&fscache_n_acquires_ok);
38542 + fscache_stat_unchecked(&fscache_n_acquires_ok);
38543 _leave(" = %p", cookie);
38544 return cookie;
38545 }
38546 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
38547 cache = fscache_select_cache_for_object(cookie->parent);
38548 if (!cache) {
38549 up_read(&fscache_addremove_sem);
38550 - fscache_stat(&fscache_n_acquires_no_cache);
38551 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
38552 _leave(" = -ENOMEDIUM [no cache]");
38553 return -ENOMEDIUM;
38554 }
38555 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
38556 object = cache->ops->alloc_object(cache, cookie);
38557 fscache_stat_d(&fscache_n_cop_alloc_object);
38558 if (IS_ERR(object)) {
38559 - fscache_stat(&fscache_n_object_no_alloc);
38560 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
38561 ret = PTR_ERR(object);
38562 goto error;
38563 }
38564
38565 - fscache_stat(&fscache_n_object_alloc);
38566 + fscache_stat_unchecked(&fscache_n_object_alloc);
38567
38568 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
38569
38570 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
38571 struct fscache_object *object;
38572 struct hlist_node *_p;
38573
38574 - fscache_stat(&fscache_n_updates);
38575 + fscache_stat_unchecked(&fscache_n_updates);
38576
38577 if (!cookie) {
38578 - fscache_stat(&fscache_n_updates_null);
38579 + fscache_stat_unchecked(&fscache_n_updates_null);
38580 _leave(" [no cookie]");
38581 return;
38582 }
38583 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
38584 struct fscache_object *object;
38585 unsigned long event;
38586
38587 - fscache_stat(&fscache_n_relinquishes);
38588 + fscache_stat_unchecked(&fscache_n_relinquishes);
38589 if (retire)
38590 - fscache_stat(&fscache_n_relinquishes_retire);
38591 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
38592
38593 if (!cookie) {
38594 - fscache_stat(&fscache_n_relinquishes_null);
38595 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
38596 _leave(" [no cookie]");
38597 return;
38598 }
38599 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
38600
38601 /* wait for the cookie to finish being instantiated (or to fail) */
38602 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
38603 - fscache_stat(&fscache_n_relinquishes_waitcrt);
38604 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
38605 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
38606 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
38607 }
38608 diff -urNp linux-2.6.32.41/fs/fscache/internal.h linux-2.6.32.41/fs/fscache/internal.h
38609 --- linux-2.6.32.41/fs/fscache/internal.h 2011-03-27 14:31:47.000000000 -0400
38610 +++ linux-2.6.32.41/fs/fscache/internal.h 2011-05-04 17:56:28.000000000 -0400
38611 @@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
38612 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
38613 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
38614
38615 -extern atomic_t fscache_n_op_pend;
38616 -extern atomic_t fscache_n_op_run;
38617 -extern atomic_t fscache_n_op_enqueue;
38618 -extern atomic_t fscache_n_op_deferred_release;
38619 -extern atomic_t fscache_n_op_release;
38620 -extern atomic_t fscache_n_op_gc;
38621 -extern atomic_t fscache_n_op_cancelled;
38622 -extern atomic_t fscache_n_op_rejected;
38623 -
38624 -extern atomic_t fscache_n_attr_changed;
38625 -extern atomic_t fscache_n_attr_changed_ok;
38626 -extern atomic_t fscache_n_attr_changed_nobufs;
38627 -extern atomic_t fscache_n_attr_changed_nomem;
38628 -extern atomic_t fscache_n_attr_changed_calls;
38629 -
38630 -extern atomic_t fscache_n_allocs;
38631 -extern atomic_t fscache_n_allocs_ok;
38632 -extern atomic_t fscache_n_allocs_wait;
38633 -extern atomic_t fscache_n_allocs_nobufs;
38634 -extern atomic_t fscache_n_allocs_intr;
38635 -extern atomic_t fscache_n_allocs_object_dead;
38636 -extern atomic_t fscache_n_alloc_ops;
38637 -extern atomic_t fscache_n_alloc_op_waits;
38638 -
38639 -extern atomic_t fscache_n_retrievals;
38640 -extern atomic_t fscache_n_retrievals_ok;
38641 -extern atomic_t fscache_n_retrievals_wait;
38642 -extern atomic_t fscache_n_retrievals_nodata;
38643 -extern atomic_t fscache_n_retrievals_nobufs;
38644 -extern atomic_t fscache_n_retrievals_intr;
38645 -extern atomic_t fscache_n_retrievals_nomem;
38646 -extern atomic_t fscache_n_retrievals_object_dead;
38647 -extern atomic_t fscache_n_retrieval_ops;
38648 -extern atomic_t fscache_n_retrieval_op_waits;
38649 -
38650 -extern atomic_t fscache_n_stores;
38651 -extern atomic_t fscache_n_stores_ok;
38652 -extern atomic_t fscache_n_stores_again;
38653 -extern atomic_t fscache_n_stores_nobufs;
38654 -extern atomic_t fscache_n_stores_oom;
38655 -extern atomic_t fscache_n_store_ops;
38656 -extern atomic_t fscache_n_store_calls;
38657 -extern atomic_t fscache_n_store_pages;
38658 -extern atomic_t fscache_n_store_radix_deletes;
38659 -extern atomic_t fscache_n_store_pages_over_limit;
38660 -
38661 -extern atomic_t fscache_n_store_vmscan_not_storing;
38662 -extern atomic_t fscache_n_store_vmscan_gone;
38663 -extern atomic_t fscache_n_store_vmscan_busy;
38664 -extern atomic_t fscache_n_store_vmscan_cancelled;
38665 -
38666 -extern atomic_t fscache_n_marks;
38667 -extern atomic_t fscache_n_uncaches;
38668 -
38669 -extern atomic_t fscache_n_acquires;
38670 -extern atomic_t fscache_n_acquires_null;
38671 -extern atomic_t fscache_n_acquires_no_cache;
38672 -extern atomic_t fscache_n_acquires_ok;
38673 -extern atomic_t fscache_n_acquires_nobufs;
38674 -extern atomic_t fscache_n_acquires_oom;
38675 -
38676 -extern atomic_t fscache_n_updates;
38677 -extern atomic_t fscache_n_updates_null;
38678 -extern atomic_t fscache_n_updates_run;
38679 -
38680 -extern atomic_t fscache_n_relinquishes;
38681 -extern atomic_t fscache_n_relinquishes_null;
38682 -extern atomic_t fscache_n_relinquishes_waitcrt;
38683 -extern atomic_t fscache_n_relinquishes_retire;
38684 -
38685 -extern atomic_t fscache_n_cookie_index;
38686 -extern atomic_t fscache_n_cookie_data;
38687 -extern atomic_t fscache_n_cookie_special;
38688 -
38689 -extern atomic_t fscache_n_object_alloc;
38690 -extern atomic_t fscache_n_object_no_alloc;
38691 -extern atomic_t fscache_n_object_lookups;
38692 -extern atomic_t fscache_n_object_lookups_negative;
38693 -extern atomic_t fscache_n_object_lookups_positive;
38694 -extern atomic_t fscache_n_object_lookups_timed_out;
38695 -extern atomic_t fscache_n_object_created;
38696 -extern atomic_t fscache_n_object_avail;
38697 -extern atomic_t fscache_n_object_dead;
38698 -
38699 -extern atomic_t fscache_n_checkaux_none;
38700 -extern atomic_t fscache_n_checkaux_okay;
38701 -extern atomic_t fscache_n_checkaux_update;
38702 -extern atomic_t fscache_n_checkaux_obsolete;
38703 +extern atomic_unchecked_t fscache_n_op_pend;
38704 +extern atomic_unchecked_t fscache_n_op_run;
38705 +extern atomic_unchecked_t fscache_n_op_enqueue;
38706 +extern atomic_unchecked_t fscache_n_op_deferred_release;
38707 +extern atomic_unchecked_t fscache_n_op_release;
38708 +extern atomic_unchecked_t fscache_n_op_gc;
38709 +extern atomic_unchecked_t fscache_n_op_cancelled;
38710 +extern atomic_unchecked_t fscache_n_op_rejected;
38711 +
38712 +extern atomic_unchecked_t fscache_n_attr_changed;
38713 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
38714 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
38715 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
38716 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
38717 +
38718 +extern atomic_unchecked_t fscache_n_allocs;
38719 +extern atomic_unchecked_t fscache_n_allocs_ok;
38720 +extern atomic_unchecked_t fscache_n_allocs_wait;
38721 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
38722 +extern atomic_unchecked_t fscache_n_allocs_intr;
38723 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
38724 +extern atomic_unchecked_t fscache_n_alloc_ops;
38725 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
38726 +
38727 +extern atomic_unchecked_t fscache_n_retrievals;
38728 +extern atomic_unchecked_t fscache_n_retrievals_ok;
38729 +extern atomic_unchecked_t fscache_n_retrievals_wait;
38730 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
38731 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
38732 +extern atomic_unchecked_t fscache_n_retrievals_intr;
38733 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
38734 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
38735 +extern atomic_unchecked_t fscache_n_retrieval_ops;
38736 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
38737 +
38738 +extern atomic_unchecked_t fscache_n_stores;
38739 +extern atomic_unchecked_t fscache_n_stores_ok;
38740 +extern atomic_unchecked_t fscache_n_stores_again;
38741 +extern atomic_unchecked_t fscache_n_stores_nobufs;
38742 +extern atomic_unchecked_t fscache_n_stores_oom;
38743 +extern atomic_unchecked_t fscache_n_store_ops;
38744 +extern atomic_unchecked_t fscache_n_store_calls;
38745 +extern atomic_unchecked_t fscache_n_store_pages;
38746 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
38747 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
38748 +
38749 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
38750 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
38751 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
38752 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
38753 +
38754 +extern atomic_unchecked_t fscache_n_marks;
38755 +extern atomic_unchecked_t fscache_n_uncaches;
38756 +
38757 +extern atomic_unchecked_t fscache_n_acquires;
38758 +extern atomic_unchecked_t fscache_n_acquires_null;
38759 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
38760 +extern atomic_unchecked_t fscache_n_acquires_ok;
38761 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
38762 +extern atomic_unchecked_t fscache_n_acquires_oom;
38763 +
38764 +extern atomic_unchecked_t fscache_n_updates;
38765 +extern atomic_unchecked_t fscache_n_updates_null;
38766 +extern atomic_unchecked_t fscache_n_updates_run;
38767 +
38768 +extern atomic_unchecked_t fscache_n_relinquishes;
38769 +extern atomic_unchecked_t fscache_n_relinquishes_null;
38770 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
38771 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
38772 +
38773 +extern atomic_unchecked_t fscache_n_cookie_index;
38774 +extern atomic_unchecked_t fscache_n_cookie_data;
38775 +extern atomic_unchecked_t fscache_n_cookie_special;
38776 +
38777 +extern atomic_unchecked_t fscache_n_object_alloc;
38778 +extern atomic_unchecked_t fscache_n_object_no_alloc;
38779 +extern atomic_unchecked_t fscache_n_object_lookups;
38780 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
38781 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
38782 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
38783 +extern atomic_unchecked_t fscache_n_object_created;
38784 +extern atomic_unchecked_t fscache_n_object_avail;
38785 +extern atomic_unchecked_t fscache_n_object_dead;
38786 +
38787 +extern atomic_unchecked_t fscache_n_checkaux_none;
38788 +extern atomic_unchecked_t fscache_n_checkaux_okay;
38789 +extern atomic_unchecked_t fscache_n_checkaux_update;
38790 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
38791
38792 extern atomic_t fscache_n_cop_alloc_object;
38793 extern atomic_t fscache_n_cop_lookup_object;
38794 @@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t
38795 atomic_inc(stat);
38796 }
38797
38798 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
38799 +{
38800 + atomic_inc_unchecked(stat);
38801 +}
38802 +
38803 static inline void fscache_stat_d(atomic_t *stat)
38804 {
38805 atomic_dec(stat);
38806 @@ -259,6 +264,7 @@ extern const struct file_operations fsca
38807
38808 #define __fscache_stat(stat) (NULL)
38809 #define fscache_stat(stat) do {} while (0)
38810 +#define fscache_stat_unchecked(stat) do {} while (0)
38811 #define fscache_stat_d(stat) do {} while (0)
38812 #endif
38813
38814 diff -urNp linux-2.6.32.41/fs/fscache/object.c linux-2.6.32.41/fs/fscache/object.c
38815 --- linux-2.6.32.41/fs/fscache/object.c 2011-03-27 14:31:47.000000000 -0400
38816 +++ linux-2.6.32.41/fs/fscache/object.c 2011-05-04 17:56:28.000000000 -0400
38817 @@ -144,7 +144,7 @@ static void fscache_object_state_machine
38818 /* update the object metadata on disk */
38819 case FSCACHE_OBJECT_UPDATING:
38820 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
38821 - fscache_stat(&fscache_n_updates_run);
38822 + fscache_stat_unchecked(&fscache_n_updates_run);
38823 fscache_stat(&fscache_n_cop_update_object);
38824 object->cache->ops->update_object(object);
38825 fscache_stat_d(&fscache_n_cop_update_object);
38826 @@ -233,7 +233,7 @@ static void fscache_object_state_machine
38827 spin_lock(&object->lock);
38828 object->state = FSCACHE_OBJECT_DEAD;
38829 spin_unlock(&object->lock);
38830 - fscache_stat(&fscache_n_object_dead);
38831 + fscache_stat_unchecked(&fscache_n_object_dead);
38832 goto terminal_transit;
38833
38834 /* handle the parent cache of this object being withdrawn from
38835 @@ -248,7 +248,7 @@ static void fscache_object_state_machine
38836 spin_lock(&object->lock);
38837 object->state = FSCACHE_OBJECT_DEAD;
38838 spin_unlock(&object->lock);
38839 - fscache_stat(&fscache_n_object_dead);
38840 + fscache_stat_unchecked(&fscache_n_object_dead);
38841 goto terminal_transit;
38842
38843 /* complain about the object being woken up once it is
38844 @@ -492,7 +492,7 @@ static void fscache_lookup_object(struct
38845 parent->cookie->def->name, cookie->def->name,
38846 object->cache->tag->name);
38847
38848 - fscache_stat(&fscache_n_object_lookups);
38849 + fscache_stat_unchecked(&fscache_n_object_lookups);
38850 fscache_stat(&fscache_n_cop_lookup_object);
38851 ret = object->cache->ops->lookup_object(object);
38852 fscache_stat_d(&fscache_n_cop_lookup_object);
38853 @@ -503,7 +503,7 @@ static void fscache_lookup_object(struct
38854 if (ret == -ETIMEDOUT) {
38855 /* probably stuck behind another object, so move this one to
38856 * the back of the queue */
38857 - fscache_stat(&fscache_n_object_lookups_timed_out);
38858 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
38859 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
38860 }
38861
38862 @@ -526,7 +526,7 @@ void fscache_object_lookup_negative(stru
38863
38864 spin_lock(&object->lock);
38865 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
38866 - fscache_stat(&fscache_n_object_lookups_negative);
38867 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
38868
38869 /* transit here to allow write requests to begin stacking up
38870 * and read requests to begin returning ENODATA */
38871 @@ -572,7 +572,7 @@ void fscache_obtained_object(struct fsca
38872 * result, in which case there may be data available */
38873 spin_lock(&object->lock);
38874 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
38875 - fscache_stat(&fscache_n_object_lookups_positive);
38876 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
38877
38878 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
38879
38880 @@ -586,7 +586,7 @@ void fscache_obtained_object(struct fsca
38881 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
38882 } else {
38883 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
38884 - fscache_stat(&fscache_n_object_created);
38885 + fscache_stat_unchecked(&fscache_n_object_created);
38886
38887 object->state = FSCACHE_OBJECT_AVAILABLE;
38888 spin_unlock(&object->lock);
38889 @@ -633,7 +633,7 @@ static void fscache_object_available(str
38890 fscache_enqueue_dependents(object);
38891
38892 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
38893 - fscache_stat(&fscache_n_object_avail);
38894 + fscache_stat_unchecked(&fscache_n_object_avail);
38895
38896 _leave("");
38897 }
38898 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
38899 enum fscache_checkaux result;
38900
38901 if (!object->cookie->def->check_aux) {
38902 - fscache_stat(&fscache_n_checkaux_none);
38903 + fscache_stat_unchecked(&fscache_n_checkaux_none);
38904 return FSCACHE_CHECKAUX_OKAY;
38905 }
38906
38907 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
38908 switch (result) {
38909 /* entry okay as is */
38910 case FSCACHE_CHECKAUX_OKAY:
38911 - fscache_stat(&fscache_n_checkaux_okay);
38912 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
38913 break;
38914
38915 /* entry requires update */
38916 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
38917 - fscache_stat(&fscache_n_checkaux_update);
38918 + fscache_stat_unchecked(&fscache_n_checkaux_update);
38919 break;
38920
38921 /* entry requires deletion */
38922 case FSCACHE_CHECKAUX_OBSOLETE:
38923 - fscache_stat(&fscache_n_checkaux_obsolete);
38924 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
38925 break;
38926
38927 default:
38928 diff -urNp linux-2.6.32.41/fs/fscache/operation.c linux-2.6.32.41/fs/fscache/operation.c
38929 --- linux-2.6.32.41/fs/fscache/operation.c 2011-03-27 14:31:47.000000000 -0400
38930 +++ linux-2.6.32.41/fs/fscache/operation.c 2011-05-04 17:56:28.000000000 -0400
38931 @@ -16,7 +16,7 @@
38932 #include <linux/seq_file.h>
38933 #include "internal.h"
38934
38935 -atomic_t fscache_op_debug_id;
38936 +atomic_unchecked_t fscache_op_debug_id;
38937 EXPORT_SYMBOL(fscache_op_debug_id);
38938
38939 /**
38940 @@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fs
38941 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
38942 ASSERTCMP(atomic_read(&op->usage), >, 0);
38943
38944 - fscache_stat(&fscache_n_op_enqueue);
38945 + fscache_stat_unchecked(&fscache_n_op_enqueue);
38946 switch (op->flags & FSCACHE_OP_TYPE) {
38947 case FSCACHE_OP_FAST:
38948 _debug("queue fast");
38949 @@ -76,7 +76,7 @@ static void fscache_run_op(struct fscach
38950 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
38951 if (op->processor)
38952 fscache_enqueue_operation(op);
38953 - fscache_stat(&fscache_n_op_run);
38954 + fscache_stat_unchecked(&fscache_n_op_run);
38955 }
38956
38957 /*
38958 @@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct f
38959 if (object->n_ops > 0) {
38960 atomic_inc(&op->usage);
38961 list_add_tail(&op->pend_link, &object->pending_ops);
38962 - fscache_stat(&fscache_n_op_pend);
38963 + fscache_stat_unchecked(&fscache_n_op_pend);
38964 } else if (!list_empty(&object->pending_ops)) {
38965 atomic_inc(&op->usage);
38966 list_add_tail(&op->pend_link, &object->pending_ops);
38967 - fscache_stat(&fscache_n_op_pend);
38968 + fscache_stat_unchecked(&fscache_n_op_pend);
38969 fscache_start_operations(object);
38970 } else {
38971 ASSERTCMP(object->n_in_progress, ==, 0);
38972 @@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct f
38973 object->n_exclusive++; /* reads and writes must wait */
38974 atomic_inc(&op->usage);
38975 list_add_tail(&op->pend_link, &object->pending_ops);
38976 - fscache_stat(&fscache_n_op_pend);
38977 + fscache_stat_unchecked(&fscache_n_op_pend);
38978 ret = 0;
38979 } else {
38980 /* not allowed to submit ops in any other state */
38981 @@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_obj
38982 if (object->n_exclusive > 0) {
38983 atomic_inc(&op->usage);
38984 list_add_tail(&op->pend_link, &object->pending_ops);
38985 - fscache_stat(&fscache_n_op_pend);
38986 + fscache_stat_unchecked(&fscache_n_op_pend);
38987 } else if (!list_empty(&object->pending_ops)) {
38988 atomic_inc(&op->usage);
38989 list_add_tail(&op->pend_link, &object->pending_ops);
38990 - fscache_stat(&fscache_n_op_pend);
38991 + fscache_stat_unchecked(&fscache_n_op_pend);
38992 fscache_start_operations(object);
38993 } else {
38994 ASSERTCMP(object->n_exclusive, ==, 0);
38995 @@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_obj
38996 object->n_ops++;
38997 atomic_inc(&op->usage);
38998 list_add_tail(&op->pend_link, &object->pending_ops);
38999 - fscache_stat(&fscache_n_op_pend);
39000 + fscache_stat_unchecked(&fscache_n_op_pend);
39001 ret = 0;
39002 } else if (object->state == FSCACHE_OBJECT_DYING ||
39003 object->state == FSCACHE_OBJECT_LC_DYING ||
39004 object->state == FSCACHE_OBJECT_WITHDRAWING) {
39005 - fscache_stat(&fscache_n_op_rejected);
39006 + fscache_stat_unchecked(&fscache_n_op_rejected);
39007 ret = -ENOBUFS;
39008 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
39009 fscache_report_unexpected_submission(object, op, ostate);
39010 @@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_ope
39011
39012 ret = -EBUSY;
39013 if (!list_empty(&op->pend_link)) {
39014 - fscache_stat(&fscache_n_op_cancelled);
39015 + fscache_stat_unchecked(&fscache_n_op_cancelled);
39016 list_del_init(&op->pend_link);
39017 object->n_ops--;
39018 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
39019 @@ -344,7 +344,7 @@ void fscache_put_operation(struct fscach
39020 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
39021 BUG();
39022
39023 - fscache_stat(&fscache_n_op_release);
39024 + fscache_stat_unchecked(&fscache_n_op_release);
39025
39026 if (op->release) {
39027 op->release(op);
39028 @@ -361,7 +361,7 @@ void fscache_put_operation(struct fscach
39029 * lock, and defer it otherwise */
39030 if (!spin_trylock(&object->lock)) {
39031 _debug("defer put");
39032 - fscache_stat(&fscache_n_op_deferred_release);
39033 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
39034
39035 cache = object->cache;
39036 spin_lock(&cache->op_gc_list_lock);
39037 @@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_st
39038
39039 _debug("GC DEFERRED REL OBJ%x OP%x",
39040 object->debug_id, op->debug_id);
39041 - fscache_stat(&fscache_n_op_gc);
39042 + fscache_stat_unchecked(&fscache_n_op_gc);
39043
39044 ASSERTCMP(atomic_read(&op->usage), ==, 0);
39045
39046 diff -urNp linux-2.6.32.41/fs/fscache/page.c linux-2.6.32.41/fs/fscache/page.c
39047 --- linux-2.6.32.41/fs/fscache/page.c 2011-03-27 14:31:47.000000000 -0400
39048 +++ linux-2.6.32.41/fs/fscache/page.c 2011-05-04 17:56:28.000000000 -0400
39049 @@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct
39050 val = radix_tree_lookup(&cookie->stores, page->index);
39051 if (!val) {
39052 rcu_read_unlock();
39053 - fscache_stat(&fscache_n_store_vmscan_not_storing);
39054 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
39055 __fscache_uncache_page(cookie, page);
39056 return true;
39057 }
39058 @@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct
39059 spin_unlock(&cookie->stores_lock);
39060
39061 if (xpage) {
39062 - fscache_stat(&fscache_n_store_vmscan_cancelled);
39063 - fscache_stat(&fscache_n_store_radix_deletes);
39064 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
39065 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
39066 ASSERTCMP(xpage, ==, page);
39067 } else {
39068 - fscache_stat(&fscache_n_store_vmscan_gone);
39069 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
39070 }
39071
39072 wake_up_bit(&cookie->flags, 0);
39073 @@ -106,7 +106,7 @@ page_busy:
39074 /* we might want to wait here, but that could deadlock the allocator as
39075 * the slow-work threads writing to the cache may all end up sleeping
39076 * on memory allocation */
39077 - fscache_stat(&fscache_n_store_vmscan_busy);
39078 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
39079 return false;
39080 }
39081 EXPORT_SYMBOL(__fscache_maybe_release_page);
39082 @@ -130,7 +130,7 @@ static void fscache_end_page_write(struc
39083 FSCACHE_COOKIE_STORING_TAG);
39084 if (!radix_tree_tag_get(&cookie->stores, page->index,
39085 FSCACHE_COOKIE_PENDING_TAG)) {
39086 - fscache_stat(&fscache_n_store_radix_deletes);
39087 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
39088 xpage = radix_tree_delete(&cookie->stores, page->index);
39089 }
39090 spin_unlock(&cookie->stores_lock);
39091 @@ -151,7 +151,7 @@ static void fscache_attr_changed_op(stru
39092
39093 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
39094
39095 - fscache_stat(&fscache_n_attr_changed_calls);
39096 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
39097
39098 if (fscache_object_is_active(object)) {
39099 fscache_set_op_state(op, "CallFS");
39100 @@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscach
39101
39102 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
39103
39104 - fscache_stat(&fscache_n_attr_changed);
39105 + fscache_stat_unchecked(&fscache_n_attr_changed);
39106
39107 op = kzalloc(sizeof(*op), GFP_KERNEL);
39108 if (!op) {
39109 - fscache_stat(&fscache_n_attr_changed_nomem);
39110 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
39111 _leave(" = -ENOMEM");
39112 return -ENOMEM;
39113 }
39114 @@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscach
39115 if (fscache_submit_exclusive_op(object, op) < 0)
39116 goto nobufs;
39117 spin_unlock(&cookie->lock);
39118 - fscache_stat(&fscache_n_attr_changed_ok);
39119 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
39120 fscache_put_operation(op);
39121 _leave(" = 0");
39122 return 0;
39123 @@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscach
39124 nobufs:
39125 spin_unlock(&cookie->lock);
39126 kfree(op);
39127 - fscache_stat(&fscache_n_attr_changed_nobufs);
39128 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
39129 _leave(" = %d", -ENOBUFS);
39130 return -ENOBUFS;
39131 }
39132 @@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache
39133 /* allocate a retrieval operation and attempt to submit it */
39134 op = kzalloc(sizeof(*op), GFP_NOIO);
39135 if (!op) {
39136 - fscache_stat(&fscache_n_retrievals_nomem);
39137 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
39138 return NULL;
39139 }
39140
39141 @@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_loo
39142 return 0;
39143 }
39144
39145 - fscache_stat(&fscache_n_retrievals_wait);
39146 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
39147
39148 jif = jiffies;
39149 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
39150 fscache_wait_bit_interruptible,
39151 TASK_INTERRUPTIBLE) != 0) {
39152 - fscache_stat(&fscache_n_retrievals_intr);
39153 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
39154 _leave(" = -ERESTARTSYS");
39155 return -ERESTARTSYS;
39156 }
39157 @@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_loo
39158 */
39159 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
39160 struct fscache_retrieval *op,
39161 - atomic_t *stat_op_waits,
39162 - atomic_t *stat_object_dead)
39163 + atomic_unchecked_t *stat_op_waits,
39164 + atomic_unchecked_t *stat_object_dead)
39165 {
39166 int ret;
39167
39168 @@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_ac
39169 goto check_if_dead;
39170
39171 _debug(">>> WT");
39172 - fscache_stat(stat_op_waits);
39173 + fscache_stat_unchecked(stat_op_waits);
39174 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
39175 fscache_wait_bit_interruptible,
39176 TASK_INTERRUPTIBLE) < 0) {
39177 @@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_ac
39178
39179 check_if_dead:
39180 if (unlikely(fscache_object_is_dead(object))) {
39181 - fscache_stat(stat_object_dead);
39182 + fscache_stat_unchecked(stat_object_dead);
39183 return -ENOBUFS;
39184 }
39185 return 0;
39186 @@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct
39187
39188 _enter("%p,%p,,,", cookie, page);
39189
39190 - fscache_stat(&fscache_n_retrievals);
39191 + fscache_stat_unchecked(&fscache_n_retrievals);
39192
39193 if (hlist_empty(&cookie->backing_objects))
39194 goto nobufs;
39195 @@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct
39196 goto nobufs_unlock;
39197 spin_unlock(&cookie->lock);
39198
39199 - fscache_stat(&fscache_n_retrieval_ops);
39200 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
39201
39202 /* pin the netfs read context in case we need to do the actual netfs
39203 * read because we've encountered a cache read failure */
39204 @@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct
39205
39206 error:
39207 if (ret == -ENOMEM)
39208 - fscache_stat(&fscache_n_retrievals_nomem);
39209 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
39210 else if (ret == -ERESTARTSYS)
39211 - fscache_stat(&fscache_n_retrievals_intr);
39212 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
39213 else if (ret == -ENODATA)
39214 - fscache_stat(&fscache_n_retrievals_nodata);
39215 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
39216 else if (ret < 0)
39217 - fscache_stat(&fscache_n_retrievals_nobufs);
39218 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
39219 else
39220 - fscache_stat(&fscache_n_retrievals_ok);
39221 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
39222
39223 fscache_put_retrieval(op);
39224 _leave(" = %d", ret);
39225 @@ -453,7 +453,7 @@ nobufs_unlock:
39226 spin_unlock(&cookie->lock);
39227 kfree(op);
39228 nobufs:
39229 - fscache_stat(&fscache_n_retrievals_nobufs);
39230 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
39231 _leave(" = -ENOBUFS");
39232 return -ENOBUFS;
39233 }
39234 @@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct
39235
39236 _enter("%p,,%d,,,", cookie, *nr_pages);
39237
39238 - fscache_stat(&fscache_n_retrievals);
39239 + fscache_stat_unchecked(&fscache_n_retrievals);
39240
39241 if (hlist_empty(&cookie->backing_objects))
39242 goto nobufs;
39243 @@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct
39244 goto nobufs_unlock;
39245 spin_unlock(&cookie->lock);
39246
39247 - fscache_stat(&fscache_n_retrieval_ops);
39248 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
39249
39250 /* pin the netfs read context in case we need to do the actual netfs
39251 * read because we've encountered a cache read failure */
39252 @@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct
39253
39254 error:
39255 if (ret == -ENOMEM)
39256 - fscache_stat(&fscache_n_retrievals_nomem);
39257 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
39258 else if (ret == -ERESTARTSYS)
39259 - fscache_stat(&fscache_n_retrievals_intr);
39260 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
39261 else if (ret == -ENODATA)
39262 - fscache_stat(&fscache_n_retrievals_nodata);
39263 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
39264 else if (ret < 0)
39265 - fscache_stat(&fscache_n_retrievals_nobufs);
39266 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
39267 else
39268 - fscache_stat(&fscache_n_retrievals_ok);
39269 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
39270
39271 fscache_put_retrieval(op);
39272 _leave(" = %d", ret);
39273 @@ -570,7 +570,7 @@ nobufs_unlock:
39274 spin_unlock(&cookie->lock);
39275 kfree(op);
39276 nobufs:
39277 - fscache_stat(&fscache_n_retrievals_nobufs);
39278 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
39279 _leave(" = -ENOBUFS");
39280 return -ENOBUFS;
39281 }
39282 @@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_
39283
39284 _enter("%p,%p,,,", cookie, page);
39285
39286 - fscache_stat(&fscache_n_allocs);
39287 + fscache_stat_unchecked(&fscache_n_allocs);
39288
39289 if (hlist_empty(&cookie->backing_objects))
39290 goto nobufs;
39291 @@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_
39292 goto nobufs_unlock;
39293 spin_unlock(&cookie->lock);
39294
39295 - fscache_stat(&fscache_n_alloc_ops);
39296 + fscache_stat_unchecked(&fscache_n_alloc_ops);
39297
39298 ret = fscache_wait_for_retrieval_activation(
39299 object, op,
39300 @@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_
39301
39302 error:
39303 if (ret == -ERESTARTSYS)
39304 - fscache_stat(&fscache_n_allocs_intr);
39305 + fscache_stat_unchecked(&fscache_n_allocs_intr);
39306 else if (ret < 0)
39307 - fscache_stat(&fscache_n_allocs_nobufs);
39308 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
39309 else
39310 - fscache_stat(&fscache_n_allocs_ok);
39311 + fscache_stat_unchecked(&fscache_n_allocs_ok);
39312
39313 fscache_put_retrieval(op);
39314 _leave(" = %d", ret);
39315 @@ -651,7 +651,7 @@ nobufs_unlock:
39316 spin_unlock(&cookie->lock);
39317 kfree(op);
39318 nobufs:
39319 - fscache_stat(&fscache_n_allocs_nobufs);
39320 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
39321 _leave(" = -ENOBUFS");
39322 return -ENOBUFS;
39323 }
39324 @@ -694,7 +694,7 @@ static void fscache_write_op(struct fsca
39325
39326 spin_lock(&cookie->stores_lock);
39327
39328 - fscache_stat(&fscache_n_store_calls);
39329 + fscache_stat_unchecked(&fscache_n_store_calls);
39330
39331 /* find a page to store */
39332 page = NULL;
39333 @@ -705,7 +705,7 @@ static void fscache_write_op(struct fsca
39334 page = results[0];
39335 _debug("gang %d [%lx]", n, page->index);
39336 if (page->index > op->store_limit) {
39337 - fscache_stat(&fscache_n_store_pages_over_limit);
39338 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
39339 goto superseded;
39340 }
39341
39342 @@ -721,7 +721,7 @@ static void fscache_write_op(struct fsca
39343
39344 if (page) {
39345 fscache_set_op_state(&op->op, "Store");
39346 - fscache_stat(&fscache_n_store_pages);
39347 + fscache_stat_unchecked(&fscache_n_store_pages);
39348 fscache_stat(&fscache_n_cop_write_page);
39349 ret = object->cache->ops->write_page(op, page);
39350 fscache_stat_d(&fscache_n_cop_write_page);
39351 @@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_
39352 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
39353 ASSERT(PageFsCache(page));
39354
39355 - fscache_stat(&fscache_n_stores);
39356 + fscache_stat_unchecked(&fscache_n_stores);
39357
39358 op = kzalloc(sizeof(*op), GFP_NOIO);
39359 if (!op)
39360 @@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_
39361 spin_unlock(&cookie->stores_lock);
39362 spin_unlock(&object->lock);
39363
39364 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
39365 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
39366 op->store_limit = object->store_limit;
39367
39368 if (fscache_submit_op(object, &op->op) < 0)
39369 @@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_
39370
39371 spin_unlock(&cookie->lock);
39372 radix_tree_preload_end();
39373 - fscache_stat(&fscache_n_store_ops);
39374 - fscache_stat(&fscache_n_stores_ok);
39375 + fscache_stat_unchecked(&fscache_n_store_ops);
39376 + fscache_stat_unchecked(&fscache_n_stores_ok);
39377
39378 /* the slow work queue now carries its own ref on the object */
39379 fscache_put_operation(&op->op);
39380 @@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_
39381 return 0;
39382
39383 already_queued:
39384 - fscache_stat(&fscache_n_stores_again);
39385 + fscache_stat_unchecked(&fscache_n_stores_again);
39386 already_pending:
39387 spin_unlock(&cookie->stores_lock);
39388 spin_unlock(&object->lock);
39389 spin_unlock(&cookie->lock);
39390 radix_tree_preload_end();
39391 kfree(op);
39392 - fscache_stat(&fscache_n_stores_ok);
39393 + fscache_stat_unchecked(&fscache_n_stores_ok);
39394 _leave(" = 0");
39395 return 0;
39396
39397 @@ -886,14 +886,14 @@ nobufs:
39398 spin_unlock(&cookie->lock);
39399 radix_tree_preload_end();
39400 kfree(op);
39401 - fscache_stat(&fscache_n_stores_nobufs);
39402 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
39403 _leave(" = -ENOBUFS");
39404 return -ENOBUFS;
39405
39406 nomem_free:
39407 kfree(op);
39408 nomem:
39409 - fscache_stat(&fscache_n_stores_oom);
39410 + fscache_stat_unchecked(&fscache_n_stores_oom);
39411 _leave(" = -ENOMEM");
39412 return -ENOMEM;
39413 }
39414 @@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscac
39415 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
39416 ASSERTCMP(page, !=, NULL);
39417
39418 - fscache_stat(&fscache_n_uncaches);
39419 + fscache_stat_unchecked(&fscache_n_uncaches);
39420
39421 /* cache withdrawal may beat us to it */
39422 if (!PageFsCache(page))
39423 @@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fs
39424 unsigned long loop;
39425
39426 #ifdef CONFIG_FSCACHE_STATS
39427 - atomic_add(pagevec->nr, &fscache_n_marks);
39428 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
39429 #endif
39430
39431 for (loop = 0; loop < pagevec->nr; loop++) {
39432 diff -urNp linux-2.6.32.41/fs/fscache/stats.c linux-2.6.32.41/fs/fscache/stats.c
39433 --- linux-2.6.32.41/fs/fscache/stats.c 2011-03-27 14:31:47.000000000 -0400
39434 +++ linux-2.6.32.41/fs/fscache/stats.c 2011-05-04 17:56:28.000000000 -0400
39435 @@ -18,95 +18,95 @@
39436 /*
39437 * operation counters
39438 */
39439 -atomic_t fscache_n_op_pend;
39440 -atomic_t fscache_n_op_run;
39441 -atomic_t fscache_n_op_enqueue;
39442 -atomic_t fscache_n_op_requeue;
39443 -atomic_t fscache_n_op_deferred_release;
39444 -atomic_t fscache_n_op_release;
39445 -atomic_t fscache_n_op_gc;
39446 -atomic_t fscache_n_op_cancelled;
39447 -atomic_t fscache_n_op_rejected;
39448 -
39449 -atomic_t fscache_n_attr_changed;
39450 -atomic_t fscache_n_attr_changed_ok;
39451 -atomic_t fscache_n_attr_changed_nobufs;
39452 -atomic_t fscache_n_attr_changed_nomem;
39453 -atomic_t fscache_n_attr_changed_calls;
39454 -
39455 -atomic_t fscache_n_allocs;
39456 -atomic_t fscache_n_allocs_ok;
39457 -atomic_t fscache_n_allocs_wait;
39458 -atomic_t fscache_n_allocs_nobufs;
39459 -atomic_t fscache_n_allocs_intr;
39460 -atomic_t fscache_n_allocs_object_dead;
39461 -atomic_t fscache_n_alloc_ops;
39462 -atomic_t fscache_n_alloc_op_waits;
39463 -
39464 -atomic_t fscache_n_retrievals;
39465 -atomic_t fscache_n_retrievals_ok;
39466 -atomic_t fscache_n_retrievals_wait;
39467 -atomic_t fscache_n_retrievals_nodata;
39468 -atomic_t fscache_n_retrievals_nobufs;
39469 -atomic_t fscache_n_retrievals_intr;
39470 -atomic_t fscache_n_retrievals_nomem;
39471 -atomic_t fscache_n_retrievals_object_dead;
39472 -atomic_t fscache_n_retrieval_ops;
39473 -atomic_t fscache_n_retrieval_op_waits;
39474 -
39475 -atomic_t fscache_n_stores;
39476 -atomic_t fscache_n_stores_ok;
39477 -atomic_t fscache_n_stores_again;
39478 -atomic_t fscache_n_stores_nobufs;
39479 -atomic_t fscache_n_stores_oom;
39480 -atomic_t fscache_n_store_ops;
39481 -atomic_t fscache_n_store_calls;
39482 -atomic_t fscache_n_store_pages;
39483 -atomic_t fscache_n_store_radix_deletes;
39484 -atomic_t fscache_n_store_pages_over_limit;
39485 -
39486 -atomic_t fscache_n_store_vmscan_not_storing;
39487 -atomic_t fscache_n_store_vmscan_gone;
39488 -atomic_t fscache_n_store_vmscan_busy;
39489 -atomic_t fscache_n_store_vmscan_cancelled;
39490 -
39491 -atomic_t fscache_n_marks;
39492 -atomic_t fscache_n_uncaches;
39493 -
39494 -atomic_t fscache_n_acquires;
39495 -atomic_t fscache_n_acquires_null;
39496 -atomic_t fscache_n_acquires_no_cache;
39497 -atomic_t fscache_n_acquires_ok;
39498 -atomic_t fscache_n_acquires_nobufs;
39499 -atomic_t fscache_n_acquires_oom;
39500 -
39501 -atomic_t fscache_n_updates;
39502 -atomic_t fscache_n_updates_null;
39503 -atomic_t fscache_n_updates_run;
39504 -
39505 -atomic_t fscache_n_relinquishes;
39506 -atomic_t fscache_n_relinquishes_null;
39507 -atomic_t fscache_n_relinquishes_waitcrt;
39508 -atomic_t fscache_n_relinquishes_retire;
39509 -
39510 -atomic_t fscache_n_cookie_index;
39511 -atomic_t fscache_n_cookie_data;
39512 -atomic_t fscache_n_cookie_special;
39513 -
39514 -atomic_t fscache_n_object_alloc;
39515 -atomic_t fscache_n_object_no_alloc;
39516 -atomic_t fscache_n_object_lookups;
39517 -atomic_t fscache_n_object_lookups_negative;
39518 -atomic_t fscache_n_object_lookups_positive;
39519 -atomic_t fscache_n_object_lookups_timed_out;
39520 -atomic_t fscache_n_object_created;
39521 -atomic_t fscache_n_object_avail;
39522 -atomic_t fscache_n_object_dead;
39523 -
39524 -atomic_t fscache_n_checkaux_none;
39525 -atomic_t fscache_n_checkaux_okay;
39526 -atomic_t fscache_n_checkaux_update;
39527 -atomic_t fscache_n_checkaux_obsolete;
39528 +atomic_unchecked_t fscache_n_op_pend;
39529 +atomic_unchecked_t fscache_n_op_run;
39530 +atomic_unchecked_t fscache_n_op_enqueue;
39531 +atomic_unchecked_t fscache_n_op_requeue;
39532 +atomic_unchecked_t fscache_n_op_deferred_release;
39533 +atomic_unchecked_t fscache_n_op_release;
39534 +atomic_unchecked_t fscache_n_op_gc;
39535 +atomic_unchecked_t fscache_n_op_cancelled;
39536 +atomic_unchecked_t fscache_n_op_rejected;
39537 +
39538 +atomic_unchecked_t fscache_n_attr_changed;
39539 +atomic_unchecked_t fscache_n_attr_changed_ok;
39540 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
39541 +atomic_unchecked_t fscache_n_attr_changed_nomem;
39542 +atomic_unchecked_t fscache_n_attr_changed_calls;
39543 +
39544 +atomic_unchecked_t fscache_n_allocs;
39545 +atomic_unchecked_t fscache_n_allocs_ok;
39546 +atomic_unchecked_t fscache_n_allocs_wait;
39547 +atomic_unchecked_t fscache_n_allocs_nobufs;
39548 +atomic_unchecked_t fscache_n_allocs_intr;
39549 +atomic_unchecked_t fscache_n_allocs_object_dead;
39550 +atomic_unchecked_t fscache_n_alloc_ops;
39551 +atomic_unchecked_t fscache_n_alloc_op_waits;
39552 +
39553 +atomic_unchecked_t fscache_n_retrievals;
39554 +atomic_unchecked_t fscache_n_retrievals_ok;
39555 +atomic_unchecked_t fscache_n_retrievals_wait;
39556 +atomic_unchecked_t fscache_n_retrievals_nodata;
39557 +atomic_unchecked_t fscache_n_retrievals_nobufs;
39558 +atomic_unchecked_t fscache_n_retrievals_intr;
39559 +atomic_unchecked_t fscache_n_retrievals_nomem;
39560 +atomic_unchecked_t fscache_n_retrievals_object_dead;
39561 +atomic_unchecked_t fscache_n_retrieval_ops;
39562 +atomic_unchecked_t fscache_n_retrieval_op_waits;
39563 +
39564 +atomic_unchecked_t fscache_n_stores;
39565 +atomic_unchecked_t fscache_n_stores_ok;
39566 +atomic_unchecked_t fscache_n_stores_again;
39567 +atomic_unchecked_t fscache_n_stores_nobufs;
39568 +atomic_unchecked_t fscache_n_stores_oom;
39569 +atomic_unchecked_t fscache_n_store_ops;
39570 +atomic_unchecked_t fscache_n_store_calls;
39571 +atomic_unchecked_t fscache_n_store_pages;
39572 +atomic_unchecked_t fscache_n_store_radix_deletes;
39573 +atomic_unchecked_t fscache_n_store_pages_over_limit;
39574 +
39575 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
39576 +atomic_unchecked_t fscache_n_store_vmscan_gone;
39577 +atomic_unchecked_t fscache_n_store_vmscan_busy;
39578 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
39579 +
39580 +atomic_unchecked_t fscache_n_marks;
39581 +atomic_unchecked_t fscache_n_uncaches;
39582 +
39583 +atomic_unchecked_t fscache_n_acquires;
39584 +atomic_unchecked_t fscache_n_acquires_null;
39585 +atomic_unchecked_t fscache_n_acquires_no_cache;
39586 +atomic_unchecked_t fscache_n_acquires_ok;
39587 +atomic_unchecked_t fscache_n_acquires_nobufs;
39588 +atomic_unchecked_t fscache_n_acquires_oom;
39589 +
39590 +atomic_unchecked_t fscache_n_updates;
39591 +atomic_unchecked_t fscache_n_updates_null;
39592 +atomic_unchecked_t fscache_n_updates_run;
39593 +
39594 +atomic_unchecked_t fscache_n_relinquishes;
39595 +atomic_unchecked_t fscache_n_relinquishes_null;
39596 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
39597 +atomic_unchecked_t fscache_n_relinquishes_retire;
39598 +
39599 +atomic_unchecked_t fscache_n_cookie_index;
39600 +atomic_unchecked_t fscache_n_cookie_data;
39601 +atomic_unchecked_t fscache_n_cookie_special;
39602 +
39603 +atomic_unchecked_t fscache_n_object_alloc;
39604 +atomic_unchecked_t fscache_n_object_no_alloc;
39605 +atomic_unchecked_t fscache_n_object_lookups;
39606 +atomic_unchecked_t fscache_n_object_lookups_negative;
39607 +atomic_unchecked_t fscache_n_object_lookups_positive;
39608 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
39609 +atomic_unchecked_t fscache_n_object_created;
39610 +atomic_unchecked_t fscache_n_object_avail;
39611 +atomic_unchecked_t fscache_n_object_dead;
39612 +
39613 +atomic_unchecked_t fscache_n_checkaux_none;
39614 +atomic_unchecked_t fscache_n_checkaux_okay;
39615 +atomic_unchecked_t fscache_n_checkaux_update;
39616 +atomic_unchecked_t fscache_n_checkaux_obsolete;
39617
39618 atomic_t fscache_n_cop_alloc_object;
39619 atomic_t fscache_n_cop_lookup_object;
39620 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
39621 seq_puts(m, "FS-Cache statistics\n");
39622
39623 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
39624 - atomic_read(&fscache_n_cookie_index),
39625 - atomic_read(&fscache_n_cookie_data),
39626 - atomic_read(&fscache_n_cookie_special));
39627 + atomic_read_unchecked(&fscache_n_cookie_index),
39628 + atomic_read_unchecked(&fscache_n_cookie_data),
39629 + atomic_read_unchecked(&fscache_n_cookie_special));
39630
39631 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
39632 - atomic_read(&fscache_n_object_alloc),
39633 - atomic_read(&fscache_n_object_no_alloc),
39634 - atomic_read(&fscache_n_object_avail),
39635 - atomic_read(&fscache_n_object_dead));
39636 + atomic_read_unchecked(&fscache_n_object_alloc),
39637 + atomic_read_unchecked(&fscache_n_object_no_alloc),
39638 + atomic_read_unchecked(&fscache_n_object_avail),
39639 + atomic_read_unchecked(&fscache_n_object_dead));
39640 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
39641 - atomic_read(&fscache_n_checkaux_none),
39642 - atomic_read(&fscache_n_checkaux_okay),
39643 - atomic_read(&fscache_n_checkaux_update),
39644 - atomic_read(&fscache_n_checkaux_obsolete));
39645 + atomic_read_unchecked(&fscache_n_checkaux_none),
39646 + atomic_read_unchecked(&fscache_n_checkaux_okay),
39647 + atomic_read_unchecked(&fscache_n_checkaux_update),
39648 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
39649
39650 seq_printf(m, "Pages : mrk=%u unc=%u\n",
39651 - atomic_read(&fscache_n_marks),
39652 - atomic_read(&fscache_n_uncaches));
39653 + atomic_read_unchecked(&fscache_n_marks),
39654 + atomic_read_unchecked(&fscache_n_uncaches));
39655
39656 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
39657 " oom=%u\n",
39658 - atomic_read(&fscache_n_acquires),
39659 - atomic_read(&fscache_n_acquires_null),
39660 - atomic_read(&fscache_n_acquires_no_cache),
39661 - atomic_read(&fscache_n_acquires_ok),
39662 - atomic_read(&fscache_n_acquires_nobufs),
39663 - atomic_read(&fscache_n_acquires_oom));
39664 + atomic_read_unchecked(&fscache_n_acquires),
39665 + atomic_read_unchecked(&fscache_n_acquires_null),
39666 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
39667 + atomic_read_unchecked(&fscache_n_acquires_ok),
39668 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
39669 + atomic_read_unchecked(&fscache_n_acquires_oom));
39670
39671 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
39672 - atomic_read(&fscache_n_object_lookups),
39673 - atomic_read(&fscache_n_object_lookups_negative),
39674 - atomic_read(&fscache_n_object_lookups_positive),
39675 - atomic_read(&fscache_n_object_lookups_timed_out),
39676 - atomic_read(&fscache_n_object_created));
39677 + atomic_read_unchecked(&fscache_n_object_lookups),
39678 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
39679 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
39680 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
39681 + atomic_read_unchecked(&fscache_n_object_created));
39682
39683 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
39684 - atomic_read(&fscache_n_updates),
39685 - atomic_read(&fscache_n_updates_null),
39686 - atomic_read(&fscache_n_updates_run));
39687 + atomic_read_unchecked(&fscache_n_updates),
39688 + atomic_read_unchecked(&fscache_n_updates_null),
39689 + atomic_read_unchecked(&fscache_n_updates_run));
39690
39691 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
39692 - atomic_read(&fscache_n_relinquishes),
39693 - atomic_read(&fscache_n_relinquishes_null),
39694 - atomic_read(&fscache_n_relinquishes_waitcrt),
39695 - atomic_read(&fscache_n_relinquishes_retire));
39696 + atomic_read_unchecked(&fscache_n_relinquishes),
39697 + atomic_read_unchecked(&fscache_n_relinquishes_null),
39698 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
39699 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
39700
39701 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
39702 - atomic_read(&fscache_n_attr_changed),
39703 - atomic_read(&fscache_n_attr_changed_ok),
39704 - atomic_read(&fscache_n_attr_changed_nobufs),
39705 - atomic_read(&fscache_n_attr_changed_nomem),
39706 - atomic_read(&fscache_n_attr_changed_calls));
39707 + atomic_read_unchecked(&fscache_n_attr_changed),
39708 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
39709 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
39710 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
39711 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
39712
39713 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
39714 - atomic_read(&fscache_n_allocs),
39715 - atomic_read(&fscache_n_allocs_ok),
39716 - atomic_read(&fscache_n_allocs_wait),
39717 - atomic_read(&fscache_n_allocs_nobufs),
39718 - atomic_read(&fscache_n_allocs_intr));
39719 + atomic_read_unchecked(&fscache_n_allocs),
39720 + atomic_read_unchecked(&fscache_n_allocs_ok),
39721 + atomic_read_unchecked(&fscache_n_allocs_wait),
39722 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
39723 + atomic_read_unchecked(&fscache_n_allocs_intr));
39724 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
39725 - atomic_read(&fscache_n_alloc_ops),
39726 - atomic_read(&fscache_n_alloc_op_waits),
39727 - atomic_read(&fscache_n_allocs_object_dead));
39728 + atomic_read_unchecked(&fscache_n_alloc_ops),
39729 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
39730 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
39731
39732 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
39733 " int=%u oom=%u\n",
39734 - atomic_read(&fscache_n_retrievals),
39735 - atomic_read(&fscache_n_retrievals_ok),
39736 - atomic_read(&fscache_n_retrievals_wait),
39737 - atomic_read(&fscache_n_retrievals_nodata),
39738 - atomic_read(&fscache_n_retrievals_nobufs),
39739 - atomic_read(&fscache_n_retrievals_intr),
39740 - atomic_read(&fscache_n_retrievals_nomem));
39741 + atomic_read_unchecked(&fscache_n_retrievals),
39742 + atomic_read_unchecked(&fscache_n_retrievals_ok),
39743 + atomic_read_unchecked(&fscache_n_retrievals_wait),
39744 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
39745 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
39746 + atomic_read_unchecked(&fscache_n_retrievals_intr),
39747 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
39748 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
39749 - atomic_read(&fscache_n_retrieval_ops),
39750 - atomic_read(&fscache_n_retrieval_op_waits),
39751 - atomic_read(&fscache_n_retrievals_object_dead));
39752 + atomic_read_unchecked(&fscache_n_retrieval_ops),
39753 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
39754 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
39755
39756 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
39757 - atomic_read(&fscache_n_stores),
39758 - atomic_read(&fscache_n_stores_ok),
39759 - atomic_read(&fscache_n_stores_again),
39760 - atomic_read(&fscache_n_stores_nobufs),
39761 - atomic_read(&fscache_n_stores_oom));
39762 + atomic_read_unchecked(&fscache_n_stores),
39763 + atomic_read_unchecked(&fscache_n_stores_ok),
39764 + atomic_read_unchecked(&fscache_n_stores_again),
39765 + atomic_read_unchecked(&fscache_n_stores_nobufs),
39766 + atomic_read_unchecked(&fscache_n_stores_oom));
39767 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
39768 - atomic_read(&fscache_n_store_ops),
39769 - atomic_read(&fscache_n_store_calls),
39770 - atomic_read(&fscache_n_store_pages),
39771 - atomic_read(&fscache_n_store_radix_deletes),
39772 - atomic_read(&fscache_n_store_pages_over_limit));
39773 + atomic_read_unchecked(&fscache_n_store_ops),
39774 + atomic_read_unchecked(&fscache_n_store_calls),
39775 + atomic_read_unchecked(&fscache_n_store_pages),
39776 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
39777 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
39778
39779 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
39780 - atomic_read(&fscache_n_store_vmscan_not_storing),
39781 - atomic_read(&fscache_n_store_vmscan_gone),
39782 - atomic_read(&fscache_n_store_vmscan_busy),
39783 - atomic_read(&fscache_n_store_vmscan_cancelled));
39784 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
39785 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
39786 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
39787 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
39788
39789 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
39790 - atomic_read(&fscache_n_op_pend),
39791 - atomic_read(&fscache_n_op_run),
39792 - atomic_read(&fscache_n_op_enqueue),
39793 - atomic_read(&fscache_n_op_cancelled),
39794 - atomic_read(&fscache_n_op_rejected));
39795 + atomic_read_unchecked(&fscache_n_op_pend),
39796 + atomic_read_unchecked(&fscache_n_op_run),
39797 + atomic_read_unchecked(&fscache_n_op_enqueue),
39798 + atomic_read_unchecked(&fscache_n_op_cancelled),
39799 + atomic_read_unchecked(&fscache_n_op_rejected));
39800 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
39801 - atomic_read(&fscache_n_op_deferred_release),
39802 - atomic_read(&fscache_n_op_release),
39803 - atomic_read(&fscache_n_op_gc));
39804 + atomic_read_unchecked(&fscache_n_op_deferred_release),
39805 + atomic_read_unchecked(&fscache_n_op_release),
39806 + atomic_read_unchecked(&fscache_n_op_gc));
39807
39808 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
39809 atomic_read(&fscache_n_cop_alloc_object),
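The FS-Cache hunks above follow one pattern that recurs throughout this patch: statistics counters whose only job is counting, and which may legitimately wrap, are retyped from atomic_t to atomic_unchecked_t and read with atomic_read_unchecked(), so PaX's reference-counter overflow protection does not fire on them while real refcounts stay checked. A minimal userspace sketch of the idea follows; the type and helper names mirror the patch but are stand-ins, not the kernel implementation.

    /* Hedged sketch: why pure statistics counters are opted out of overflow
     * checking. Stand-in types; not the kernel's atomic implementation. */
    #include <limits.h>
    #include <stdio.h>

    typedef struct { unsigned int counter; } atomic_unchecked_t;  /* wrap-around tolerated */

    static void atomic_inc_unchecked(atomic_unchecked_t *v) { v->counter++; }
    static unsigned int atomic_read_unchecked(const atomic_unchecked_t *v) { return v->counter; }

    int main(void)
    {
        atomic_unchecked_t n_lookups = { UINT_MAX };  /* about to wrap */
        atomic_inc_unchecked(&n_lookups);             /* wraps to 0; a checked refcount would be trapped */
        printf("lookups=%u\n", atomic_read_unchecked(&n_lookups));
        return 0;
    }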
39810 diff -urNp linux-2.6.32.41/fs/fs_struct.c linux-2.6.32.41/fs/fs_struct.c
39811 --- linux-2.6.32.41/fs/fs_struct.c 2011-03-27 14:31:47.000000000 -0400
39812 +++ linux-2.6.32.41/fs/fs_struct.c 2011-04-17 15:56:46.000000000 -0400
39813 @@ -4,6 +4,7 @@
39814 #include <linux/path.h>
39815 #include <linux/slab.h>
39816 #include <linux/fs_struct.h>
39817 +#include <linux/grsecurity.h>
39818
39819 /*
39820 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
39821 @@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, s
39822 old_root = fs->root;
39823 fs->root = *path;
39824 path_get(path);
39825 + gr_set_chroot_entries(current, path);
39826 write_unlock(&fs->lock);
39827 if (old_root.dentry)
39828 path_put(&old_root);
39829 @@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_roo
39830 && fs->root.mnt == old_root->mnt) {
39831 path_get(new_root);
39832 fs->root = *new_root;
39833 + gr_set_chroot_entries(p, new_root);
39834 count++;
39835 }
39836 if (fs->pwd.dentry == old_root->dentry
39837 @@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
39838 task_lock(tsk);
39839 write_lock(&fs->lock);
39840 tsk->fs = NULL;
39841 - kill = !--fs->users;
39842 + gr_clear_chroot_entries(tsk);
39843 + kill = !atomic_dec_return(&fs->users);
39844 write_unlock(&fs->lock);
39845 task_unlock(tsk);
39846 if (kill)
39847 @@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct
39848 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
39849 /* We don't need to lock fs - think why ;-) */
39850 if (fs) {
39851 - fs->users = 1;
39852 + atomic_set(&fs->users, 1);
39853 fs->in_exec = 0;
39854 rwlock_init(&fs->lock);
39855 fs->umask = old->umask;
39856 @@ -127,8 +131,9 @@ int unshare_fs_struct(void)
39857
39858 task_lock(current);
39859 write_lock(&fs->lock);
39860 - kill = !--fs->users;
39861 + kill = !atomic_dec_return(&fs->users);
39862 current->fs = new_fs;
39863 + gr_set_chroot_entries(current, &new_fs->root);
39864 write_unlock(&fs->lock);
39865 task_unlock(current);
39866
39867 @@ -147,7 +152,7 @@ EXPORT_SYMBOL(current_umask);
39868
39869 /* to be mentioned only in INIT_TASK */
39870 struct fs_struct init_fs = {
39871 - .users = 1,
39872 + .users = ATOMIC_INIT(1),
39873 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
39874 .umask = 0022,
39875 };
39876 @@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
39877 task_lock(current);
39878
39879 write_lock(&init_fs.lock);
39880 - init_fs.users++;
39881 + atomic_inc(&init_fs.users);
39882 write_unlock(&init_fs.lock);
39883
39884 write_lock(&fs->lock);
39885 current->fs = &init_fs;
39886 - kill = !--fs->users;
39887 + gr_set_chroot_entries(current, &current->fs->root);
39888 + kill = !atomic_dec_return(&fs->users);
39889 write_unlock(&fs->lock);
39890
39891 task_unlock(current);
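In the fs_struct hunks above, fs->users changes from a plain integer guarded by fs->lock to an atomic_t (with ATOMIC_INIT for init_fs), and grsecurity's chroot bookkeeping is hooked into every place the root changes. A hedged sketch of the refcount-drop pattern the patch switches to, with C11 atomics standing in for the kernel's atomic_dec_return():

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct fs_struct_sketch { atomic_int users; };     /* was: int users; */

    /* kill = !atomic_dec_return(&fs->users): true only for the last user */
    static bool drop_user(struct fs_struct_sketch *fs)
    {
        return atomic_fetch_sub(&fs->users, 1) == 1;
    }

    int main(void)
    {
        struct fs_struct_sketch fs = { .users = 2 };   /* ~ ATOMIC_INIT(2) */
        printf("kill=%d\n", drop_user(&fs));           /* 0: another user remains */
        printf("kill=%d\n", drop_user(&fs));           /* 1: last reference dropped */
        return 0;
    }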
39892 diff -urNp linux-2.6.32.41/fs/fuse/cuse.c linux-2.6.32.41/fs/fuse/cuse.c
39893 --- linux-2.6.32.41/fs/fuse/cuse.c 2011-03-27 14:31:47.000000000 -0400
39894 +++ linux-2.6.32.41/fs/fuse/cuse.c 2011-04-17 15:56:46.000000000 -0400
39895 @@ -528,8 +528,18 @@ static int cuse_channel_release(struct i
39896 return rc;
39897 }
39898
39899 -static struct file_operations cuse_channel_fops; /* initialized during init */
39900 -
39901 +static const struct file_operations cuse_channel_fops = { /* initialized during init */
39902 + .owner = THIS_MODULE,
39903 + .llseek = no_llseek,
39904 + .read = do_sync_read,
39905 + .aio_read = fuse_dev_read,
39906 + .write = do_sync_write,
39907 + .aio_write = fuse_dev_write,
39908 + .poll = fuse_dev_poll,
39909 + .open = cuse_channel_open,
39910 + .release = cuse_channel_release,
39911 + .fasync = fuse_dev_fasync,
39912 +};
39913
39914 /**************************************************************************
39915 * Misc stuff and module initializatiion
39916 @@ -575,12 +585,6 @@ static int __init cuse_init(void)
39917 for (i = 0; i < CUSE_CONNTBL_LEN; i++)
39918 INIT_LIST_HEAD(&cuse_conntbl[i]);
39919
39920 - /* inherit and extend fuse_dev_operations */
39921 - cuse_channel_fops = fuse_dev_operations;
39922 - cuse_channel_fops.owner = THIS_MODULE;
39923 - cuse_channel_fops.open = cuse_channel_open;
39924 - cuse_channel_fops.release = cuse_channel_release;
39925 -
39926 cuse_class = class_create(THIS_MODULE, "cuse");
39927 if (IS_ERR(cuse_class))
39928 return PTR_ERR(cuse_class);
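The CUSE hunk above replaces the old approach of copying fuse_dev_operations into a writable cuse_channel_fops at init time with a fully initialized const table (which is why the fuse_dev_* helpers are exported in the fs/fuse/dev.c hunks that follow), so the ops structure can be placed in read-only memory. A hedged, self-contained sketch of that constification idiom; the struct and function names here are illustrative only:

    #include <stdio.h>

    struct ops { int (*open)(void); int (*release)(void); };

    static int base_open(void)    { return 0; }
    static int base_release(void) { return 0; }
    static int my_open(void)      { puts("channel open"); return base_open(); }

    /* Before: a writable table was copied and patched at init time:
     *     static struct ops my_ops;                    // writable
     *     my_ops = base_ops; my_ops.open = my_open;
     * After: build the table statically so it can be const (read-only): */
    static const struct ops my_ops = {
        .open    = my_open,
        .release = base_release,
    };

    int main(void) { return my_ops.open() | my_ops.release(); }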
39929 diff -urNp linux-2.6.32.41/fs/fuse/dev.c linux-2.6.32.41/fs/fuse/dev.c
39930 --- linux-2.6.32.41/fs/fuse/dev.c 2011-03-27 14:31:47.000000000 -0400
39931 +++ linux-2.6.32.41/fs/fuse/dev.c 2011-04-17 15:56:46.000000000 -0400
39932 @@ -745,7 +745,7 @@ __releases(&fc->lock)
39933 * request_end(). Otherwise add it to the processing list, and set
39934 * the 'sent' flag.
39935 */
39936 -static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
39937 +ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
39938 unsigned long nr_segs, loff_t pos)
39939 {
39940 int err;
39941 @@ -827,6 +827,7 @@ static ssize_t fuse_dev_read(struct kioc
39942 spin_unlock(&fc->lock);
39943 return err;
39944 }
39945 +EXPORT_SYMBOL_GPL(fuse_dev_read);
39946
39947 static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
39948 struct fuse_copy_state *cs)
39949 @@ -885,7 +886,7 @@ static int fuse_notify_inval_entry(struc
39950 {
39951 struct fuse_notify_inval_entry_out outarg;
39952 int err = -EINVAL;
39953 - char buf[FUSE_NAME_MAX+1];
39954 + char *buf = NULL;
39955 struct qstr name;
39956
39957 if (size < sizeof(outarg))
39958 @@ -899,6 +900,11 @@ static int fuse_notify_inval_entry(struc
39959 if (outarg.namelen > FUSE_NAME_MAX)
39960 goto err;
39961
39962 + err = -ENOMEM;
39963 + buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
39964 + if (!buf)
39965 + goto err;
39966 +
39967 name.name = buf;
39968 name.len = outarg.namelen;
39969 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
39970 @@ -910,17 +916,15 @@ static int fuse_notify_inval_entry(struc
39971
39972 down_read(&fc->killsb);
39973 err = -ENOENT;
39974 - if (!fc->sb)
39975 - goto err_unlock;
39976 -
39977 - err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
39978 -
39979 -err_unlock:
39980 + if (fc->sb)
39981 + err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
39982 up_read(&fc->killsb);
39983 + kfree(buf);
39984 return err;
39985
39986 err:
39987 fuse_copy_finish(cs);
39988 + kfree(buf);
39989 return err;
39990 }
39991
39992 @@ -987,7 +991,7 @@ static int copy_out_args(struct fuse_cop
39993 * it from the list and copy the rest of the buffer to the request.
39994 * The request is finished by calling request_end()
39995 */
39996 -static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
39997 +ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
39998 unsigned long nr_segs, loff_t pos)
39999 {
40000 int err;
40001 @@ -1083,8 +1087,9 @@ static ssize_t fuse_dev_write(struct kio
40002 fuse_copy_finish(&cs);
40003 return err;
40004 }
40005 +EXPORT_SYMBOL_GPL(fuse_dev_write);
40006
40007 -static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
40008 +unsigned fuse_dev_poll(struct file *file, poll_table *wait)
40009 {
40010 unsigned mask = POLLOUT | POLLWRNORM;
40011 struct fuse_conn *fc = fuse_get_conn(file);
40012 @@ -1102,6 +1107,7 @@ static unsigned fuse_dev_poll(struct fil
40013
40014 return mask;
40015 }
40016 +EXPORT_SYMBOL_GPL(fuse_dev_poll);
40017
40018 /*
40019 * Abort all requests on the given list (pending or processing)
40020 @@ -1218,7 +1224,7 @@ int fuse_dev_release(struct inode *inode
40021 }
40022 EXPORT_SYMBOL_GPL(fuse_dev_release);
40023
40024 -static int fuse_dev_fasync(int fd, struct file *file, int on)
40025 +int fuse_dev_fasync(int fd, struct file *file, int on)
40026 {
40027 struct fuse_conn *fc = fuse_get_conn(file);
40028 if (!fc)
40029 @@ -1227,6 +1233,7 @@ static int fuse_dev_fasync(int fd, struc
40030 /* No locking - fasync_helper does its own locking */
40031 return fasync_helper(fd, file, on, &fc->fasync);
40032 }
40033 +EXPORT_SYMBOL_GPL(fuse_dev_fasync);
40034
40035 const struct file_operations fuse_dev_operations = {
40036 .owner = THIS_MODULE,
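Besides exporting fuse_dev_read/write/poll/fasync for the const cuse_channel_fops above, the fs/fuse/dev.c hunk moves fuse_notify_inval_entry()'s FUSE_NAME_MAX+1 (roughly 1 KiB) name buffer off the kernel stack onto the heap and frees it on every exit path. A hedged userspace sketch of that stack-to-heap change, with malloc/free standing in for kmalloc/kfree:

    #include <stdlib.h>
    #include <string.h>

    #define NAME_MAX_SKETCH 1024            /* stands in for FUSE_NAME_MAX */

    static int handle_name(const char *src, size_t namelen)
    {
        /* was: char buf[NAME_MAX_SKETCH + 1]; -- a large on-stack buffer */
        char *buf;
        int err;

        if (namelen > NAME_MAX_SKETCH)
            return -22;                     /* -EINVAL */

        buf = malloc(NAME_MAX_SKETCH + 1);
        if (!buf)
            return -12;                     /* -ENOMEM */

        memcpy(buf, src, namelen);
        buf[namelen] = '\0';
        err = 0;                            /* ... use the copied name ... */

        free(buf);                          /* freed on success and error paths alike */
        return err;
    }

    int main(void) { return handle_name("example", 7) ? 1 : 0; }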
40037 diff -urNp linux-2.6.32.41/fs/fuse/dir.c linux-2.6.32.41/fs/fuse/dir.c
40038 --- linux-2.6.32.41/fs/fuse/dir.c 2011-03-27 14:31:47.000000000 -0400
40039 +++ linux-2.6.32.41/fs/fuse/dir.c 2011-04-17 15:56:46.000000000 -0400
40040 @@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *de
40041 return link;
40042 }
40043
40044 -static void free_link(char *link)
40045 +static void free_link(const char *link)
40046 {
40047 if (!IS_ERR(link))
40048 free_page((unsigned long) link);
40049 diff -urNp linux-2.6.32.41/fs/fuse/fuse_i.h linux-2.6.32.41/fs/fuse/fuse_i.h
40050 --- linux-2.6.32.41/fs/fuse/fuse_i.h 2011-03-27 14:31:47.000000000 -0400
40051 +++ linux-2.6.32.41/fs/fuse/fuse_i.h 2011-04-17 15:56:46.000000000 -0400
40052 @@ -525,6 +525,16 @@ extern const struct file_operations fuse
40053
40054 extern const struct dentry_operations fuse_dentry_operations;
40055
40056 +extern ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
40057 + unsigned long nr_segs, loff_t pos);
40058 +
40059 +extern ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
40060 + unsigned long nr_segs, loff_t pos);
40061 +
40062 +extern unsigned fuse_dev_poll(struct file *file, poll_table *wait);
40063 +
40064 +extern int fuse_dev_fasync(int fd, struct file *file, int on);
40065 +
40066 /**
40067 * Inode to nodeid comparison.
40068 */
40069 diff -urNp linux-2.6.32.41/fs/gfs2/ops_inode.c linux-2.6.32.41/fs/gfs2/ops_inode.c
40070 --- linux-2.6.32.41/fs/gfs2/ops_inode.c 2011-03-27 14:31:47.000000000 -0400
40071 +++ linux-2.6.32.41/fs/gfs2/ops_inode.c 2011-05-16 21:46:57.000000000 -0400
40072 @@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odi
40073 unsigned int x;
40074 int error;
40075
40076 + pax_track_stack();
40077 +
40078 if (ndentry->d_inode) {
40079 nip = GFS2_I(ndentry->d_inode);
40080 if (ip == nip)
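The pax_track_stack() call added above is the first of many in this patch (gfs2, hfsplus, jbd, jffs2, lockd, ncpfs, nfsd, ocfs2 below all get the same treatment): it is inserted at the top of functions with unusually large stack frames so that, with PaX's stack tracking enabled, kernel stack usage can be accounted for before the frame is used, and it compiles to nothing otherwise. The following is only a conceptual userspace sketch of such a probe, not the PaX implementation:

    #include <stdint.h>
    #include <stdio.h>

    #define STACK_SIZE_SKETCH (8 * 1024)    /* hypothetical per-task stack size */

    static uintptr_t stack_top;             /* recorded at entry, like a thread stack base */

    static void track_stack(void)
    {
        char probe;                         /* current stack position */
        size_t used = (size_t)(stack_top - (uintptr_t)&probe);
        if (used > STACK_SIZE_SKETCH - 1024)
            fprintf(stderr, "warning: %zu bytes of stack in use\n", used);
    }

    static void function_with_big_frame(void)
    {
        char name[4096];                    /* the kind of buffer the patch annotates */
        track_stack();                      /* ~ pax_track_stack() after the declarations */
        name[0] = '\0';
        (void)name;
    }

    int main(void)
    {
        char anchor;
        stack_top = (uintptr_t)&anchor;     /* assumes a downward-growing stack */
        function_with_big_frame();
        return 0;
    }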
40081 diff -urNp linux-2.6.32.41/fs/gfs2/sys.c linux-2.6.32.41/fs/gfs2/sys.c
40082 --- linux-2.6.32.41/fs/gfs2/sys.c 2011-03-27 14:31:47.000000000 -0400
40083 +++ linux-2.6.32.41/fs/gfs2/sys.c 2011-04-17 15:56:46.000000000 -0400
40084 @@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct ko
40085 return a->store ? a->store(sdp, buf, len) : len;
40086 }
40087
40088 -static struct sysfs_ops gfs2_attr_ops = {
40089 +static const struct sysfs_ops gfs2_attr_ops = {
40090 .show = gfs2_attr_show,
40091 .store = gfs2_attr_store,
40092 };
40093 @@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset
40094 return 0;
40095 }
40096
40097 -static struct kset_uevent_ops gfs2_uevent_ops = {
40098 +static const struct kset_uevent_ops gfs2_uevent_ops = {
40099 .uevent = gfs2_uevent,
40100 };
40101
40102 diff -urNp linux-2.6.32.41/fs/hfsplus/catalog.c linux-2.6.32.41/fs/hfsplus/catalog.c
40103 --- linux-2.6.32.41/fs/hfsplus/catalog.c 2011-03-27 14:31:47.000000000 -0400
40104 +++ linux-2.6.32.41/fs/hfsplus/catalog.c 2011-05-16 21:46:57.000000000 -0400
40105 @@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block
40106 int err;
40107 u16 type;
40108
40109 + pax_track_stack();
40110 +
40111 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
40112 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
40113 if (err)
40114 @@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct
40115 int entry_size;
40116 int err;
40117
40118 + pax_track_stack();
40119 +
40120 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
40121 sb = dir->i_sb;
40122 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
40123 @@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
40124 int entry_size, type;
40125 int err = 0;
40126
40127 + pax_track_stack();
40128 +
40129 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
40130 dst_dir->i_ino, dst_name->name);
40131 sb = src_dir->i_sb;
40132 diff -urNp linux-2.6.32.41/fs/hfsplus/dir.c linux-2.6.32.41/fs/hfsplus/dir.c
40133 --- linux-2.6.32.41/fs/hfsplus/dir.c 2011-03-27 14:31:47.000000000 -0400
40134 +++ linux-2.6.32.41/fs/hfsplus/dir.c 2011-05-16 21:46:57.000000000 -0400
40135 @@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *
40136 struct hfsplus_readdir_data *rd;
40137 u16 type;
40138
40139 + pax_track_stack();
40140 +
40141 if (filp->f_pos >= inode->i_size)
40142 return 0;
40143
40144 diff -urNp linux-2.6.32.41/fs/hfsplus/inode.c linux-2.6.32.41/fs/hfsplus/inode.c
40145 --- linux-2.6.32.41/fs/hfsplus/inode.c 2011-03-27 14:31:47.000000000 -0400
40146 +++ linux-2.6.32.41/fs/hfsplus/inode.c 2011-05-16 21:46:57.000000000 -0400
40147 @@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode
40148 int res = 0;
40149 u16 type;
40150
40151 + pax_track_stack();
40152 +
40153 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
40154
40155 HFSPLUS_I(inode).dev = 0;
40156 @@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode
40157 struct hfs_find_data fd;
40158 hfsplus_cat_entry entry;
40159
40160 + pax_track_stack();
40161 +
40162 if (HFSPLUS_IS_RSRC(inode))
40163 main_inode = HFSPLUS_I(inode).rsrc_inode;
40164
40165 diff -urNp linux-2.6.32.41/fs/hfsplus/ioctl.c linux-2.6.32.41/fs/hfsplus/ioctl.c
40166 --- linux-2.6.32.41/fs/hfsplus/ioctl.c 2011-03-27 14:31:47.000000000 -0400
40167 +++ linux-2.6.32.41/fs/hfsplus/ioctl.c 2011-05-16 21:46:57.000000000 -0400
40168 @@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dent
40169 struct hfsplus_cat_file *file;
40170 int res;
40171
40172 + pax_track_stack();
40173 +
40174 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
40175 return -EOPNOTSUPP;
40176
40177 @@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *
40178 struct hfsplus_cat_file *file;
40179 ssize_t res = 0;
40180
40181 + pax_track_stack();
40182 +
40183 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
40184 return -EOPNOTSUPP;
40185
40186 diff -urNp linux-2.6.32.41/fs/hfsplus/super.c linux-2.6.32.41/fs/hfsplus/super.c
40187 --- linux-2.6.32.41/fs/hfsplus/super.c 2011-03-27 14:31:47.000000000 -0400
40188 +++ linux-2.6.32.41/fs/hfsplus/super.c 2011-05-16 21:46:57.000000000 -0400
40189 @@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct sup
40190 struct nls_table *nls = NULL;
40191 int err = -EINVAL;
40192
40193 + pax_track_stack();
40194 +
40195 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
40196 if (!sbi)
40197 return -ENOMEM;
40198 diff -urNp linux-2.6.32.41/fs/hugetlbfs/inode.c linux-2.6.32.41/fs/hugetlbfs/inode.c
40199 --- linux-2.6.32.41/fs/hugetlbfs/inode.c 2011-03-27 14:31:47.000000000 -0400
40200 +++ linux-2.6.32.41/fs/hugetlbfs/inode.c 2011-04-17 15:56:46.000000000 -0400
40201 @@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs
40202 .kill_sb = kill_litter_super,
40203 };
40204
40205 -static struct vfsmount *hugetlbfs_vfsmount;
40206 +struct vfsmount *hugetlbfs_vfsmount;
40207
40208 static int can_do_hugetlb_shm(void)
40209 {
40210 diff -urNp linux-2.6.32.41/fs/ioctl.c linux-2.6.32.41/fs/ioctl.c
40211 --- linux-2.6.32.41/fs/ioctl.c 2011-03-27 14:31:47.000000000 -0400
40212 +++ linux-2.6.32.41/fs/ioctl.c 2011-04-17 15:56:46.000000000 -0400
40213 @@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiema
40214 u64 phys, u64 len, u32 flags)
40215 {
40216 struct fiemap_extent extent;
40217 - struct fiemap_extent *dest = fieinfo->fi_extents_start;
40218 + struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
40219
40220 /* only count the extents */
40221 if (fieinfo->fi_extents_max == 0) {
40222 @@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *fil
40223
40224 fieinfo.fi_flags = fiemap.fm_flags;
40225 fieinfo.fi_extents_max = fiemap.fm_extent_count;
40226 - fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
40227 + fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
40228
40229 if (fiemap.fm_extent_count != 0 &&
40230 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
40231 @@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *fil
40232 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
40233 fiemap.fm_flags = fieinfo.fi_flags;
40234 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
40235 - if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
40236 + if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
40237 error = -EFAULT;
40238
40239 return error;
40240 diff -urNp linux-2.6.32.41/fs/jbd/checkpoint.c linux-2.6.32.41/fs/jbd/checkpoint.c
40241 --- linux-2.6.32.41/fs/jbd/checkpoint.c 2011-03-27 14:31:47.000000000 -0400
40242 +++ linux-2.6.32.41/fs/jbd/checkpoint.c 2011-05-16 21:46:57.000000000 -0400
40243 @@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal
40244 tid_t this_tid;
40245 int result;
40246
40247 + pax_track_stack();
40248 +
40249 jbd_debug(1, "Start checkpoint\n");
40250
40251 /*
40252 diff -urNp linux-2.6.32.41/fs/jffs2/compr_rtime.c linux-2.6.32.41/fs/jffs2/compr_rtime.c
40253 --- linux-2.6.32.41/fs/jffs2/compr_rtime.c 2011-03-27 14:31:47.000000000 -0400
40254 +++ linux-2.6.32.41/fs/jffs2/compr_rtime.c 2011-05-16 21:46:57.000000000 -0400
40255 @@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
40256 int outpos = 0;
40257 int pos=0;
40258
40259 + pax_track_stack();
40260 +
40261 memset(positions,0,sizeof(positions));
40262
40263 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
40264 @@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsign
40265 int outpos = 0;
40266 int pos=0;
40267
40268 + pax_track_stack();
40269 +
40270 memset(positions,0,sizeof(positions));
40271
40272 while (outpos<destlen) {
40273 diff -urNp linux-2.6.32.41/fs/jffs2/compr_rubin.c linux-2.6.32.41/fs/jffs2/compr_rubin.c
40274 --- linux-2.6.32.41/fs/jffs2/compr_rubin.c 2011-03-27 14:31:47.000000000 -0400
40275 +++ linux-2.6.32.41/fs/jffs2/compr_rubin.c 2011-05-16 21:46:57.000000000 -0400
40276 @@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
40277 int ret;
40278 uint32_t mysrclen, mydstlen;
40279
40280 + pax_track_stack();
40281 +
40282 mysrclen = *sourcelen;
40283 mydstlen = *dstlen - 8;
40284
40285 diff -urNp linux-2.6.32.41/fs/jffs2/erase.c linux-2.6.32.41/fs/jffs2/erase.c
40286 --- linux-2.6.32.41/fs/jffs2/erase.c 2011-03-27 14:31:47.000000000 -0400
40287 +++ linux-2.6.32.41/fs/jffs2/erase.c 2011-04-17 15:56:46.000000000 -0400
40288 @@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(stru
40289 struct jffs2_unknown_node marker = {
40290 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
40291 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
40292 - .totlen = cpu_to_je32(c->cleanmarker_size)
40293 + .totlen = cpu_to_je32(c->cleanmarker_size),
40294 + .hdr_crc = cpu_to_je32(0)
40295 };
40296
40297 jffs2_prealloc_raw_node_refs(c, jeb, 1);
40298 diff -urNp linux-2.6.32.41/fs/jffs2/wbuf.c linux-2.6.32.41/fs/jffs2/wbuf.c
40299 --- linux-2.6.32.41/fs/jffs2/wbuf.c 2011-03-27 14:31:47.000000000 -0400
40300 +++ linux-2.6.32.41/fs/jffs2/wbuf.c 2011-04-17 15:56:46.000000000 -0400
40301 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
40302 {
40303 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
40304 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
40305 - .totlen = constant_cpu_to_je32(8)
40306 + .totlen = constant_cpu_to_je32(8),
40307 + .hdr_crc = constant_cpu_to_je32(0)
40308 };
40309
40310 /*
40311 diff -urNp linux-2.6.32.41/fs/jffs2/xattr.c linux-2.6.32.41/fs/jffs2/xattr.c
40312 --- linux-2.6.32.41/fs/jffs2/xattr.c 2011-03-27 14:31:47.000000000 -0400
40313 +++ linux-2.6.32.41/fs/jffs2/xattr.c 2011-05-16 21:46:57.000000000 -0400
40314 @@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
40315
40316 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
40317
40318 + pax_track_stack();
40319 +
40320 /* Phase.1 : Merge same xref */
40321 for (i=0; i < XREF_TMPHASH_SIZE; i++)
40322 xref_tmphash[i] = NULL;
40323 diff -urNp linux-2.6.32.41/fs/jfs/super.c linux-2.6.32.41/fs/jfs/super.c
40324 --- linux-2.6.32.41/fs/jfs/super.c 2011-03-27 14:31:47.000000000 -0400
40325 +++ linux-2.6.32.41/fs/jfs/super.c 2011-06-07 18:06:04.000000000 -0400
40326 @@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
40327
40328 jfs_inode_cachep =
40329 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
40330 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
40331 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
40332 init_once);
40333 if (jfs_inode_cachep == NULL)
40334 return -ENOMEM;
40335 diff -urNp linux-2.6.32.41/fs/Kconfig.binfmt linux-2.6.32.41/fs/Kconfig.binfmt
40336 --- linux-2.6.32.41/fs/Kconfig.binfmt 2011-03-27 14:31:47.000000000 -0400
40337 +++ linux-2.6.32.41/fs/Kconfig.binfmt 2011-04-17 15:56:46.000000000 -0400
40338 @@ -86,7 +86,7 @@ config HAVE_AOUT
40339
40340 config BINFMT_AOUT
40341 tristate "Kernel support for a.out and ECOFF binaries"
40342 - depends on HAVE_AOUT
40343 + depends on HAVE_AOUT && BROKEN
40344 ---help---
40345 A.out (Assembler.OUTput) is a set of formats for libraries and
40346 executables used in the earliest versions of UNIX. Linux used
40347 diff -urNp linux-2.6.32.41/fs/libfs.c linux-2.6.32.41/fs/libfs.c
40348 --- linux-2.6.32.41/fs/libfs.c 2011-03-27 14:31:47.000000000 -0400
40349 +++ linux-2.6.32.41/fs/libfs.c 2011-05-11 18:25:15.000000000 -0400
40350 @@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, v
40351
40352 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
40353 struct dentry *next;
40354 + char d_name[sizeof(next->d_iname)];
40355 + const unsigned char *name;
40356 +
40357 next = list_entry(p, struct dentry, d_u.d_child);
40358 if (d_unhashed(next) || !next->d_inode)
40359 continue;
40360
40361 spin_unlock(&dcache_lock);
40362 - if (filldir(dirent, next->d_name.name,
40363 + name = next->d_name.name;
40364 + if (name == next->d_iname) {
40365 + memcpy(d_name, name, next->d_name.len);
40366 + name = d_name;
40367 + }
40368 + if (filldir(dirent, name,
40369 next->d_name.len, filp->f_pos,
40370 next->d_inode->i_ino,
40371 dt_type(next->d_inode)) < 0)
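The fs/libfs.c hunk above makes dcache_readdir() snapshot a short inline name (d_iname) into a local buffer while dcache_lock is still held, because the lock is dropped before filldir() runs and a concurrent rename could otherwise change the bytes behind the pointer mid-copy. A hedged sketch of the same copy-before-unlock pattern, with a pthread mutex standing in for dcache_lock:

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static char shared_name[32] = "inline-name";   /* stands in for dentry->d_iname */

    static void emit_entry(void)
    {
        char local[sizeof(shared_name)];
        const char *name = shared_name;

        pthread_mutex_lock(&lock);
        /* snapshot while the lock still guarantees the buffer is stable */
        memcpy(local, shared_name, sizeof(local));
        name = local;
        pthread_mutex_unlock(&lock);

        printf("%s\n", name);               /* safe even if shared_name changes now */
    }

    int main(void) { emit_entry(); return 0; }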
40372 diff -urNp linux-2.6.32.41/fs/lockd/clntproc.c linux-2.6.32.41/fs/lockd/clntproc.c
40373 --- linux-2.6.32.41/fs/lockd/clntproc.c 2011-03-27 14:31:47.000000000 -0400
40374 +++ linux-2.6.32.41/fs/lockd/clntproc.c 2011-05-16 21:46:57.000000000 -0400
40375 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
40376 /*
40377 * Cookie counter for NLM requests
40378 */
40379 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
40380 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
40381
40382 void nlmclnt_next_cookie(struct nlm_cookie *c)
40383 {
40384 - u32 cookie = atomic_inc_return(&nlm_cookie);
40385 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
40386
40387 memcpy(c->data, &cookie, 4);
40388 c->len=4;
40389 @@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
40390 struct nlm_rqst reqst, *req;
40391 int status;
40392
40393 + pax_track_stack();
40394 +
40395 req = &reqst;
40396 memset(req, 0, sizeof(*req));
40397 locks_init_lock(&req->a_args.lock.fl);
40398 diff -urNp linux-2.6.32.41/fs/lockd/svc.c linux-2.6.32.41/fs/lockd/svc.c
40399 --- linux-2.6.32.41/fs/lockd/svc.c 2011-03-27 14:31:47.000000000 -0400
40400 +++ linux-2.6.32.41/fs/lockd/svc.c 2011-04-17 15:56:46.000000000 -0400
40401 @@ -43,7 +43,7 @@
40402
40403 static struct svc_program nlmsvc_program;
40404
40405 -struct nlmsvc_binding * nlmsvc_ops;
40406 +const struct nlmsvc_binding * nlmsvc_ops;
40407 EXPORT_SYMBOL_GPL(nlmsvc_ops);
40408
40409 static DEFINE_MUTEX(nlmsvc_mutex);
40410 diff -urNp linux-2.6.32.41/fs/locks.c linux-2.6.32.41/fs/locks.c
40411 --- linux-2.6.32.41/fs/locks.c 2011-03-27 14:31:47.000000000 -0400
40412 +++ linux-2.6.32.41/fs/locks.c 2011-04-17 15:56:46.000000000 -0400
40413 @@ -2007,16 +2007,16 @@ void locks_remove_flock(struct file *fil
40414 return;
40415
40416 if (filp->f_op && filp->f_op->flock) {
40417 - struct file_lock fl = {
40418 + struct file_lock flock = {
40419 .fl_pid = current->tgid,
40420 .fl_file = filp,
40421 .fl_flags = FL_FLOCK,
40422 .fl_type = F_UNLCK,
40423 .fl_end = OFFSET_MAX,
40424 };
40425 - filp->f_op->flock(filp, F_SETLKW, &fl);
40426 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
40427 - fl.fl_ops->fl_release_private(&fl);
40428 + filp->f_op->flock(filp, F_SETLKW, &flock);
40429 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
40430 + flock.fl_ops->fl_release_private(&flock);
40431 }
40432
40433 lock_kernel();
40434 diff -urNp linux-2.6.32.41/fs/namei.c linux-2.6.32.41/fs/namei.c
40435 --- linux-2.6.32.41/fs/namei.c 2011-03-27 14:31:47.000000000 -0400
40436 +++ linux-2.6.32.41/fs/namei.c 2011-05-16 21:46:57.000000000 -0400
40437 @@ -224,14 +224,6 @@ int generic_permission(struct inode *ino
40438 return ret;
40439
40440 /*
40441 - * Read/write DACs are always overridable.
40442 - * Executable DACs are overridable if at least one exec bit is set.
40443 - */
40444 - if (!(mask & MAY_EXEC) || execute_ok(inode))
40445 - if (capable(CAP_DAC_OVERRIDE))
40446 - return 0;
40447 -
40448 - /*
40449 * Searching includes executable on directories, else just read.
40450 */
40451 mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
40452 @@ -239,6 +231,14 @@ int generic_permission(struct inode *ino
40453 if (capable(CAP_DAC_READ_SEARCH))
40454 return 0;
40455
40456 + /*
40457 + * Read/write DACs are always overridable.
40458 + * Executable DACs are overridable if at least one exec bit is set.
40459 + */
40460 + if (!(mask & MAY_EXEC) || execute_ok(inode))
40461 + if (capable(CAP_DAC_OVERRIDE))
40462 + return 0;
40463 +
40464 return -EACCES;
40465 }
40466
40467 @@ -458,7 +458,8 @@ static int exec_permission_lite(struct i
40468 if (!ret)
40469 goto ok;
40470
40471 - if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
40472 + if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
40473 + capable(CAP_DAC_OVERRIDE))
40474 goto ok;
40475
40476 return ret;
40477 @@ -638,7 +639,7 @@ static __always_inline int __do_follow_l
40478 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
40479 error = PTR_ERR(cookie);
40480 if (!IS_ERR(cookie)) {
40481 - char *s = nd_get_link(nd);
40482 + const char *s = nd_get_link(nd);
40483 error = 0;
40484 if (s)
40485 error = __vfs_follow_link(nd, s);
40486 @@ -669,6 +670,13 @@ static inline int do_follow_link(struct
40487 err = security_inode_follow_link(path->dentry, nd);
40488 if (err)
40489 goto loop;
40490 +
40491 + if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
40492 + path->dentry->d_inode, path->dentry, nd->path.mnt)) {
40493 + err = -EACCES;
40494 + goto loop;
40495 + }
40496 +
40497 current->link_count++;
40498 current->total_link_count++;
40499 nd->depth++;
40500 @@ -1016,11 +1024,18 @@ return_reval:
40501 break;
40502 }
40503 return_base:
40504 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
40505 + path_put(&nd->path);
40506 + return -ENOENT;
40507 + }
40508 return 0;
40509 out_dput:
40510 path_put_conditional(&next, nd);
40511 break;
40512 }
40513 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
40514 + err = -ENOENT;
40515 +
40516 path_put(&nd->path);
40517 return_err:
40518 return err;
40519 @@ -1091,13 +1106,20 @@ static int do_path_lookup(int dfd, const
40520 int retval = path_init(dfd, name, flags, nd);
40521 if (!retval)
40522 retval = path_walk(name, nd);
40523 - if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
40524 - nd->path.dentry->d_inode))
40525 - audit_inode(name, nd->path.dentry);
40526 +
40527 + if (likely(!retval)) {
40528 + if (nd->path.dentry && nd->path.dentry->d_inode) {
40529 + if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
40530 + retval = -ENOENT;
40531 + if (!audit_dummy_context())
40532 + audit_inode(name, nd->path.dentry);
40533 + }
40534 + }
40535 if (nd->root.mnt) {
40536 path_put(&nd->root);
40537 nd->root.mnt = NULL;
40538 }
40539 +
40540 return retval;
40541 }
40542
40543 @@ -1576,6 +1598,20 @@ int may_open(struct path *path, int acc_
40544 if (error)
40545 goto err_out;
40546
40547 +
40548 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
40549 + error = -EPERM;
40550 + goto err_out;
40551 + }
40552 + if (gr_handle_rawio(inode)) {
40553 + error = -EPERM;
40554 + goto err_out;
40555 + }
40556 + if (!gr_acl_handle_open(dentry, path->mnt, flag)) {
40557 + error = -EACCES;
40558 + goto err_out;
40559 + }
40560 +
40561 if (flag & O_TRUNC) {
40562 error = get_write_access(inode);
40563 if (error)
40564 @@ -1621,12 +1657,19 @@ static int __open_namei_create(struct na
40565 int error;
40566 struct dentry *dir = nd->path.dentry;
40567
40568 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, nd->path.mnt, flag, mode)) {
40569 + error = -EACCES;
40570 + goto out_unlock;
40571 + }
40572 +
40573 if (!IS_POSIXACL(dir->d_inode))
40574 mode &= ~current_umask();
40575 error = security_path_mknod(&nd->path, path->dentry, mode, 0);
40576 if (error)
40577 goto out_unlock;
40578 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
40579 + if (!error)
40580 + gr_handle_create(path->dentry, nd->path.mnt);
40581 out_unlock:
40582 mutex_unlock(&dir->d_inode->i_mutex);
40583 dput(nd->path.dentry);
40584 @@ -1709,6 +1752,22 @@ struct file *do_filp_open(int dfd, const
40585 &nd, flag);
40586 if (error)
40587 return ERR_PTR(error);
40588 +
40589 + if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
40590 + error = -EPERM;
40591 + goto exit;
40592 + }
40593 +
40594 + if (gr_handle_rawio(nd.path.dentry->d_inode)) {
40595 + error = -EPERM;
40596 + goto exit;
40597 + }
40598 +
40599 + if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, flag)) {
40600 + error = -EACCES;
40601 + goto exit;
40602 + }
40603 +
40604 goto ok;
40605 }
40606
40607 @@ -1795,6 +1854,14 @@ do_last:
40608 /*
40609 * It already exists.
40610 */
40611 +
40612 + /* only check if O_CREAT is specified, all other checks need
40613 + to go into may_open */
40614 + if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
40615 + error = -EACCES;
40616 + goto exit_mutex_unlock;
40617 + }
40618 +
40619 mutex_unlock(&dir->d_inode->i_mutex);
40620 audit_inode(pathname, path.dentry);
40621
40622 @@ -1887,6 +1954,13 @@ do_link:
40623 error = security_inode_follow_link(path.dentry, &nd);
40624 if (error)
40625 goto exit_dput;
40626 +
40627 + if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
40628 + path.dentry, nd.path.mnt)) {
40629 + error = -EACCES;
40630 + goto exit_dput;
40631 + }
40632 +
40633 error = __do_follow_link(&path, &nd);
40634 if (error) {
40635 /* Does someone understand code flow here? Or it is only
40636 @@ -2061,6 +2135,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
40637 error = may_mknod(mode);
40638 if (error)
40639 goto out_dput;
40640 +
40641 + if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
40642 + error = -EPERM;
40643 + goto out_dput;
40644 + }
40645 +
40646 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
40647 + error = -EACCES;
40648 + goto out_dput;
40649 + }
40650 +
40651 error = mnt_want_write(nd.path.mnt);
40652 if (error)
40653 goto out_dput;
40654 @@ -2081,6 +2166,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
40655 }
40656 out_drop_write:
40657 mnt_drop_write(nd.path.mnt);
40658 +
40659 + if (!error)
40660 + gr_handle_create(dentry, nd.path.mnt);
40661 out_dput:
40662 dput(dentry);
40663 out_unlock:
40664 @@ -2134,6 +2222,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
40665 if (IS_ERR(dentry))
40666 goto out_unlock;
40667
40668 + if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
40669 + error = -EACCES;
40670 + goto out_dput;
40671 + }
40672 +
40673 if (!IS_POSIXACL(nd.path.dentry->d_inode))
40674 mode &= ~current_umask();
40675 error = mnt_want_write(nd.path.mnt);
40676 @@ -2145,6 +2238,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
40677 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
40678 out_drop_write:
40679 mnt_drop_write(nd.path.mnt);
40680 +
40681 + if (!error)
40682 + gr_handle_create(dentry, nd.path.mnt);
40683 +
40684 out_dput:
40685 dput(dentry);
40686 out_unlock:
40687 @@ -2226,6 +2323,8 @@ static long do_rmdir(int dfd, const char
40688 char * name;
40689 struct dentry *dentry;
40690 struct nameidata nd;
40691 + ino_t saved_ino = 0;
40692 + dev_t saved_dev = 0;
40693
40694 error = user_path_parent(dfd, pathname, &nd, &name);
40695 if (error)
40696 @@ -2250,6 +2349,19 @@ static long do_rmdir(int dfd, const char
40697 error = PTR_ERR(dentry);
40698 if (IS_ERR(dentry))
40699 goto exit2;
40700 +
40701 + if (dentry->d_inode != NULL) {
40702 + if (dentry->d_inode->i_nlink <= 1) {
40703 + saved_ino = dentry->d_inode->i_ino;
40704 + saved_dev = gr_get_dev_from_dentry(dentry);
40705 + }
40706 +
40707 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
40708 + error = -EACCES;
40709 + goto exit3;
40710 + }
40711 + }
40712 +
40713 error = mnt_want_write(nd.path.mnt);
40714 if (error)
40715 goto exit3;
40716 @@ -2257,6 +2369,8 @@ static long do_rmdir(int dfd, const char
40717 if (error)
40718 goto exit4;
40719 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
40720 + if (!error && (saved_dev || saved_ino))
40721 + gr_handle_delete(saved_ino, saved_dev);
40722 exit4:
40723 mnt_drop_write(nd.path.mnt);
40724 exit3:
40725 @@ -2318,6 +2432,8 @@ static long do_unlinkat(int dfd, const c
40726 struct dentry *dentry;
40727 struct nameidata nd;
40728 struct inode *inode = NULL;
40729 + ino_t saved_ino = 0;
40730 + dev_t saved_dev = 0;
40731
40732 error = user_path_parent(dfd, pathname, &nd, &name);
40733 if (error)
40734 @@ -2337,8 +2453,19 @@ static long do_unlinkat(int dfd, const c
40735 if (nd.last.name[nd.last.len])
40736 goto slashes;
40737 inode = dentry->d_inode;
40738 - if (inode)
40739 + if (inode) {
40740 + if (inode->i_nlink <= 1) {
40741 + saved_ino = inode->i_ino;
40742 + saved_dev = gr_get_dev_from_dentry(dentry);
40743 + }
40744 +
40745 atomic_inc(&inode->i_count);
40746 +
40747 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
40748 + error = -EACCES;
40749 + goto exit2;
40750 + }
40751 + }
40752 error = mnt_want_write(nd.path.mnt);
40753 if (error)
40754 goto exit2;
40755 @@ -2346,6 +2473,8 @@ static long do_unlinkat(int dfd, const c
40756 if (error)
40757 goto exit3;
40758 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
40759 + if (!error && (saved_ino || saved_dev))
40760 + gr_handle_delete(saved_ino, saved_dev);
40761 exit3:
40762 mnt_drop_write(nd.path.mnt);
40763 exit2:
40764 @@ -2424,6 +2553,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
40765 if (IS_ERR(dentry))
40766 goto out_unlock;
40767
40768 + if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
40769 + error = -EACCES;
40770 + goto out_dput;
40771 + }
40772 +
40773 error = mnt_want_write(nd.path.mnt);
40774 if (error)
40775 goto out_dput;
40776 @@ -2431,6 +2565,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
40777 if (error)
40778 goto out_drop_write;
40779 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
40780 + if (!error)
40781 + gr_handle_create(dentry, nd.path.mnt);
40782 out_drop_write:
40783 mnt_drop_write(nd.path.mnt);
40784 out_dput:
40785 @@ -2524,6 +2660,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
40786 error = PTR_ERR(new_dentry);
40787 if (IS_ERR(new_dentry))
40788 goto out_unlock;
40789 +
40790 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
40791 + old_path.dentry->d_inode,
40792 + old_path.dentry->d_inode->i_mode, to)) {
40793 + error = -EACCES;
40794 + goto out_dput;
40795 + }
40796 +
40797 + if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
40798 + old_path.dentry, old_path.mnt, to)) {
40799 + error = -EACCES;
40800 + goto out_dput;
40801 + }
40802 +
40803 error = mnt_want_write(nd.path.mnt);
40804 if (error)
40805 goto out_dput;
40806 @@ -2531,6 +2681,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
40807 if (error)
40808 goto out_drop_write;
40809 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
40810 + if (!error)
40811 + gr_handle_create(new_dentry, nd.path.mnt);
40812 out_drop_write:
40813 mnt_drop_write(nd.path.mnt);
40814 out_dput:
40815 @@ -2708,6 +2860,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
40816 char *to;
40817 int error;
40818
40819 + pax_track_stack();
40820 +
40821 error = user_path_parent(olddfd, oldname, &oldnd, &from);
40822 if (error)
40823 goto exit;
40824 @@ -2764,6 +2918,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
40825 if (new_dentry == trap)
40826 goto exit5;
40827
40828 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
40829 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
40830 + to);
40831 + if (error)
40832 + goto exit5;
40833 +
40834 error = mnt_want_write(oldnd.path.mnt);
40835 if (error)
40836 goto exit5;
40837 @@ -2773,6 +2933,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
40838 goto exit6;
40839 error = vfs_rename(old_dir->d_inode, old_dentry,
40840 new_dir->d_inode, new_dentry);
40841 + if (!error)
40842 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
40843 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
40844 exit6:
40845 mnt_drop_write(oldnd.path.mnt);
40846 exit5:
40847 @@ -2798,6 +2961,8 @@ SYSCALL_DEFINE2(rename, const char __use
40848
40849 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
40850 {
40851 + char tmpbuf[64];
40852 + const char *newlink;
40853 int len;
40854
40855 len = PTR_ERR(link);
40856 @@ -2807,7 +2972,14 @@ int vfs_readlink(struct dentry *dentry,
40857 len = strlen(link);
40858 if (len > (unsigned) buflen)
40859 len = buflen;
40860 - if (copy_to_user(buffer, link, len))
40861 +
40862 + if (len < sizeof(tmpbuf)) {
40863 + memcpy(tmpbuf, link, len);
40864 + newlink = tmpbuf;
40865 + } else
40866 + newlink = link;
40867 +
40868 + if (copy_to_user(buffer, newlink, len))
40869 len = -EFAULT;
40870 out:
40871 return len;
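Among the many grsecurity hooks added to fs/namei.c above, the generic_permission() hunks reorder the capability checks so the narrower CAP_DAC_READ_SEARCH is tried before the broad CAP_DAC_OVERRIDE, meaning the override capability is neither exercised nor flagged as used when read/search alone suffices. A hedged, loose sketch of that narrow-before-broad ordering; the helpers and flag values here are illustrative, not the kernel's:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAY_READ  0x1
    #define MAY_WRITE 0x2
    #define MAY_EXEC  0x4

    static bool capable(const char *cap) { printf("capability used: %s\n", cap); return true; }

    static int override_permission(int mask, bool is_dir)
    {
        /* narrower capability first: enough for read and directory search */
        if ((mask == MAY_READ || (is_dir && !(mask & MAY_WRITE))) &&
            capable("CAP_DAC_READ_SEARCH"))
            return 0;
        /* the broad override is only consulted (and logged) as a last resort */
        if (!(mask & MAY_EXEC) && capable("CAP_DAC_OVERRIDE"))
            return 0;
        return -13;                          /* -EACCES */
    }

    int main(void) { return override_permission(MAY_READ, false); }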
40872 diff -urNp linux-2.6.32.41/fs/namespace.c linux-2.6.32.41/fs/namespace.c
40873 --- linux-2.6.32.41/fs/namespace.c 2011-03-27 14:31:47.000000000 -0400
40874 +++ linux-2.6.32.41/fs/namespace.c 2011-04-17 15:56:46.000000000 -0400
40875 @@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mn
40876 if (!(sb->s_flags & MS_RDONLY))
40877 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
40878 up_write(&sb->s_umount);
40879 +
40880 + gr_log_remount(mnt->mnt_devname, retval);
40881 +
40882 return retval;
40883 }
40884
40885 @@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mn
40886 security_sb_umount_busy(mnt);
40887 up_write(&namespace_sem);
40888 release_mounts(&umount_list);
40889 +
40890 + gr_log_unmount(mnt->mnt_devname, retval);
40891 +
40892 return retval;
40893 }
40894
40895 @@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_
40896 if (retval)
40897 goto dput_out;
40898
40899 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
40900 + retval = -EPERM;
40901 + goto dput_out;
40902 + }
40903 +
40904 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
40905 + retval = -EPERM;
40906 + goto dput_out;
40907 + }
40908 +
40909 if (flags & MS_REMOUNT)
40910 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
40911 data_page);
40912 @@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_
40913 dev_name, data_page);
40914 dput_out:
40915 path_put(&path);
40916 +
40917 + gr_log_mount(dev_name, dir_name, retval);
40918 +
40919 return retval;
40920 }
40921
40922 @@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char _
40923 goto out1;
40924 }
40925
40926 + if (gr_handle_chroot_pivot()) {
40927 + error = -EPERM;
40928 + path_put(&old);
40929 + goto out1;
40930 + }
40931 +
40932 read_lock(&current->fs->lock);
40933 root = current->fs->root;
40934 path_get(&current->fs->root);
40935 diff -urNp linux-2.6.32.41/fs/ncpfs/dir.c linux-2.6.32.41/fs/ncpfs/dir.c
40936 --- linux-2.6.32.41/fs/ncpfs/dir.c 2011-03-27 14:31:47.000000000 -0400
40937 +++ linux-2.6.32.41/fs/ncpfs/dir.c 2011-05-16 21:46:57.000000000 -0400
40938 @@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *den
40939 int res, val = 0, len;
40940 __u8 __name[NCP_MAXPATHLEN + 1];
40941
40942 + pax_track_stack();
40943 +
40944 parent = dget_parent(dentry);
40945 dir = parent->d_inode;
40946
40947 @@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct
40948 int error, res, len;
40949 __u8 __name[NCP_MAXPATHLEN + 1];
40950
40951 + pax_track_stack();
40952 +
40953 lock_kernel();
40954 error = -EIO;
40955 if (!ncp_conn_valid(server))
40956 @@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, st
40957 int error, result, len;
40958 int opmode;
40959 __u8 __name[NCP_MAXPATHLEN + 1];
40960 -
40961 +
40962 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
40963 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
40964
40965 + pax_track_stack();
40966 +
40967 error = -EIO;
40968 lock_kernel();
40969 if (!ncp_conn_valid(server))
40970 @@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir,
40971 int error, len;
40972 __u8 __name[NCP_MAXPATHLEN + 1];
40973
40974 + pax_track_stack();
40975 +
40976 DPRINTK("ncp_mkdir: making %s/%s\n",
40977 dentry->d_parent->d_name.name, dentry->d_name.name);
40978
40979 @@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir,
40980 if (!ncp_conn_valid(server))
40981 goto out;
40982
40983 + pax_track_stack();
40984 +
40985 ncp_age_dentry(server, dentry);
40986 len = sizeof(__name);
40987 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
40988 @@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_
40989 int old_len, new_len;
40990 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
40991
40992 + pax_track_stack();
40993 +
40994 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
40995 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
40996 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
40997 diff -urNp linux-2.6.32.41/fs/ncpfs/inode.c linux-2.6.32.41/fs/ncpfs/inode.c
40998 --- linux-2.6.32.41/fs/ncpfs/inode.c 2011-03-27 14:31:47.000000000 -0400
40999 +++ linux-2.6.32.41/fs/ncpfs/inode.c 2011-05-16 21:46:57.000000000 -0400
41000 @@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_b
41001 #endif
41002 struct ncp_entry_info finfo;
41003
41004 + pax_track_stack();
41005 +
41006 data.wdog_pid = NULL;
41007 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
41008 if (!server)
41009 diff -urNp linux-2.6.32.41/fs/nfs/inode.c linux-2.6.32.41/fs/nfs/inode.c
41010 --- linux-2.6.32.41/fs/nfs/inode.c 2011-05-10 22:12:01.000000000 -0400
41011 +++ linux-2.6.32.41/fs/nfs/inode.c 2011-05-10 22:12:33.000000000 -0400
41012 @@ -973,16 +973,16 @@ static int nfs_size_need_update(const st
41013 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
41014 }
41015
41016 -static atomic_long_t nfs_attr_generation_counter;
41017 +static atomic_long_unchecked_t nfs_attr_generation_counter;
41018
41019 static unsigned long nfs_read_attr_generation_counter(void)
41020 {
41021 - return atomic_long_read(&nfs_attr_generation_counter);
41022 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
41023 }
41024
41025 unsigned long nfs_inc_attr_generation_counter(void)
41026 {
41027 - return atomic_long_inc_return(&nfs_attr_generation_counter);
41028 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
41029 }
41030
41031 void nfs_fattr_init(struct nfs_fattr *fattr)
41032 diff -urNp linux-2.6.32.41/fs/nfsd/lockd.c linux-2.6.32.41/fs/nfsd/lockd.c
41033 --- linux-2.6.32.41/fs/nfsd/lockd.c 2011-04-17 17:00:52.000000000 -0400
41034 +++ linux-2.6.32.41/fs/nfsd/lockd.c 2011-04-17 17:03:15.000000000 -0400
41035 @@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
41036 fput(filp);
41037 }
41038
41039 -static struct nlmsvc_binding nfsd_nlm_ops = {
41040 +static const struct nlmsvc_binding nfsd_nlm_ops = {
41041 .fopen = nlm_fopen, /* open file for locking */
41042 .fclose = nlm_fclose, /* close file */
41043 };
41044 diff -urNp linux-2.6.32.41/fs/nfsd/nfs4state.c linux-2.6.32.41/fs/nfsd/nfs4state.c
41045 --- linux-2.6.32.41/fs/nfsd/nfs4state.c 2011-03-27 14:31:47.000000000 -0400
41046 +++ linux-2.6.32.41/fs/nfsd/nfs4state.c 2011-05-16 21:46:57.000000000 -0400
41047 @@ -3457,6 +3457,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
41048 unsigned int cmd;
41049 int err;
41050
41051 + pax_track_stack();
41052 +
41053 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
41054 (long long) lock->lk_offset,
41055 (long long) lock->lk_length);
41056 diff -urNp linux-2.6.32.41/fs/nfsd/nfs4xdr.c linux-2.6.32.41/fs/nfsd/nfs4xdr.c
41057 --- linux-2.6.32.41/fs/nfsd/nfs4xdr.c 2011-03-27 14:31:47.000000000 -0400
41058 +++ linux-2.6.32.41/fs/nfsd/nfs4xdr.c 2011-05-16 21:46:57.000000000 -0400
41059 @@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
41060 struct nfsd4_compoundres *resp = rqstp->rq_resp;
41061 u32 minorversion = resp->cstate.minorversion;
41062
41063 + pax_track_stack();
41064 +
41065 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
41066 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
41067 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
41068 diff -urNp linux-2.6.32.41/fs/nfsd/vfs.c linux-2.6.32.41/fs/nfsd/vfs.c
41069 --- linux-2.6.32.41/fs/nfsd/vfs.c 2011-05-10 22:12:01.000000000 -0400
41070 +++ linux-2.6.32.41/fs/nfsd/vfs.c 2011-05-10 22:12:33.000000000 -0400
41071 @@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
41072 } else {
41073 oldfs = get_fs();
41074 set_fs(KERNEL_DS);
41075 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
41076 + host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
41077 set_fs(oldfs);
41078 }
41079
41080 @@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
41081
41082 /* Write the data. */
41083 oldfs = get_fs(); set_fs(KERNEL_DS);
41084 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
41085 + host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
41086 set_fs(oldfs);
41087 if (host_err < 0)
41088 goto out_nfserr;
41089 @@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
41090 */
41091
41092 oldfs = get_fs(); set_fs(KERNEL_DS);
41093 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
41094 + host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
41095 set_fs(oldfs);
41096
41097 if (host_err < 0)
41098 diff -urNp linux-2.6.32.41/fs/nilfs2/ioctl.c linux-2.6.32.41/fs/nilfs2/ioctl.c
41099 --- linux-2.6.32.41/fs/nilfs2/ioctl.c 2011-03-27 14:31:47.000000000 -0400
41100 +++ linux-2.6.32.41/fs/nilfs2/ioctl.c 2011-05-04 17:56:28.000000000 -0400
41101 @@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(st
41102 unsigned int cmd, void __user *argp)
41103 {
41104 struct nilfs_argv argv[5];
41105 - const static size_t argsz[5] = {
41106 + static const size_t argsz[5] = {
41107 sizeof(struct nilfs_vdesc),
41108 sizeof(struct nilfs_period),
41109 sizeof(__u64),
41110 diff -urNp linux-2.6.32.41/fs/notify/dnotify/dnotify.c linux-2.6.32.41/fs/notify/dnotify/dnotify.c
41111 --- linux-2.6.32.41/fs/notify/dnotify/dnotify.c 2011-03-27 14:31:47.000000000 -0400
41112 +++ linux-2.6.32.41/fs/notify/dnotify/dnotify.c 2011-04-17 15:56:46.000000000 -0400
41113 @@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsn
41114 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
41115 }
41116
41117 -static struct fsnotify_ops dnotify_fsnotify_ops = {
41118 +static const struct fsnotify_ops dnotify_fsnotify_ops = {
41119 .handle_event = dnotify_handle_event,
41120 .should_send_event = dnotify_should_send_event,
41121 .free_group_priv = NULL,
41122 diff -urNp linux-2.6.32.41/fs/notify/notification.c linux-2.6.32.41/fs/notify/notification.c
41123 --- linux-2.6.32.41/fs/notify/notification.c 2011-03-27 14:31:47.000000000 -0400
41124 +++ linux-2.6.32.41/fs/notify/notification.c 2011-05-04 17:56:28.000000000 -0400
41125 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
41126 * get set to 0 so it will never get 'freed'
41127 */
41128 static struct fsnotify_event q_overflow_event;
41129 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
41130 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
41131
41132 /**
41133 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
41134 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
41135 */
41136 u32 fsnotify_get_cookie(void)
41137 {
41138 - return atomic_inc_return(&fsnotify_sync_cookie);
41139 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
41140 }
41141 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
41142
41143 diff -urNp linux-2.6.32.41/fs/ntfs/dir.c linux-2.6.32.41/fs/ntfs/dir.c
41144 --- linux-2.6.32.41/fs/ntfs/dir.c 2011-03-27 14:31:47.000000000 -0400
41145 +++ linux-2.6.32.41/fs/ntfs/dir.c 2011-04-17 15:56:46.000000000 -0400
41146 @@ -1328,7 +1328,7 @@ find_next_index_buffer:
41147 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
41148 ~(s64)(ndir->itype.index.block_size - 1)));
41149 /* Bounds checks. */
41150 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
41151 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
41152 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
41153 "inode 0x%lx or driver bug.", vdir->i_ino);
41154 goto err_out;
41155 diff -urNp linux-2.6.32.41/fs/ntfs/file.c linux-2.6.32.41/fs/ntfs/file.c
41156 --- linux-2.6.32.41/fs/ntfs/file.c 2011-03-27 14:31:47.000000000 -0400
41157 +++ linux-2.6.32.41/fs/ntfs/file.c 2011-04-17 15:56:46.000000000 -0400
41158 @@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_
41159 #endif /* NTFS_RW */
41160 };
41161
41162 -const struct file_operations ntfs_empty_file_ops = {};
41163 +const struct file_operations ntfs_empty_file_ops __read_only;
41164
41165 -const struct inode_operations ntfs_empty_inode_ops = {};
41166 +const struct inode_operations ntfs_empty_inode_ops __read_only;
41167 diff -urNp linux-2.6.32.41/fs/ocfs2/cluster/masklog.c linux-2.6.32.41/fs/ocfs2/cluster/masklog.c
41168 --- linux-2.6.32.41/fs/ocfs2/cluster/masklog.c 2011-03-27 14:31:47.000000000 -0400
41169 +++ linux-2.6.32.41/fs/ocfs2/cluster/masklog.c 2011-04-17 15:56:46.000000000 -0400
41170 @@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject
41171 return mlog_mask_store(mlog_attr->mask, buf, count);
41172 }
41173
41174 -static struct sysfs_ops mlog_attr_ops = {
41175 +static const struct sysfs_ops mlog_attr_ops = {
41176 .show = mlog_show,
41177 .store = mlog_store,
41178 };
41179 diff -urNp linux-2.6.32.41/fs/ocfs2/localalloc.c linux-2.6.32.41/fs/ocfs2/localalloc.c
41180 --- linux-2.6.32.41/fs/ocfs2/localalloc.c 2011-03-27 14:31:47.000000000 -0400
41181 +++ linux-2.6.32.41/fs/ocfs2/localalloc.c 2011-04-17 15:56:46.000000000 -0400
41182 @@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_windo
41183 goto bail;
41184 }
41185
41186 - atomic_inc(&osb->alloc_stats.moves);
41187 + atomic_inc_unchecked(&osb->alloc_stats.moves);
41188
41189 status = 0;
41190 bail:
41191 diff -urNp linux-2.6.32.41/fs/ocfs2/namei.c linux-2.6.32.41/fs/ocfs2/namei.c
41192 --- linux-2.6.32.41/fs/ocfs2/namei.c 2011-03-27 14:31:47.000000000 -0400
41193 +++ linux-2.6.32.41/fs/ocfs2/namei.c 2011-05-16 21:46:57.000000000 -0400
41194 @@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *ol
41195 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
41196 struct ocfs2_dir_lookup_result target_insert = { NULL, };
41197
41198 + pax_track_stack();
41199 +
41200 /* At some point it might be nice to break this function up a
41201 * bit. */
41202
41203 diff -urNp linux-2.6.32.41/fs/ocfs2/ocfs2.h linux-2.6.32.41/fs/ocfs2/ocfs2.h
41204 --- linux-2.6.32.41/fs/ocfs2/ocfs2.h 2011-03-27 14:31:47.000000000 -0400
41205 +++ linux-2.6.32.41/fs/ocfs2/ocfs2.h 2011-04-17 15:56:46.000000000 -0400
41206 @@ -217,11 +217,11 @@ enum ocfs2_vol_state
41207
41208 struct ocfs2_alloc_stats
41209 {
41210 - atomic_t moves;
41211 - atomic_t local_data;
41212 - atomic_t bitmap_data;
41213 - atomic_t bg_allocs;
41214 - atomic_t bg_extends;
41215 + atomic_unchecked_t moves;
41216 + atomic_unchecked_t local_data;
41217 + atomic_unchecked_t bitmap_data;
41218 + atomic_unchecked_t bg_allocs;
41219 + atomic_unchecked_t bg_extends;
41220 };
41221
41222 enum ocfs2_local_alloc_state
41223 diff -urNp linux-2.6.32.41/fs/ocfs2/suballoc.c linux-2.6.32.41/fs/ocfs2/suballoc.c
41224 --- linux-2.6.32.41/fs/ocfs2/suballoc.c 2011-03-27 14:31:47.000000000 -0400
41225 +++ linux-2.6.32.41/fs/ocfs2/suballoc.c 2011-04-17 15:56:46.000000000 -0400
41226 @@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(s
41227 mlog_errno(status);
41228 goto bail;
41229 }
41230 - atomic_inc(&osb->alloc_stats.bg_extends);
41231 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
41232
41233 /* You should never ask for this much metadata */
41234 BUG_ON(bits_wanted >
41235 @@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_su
41236 mlog_errno(status);
41237 goto bail;
41238 }
41239 - atomic_inc(&osb->alloc_stats.bg_allocs);
41240 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
41241
41242 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
41243 ac->ac_bits_given += (*num_bits);
41244 @@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_s
41245 mlog_errno(status);
41246 goto bail;
41247 }
41248 - atomic_inc(&osb->alloc_stats.bg_allocs);
41249 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
41250
41251 BUG_ON(num_bits != 1);
41252
41253 @@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
41254 cluster_start,
41255 num_clusters);
41256 if (!status)
41257 - atomic_inc(&osb->alloc_stats.local_data);
41258 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
41259 } else {
41260 if (min_clusters > (osb->bitmap_cpg - 1)) {
41261 /* The only paths asking for contiguousness
41262 @@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
41263 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
41264 bg_blkno,
41265 bg_bit_off);
41266 - atomic_inc(&osb->alloc_stats.bitmap_data);
41267 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
41268 }
41269 }
41270 if (status < 0) {
41271 diff -urNp linux-2.6.32.41/fs/ocfs2/super.c linux-2.6.32.41/fs/ocfs2/super.c
41272 --- linux-2.6.32.41/fs/ocfs2/super.c 2011-03-27 14:31:47.000000000 -0400
41273 +++ linux-2.6.32.41/fs/ocfs2/super.c 2011-04-17 15:56:46.000000000 -0400
41274 @@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
41275 "%10s => GlobalAllocs: %d LocalAllocs: %d "
41276 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
41277 "Stats",
41278 - atomic_read(&osb->alloc_stats.bitmap_data),
41279 - atomic_read(&osb->alloc_stats.local_data),
41280 - atomic_read(&osb->alloc_stats.bg_allocs),
41281 - atomic_read(&osb->alloc_stats.moves),
41282 - atomic_read(&osb->alloc_stats.bg_extends));
41283 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
41284 + atomic_read_unchecked(&osb->alloc_stats.local_data),
41285 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
41286 + atomic_read_unchecked(&osb->alloc_stats.moves),
41287 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
41288
41289 out += snprintf(buf + out, len - out,
41290 "%10s => State: %u Descriptor: %llu Size: %u bits "
41291 @@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct
41292 spin_lock_init(&osb->osb_xattr_lock);
41293 ocfs2_init_inode_steal_slot(osb);
41294
41295 - atomic_set(&osb->alloc_stats.moves, 0);
41296 - atomic_set(&osb->alloc_stats.local_data, 0);
41297 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
41298 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
41299 - atomic_set(&osb->alloc_stats.bg_extends, 0);
41300 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
41301 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
41302 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
41303 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
41304 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
41305
41306 /* Copy the blockcheck stats from the superblock probe */
41307 osb->osb_ecc_stats = *stats;
41308 diff -urNp linux-2.6.32.41/fs/open.c linux-2.6.32.41/fs/open.c
41309 --- linux-2.6.32.41/fs/open.c 2011-03-27 14:31:47.000000000 -0400
41310 +++ linux-2.6.32.41/fs/open.c 2011-04-17 15:56:46.000000000 -0400
41311 @@ -275,6 +275,10 @@ static long do_sys_truncate(const char _
41312 error = locks_verify_truncate(inode, NULL, length);
41313 if (!error)
41314 error = security_path_truncate(&path, length, 0);
41315 +
41316 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
41317 + error = -EACCES;
41318 +
41319 if (!error) {
41320 vfs_dq_init(inode);
41321 error = do_truncate(path.dentry, length, 0, NULL);
41322 @@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
41323 if (__mnt_is_readonly(path.mnt))
41324 res = -EROFS;
41325
41326 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
41327 + res = -EACCES;
41328 +
41329 out_path_release:
41330 path_put(&path);
41331 out:
41332 @@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user
41333 if (error)
41334 goto dput_and_out;
41335
41336 + gr_log_chdir(path.dentry, path.mnt);
41337 +
41338 set_fs_pwd(current->fs, &path);
41339
41340 dput_and_out:
41341 @@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
41342 goto out_putf;
41343
41344 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
41345 +
41346 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
41347 + error = -EPERM;
41348 +
41349 + if (!error)
41350 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
41351 +
41352 if (!error)
41353 set_fs_pwd(current->fs, &file->f_path);
41354 out_putf:
41355 @@ -588,7 +604,18 @@ SYSCALL_DEFINE1(chroot, const char __use
41356 if (!capable(CAP_SYS_CHROOT))
41357 goto dput_and_out;
41358
41359 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
41360 + goto dput_and_out;
41361 +
41362 + if (gr_handle_chroot_caps(&path)) {
41363 + error = -ENOMEM;
41364 + goto dput_and_out;
41365 + }
41366 +
41367 set_fs_root(current->fs, &path);
41368 +
41369 + gr_handle_chroot_chdir(&path);
41370 +
41371 error = 0;
41372 dput_and_out:
41373 path_put(&path);
41374 @@ -616,12 +643,27 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
41375 err = mnt_want_write_file(file);
41376 if (err)
41377 goto out_putf;
41378 +
41379 mutex_lock(&inode->i_mutex);
41380 +
41381 + if (!gr_acl_handle_fchmod(dentry, file->f_path.mnt, mode)) {
41382 + err = -EACCES;
41383 + goto out_unlock;
41384 + }
41385 +
41386 if (mode == (mode_t) -1)
41387 mode = inode->i_mode;
41388 +
41389 + if (gr_handle_chroot_chmod(dentry, file->f_path.mnt, mode)) {
41390 + err = -EPERM;
41391 + goto out_unlock;
41392 + }
41393 +
41394 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
41395 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
41396 err = notify_change(dentry, &newattrs);
41397 +
41398 +out_unlock:
41399 mutex_unlock(&inode->i_mutex);
41400 mnt_drop_write(file->f_path.mnt);
41401 out_putf:
41402 @@ -645,12 +687,27 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
41403 error = mnt_want_write(path.mnt);
41404 if (error)
41405 goto dput_and_out;
41406 +
41407 mutex_lock(&inode->i_mutex);
41408 +
41409 + if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
41410 + error = -EACCES;
41411 + goto out_unlock;
41412 + }
41413 +
41414 if (mode == (mode_t) -1)
41415 mode = inode->i_mode;
41416 +
41417 + if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
41418 + error = -EACCES;
41419 + goto out_unlock;
41420 + }
41421 +
41422 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
41423 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
41424 error = notify_change(path.dentry, &newattrs);
41425 +
41426 +out_unlock:
41427 mutex_unlock(&inode->i_mutex);
41428 mnt_drop_write(path.mnt);
41429 dput_and_out:
41430 @@ -664,12 +721,15 @@ SYSCALL_DEFINE2(chmod, const char __user
41431 return sys_fchmodat(AT_FDCWD, filename, mode);
41432 }
41433
41434 -static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
41435 +static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
41436 {
41437 struct inode *inode = dentry->d_inode;
41438 int error;
41439 struct iattr newattrs;
41440
41441 + if (!gr_acl_handle_chown(dentry, mnt))
41442 + return -EACCES;
41443 +
41444 newattrs.ia_valid = ATTR_CTIME;
41445 if (user != (uid_t) -1) {
41446 newattrs.ia_valid |= ATTR_UID;
41447 @@ -700,7 +760,7 @@ SYSCALL_DEFINE3(chown, const char __user
41448 error = mnt_want_write(path.mnt);
41449 if (error)
41450 goto out_release;
41451 - error = chown_common(path.dentry, user, group);
41452 + error = chown_common(path.dentry, user, group, path.mnt);
41453 mnt_drop_write(path.mnt);
41454 out_release:
41455 path_put(&path);
41456 @@ -725,7 +785,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, cons
41457 error = mnt_want_write(path.mnt);
41458 if (error)
41459 goto out_release;
41460 - error = chown_common(path.dentry, user, group);
41461 + error = chown_common(path.dentry, user, group, path.mnt);
41462 mnt_drop_write(path.mnt);
41463 out_release:
41464 path_put(&path);
41465 @@ -744,7 +804,7 @@ SYSCALL_DEFINE3(lchown, const char __use
41466 error = mnt_want_write(path.mnt);
41467 if (error)
41468 goto out_release;
41469 - error = chown_common(path.dentry, user, group);
41470 + error = chown_common(path.dentry, user, group, path.mnt);
41471 mnt_drop_write(path.mnt);
41472 out_release:
41473 path_put(&path);
41474 @@ -767,7 +827,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd
41475 goto out_fput;
41476 dentry = file->f_path.dentry;
41477 audit_inode(NULL, dentry);
41478 - error = chown_common(dentry, user, group);
41479 + error = chown_common(dentry, user, group, file->f_path.mnt);
41480 mnt_drop_write(file->f_path.mnt);
41481 out_fput:
41482 fput(file);
41483 @@ -1036,7 +1096,10 @@ long do_sys_open(int dfd, const char __u
41484 if (!IS_ERR(tmp)) {
41485 fd = get_unused_fd_flags(flags);
41486 if (fd >= 0) {
41487 - struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
41488 + struct file *f;
41489 + /* don't allow to be set by userland */
41490 + flags &= ~FMODE_GREXEC;
41491 + f = do_filp_open(dfd, tmp, flags, mode, 0);
41492 if (IS_ERR(f)) {
41493 put_unused_fd(fd);
41494 fd = PTR_ERR(f);
41495 diff -urNp linux-2.6.32.41/fs/partitions/ldm.c linux-2.6.32.41/fs/partitions/ldm.c
41496 --- linux-2.6.32.41/fs/partitions/ldm.c 2011-05-10 22:12:01.000000000 -0400
41497 +++ linux-2.6.32.41/fs/partitions/ldm.c 2011-04-18 19:31:12.000000000 -0400
41498 @@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
41499 ldm_error ("A VBLK claims to have %d parts.", num);
41500 return false;
41501 }
41502 +
41503 if (rec >= num) {
41504 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
41505 return false;
41506 @@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
41507 goto found;
41508 }
41509
41510 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
41511 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
41512 if (!f) {
41513 ldm_crit ("Out of memory.");
41514 return false;
41515 diff -urNp linux-2.6.32.41/fs/partitions/mac.c linux-2.6.32.41/fs/partitions/mac.c
41516 --- linux-2.6.32.41/fs/partitions/mac.c 2011-03-27 14:31:47.000000000 -0400
41517 +++ linux-2.6.32.41/fs/partitions/mac.c 2011-04-17 15:56:46.000000000 -0400
41518 @@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitio
41519 return 0; /* not a MacOS disk */
41520 }
41521 blocks_in_map = be32_to_cpu(part->map_count);
41522 + printk(" [mac]");
41523 if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
41524 put_dev_sector(sect);
41525 return 0;
41526 }
41527 - printk(" [mac]");
41528 for (slot = 1; slot <= blocks_in_map; ++slot) {
41529 int pos = slot * secsize;
41530 put_dev_sector(sect);
41531 diff -urNp linux-2.6.32.41/fs/pipe.c linux-2.6.32.41/fs/pipe.c
41532 --- linux-2.6.32.41/fs/pipe.c 2011-03-27 14:31:47.000000000 -0400
41533 +++ linux-2.6.32.41/fs/pipe.c 2011-04-23 13:37:17.000000000 -0400
41534 @@ -401,9 +401,9 @@ redo:
41535 }
41536 if (bufs) /* More to do? */
41537 continue;
41538 - if (!pipe->writers)
41539 + if (!atomic_read(&pipe->writers))
41540 break;
41541 - if (!pipe->waiting_writers) {
41542 + if (!atomic_read(&pipe->waiting_writers)) {
41543 /* syscall merging: Usually we must not sleep
41544 * if O_NONBLOCK is set, or if we got some data.
41545 * But if a writer sleeps in kernel space, then
41546 @@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const str
41547 mutex_lock(&inode->i_mutex);
41548 pipe = inode->i_pipe;
41549
41550 - if (!pipe->readers) {
41551 + if (!atomic_read(&pipe->readers)) {
41552 send_sig(SIGPIPE, current, 0);
41553 ret = -EPIPE;
41554 goto out;
41555 @@ -511,7 +511,7 @@ redo1:
41556 for (;;) {
41557 int bufs;
41558
41559 - if (!pipe->readers) {
41560 + if (!atomic_read(&pipe->readers)) {
41561 send_sig(SIGPIPE, current, 0);
41562 if (!ret)
41563 ret = -EPIPE;
41564 @@ -597,9 +597,9 @@ redo2:
41565 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
41566 do_wakeup = 0;
41567 }
41568 - pipe->waiting_writers++;
41569 + atomic_inc(&pipe->waiting_writers);
41570 pipe_wait(pipe);
41571 - pipe->waiting_writers--;
41572 + atomic_dec(&pipe->waiting_writers);
41573 }
41574 out:
41575 mutex_unlock(&inode->i_mutex);
41576 @@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table
41577 mask = 0;
41578 if (filp->f_mode & FMODE_READ) {
41579 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
41580 - if (!pipe->writers && filp->f_version != pipe->w_counter)
41581 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
41582 mask |= POLLHUP;
41583 }
41584
41585 @@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table
41586 * Most Unices do not set POLLERR for FIFOs but on Linux they
41587 * behave exactly like pipes for poll().
41588 */
41589 - if (!pipe->readers)
41590 + if (!atomic_read(&pipe->readers))
41591 mask |= POLLERR;
41592 }
41593
41594 @@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int de
41595
41596 mutex_lock(&inode->i_mutex);
41597 pipe = inode->i_pipe;
41598 - pipe->readers -= decr;
41599 - pipe->writers -= decw;
41600 + atomic_sub(decr, &pipe->readers);
41601 + atomic_sub(decw, &pipe->writers);
41602
41603 - if (!pipe->readers && !pipe->writers) {
41604 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
41605 free_pipe_info(inode);
41606 } else {
41607 wake_up_interruptible_sync(&pipe->wait);
41608 @@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, stru
41609
41610 if (inode->i_pipe) {
41611 ret = 0;
41612 - inode->i_pipe->readers++;
41613 + atomic_inc(&inode->i_pipe->readers);
41614 }
41615
41616 mutex_unlock(&inode->i_mutex);
41617 @@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, str
41618
41619 if (inode->i_pipe) {
41620 ret = 0;
41621 - inode->i_pipe->writers++;
41622 + atomic_inc(&inode->i_pipe->writers);
41623 }
41624
41625 mutex_unlock(&inode->i_mutex);
41626 @@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, stru
41627 if (inode->i_pipe) {
41628 ret = 0;
41629 if (filp->f_mode & FMODE_READ)
41630 - inode->i_pipe->readers++;
41631 + atomic_inc(&inode->i_pipe->readers);
41632 if (filp->f_mode & FMODE_WRITE)
41633 - inode->i_pipe->writers++;
41634 + atomic_inc(&inode->i_pipe->writers);
41635 }
41636
41637 mutex_unlock(&inode->i_mutex);
41638 @@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
41639 inode->i_pipe = NULL;
41640 }
41641
41642 -static struct vfsmount *pipe_mnt __read_mostly;
41643 +struct vfsmount *pipe_mnt __read_mostly;
41644 static int pipefs_delete_dentry(struct dentry *dentry)
41645 {
41646 /*
41647 @@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(voi
41648 goto fail_iput;
41649 inode->i_pipe = pipe;
41650
41651 - pipe->readers = pipe->writers = 1;
41652 + atomic_set(&pipe->readers, 1);
41653 + atomic_set(&pipe->writers, 1);
41654 inode->i_fop = &rdwr_pipefifo_fops;
41655
41656 /*
41657 diff -urNp linux-2.6.32.41/fs/proc/array.c linux-2.6.32.41/fs/proc/array.c
41658 --- linux-2.6.32.41/fs/proc/array.c 2011-03-27 14:31:47.000000000 -0400
41659 +++ linux-2.6.32.41/fs/proc/array.c 2011-05-16 21:46:57.000000000 -0400
41660 @@ -60,6 +60,7 @@
41661 #include <linux/tty.h>
41662 #include <linux/string.h>
41663 #include <linux/mman.h>
41664 +#include <linux/grsecurity.h>
41665 #include <linux/proc_fs.h>
41666 #include <linux/ioport.h>
41667 #include <linux/uaccess.h>
41668 @@ -321,6 +322,21 @@ static inline void task_context_switch_c
41669 p->nivcsw);
41670 }
41671
41672 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
41673 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
41674 +{
41675 + if (p->mm)
41676 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
41677 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
41678 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
41679 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
41680 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
41681 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
41682 + else
41683 + seq_printf(m, "PaX:\t-----\n");
41684 +}
41685 +#endif
41686 +
41687 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
41688 struct pid *pid, struct task_struct *task)
41689 {
41690 @@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m,
41691 task_cap(m, task);
41692 cpuset_task_status_allowed(m, task);
41693 task_context_switch_counts(m, task);
41694 +
41695 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
41696 + task_pax(m, task);
41697 +#endif
41698 +
41699 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
41700 + task_grsec_rbac(m, task);
41701 +#endif
41702 +
41703 return 0;
41704 }
41705
41706 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41707 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
41708 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
41709 + _mm->pax_flags & MF_PAX_SEGMEXEC))
41710 +#endif
41711 +
41712 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
41713 struct pid *pid, struct task_struct *task, int whole)
41714 {
41715 @@ -358,9 +389,11 @@ static int do_task_stat(struct seq_file
41716 cputime_t cutime, cstime, utime, stime;
41717 cputime_t cgtime, gtime;
41718 unsigned long rsslim = 0;
41719 - char tcomm[sizeof(task->comm)];
41720 + char tcomm[sizeof(task->comm)] = { 0 };
41721 unsigned long flags;
41722
41723 + pax_track_stack();
41724 +
41725 state = *get_task_state(task);
41726 vsize = eip = esp = 0;
41727 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
41728 @@ -433,6 +466,19 @@ static int do_task_stat(struct seq_file
41729 gtime = task_gtime(task);
41730 }
41731
41732 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41733 + if (PAX_RAND_FLAGS(mm)) {
41734 + eip = 0;
41735 + esp = 0;
41736 + wchan = 0;
41737 + }
41738 +#endif
41739 +#ifdef CONFIG_GRKERNSEC_HIDESYM
41740 + wchan = 0;
41741 + eip =0;
41742 + esp =0;
41743 +#endif
41744 +
41745 /* scale priority and nice values from timeslices to -20..20 */
41746 /* to make it look like a "normal" Unix priority/nice value */
41747 priority = task_prio(task);
41748 @@ -473,9 +519,15 @@ static int do_task_stat(struct seq_file
41749 vsize,
41750 mm ? get_mm_rss(mm) : 0,
41751 rsslim,
41752 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41753 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
41754 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
41755 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
41756 +#else
41757 mm ? (permitted ? mm->start_code : 1) : 0,
41758 mm ? (permitted ? mm->end_code : 1) : 0,
41759 (permitted && mm) ? mm->start_stack : 0,
41760 +#endif
41761 esp,
41762 eip,
41763 /* The signal information here is obsolete.
41764 @@ -528,3 +580,18 @@ int proc_pid_statm(struct seq_file *m, s
41765
41766 return 0;
41767 }
41768 +
41769 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
41770 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
41771 +{
41772 + u32 curr_ip = 0;
41773 + unsigned long flags;
41774 +
41775 + if (lock_task_sighand(task, &flags)) {
41776 + curr_ip = task->signal->curr_ip;
41777 + unlock_task_sighand(task, &flags);
41778 + }
41779 +
41780 + return sprintf(buffer, "%pI4\n", &curr_ip);
41781 +}
41782 +#endif
41783 diff -urNp linux-2.6.32.41/fs/proc/base.c linux-2.6.32.41/fs/proc/base.c
41784 --- linux-2.6.32.41/fs/proc/base.c 2011-04-22 19:16:29.000000000 -0400
41785 +++ linux-2.6.32.41/fs/proc/base.c 2011-06-04 21:20:50.000000000 -0400
41786 @@ -102,6 +102,22 @@ struct pid_entry {
41787 union proc_op op;
41788 };
41789
41790 +struct getdents_callback {
41791 + struct linux_dirent __user * current_dir;
41792 + struct linux_dirent __user * previous;
41793 + struct file * file;
41794 + int count;
41795 + int error;
41796 +};
41797 +
41798 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
41799 + loff_t offset, u64 ino, unsigned int d_type)
41800 +{
41801 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
41802 + buf->error = -EINVAL;
41803 + return 0;
41804 +}
41805 +
41806 #define NOD(NAME, MODE, IOP, FOP, OP) { \
41807 .name = (NAME), \
41808 .len = sizeof(NAME) - 1, \
41809 @@ -213,6 +229,9 @@ static int check_mem_permission(struct t
41810 if (task == current)
41811 return 0;
41812
41813 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
41814 + return -EPERM;
41815 +
41816 /*
41817 * If current is actively ptrace'ing, and would also be
41818 * permitted to freshly attach with ptrace now, permit it.
41819 @@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_
41820 if (!mm->arg_end)
41821 goto out_mm; /* Shh! No looking before we're done */
41822
41823 + if (gr_acl_handle_procpidmem(task))
41824 + goto out_mm;
41825 +
41826 len = mm->arg_end - mm->arg_start;
41827
41828 if (len > PAGE_SIZE)
41829 @@ -287,12 +309,28 @@ out:
41830 return res;
41831 }
41832
41833 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41834 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
41835 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
41836 + _mm->pax_flags & MF_PAX_SEGMEXEC))
41837 +#endif
41838 +
41839 static int proc_pid_auxv(struct task_struct *task, char *buffer)
41840 {
41841 int res = 0;
41842 struct mm_struct *mm = get_task_mm(task);
41843 if (mm) {
41844 unsigned int nwords = 0;
41845 +
41846 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41847 + /* allow if we're currently ptracing this task */
41848 + if (PAX_RAND_FLAGS(mm) &&
41849 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
41850 + mmput(mm);
41851 + return res;
41852 + }
41853 +#endif
41854 +
41855 do {
41856 nwords += 2;
41857 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
41858 @@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_str
41859 }
41860
41861
41862 -#ifdef CONFIG_KALLSYMS
41863 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
41864 /*
41865 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
41866 * Returns the resolved symbol. If that fails, simply return the address.
41867 @@ -328,7 +366,7 @@ static int proc_pid_wchan(struct task_st
41868 }
41869 #endif /* CONFIG_KALLSYMS */
41870
41871 -#ifdef CONFIG_STACKTRACE
41872 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
41873
41874 #define MAX_STACK_TRACE_DEPTH 64
41875
41876 @@ -522,7 +560,7 @@ static int proc_pid_limits(struct task_s
41877 return count;
41878 }
41879
41880 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
41881 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
41882 static int proc_pid_syscall(struct task_struct *task, char *buffer)
41883 {
41884 long nr;
41885 @@ -547,7 +585,7 @@ static int proc_pid_syscall(struct task_
41886 /************************************************************************/
41887
41888 /* permission checks */
41889 -static int proc_fd_access_allowed(struct inode *inode)
41890 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
41891 {
41892 struct task_struct *task;
41893 int allowed = 0;
41894 @@ -557,7 +595,10 @@ static int proc_fd_access_allowed(struct
41895 */
41896 task = get_proc_task(inode);
41897 if (task) {
41898 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
41899 + if (log)
41900 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
41901 + else
41902 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
41903 put_task_struct(task);
41904 }
41905 return allowed;
41906 @@ -936,6 +977,9 @@ static ssize_t environ_read(struct file
41907 if (!task)
41908 goto out_no_task;
41909
41910 + if (gr_acl_handle_procpidmem(task))
41911 + goto out;
41912 +
41913 if (!ptrace_may_access(task, PTRACE_MODE_READ))
41914 goto out;
41915
41916 @@ -1350,7 +1394,7 @@ static void *proc_pid_follow_link(struct
41917 path_put(&nd->path);
41918
41919 /* Are we allowed to snoop on the tasks file descriptors? */
41920 - if (!proc_fd_access_allowed(inode))
41921 + if (!proc_fd_access_allowed(inode,0))
41922 goto out;
41923
41924 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
41925 @@ -1390,8 +1434,18 @@ static int proc_pid_readlink(struct dent
41926 struct path path;
41927
41928 /* Are we allowed to snoop on the tasks file descriptors? */
41929 - if (!proc_fd_access_allowed(inode))
41930 - goto out;
41931 + /* logging this is needed for learning on chromium to work properly,
41932 + but we don't want to flood the logs from 'ps' which does a readlink
41933 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
41934 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
41935 + */
41936 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
41937 + if (!proc_fd_access_allowed(inode,0))
41938 + goto out;
41939 + } else {
41940 + if (!proc_fd_access_allowed(inode,1))
41941 + goto out;
41942 + }
41943
41944 error = PROC_I(inode)->op.proc_get_link(inode, &path);
41945 if (error)
41946 @@ -1456,7 +1510,11 @@ static struct inode *proc_pid_make_inode
41947 rcu_read_lock();
41948 cred = __task_cred(task);
41949 inode->i_uid = cred->euid;
41950 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
41951 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
41952 +#else
41953 inode->i_gid = cred->egid;
41954 +#endif
41955 rcu_read_unlock();
41956 }
41957 security_task_to_inode(task, inode);
41958 @@ -1474,6 +1532,9 @@ static int pid_getattr(struct vfsmount *
41959 struct inode *inode = dentry->d_inode;
41960 struct task_struct *task;
41961 const struct cred *cred;
41962 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
41963 + const struct cred *tmpcred = current_cred();
41964 +#endif
41965
41966 generic_fillattr(inode, stat);
41967
41968 @@ -1481,13 +1542,41 @@ static int pid_getattr(struct vfsmount *
41969 stat->uid = 0;
41970 stat->gid = 0;
41971 task = pid_task(proc_pid(inode), PIDTYPE_PID);
41972 +
41973 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
41974 + rcu_read_unlock();
41975 + return -ENOENT;
41976 + }
41977 +
41978 if (task) {
41979 + cred = __task_cred(task);
41980 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
41981 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
41982 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
41983 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
41984 +#endif
41985 + ) {
41986 +#endif
41987 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
41988 +#ifdef CONFIG_GRKERNSEC_PROC_USER
41989 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
41990 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
41991 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
41992 +#endif
41993 task_dumpable(task)) {
41994 - cred = __task_cred(task);
41995 stat->uid = cred->euid;
41996 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
41997 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
41998 +#else
41999 stat->gid = cred->egid;
42000 +#endif
42001 }
42002 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42003 + } else {
42004 + rcu_read_unlock();
42005 + return -ENOENT;
42006 + }
42007 +#endif
42008 }
42009 rcu_read_unlock();
42010 return 0;
42011 @@ -1518,11 +1607,20 @@ static int pid_revalidate(struct dentry
42012
42013 if (task) {
42014 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
42015 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42016 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
42017 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42018 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
42019 +#endif
42020 task_dumpable(task)) {
42021 rcu_read_lock();
42022 cred = __task_cred(task);
42023 inode->i_uid = cred->euid;
42024 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42025 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42026 +#else
42027 inode->i_gid = cred->egid;
42028 +#endif
42029 rcu_read_unlock();
42030 } else {
42031 inode->i_uid = 0;
42032 @@ -1643,7 +1741,8 @@ static int proc_fd_info(struct inode *in
42033 int fd = proc_fd(inode);
42034
42035 if (task) {
42036 - files = get_files_struct(task);
42037 + if (!gr_acl_handle_procpidmem(task))
42038 + files = get_files_struct(task);
42039 put_task_struct(task);
42040 }
42041 if (files) {
42042 @@ -1895,12 +1994,22 @@ static const struct file_operations proc
42043 static int proc_fd_permission(struct inode *inode, int mask)
42044 {
42045 int rv;
42046 + struct task_struct *task;
42047
42048 rv = generic_permission(inode, mask, NULL);
42049 - if (rv == 0)
42050 - return 0;
42051 +
42052 if (task_pid(current) == proc_pid(inode))
42053 rv = 0;
42054 +
42055 + task = get_proc_task(inode);
42056 + if (task == NULL)
42057 + return rv;
42058 +
42059 + if (gr_acl_handle_procpidmem(task))
42060 + rv = -EACCES;
42061 +
42062 + put_task_struct(task);
42063 +
42064 return rv;
42065 }
42066
42067 @@ -2009,6 +2118,9 @@ static struct dentry *proc_pident_lookup
42068 if (!task)
42069 goto out_no_task;
42070
42071 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42072 + goto out;
42073 +
42074 /*
42075 * Yes, it does not scale. And it should not. Don't add
42076 * new entries into /proc/<tgid>/ without very good reasons.
42077 @@ -2053,6 +2165,9 @@ static int proc_pident_readdir(struct fi
42078 if (!task)
42079 goto out_no_task;
42080
42081 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42082 + goto out;
42083 +
42084 ret = 0;
42085 i = filp->f_pos;
42086 switch (i) {
42087 @@ -2320,7 +2435,7 @@ static void *proc_self_follow_link(struc
42088 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
42089 void *cookie)
42090 {
42091 - char *s = nd_get_link(nd);
42092 + const char *s = nd_get_link(nd);
42093 if (!IS_ERR(s))
42094 __putname(s);
42095 }
42096 @@ -2519,7 +2634,7 @@ static const struct pid_entry tgid_base_
42097 #ifdef CONFIG_SCHED_DEBUG
42098 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
42099 #endif
42100 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
42101 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
42102 INF("syscall", S_IRUSR, proc_pid_syscall),
42103 #endif
42104 INF("cmdline", S_IRUGO, proc_pid_cmdline),
42105 @@ -2544,10 +2659,10 @@ static const struct pid_entry tgid_base_
42106 #ifdef CONFIG_SECURITY
42107 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
42108 #endif
42109 -#ifdef CONFIG_KALLSYMS
42110 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42111 INF("wchan", S_IRUGO, proc_pid_wchan),
42112 #endif
42113 -#ifdef CONFIG_STACKTRACE
42114 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42115 ONE("stack", S_IRUSR, proc_pid_stack),
42116 #endif
42117 #ifdef CONFIG_SCHEDSTATS
42118 @@ -2577,6 +2692,9 @@ static const struct pid_entry tgid_base_
42119 #ifdef CONFIG_TASK_IO_ACCOUNTING
42120 INF("io", S_IRUGO, proc_tgid_io_accounting),
42121 #endif
42122 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
42123 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
42124 +#endif
42125 };
42126
42127 static int proc_tgid_base_readdir(struct file * filp,
42128 @@ -2701,7 +2819,14 @@ static struct dentry *proc_pid_instantia
42129 if (!inode)
42130 goto out;
42131
42132 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42133 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
42134 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42135 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42136 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
42137 +#else
42138 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
42139 +#endif
42140 inode->i_op = &proc_tgid_base_inode_operations;
42141 inode->i_fop = &proc_tgid_base_operations;
42142 inode->i_flags|=S_IMMUTABLE;
42143 @@ -2743,7 +2868,11 @@ struct dentry *proc_pid_lookup(struct in
42144 if (!task)
42145 goto out;
42146
42147 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42148 + goto out_put_task;
42149 +
42150 result = proc_pid_instantiate(dir, dentry, task, NULL);
42151 +out_put_task:
42152 put_task_struct(task);
42153 out:
42154 return result;
42155 @@ -2808,6 +2937,11 @@ int proc_pid_readdir(struct file * filp,
42156 {
42157 unsigned int nr;
42158 struct task_struct *reaper;
42159 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42160 + const struct cred *tmpcred = current_cred();
42161 + const struct cred *itercred;
42162 +#endif
42163 + filldir_t __filldir = filldir;
42164 struct tgid_iter iter;
42165 struct pid_namespace *ns;
42166
42167 @@ -2831,8 +2965,27 @@ int proc_pid_readdir(struct file * filp,
42168 for (iter = next_tgid(ns, iter);
42169 iter.task;
42170 iter.tgid += 1, iter = next_tgid(ns, iter)) {
42171 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42172 + rcu_read_lock();
42173 + itercred = __task_cred(iter.task);
42174 +#endif
42175 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
42176 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42177 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
42178 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42179 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
42180 +#endif
42181 + )
42182 +#endif
42183 + )
42184 + __filldir = &gr_fake_filldir;
42185 + else
42186 + __filldir = filldir;
42187 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42188 + rcu_read_unlock();
42189 +#endif
42190 filp->f_pos = iter.tgid + TGID_OFFSET;
42191 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
42192 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
42193 put_task_struct(iter.task);
42194 goto out;
42195 }
42196 @@ -2858,7 +3011,7 @@ static const struct pid_entry tid_base_s
42197 #ifdef CONFIG_SCHED_DEBUG
42198 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
42199 #endif
42200 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
42201 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
42202 INF("syscall", S_IRUSR, proc_pid_syscall),
42203 #endif
42204 INF("cmdline", S_IRUGO, proc_pid_cmdline),
42205 @@ -2882,10 +3035,10 @@ static const struct pid_entry tid_base_s
42206 #ifdef CONFIG_SECURITY
42207 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
42208 #endif
42209 -#ifdef CONFIG_KALLSYMS
42210 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42211 INF("wchan", S_IRUGO, proc_pid_wchan),
42212 #endif
42213 -#ifdef CONFIG_STACKTRACE
42214 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42215 ONE("stack", S_IRUSR, proc_pid_stack),
42216 #endif
42217 #ifdef CONFIG_SCHEDSTATS
42218 diff -urNp linux-2.6.32.41/fs/proc/cmdline.c linux-2.6.32.41/fs/proc/cmdline.c
42219 --- linux-2.6.32.41/fs/proc/cmdline.c 2011-03-27 14:31:47.000000000 -0400
42220 +++ linux-2.6.32.41/fs/proc/cmdline.c 2011-04-17 15:56:46.000000000 -0400
42221 @@ -23,7 +23,11 @@ static const struct file_operations cmdl
42222
42223 static int __init proc_cmdline_init(void)
42224 {
42225 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
42226 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
42227 +#else
42228 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
42229 +#endif
42230 return 0;
42231 }
42232 module_init(proc_cmdline_init);
42233 diff -urNp linux-2.6.32.41/fs/proc/devices.c linux-2.6.32.41/fs/proc/devices.c
42234 --- linux-2.6.32.41/fs/proc/devices.c 2011-03-27 14:31:47.000000000 -0400
42235 +++ linux-2.6.32.41/fs/proc/devices.c 2011-04-17 15:56:46.000000000 -0400
42236 @@ -64,7 +64,11 @@ static const struct file_operations proc
42237
42238 static int __init proc_devices_init(void)
42239 {
42240 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
42241 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
42242 +#else
42243 proc_create("devices", 0, NULL, &proc_devinfo_operations);
42244 +#endif
42245 return 0;
42246 }
42247 module_init(proc_devices_init);
42248 diff -urNp linux-2.6.32.41/fs/proc/inode.c linux-2.6.32.41/fs/proc/inode.c
42249 --- linux-2.6.32.41/fs/proc/inode.c 2011-03-27 14:31:47.000000000 -0400
42250 +++ linux-2.6.32.41/fs/proc/inode.c 2011-04-17 15:56:46.000000000 -0400
42251 @@ -457,7 +457,11 @@ struct inode *proc_get_inode(struct supe
42252 if (de->mode) {
42253 inode->i_mode = de->mode;
42254 inode->i_uid = de->uid;
42255 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42256 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42257 +#else
42258 inode->i_gid = de->gid;
42259 +#endif
42260 }
42261 if (de->size)
42262 inode->i_size = de->size;
42263 diff -urNp linux-2.6.32.41/fs/proc/internal.h linux-2.6.32.41/fs/proc/internal.h
42264 --- linux-2.6.32.41/fs/proc/internal.h 2011-03-27 14:31:47.000000000 -0400
42265 +++ linux-2.6.32.41/fs/proc/internal.h 2011-04-17 15:56:46.000000000 -0400
42266 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
42267 struct pid *pid, struct task_struct *task);
42268 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
42269 struct pid *pid, struct task_struct *task);
42270 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
42271 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
42272 +#endif
42273 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
42274
42275 extern const struct file_operations proc_maps_operations;
42276 diff -urNp linux-2.6.32.41/fs/proc/Kconfig linux-2.6.32.41/fs/proc/Kconfig
42277 --- linux-2.6.32.41/fs/proc/Kconfig 2011-03-27 14:31:47.000000000 -0400
42278 +++ linux-2.6.32.41/fs/proc/Kconfig 2011-04-17 15:56:46.000000000 -0400
42279 @@ -30,12 +30,12 @@ config PROC_FS
42280
42281 config PROC_KCORE
42282 bool "/proc/kcore support" if !ARM
42283 - depends on PROC_FS && MMU
42284 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
42285
42286 config PROC_VMCORE
42287 bool "/proc/vmcore support (EXPERIMENTAL)"
42288 - depends on PROC_FS && CRASH_DUMP
42289 - default y
42290 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
42291 + default n
42292 help
42293 Exports the dump image of crashed kernel in ELF format.
42294
42295 @@ -59,8 +59,8 @@ config PROC_SYSCTL
42296 limited in memory.
42297
42298 config PROC_PAGE_MONITOR
42299 - default y
42300 - depends on PROC_FS && MMU
42301 + default n
42302 + depends on PROC_FS && MMU && !GRKERNSEC
42303 bool "Enable /proc page monitoring" if EMBEDDED
42304 help
42305 Various /proc files exist to monitor process memory utilization:
42306 diff -urNp linux-2.6.32.41/fs/proc/kcore.c linux-2.6.32.41/fs/proc/kcore.c
42307 --- linux-2.6.32.41/fs/proc/kcore.c 2011-03-27 14:31:47.000000000 -0400
42308 +++ linux-2.6.32.41/fs/proc/kcore.c 2011-05-16 21:46:57.000000000 -0400
42309 @@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bu
42310 off_t offset = 0;
42311 struct kcore_list *m;
42312
42313 + pax_track_stack();
42314 +
42315 /* setup ELF header */
42316 elf = (struct elfhdr *) bufp;
42317 bufp += sizeof(struct elfhdr);
42318 @@ -477,9 +479,10 @@ read_kcore(struct file *file, char __use
42319 * the addresses in the elf_phdr on our list.
42320 */
42321 start = kc_offset_to_vaddr(*fpos - elf_buflen);
42322 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
42323 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
42324 + if (tsz > buflen)
42325 tsz = buflen;
42326 -
42327 +
42328 while (buflen) {
42329 struct kcore_list *m;
42330
42331 @@ -508,20 +511,23 @@ read_kcore(struct file *file, char __use
42332 kfree(elf_buf);
42333 } else {
42334 if (kern_addr_valid(start)) {
42335 - unsigned long n;
42336 + char *elf_buf;
42337 + mm_segment_t oldfs;
42338
42339 - n = copy_to_user(buffer, (char *)start, tsz);
42340 - /*
42341 - * We cannot distingush between fault on source
42342 - * and fault on destination. When this happens
42343 - * we clear too and hope it will trigger the
42344 - * EFAULT again.
42345 - */
42346 - if (n) {
42347 - if (clear_user(buffer + tsz - n,
42348 - n))
42349 + elf_buf = kmalloc(tsz, GFP_KERNEL);
42350 + if (!elf_buf)
42351 + return -ENOMEM;
42352 + oldfs = get_fs();
42353 + set_fs(KERNEL_DS);
42354 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
42355 + set_fs(oldfs);
42356 + if (copy_to_user(buffer, elf_buf, tsz)) {
42357 + kfree(elf_buf);
42358 return -EFAULT;
42359 + }
42360 }
42361 + set_fs(oldfs);
42362 + kfree(elf_buf);
42363 } else {
42364 if (clear_user(buffer, tsz))
42365 return -EFAULT;
42366 @@ -541,6 +547,9 @@ read_kcore(struct file *file, char __use
42367
42368 static int open_kcore(struct inode *inode, struct file *filp)
42369 {
42370 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
42371 + return -EPERM;
42372 +#endif
42373 if (!capable(CAP_SYS_RAWIO))
42374 return -EPERM;
42375 if (kcore_need_update)
42376 diff -urNp linux-2.6.32.41/fs/proc/meminfo.c linux-2.6.32.41/fs/proc/meminfo.c
42377 --- linux-2.6.32.41/fs/proc/meminfo.c 2011-03-27 14:31:47.000000000 -0400
42378 +++ linux-2.6.32.41/fs/proc/meminfo.c 2011-05-16 21:46:57.000000000 -0400
42379 @@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
42380 unsigned long pages[NR_LRU_LISTS];
42381 int lru;
42382
42383 + pax_track_stack();
42384 +
42385 /*
42386 * display in kilobytes.
42387 */
42388 @@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_
42389 vmi.used >> 10,
42390 vmi.largest_chunk >> 10
42391 #ifdef CONFIG_MEMORY_FAILURE
42392 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
42393 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
42394 #endif
42395 );
42396
42397 diff -urNp linux-2.6.32.41/fs/proc/nommu.c linux-2.6.32.41/fs/proc/nommu.c
42398 --- linux-2.6.32.41/fs/proc/nommu.c 2011-03-27 14:31:47.000000000 -0400
42399 +++ linux-2.6.32.41/fs/proc/nommu.c 2011-04-17 15:56:46.000000000 -0400
42400 @@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_
42401 if (len < 1)
42402 len = 1;
42403 seq_printf(m, "%*c", len, ' ');
42404 - seq_path(m, &file->f_path, "");
42405 + seq_path(m, &file->f_path, "\n\\");
42406 }
42407
42408 seq_putc(m, '\n');
42409 diff -urNp linux-2.6.32.41/fs/proc/proc_net.c linux-2.6.32.41/fs/proc/proc_net.c
42410 --- linux-2.6.32.41/fs/proc/proc_net.c 2011-03-27 14:31:47.000000000 -0400
42411 +++ linux-2.6.32.41/fs/proc/proc_net.c 2011-04-17 15:56:46.000000000 -0400
42412 @@ -104,6 +104,17 @@ static struct net *get_proc_task_net(str
42413 struct task_struct *task;
42414 struct nsproxy *ns;
42415 struct net *net = NULL;
42416 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42417 + const struct cred *cred = current_cred();
42418 +#endif
42419 +
42420 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42421 + if (cred->fsuid)
42422 + return net;
42423 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42424 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
42425 + return net;
42426 +#endif
42427
42428 rcu_read_lock();
42429 task = pid_task(proc_pid(dir), PIDTYPE_PID);
42430 diff -urNp linux-2.6.32.41/fs/proc/proc_sysctl.c linux-2.6.32.41/fs/proc/proc_sysctl.c
42431 --- linux-2.6.32.41/fs/proc/proc_sysctl.c 2011-03-27 14:31:47.000000000 -0400
42432 +++ linux-2.6.32.41/fs/proc/proc_sysctl.c 2011-04-17 15:56:46.000000000 -0400
42433 @@ -7,6 +7,8 @@
42434 #include <linux/security.h>
42435 #include "internal.h"
42436
42437 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
42438 +
42439 static const struct dentry_operations proc_sys_dentry_operations;
42440 static const struct file_operations proc_sys_file_operations;
42441 static const struct inode_operations proc_sys_inode_operations;
42442 @@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(st
42443 if (!p)
42444 goto out;
42445
42446 + if (gr_handle_sysctl(p, MAY_EXEC))
42447 + goto out;
42448 +
42449 err = ERR_PTR(-ENOMEM);
42450 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
42451 if (h)
42452 @@ -228,6 +233,9 @@ static int scan(struct ctl_table_header
42453 if (*pos < file->f_pos)
42454 continue;
42455
42456 + if (gr_handle_sysctl(table, 0))
42457 + continue;
42458 +
42459 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
42460 if (res)
42461 return res;
42462 @@ -344,6 +352,9 @@ static int proc_sys_getattr(struct vfsmo
42463 if (IS_ERR(head))
42464 return PTR_ERR(head);
42465
42466 + if (table && gr_handle_sysctl(table, MAY_EXEC))
42467 + return -ENOENT;
42468 +
42469 generic_fillattr(inode, stat);
42470 if (table)
42471 stat->mode = (stat->mode & S_IFMT) | table->mode;
42472 diff -urNp linux-2.6.32.41/fs/proc/root.c linux-2.6.32.41/fs/proc/root.c
42473 --- linux-2.6.32.41/fs/proc/root.c 2011-03-27 14:31:47.000000000 -0400
42474 +++ linux-2.6.32.41/fs/proc/root.c 2011-04-17 15:56:46.000000000 -0400
42475 @@ -134,7 +134,15 @@ void __init proc_root_init(void)
42476 #ifdef CONFIG_PROC_DEVICETREE
42477 proc_device_tree_init();
42478 #endif
42479 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
42480 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42481 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
42482 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42483 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
42484 +#endif
42485 +#else
42486 proc_mkdir("bus", NULL);
42487 +#endif
42488 proc_sys_init();
42489 }
42490
42491 diff -urNp linux-2.6.32.41/fs/proc/task_mmu.c linux-2.6.32.41/fs/proc/task_mmu.c
42492 --- linux-2.6.32.41/fs/proc/task_mmu.c 2011-03-27 14:31:47.000000000 -0400
42493 +++ linux-2.6.32.41/fs/proc/task_mmu.c 2011-04-23 13:38:09.000000000 -0400
42494 @@ -46,15 +46,26 @@ void task_mem(struct seq_file *m, struct
42495 "VmStk:\t%8lu kB\n"
42496 "VmExe:\t%8lu kB\n"
42497 "VmLib:\t%8lu kB\n"
42498 - "VmPTE:\t%8lu kB\n",
42499 - hiwater_vm << (PAGE_SHIFT-10),
42500 + "VmPTE:\t%8lu kB\n"
42501 +
42502 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
42503 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
42504 +#endif
42505 +
42506 + ,hiwater_vm << (PAGE_SHIFT-10),
42507 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
42508 mm->locked_vm << (PAGE_SHIFT-10),
42509 hiwater_rss << (PAGE_SHIFT-10),
42510 total_rss << (PAGE_SHIFT-10),
42511 data << (PAGE_SHIFT-10),
42512 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
42513 - (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
42514 + (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
42515 +
42516 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
42517 + , mm->context.user_cs_base, mm->context.user_cs_limit
42518 +#endif
42519 +
42520 + );
42521 }
42522
42523 unsigned long task_vsize(struct mm_struct *mm)
42524 @@ -175,7 +186,8 @@ static void m_stop(struct seq_file *m, v
42525 struct proc_maps_private *priv = m->private;
42526 struct vm_area_struct *vma = v;
42527
42528 - vma_stop(priv, vma);
42529 + if (!IS_ERR(vma))
42530 + vma_stop(priv, vma);
42531 if (priv->task)
42532 put_task_struct(priv->task);
42533 }
42534 @@ -199,6 +211,12 @@ static int do_maps_open(struct inode *in
42535 return ret;
42536 }
42537
42538 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42539 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
42540 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
42541 + _mm->pax_flags & MF_PAX_SEGMEXEC))
42542 +#endif
42543 +
42544 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
42545 {
42546 struct mm_struct *mm = vma->vm_mm;
42547 @@ -206,7 +224,6 @@ static void show_map_vma(struct seq_file
42548 int flags = vma->vm_flags;
42549 unsigned long ino = 0;
42550 unsigned long long pgoff = 0;
42551 - unsigned long start;
42552 dev_t dev = 0;
42553 int len;
42554
42555 @@ -217,20 +234,23 @@ static void show_map_vma(struct seq_file
42556 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
42557 }
42558
42559 - /* We don't show the stack guard page in /proc/maps */
42560 - start = vma->vm_start;
42561 - if (vma->vm_flags & VM_GROWSDOWN)
42562 - if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
42563 - start += PAGE_SIZE;
42564 -
42565 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
42566 - start,
42567 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42568 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
42569 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
42570 +#else
42571 + vma->vm_start,
42572 vma->vm_end,
42573 +#endif
42574 flags & VM_READ ? 'r' : '-',
42575 flags & VM_WRITE ? 'w' : '-',
42576 flags & VM_EXEC ? 'x' : '-',
42577 flags & VM_MAYSHARE ? 's' : 'p',
42578 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42579 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
42580 +#else
42581 pgoff,
42582 +#endif
42583 MAJOR(dev), MINOR(dev), ino, &len);
42584
42585 /*
42586 @@ -239,7 +259,7 @@ static void show_map_vma(struct seq_file
42587 */
42588 if (file) {
42589 pad_len_spaces(m, len);
42590 - seq_path(m, &file->f_path, "\n");
42591 + seq_path(m, &file->f_path, "\n\\");
42592 } else {
42593 const char *name = arch_vma_name(vma);
42594 if (!name) {
42595 @@ -247,8 +267,9 @@ static void show_map_vma(struct seq_file
42596 if (vma->vm_start <= mm->brk &&
42597 vma->vm_end >= mm->start_brk) {
42598 name = "[heap]";
42599 - } else if (vma->vm_start <= mm->start_stack &&
42600 - vma->vm_end >= mm->start_stack) {
42601 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
42602 + (vma->vm_start <= mm->start_stack &&
42603 + vma->vm_end >= mm->start_stack)) {
42604 name = "[stack]";
42605 }
42606 } else {
42607 @@ -391,9 +412,16 @@ static int show_smap(struct seq_file *m,
42608 };
42609
42610 memset(&mss, 0, sizeof mss);
42611 - mss.vma = vma;
42612 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
42613 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
42614 +
42615 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42616 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
42617 +#endif
42618 + mss.vma = vma;
42619 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
42620 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
42621 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42622 + }
42623 +#endif
42624
42625 show_map_vma(m, vma);
42626
42627 @@ -409,7 +437,11 @@ static int show_smap(struct seq_file *m,
42628 "Swap: %8lu kB\n"
42629 "KernelPageSize: %8lu kB\n"
42630 "MMUPageSize: %8lu kB\n",
42631 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42632 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
42633 +#else
42634 (vma->vm_end - vma->vm_start) >> 10,
42635 +#endif
42636 mss.resident >> 10,
42637 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
42638 mss.shared_clean >> 10,
42639 diff -urNp linux-2.6.32.41/fs/proc/task_nommu.c linux-2.6.32.41/fs/proc/task_nommu.c
42640 --- linux-2.6.32.41/fs/proc/task_nommu.c 2011-03-27 14:31:47.000000000 -0400
42641 +++ linux-2.6.32.41/fs/proc/task_nommu.c 2011-04-17 15:56:46.000000000 -0400
42642 @@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct
42643 else
42644 bytes += kobjsize(mm);
42645
42646 - if (current->fs && current->fs->users > 1)
42647 + if (current->fs && atomic_read(&current->fs->users) > 1)
42648 sbytes += kobjsize(current->fs);
42649 else
42650 bytes += kobjsize(current->fs);
42651 @@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_fil
42652 if (len < 1)
42653 len = 1;
42654 seq_printf(m, "%*c", len, ' ');
42655 - seq_path(m, &file->f_path, "");
42656 + seq_path(m, &file->f_path, "\n\\");
42657 }
42658
42659 seq_putc(m, '\n');
42660 diff -urNp linux-2.6.32.41/fs/readdir.c linux-2.6.32.41/fs/readdir.c
42661 --- linux-2.6.32.41/fs/readdir.c 2011-03-27 14:31:47.000000000 -0400
42662 +++ linux-2.6.32.41/fs/readdir.c 2011-04-17 15:56:46.000000000 -0400
42663 @@ -16,6 +16,7 @@
42664 #include <linux/security.h>
42665 #include <linux/syscalls.h>
42666 #include <linux/unistd.h>
42667 +#include <linux/namei.h>
42668
42669 #include <asm/uaccess.h>
42670
42671 @@ -67,6 +68,7 @@ struct old_linux_dirent {
42672
42673 struct readdir_callback {
42674 struct old_linux_dirent __user * dirent;
42675 + struct file * file;
42676 int result;
42677 };
42678
42679 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
42680 buf->result = -EOVERFLOW;
42681 return -EOVERFLOW;
42682 }
42683 +
42684 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
42685 + return 0;
42686 +
42687 buf->result++;
42688 dirent = buf->dirent;
42689 if (!access_ok(VERIFY_WRITE, dirent,
42690 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
42691
42692 buf.result = 0;
42693 buf.dirent = dirent;
42694 + buf.file = file;
42695
42696 error = vfs_readdir(file, fillonedir, &buf);
42697 if (buf.result)
42698 @@ -142,6 +149,7 @@ struct linux_dirent {
42699 struct getdents_callback {
42700 struct linux_dirent __user * current_dir;
42701 struct linux_dirent __user * previous;
42702 + struct file * file;
42703 int count;
42704 int error;
42705 };
42706 @@ -162,6 +170,10 @@ static int filldir(void * __buf, const c
42707 buf->error = -EOVERFLOW;
42708 return -EOVERFLOW;
42709 }
42710 +
42711 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
42712 + return 0;
42713 +
42714 dirent = buf->previous;
42715 if (dirent) {
42716 if (__put_user(offset, &dirent->d_off))
42717 @@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
42718 buf.previous = NULL;
42719 buf.count = count;
42720 buf.error = 0;
42721 + buf.file = file;
42722
42723 error = vfs_readdir(file, filldir, &buf);
42724 if (error >= 0)
42725 @@ -228,6 +241,7 @@ out:
42726 struct getdents_callback64 {
42727 struct linux_dirent64 __user * current_dir;
42728 struct linux_dirent64 __user * previous;
42729 + struct file *file;
42730 int count;
42731 int error;
42732 };
42733 @@ -242,6 +256,10 @@ static int filldir64(void * __buf, const
42734 buf->error = -EINVAL; /* only used if we fail.. */
42735 if (reclen > buf->count)
42736 return -EINVAL;
42737 +
42738 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
42739 + return 0;
42740 +
42741 dirent = buf->previous;
42742 if (dirent) {
42743 if (__put_user(offset, &dirent->d_off))
42744 @@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
42745
42746 buf.current_dir = dirent;
42747 buf.previous = NULL;
42748 + buf.file = file;
42749 buf.count = count;
42750 buf.error = 0;
42751
42752 diff -urNp linux-2.6.32.41/fs/reiserfs/dir.c linux-2.6.32.41/fs/reiserfs/dir.c
42753 --- linux-2.6.32.41/fs/reiserfs/dir.c 2011-03-27 14:31:47.000000000 -0400
42754 +++ linux-2.6.32.41/fs/reiserfs/dir.c 2011-05-16 21:46:57.000000000 -0400
42755 @@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
42756 struct reiserfs_dir_entry de;
42757 int ret = 0;
42758
42759 + pax_track_stack();
42760 +
42761 reiserfs_write_lock(inode->i_sb);
42762
42763 reiserfs_check_lock_depth(inode->i_sb, "readdir");
42764 diff -urNp linux-2.6.32.41/fs/reiserfs/do_balan.c linux-2.6.32.41/fs/reiserfs/do_balan.c
42765 --- linux-2.6.32.41/fs/reiserfs/do_balan.c 2011-03-27 14:31:47.000000000 -0400
42766 +++ linux-2.6.32.41/fs/reiserfs/do_balan.c 2011-04-17 15:56:46.000000000 -0400
42767 @@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb,
42768 return;
42769 }
42770
42771 - atomic_inc(&(fs_generation(tb->tb_sb)));
42772 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
42773 do_balance_starts(tb);
42774
42775 /* balance leaf returns 0 except if combining L R and S into
42776 diff -urNp linux-2.6.32.41/fs/reiserfs/item_ops.c linux-2.6.32.41/fs/reiserfs/item_ops.c
42777 --- linux-2.6.32.41/fs/reiserfs/item_ops.c 2011-03-27 14:31:47.000000000 -0400
42778 +++ linux-2.6.32.41/fs/reiserfs/item_ops.c 2011-04-17 15:56:46.000000000 -0400
42779 @@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_i
42780 vi->vi_index, vi->vi_type, vi->vi_ih);
42781 }
42782
42783 -static struct item_operations stat_data_ops = {
42784 +static const struct item_operations stat_data_ops = {
42785 .bytes_number = sd_bytes_number,
42786 .decrement_key = sd_decrement_key,
42787 .is_left_mergeable = sd_is_left_mergeable,
42788 @@ -196,7 +196,7 @@ static void direct_print_vi(struct virtu
42789 vi->vi_index, vi->vi_type, vi->vi_ih);
42790 }
42791
42792 -static struct item_operations direct_ops = {
42793 +static const struct item_operations direct_ops = {
42794 .bytes_number = direct_bytes_number,
42795 .decrement_key = direct_decrement_key,
42796 .is_left_mergeable = direct_is_left_mergeable,
42797 @@ -341,7 +341,7 @@ static void indirect_print_vi(struct vir
42798 vi->vi_index, vi->vi_type, vi->vi_ih);
42799 }
42800
42801 -static struct item_operations indirect_ops = {
42802 +static const struct item_operations indirect_ops = {
42803 .bytes_number = indirect_bytes_number,
42804 .decrement_key = indirect_decrement_key,
42805 .is_left_mergeable = indirect_is_left_mergeable,
42806 @@ -628,7 +628,7 @@ static void direntry_print_vi(struct vir
42807 printk("\n");
42808 }
42809
42810 -static struct item_operations direntry_ops = {
42811 +static const struct item_operations direntry_ops = {
42812 .bytes_number = direntry_bytes_number,
42813 .decrement_key = direntry_decrement_key,
42814 .is_left_mergeable = direntry_is_left_mergeable,
42815 @@ -724,7 +724,7 @@ static void errcatch_print_vi(struct vir
42816 "Invalid item type observed, run fsck ASAP");
42817 }
42818
42819 -static struct item_operations errcatch_ops = {
42820 +static const struct item_operations errcatch_ops = {
42821 errcatch_bytes_number,
42822 errcatch_decrement_key,
42823 errcatch_is_left_mergeable,
42824 @@ -746,7 +746,7 @@ static struct item_operations errcatch_o
42825 #error Item types must use disk-format assigned values.
42826 #endif
42827
42828 -struct item_operations *item_ops[TYPE_ANY + 1] = {
42829 +const struct item_operations * const item_ops[TYPE_ANY + 1] = {
42830 &stat_data_ops,
42831 &indirect_ops,
42832 &direct_ops,
42833 diff -urNp linux-2.6.32.41/fs/reiserfs/journal.c linux-2.6.32.41/fs/reiserfs/journal.c
42834 --- linux-2.6.32.41/fs/reiserfs/journal.c 2011-03-27 14:31:47.000000000 -0400
42835 +++ linux-2.6.32.41/fs/reiserfs/journal.c 2011-05-16 21:46:57.000000000 -0400
42836 @@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_brea
42837 struct buffer_head *bh;
42838 int i, j;
42839
42840 + pax_track_stack();
42841 +
42842 bh = __getblk(dev, block, bufsize);
42843 if (buffer_uptodate(bh))
42844 return (bh);
42845 diff -urNp linux-2.6.32.41/fs/reiserfs/namei.c linux-2.6.32.41/fs/reiserfs/namei.c
42846 --- linux-2.6.32.41/fs/reiserfs/namei.c 2011-03-27 14:31:47.000000000 -0400
42847 +++ linux-2.6.32.41/fs/reiserfs/namei.c 2011-05-16 21:46:57.000000000 -0400
42848 @@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode
42849 unsigned long savelink = 1;
42850 struct timespec ctime;
42851
42852 + pax_track_stack();
42853 +
42854 /* three balancings: (1) old name removal, (2) new name insertion
42855 and (3) maybe "save" link insertion
42856 stat data updates: (1) old directory,
42857 diff -urNp linux-2.6.32.41/fs/reiserfs/procfs.c linux-2.6.32.41/fs/reiserfs/procfs.c
42858 --- linux-2.6.32.41/fs/reiserfs/procfs.c 2011-03-27 14:31:47.000000000 -0400
42859 +++ linux-2.6.32.41/fs/reiserfs/procfs.c 2011-05-16 21:46:57.000000000 -0400
42860 @@ -123,7 +123,7 @@ static int show_super(struct seq_file *m
42861 "SMALL_TAILS " : "NO_TAILS ",
42862 replay_only(sb) ? "REPLAY_ONLY " : "",
42863 convert_reiserfs(sb) ? "CONV " : "",
42864 - atomic_read(&r->s_generation_counter),
42865 + atomic_read_unchecked(&r->s_generation_counter),
42866 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
42867 SF(s_do_balance), SF(s_unneeded_left_neighbor),
42868 SF(s_good_search_by_key_reada), SF(s_bmaps),
42869 @@ -309,6 +309,8 @@ static int show_journal(struct seq_file
42870 struct journal_params *jp = &rs->s_v1.s_journal;
42871 char b[BDEVNAME_SIZE];
42872
42873 + pax_track_stack();
42874 +
42875 seq_printf(m, /* on-disk fields */
42876 "jp_journal_1st_block: \t%i\n"
42877 "jp_journal_dev: \t%s[%x]\n"
42878 diff -urNp linux-2.6.32.41/fs/reiserfs/stree.c linux-2.6.32.41/fs/reiserfs/stree.c
42879 --- linux-2.6.32.41/fs/reiserfs/stree.c 2011-03-27 14:31:47.000000000 -0400
42880 +++ linux-2.6.32.41/fs/reiserfs/stree.c 2011-05-16 21:46:57.000000000 -0400
42881 @@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs
42882 int iter = 0;
42883 #endif
42884
42885 + pax_track_stack();
42886 +
42887 BUG_ON(!th->t_trans_id);
42888
42889 init_tb_struct(th, &s_del_balance, sb, path,
42890 @@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct r
42891 int retval;
42892 int quota_cut_bytes = 0;
42893
42894 + pax_track_stack();
42895 +
42896 BUG_ON(!th->t_trans_id);
42897
42898 le_key2cpu_key(&cpu_key, key);
42899 @@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiser
42900 int quota_cut_bytes;
42901 loff_t tail_pos = 0;
42902
42903 + pax_track_stack();
42904 +
42905 BUG_ON(!th->t_trans_id);
42906
42907 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
42908 @@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reis
42909 int retval;
42910 int fs_gen;
42911
42912 + pax_track_stack();
42913 +
42914 BUG_ON(!th->t_trans_id);
42915
42916 fs_gen = get_generation(inode->i_sb);
42917 @@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs
42918 int fs_gen = 0;
42919 int quota_bytes = 0;
42920
42921 + pax_track_stack();
42922 +
42923 BUG_ON(!th->t_trans_id);
42924
42925 if (inode) { /* Do we count quotas for item? */
42926 diff -urNp linux-2.6.32.41/fs/reiserfs/super.c linux-2.6.32.41/fs/reiserfs/super.c
42927 --- linux-2.6.32.41/fs/reiserfs/super.c 2011-03-27 14:31:47.000000000 -0400
42928 +++ linux-2.6.32.41/fs/reiserfs/super.c 2011-05-16 21:46:57.000000000 -0400
42929 @@ -912,6 +912,8 @@ static int reiserfs_parse_options(struct
42930 {.option_name = NULL}
42931 };
42932
42933 + pax_track_stack();
42934 +
42935 *blocks = 0;
42936 if (!options || !*options)
42937 /* use default configuration: create tails, journaling on, no
42938 diff -urNp linux-2.6.32.41/fs/select.c linux-2.6.32.41/fs/select.c
42939 --- linux-2.6.32.41/fs/select.c 2011-03-27 14:31:47.000000000 -0400
42940 +++ linux-2.6.32.41/fs/select.c 2011-05-16 21:46:57.000000000 -0400
42941 @@ -20,6 +20,7 @@
42942 #include <linux/module.h>
42943 #include <linux/slab.h>
42944 #include <linux/poll.h>
42945 +#include <linux/security.h>
42946 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
42947 #include <linux/file.h>
42948 #include <linux/fdtable.h>
42949 @@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, s
42950 int retval, i, timed_out = 0;
42951 unsigned long slack = 0;
42952
42953 + pax_track_stack();
42954 +
42955 rcu_read_lock();
42956 retval = max_select_fd(n, fds);
42957 rcu_read_unlock();
42958 @@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user
42959 /* Allocate small arguments on the stack to save memory and be faster */
42960 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
42961
42962 + pax_track_stack();
42963 +
42964 ret = -EINVAL;
42965 if (n < 0)
42966 goto out_nofds;
42967 @@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *uf
42968 struct poll_list *walk = head;
42969 unsigned long todo = nfds;
42970
42971 + pax_track_stack();
42972 +
42973 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
42974 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
42975 return -EINVAL;
42976
42977 diff -urNp linux-2.6.32.41/fs/seq_file.c linux-2.6.32.41/fs/seq_file.c
42978 --- linux-2.6.32.41/fs/seq_file.c 2011-03-27 14:31:47.000000000 -0400
42979 +++ linux-2.6.32.41/fs/seq_file.c 2011-04-17 15:56:46.000000000 -0400
42980 @@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
42981 return 0;
42982 }
42983 if (!m->buf) {
42984 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
42985 + m->size = PAGE_SIZE;
42986 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
42987 if (!m->buf)
42988 return -ENOMEM;
42989 }
42990 @@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
42991 Eoverflow:
42992 m->op->stop(m, p);
42993 kfree(m->buf);
42994 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
42995 + m->size <<= 1;
42996 + m->buf = kmalloc(m->size, GFP_KERNEL);
42997 return !m->buf ? -ENOMEM : -EAGAIN;
42998 }
42999
43000 @@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
43001 m->version = file->f_version;
43002 /* grab buffer if we didn't have one */
43003 if (!m->buf) {
43004 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
43005 + m->size = PAGE_SIZE;
43006 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
43007 if (!m->buf)
43008 goto Enomem;
43009 }
43010 @@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
43011 goto Fill;
43012 m->op->stop(m, p);
43013 kfree(m->buf);
43014 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
43015 + m->size <<= 1;
43016 + m->buf = kmalloc(m->size, GFP_KERNEL);
43017 if (!m->buf)
43018 goto Enomem;
43019 m->count = 0;
43020 diff -urNp linux-2.6.32.41/fs/smbfs/symlink.c linux-2.6.32.41/fs/smbfs/symlink.c
43021 --- linux-2.6.32.41/fs/smbfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
43022 +++ linux-2.6.32.41/fs/smbfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
43023 @@ -55,7 +55,7 @@ static void *smb_follow_link(struct dent
43024
43025 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
43026 {
43027 - char *s = nd_get_link(nd);
43028 + const char *s = nd_get_link(nd);
43029 if (!IS_ERR(s))
43030 __putname(s);
43031 }
43032 diff -urNp linux-2.6.32.41/fs/splice.c linux-2.6.32.41/fs/splice.c
43033 --- linux-2.6.32.41/fs/splice.c 2011-03-27 14:31:47.000000000 -0400
43034 +++ linux-2.6.32.41/fs/splice.c 2011-05-16 21:46:57.000000000 -0400
43035 @@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode
43036 pipe_lock(pipe);
43037
43038 for (;;) {
43039 - if (!pipe->readers) {
43040 + if (!atomic_read(&pipe->readers)) {
43041 send_sig(SIGPIPE, current, 0);
43042 if (!ret)
43043 ret = -EPIPE;
43044 @@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode
43045 do_wakeup = 0;
43046 }
43047
43048 - pipe->waiting_writers++;
43049 + atomic_inc(&pipe->waiting_writers);
43050 pipe_wait(pipe);
43051 - pipe->waiting_writers--;
43052 + atomic_dec(&pipe->waiting_writers);
43053 }
43054
43055 pipe_unlock(pipe);
43056 @@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *
43057 .spd_release = spd_release_page,
43058 };
43059
43060 + pax_track_stack();
43061 +
43062 index = *ppos >> PAGE_CACHE_SHIFT;
43063 loff = *ppos & ~PAGE_CACHE_MASK;
43064 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
43065 @@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file
43066 old_fs = get_fs();
43067 set_fs(get_ds());
43068 /* The cast to a user pointer is valid due to the set_fs() */
43069 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
43070 + res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
43071 set_fs(old_fs);
43072
43073 return res;
43074 @@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file
43075 old_fs = get_fs();
43076 set_fs(get_ds());
43077 /* The cast to a user pointer is valid due to the set_fs() */
43078 - res = vfs_write(file, (const char __user *)buf, count, &pos);
43079 + res = vfs_write(file, (__force const char __user *)buf, count, &pos);
43080 set_fs(old_fs);
43081
43082 return res;
43083 @@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct
43084 .spd_release = spd_release_page,
43085 };
43086
43087 + pax_track_stack();
43088 +
43089 index = *ppos >> PAGE_CACHE_SHIFT;
43090 offset = *ppos & ~PAGE_CACHE_MASK;
43091 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
43092 @@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct
43093 goto err;
43094
43095 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
43096 - vec[i].iov_base = (void __user *) page_address(page);
43097 + vec[i].iov_base = (__force void __user *) page_address(page);
43098 vec[i].iov_len = this_len;
43099 pages[i] = page;
43100 spd.nr_pages++;
43101 @@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
43102 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
43103 {
43104 while (!pipe->nrbufs) {
43105 - if (!pipe->writers)
43106 + if (!atomic_read(&pipe->writers))
43107 return 0;
43108
43109 - if (!pipe->waiting_writers && sd->num_spliced)
43110 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
43111 return 0;
43112
43113 if (sd->flags & SPLICE_F_NONBLOCK)
43114 @@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct fi
43115 * out of the pipe right after the splice_to_pipe(). So set
43116 * PIPE_READERS appropriately.
43117 */
43118 - pipe->readers = 1;
43119 + atomic_set(&pipe->readers, 1);
43120
43121 current->splice_pipe = pipe;
43122 }
43123 @@ -1592,6 +1596,8 @@ static long vmsplice_to_pipe(struct file
43124 .spd_release = spd_release_page,
43125 };
43126
43127 + pax_track_stack();
43128 +
43129 pipe = pipe_info(file->f_path.dentry->d_inode);
43130 if (!pipe)
43131 return -EBADF;
43132 @@ -1700,9 +1706,9 @@ static int ipipe_prep(struct pipe_inode_
43133 ret = -ERESTARTSYS;
43134 break;
43135 }
43136 - if (!pipe->writers)
43137 + if (!atomic_read(&pipe->writers))
43138 break;
43139 - if (!pipe->waiting_writers) {
43140 + if (!atomic_read(&pipe->waiting_writers)) {
43141 if (flags & SPLICE_F_NONBLOCK) {
43142 ret = -EAGAIN;
43143 break;
43144 @@ -1734,7 +1740,7 @@ static int opipe_prep(struct pipe_inode_
43145 pipe_lock(pipe);
43146
43147 while (pipe->nrbufs >= PIPE_BUFFERS) {
43148 - if (!pipe->readers) {
43149 + if (!atomic_read(&pipe->readers)) {
43150 send_sig(SIGPIPE, current, 0);
43151 ret = -EPIPE;
43152 break;
43153 @@ -1747,9 +1753,9 @@ static int opipe_prep(struct pipe_inode_
43154 ret = -ERESTARTSYS;
43155 break;
43156 }
43157 - pipe->waiting_writers++;
43158 + atomic_inc(&pipe->waiting_writers);
43159 pipe_wait(pipe);
43160 - pipe->waiting_writers--;
43161 + atomic_dec(&pipe->waiting_writers);
43162 }
43163
43164 pipe_unlock(pipe);
43165 @@ -1785,14 +1791,14 @@ retry:
43166 pipe_double_lock(ipipe, opipe);
43167
43168 do {
43169 - if (!opipe->readers) {
43170 + if (!atomic_read(&opipe->readers)) {
43171 send_sig(SIGPIPE, current, 0);
43172 if (!ret)
43173 ret = -EPIPE;
43174 break;
43175 }
43176
43177 - if (!ipipe->nrbufs && !ipipe->writers)
43178 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
43179 break;
43180
43181 /*
43182 @@ -1892,7 +1898,7 @@ static int link_pipe(struct pipe_inode_i
43183 pipe_double_lock(ipipe, opipe);
43184
43185 do {
43186 - if (!opipe->readers) {
43187 + if (!atomic_read(&opipe->readers)) {
43188 send_sig(SIGPIPE, current, 0);
43189 if (!ret)
43190 ret = -EPIPE;
43191 @@ -1937,7 +1943,7 @@ static int link_pipe(struct pipe_inode_i
43192 * return EAGAIN if we have the potential of some data in the
43193 * future, otherwise just return 0
43194 */
43195 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
43196 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
43197 ret = -EAGAIN;
43198
43199 pipe_unlock(ipipe);
43200 diff -urNp linux-2.6.32.41/fs/sysfs/file.c linux-2.6.32.41/fs/sysfs/file.c
43201 --- linux-2.6.32.41/fs/sysfs/file.c 2011-03-27 14:31:47.000000000 -0400
43202 +++ linux-2.6.32.41/fs/sysfs/file.c 2011-05-04 17:56:20.000000000 -0400
43203 @@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
43204
43205 struct sysfs_open_dirent {
43206 atomic_t refcnt;
43207 - atomic_t event;
43208 + atomic_unchecked_t event;
43209 wait_queue_head_t poll;
43210 struct list_head buffers; /* goes through sysfs_buffer.list */
43211 };
43212 @@ -53,7 +53,7 @@ struct sysfs_buffer {
43213 size_t count;
43214 loff_t pos;
43215 char * page;
43216 - struct sysfs_ops * ops;
43217 + const struct sysfs_ops * ops;
43218 struct mutex mutex;
43219 int needs_read_fill;
43220 int event;
43221 @@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentr
43222 {
43223 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
43224 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
43225 - struct sysfs_ops * ops = buffer->ops;
43226 + const struct sysfs_ops * ops = buffer->ops;
43227 int ret = 0;
43228 ssize_t count;
43229
43230 @@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentr
43231 if (!sysfs_get_active_two(attr_sd))
43232 return -ENODEV;
43233
43234 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
43235 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
43236 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
43237
43238 sysfs_put_active_two(attr_sd);
43239 @@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentr
43240 {
43241 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
43242 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
43243 - struct sysfs_ops * ops = buffer->ops;
43244 + const struct sysfs_ops * ops = buffer->ops;
43245 int rc;
43246
43247 /* need attr_sd for attr and ops, its parent for kobj */
43248 @@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct
43249 return -ENOMEM;
43250
43251 atomic_set(&new_od->refcnt, 0);
43252 - atomic_set(&new_od->event, 1);
43253 + atomic_set_unchecked(&new_od->event, 1);
43254 init_waitqueue_head(&new_od->poll);
43255 INIT_LIST_HEAD(&new_od->buffers);
43256 goto retry;
43257 @@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode
43258 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
43259 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
43260 struct sysfs_buffer *buffer;
43261 - struct sysfs_ops *ops;
43262 + const struct sysfs_ops *ops;
43263 int error = -EACCES;
43264 char *p;
43265
43266 @@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct fi
43267
43268 sysfs_put_active_two(attr_sd);
43269
43270 - if (buffer->event != atomic_read(&od->event))
43271 + if (buffer->event != atomic_read_unchecked(&od->event))
43272 goto trigger;
43273
43274 return DEFAULT_POLLMASK;
43275 @@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_di
43276
43277 od = sd->s_attr.open;
43278 if (od) {
43279 - atomic_inc(&od->event);
43280 + atomic_inc_unchecked(&od->event);
43281 wake_up_interruptible(&od->poll);
43282 }
43283
43284 diff -urNp linux-2.6.32.41/fs/sysfs/mount.c linux-2.6.32.41/fs/sysfs/mount.c
43285 --- linux-2.6.32.41/fs/sysfs/mount.c 2011-03-27 14:31:47.000000000 -0400
43286 +++ linux-2.6.32.41/fs/sysfs/mount.c 2011-04-17 15:56:46.000000000 -0400
43287 @@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
43288 .s_name = "",
43289 .s_count = ATOMIC_INIT(1),
43290 .s_flags = SYSFS_DIR,
43291 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
43292 + .s_mode = S_IFDIR | S_IRWXU,
43293 +#else
43294 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
43295 +#endif
43296 .s_ino = 1,
43297 };
43298
43299 diff -urNp linux-2.6.32.41/fs/sysfs/symlink.c linux-2.6.32.41/fs/sysfs/symlink.c
43300 --- linux-2.6.32.41/fs/sysfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
43301 +++ linux-2.6.32.41/fs/sysfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
43302 @@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct de
43303
43304 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
43305 {
43306 - char *page = nd_get_link(nd);
43307 + const char *page = nd_get_link(nd);
43308 if (!IS_ERR(page))
43309 free_page((unsigned long)page);
43310 }
43311 diff -urNp linux-2.6.32.41/fs/udf/balloc.c linux-2.6.32.41/fs/udf/balloc.c
43312 --- linux-2.6.32.41/fs/udf/balloc.c 2011-03-27 14:31:47.000000000 -0400
43313 +++ linux-2.6.32.41/fs/udf/balloc.c 2011-04-17 15:56:46.000000000 -0400
43314 @@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struc
43315
43316 mutex_lock(&sbi->s_alloc_mutex);
43317 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
43318 - if (bloc->logicalBlockNum < 0 ||
43319 - (bloc->logicalBlockNum + count) >
43320 - partmap->s_partition_len) {
43321 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
43322 udf_debug("%d < %d || %d + %d > %d\n",
43323 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
43324 count, partmap->s_partition_len);
43325 @@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct
43326
43327 mutex_lock(&sbi->s_alloc_mutex);
43328 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
43329 - if (bloc->logicalBlockNum < 0 ||
43330 - (bloc->logicalBlockNum + count) >
43331 - partmap->s_partition_len) {
43332 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
43333 udf_debug("%d < %d || %d + %d > %d\n",
43334 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
43335 partmap->s_partition_len);
43336 diff -urNp linux-2.6.32.41/fs/udf/inode.c linux-2.6.32.41/fs/udf/inode.c
43337 --- linux-2.6.32.41/fs/udf/inode.c 2011-03-27 14:31:47.000000000 -0400
43338 +++ linux-2.6.32.41/fs/udf/inode.c 2011-05-16 21:46:57.000000000 -0400
43339 @@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(
43340 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
43341 int lastblock = 0;
43342
43343 + pax_track_stack();
43344 +
43345 prev_epos.offset = udf_file_entry_alloc_offset(inode);
43346 prev_epos.block = iinfo->i_location;
43347 prev_epos.bh = NULL;
43348 diff -urNp linux-2.6.32.41/fs/udf/misc.c linux-2.6.32.41/fs/udf/misc.c
43349 --- linux-2.6.32.41/fs/udf/misc.c 2011-03-27 14:31:47.000000000 -0400
43350 +++ linux-2.6.32.41/fs/udf/misc.c 2011-04-23 12:56:11.000000000 -0400
43351 @@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
43352
43353 u8 udf_tag_checksum(const struct tag *t)
43354 {
43355 - u8 *data = (u8 *)t;
43356 + const u8 *data = (const u8 *)t;
43357 u8 checksum = 0;
43358 int i;
43359 for (i = 0; i < sizeof(struct tag); ++i)
43360 diff -urNp linux-2.6.32.41/fs/utimes.c linux-2.6.32.41/fs/utimes.c
43361 --- linux-2.6.32.41/fs/utimes.c 2011-03-27 14:31:47.000000000 -0400
43362 +++ linux-2.6.32.41/fs/utimes.c 2011-04-17 15:56:46.000000000 -0400
43363 @@ -1,6 +1,7 @@
43364 #include <linux/compiler.h>
43365 #include <linux/file.h>
43366 #include <linux/fs.h>
43367 +#include <linux/security.h>
43368 #include <linux/linkage.h>
43369 #include <linux/mount.h>
43370 #include <linux/namei.h>
43371 @@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
43372 goto mnt_drop_write_and_out;
43373 }
43374 }
43375 +
43376 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
43377 + error = -EACCES;
43378 + goto mnt_drop_write_and_out;
43379 + }
43380 +
43381 mutex_lock(&inode->i_mutex);
43382 error = notify_change(path->dentry, &newattrs);
43383 mutex_unlock(&inode->i_mutex);
43384 diff -urNp linux-2.6.32.41/fs/xattr_acl.c linux-2.6.32.41/fs/xattr_acl.c
43385 --- linux-2.6.32.41/fs/xattr_acl.c 2011-03-27 14:31:47.000000000 -0400
43386 +++ linux-2.6.32.41/fs/xattr_acl.c 2011-04-17 15:56:46.000000000 -0400
43387 @@ -17,8 +17,8 @@
43388 struct posix_acl *
43389 posix_acl_from_xattr(const void *value, size_t size)
43390 {
43391 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
43392 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
43393 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
43394 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
43395 int count;
43396 struct posix_acl *acl;
43397 struct posix_acl_entry *acl_e;
43398 diff -urNp linux-2.6.32.41/fs/xattr.c linux-2.6.32.41/fs/xattr.c
43399 --- linux-2.6.32.41/fs/xattr.c 2011-03-27 14:31:47.000000000 -0400
43400 +++ linux-2.6.32.41/fs/xattr.c 2011-04-17 15:56:46.000000000 -0400
43401 @@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
43402 * Extended attribute SET operations
43403 */
43404 static long
43405 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
43406 +setxattr(struct path *path, const char __user *name, const void __user *value,
43407 size_t size, int flags)
43408 {
43409 int error;
43410 @@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __
43411 return PTR_ERR(kvalue);
43412 }
43413
43414 - error = vfs_setxattr(d, kname, kvalue, size, flags);
43415 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
43416 + error = -EACCES;
43417 + goto out;
43418 + }
43419 +
43420 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
43421 +out:
43422 kfree(kvalue);
43423 return error;
43424 }
43425 @@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
43426 return error;
43427 error = mnt_want_write(path.mnt);
43428 if (!error) {
43429 - error = setxattr(path.dentry, name, value, size, flags);
43430 + error = setxattr(&path, name, value, size, flags);
43431 mnt_drop_write(path.mnt);
43432 }
43433 path_put(&path);
43434 @@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
43435 return error;
43436 error = mnt_want_write(path.mnt);
43437 if (!error) {
43438 - error = setxattr(path.dentry, name, value, size, flags);
43439 + error = setxattr(&path, name, value, size, flags);
43440 mnt_drop_write(path.mnt);
43441 }
43442 path_put(&path);
43443 @@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
43444 const void __user *,value, size_t, size, int, flags)
43445 {
43446 struct file *f;
43447 - struct dentry *dentry;
43448 int error = -EBADF;
43449
43450 f = fget(fd);
43451 if (!f)
43452 return error;
43453 - dentry = f->f_path.dentry;
43454 - audit_inode(NULL, dentry);
43455 + audit_inode(NULL, f->f_path.dentry);
43456 error = mnt_want_write_file(f);
43457 if (!error) {
43458 - error = setxattr(dentry, name, value, size, flags);
43459 + error = setxattr(&f->f_path, name, value, size, flags);
43460 mnt_drop_write(f->f_path.mnt);
43461 }
43462 fput(f);
43463 diff -urNp linux-2.6.32.41/fs/xfs/linux-2.6/xfs_ioctl32.c linux-2.6.32.41/fs/xfs/linux-2.6/xfs_ioctl32.c
43464 --- linux-2.6.32.41/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-03-27 14:31:47.000000000 -0400
43465 +++ linux-2.6.32.41/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-04-17 15:56:46.000000000 -0400
43466 @@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
43467 xfs_fsop_geom_t fsgeo;
43468 int error;
43469
43470 + memset(&fsgeo, 0, sizeof(fsgeo));
43471 error = xfs_fs_geometry(mp, &fsgeo, 3);
43472 if (error)
43473 return -error;
43474 diff -urNp linux-2.6.32.41/fs/xfs/linux-2.6/xfs_ioctl.c linux-2.6.32.41/fs/xfs/linux-2.6/xfs_ioctl.c
43475 --- linux-2.6.32.41/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 17:00:52.000000000 -0400
43476 +++ linux-2.6.32.41/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 20:07:09.000000000 -0400
43477 @@ -134,7 +134,7 @@ xfs_find_handle(
43478 }
43479
43480 error = -EFAULT;
43481 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
43482 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
43483 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
43484 goto out_put;
43485
43486 @@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
43487 if (IS_ERR(dentry))
43488 return PTR_ERR(dentry);
43489
43490 - kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
43491 + kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
43492 if (!kbuf)
43493 goto out_dput;
43494
43495 @@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
43496 xfs_mount_t *mp,
43497 void __user *arg)
43498 {
43499 - xfs_fsop_geom_t fsgeo;
43500 + xfs_fsop_geom_t fsgeo;
43501 int error;
43502
43503 error = xfs_fs_geometry(mp, &fsgeo, 3);
43504 diff -urNp linux-2.6.32.41/fs/xfs/linux-2.6/xfs_iops.c linux-2.6.32.41/fs/xfs/linux-2.6/xfs_iops.c
43505 --- linux-2.6.32.41/fs/xfs/linux-2.6/xfs_iops.c 2011-03-27 14:31:47.000000000 -0400
43506 +++ linux-2.6.32.41/fs/xfs/linux-2.6/xfs_iops.c 2011-04-17 15:56:46.000000000 -0400
43507 @@ -468,7 +468,7 @@ xfs_vn_put_link(
43508 struct nameidata *nd,
43509 void *p)
43510 {
43511 - char *s = nd_get_link(nd);
43512 + const char *s = nd_get_link(nd);
43513
43514 if (!IS_ERR(s))
43515 kfree(s);
43516 diff -urNp linux-2.6.32.41/fs/xfs/xfs_bmap.c linux-2.6.32.41/fs/xfs/xfs_bmap.c
43517 --- linux-2.6.32.41/fs/xfs/xfs_bmap.c 2011-03-27 14:31:47.000000000 -0400
43518 +++ linux-2.6.32.41/fs/xfs/xfs_bmap.c 2011-04-17 15:56:46.000000000 -0400
43519 @@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
43520 int nmap,
43521 int ret_nmap);
43522 #else
43523 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
43524 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
43525 #endif /* DEBUG */
43526
43527 #if defined(XFS_RW_TRACE)
43528 diff -urNp linux-2.6.32.41/fs/xfs/xfs_dir2_sf.c linux-2.6.32.41/fs/xfs/xfs_dir2_sf.c
43529 --- linux-2.6.32.41/fs/xfs/xfs_dir2_sf.c 2011-03-27 14:31:47.000000000 -0400
43530 +++ linux-2.6.32.41/fs/xfs/xfs_dir2_sf.c 2011-04-18 22:07:30.000000000 -0400
43531 @@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
43532 }
43533
43534 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
43535 - if (filldir(dirent, sfep->name, sfep->namelen,
43536 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
43537 + char name[sfep->namelen];
43538 + memcpy(name, sfep->name, sfep->namelen);
43539 + if (filldir(dirent, name, sfep->namelen,
43540 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
43541 + *offset = off & 0x7fffffff;
43542 + return 0;
43543 + }
43544 + } else if (filldir(dirent, sfep->name, sfep->namelen,
43545 off & 0x7fffffff, ino, DT_UNKNOWN)) {
43546 *offset = off & 0x7fffffff;
43547 return 0;
43548 diff -urNp linux-2.6.32.41/grsecurity/gracl_alloc.c linux-2.6.32.41/grsecurity/gracl_alloc.c
43549 --- linux-2.6.32.41/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
43550 +++ linux-2.6.32.41/grsecurity/gracl_alloc.c 2011-04-17 15:56:46.000000000 -0400
43551 @@ -0,0 +1,105 @@
43552 +#include <linux/kernel.h>
43553 +#include <linux/mm.h>
43554 +#include <linux/slab.h>
43555 +#include <linux/vmalloc.h>
43556 +#include <linux/gracl.h>
43557 +#include <linux/grsecurity.h>
43558 +
43559 +static unsigned long alloc_stack_next = 1;
43560 +static unsigned long alloc_stack_size = 1;
43561 +static void **alloc_stack;
43562 +
43563 +static __inline__ int
43564 +alloc_pop(void)
43565 +{
43566 + if (alloc_stack_next == 1)
43567 + return 0;
43568 +
43569 + kfree(alloc_stack[alloc_stack_next - 2]);
43570 +
43571 + alloc_stack_next--;
43572 +
43573 + return 1;
43574 +}
43575 +
43576 +static __inline__ int
43577 +alloc_push(void *buf)
43578 +{
43579 + if (alloc_stack_next >= alloc_stack_size)
43580 + return 1;
43581 +
43582 + alloc_stack[alloc_stack_next - 1] = buf;
43583 +
43584 + alloc_stack_next++;
43585 +
43586 + return 0;
43587 +}
43588 +
43589 +void *
43590 +acl_alloc(unsigned long len)
43591 +{
43592 + void *ret = NULL;
43593 +
43594 + if (!len || len > PAGE_SIZE)
43595 + goto out;
43596 +
43597 + ret = kmalloc(len, GFP_KERNEL);
43598 +
43599 + if (ret) {
43600 + if (alloc_push(ret)) {
43601 + kfree(ret);
43602 + ret = NULL;
43603 + }
43604 + }
43605 +
43606 +out:
43607 + return ret;
43608 +}
43609 +
43610 +void *
43611 +acl_alloc_num(unsigned long num, unsigned long len)
43612 +{
43613 + if (!len || (num > (PAGE_SIZE / len)))
43614 + return NULL;
43615 +
43616 + return acl_alloc(num * len);
43617 +}
43618 +
43619 +void
43620 +acl_free_all(void)
43621 +{
43622 + if (gr_acl_is_enabled() || !alloc_stack)
43623 + return;
43624 +
43625 + while (alloc_pop()) ;
43626 +
43627 + if (alloc_stack) {
43628 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
43629 + kfree(alloc_stack);
43630 + else
43631 + vfree(alloc_stack);
43632 + }
43633 +
43634 + alloc_stack = NULL;
43635 + alloc_stack_size = 1;
43636 + alloc_stack_next = 1;
43637 +
43638 + return;
43639 +}
43640 +
43641 +int
43642 +acl_alloc_stack_init(unsigned long size)
43643 +{
43644 + if ((size * sizeof (void *)) <= PAGE_SIZE)
43645 + alloc_stack =
43646 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
43647 + else
43648 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
43649 +
43650 + alloc_stack_size = size;
43651 +
43652 + if (!alloc_stack)
43653 + return 0;
43654 + else
43655 + return 1;
43656 +}
43657 diff -urNp linux-2.6.32.41/grsecurity/gracl.c linux-2.6.32.41/grsecurity/gracl.c
43658 --- linux-2.6.32.41/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
43659 +++ linux-2.6.32.41/grsecurity/gracl.c 2011-06-11 16:24:26.000000000 -0400
43660 @@ -0,0 +1,4085 @@
43661 +#include <linux/kernel.h>
43662 +#include <linux/module.h>
43663 +#include <linux/sched.h>
43664 +#include <linux/mm.h>
43665 +#include <linux/file.h>
43666 +#include <linux/fs.h>
43667 +#include <linux/namei.h>
43668 +#include <linux/mount.h>
43669 +#include <linux/tty.h>
43670 +#include <linux/proc_fs.h>
43671 +#include <linux/smp_lock.h>
43672 +#include <linux/slab.h>
43673 +#include <linux/vmalloc.h>
43674 +#include <linux/types.h>
43675 +#include <linux/sysctl.h>
43676 +#include <linux/netdevice.h>
43677 +#include <linux/ptrace.h>
43678 +#include <linux/gracl.h>
43679 +#include <linux/gralloc.h>
43680 +#include <linux/grsecurity.h>
43681 +#include <linux/grinternal.h>
43682 +#include <linux/pid_namespace.h>
43683 +#include <linux/fdtable.h>
43684 +#include <linux/percpu.h>
43685 +
43686 +#include <asm/uaccess.h>
43687 +#include <asm/errno.h>
43688 +#include <asm/mman.h>
43689 +
43690 +static struct acl_role_db acl_role_set;
43691 +static struct name_db name_set;
43692 +static struct inodev_db inodev_set;
43693 +
43694 +/* for keeping track of userspace pointers used for subjects, so we
43695 + can share references in the kernel as well
43696 +*/
43697 +
43698 +static struct dentry *real_root;
43699 +static struct vfsmount *real_root_mnt;
43700 +
43701 +static struct acl_subj_map_db subj_map_set;
43702 +
43703 +static struct acl_role_label *default_role;
43704 +
43705 +static struct acl_role_label *role_list;
43706 +
43707 +static u16 acl_sp_role_value;
43708 +
43709 +extern char *gr_shared_page[4];
43710 +static DEFINE_MUTEX(gr_dev_mutex);
43711 +DEFINE_RWLOCK(gr_inode_lock);
43712 +
43713 +struct gr_arg *gr_usermode;
43714 +
43715 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
43716 +
43717 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
43718 +extern void gr_clear_learn_entries(void);
43719 +
43720 +#ifdef CONFIG_GRKERNSEC_RESLOG
43721 +extern void gr_log_resource(const struct task_struct *task,
43722 + const int res, const unsigned long wanted, const int gt);
43723 +#endif
43724 +
43725 +unsigned char *gr_system_salt;
43726 +unsigned char *gr_system_sum;
43727 +
43728 +static struct sprole_pw **acl_special_roles = NULL;
43729 +static __u16 num_sprole_pws = 0;
43730 +
43731 +static struct acl_role_label *kernel_role = NULL;
43732 +
43733 +static unsigned int gr_auth_attempts = 0;
43734 +static unsigned long gr_auth_expires = 0UL;
43735 +
43736 +#ifdef CONFIG_NET
43737 +extern struct vfsmount *sock_mnt;
43738 +#endif
43739 +extern struct vfsmount *pipe_mnt;
43740 +extern struct vfsmount *shm_mnt;
43741 +#ifdef CONFIG_HUGETLBFS
43742 +extern struct vfsmount *hugetlbfs_vfsmount;
43743 +#endif
43744 +
43745 +static struct acl_object_label *fakefs_obj_rw;
43746 +static struct acl_object_label *fakefs_obj_rwx;
43747 +
43748 +extern int gr_init_uidset(void);
43749 +extern void gr_free_uidset(void);
43750 +extern void gr_remove_uid(uid_t uid);
43751 +extern int gr_find_uid(uid_t uid);
43752 +
43753 +__inline__ int
43754 +gr_acl_is_enabled(void)
43755 +{
43756 + return (gr_status & GR_READY);
43757 +}
43758 +
43759 +#ifdef CONFIG_BTRFS_FS
43760 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
43761 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
43762 +#endif
43763 +
43764 +static inline dev_t __get_dev(const struct dentry *dentry)
43765 +{
43766 +#ifdef CONFIG_BTRFS_FS
43767 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
43768 + return get_btrfs_dev_from_inode(dentry->d_inode);
43769 + else
43770 +#endif
43771 + return dentry->d_inode->i_sb->s_dev;
43772 +}
43773 +
43774 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
43775 +{
43776 + return __get_dev(dentry);
43777 +}
43778 +
43779 +static char gr_task_roletype_to_char(struct task_struct *task)
43780 +{
43781 + switch (task->role->roletype &
43782 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
43783 + GR_ROLE_SPECIAL)) {
43784 + case GR_ROLE_DEFAULT:
43785 + return 'D';
43786 + case GR_ROLE_USER:
43787 + return 'U';
43788 + case GR_ROLE_GROUP:
43789 + return 'G';
43790 + case GR_ROLE_SPECIAL:
43791 + return 'S';
43792 + }
43793 +
43794 + return 'X';
43795 +}
43796 +
43797 +char gr_roletype_to_char(void)
43798 +{
43799 + return gr_task_roletype_to_char(current);
43800 +}
43801 +
43802 +__inline__ int
43803 +gr_acl_tpe_check(void)
43804 +{
43805 + if (unlikely(!(gr_status & GR_READY)))
43806 + return 0;
43807 + if (current->role->roletype & GR_ROLE_TPE)
43808 + return 1;
43809 + else
43810 + return 0;
43811 +}
43812 +
43813 +int
43814 +gr_handle_rawio(const struct inode *inode)
43815 +{
43816 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
43817 + if (inode && S_ISBLK(inode->i_mode) &&
43818 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
43819 + !capable(CAP_SYS_RAWIO))
43820 + return 1;
43821 +#endif
43822 + return 0;
43823 +}
43824 +
43825 +static int
43826 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
43827 +{
43828 + if (likely(lena != lenb))
43829 + return 0;
43830 +
43831 + return !memcmp(a, b, lena);
43832 +}
43833 +
43834 +/* this must be called with vfsmount_lock and dcache_lock held */
43835 +
43836 +static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
43837 + struct dentry *root, struct vfsmount *rootmnt,
43838 + char *buffer, int buflen)
43839 +{
43840 + char * end = buffer+buflen;
43841 + char * retval;
43842 + int namelen;
43843 +
43844 + *--end = '\0';
43845 + buflen--;
43846 +
43847 + if (buflen < 1)
43848 + goto Elong;
43849 + /* Get '/' right */
43850 + retval = end-1;
43851 + *retval = '/';
43852 +
43853 + for (;;) {
43854 + struct dentry * parent;
43855 +
43856 + if (dentry == root && vfsmnt == rootmnt)
43857 + break;
43858 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
43859 + /* Global root? */
43860 + if (vfsmnt->mnt_parent == vfsmnt)
43861 + goto global_root;
43862 + dentry = vfsmnt->mnt_mountpoint;
43863 + vfsmnt = vfsmnt->mnt_parent;
43864 + continue;
43865 + }
43866 + parent = dentry->d_parent;
43867 + prefetch(parent);
43868 + namelen = dentry->d_name.len;
43869 + buflen -= namelen + 1;
43870 + if (buflen < 0)
43871 + goto Elong;
43872 + end -= namelen;
43873 + memcpy(end, dentry->d_name.name, namelen);
43874 + *--end = '/';
43875 + retval = end;
43876 + dentry = parent;
43877 + }
43878 +
43879 +out:
43880 + return retval;
43881 +
43882 +global_root:
43883 + namelen = dentry->d_name.len;
43884 + buflen -= namelen;
43885 + if (buflen < 0)
43886 + goto Elong;
43887 + retval -= namelen-1; /* hit the slash */
43888 + memcpy(retval, dentry->d_name.name, namelen);
43889 + goto out;
43890 +Elong:
43891 + retval = ERR_PTR(-ENAMETOOLONG);
43892 + goto out;
43893 +}
43894 +
43895 +static char *
43896 +gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
43897 + struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
43898 +{
43899 + char *retval;
43900 +
43901 + retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
43902 + if (unlikely(IS_ERR(retval)))
43903 + retval = strcpy(buf, "<path too long>");
43904 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
43905 + retval[1] = '\0';
43906 +
43907 + return retval;
43908 +}
43909 +
43910 +static char *
43911 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
43912 + char *buf, int buflen)
43913 +{
43914 + char *res;
43915 +
43916 + /* we can use real_root, real_root_mnt, because this is only called
43917 + by the RBAC system */
43918 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
43919 +
43920 + return res;
43921 +}
43922 +
43923 +static char *
43924 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
43925 + char *buf, int buflen)
43926 +{
43927 + char *res;
43928 + struct dentry *root;
43929 + struct vfsmount *rootmnt;
43930 + struct task_struct *reaper = &init_task;
43931 +
43932 + /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
43933 + read_lock(&reaper->fs->lock);
43934 + root = dget(reaper->fs->root.dentry);
43935 + rootmnt = mntget(reaper->fs->root.mnt);
43936 + read_unlock(&reaper->fs->lock);
43937 +
43938 + spin_lock(&dcache_lock);
43939 + spin_lock(&vfsmount_lock);
43940 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
43941 + spin_unlock(&vfsmount_lock);
43942 + spin_unlock(&dcache_lock);
43943 +
43944 + dput(root);
43945 + mntput(rootmnt);
43946 + return res;
43947 +}
43948 +
43949 +static char *
43950 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
43951 +{
43952 + char *ret;
43953 + spin_lock(&dcache_lock);
43954 + spin_lock(&vfsmount_lock);
43955 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
43956 + PAGE_SIZE);
43957 + spin_unlock(&vfsmount_lock);
43958 + spin_unlock(&dcache_lock);
43959 + return ret;
43960 +}
43961 +
43962 +char *
43963 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
43964 +{
43965 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
43966 + PAGE_SIZE);
43967 +}
43968 +
43969 +char *
43970 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
43971 +{
43972 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
43973 + PAGE_SIZE);
43974 +}
43975 +
43976 +char *
43977 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
43978 +{
43979 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
43980 + PAGE_SIZE);
43981 +}
43982 +
43983 +char *
43984 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
43985 +{
43986 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
43987 + PAGE_SIZE);
43988 +}
43989 +
43990 +char *
43991 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
43992 +{
43993 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
43994 + PAGE_SIZE);
43995 +}
43996 +
43997 +__inline__ __u32
43998 +to_gr_audit(const __u32 reqmode)
43999 +{
44000 + /* masks off auditable permission flags, then shifts them to create
44001 + auditing flags, and adds the special case of append auditing if
44002 + we're requesting write */
44003 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
44004 +}
44005 +
44006 +struct acl_subject_label *
44007 +lookup_subject_map(const struct acl_subject_label *userp)
44008 +{
44009 + unsigned int index = shash(userp, subj_map_set.s_size);
44010 + struct subject_map *match;
44011 +
44012 + match = subj_map_set.s_hash[index];
44013 +
44014 + while (match && match->user != userp)
44015 + match = match->next;
44016 +
44017 + if (match != NULL)
44018 + return match->kernel;
44019 + else
44020 + return NULL;
44021 +}
44022 +
44023 +static void
44024 +insert_subj_map_entry(struct subject_map *subjmap)
44025 +{
44026 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
44027 + struct subject_map **curr;
44028 +
44029 + subjmap->prev = NULL;
44030 +
44031 + curr = &subj_map_set.s_hash[index];
44032 + if (*curr != NULL)
44033 + (*curr)->prev = subjmap;
44034 +
44035 + subjmap->next = *curr;
44036 + *curr = subjmap;
44037 +
44038 + return;
44039 +}
44040 +
44041 +static struct acl_role_label *
44042 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
44043 + const gid_t gid)
44044 +{
44045 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
44046 + struct acl_role_label *match;
44047 + struct role_allowed_ip *ipp;
44048 + unsigned int x;
44049 + u32 curr_ip = task->signal->curr_ip;
44050 +
44051 + task->signal->saved_ip = curr_ip;
44052 +
44053 + match = acl_role_set.r_hash[index];
44054 +
44055 + while (match) {
44056 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
44057 + for (x = 0; x < match->domain_child_num; x++) {
44058 + if (match->domain_children[x] == uid)
44059 + goto found;
44060 + }
44061 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
44062 + break;
44063 + match = match->next;
44064 + }
44065 +found:
44066 + if (match == NULL) {
44067 + try_group:
44068 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
44069 + match = acl_role_set.r_hash[index];
44070 +
44071 + while (match) {
44072 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
44073 + for (x = 0; x < match->domain_child_num; x++) {
44074 + if (match->domain_children[x] == gid)
44075 + goto found2;
44076 + }
44077 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
44078 + break;
44079 + match = match->next;
44080 + }
44081 +found2:
44082 + if (match == NULL)
44083 + match = default_role;
44084 + if (match->allowed_ips == NULL)
44085 + return match;
44086 + else {
44087 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
44088 + if (likely
44089 + ((ntohl(curr_ip) & ipp->netmask) ==
44090 + (ntohl(ipp->addr) & ipp->netmask)))
44091 + return match;
44092 + }
44093 + match = default_role;
44094 + }
44095 + } else if (match->allowed_ips == NULL) {
44096 + return match;
44097 + } else {
44098 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
44099 + if (likely
44100 + ((ntohl(curr_ip) & ipp->netmask) ==
44101 + (ntohl(ipp->addr) & ipp->netmask)))
44102 + return match;
44103 + }
44104 + goto try_group;
44105 + }
44106 +
44107 + return match;
44108 +}
44109 +
44110 +struct acl_subject_label *
44111 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
44112 + const struct acl_role_label *role)
44113 +{
44114 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
44115 + struct acl_subject_label *match;
44116 +
44117 + match = role->subj_hash[index];
44118 +
44119 + while (match && (match->inode != ino || match->device != dev ||
44120 + (match->mode & GR_DELETED))) {
44121 + match = match->next;
44122 + }
44123 +
44124 + if (match && !(match->mode & GR_DELETED))
44125 + return match;
44126 + else
44127 + return NULL;
44128 +}
44129 +
44130 +struct acl_subject_label *
44131 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
44132 + const struct acl_role_label *role)
44133 +{
44134 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
44135 + struct acl_subject_label *match;
44136 +
44137 + match = role->subj_hash[index];
44138 +
44139 + while (match && (match->inode != ino || match->device != dev ||
44140 + !(match->mode & GR_DELETED))) {
44141 + match = match->next;
44142 + }
44143 +
44144 + if (match && (match->mode & GR_DELETED))
44145 + return match;
44146 + else
44147 + return NULL;
44148 +}
44149 +
44150 +static struct acl_object_label *
44151 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
44152 + const struct acl_subject_label *subj)
44153 +{
44154 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
44155 + struct acl_object_label *match;
44156 +
44157 + match = subj->obj_hash[index];
44158 +
44159 + while (match && (match->inode != ino || match->device != dev ||
44160 + (match->mode & GR_DELETED))) {
44161 + match = match->next;
44162 + }
44163 +
44164 + if (match && !(match->mode & GR_DELETED))
44165 + return match;
44166 + else
44167 + return NULL;
44168 +}
44169 +
44170 +static struct acl_object_label *
44171 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
44172 + const struct acl_subject_label *subj)
44173 +{
44174 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
44175 + struct acl_object_label *match;
44176 +
44177 + match = subj->obj_hash[index];
44178 +
44179 + while (match && (match->inode != ino || match->device != dev ||
44180 + !(match->mode & GR_DELETED))) {
44181 + match = match->next;
44182 + }
44183 +
44184 + if (match && (match->mode & GR_DELETED))
44185 + return match;
44186 +
44187 + match = subj->obj_hash[index];
44188 +
44189 + while (match && (match->inode != ino || match->device != dev ||
44190 + (match->mode & GR_DELETED))) {
44191 + match = match->next;
44192 + }
44193 +
44194 + if (match && !(match->mode & GR_DELETED))
44195 + return match;
44196 + else
44197 + return NULL;
44198 +}
44199 +
44200 +static struct name_entry *
44201 +lookup_name_entry(const char *name)
44202 +{
44203 + unsigned int len = strlen(name);
44204 + unsigned int key = full_name_hash(name, len);
44205 + unsigned int index = key % name_set.n_size;
44206 + struct name_entry *match;
44207 +
44208 + match = name_set.n_hash[index];
44209 +
44210 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
44211 + match = match->next;
44212 +
44213 + return match;
44214 +}
44215 +
44216 +static struct name_entry *
44217 +lookup_name_entry_create(const char *name)
44218 +{
44219 + unsigned int len = strlen(name);
44220 + unsigned int key = full_name_hash(name, len);
44221 + unsigned int index = key % name_set.n_size;
44222 + struct name_entry *match;
44223 +
44224 + match = name_set.n_hash[index];
44225 +
44226 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
44227 + !match->deleted))
44228 + match = match->next;
44229 +
44230 + if (match && match->deleted)
44231 + return match;
44232 +
44233 + match = name_set.n_hash[index];
44234 +
44235 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
44236 + match->deleted))
44237 + match = match->next;
44238 +
44239 + if (match && !match->deleted)
44240 + return match;
44241 + else
44242 + return NULL;
44243 +}
44244 +
44245 +static struct inodev_entry *
44246 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
44247 +{
44248 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
44249 + struct inodev_entry *match;
44250 +
44251 + match = inodev_set.i_hash[index];
44252 +
44253 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
44254 + match = match->next;
44255 +
44256 + return match;
44257 +}
44258 +
44259 +static void
44260 +insert_inodev_entry(struct inodev_entry *entry)
44261 +{
44262 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
44263 + inodev_set.i_size);
44264 + struct inodev_entry **curr;
44265 +
44266 + entry->prev = NULL;
44267 +
44268 + curr = &inodev_set.i_hash[index];
44269 + if (*curr != NULL)
44270 + (*curr)->prev = entry;
44271 +
44272 + entry->next = *curr;
44273 + *curr = entry;
44274 +
44275 + return;
44276 +}
44277 +
44278 +static void
44279 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
44280 +{
44281 + unsigned int index =
44282 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
44283 + struct acl_role_label **curr;
44284 + struct acl_role_label *tmp;
44285 +
44286 + curr = &acl_role_set.r_hash[index];
44287 +
44288 + /* if role was already inserted due to domains and already has
44289 + a role in the same bucket as it attached, then we need to
44290 + combine these two buckets
44291 + */
44292 + if (role->next) {
44293 + tmp = role->next;
44294 + while (tmp->next)
44295 + tmp = tmp->next;
44296 + tmp->next = *curr;
44297 + } else
44298 + role->next = *curr;
44299 + *curr = role;
44300 +
44301 + return;
44302 +}
44303 +
44304 +static void
44305 +insert_acl_role_label(struct acl_role_label *role)
44306 +{
44307 + int i;
44308 +
44309 + if (role_list == NULL) {
44310 + role_list = role;
44311 + role->prev = NULL;
44312 + } else {
44313 + role->prev = role_list;
44314 + role_list = role;
44315 + }
44316 +
44317 + /* used for hash chains */
44318 + role->next = NULL;
44319 +
44320 + if (role->roletype & GR_ROLE_DOMAIN) {
44321 + for (i = 0; i < role->domain_child_num; i++)
44322 + __insert_acl_role_label(role, role->domain_children[i]);
44323 + } else
44324 + __insert_acl_role_label(role, role->uidgid);
44325 +}
44326 +
44327 +static int
44328 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
44329 +{
44330 + struct name_entry **curr, *nentry;
44331 + struct inodev_entry *ientry;
44332 + unsigned int len = strlen(name);
44333 + unsigned int key = full_name_hash(name, len);
44334 + unsigned int index = key % name_set.n_size;
44335 +
44336 + curr = &name_set.n_hash[index];
44337 +
44338 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
44339 + curr = &((*curr)->next);
44340 +
44341 + if (*curr != NULL)
44342 + return 1;
44343 +
44344 + nentry = acl_alloc(sizeof (struct name_entry));
44345 + if (nentry == NULL)
44346 + return 0;
44347 + ientry = acl_alloc(sizeof (struct inodev_entry));
44348 + if (ientry == NULL)
44349 + return 0;
44350 + ientry->nentry = nentry;
44351 +
44352 + nentry->key = key;
44353 + nentry->name = name;
44354 + nentry->inode = inode;
44355 + nentry->device = device;
44356 + nentry->len = len;
44357 + nentry->deleted = deleted;
44358 +
44359 + nentry->prev = NULL;
44360 + curr = &name_set.n_hash[index];
44361 + if (*curr != NULL)
44362 + (*curr)->prev = nentry;
44363 + nentry->next = *curr;
44364 + *curr = nentry;
44365 +
44366 + /* insert us into the table searchable by inode/dev */
44367 + insert_inodev_entry(ientry);
44368 +
44369 + return 1;
44370 +}
44371 +
44372 +static void
44373 +insert_acl_obj_label(struct acl_object_label *obj,
44374 + struct acl_subject_label *subj)
44375 +{
44376 + unsigned int index =
44377 + fhash(obj->inode, obj->device, subj->obj_hash_size);
44378 + struct acl_object_label **curr;
44379 +
44380 +
44381 + obj->prev = NULL;
44382 +
44383 + curr = &subj->obj_hash[index];
44384 + if (*curr != NULL)
44385 + (*curr)->prev = obj;
44386 +
44387 + obj->next = *curr;
44388 + *curr = obj;
44389 +
44390 + return;
44391 +}
44392 +
44393 +static void
44394 +insert_acl_subj_label(struct acl_subject_label *obj,
44395 + struct acl_role_label *role)
44396 +{
44397 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
44398 + struct acl_subject_label **curr;
44399 +
44400 + obj->prev = NULL;
44401 +
44402 + curr = &role->subj_hash[index];
44403 + if (*curr != NULL)
44404 + (*curr)->prev = obj;
44405 +
44406 + obj->next = *curr;
44407 + *curr = obj;
44408 +
44409 + return;
44410 +}
44411 +
44412 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
44413 +
44414 +static void *
44415 +create_table(__u32 * len, int elementsize)
44416 +{
44417 + unsigned int table_sizes[] = {
44418 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
44419 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
44420 + 4194301, 8388593, 16777213, 33554393, 67108859
44421 + };
44422 + void *newtable = NULL;
44423 + unsigned int pwr = 0;
44424 +
44425 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
44426 + table_sizes[pwr] <= *len)
44427 + pwr++;
44428 +
44429 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
44430 + return newtable;
44431 +
44432 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
44433 + newtable =
44434 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
44435 + else
44436 + newtable = vmalloc(table_sizes[pwr] * elementsize);
44437 +
44438 + *len = table_sizes[pwr];
44439 +
44440 + return newtable;
44441 +}
44442 +
44443 +static int
44444 +init_variables(const struct gr_arg *arg)
44445 +{
44446 + struct task_struct *reaper = &init_task;
44447 + unsigned int stacksize;
44448 +
44449 + subj_map_set.s_size = arg->role_db.num_subjects;
44450 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
44451 + name_set.n_size = arg->role_db.num_objects;
44452 + inodev_set.i_size = arg->role_db.num_objects;
44453 +
44454 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
44455 + !name_set.n_size || !inodev_set.i_size)
44456 + return 1;
44457 +
44458 + if (!gr_init_uidset())
44459 + return 1;
44460 +
44461 + /* set up the stack that holds allocation info */
44462 +
44463 + stacksize = arg->role_db.num_pointers + 5;
44464 +
44465 + if (!acl_alloc_stack_init(stacksize))
44466 + return 1;
44467 +
44468 + /* grab reference for the real root dentry and vfsmount */
44469 + read_lock(&reaper->fs->lock);
44470 + real_root = dget(reaper->fs->root.dentry);
44471 + real_root_mnt = mntget(reaper->fs->root.mnt);
44472 + read_unlock(&reaper->fs->lock);
44473 +
44474 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44475 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
44476 +#endif
44477 +
44478 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
44479 + if (fakefs_obj_rw == NULL)
44480 + return 1;
44481 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
44482 +
44483 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
44484 + if (fakefs_obj_rwx == NULL)
44485 + return 1;
44486 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
44487 +
44488 + subj_map_set.s_hash =
44489 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
44490 + acl_role_set.r_hash =
44491 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
44492 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
44493 + inodev_set.i_hash =
44494 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
44495 +
44496 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
44497 + !name_set.n_hash || !inodev_set.i_hash)
44498 + return 1;
44499 +
44500 + memset(subj_map_set.s_hash, 0,
44501 + sizeof(struct subject_map *) * subj_map_set.s_size);
44502 + memset(acl_role_set.r_hash, 0,
44503 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
44504 + memset(name_set.n_hash, 0,
44505 + sizeof (struct name_entry *) * name_set.n_size);
44506 + memset(inodev_set.i_hash, 0,
44507 + sizeof (struct inodev_entry *) * inodev_set.i_size);
44508 +
44509 + return 0;
44510 +}
44511 +
44512 +/* free information that is not needed after startup;
44513 +   currently this contains the user->kernel pointer mappings for subjects
44514 +*/
44515 +
44516 +static void
44517 +free_init_variables(void)
44518 +{
44519 + __u32 i;
44520 +
44521 + if (subj_map_set.s_hash) {
44522 + for (i = 0; i < subj_map_set.s_size; i++) {
44523 + if (subj_map_set.s_hash[i]) {
44524 + kfree(subj_map_set.s_hash[i]);
44525 + subj_map_set.s_hash[i] = NULL;
44526 + }
44527 + }
44528 +
44529 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
44530 + PAGE_SIZE)
44531 + kfree(subj_map_set.s_hash);
44532 + else
44533 + vfree(subj_map_set.s_hash);
44534 + }
44535 +
44536 + return;
44537 +}
44538 +
44539 +static void
44540 +free_variables(void)
44541 +{
44542 + struct acl_subject_label *s;
44543 + struct acl_role_label *r;
44544 + struct task_struct *task, *task2;
44545 + unsigned int x;
44546 +
44547 + gr_clear_learn_entries();
44548 +
44549 + read_lock(&tasklist_lock);
44550 + do_each_thread(task2, task) {
44551 + task->acl_sp_role = 0;
44552 + task->acl_role_id = 0;
44553 + task->acl = NULL;
44554 + task->role = NULL;
44555 + } while_each_thread(task2, task);
44556 + read_unlock(&tasklist_lock);
44557 +
44558 + /* release the reference to the real root dentry and vfsmount */
44559 + if (real_root)
44560 + dput(real_root);
44561 + real_root = NULL;
44562 + if (real_root_mnt)
44563 + mntput(real_root_mnt);
44564 + real_root_mnt = NULL;
44565 +
44566 + /* free all object hash tables */
44567 +
44568 + FOR_EACH_ROLE_START(r)
44569 + if (r->subj_hash == NULL)
44570 + goto next_role;
44571 + FOR_EACH_SUBJECT_START(r, s, x)
44572 + if (s->obj_hash == NULL)
44573 + break;
44574 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
44575 + kfree(s->obj_hash);
44576 + else
44577 + vfree(s->obj_hash);
44578 + FOR_EACH_SUBJECT_END(s, x)
44579 + FOR_EACH_NESTED_SUBJECT_START(r, s)
44580 + if (s->obj_hash == NULL)
44581 + break;
44582 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
44583 + kfree(s->obj_hash);
44584 + else
44585 + vfree(s->obj_hash);
44586 + FOR_EACH_NESTED_SUBJECT_END(s)
44587 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
44588 + kfree(r->subj_hash);
44589 + else
44590 + vfree(r->subj_hash);
44591 + r->subj_hash = NULL;
44592 +next_role:
44593 + FOR_EACH_ROLE_END(r)
44594 +
44595 + acl_free_all();
44596 +
44597 + if (acl_role_set.r_hash) {
44598 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
44599 + PAGE_SIZE)
44600 + kfree(acl_role_set.r_hash);
44601 + else
44602 + vfree(acl_role_set.r_hash);
44603 + }
44604 + if (name_set.n_hash) {
44605 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
44606 + PAGE_SIZE)
44607 + kfree(name_set.n_hash);
44608 + else
44609 + vfree(name_set.n_hash);
44610 + }
44611 +
44612 + if (inodev_set.i_hash) {
44613 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
44614 + PAGE_SIZE)
44615 + kfree(inodev_set.i_hash);
44616 + else
44617 + vfree(inodev_set.i_hash);
44618 + }
44619 +
44620 + gr_free_uidset();
44621 +
44622 + memset(&name_set, 0, sizeof (struct name_db));
44623 + memset(&inodev_set, 0, sizeof (struct inodev_db));
44624 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
44625 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
44626 +
44627 + default_role = NULL;
44628 + role_list = NULL;
44629 +
44630 + return;
44631 +}
44632 +
44633 +static __u32
44634 +count_user_objs(struct acl_object_label *userp)
44635 +{
44636 + struct acl_object_label o_tmp;
44637 + __u32 num = 0;
44638 +
44639 + while (userp) {
44640 + if (copy_from_user(&o_tmp, userp,
44641 + sizeof (struct acl_object_label)))
44642 + break;
44643 +
44644 + userp = o_tmp.prev;
44645 + num++;
44646 + }
44647 +
44648 + return num;
44649 +}
44650 +
44651 +static struct acl_subject_label *
44652 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
44653 +
44654 +static int
44655 +copy_user_glob(struct acl_object_label *obj)
44656 +{
44657 + struct acl_object_label *g_tmp, **guser;
44658 + unsigned int len;
44659 + char *tmp;
44660 +
44661 + if (obj->globbed == NULL)
44662 + return 0;
44663 +
44664 + guser = &obj->globbed;
44665 + while (*guser) {
44666 + g_tmp = (struct acl_object_label *)
44667 + acl_alloc(sizeof (struct acl_object_label));
44668 + if (g_tmp == NULL)
44669 + return -ENOMEM;
44670 +
44671 + if (copy_from_user(g_tmp, *guser,
44672 + sizeof (struct acl_object_label)))
44673 + return -EFAULT;
44674 +
44675 + len = strnlen_user(g_tmp->filename, PATH_MAX);
44676 +
44677 + if (!len || len >= PATH_MAX)
44678 + return -EINVAL;
44679 +
44680 + if ((tmp = (char *) acl_alloc(len)) == NULL)
44681 + return -ENOMEM;
44682 +
44683 + if (copy_from_user(tmp, g_tmp->filename, len))
44684 + return -EFAULT;
44685 + tmp[len-1] = '\0';
44686 + g_tmp->filename = tmp;
44687 +
44688 + *guser = g_tmp;
44689 + guser = &(g_tmp->next);
44690 + }
44691 +
44692 + return 0;
44693 +}
44694 +
44695 +static int
44696 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
44697 + struct acl_role_label *role)
44698 +{
44699 + struct acl_object_label *o_tmp;
44700 + unsigned int len;
44701 + int ret;
44702 + char *tmp;
44703 +
44704 + while (userp) {
44705 + if ((o_tmp = (struct acl_object_label *)
44706 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
44707 + return -ENOMEM;
44708 +
44709 + if (copy_from_user(o_tmp, userp,
44710 + sizeof (struct acl_object_label)))
44711 + return -EFAULT;
44712 +
44713 + userp = o_tmp->prev;
44714 +
44715 + len = strnlen_user(o_tmp->filename, PATH_MAX);
44716 +
44717 + if (!len || len >= PATH_MAX)
44718 + return -EINVAL;
44719 +
44720 + if ((tmp = (char *) acl_alloc(len)) == NULL)
44721 + return -ENOMEM;
44722 +
44723 + if (copy_from_user(tmp, o_tmp->filename, len))
44724 + return -EFAULT;
44725 + tmp[len-1] = '\0';
44726 + o_tmp->filename = tmp;
44727 +
44728 + insert_acl_obj_label(o_tmp, subj);
44729 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
44730 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
44731 + return -ENOMEM;
44732 +
44733 + ret = copy_user_glob(o_tmp);
44734 + if (ret)
44735 + return ret;
44736 +
44737 + if (o_tmp->nested) {
44738 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
44739 + if (IS_ERR(o_tmp->nested))
44740 + return PTR_ERR(o_tmp->nested);
44741 +
44742 + /* insert into nested subject list */
44743 + o_tmp->nested->next = role->hash->first;
44744 + role->hash->first = o_tmp->nested;
44745 + }
44746 + }
44747 +
44748 + return 0;
44749 +}
44750 +
44751 +static __u32
44752 +count_user_subjs(struct acl_subject_label *userp)
44753 +{
44754 + struct acl_subject_label s_tmp;
44755 + __u32 num = 0;
44756 +
44757 + while (userp) {
44758 + if (copy_from_user(&s_tmp, userp,
44759 + sizeof (struct acl_subject_label)))
44760 + break;
44761 +
44762 + userp = s_tmp.prev;
44763 + /* do not count nested subjects against this count, since
44764 + they are not included in the hash table, but are
44765 + attached to objects. We have already counted
44766 + the subjects in userspace for the allocation
44767 + stack
44768 + */
44769 + if (!(s_tmp.mode & GR_NESTED))
44770 + num++;
44771 + }
44772 +
44773 + return num;
44774 +}
44775 +
44776 +static int
44777 +copy_user_allowedips(struct acl_role_label *rolep)
44778 +{
44779 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
44780 +
44781 + ruserip = rolep->allowed_ips;
44782 +
44783 + while (ruserip) {
44784 + rlast = rtmp;
44785 +
44786 + if ((rtmp = (struct role_allowed_ip *)
44787 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
44788 + return -ENOMEM;
44789 +
44790 + if (copy_from_user(rtmp, ruserip,
44791 + sizeof (struct role_allowed_ip)))
44792 + return -EFAULT;
44793 +
44794 + ruserip = rtmp->prev;
44795 +
44796 + if (!rlast) {
44797 + rtmp->prev = NULL;
44798 + rolep->allowed_ips = rtmp;
44799 + } else {
44800 + rlast->next = rtmp;
44801 + rtmp->prev = rlast;
44802 + }
44803 +
44804 + if (!ruserip)
44805 + rtmp->next = NULL;
44806 + }
44807 +
44808 + return 0;
44809 +}
44810 +
44811 +static int
44812 +copy_user_transitions(struct acl_role_label *rolep)
44813 +{
44814 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
44815 +
44816 + unsigned int len;
44817 + char *tmp;
44818 +
44819 + rusertp = rolep->transitions;
44820 +
44821 + while (rusertp) {
44822 + rlast = rtmp;
44823 +
44824 + if ((rtmp = (struct role_transition *)
44825 + acl_alloc(sizeof (struct role_transition))) == NULL)
44826 + return -ENOMEM;
44827 +
44828 + if (copy_from_user(rtmp, rusertp,
44829 + sizeof (struct role_transition)))
44830 + return -EFAULT;
44831 +
44832 + rusertp = rtmp->prev;
44833 +
44834 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
44835 +
44836 + if (!len || len >= GR_SPROLE_LEN)
44837 + return -EINVAL;
44838 +
44839 + if ((tmp = (char *) acl_alloc(len)) == NULL)
44840 + return -ENOMEM;
44841 +
44842 + if (copy_from_user(tmp, rtmp->rolename, len))
44843 + return -EFAULT;
44844 + tmp[len-1] = '\0';
44845 + rtmp->rolename = tmp;
44846 +
44847 + if (!rlast) {
44848 + rtmp->prev = NULL;
44849 + rolep->transitions = rtmp;
44850 + } else {
44851 + rlast->next = rtmp;
44852 + rtmp->prev = rlast;
44853 + }
44854 +
44855 + if (!rusertp)
44856 + rtmp->next = NULL;
44857 + }
44858 +
44859 + return 0;
44860 +}
44861 +
44862 +static struct acl_subject_label *
44863 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
44864 +{
44865 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
44866 + unsigned int len;
44867 + char *tmp;
44868 + __u32 num_objs;
44869 + struct acl_ip_label **i_tmp, *i_utmp2;
44870 + struct gr_hash_struct ghash;
44871 + struct subject_map *subjmap;
44872 + unsigned int i_num;
44873 + int err;
44874 +
44875 + s_tmp = lookup_subject_map(userp);
44876 +
44877 + /* we've already copied this subject into the kernel, just return
44878 + the reference to it, and don't copy it over again
44879 + */
44880 + if (s_tmp)
44881 + return(s_tmp);
44882 +
44883 + if ((s_tmp = (struct acl_subject_label *)
44884 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
44885 + return ERR_PTR(-ENOMEM);
44886 +
44887 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
44888 + if (subjmap == NULL)
44889 + return ERR_PTR(-ENOMEM);
44890 +
44891 + subjmap->user = userp;
44892 + subjmap->kernel = s_tmp;
44893 + insert_subj_map_entry(subjmap);
44894 +
44895 + if (copy_from_user(s_tmp, userp,
44896 + sizeof (struct acl_subject_label)))
44897 + return ERR_PTR(-EFAULT);
44898 +
44899 + len = strnlen_user(s_tmp->filename, PATH_MAX);
44900 +
44901 + if (!len || len >= PATH_MAX)
44902 + return ERR_PTR(-EINVAL);
44903 +
44904 + if ((tmp = (char *) acl_alloc(len)) == NULL)
44905 + return ERR_PTR(-ENOMEM);
44906 +
44907 + if (copy_from_user(tmp, s_tmp->filename, len))
44908 + return ERR_PTR(-EFAULT);
44909 + tmp[len-1] = '\0';
44910 + s_tmp->filename = tmp;
44911 +
44912 + if (!strcmp(s_tmp->filename, "/"))
44913 + role->root_label = s_tmp;
44914 +
44915 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
44916 + return ERR_PTR(-EFAULT);
44917 +
44918 + /* copy user and group transition tables */
44919 +
44920 + if (s_tmp->user_trans_num) {
44921 + uid_t *uidlist;
44922 +
44923 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
44924 + if (uidlist == NULL)
44925 + return ERR_PTR(-ENOMEM);
44926 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
44927 + return ERR_PTR(-EFAULT);
44928 +
44929 + s_tmp->user_transitions = uidlist;
44930 + }
44931 +
44932 + if (s_tmp->group_trans_num) {
44933 + gid_t *gidlist;
44934 +
44935 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
44936 + if (gidlist == NULL)
44937 + return ERR_PTR(-ENOMEM);
44938 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
44939 + return ERR_PTR(-EFAULT);
44940 +
44941 + s_tmp->group_transitions = gidlist;
44942 + }
44943 +
44944 + /* set up object hash table */
44945 + num_objs = count_user_objs(ghash.first);
44946 +
44947 + s_tmp->obj_hash_size = num_objs;
44948 + s_tmp->obj_hash =
44949 + (struct acl_object_label **)
44950 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
44951 +
44952 + if (!s_tmp->obj_hash)
44953 + return ERR_PTR(-ENOMEM);
44954 +
44955 + memset(s_tmp->obj_hash, 0,
44956 + s_tmp->obj_hash_size *
44957 + sizeof (struct acl_object_label *));
44958 +
44959 + /* add in objects */
44960 + err = copy_user_objs(ghash.first, s_tmp, role);
44961 +
44962 + if (err)
44963 + return ERR_PTR(err);
44964 +
44965 + /* set pointer for parent subject */
44966 + if (s_tmp->parent_subject) {
44967 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
44968 +
44969 + if (IS_ERR(s_tmp2))
44970 + return s_tmp2;
44971 +
44972 + s_tmp->parent_subject = s_tmp2;
44973 + }
44974 +
44975 + /* add in ip acls */
44976 +
44977 + if (!s_tmp->ip_num) {
44978 + s_tmp->ips = NULL;
44979 + goto insert;
44980 + }
44981 +
44982 + i_tmp =
44983 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
44984 + sizeof (struct acl_ip_label *));
44985 +
44986 + if (!i_tmp)
44987 + return ERR_PTR(-ENOMEM);
44988 +
44989 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
44990 + *(i_tmp + i_num) =
44991 + (struct acl_ip_label *)
44992 + acl_alloc(sizeof (struct acl_ip_label));
44993 + if (!*(i_tmp + i_num))
44994 + return ERR_PTR(-ENOMEM);
44995 +
44996 + if (copy_from_user
44997 + (&i_utmp2, s_tmp->ips + i_num,
44998 + sizeof (struct acl_ip_label *)))
44999 + return ERR_PTR(-EFAULT);
45000 +
45001 + if (copy_from_user
45002 + (*(i_tmp + i_num), i_utmp2,
45003 + sizeof (struct acl_ip_label)))
45004 + return ERR_PTR(-EFAULT);
45005 +
45006 + if ((*(i_tmp + i_num))->iface == NULL)
45007 + continue;
45008 +
45009 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
45010 + if (!len || len >= IFNAMSIZ)
45011 + return ERR_PTR(-EINVAL);
45012 + tmp = acl_alloc(len);
45013 + if (tmp == NULL)
45014 + return ERR_PTR(-ENOMEM);
45015 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
45016 + return ERR_PTR(-EFAULT);
45017 + (*(i_tmp + i_num))->iface = tmp;
45018 + }
45019 +
45020 + s_tmp->ips = i_tmp;
45021 +
45022 +insert:
45023 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
45024 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
45025 + return ERR_PTR(-ENOMEM);
45026 +
45027 + return s_tmp;
45028 +}
45029 +
45030 +static int
45031 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
45032 +{
45033 + struct acl_subject_label s_pre;
45034 + struct acl_subject_label * ret;
45035 + int err;
45036 +
45037 + while (userp) {
45038 + if (copy_from_user(&s_pre, userp,
45039 + sizeof (struct acl_subject_label)))
45040 + return -EFAULT;
45041 +
45042 +		/* do not add nested subjects here; they are
45043 +		   added while parsing objects
45044 +		*/
45045 +
45046 + if (s_pre.mode & GR_NESTED) {
45047 + userp = s_pre.prev;
45048 + continue;
45049 + }
45050 +
45051 + ret = do_copy_user_subj(userp, role);
45052 +
45053 + err = PTR_ERR(ret);
45054 + if (IS_ERR(ret))
45055 + return err;
45056 +
45057 + insert_acl_subj_label(ret, role);
45058 +
45059 + userp = s_pre.prev;
45060 + }
45061 +
45062 + return 0;
45063 +}
45064 +
45065 +static int
45066 +copy_user_acl(struct gr_arg *arg)
45067 +{
45068 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
45069 + struct sprole_pw *sptmp;
45070 + struct gr_hash_struct *ghash;
45071 + uid_t *domainlist;
45072 + unsigned int r_num;
45073 + unsigned int len;
45074 + char *tmp;
45075 + int err = 0;
45076 + __u16 i;
45077 + __u32 num_subjs;
45078 +
45079 + /* we need a default and kernel role */
45080 + if (arg->role_db.num_roles < 2)
45081 + return -EINVAL;
45082 +
45083 + /* copy special role authentication info from userspace */
45084 +
45085 + num_sprole_pws = arg->num_sprole_pws;
45086 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
45087 +
45088 + if (!acl_special_roles) {
45089 + err = -ENOMEM;
45090 + goto cleanup;
45091 + }
45092 +
45093 + for (i = 0; i < num_sprole_pws; i++) {
45094 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
45095 + if (!sptmp) {
45096 + err = -ENOMEM;
45097 + goto cleanup;
45098 + }
45099 + if (copy_from_user(sptmp, arg->sprole_pws + i,
45100 + sizeof (struct sprole_pw))) {
45101 + err = -EFAULT;
45102 + goto cleanup;
45103 + }
45104 +
45105 + len =
45106 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
45107 +
45108 + if (!len || len >= GR_SPROLE_LEN) {
45109 + err = -EINVAL;
45110 + goto cleanup;
45111 + }
45112 +
45113 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
45114 + err = -ENOMEM;
45115 + goto cleanup;
45116 + }
45117 +
45118 + if (copy_from_user(tmp, sptmp->rolename, len)) {
45119 + err = -EFAULT;
45120 + goto cleanup;
45121 + }
45122 + tmp[len-1] = '\0';
45123 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
45124 + printk(KERN_ALERT "Copying special role %s\n", tmp);
45125 +#endif
45126 + sptmp->rolename = tmp;
45127 + acl_special_roles[i] = sptmp;
45128 + }
45129 +
45130 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
45131 +
45132 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
45133 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
45134 +
45135 + if (!r_tmp) {
45136 + err = -ENOMEM;
45137 + goto cleanup;
45138 + }
45139 +
45140 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
45141 + sizeof (struct acl_role_label *))) {
45142 + err = -EFAULT;
45143 + goto cleanup;
45144 + }
45145 +
45146 + if (copy_from_user(r_tmp, r_utmp2,
45147 + sizeof (struct acl_role_label))) {
45148 + err = -EFAULT;
45149 + goto cleanup;
45150 + }
45151 +
45152 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
45153 +
45154 +		if (!len || len >= GR_SPROLE_LEN) {
45155 + err = -EINVAL;
45156 + goto cleanup;
45157 + }
45158 +
45159 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
45160 + err = -ENOMEM;
45161 + goto cleanup;
45162 + }
45163 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
45164 + err = -EFAULT;
45165 + goto cleanup;
45166 + }
45167 + tmp[len-1] = '\0';
45168 + r_tmp->rolename = tmp;
45169 +
45170 + if (!strcmp(r_tmp->rolename, "default")
45171 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
45172 + default_role = r_tmp;
45173 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
45174 + kernel_role = r_tmp;
45175 + }
45176 +
45177 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
45178 + err = -ENOMEM;
45179 + goto cleanup;
45180 + }
45181 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
45182 + err = -EFAULT;
45183 + goto cleanup;
45184 + }
45185 +
45186 + r_tmp->hash = ghash;
45187 +
45188 + num_subjs = count_user_subjs(r_tmp->hash->first);
45189 +
45190 + r_tmp->subj_hash_size = num_subjs;
45191 + r_tmp->subj_hash =
45192 + (struct acl_subject_label **)
45193 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
45194 +
45195 + if (!r_tmp->subj_hash) {
45196 + err = -ENOMEM;
45197 + goto cleanup;
45198 + }
45199 +
45200 + err = copy_user_allowedips(r_tmp);
45201 + if (err)
45202 + goto cleanup;
45203 +
45204 + /* copy domain info */
45205 + if (r_tmp->domain_children != NULL) {
45206 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
45207 + if (domainlist == NULL) {
45208 + err = -ENOMEM;
45209 + goto cleanup;
45210 + }
45211 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
45212 + err = -EFAULT;
45213 + goto cleanup;
45214 + }
45215 + r_tmp->domain_children = domainlist;
45216 + }
45217 +
45218 + err = copy_user_transitions(r_tmp);
45219 + if (err)
45220 + goto cleanup;
45221 +
45222 + memset(r_tmp->subj_hash, 0,
45223 + r_tmp->subj_hash_size *
45224 + sizeof (struct acl_subject_label *));
45225 +
45226 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
45227 +
45228 + if (err)
45229 + goto cleanup;
45230 +
45231 + /* set nested subject list to null */
45232 + r_tmp->hash->first = NULL;
45233 +
45234 + insert_acl_role_label(r_tmp);
45235 + }
45236 +
45237 + goto return_err;
45238 + cleanup:
45239 + free_variables();
45240 + return_err:
45241 + return err;
45242 +
45243 +}
45244 +
45245 +static int
45246 +gracl_init(struct gr_arg *args)
45247 +{
45248 + int error = 0;
45249 +
45250 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
45251 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
45252 +
45253 + if (init_variables(args)) {
45254 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
45255 + error = -ENOMEM;
45256 + free_variables();
45257 + goto out;
45258 + }
45259 +
45260 + error = copy_user_acl(args);
45261 + free_init_variables();
45262 + if (error) {
45263 + free_variables();
45264 + goto out;
45265 + }
45266 +
45267 + if ((error = gr_set_acls(0))) {
45268 + free_variables();
45269 + goto out;
45270 + }
45271 +
45272 + pax_open_kernel();
45273 + gr_status |= GR_READY;
45274 + pax_close_kernel();
45275 +
45276 + out:
45277 + return error;
45278 +}
45279 +
45280 +/* derived from glibc fnmatch(); 0: match, 1: no match */
45281 +
45282 +static int
45283 +glob_match(const char *p, const char *n)
45284 +{
45285 + char c;
45286 +
45287 + while ((c = *p++) != '\0') {
45288 + switch (c) {
45289 + case '?':
45290 + if (*n == '\0')
45291 + return 1;
45292 + else if (*n == '/')
45293 + return 1;
45294 + break;
45295 + case '\\':
45296 + if (*n != c)
45297 + return 1;
45298 + break;
45299 + case '*':
45300 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
45301 + if (*n == '/')
45302 + return 1;
45303 + else if (c == '?') {
45304 + if (*n == '\0')
45305 + return 1;
45306 + else
45307 + ++n;
45308 + }
45309 + }
45310 + if (c == '\0') {
45311 + return 0;
45312 + } else {
45313 + const char *endp;
45314 +
45315 + if ((endp = strchr(n, '/')) == NULL)
45316 + endp = n + strlen(n);
45317 +
45318 + if (c == '[') {
45319 + for (--p; n < endp; ++n)
45320 + if (!glob_match(p, n))
45321 + return 0;
45322 + } else if (c == '/') {
45323 + while (*n != '\0' && *n != '/')
45324 + ++n;
45325 + if (*n == '/' && !glob_match(p, n + 1))
45326 + return 0;
45327 + } else {
45328 + for (--p; n < endp; ++n)
45329 + if (*n == c && !glob_match(p, n))
45330 + return 0;
45331 + }
45332 +
45333 + return 1;
45334 + }
45335 + case '[':
45336 + {
45337 + int not;
45338 + char cold;
45339 +
45340 + if (*n == '\0' || *n == '/')
45341 + return 1;
45342 +
45343 + not = (*p == '!' || *p == '^');
45344 + if (not)
45345 + ++p;
45346 +
45347 + c = *p++;
45348 + for (;;) {
45349 + unsigned char fn = (unsigned char)*n;
45350 +
45351 + if (c == '\0')
45352 + return 1;
45353 + else {
45354 + if (c == fn)
45355 + goto matched;
45356 + cold = c;
45357 + c = *p++;
45358 +
45359 + if (c == '-' && *p != ']') {
45360 + unsigned char cend = *p++;
45361 +
45362 + if (cend == '\0')
45363 + return 1;
45364 +
45365 + if (cold <= fn && fn <= cend)
45366 + goto matched;
45367 +
45368 + c = *p++;
45369 + }
45370 + }
45371 +
45372 + if (c == ']')
45373 + break;
45374 + }
45375 + if (!not)
45376 + return 1;
45377 + break;
45378 + matched:
45379 + while (c != ']') {
45380 + if (c == '\0')
45381 + return 1;
45382 +
45383 + c = *p++;
45384 + }
45385 + if (not)
45386 + return 1;
45387 + }
45388 + break;
45389 + default:
45390 + if (c != *n)
45391 + return 1;
45392 + }
45393 +
45394 + ++n;
45395 + }
45396 +
45397 + if (*n == '\0')
45398 + return 0;
45399 +
45400 + if (*n == '/')
45401 + return 0;
45402 +
45403 + return 1;
45404 +}
45405 +
45406 +static struct acl_object_label *
45407 +chk_glob_label(struct acl_object_label *globbed,
45408 + struct dentry *dentry, struct vfsmount *mnt, char **path)
45409 +{
45410 + struct acl_object_label *tmp;
45411 +
45412 + if (*path == NULL)
45413 + *path = gr_to_filename_nolock(dentry, mnt);
45414 +
45415 + tmp = globbed;
45416 +
45417 + while (tmp) {
45418 + if (!glob_match(tmp->filename, *path))
45419 + return tmp;
45420 + tmp = tmp->next;
45421 + }
45422 +
45423 + return NULL;
45424 +}
45425 +
45426 +static struct acl_object_label *
45427 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
45428 + const ino_t curr_ino, const dev_t curr_dev,
45429 + const struct acl_subject_label *subj, char **path, const int checkglob)
45430 +{
45431 + struct acl_subject_label *tmpsubj;
45432 + struct acl_object_label *retval;
45433 + struct acl_object_label *retval2;
45434 +
45435 + tmpsubj = (struct acl_subject_label *) subj;
45436 + read_lock(&gr_inode_lock);
45437 + do {
45438 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
45439 + if (retval) {
45440 + if (checkglob && retval->globbed) {
45441 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
45442 + (struct vfsmount *)orig_mnt, path);
45443 + if (retval2)
45444 + retval = retval2;
45445 + }
45446 + break;
45447 + }
45448 + } while ((tmpsubj = tmpsubj->parent_subject));
45449 + read_unlock(&gr_inode_lock);
45450 +
45451 + return retval;
45452 +}
45453 +
45454 +static __inline__ struct acl_object_label *
45455 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
45456 + const struct dentry *curr_dentry,
45457 + const struct acl_subject_label *subj, char **path, const int checkglob)
45458 +{
45459 + int newglob = checkglob;
45460 +
45461 +	/* if we aren't yet checking a subdirectory of the original path, don't do glob checking,
45462 +	   as we don't want a / * rule to match instead of the / object.
45463 +	   don't do this for create lookups that call this function, though, since they look up
45464 +	   on the parent and thus need globbing checks on all paths
45465 +	*/
45466 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
45467 + newglob = GR_NO_GLOB;
45468 +
45469 + return __full_lookup(orig_dentry, orig_mnt,
45470 + curr_dentry->d_inode->i_ino,
45471 + __get_dev(curr_dentry), subj, path, newglob);
45472 +}
45473 +
45474 +static struct acl_object_label *
45475 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
45476 + const struct acl_subject_label *subj, char *path, const int checkglob)
45477 +{
45478 + struct dentry *dentry = (struct dentry *) l_dentry;
45479 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
45480 + struct acl_object_label *retval;
45481 +
45482 + spin_lock(&dcache_lock);
45483 + spin_lock(&vfsmount_lock);
45484 +
45485 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
45486 +#ifdef CONFIG_NET
45487 + mnt == sock_mnt ||
45488 +#endif
45489 +#ifdef CONFIG_HUGETLBFS
45490 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
45491 +#endif
45492 + /* ignore Eric Biederman */
45493 + IS_PRIVATE(l_dentry->d_inode))) {
45494 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
45495 + goto out;
45496 + }
45497 +
45498 + for (;;) {
45499 + if (dentry == real_root && mnt == real_root_mnt)
45500 + break;
45501 +
45502 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
45503 + if (mnt->mnt_parent == mnt)
45504 + break;
45505 +
45506 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
45507 + if (retval != NULL)
45508 + goto out;
45509 +
45510 + dentry = mnt->mnt_mountpoint;
45511 + mnt = mnt->mnt_parent;
45512 + continue;
45513 + }
45514 +
45515 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
45516 + if (retval != NULL)
45517 + goto out;
45518 +
45519 + dentry = dentry->d_parent;
45520 + }
45521 +
45522 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
45523 +
45524 + if (retval == NULL)
45525 + retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
45526 +out:
45527 + spin_unlock(&vfsmount_lock);
45528 + spin_unlock(&dcache_lock);
45529 +
45530 + BUG_ON(retval == NULL);
45531 +
45532 + return retval;
45533 +}
45534 +
45535 +static __inline__ struct acl_object_label *
45536 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
45537 + const struct acl_subject_label *subj)
45538 +{
45539 + char *path = NULL;
45540 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
45541 +}
45542 +
45543 +static __inline__ struct acl_object_label *
45544 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
45545 + const struct acl_subject_label *subj)
45546 +{
45547 + char *path = NULL;
45548 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
45549 +}
45550 +
45551 +static __inline__ struct acl_object_label *
45552 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
45553 + const struct acl_subject_label *subj, char *path)
45554 +{
45555 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
45556 +}
45557 +
45558 +static struct acl_subject_label *
45559 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
45560 + const struct acl_role_label *role)
45561 +{
45562 + struct dentry *dentry = (struct dentry *) l_dentry;
45563 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
45564 + struct acl_subject_label *retval;
45565 +
45566 + spin_lock(&dcache_lock);
45567 + spin_lock(&vfsmount_lock);
45568 +
45569 + for (;;) {
45570 + if (dentry == real_root && mnt == real_root_mnt)
45571 + break;
45572 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
45573 + if (mnt->mnt_parent == mnt)
45574 + break;
45575 +
45576 + read_lock(&gr_inode_lock);
45577 + retval =
45578 + lookup_acl_subj_label(dentry->d_inode->i_ino,
45579 + __get_dev(dentry), role);
45580 + read_unlock(&gr_inode_lock);
45581 + if (retval != NULL)
45582 + goto out;
45583 +
45584 + dentry = mnt->mnt_mountpoint;
45585 + mnt = mnt->mnt_parent;
45586 + continue;
45587 + }
45588 +
45589 + read_lock(&gr_inode_lock);
45590 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
45591 + __get_dev(dentry), role);
45592 + read_unlock(&gr_inode_lock);
45593 + if (retval != NULL)
45594 + goto out;
45595 +
45596 + dentry = dentry->d_parent;
45597 + }
45598 +
45599 + read_lock(&gr_inode_lock);
45600 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
45601 + __get_dev(dentry), role);
45602 + read_unlock(&gr_inode_lock);
45603 +
45604 + if (unlikely(retval == NULL)) {
45605 + read_lock(&gr_inode_lock);
45606 + retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
45607 + __get_dev(real_root), role);
45608 + read_unlock(&gr_inode_lock);
45609 + }
45610 +out:
45611 + spin_unlock(&vfsmount_lock);
45612 + spin_unlock(&dcache_lock);
45613 +
45614 + BUG_ON(retval == NULL);
45615 +
45616 + return retval;
45617 +}
45618 +
45619 +static void
45620 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
45621 +{
45622 + struct task_struct *task = current;
45623 + const struct cred *cred = current_cred();
45624 +
45625 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
45626 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
45627 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
45628 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
45629 +
45630 + return;
45631 +}
45632 +
45633 +static void
45634 +gr_log_learn_sysctl(const char *path, const __u32 mode)
45635 +{
45636 + struct task_struct *task = current;
45637 + const struct cred *cred = current_cred();
45638 +
45639 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
45640 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
45641 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
45642 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
45643 +
45644 + return;
45645 +}
45646 +
45647 +static void
45648 +gr_log_learn_id_change(const char type, const unsigned int real,
45649 + const unsigned int effective, const unsigned int fs)
45650 +{
45651 + struct task_struct *task = current;
45652 + const struct cred *cred = current_cred();
45653 +
45654 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
45655 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
45656 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
45657 + type, real, effective, fs, &task->signal->saved_ip);
45658 +
45659 + return;
45660 +}
45661 +
45662 +__u32
45663 +gr_check_link(const struct dentry * new_dentry,
45664 + const struct dentry * parent_dentry,
45665 + const struct vfsmount * parent_mnt,
45666 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
45667 +{
45668 + struct acl_object_label *obj;
45669 + __u32 oldmode, newmode;
45670 + __u32 needmode;
45671 +
45672 + if (unlikely(!(gr_status & GR_READY)))
45673 + return (GR_CREATE | GR_LINK);
45674 +
45675 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
45676 + oldmode = obj->mode;
45677 +
45678 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
45679 + oldmode |= (GR_CREATE | GR_LINK);
45680 +
45681 + needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
45682 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
45683 + needmode |= GR_SETID | GR_AUDIT_SETID;
45684 +
45685 + newmode =
45686 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
45687 + oldmode | needmode);
45688 +
45689 + needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
45690 + GR_SETID | GR_READ | GR_FIND | GR_DELETE |
45691 + GR_INHERIT | GR_AUDIT_INHERIT);
45692 +
45693 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
45694 + goto bad;
45695 +
45696 + if ((oldmode & needmode) != needmode)
45697 + goto bad;
45698 +
45699 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
45700 + if ((newmode & needmode) != needmode)
45701 + goto bad;
45702 +
45703 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
45704 + return newmode;
45705 +bad:
45706 + needmode = oldmode;
45707 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
45708 + needmode |= GR_SETID;
45709 +
45710 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
45711 + gr_log_learn(old_dentry, old_mnt, needmode);
45712 + return (GR_CREATE | GR_LINK);
45713 + } else if (newmode & GR_SUPPRESS)
45714 + return GR_SUPPRESS;
45715 + else
45716 + return 0;
45717 +}
45718 +
45719 +__u32
45720 +gr_search_file(const struct dentry * dentry, const __u32 mode,
45721 + const struct vfsmount * mnt)
45722 +{
45723 + __u32 retval = mode;
45724 + struct acl_subject_label *curracl;
45725 + struct acl_object_label *currobj;
45726 +
45727 + if (unlikely(!(gr_status & GR_READY)))
45728 + return (mode & ~GR_AUDITS);
45729 +
45730 + curracl = current->acl;
45731 +
45732 + currobj = chk_obj_label(dentry, mnt, curracl);
45733 + retval = currobj->mode & mode;
45734 +
45735 + /* if we're opening a specified transfer file for writing
45736 + (e.g. /dev/initctl), then transfer our role to init
45737 + */
45738 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
45739 + current->role->roletype & GR_ROLE_PERSIST)) {
45740 + struct task_struct *task = init_pid_ns.child_reaper;
45741 +
45742 + if (task->role != current->role) {
45743 + task->acl_sp_role = 0;
45744 + task->acl_role_id = current->acl_role_id;
45745 + task->role = current->role;
45746 + rcu_read_lock();
45747 + read_lock(&grsec_exec_file_lock);
45748 + gr_apply_subject_to_task(task);
45749 + read_unlock(&grsec_exec_file_lock);
45750 + rcu_read_unlock();
45751 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
45752 + }
45753 + }
45754 +
45755 + if (unlikely
45756 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
45757 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
45758 + __u32 new_mode = mode;
45759 +
45760 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
45761 +
45762 + retval = new_mode;
45763 +
45764 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
45765 + new_mode |= GR_INHERIT;
45766 +
45767 + if (!(mode & GR_NOLEARN))
45768 + gr_log_learn(dentry, mnt, new_mode);
45769 + }
45770 +
45771 + return retval;
45772 +}
45773 +
45774 +__u32
45775 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
45776 + const struct vfsmount * mnt, const __u32 mode)
45777 +{
45778 + struct name_entry *match;
45779 + struct acl_object_label *matchpo;
45780 + struct acl_subject_label *curracl;
45781 + char *path;
45782 + __u32 retval;
45783 +
45784 + if (unlikely(!(gr_status & GR_READY)))
45785 + return (mode & ~GR_AUDITS);
45786 +
45787 + preempt_disable();
45788 + path = gr_to_filename_rbac(new_dentry, mnt);
45789 + match = lookup_name_entry_create(path);
45790 +
45791 + if (!match)
45792 + goto check_parent;
45793 +
45794 + curracl = current->acl;
45795 +
45796 + read_lock(&gr_inode_lock);
45797 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
45798 + read_unlock(&gr_inode_lock);
45799 +
45800 + if (matchpo) {
45801 + if ((matchpo->mode & mode) !=
45802 + (mode & ~(GR_AUDITS | GR_SUPPRESS))
45803 + && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
45804 + __u32 new_mode = mode;
45805 +
45806 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
45807 +
45808 + gr_log_learn(new_dentry, mnt, new_mode);
45809 +
45810 + preempt_enable();
45811 + return new_mode;
45812 + }
45813 + preempt_enable();
45814 + return (matchpo->mode & mode);
45815 + }
45816 +
45817 + check_parent:
45818 + curracl = current->acl;
45819 +
45820 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
45821 + retval = matchpo->mode & mode;
45822 +
45823 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
45824 + && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
45825 + __u32 new_mode = mode;
45826 +
45827 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
45828 +
45829 + gr_log_learn(new_dentry, mnt, new_mode);
45830 + preempt_enable();
45831 + return new_mode;
45832 + }
45833 +
45834 + preempt_enable();
45835 + return retval;
45836 +}
45837 +
45838 +int
45839 +gr_check_hidden_task(const struct task_struct *task)
45840 +{
45841 + if (unlikely(!(gr_status & GR_READY)))
45842 + return 0;
45843 +
45844 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
45845 + return 1;
45846 +
45847 + return 0;
45848 +}
45849 +
45850 +int
45851 +gr_check_protected_task(const struct task_struct *task)
45852 +{
45853 + if (unlikely(!(gr_status & GR_READY) || !task))
45854 + return 0;
45855 +
45856 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
45857 + task->acl != current->acl)
45858 + return 1;
45859 +
45860 + return 0;
45861 +}
45862 +
45863 +int
45864 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
45865 +{
45866 + struct task_struct *p;
45867 + int ret = 0;
45868 +
45869 + if (unlikely(!(gr_status & GR_READY) || !pid))
45870 + return ret;
45871 +
45872 + read_lock(&tasklist_lock);
45873 + do_each_pid_task(pid, type, p) {
45874 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
45875 + p->acl != current->acl) {
45876 + ret = 1;
45877 + goto out;
45878 + }
45879 + } while_each_pid_task(pid, type, p);
45880 +out:
45881 + read_unlock(&tasklist_lock);
45882 +
45883 + return ret;
45884 +}
45885 +
45886 +void
45887 +gr_copy_label(struct task_struct *tsk)
45888 +{
45889 + tsk->signal->used_accept = 0;
45890 + tsk->acl_sp_role = 0;
45891 + tsk->acl_role_id = current->acl_role_id;
45892 + tsk->acl = current->acl;
45893 + tsk->role = current->role;
45894 + tsk->signal->curr_ip = current->signal->curr_ip;
45895 + tsk->signal->saved_ip = current->signal->saved_ip;
45896 + if (current->exec_file)
45897 + get_file(current->exec_file);
45898 + tsk->exec_file = current->exec_file;
45899 + tsk->is_writable = current->is_writable;
45900 + if (unlikely(current->signal->used_accept)) {
45901 + current->signal->curr_ip = 0;
45902 + current->signal->saved_ip = 0;
45903 + }
45904 +
45905 + return;
45906 +}
45907 +
45908 +static void
45909 +gr_set_proc_res(struct task_struct *task)
45910 +{
45911 + struct acl_subject_label *proc;
45912 + unsigned short i;
45913 +
45914 + proc = task->acl;
45915 +
45916 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
45917 + return;
45918 +
45919 + for (i = 0; i < RLIM_NLIMITS; i++) {
45920 + if (!(proc->resmask & (1 << i)))
45921 + continue;
45922 +
45923 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
45924 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
45925 + }
45926 +
45927 + return;
45928 +}
45929 +
45930 +extern int __gr_process_user_ban(struct user_struct *user);
45931 +
45932 +int
45933 +gr_check_user_change(int real, int effective, int fs)
45934 +{
45935 + unsigned int i;
45936 + __u16 num;
45937 + uid_t *uidlist;
45938 + int curuid;
45939 + int realok = 0;
45940 + int effectiveok = 0;
45941 + int fsok = 0;
45942 +
45943 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
45944 + struct user_struct *user;
45945 +
45946 + if (real == -1)
45947 + goto skipit;
45948 +
45949 + user = find_user(real);
45950 + if (user == NULL)
45951 + goto skipit;
45952 +
45953 + if (__gr_process_user_ban(user)) {
45954 + /* for find_user */
45955 + free_uid(user);
45956 + return 1;
45957 + }
45958 +
45959 + /* for find_user */
45960 + free_uid(user);
45961 +
45962 +skipit:
45963 +#endif
45964 +
45965 + if (unlikely(!(gr_status & GR_READY)))
45966 + return 0;
45967 +
45968 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
45969 + gr_log_learn_id_change('u', real, effective, fs);
45970 +
45971 + num = current->acl->user_trans_num;
45972 + uidlist = current->acl->user_transitions;
45973 +
45974 + if (uidlist == NULL)
45975 + return 0;
45976 +
45977 + if (real == -1)
45978 + realok = 1;
45979 + if (effective == -1)
45980 + effectiveok = 1;
45981 + if (fs == -1)
45982 + fsok = 1;
45983 +
45984 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
45985 + for (i = 0; i < num; i++) {
45986 + curuid = (int)uidlist[i];
45987 + if (real == curuid)
45988 + realok = 1;
45989 + if (effective == curuid)
45990 + effectiveok = 1;
45991 + if (fs == curuid)
45992 + fsok = 1;
45993 + }
45994 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
45995 + for (i = 0; i < num; i++) {
45996 + curuid = (int)uidlist[i];
45997 + if (real == curuid)
45998 + break;
45999 + if (effective == curuid)
46000 + break;
46001 + if (fs == curuid)
46002 + break;
46003 + }
46004 + /* not in deny list */
46005 + if (i == num) {
46006 + realok = 1;
46007 + effectiveok = 1;
46008 + fsok = 1;
46009 + }
46010 + }
46011 +
46012 + if (realok && effectiveok && fsok)
46013 + return 0;
46014 + else {
46015 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
46016 + return 1;
46017 + }
46018 +}
46019 +
46020 +int
46021 +gr_check_group_change(int real, int effective, int fs)
46022 +{
46023 + unsigned int i;
46024 + __u16 num;
46025 + gid_t *gidlist;
46026 + int curgid;
46027 + int realok = 0;
46028 + int effectiveok = 0;
46029 + int fsok = 0;
46030 +
46031 + if (unlikely(!(gr_status & GR_READY)))
46032 + return 0;
46033 +
46034 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
46035 + gr_log_learn_id_change('g', real, effective, fs);
46036 +
46037 + num = current->acl->group_trans_num;
46038 + gidlist = current->acl->group_transitions;
46039 +
46040 + if (gidlist == NULL)
46041 + return 0;
46042 +
46043 + if (real == -1)
46044 + realok = 1;
46045 + if (effective == -1)
46046 + effectiveok = 1;
46047 + if (fs == -1)
46048 + fsok = 1;
46049 +
46050 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
46051 + for (i = 0; i < num; i++) {
46052 + curgid = (int)gidlist[i];
46053 + if (real == curgid)
46054 + realok = 1;
46055 + if (effective == curgid)
46056 + effectiveok = 1;
46057 + if (fs == curgid)
46058 + fsok = 1;
46059 + }
46060 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
46061 + for (i = 0; i < num; i++) {
46062 + curgid = (int)gidlist[i];
46063 + if (real == curgid)
46064 + break;
46065 + if (effective == curgid)
46066 + break;
46067 + if (fs == curgid)
46068 + break;
46069 + }
46070 + /* not in deny list */
46071 + if (i == num) {
46072 + realok = 1;
46073 + effectiveok = 1;
46074 + fsok = 1;
46075 + }
46076 + }
46077 +
46078 + if (realok && effectiveok && fsok)
46079 + return 0;
46080 + else {
46081 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
46082 + return 1;
46083 + }
46084 +}
46085 +
46086 +void
46087 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
46088 +{
46089 + struct acl_role_label *role = task->role;
46090 + struct acl_subject_label *subj = NULL;
46091 + struct acl_object_label *obj;
46092 + struct file *filp;
46093 +
46094 + if (unlikely(!(gr_status & GR_READY)))
46095 + return;
46096 +
46097 + filp = task->exec_file;
46098 +
46099 + /* kernel process, we'll give them the kernel role */
46100 + if (unlikely(!filp)) {
46101 + task->role = kernel_role;
46102 + task->acl = kernel_role->root_label;
46103 + return;
46104 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
46105 + role = lookup_acl_role_label(task, uid, gid);
46106 +
46107 +	/* perform the subject lookup in the possibly new role;
46108 +	   we can reuse this result below in the case where role == task->role
46109 +	   */
46110 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
46111 +
46112 +	/* if we changed uid/gid but ended up in the same role
46113 +	   and are using inheritance, don't lose the inherited subject:
46114 +	   if the current subject is other than what a normal lookup
46115 +	   would result in, we arrived here via inheritance, so don't
46116 +	   lose that subject
46117 +	   */
46118 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
46119 + (subj == task->acl)))
46120 + task->acl = subj;
46121 +
46122 + task->role = role;
46123 +
46124 + task->is_writable = 0;
46125 +
46126 + /* ignore additional mmap checks for processes that are writable
46127 + by the default ACL */
46128 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
46129 + if (unlikely(obj->mode & GR_WRITE))
46130 + task->is_writable = 1;
46131 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
46132 + if (unlikely(obj->mode & GR_WRITE))
46133 + task->is_writable = 1;
46134 +
46135 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46136 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
46137 +#endif
46138 +
46139 + gr_set_proc_res(task);
46140 +
46141 + return;
46142 +}
46143 +
46144 +int
46145 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
46146 + const int unsafe_share)
46147 +{
46148 + struct task_struct *task = current;
46149 + struct acl_subject_label *newacl;
46150 + struct acl_object_label *obj;
46151 + __u32 retmode;
46152 +
46153 + if (unlikely(!(gr_status & GR_READY)))
46154 + return 0;
46155 +
46156 + newacl = chk_subj_label(dentry, mnt, task->role);
46157 +
46158 + task_lock(task);
46159 + if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
46160 + !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
46161 + !(task->role->roletype & GR_ROLE_GOD) &&
46162 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
46163 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
46164 + task_unlock(task);
46165 + if (unsafe_share)
46166 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
46167 + else
46168 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
46169 + return -EACCES;
46170 + }
46171 + task_unlock(task);
46172 +
46173 + obj = chk_obj_label(dentry, mnt, task->acl);
46174 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
46175 +
46176 + if (!(task->acl->mode & GR_INHERITLEARN) &&
46177 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
46178 + if (obj->nested)
46179 + task->acl = obj->nested;
46180 + else
46181 + task->acl = newacl;
46182 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
46183 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
46184 +
46185 + task->is_writable = 0;
46186 +
46187 + /* ignore additional mmap checks for processes that are writable
46188 + by the default ACL */
46189 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
46190 + if (unlikely(obj->mode & GR_WRITE))
46191 + task->is_writable = 1;
46192 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
46193 + if (unlikely(obj->mode & GR_WRITE))
46194 + task->is_writable = 1;
46195 +
46196 + gr_set_proc_res(task);
46197 +
46198 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46199 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
46200 +#endif
46201 + return 0;
46202 +}
46203 +
46204 +/* always called with valid inodev ptr */
46205 +static void
46206 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
46207 +{
46208 + struct acl_object_label *matchpo;
46209 + struct acl_subject_label *matchps;
46210 + struct acl_subject_label *subj;
46211 + struct acl_role_label *role;
46212 + unsigned int x;
46213 +
46214 + FOR_EACH_ROLE_START(role)
46215 + FOR_EACH_SUBJECT_START(role, subj, x)
46216 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
46217 + matchpo->mode |= GR_DELETED;
46218 + FOR_EACH_SUBJECT_END(subj,x)
46219 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
46220 + if (subj->inode == ino && subj->device == dev)
46221 + subj->mode |= GR_DELETED;
46222 + FOR_EACH_NESTED_SUBJECT_END(subj)
46223 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
46224 + matchps->mode |= GR_DELETED;
46225 + FOR_EACH_ROLE_END(role)
46226 +
46227 + inodev->nentry->deleted = 1;
46228 +
46229 + return;
46230 +}
46231 +
46232 +void
46233 +gr_handle_delete(const ino_t ino, const dev_t dev)
46234 +{
46235 + struct inodev_entry *inodev;
46236 +
46237 + if (unlikely(!(gr_status & GR_READY)))
46238 + return;
46239 +
46240 + write_lock(&gr_inode_lock);
46241 + inodev = lookup_inodev_entry(ino, dev);
46242 + if (inodev != NULL)
46243 + do_handle_delete(inodev, ino, dev);
46244 + write_unlock(&gr_inode_lock);
46245 +
46246 + return;
46247 +}
46248 +
46249 +static void
46250 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
46251 + const ino_t newinode, const dev_t newdevice,
46252 + struct acl_subject_label *subj)
46253 +{
46254 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
46255 + struct acl_object_label *match;
46256 +
46257 + match = subj->obj_hash[index];
46258 +
46259 + while (match && (match->inode != oldinode ||
46260 + match->device != olddevice ||
46261 + !(match->mode & GR_DELETED)))
46262 + match = match->next;
46263 +
46264 + if (match && (match->inode == oldinode)
46265 + && (match->device == olddevice)
46266 + && (match->mode & GR_DELETED)) {
46267 + if (match->prev == NULL) {
46268 + subj->obj_hash[index] = match->next;
46269 + if (match->next != NULL)
46270 + match->next->prev = NULL;
46271 + } else {
46272 + match->prev->next = match->next;
46273 + if (match->next != NULL)
46274 + match->next->prev = match->prev;
46275 + }
46276 + match->prev = NULL;
46277 + match->next = NULL;
46278 + match->inode = newinode;
46279 + match->device = newdevice;
46280 + match->mode &= ~GR_DELETED;
46281 +
46282 + insert_acl_obj_label(match, subj);
46283 + }
46284 +
46285 + return;
46286 +}
46287 +
46288 +static void
46289 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
46290 + const ino_t newinode, const dev_t newdevice,
46291 + struct acl_role_label *role)
46292 +{
46293 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
46294 + struct acl_subject_label *match;
46295 +
46296 + match = role->subj_hash[index];
46297 +
46298 + while (match && (match->inode != oldinode ||
46299 + match->device != olddevice ||
46300 + !(match->mode & GR_DELETED)))
46301 + match = match->next;
46302 +
46303 + if (match && (match->inode == oldinode)
46304 + && (match->device == olddevice)
46305 + && (match->mode & GR_DELETED)) {
46306 + if (match->prev == NULL) {
46307 + role->subj_hash[index] = match->next;
46308 + if (match->next != NULL)
46309 + match->next->prev = NULL;
46310 + } else {
46311 + match->prev->next = match->next;
46312 + if (match->next != NULL)
46313 + match->next->prev = match->prev;
46314 + }
46315 + match->prev = NULL;
46316 + match->next = NULL;
46317 + match->inode = newinode;
46318 + match->device = newdevice;
46319 + match->mode &= ~GR_DELETED;
46320 +
46321 + insert_acl_subj_label(match, role);
46322 + }
46323 +
46324 + return;
46325 +}
46326 +
46327 +static void
46328 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
46329 + const ino_t newinode, const dev_t newdevice)
46330 +{
46331 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
46332 + struct inodev_entry *match;
46333 +
46334 + match = inodev_set.i_hash[index];
46335 +
46336 + while (match && (match->nentry->inode != oldinode ||
46337 + match->nentry->device != olddevice || !match->nentry->deleted))
46338 + match = match->next;
46339 +
46340 + if (match && (match->nentry->inode == oldinode)
46341 + && (match->nentry->device == olddevice) &&
46342 + match->nentry->deleted) {
46343 + if (match->prev == NULL) {
46344 + inodev_set.i_hash[index] = match->next;
46345 + if (match->next != NULL)
46346 + match->next->prev = NULL;
46347 + } else {
46348 + match->prev->next = match->next;
46349 + if (match->next != NULL)
46350 + match->next->prev = match->prev;
46351 + }
46352 + match->prev = NULL;
46353 + match->next = NULL;
46354 + match->nentry->inode = newinode;
46355 + match->nentry->device = newdevice;
46356 + match->nentry->deleted = 0;
46357 +
46358 + insert_inodev_entry(match);
46359 + }
46360 +
46361 + return;
46362 +}
46363 +
46364 +static void
46365 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
46366 + const struct vfsmount *mnt)
46367 +{
46368 + struct acl_subject_label *subj;
46369 + struct acl_role_label *role;
46370 + unsigned int x;
46371 + ino_t inode = dentry->d_inode->i_ino;
46372 + dev_t dev = __get_dev(dentry);
46373 +
46374 + FOR_EACH_ROLE_START(role)
46375 + update_acl_subj_label(matchn->inode, matchn->device,
46376 + inode, dev, role);
46377 +
46378 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
46379 + if ((subj->inode == inode) && (subj->device == dev)) {
46380 + subj->inode = inode;
46381 + subj->device = dev;
46382 + }
46383 + FOR_EACH_NESTED_SUBJECT_END(subj)
46384 + FOR_EACH_SUBJECT_START(role, subj, x)
46385 + update_acl_obj_label(matchn->inode, matchn->device,
46386 + inode, dev, subj);
46387 + FOR_EACH_SUBJECT_END(subj,x)
46388 + FOR_EACH_ROLE_END(role)
46389 +
46390 + update_inodev_entry(matchn->inode, matchn->device, inode, dev);
46391 +
46392 + return;
46393 +}
46394 +
46395 +void
46396 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
46397 +{
46398 + struct name_entry *matchn;
46399 +
46400 + if (unlikely(!(gr_status & GR_READY)))
46401 + return;
46402 +
46403 + preempt_disable();
46404 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
46405 +
46406 + if (unlikely((unsigned long)matchn)) {
46407 + write_lock(&gr_inode_lock);
46408 + do_handle_create(matchn, dentry, mnt);
46409 + write_unlock(&gr_inode_lock);
46410 + }
46411 + preempt_enable();
46412 +
46413 + return;
46414 +}
46415 +
46416 +void
46417 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
46418 + struct dentry *old_dentry,
46419 + struct dentry *new_dentry,
46420 + struct vfsmount *mnt, const __u8 replace)
46421 +{
46422 + struct name_entry *matchn;
46423 + struct inodev_entry *inodev;
46424 + ino_t oldinode = old_dentry->d_inode->i_ino;
46425 + dev_t olddev = __get_dev(old_dentry);
46426 +
46427 +	/* vfs_rename swaps the name and parent link for old_dentry and
46428 +	   new_dentry.
46429 +	   At this point, old_dentry has the new name, parent link, and inode
46430 +	   for the renamed file.
46431 +	   If a file is being replaced by the rename, new_dentry has the inode
46432 +	   and name for the replaced file.
46433 +	*/
46434 +
46435 + if (unlikely(!(gr_status & GR_READY)))
46436 + return;
46437 +
46438 + preempt_disable();
46439 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
46440 +
46441 + /* we wouldn't have to check d_inode if it weren't for
46442 + NFS silly-renaming
46443 + */
46444 +
46445 + write_lock(&gr_inode_lock);
46446 + if (unlikely(replace && new_dentry->d_inode)) {
46447 + ino_t newinode = new_dentry->d_inode->i_ino;
46448 + dev_t newdev = __get_dev(new_dentry);
46449 + inodev = lookup_inodev_entry(newinode, newdev);
46450 + if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
46451 + do_handle_delete(inodev, newinode, newdev);
46452 + }
46453 +
46454 + inodev = lookup_inodev_entry(oldinode, olddev);
46455 + if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
46456 + do_handle_delete(inodev, oldinode, olddev);
46457 +
46458 + if (unlikely((unsigned long)matchn))
46459 + do_handle_create(matchn, old_dentry, mnt);
46460 +
46461 + write_unlock(&gr_inode_lock);
46462 + preempt_enable();
46463 +
46464 + return;
46465 +}
46466 +
46467 +static int
46468 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
46469 + unsigned char **sum)
46470 +{
46471 + struct acl_role_label *r;
46472 + struct role_allowed_ip *ipp;
46473 + struct role_transition *trans;
46474 + unsigned int i;
46475 + int found = 0;
46476 + u32 curr_ip = current->signal->curr_ip;
46477 +
46478 + current->signal->saved_ip = curr_ip;
46479 +
46480 + /* check transition table */
46481 +
46482 + for (trans = current->role->transitions; trans; trans = trans->next) {
46483 + if (!strcmp(rolename, trans->rolename)) {
46484 + found = 1;
46485 + break;
46486 + }
46487 + }
46488 +
46489 + if (!found)
46490 + return 0;
46491 +
46492 + /* handle special roles that do not require authentication
46493 + and check ip */
46494 +
46495 + FOR_EACH_ROLE_START(r)
46496 + if (!strcmp(rolename, r->rolename) &&
46497 + (r->roletype & GR_ROLE_SPECIAL)) {
46498 + found = 0;
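+			/* found: 0 = client IP not permitted, 1 = matched an
+			   allowed network, 2 = role carries no IP restriction */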
46499 + if (r->allowed_ips != NULL) {
46500 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
46501 + if ((ntohl(curr_ip) & ipp->netmask) ==
46502 + (ntohl(ipp->addr) & ipp->netmask))
46503 + found = 1;
46504 + }
46505 + } else
46506 + found = 2;
46507 + if (!found)
46508 + return 0;
46509 +
46510 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
46511 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
46512 + *salt = NULL;
46513 + *sum = NULL;
46514 + return 1;
46515 + }
46516 + }
46517 + FOR_EACH_ROLE_END(r)
46518 +
46519 + for (i = 0; i < num_sprole_pws; i++) {
46520 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
46521 + *salt = acl_special_roles[i]->salt;
46522 + *sum = acl_special_roles[i]->sum;
46523 + return 1;
46524 + }
46525 + }
46526 +
46527 + return 0;
46528 +}
46529 +
46530 +static void
46531 +assign_special_role(char *rolename)
46532 +{
46533 + struct acl_object_label *obj;
46534 + struct acl_role_label *r;
46535 + struct acl_role_label *assigned = NULL;
46536 + struct task_struct *tsk;
46537 + struct file *filp;
46538 +
46539 + FOR_EACH_ROLE_START(r)
46540 + if (!strcmp(rolename, r->rolename) &&
46541 + (r->roletype & GR_ROLE_SPECIAL)) {
46542 + assigned = r;
46543 + break;
46544 + }
46545 + FOR_EACH_ROLE_END(r)
46546 +
46547 + if (!assigned)
46548 + return;
46549 +
46550 + read_lock(&tasklist_lock);
46551 + read_lock(&grsec_exec_file_lock);
46552 +
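+	/* the special role is assigned to the parent of the task performing
+	   the authentication write (typically the invoking shell), not to
+	   the writing task itself */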
46553 + tsk = current->real_parent;
46554 + if (tsk == NULL)
46555 + goto out_unlock;
46556 +
46557 + filp = tsk->exec_file;
46558 + if (filp == NULL)
46559 + goto out_unlock;
46560 +
46561 + tsk->is_writable = 0;
46562 +
46563 + tsk->acl_sp_role = 1;
46564 + tsk->acl_role_id = ++acl_sp_role_value;
46565 + tsk->role = assigned;
46566 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
46567 +
46568 + /* ignore additional mmap checks for processes that are writable
46569 + by the default ACL */
46570 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
46571 + if (unlikely(obj->mode & GR_WRITE))
46572 + tsk->is_writable = 1;
46573 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
46574 + if (unlikely(obj->mode & GR_WRITE))
46575 + tsk->is_writable = 1;
46576 +
46577 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46578 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
46579 +#endif
46580 +
46581 +out_unlock:
46582 + read_unlock(&grsec_exec_file_lock);
46583 + read_unlock(&tasklist_lock);
46584 + return;
46585 +}
46586 +
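+/* returns 0 (and alerts) if some task outside our ancestry, not sharing our
+   controlling tty, holds an open fd on the same tty character device, i.e.
+   a potential tty sniffer; returns 1 if the terminal looks secure */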
46587 +int gr_check_secure_terminal(struct task_struct *task)
46588 +{
46589 + struct task_struct *p, *p2, *p3;
46590 + struct files_struct *files;
46591 + struct fdtable *fdt;
46592 + struct file *our_file = NULL, *file;
46593 + int i;
46594 +
46595 + if (task->signal->tty == NULL)
46596 + return 1;
46597 +
46598 + files = get_files_struct(task);
46599 + if (files != NULL) {
46600 + rcu_read_lock();
46601 + fdt = files_fdtable(files);
46602 + for (i=0; i < fdt->max_fds; i++) {
46603 + file = fcheck_files(files, i);
46604 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
46605 + get_file(file);
46606 + our_file = file;
46607 + }
46608 + }
46609 + rcu_read_unlock();
46610 + put_files_struct(files);
46611 + }
46612 +
46613 + if (our_file == NULL)
46614 + return 1;
46615 +
46616 + read_lock(&tasklist_lock);
46617 + do_each_thread(p2, p) {
46618 + files = get_files_struct(p);
46619 + if (files == NULL ||
46620 + (p->signal && p->signal->tty == task->signal->tty)) {
46621 + if (files != NULL)
46622 + put_files_struct(files);
46623 + continue;
46624 + }
46625 + rcu_read_lock();
46626 + fdt = files_fdtable(files);
46627 + for (i=0; i < fdt->max_fds; i++) {
46628 + file = fcheck_files(files, i);
46629 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
46630 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
46631 + p3 = task;
46632 + while (p3->pid > 0) {
46633 + if (p3 == p)
46634 + break;
46635 + p3 = p3->real_parent;
46636 + }
46637 + if (p3 == p)
46638 + break;
46639 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
46640 + gr_handle_alertkill(p);
46641 + rcu_read_unlock();
46642 + put_files_struct(files);
46643 + read_unlock(&tasklist_lock);
46644 + fput(our_file);
46645 + return 0;
46646 + }
46647 + }
46648 + rcu_read_unlock();
46649 + put_files_struct(files);
46650 + } while_each_thread(p2, p);
46651 + read_unlock(&tasklist_lock);
46652 +
46653 + fput(our_file);
46654 + return 1;
46655 +}
46656 +
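+/* write() handler for the grsecurity control device: userland (normally the
+   gradm tool) passes a struct gr_arg_wrapper selecting one of the operations
+   handled by the switch below (status, enable, shutdown, reload, segvmod,
+   special role (un)authentication) */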
46657 +ssize_t
46658 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
46659 +{
46660 + struct gr_arg_wrapper uwrap;
46661 + unsigned char *sprole_salt = NULL;
46662 + unsigned char *sprole_sum = NULL;
46663 + int error = sizeof (struct gr_arg_wrapper);
46664 + int error2 = 0;
46665 +
46666 + mutex_lock(&gr_dev_mutex);
46667 +
46668 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
46669 + error = -EPERM;
46670 + goto out;
46671 + }
46672 +
46673 + if (count != sizeof (struct gr_arg_wrapper)) {
46674 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
46675 + error = -EINVAL;
46676 + goto out;
46677 + }
46678 +
46679 +
46680 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
46681 + gr_auth_expires = 0;
46682 + gr_auth_attempts = 0;
46683 + }
46684 +
46685 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
46686 + error = -EFAULT;
46687 + goto out;
46688 + }
46689 +
46690 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
46691 + error = -EINVAL;
46692 + goto out;
46693 + }
46694 +
46695 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
46696 + error = -EFAULT;
46697 + goto out;
46698 + }
46699 +
46700 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
46701 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
46702 + time_after(gr_auth_expires, get_seconds())) {
46703 + error = -EBUSY;
46704 + goto out;
46705 + }
46706 +
46707 + /* if non-root trying to do anything other than use a special role,
46708 + do not attempt authentication, do not count towards authentication
46709 + locking
46710 + */
46711 +
46712 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
46713 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
46714 + current_uid()) {
46715 + error = -EPERM;
46716 + goto out;
46717 + }
46718 +
46719 + /* ensure pw and special role name are null terminated */
46720 +
46721 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
46722 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
46723 +
46724 + /* Okay.
46725 +	 * We have enough of the argument structure (we have yet
46726 +	 * to copy_from_user the tables themselves).  Copy the tables
46727 +	 * only if we need them, i.e. for loading operations. */
46728 +
46729 + switch (gr_usermode->mode) {
46730 + case GR_STATUS:
46731 + if (gr_status & GR_READY) {
46732 + error = 1;
46733 + if (!gr_check_secure_terminal(current))
46734 + error = 3;
46735 + } else
46736 + error = 2;
46737 + goto out;
46738 + case GR_SHUTDOWN:
46739 + if ((gr_status & GR_READY)
46740 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
46741 + pax_open_kernel();
46742 + gr_status &= ~GR_READY;
46743 + pax_close_kernel();
46744 +
46745 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
46746 + free_variables();
46747 + memset(gr_usermode, 0, sizeof (struct gr_arg));
46748 + memset(gr_system_salt, 0, GR_SALT_LEN);
46749 + memset(gr_system_sum, 0, GR_SHA_LEN);
46750 + } else if (gr_status & GR_READY) {
46751 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
46752 + error = -EPERM;
46753 + } else {
46754 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
46755 + error = -EAGAIN;
46756 + }
46757 + break;
46758 + case GR_ENABLE:
46759 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
46760 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
46761 + else {
46762 + if (gr_status & GR_READY)
46763 + error = -EAGAIN;
46764 + else
46765 + error = error2;
46766 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
46767 + }
46768 + break;
46769 + case GR_RELOAD:
46770 + if (!(gr_status & GR_READY)) {
46771 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
46772 + error = -EAGAIN;
46773 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
46774 + lock_kernel();
46775 +
46776 + pax_open_kernel();
46777 + gr_status &= ~GR_READY;
46778 + pax_close_kernel();
46779 +
46780 + free_variables();
46781 + if (!(error2 = gracl_init(gr_usermode))) {
46782 + unlock_kernel();
46783 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
46784 + } else {
46785 + unlock_kernel();
46786 + error = error2;
46787 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
46788 + }
46789 + } else {
46790 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
46791 + error = -EPERM;
46792 + }
46793 + break;
46794 + case GR_SEGVMOD:
46795 + if (unlikely(!(gr_status & GR_READY))) {
46796 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
46797 + error = -EAGAIN;
46798 + break;
46799 + }
46800 +
46801 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
46802 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
46803 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
46804 + struct acl_subject_label *segvacl;
46805 + segvacl =
46806 + lookup_acl_subj_label(gr_usermode->segv_inode,
46807 + gr_usermode->segv_device,
46808 + current->role);
46809 + if (segvacl) {
46810 + segvacl->crashes = 0;
46811 + segvacl->expires = 0;
46812 + }
46813 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
46814 + gr_remove_uid(gr_usermode->segv_uid);
46815 + }
46816 + } else {
46817 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
46818 + error = -EPERM;
46819 + }
46820 + break;
46821 + case GR_SPROLE:
46822 + case GR_SPROLEPAM:
46823 + if (unlikely(!(gr_status & GR_READY))) {
46824 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
46825 + error = -EAGAIN;
46826 + break;
46827 + }
46828 +
46829 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
46830 + current->role->expires = 0;
46831 + current->role->auth_attempts = 0;
46832 + }
46833 +
46834 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
46835 + time_after(current->role->expires, get_seconds())) {
46836 + error = -EBUSY;
46837 + goto out;
46838 + }
46839 +
46840 + if (lookup_special_role_auth
46841 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
46842 + && ((!sprole_salt && !sprole_sum)
46843 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
46844 + char *p = "";
46845 + assign_special_role(gr_usermode->sp_role);
46846 + read_lock(&tasklist_lock);
46847 + if (current->real_parent)
46848 + p = current->real_parent->role->rolename;
46849 + read_unlock(&tasklist_lock);
46850 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
46851 + p, acl_sp_role_value);
46852 + } else {
46853 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
46854 + error = -EPERM;
46855 +			if (!(current->role->auth_attempts++))
46856 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
46857 +
46858 + goto out;
46859 + }
46860 + break;
46861 + case GR_UNSPROLE:
46862 + if (unlikely(!(gr_status & GR_READY))) {
46863 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
46864 + error = -EAGAIN;
46865 + break;
46866 + }
46867 +
46868 + if (current->role->roletype & GR_ROLE_SPECIAL) {
46869 + char *p = "";
46870 + int i = 0;
46871 +
46872 + read_lock(&tasklist_lock);
46873 + if (current->real_parent) {
46874 + p = current->real_parent->role->rolename;
46875 + i = current->real_parent->acl_role_id;
46876 + }
46877 + read_unlock(&tasklist_lock);
46878 +
46879 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
46880 + gr_set_acls(1);
46881 + } else {
46882 + error = -EPERM;
46883 + goto out;
46884 + }
46885 + break;
46886 + default:
46887 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
46888 + error = -EINVAL;
46889 + break;
46890 + }
46891 +
46892 + if (error != -EPERM)
46893 + goto out;
46894 +
46895 +	if (!(gr_auth_attempts++))
46896 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
46897 +
46898 + out:
46899 + mutex_unlock(&gr_dev_mutex);
46900 + return error;
46901 +}
46902 +
46903 +/* must be called with
46904 + rcu_read_lock();
46905 + read_lock(&tasklist_lock);
46906 + read_lock(&grsec_exec_file_lock);
46907 +*/
46908 +int gr_apply_subject_to_task(struct task_struct *task)
46909 +{
46910 + struct acl_object_label *obj;
46911 + char *tmpname;
46912 + struct acl_subject_label *tmpsubj;
46913 + struct file *filp;
46914 + struct name_entry *nmatch;
46915 +
46916 + filp = task->exec_file;
46917 + if (filp == NULL)
46918 + return 0;
46919 +
46920 +	/* the following applies the correct subject to
46921 +	   binaries that were already running when the
46922 +	   RBAC system was enabled and whose files have been
46923 +	   replaced or deleted since they were executed
46924 +	   -----
46925 +	   when the RBAC system starts, the inode/dev
46926 +	   from exec_file will be one that the RBAC system
46927 +	   is unaware of.  It only knows the inode/dev
46928 +	   of the present file on disk, or the absence
46929 +	   of it.
46930 +	*/
46931 + preempt_disable();
46932 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
46933 +
46934 + nmatch = lookup_name_entry(tmpname);
46935 + preempt_enable();
46936 + tmpsubj = NULL;
46937 + if (nmatch) {
46938 + if (nmatch->deleted)
46939 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
46940 + else
46941 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
46942 + if (tmpsubj != NULL)
46943 + task->acl = tmpsubj;
46944 + }
46945 + if (tmpsubj == NULL)
46946 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
46947 + task->role);
46948 + if (task->acl) {
46949 + struct acl_subject_label *curr;
46950 + curr = task->acl;
46951 +
46952 + task->is_writable = 0;
46953 + /* ignore additional mmap checks for processes that are writable
46954 + by the default ACL */
46955 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
46956 + if (unlikely(obj->mode & GR_WRITE))
46957 + task->is_writable = 1;
46958 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
46959 + if (unlikely(obj->mode & GR_WRITE))
46960 + task->is_writable = 1;
46961 +
46962 + gr_set_proc_res(task);
46963 +
46964 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46965 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
46966 +#endif
46967 + } else {
46968 + return 1;
46969 + }
46970 +
46971 + return 0;
46972 +}
46973 +
46974 +int
46975 +gr_set_acls(const int type)
46976 +{
46977 + struct task_struct *task, *task2;
46978 + struct acl_role_label *role = current->role;
46979 + __u16 acl_role_id = current->acl_role_id;
46980 + const struct cred *cred;
46981 + int ret;
46982 +
46983 + rcu_read_lock();
46984 + read_lock(&tasklist_lock);
46985 + read_lock(&grsec_exec_file_lock);
46986 + do_each_thread(task2, task) {
46987 +		/* check whether we're called from the exit handler;
46988 +		   if so, only replace ACLs that have inherited the admin
46989 +		   ACL */
46990 +
46991 + if (type && (task->role != role ||
46992 + task->acl_role_id != acl_role_id))
46993 + continue;
46994 +
46995 + task->acl_role_id = 0;
46996 + task->acl_sp_role = 0;
46997 +
46998 + if (task->exec_file) {
46999 + cred = __task_cred(task);
47000 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
47001 +
47002 + ret = gr_apply_subject_to_task(task);
47003 + if (ret) {
47004 + read_unlock(&grsec_exec_file_lock);
47005 + read_unlock(&tasklist_lock);
47006 + rcu_read_unlock();
47007 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
47008 + return ret;
47009 + }
47010 + } else {
47011 + // it's a kernel process
47012 + task->role = kernel_role;
47013 + task->acl = kernel_role->root_label;
47014 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
47015 + task->acl->mode &= ~GR_PROCFIND;
47016 +#endif
47017 + }
47018 + } while_each_thread(task2, task);
47019 + read_unlock(&grsec_exec_file_lock);
47020 + read_unlock(&tasklist_lock);
47021 + rcu_read_unlock();
47022 +
47023 + return 0;
47024 +}
47025 +
47026 +void
47027 +gr_learn_resource(const struct task_struct *task,
47028 + const int res, const unsigned long wanted, const int gt)
47029 +{
47030 + struct acl_subject_label *acl;
47031 + const struct cred *cred;
47032 +
47033 + if (unlikely((gr_status & GR_READY) &&
47034 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
47035 + goto skip_reslog;
47036 +
47037 +#ifdef CONFIG_GRKERNSEC_RESLOG
47038 + gr_log_resource(task, res, wanted, gt);
47039 +#endif
47040 + skip_reslog:
47041 +
47042 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
47043 + return;
47044 +
47045 + acl = task->acl;
47046 +
47047 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
47048 + !(acl->resmask & (1 << (unsigned short) res))))
47049 + return;
47050 +
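+	/* learning mode: when the request exceeds the subject's current limit,
+	   raise rlim_cur (and rlim_max if needed) by a per-resource bump and
+	   emit a learn log entry with the new values */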
47051 + if (wanted >= acl->res[res].rlim_cur) {
47052 + unsigned long res_add;
47053 +
47054 + res_add = wanted;
47055 + switch (res) {
47056 + case RLIMIT_CPU:
47057 + res_add += GR_RLIM_CPU_BUMP;
47058 + break;
47059 + case RLIMIT_FSIZE:
47060 + res_add += GR_RLIM_FSIZE_BUMP;
47061 + break;
47062 + case RLIMIT_DATA:
47063 + res_add += GR_RLIM_DATA_BUMP;
47064 + break;
47065 + case RLIMIT_STACK:
47066 + res_add += GR_RLIM_STACK_BUMP;
47067 + break;
47068 + case RLIMIT_CORE:
47069 + res_add += GR_RLIM_CORE_BUMP;
47070 + break;
47071 + case RLIMIT_RSS:
47072 + res_add += GR_RLIM_RSS_BUMP;
47073 + break;
47074 + case RLIMIT_NPROC:
47075 + res_add += GR_RLIM_NPROC_BUMP;
47076 + break;
47077 + case RLIMIT_NOFILE:
47078 + res_add += GR_RLIM_NOFILE_BUMP;
47079 + break;
47080 + case RLIMIT_MEMLOCK:
47081 + res_add += GR_RLIM_MEMLOCK_BUMP;
47082 + break;
47083 + case RLIMIT_AS:
47084 + res_add += GR_RLIM_AS_BUMP;
47085 + break;
47086 + case RLIMIT_LOCKS:
47087 + res_add += GR_RLIM_LOCKS_BUMP;
47088 + break;
47089 + case RLIMIT_SIGPENDING:
47090 + res_add += GR_RLIM_SIGPENDING_BUMP;
47091 + break;
47092 + case RLIMIT_MSGQUEUE:
47093 + res_add += GR_RLIM_MSGQUEUE_BUMP;
47094 + break;
47095 + case RLIMIT_NICE:
47096 + res_add += GR_RLIM_NICE_BUMP;
47097 + break;
47098 + case RLIMIT_RTPRIO:
47099 + res_add += GR_RLIM_RTPRIO_BUMP;
47100 + break;
47101 + case RLIMIT_RTTIME:
47102 + res_add += GR_RLIM_RTTIME_BUMP;
47103 + break;
47104 + }
47105 +
47106 + acl->res[res].rlim_cur = res_add;
47107 +
47108 + if (wanted > acl->res[res].rlim_max)
47109 + acl->res[res].rlim_max = res_add;
47110 +
47111 + /* only log the subject filename, since resource logging is supported for
47112 + single-subject learning only */
47113 + rcu_read_lock();
47114 + cred = __task_cred(task);
47115 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
47116 + task->role->roletype, cred->uid, cred->gid, acl->filename,
47117 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
47118 + "", (unsigned long) res, &task->signal->saved_ip);
47119 + rcu_read_unlock();
47120 + }
47121 +
47122 + return;
47123 +}
47124 +
47125 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
47126 +void
47127 +pax_set_initial_flags(struct linux_binprm *bprm)
47128 +{
47129 + struct task_struct *task = current;
47130 + struct acl_subject_label *proc;
47131 + unsigned long flags;
47132 +
47133 + if (unlikely(!(gr_status & GR_READY)))
47134 + return;
47135 +
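+	/* apply the subject's PaX overrides in two passes: first clear every
+	   flag the policy explicitly disables, then set every flag it
+	   explicitly enables */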
47136 + flags = pax_get_flags(task);
47137 +
47138 + proc = task->acl;
47139 +
47140 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
47141 + flags &= ~MF_PAX_PAGEEXEC;
47142 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
47143 + flags &= ~MF_PAX_SEGMEXEC;
47144 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
47145 + flags &= ~MF_PAX_RANDMMAP;
47146 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
47147 + flags &= ~MF_PAX_EMUTRAMP;
47148 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
47149 + flags &= ~MF_PAX_MPROTECT;
47150 +
47151 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
47152 + flags |= MF_PAX_PAGEEXEC;
47153 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
47154 + flags |= MF_PAX_SEGMEXEC;
47155 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
47156 + flags |= MF_PAX_RANDMMAP;
47157 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
47158 + flags |= MF_PAX_EMUTRAMP;
47159 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
47160 + flags |= MF_PAX_MPROTECT;
47161 +
47162 + pax_set_flags(task, flags);
47163 +
47164 + return;
47165 +}
47166 +#endif
47167 +
47168 +#ifdef CONFIG_SYSCTL
47169 +/* Eric Biederman likes breaking userland ABI and every inode-based security
47170 + system to save 35kb of memory */
47171 +
47172 +/* we modify the passed in filename, but adjust it back before returning */
47173 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
47174 +{
47175 + struct name_entry *nmatch;
47176 + char *p, *lastp = NULL;
47177 + struct acl_object_label *obj = NULL, *tmp;
47178 + struct acl_subject_label *tmpsubj;
47179 + char c = '\0';
47180 +
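+	/* walk the path from the full name back toward "/", temporarily
+	   truncating one component per iteration; the first matching object in
+	   the subject's parent chain (or one of its globbed children) wins */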
47181 + read_lock(&gr_inode_lock);
47182 +
47183 + p = name + len - 1;
47184 + do {
47185 + nmatch = lookup_name_entry(name);
47186 + if (lastp != NULL)
47187 + *lastp = c;
47188 +
47189 + if (nmatch == NULL)
47190 + goto next_component;
47191 + tmpsubj = current->acl;
47192 + do {
47193 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
47194 + if (obj != NULL) {
47195 + tmp = obj->globbed;
47196 + while (tmp) {
47197 + if (!glob_match(tmp->filename, name)) {
47198 + obj = tmp;
47199 + goto found_obj;
47200 + }
47201 + tmp = tmp->next;
47202 + }
47203 + goto found_obj;
47204 + }
47205 + } while ((tmpsubj = tmpsubj->parent_subject));
47206 +next_component:
47207 + /* end case */
47208 + if (p == name)
47209 + break;
47210 +
47211 + while (*p != '/')
47212 + p--;
47213 + if (p == name)
47214 + lastp = p + 1;
47215 + else {
47216 + lastp = p;
47217 + p--;
47218 + }
47219 + c = *lastp;
47220 + *lastp = '\0';
47221 + } while (1);
47222 +found_obj:
47223 + read_unlock(&gr_inode_lock);
47224 + /* obj returned will always be non-null */
47225 + return obj;
47226 +}
47227 +
47228 +/* returns 0 when allowing, non-zero on error
47229 + op of 0 is used for readdir, so we don't log the names of hidden files
47230 +*/
47231 +__u32
47232 +gr_handle_sysctl(const struct ctl_table *table, const int op)
47233 +{
47234 + ctl_table *tmp;
47235 + const char *proc_sys = "/proc/sys";
47236 + char *path;
47237 + struct acl_object_label *obj;
47238 + unsigned short len = 0, pos = 0, depth = 0, i;
47239 + __u32 err = 0;
47240 + __u32 mode = 0;
47241 +
47242 + if (unlikely(!(gr_status & GR_READY)))
47243 + return 0;
47244 +
47245 + /* for now, ignore operations on non-sysctl entries if it's not a
47246 +	   readdir */
47247 + if (table->child != NULL && op != 0)
47248 + return 0;
47249 +
47250 + mode |= GR_FIND;
47251 + /* it's only a read if it's an entry, read on dirs is for readdir */
47252 + if (op & MAY_READ)
47253 + mode |= GR_READ;
47254 + if (op & MAY_WRITE)
47255 + mode |= GR_WRITE;
47256 +
47257 + preempt_disable();
47258 +
47259 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
47260 +
47261 + /* it's only a read/write if it's an actual entry, not a dir
47262 + (which are opened for readdir)
47263 + */
47264 +
47265 + /* convert the requested sysctl entry into a pathname */
47266 +
47267 + for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
47268 + len += strlen(tmp->procname);
47269 + len++;
47270 + depth++;
47271 + }
47272 +
47273 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
47274 + /* deny */
47275 + goto out;
47276 + }
47277 +
47278 + memset(path, 0, PAGE_SIZE);
47279 +
47280 + memcpy(path, proc_sys, strlen(proc_sys));
47281 +
47282 + pos += strlen(proc_sys);
47283 +
47284 + for (; depth > 0; depth--) {
47285 + path[pos] = '/';
47286 + pos++;
47287 + for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
47288 + if (depth == i) {
47289 + memcpy(path + pos, tmp->procname,
47290 + strlen(tmp->procname));
47291 + pos += strlen(tmp->procname);
47292 + }
47293 + i++;
47294 + }
47295 + }
47296 +
47297 + obj = gr_lookup_by_name(path, pos);
47298 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
47299 +
47300 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
47301 + ((err & mode) != mode))) {
47302 + __u32 new_mode = mode;
47303 +
47304 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
47305 +
47306 + err = 0;
47307 + gr_log_learn_sysctl(path, new_mode);
47308 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
47309 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
47310 + err = -ENOENT;
47311 + } else if (!(err & GR_FIND)) {
47312 + err = -ENOENT;
47313 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
47314 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
47315 + path, (mode & GR_READ) ? " reading" : "",
47316 + (mode & GR_WRITE) ? " writing" : "");
47317 + err = -EACCES;
47318 + } else if ((err & mode) != mode) {
47319 + err = -EACCES;
47320 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
47321 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
47322 + path, (mode & GR_READ) ? " reading" : "",
47323 + (mode & GR_WRITE) ? " writing" : "");
47324 + err = 0;
47325 + } else
47326 + err = 0;
47327 +
47328 + out:
47329 + preempt_enable();
47330 +
47331 + return err;
47332 +}
47333 +#endif
47334 +
47335 +int
47336 +gr_handle_proc_ptrace(struct task_struct *task)
47337 +{
47338 + struct file *filp;
47339 + struct task_struct *tmp = task;
47340 + struct task_struct *curtemp = current;
47341 + __u32 retmode;
47342 +
47343 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
47344 + if (unlikely(!(gr_status & GR_READY)))
47345 + return 0;
47346 +#endif
47347 +
47348 + read_lock(&tasklist_lock);
47349 + read_lock(&grsec_exec_file_lock);
47350 + filp = task->exec_file;
47351 +
47352 + while (tmp->pid > 0) {
47353 + if (tmp == curtemp)
47354 + break;
47355 + tmp = tmp->real_parent;
47356 + }
47357 +
47358 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
47359 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
47360 + read_unlock(&grsec_exec_file_lock);
47361 + read_unlock(&tasklist_lock);
47362 + return 1;
47363 + }
47364 +
47365 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
47366 + if (!(gr_status & GR_READY)) {
47367 + read_unlock(&grsec_exec_file_lock);
47368 + read_unlock(&tasklist_lock);
47369 + return 0;
47370 + }
47371 +#endif
47372 +
47373 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
47374 + read_unlock(&grsec_exec_file_lock);
47375 + read_unlock(&tasklist_lock);
47376 +
47377 + if (retmode & GR_NOPTRACE)
47378 + return 1;
47379 +
47380 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
47381 + && (current->acl != task->acl || (current->acl != current->role->root_label
47382 + && current->pid != task->pid)))
47383 + return 1;
47384 +
47385 + return 0;
47386 +}
47387 +
47388 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
47389 +{
47390 + if (unlikely(!(gr_status & GR_READY)))
47391 + return;
47392 +
47393 + if (!(current->role->roletype & GR_ROLE_GOD))
47394 + return;
47395 +
47396 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
47397 + p->role->rolename, gr_task_roletype_to_char(p),
47398 + p->acl->filename);
47399 +}
47400 +
47401 +int
47402 +gr_handle_ptrace(struct task_struct *task, const long request)
47403 +{
47404 + struct task_struct *tmp = task;
47405 + struct task_struct *curtemp = current;
47406 + __u32 retmode;
47407 +
47408 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
47409 + if (unlikely(!(gr_status & GR_READY)))
47410 + return 0;
47411 +#endif
47412 +
47413 + read_lock(&tasklist_lock);
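+	/* walk up the target's ancestry; if we never reach the tracing task,
+	   the target is not our descendant and the stricter checks below may
+	   deny the ptrace */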
47414 + while (tmp->pid > 0) {
47415 + if (tmp == curtemp)
47416 + break;
47417 + tmp = tmp->real_parent;
47418 + }
47419 +
47420 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
47421 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
47422 + read_unlock(&tasklist_lock);
47423 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
47424 + return 1;
47425 + }
47426 + read_unlock(&tasklist_lock);
47427 +
47428 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
47429 + if (!(gr_status & GR_READY))
47430 + return 0;
47431 +#endif
47432 +
47433 + read_lock(&grsec_exec_file_lock);
47434 + if (unlikely(!task->exec_file)) {
47435 + read_unlock(&grsec_exec_file_lock);
47436 + return 0;
47437 + }
47438 +
47439 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
47440 + read_unlock(&grsec_exec_file_lock);
47441 +
47442 + if (retmode & GR_NOPTRACE) {
47443 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
47444 + return 1;
47445 + }
47446 +
47447 + if (retmode & GR_PTRACERD) {
47448 + switch (request) {
47449 + case PTRACE_POKETEXT:
47450 + case PTRACE_POKEDATA:
47451 + case PTRACE_POKEUSR:
47452 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
47453 + case PTRACE_SETREGS:
47454 + case PTRACE_SETFPREGS:
47455 +#endif
47456 +#ifdef CONFIG_X86
47457 + case PTRACE_SETFPXREGS:
47458 +#endif
47459 +#ifdef CONFIG_ALTIVEC
47460 + case PTRACE_SETVRREGS:
47461 +#endif
47462 + return 1;
47463 + default:
47464 + return 0;
47465 + }
47466 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
47467 + !(current->role->roletype & GR_ROLE_GOD) &&
47468 + (current->acl != task->acl)) {
47469 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
47470 + return 1;
47471 + }
47472 +
47473 + return 0;
47474 +}
47475 +
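+/* flag executable mappings of regular files that are writable under either
+   the default role's or the current role's root object; subjects carrying
+   GR_OVERRIDE and tasks already marked is_writable are exempt */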
47476 +static int is_writable_mmap(const struct file *filp)
47477 +{
47478 + struct task_struct *task = current;
47479 + struct acl_object_label *obj, *obj2;
47480 +
47481 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
47482 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
47483 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
47484 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
47485 + task->role->root_label);
47486 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
47487 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
47488 + return 1;
47489 + }
47490 + }
47491 + return 0;
47492 +}
47493 +
47494 +int
47495 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
47496 +{
47497 + __u32 mode;
47498 +
47499 + if (unlikely(!file || !(prot & PROT_EXEC)))
47500 + return 1;
47501 +
47502 + if (is_writable_mmap(file))
47503 + return 0;
47504 +
47505 + mode =
47506 + gr_search_file(file->f_path.dentry,
47507 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
47508 + file->f_path.mnt);
47509 +
47510 + if (!gr_tpe_allow(file))
47511 + return 0;
47512 +
47513 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
47514 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
47515 + return 0;
47516 + } else if (unlikely(!(mode & GR_EXEC))) {
47517 + return 0;
47518 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
47519 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
47520 + return 1;
47521 + }
47522 +
47523 + return 1;
47524 +}
47525 +
47526 +int
47527 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
47528 +{
47529 + __u32 mode;
47530 +
47531 + if (unlikely(!file || !(prot & PROT_EXEC)))
47532 + return 1;
47533 +
47534 + if (is_writable_mmap(file))
47535 + return 0;
47536 +
47537 + mode =
47538 + gr_search_file(file->f_path.dentry,
47539 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
47540 + file->f_path.mnt);
47541 +
47542 + if (!gr_tpe_allow(file))
47543 + return 0;
47544 +
47545 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
47546 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
47547 + return 0;
47548 + } else if (unlikely(!(mode & GR_EXEC))) {
47549 + return 0;
47550 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
47551 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
47552 + return 1;
47553 + }
47554 +
47555 + return 1;
47556 +}
47557 +
47558 +void
47559 +gr_acl_handle_psacct(struct task_struct *task, const long code)
47560 +{
47561 + unsigned long runtime;
47562 + unsigned long cputime;
47563 + unsigned int wday, cday;
47564 + __u8 whr, chr;
47565 + __u8 wmin, cmin;
47566 + __u8 wsec, csec;
47567 + struct timespec timeval;
47568 +
47569 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
47570 + !(task->acl->mode & GR_PROCACCT)))
47571 + return;
47572 +
47573 + do_posix_clock_monotonic_gettime(&timeval);
47574 + runtime = timeval.tv_sec - task->start_time.tv_sec;
47575 + wday = runtime / (3600 * 24);
47576 + runtime -= wday * (3600 * 24);
47577 + whr = runtime / 3600;
47578 + runtime -= whr * 3600;
47579 + wmin = runtime / 60;
47580 + runtime -= wmin * 60;
47581 + wsec = runtime;
47582 +
47583 + cputime = (task->utime + task->stime) / HZ;
47584 + cday = cputime / (3600 * 24);
47585 + cputime -= cday * (3600 * 24);
47586 + chr = cputime / 3600;
47587 + cputime -= chr * 3600;
47588 + cmin = cputime / 60;
47589 + cputime -= cmin * 60;
47590 + csec = cputime;
47591 +
47592 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
47593 +
47594 + return;
47595 +}
47596 +
47597 +void gr_set_kernel_label(struct task_struct *task)
47598 +{
47599 + if (gr_status & GR_READY) {
47600 + task->role = kernel_role;
47601 + task->acl = kernel_role->root_label;
47602 + }
47603 + return;
47604 +}
47605 +
47606 +#ifdef CONFIG_TASKSTATS
47607 +int gr_is_taskstats_denied(int pid)
47608 +{
47609 + struct task_struct *task;
47610 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47611 + const struct cred *cred;
47612 +#endif
47613 + int ret = 0;
47614 +
47615 + /* restrict taskstats viewing to un-chrooted root users
47616 + who have the 'view' subject flag if the RBAC system is enabled
47617 + */
47618 +
47619 + rcu_read_lock();
47620 + read_lock(&tasklist_lock);
47621 + task = find_task_by_vpid(pid);
47622 + if (task) {
47623 +#ifdef CONFIG_GRKERNSEC_CHROOT
47624 + if (proc_is_chrooted(task))
47625 + ret = -EACCES;
47626 +#endif
47627 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47628 + cred = __task_cred(task);
47629 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47630 + if (cred->uid != 0)
47631 + ret = -EACCES;
47632 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47633 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
47634 + ret = -EACCES;
47635 +#endif
47636 +#endif
47637 + if (gr_status & GR_READY) {
47638 + if (!(task->acl->mode & GR_VIEW))
47639 + ret = -EACCES;
47640 + }
47641 + } else
47642 + ret = -ENOENT;
47643 +
47644 + read_unlock(&tasklist_lock);
47645 + rcu_read_unlock();
47646 +
47647 + return ret;
47648 +}
47649 +#endif
47650 +
47651 +/* AUXV entries are filled via a descendant of search_binary_handler
47652 + after we've already applied the subject for the target
47653 +*/
47654 +int gr_acl_enable_at_secure(void)
47655 +{
47656 + if (unlikely(!(gr_status & GR_READY)))
47657 + return 0;
47658 +
47659 + if (current->acl->mode & GR_ATSECURE)
47660 + return 1;
47661 +
47662 + return 0;
47663 +}
47664 +
47665 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
47666 +{
47667 + struct task_struct *task = current;
47668 + struct dentry *dentry = file->f_path.dentry;
47669 + struct vfsmount *mnt = file->f_path.mnt;
47670 + struct acl_object_label *obj, *tmp;
47671 + struct acl_subject_label *subj;
47672 + unsigned int bufsize;
47673 + int is_not_root;
47674 + char *path;
47675 + dev_t dev = __get_dev(dentry);
47676 +
47677 + if (unlikely(!(gr_status & GR_READY)))
47678 + return 1;
47679 +
47680 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
47681 + return 1;
47682 +
47683 + /* ignore Eric Biederman */
47684 + if (IS_PRIVATE(dentry->d_inode))
47685 + return 1;
47686 +
47687 + subj = task->acl;
47688 + do {
47689 + obj = lookup_acl_obj_label(ino, dev, subj);
47690 + if (obj != NULL)
47691 + return (obj->mode & GR_FIND) ? 1 : 0;
47692 + } while ((subj = subj->parent_subject));
47693 +
47694 +	/* this is purely an optimization, since we're looking up an object
47695 +	   for the directory we're doing a readdir on.
47696 +	   If it's possible for any globbed object to match the entry we're
47697 +	   filling into the directory, then the object we find here will be
47698 +	   an anchor point with the globbed objects attached.
47699 +	*/
47700 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
47701 + if (obj->globbed == NULL)
47702 + return (obj->mode & GR_FIND) ? 1 : 0;
47703 +
47704 + is_not_root = ((obj->filename[0] == '/') &&
47705 + (obj->filename[1] == '\0')) ? 0 : 1;
47706 + bufsize = PAGE_SIZE - namelen - is_not_root;
47707 +
47708 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
47709 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
47710 + return 1;
47711 +
47712 + preempt_disable();
47713 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
47714 + bufsize);
47715 +
47716 + bufsize = strlen(path);
47717 +
47718 + /* if base is "/", don't append an additional slash */
47719 + if (is_not_root)
47720 + *(path + bufsize) = '/';
47721 + memcpy(path + bufsize + is_not_root, name, namelen);
47722 + *(path + bufsize + namelen + is_not_root) = '\0';
47723 +
47724 + tmp = obj->globbed;
47725 + while (tmp) {
47726 + if (!glob_match(tmp->filename, path)) {
47727 + preempt_enable();
47728 + return (tmp->mode & GR_FIND) ? 1 : 0;
47729 + }
47730 + tmp = tmp->next;
47731 + }
47732 + preempt_enable();
47733 + return (obj->mode & GR_FIND) ? 1 : 0;
47734 +}
47735 +
47736 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
47737 +EXPORT_SYMBOL(gr_acl_is_enabled);
47738 +#endif
47739 +EXPORT_SYMBOL(gr_learn_resource);
47740 +EXPORT_SYMBOL(gr_set_kernel_label);
47741 +#ifdef CONFIG_SECURITY
47742 +EXPORT_SYMBOL(gr_check_user_change);
47743 +EXPORT_SYMBOL(gr_check_group_change);
47744 +#endif
47745 +
47746 diff -urNp linux-2.6.32.41/grsecurity/gracl_cap.c linux-2.6.32.41/grsecurity/gracl_cap.c
47747 --- linux-2.6.32.41/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
47748 +++ linux-2.6.32.41/grsecurity/gracl_cap.c 2011-04-17 15:56:46.000000000 -0400
47749 @@ -0,0 +1,138 @@
47750 +#include <linux/kernel.h>
47751 +#include <linux/module.h>
47752 +#include <linux/sched.h>
47753 +#include <linux/gracl.h>
47754 +#include <linux/grsecurity.h>
47755 +#include <linux/grinternal.h>
47756 +
47757 +static const char *captab_log[] = {
47758 + "CAP_CHOWN",
47759 + "CAP_DAC_OVERRIDE",
47760 + "CAP_DAC_READ_SEARCH",
47761 + "CAP_FOWNER",
47762 + "CAP_FSETID",
47763 + "CAP_KILL",
47764 + "CAP_SETGID",
47765 + "CAP_SETUID",
47766 + "CAP_SETPCAP",
47767 + "CAP_LINUX_IMMUTABLE",
47768 + "CAP_NET_BIND_SERVICE",
47769 + "CAP_NET_BROADCAST",
47770 + "CAP_NET_ADMIN",
47771 + "CAP_NET_RAW",
47772 + "CAP_IPC_LOCK",
47773 + "CAP_IPC_OWNER",
47774 + "CAP_SYS_MODULE",
47775 + "CAP_SYS_RAWIO",
47776 + "CAP_SYS_CHROOT",
47777 + "CAP_SYS_PTRACE",
47778 + "CAP_SYS_PACCT",
47779 + "CAP_SYS_ADMIN",
47780 + "CAP_SYS_BOOT",
47781 + "CAP_SYS_NICE",
47782 + "CAP_SYS_RESOURCE",
47783 + "CAP_SYS_TIME",
47784 + "CAP_SYS_TTY_CONFIG",
47785 + "CAP_MKNOD",
47786 + "CAP_LEASE",
47787 + "CAP_AUDIT_WRITE",
47788 + "CAP_AUDIT_CONTROL",
47789 + "CAP_SETFCAP",
47790 + "CAP_MAC_OVERRIDE",
47791 + "CAP_MAC_ADMIN"
47792 +};
47793 +
47794 +EXPORT_SYMBOL(gr_is_capable);
47795 +EXPORT_SYMBOL(gr_is_capable_nolog);
47796 +
47797 +int
47798 +gr_is_capable(const int cap)
47799 +{
47800 + struct task_struct *task = current;
47801 + const struct cred *cred = current_cred();
47802 + struct acl_subject_label *curracl;
47803 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
47804 + kernel_cap_t cap_audit = __cap_empty_set;
47805 +
47806 + if (!gr_acl_is_enabled())
47807 + return 1;
47808 +
47809 + curracl = task->acl;
47810 +
47811 + cap_drop = curracl->cap_lower;
47812 + cap_mask = curracl->cap_mask;
47813 + cap_audit = curracl->cap_invert_audit;
47814 +
47815 + while ((curracl = curracl->parent_subject)) {
47816 +		/* if the cap isn't specified in the current computed mask but is specified in the
47817 +		   current level subject, and is lowered in the current level subject, then add
47818 +		   it to the set of dropped capabilities;
47819 +		   otherwise, add the current level subject's mask to the current computed mask
47820 +		*/
47821 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
47822 + cap_raise(cap_mask, cap);
47823 + if (cap_raised(curracl->cap_lower, cap))
47824 + cap_raise(cap_drop, cap);
47825 + if (cap_raised(curracl->cap_invert_audit, cap))
47826 + cap_raise(cap_audit, cap);
47827 + }
47828 + }
47829 +
47830 + if (!cap_raised(cap_drop, cap)) {
47831 + if (cap_raised(cap_audit, cap))
47832 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
47833 + return 1;
47834 + }
47835 +
47836 + curracl = task->acl;
47837 +
47838 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
47839 + && cap_raised(cred->cap_effective, cap)) {
47840 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
47841 + task->role->roletype, cred->uid,
47842 + cred->gid, task->exec_file ?
47843 + gr_to_filename(task->exec_file->f_path.dentry,
47844 + task->exec_file->f_path.mnt) : curracl->filename,
47845 + curracl->filename, 0UL,
47846 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
47847 + return 1;
47848 + }
47849 +
47850 + if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
47851 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
47852 + return 0;
47853 +}
47854 +
47855 +int
47856 +gr_is_capable_nolog(const int cap)
47857 +{
47858 + struct acl_subject_label *curracl;
47859 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
47860 +
47861 + if (!gr_acl_is_enabled())
47862 + return 1;
47863 +
47864 + curracl = current->acl;
47865 +
47866 + cap_drop = curracl->cap_lower;
47867 + cap_mask = curracl->cap_mask;
47868 +
47869 + while ((curracl = curracl->parent_subject)) {
47870 +		/* if the cap isn't specified in the current computed mask but is specified in the
47871 +		   current level subject, and is lowered in the current level subject, then add
47872 +		   it to the set of dropped capabilities;
47873 +		   otherwise, add the current level subject's mask to the current computed mask
47874 +		*/
47875 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
47876 + cap_raise(cap_mask, cap);
47877 + if (cap_raised(curracl->cap_lower, cap))
47878 + cap_raise(cap_drop, cap);
47879 + }
47880 + }
47881 +
47882 + if (!cap_raised(cap_drop, cap))
47883 + return 1;
47884 +
47885 + return 0;
47886 +}
47887 +
47888 diff -urNp linux-2.6.32.41/grsecurity/gracl_fs.c linux-2.6.32.41/grsecurity/gracl_fs.c
47889 --- linux-2.6.32.41/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
47890 +++ linux-2.6.32.41/grsecurity/gracl_fs.c 2011-04-17 15:56:46.000000000 -0400
47891 @@ -0,0 +1,431 @@
47892 +#include <linux/kernel.h>
47893 +#include <linux/sched.h>
47894 +#include <linux/types.h>
47895 +#include <linux/fs.h>
47896 +#include <linux/file.h>
47897 +#include <linux/stat.h>
47898 +#include <linux/grsecurity.h>
47899 +#include <linux/grinternal.h>
47900 +#include <linux/gracl.h>
47901 +
47902 +__u32
47903 +gr_acl_handle_hidden_file(const struct dentry * dentry,
47904 + const struct vfsmount * mnt)
47905 +{
47906 + __u32 mode;
47907 +
47908 + if (unlikely(!dentry->d_inode))
47909 + return GR_FIND;
47910 +
47911 + mode =
47912 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
47913 +
47914 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
47915 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
47916 + return mode;
47917 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
47918 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
47919 + return 0;
47920 + } else if (unlikely(!(mode & GR_FIND)))
47921 + return 0;
47922 +
47923 + return GR_FIND;
47924 +}
47925 +
47926 +__u32
47927 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
47928 + const int fmode)
47929 +{
47930 + __u32 reqmode = GR_FIND;
47931 + __u32 mode;
47932 +
47933 + if (unlikely(!dentry->d_inode))
47934 + return reqmode;
47935 +
47936 + if (unlikely(fmode & O_APPEND))
47937 + reqmode |= GR_APPEND;
47938 + else if (unlikely(fmode & FMODE_WRITE))
47939 + reqmode |= GR_WRITE;
47940 + if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
47941 + reqmode |= GR_READ;
47942 + if ((fmode & FMODE_GREXEC) && (fmode & FMODE_EXEC))
47943 + reqmode &= ~GR_READ;
47944 + mode =
47945 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
47946 + mnt);
47947 +
47948 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
47949 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
47950 + reqmode & GR_READ ? " reading" : "",
47951 + reqmode & GR_WRITE ? " writing" : reqmode &
47952 + GR_APPEND ? " appending" : "");
47953 + return reqmode;
47954 + } else
47955 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
47956 + {
47957 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
47958 + reqmode & GR_READ ? " reading" : "",
47959 + reqmode & GR_WRITE ? " writing" : reqmode &
47960 + GR_APPEND ? " appending" : "");
47961 + return 0;
47962 + } else if (unlikely((mode & reqmode) != reqmode))
47963 + return 0;
47964 +
47965 + return reqmode;
47966 +}
47967 +
47968 +__u32
47969 +gr_acl_handle_creat(const struct dentry * dentry,
47970 + const struct dentry * p_dentry,
47971 + const struct vfsmount * p_mnt, const int fmode,
47972 + const int imode)
47973 +{
47974 + __u32 reqmode = GR_WRITE | GR_CREATE;
47975 + __u32 mode;
47976 +
47977 + if (unlikely(fmode & O_APPEND))
47978 + reqmode |= GR_APPEND;
47979 + if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
47980 + reqmode |= GR_READ;
47981 + if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
47982 + reqmode |= GR_SETID;
47983 +
47984 + mode =
47985 + gr_check_create(dentry, p_dentry, p_mnt,
47986 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
47987 +
47988 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
47989 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
47990 + reqmode & GR_READ ? " reading" : "",
47991 + reqmode & GR_WRITE ? " writing" : reqmode &
47992 + GR_APPEND ? " appending" : "");
47993 + return reqmode;
47994 + } else
47995 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
47996 + {
47997 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
47998 + reqmode & GR_READ ? " reading" : "",
47999 + reqmode & GR_WRITE ? " writing" : reqmode &
48000 + GR_APPEND ? " appending" : "");
48001 + return 0;
48002 + } else if (unlikely((mode & reqmode) != reqmode))
48003 + return 0;
48004 +
48005 + return reqmode;
48006 +}
48007 +
48008 +__u32
48009 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
48010 + const int fmode)
48011 +{
48012 + __u32 mode, reqmode = GR_FIND;
48013 +
48014 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
48015 + reqmode |= GR_EXEC;
48016 + if (fmode & S_IWOTH)
48017 + reqmode |= GR_WRITE;
48018 + if (fmode & S_IROTH)
48019 + reqmode |= GR_READ;
48020 +
48021 + mode =
48022 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
48023 + mnt);
48024 +
48025 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48026 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
48027 + reqmode & GR_READ ? " reading" : "",
48028 + reqmode & GR_WRITE ? " writing" : "",
48029 + reqmode & GR_EXEC ? " executing" : "");
48030 + return reqmode;
48031 + } else
48032 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48033 + {
48034 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
48035 + reqmode & GR_READ ? " reading" : "",
48036 + reqmode & GR_WRITE ? " writing" : "",
48037 + reqmode & GR_EXEC ? " executing" : "");
48038 + return 0;
48039 + } else if (unlikely((mode & reqmode) != reqmode))
48040 + return 0;
48041 +
48042 + return reqmode;
48043 +}
48044 +
48045 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
48046 +{
48047 + __u32 mode;
48048 +
48049 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
48050 +
48051 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
48052 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
48053 + return mode;
48054 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
48055 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
48056 + return 0;
48057 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
48058 + return 0;
48059 +
48060 + return (reqmode);
48061 +}
48062 +
48063 +__u32
48064 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
48065 +{
48066 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
48067 +}
48068 +
48069 +__u32
48070 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
48071 +{
48072 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
48073 +}
48074 +
48075 +__u32
48076 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
48077 +{
48078 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
48079 +}
48080 +
48081 +__u32
48082 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
48083 +{
48084 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
48085 +}
48086 +
48087 +__u32
48088 +gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
48089 + mode_t mode)
48090 +{
48091 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
48092 + return 1;
48093 +
48094 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
48095 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
48096 + GR_FCHMOD_ACL_MSG);
48097 + } else {
48098 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
48099 + }
48100 +}
48101 +
48102 +__u32
48103 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
48104 + mode_t mode)
48105 +{
48106 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
48107 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
48108 + GR_CHMOD_ACL_MSG);
48109 + } else {
48110 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
48111 + }
48112 +}
48113 +
48114 +__u32
48115 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
48116 +{
48117 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
48118 +}
48119 +
48120 +__u32
48121 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
48122 +{
48123 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
48124 +}
48125 +
48126 +__u32
48127 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
48128 +{
48129 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
48130 +}
48131 +
48132 +__u32
48133 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
48134 +{
48135 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
48136 + GR_UNIXCONNECT_ACL_MSG);
48137 +}
48138 +
48139 +/* hardlinks require at minimum create permission;
48140 +   any additional privilege required is based on the
48141 +   privilege of the file being linked to
48142 +*/
48143 +__u32
48144 +gr_acl_handle_link(const struct dentry * new_dentry,
48145 + const struct dentry * parent_dentry,
48146 + const struct vfsmount * parent_mnt,
48147 + const struct dentry * old_dentry,
48148 + const struct vfsmount * old_mnt, const char *to)
48149 +{
48150 + __u32 mode;
48151 + __u32 needmode = GR_CREATE | GR_LINK;
48152 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
48153 +
48154 + mode =
48155 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
48156 + old_mnt);
48157 +
48158 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
48159 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
48160 + return mode;
48161 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
48162 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
48163 + return 0;
48164 + } else if (unlikely((mode & needmode) != needmode))
48165 + return 0;
48166 +
48167 + return 1;
48168 +}
48169 +
48170 +__u32
48171 +gr_acl_handle_symlink(const struct dentry * new_dentry,
48172 + const struct dentry * parent_dentry,
48173 + const struct vfsmount * parent_mnt, const char *from)
48174 +{
48175 + __u32 needmode = GR_WRITE | GR_CREATE;
48176 + __u32 mode;
48177 +
48178 + mode =
48179 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
48180 + GR_CREATE | GR_AUDIT_CREATE |
48181 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
48182 +
48183 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
48184 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
48185 + return mode;
48186 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
48187 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
48188 + return 0;
48189 + } else if (unlikely((mode & needmode) != needmode))
48190 + return 0;
48191 +
48192 + return (GR_WRITE | GR_CREATE);
48193 +}
48194 +
48195 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
48196 +{
48197 + __u32 mode;
48198 +
48199 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
48200 +
48201 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
48202 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
48203 + return mode;
48204 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
48205 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
48206 + return 0;
48207 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
48208 + return 0;
48209 +
48210 + return (reqmode);
48211 +}
48212 +
48213 +__u32
48214 +gr_acl_handle_mknod(const struct dentry * new_dentry,
48215 + const struct dentry * parent_dentry,
48216 + const struct vfsmount * parent_mnt,
48217 + const int mode)
48218 +{
48219 + __u32 reqmode = GR_WRITE | GR_CREATE;
48220 + if (unlikely(mode & (S_ISUID | S_ISGID)))
48221 + reqmode |= GR_SETID;
48222 +
48223 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
48224 + reqmode, GR_MKNOD_ACL_MSG);
48225 +}
48226 +
48227 +__u32
48228 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
48229 + const struct dentry *parent_dentry,
48230 + const struct vfsmount *parent_mnt)
48231 +{
48232 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
48233 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
48234 +}
48235 +
48236 +#define RENAME_CHECK_SUCCESS(old, new) \
48237 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
48238 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
48239 +
48240 +int
48241 +gr_acl_handle_rename(struct dentry *new_dentry,
48242 + struct dentry *parent_dentry,
48243 + const struct vfsmount *parent_mnt,
48244 + struct dentry *old_dentry,
48245 + struct inode *old_parent_inode,
48246 + struct vfsmount *old_mnt, const char *newname)
48247 +{
48248 + __u32 comp1, comp2;
48249 + int error = 0;
48250 +
48251 + if (unlikely(!gr_acl_is_enabled()))
48252 + return 0;
48253 +
48254 + if (!new_dentry->d_inode) {
48255 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
48256 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
48257 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
48258 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
48259 + GR_DELETE | GR_AUDIT_DELETE |
48260 + GR_AUDIT_READ | GR_AUDIT_WRITE |
48261 + GR_SUPPRESS, old_mnt);
48262 + } else {
48263 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
48264 + GR_CREATE | GR_DELETE |
48265 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
48266 + GR_AUDIT_READ | GR_AUDIT_WRITE |
48267 + GR_SUPPRESS, parent_mnt);
48268 + comp2 =
48269 + gr_search_file(old_dentry,
48270 + GR_READ | GR_WRITE | GR_AUDIT_READ |
48271 + GR_DELETE | GR_AUDIT_DELETE |
48272 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
48273 + }
48274 +
48275 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
48276 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
48277 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
48278 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
48279 + && !(comp2 & GR_SUPPRESS)) {
48280 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
48281 + error = -EACCES;
48282 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
48283 + error = -EACCES;
48284 +
48285 + return error;
48286 +}
48287 +
48288 +void
48289 +gr_acl_handle_exit(void)
48290 +{
48291 + u16 id;
48292 + char *rolename;
48293 + struct file *exec_file;
48294 +
48295 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
48296 + !(current->role->roletype & GR_ROLE_PERSIST))) {
48297 + id = current->acl_role_id;
48298 + rolename = current->role->rolename;
48299 + gr_set_acls(1);
48300 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
48301 + }
48302 +
48303 + write_lock(&grsec_exec_file_lock);
48304 + exec_file = current->exec_file;
48305 + current->exec_file = NULL;
48306 + write_unlock(&grsec_exec_file_lock);
48307 +
48308 + if (exec_file)
48309 + fput(exec_file);
48310 +}
48311 +
48312 +int
48313 +gr_acl_handle_procpidmem(const struct task_struct *task)
48314 +{
48315 + if (unlikely(!gr_acl_is_enabled()))
48316 + return 0;
48317 +
48318 + if (task != current && task->acl->mode & GR_PROTPROCFD)
48319 + return -EACCES;
48320 +
48321 + return 0;
48322 +}
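The generic_fs_handler() above funnels every filesystem hook into one three-way decision: grant (with an audit log) when the subject holds all requested bits plus a matching audit bit, deny with a denial log when bits are missing and GR_SUPPRESS is clear, and deny silently otherwise. A minimal standalone sketch of that decision follows; the flag values are illustrative placeholders, not the kernel's real GR_* constants.

/* Sketch of the generic_fs_handler() decision, userspace-compilable. */
#include <stdio.h>

#define GR_READ     0x01u
#define GR_WRITE    0x02u
#define GR_AUDITS   0x10u  /* stand-in for "some audit bit was granted" */
#define GR_SUPPRESS 0x20u  /* stand-in for "suppress denial logging" */

static unsigned int decide(unsigned int mode, unsigned int reqmode)
{
	if (((mode & reqmode) == reqmode) && (mode & GR_AUDITS)) {
		printf("grant + audit log\n");
		return mode;
	} else if (((mode & reqmode) != reqmode) && !(mode & GR_SUPPRESS)) {
		printf("deny + denial log\n");
		return 0;
	} else if ((mode & reqmode) != reqmode) {
		printf("deny silently (GR_SUPPRESS)\n");
		return 0;
	}
	return reqmode;  /* granted, nothing worth auditing */
}

int main(void)
{
	decide(GR_READ | GR_WRITE | GR_AUDITS, GR_WRITE); /* grant + audit */
	decide(GR_READ, GR_WRITE);                        /* deny + log   */
	decide(GR_READ | GR_SUPPRESS, GR_WRITE);          /* silent deny  */
	decide(GR_READ | GR_WRITE, GR_WRITE);             /* plain grant  */
	return 0;
}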
48323 diff -urNp linux-2.6.32.41/grsecurity/gracl_ip.c linux-2.6.32.41/grsecurity/gracl_ip.c
48324 --- linux-2.6.32.41/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
48325 +++ linux-2.6.32.41/grsecurity/gracl_ip.c 2011-04-17 15:56:46.000000000 -0400
48326 @@ -0,0 +1,382 @@
48327 +#include <linux/kernel.h>
48328 +#include <asm/uaccess.h>
48329 +#include <asm/errno.h>
48330 +#include <net/sock.h>
48331 +#include <linux/file.h>
48332 +#include <linux/fs.h>
48333 +#include <linux/net.h>
48334 +#include <linux/in.h>
48335 +#include <linux/skbuff.h>
48336 +#include <linux/ip.h>
48337 +#include <linux/udp.h>
48338 +#include <linux/smp_lock.h>
48339 +#include <linux/types.h>
48340 +#include <linux/sched.h>
48341 +#include <linux/netdevice.h>
48342 +#include <linux/inetdevice.h>
48343 +#include <linux/gracl.h>
48344 +#include <linux/grsecurity.h>
48345 +#include <linux/grinternal.h>
48346 +
48347 +#define GR_BIND 0x01
48348 +#define GR_CONNECT 0x02
48349 +#define GR_INVERT 0x04
48350 +#define GR_BINDOVERRIDE 0x08
48351 +#define GR_CONNECTOVERRIDE 0x10
48352 +#define GR_SOCK_FAMILY 0x20
48353 +
48354 +static const char * gr_protocols[IPPROTO_MAX] = {
48355 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
48356 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
48357 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
48358 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
48359 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
48360 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
48361 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
48362 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
48363 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
48364 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
48365 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
48366 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
48367 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
48368 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
48369 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
48370 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
48371 +	"sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
48372 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
48373 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
48374 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
48375 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
48376 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
48377 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
48378 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
48379 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
48380 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
48381 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
48382 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
48383 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
48384 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
48385 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
48386 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
48387 + };
48388 +
48389 +static const char * gr_socktypes[SOCK_MAX] = {
48390 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
48391 + "unknown:7", "unknown:8", "unknown:9", "packet"
48392 + };
48393 +
48394 +static const char * gr_sockfamilies[AF_MAX+1] = {
48395 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
48396 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
48397 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
48398 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
48399 + };
48400 +
48401 +const char *
48402 +gr_proto_to_name(unsigned char proto)
48403 +{
48404 + return gr_protocols[proto];
48405 +}
48406 +
48407 +const char *
48408 +gr_socktype_to_name(unsigned char type)
48409 +{
48410 + return gr_socktypes[type];
48411 +}
48412 +
48413 +const char *
48414 +gr_sockfamily_to_name(unsigned char family)
48415 +{
48416 + return gr_sockfamilies[family];
48417 +}
48418 +
48419 +int
48420 +gr_search_socket(const int domain, const int type, const int protocol)
48421 +{
48422 + struct acl_subject_label *curr;
48423 + const struct cred *cred = current_cred();
48424 +
48425 + if (unlikely(!gr_acl_is_enabled()))
48426 + goto exit;
48427 +
48428 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
48429 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
48430 + goto exit; // let the kernel handle it
48431 +
48432 + curr = current->acl;
48433 +
48434 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
48435 + /* the family is allowed, if this is PF_INET allow it only if
48436 + the extra sock type/protocol checks pass */
48437 + if (domain == PF_INET)
48438 + goto inet_check;
48439 + goto exit;
48440 + } else {
48441 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
48442 + __u32 fakeip = 0;
48443 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
48444 + current->role->roletype, cred->uid,
48445 + cred->gid, current->exec_file ?
48446 + gr_to_filename(current->exec_file->f_path.dentry,
48447 + current->exec_file->f_path.mnt) :
48448 + curr->filename, curr->filename,
48449 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
48450 + &current->signal->saved_ip);
48451 + goto exit;
48452 + }
48453 + goto exit_fail;
48454 + }
48455 +
48456 +inet_check:
48457 + /* the rest of this checking is for IPv4 only */
48458 + if (!curr->ips)
48459 + goto exit;
48460 +
48461 + if ((curr->ip_type & (1 << type)) &&
48462 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
48463 + goto exit;
48464 +
48465 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
48466 +		/* we don't place acls on raw sockets, and sometimes
48467 + dgram/ip sockets are opened for ioctl and not
48468 + bind/connect, so we'll fake a bind learn log */
48469 + if (type == SOCK_RAW || type == SOCK_PACKET) {
48470 + __u32 fakeip = 0;
48471 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
48472 + current->role->roletype, cred->uid,
48473 + cred->gid, current->exec_file ?
48474 + gr_to_filename(current->exec_file->f_path.dentry,
48475 + current->exec_file->f_path.mnt) :
48476 + curr->filename, curr->filename,
48477 + &fakeip, 0, type,
48478 + protocol, GR_CONNECT, &current->signal->saved_ip);
48479 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
48480 + __u32 fakeip = 0;
48481 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
48482 + current->role->roletype, cred->uid,
48483 + cred->gid, current->exec_file ?
48484 + gr_to_filename(current->exec_file->f_path.dentry,
48485 + current->exec_file->f_path.mnt) :
48486 + curr->filename, curr->filename,
48487 + &fakeip, 0, type,
48488 + protocol, GR_BIND, &current->signal->saved_ip);
48489 + }
48490 + /* we'll log when they use connect or bind */
48491 + goto exit;
48492 + }
48493 +
48494 +exit_fail:
48495 + if (domain == PF_INET)
48496 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
48497 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
48498 + else
48499 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
48500 + gr_socktype_to_name(type), protocol);
48501 +
48502 + return 0;
48503 +exit:
48504 + return 1;
48505 +}
48506 +
48507 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
48508 +{
48509 + if ((ip->mode & mode) &&
48510 + (ip_port >= ip->low) &&
48511 + (ip_port <= ip->high) &&
48512 + ((ntohl(ip_addr) & our_netmask) ==
48513 + (ntohl(our_addr) & our_netmask))
48514 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
48515 + && (ip->type & (1 << type))) {
48516 + if (ip->mode & GR_INVERT)
48517 + return 2; // specifically denied
48518 + else
48519 + return 1; // allowed
48520 + }
48521 +
48522 + return 0; // not specifically allowed, may continue parsing
48523 +}
48524 +
48525 +static int
48526 +gr_search_connectbind(const int full_mode, struct sock *sk,
48527 + struct sockaddr_in *addr, const int type)
48528 +{
48529 + char iface[IFNAMSIZ] = {0};
48530 + struct acl_subject_label *curr;
48531 + struct acl_ip_label *ip;
48532 + struct inet_sock *isk;
48533 + struct net_device *dev;
48534 + struct in_device *idev;
48535 + unsigned long i;
48536 + int ret;
48537 + int mode = full_mode & (GR_BIND | GR_CONNECT);
48538 + __u32 ip_addr = 0;
48539 + __u32 our_addr;
48540 + __u32 our_netmask;
48541 + char *p;
48542 + __u16 ip_port = 0;
48543 + const struct cred *cred = current_cred();
48544 +
48545 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
48546 + return 0;
48547 +
48548 + curr = current->acl;
48549 + isk = inet_sk(sk);
48550 +
48551 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
48552 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
48553 + addr->sin_addr.s_addr = curr->inaddr_any_override;
48554 + if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
48555 + struct sockaddr_in saddr;
48556 + int err;
48557 +
48558 + saddr.sin_family = AF_INET;
48559 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
48560 + saddr.sin_port = isk->sport;
48561 +
48562 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
48563 + if (err)
48564 + return err;
48565 +
48566 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
48567 + if (err)
48568 + return err;
48569 + }
48570 +
48571 + if (!curr->ips)
48572 + return 0;
48573 +
48574 + ip_addr = addr->sin_addr.s_addr;
48575 + ip_port = ntohs(addr->sin_port);
48576 +
48577 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
48578 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
48579 + current->role->roletype, cred->uid,
48580 + cred->gid, current->exec_file ?
48581 + gr_to_filename(current->exec_file->f_path.dentry,
48582 + current->exec_file->f_path.mnt) :
48583 + curr->filename, curr->filename,
48584 + &ip_addr, ip_port, type,
48585 + sk->sk_protocol, mode, &current->signal->saved_ip);
48586 + return 0;
48587 + }
48588 +
48589 + for (i = 0; i < curr->ip_num; i++) {
48590 + ip = *(curr->ips + i);
48591 + if (ip->iface != NULL) {
48592 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
48593 + p = strchr(iface, ':');
48594 + if (p != NULL)
48595 + *p = '\0';
48596 + dev = dev_get_by_name(sock_net(sk), iface);
48597 + if (dev == NULL)
48598 + continue;
48599 + idev = in_dev_get(dev);
48600 + if (idev == NULL) {
48601 + dev_put(dev);
48602 + continue;
48603 + }
48604 + rcu_read_lock();
48605 + for_ifa(idev) {
48606 + if (!strcmp(ip->iface, ifa->ifa_label)) {
48607 + our_addr = ifa->ifa_address;
48608 + our_netmask = 0xffffffff;
48609 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
48610 + if (ret == 1) {
48611 + rcu_read_unlock();
48612 + in_dev_put(idev);
48613 + dev_put(dev);
48614 + return 0;
48615 + } else if (ret == 2) {
48616 + rcu_read_unlock();
48617 + in_dev_put(idev);
48618 + dev_put(dev);
48619 + goto denied;
48620 + }
48621 + }
48622 + } endfor_ifa(idev);
48623 + rcu_read_unlock();
48624 + in_dev_put(idev);
48625 + dev_put(dev);
48626 + } else {
48627 + our_addr = ip->addr;
48628 + our_netmask = ip->netmask;
48629 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
48630 + if (ret == 1)
48631 + return 0;
48632 + else if (ret == 2)
48633 + goto denied;
48634 + }
48635 + }
48636 +
48637 +denied:
48638 + if (mode == GR_BIND)
48639 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
48640 + else if (mode == GR_CONNECT)
48641 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
48642 +
48643 + return -EACCES;
48644 +}
48645 +
48646 +int
48647 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
48648 +{
48649 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
48650 +}
48651 +
48652 +int
48653 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
48654 +{
48655 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
48656 +}
48657 +
48658 +int gr_search_listen(struct socket *sock)
48659 +{
48660 + struct sock *sk = sock->sk;
48661 + struct sockaddr_in addr;
48662 +
48663 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
48664 + addr.sin_port = inet_sk(sk)->sport;
48665 +
48666 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
48667 +}
48668 +
48669 +int gr_search_accept(struct socket *sock)
48670 +{
48671 + struct sock *sk = sock->sk;
48672 + struct sockaddr_in addr;
48673 +
48674 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
48675 + addr.sin_port = inet_sk(sk)->sport;
48676 +
48677 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
48678 +}
48679 +
48680 +int
48681 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
48682 +{
48683 + if (addr)
48684 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
48685 + else {
48686 + struct sockaddr_in sin;
48687 + const struct inet_sock *inet = inet_sk(sk);
48688 +
48689 + sin.sin_addr.s_addr = inet->daddr;
48690 + sin.sin_port = inet->dport;
48691 +
48692 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
48693 + }
48694 +}
48695 +
48696 +int
48697 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
48698 +{
48699 + struct sockaddr_in sin;
48700 +
48701 + if (unlikely(skb->len < sizeof (struct udphdr)))
48702 + return 0; // skip this packet
48703 +
48704 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
48705 + sin.sin_port = udp_hdr(skb)->source;
48706 +
48707 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
48708 +}
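check_ip_policy() above is the per-rule test used by bind/connect mediation: the socket mode, a port range, a netmask-anded address compare, a 256-bit protocol bitmap and a socket-type bitmap must all match, and GR_INVERT turns a match into an explicit deny. A self-contained sketch of that match, with a simplified rule struct that only assumes the fields visible in the patch:

/* Userspace sketch of the check_ip_policy() match; the struct and flag
 * values are simplified stand-ins, not the kernel's acl_ip_label. */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define GR_BIND    0x01u
#define GR_CONNECT 0x02u
#define GR_INVERT  0x04u

struct ip_rule {
	uint32_t addr;        /* rule address, network byte order */
	uint32_t netmask;     /* host-order mask */
	uint16_t low, high;   /* allowed port range */
	uint32_t mode;        /* GR_BIND / GR_CONNECT / GR_INVERT */
	uint32_t proto_bm[8]; /* 256-bit protocol bitmap */
	uint32_t type_bm;     /* socket-type bitmap */
};

/* 0 = no decision, 1 = allowed, 2 = explicitly denied (GR_INVERT) */
static int match_rule(const struct ip_rule *r, uint32_t ip_addr,
		      uint16_t port, uint8_t proto, int type, uint32_t mode)
{
	if ((r->mode & mode) &&
	    port >= r->low && port <= r->high &&
	    ((ntohl(ip_addr) & r->netmask) == (ntohl(r->addr) & r->netmask)) &&
	    (r->proto_bm[proto / 32] & (1u << (proto % 32))) &&
	    (r->type_bm & (1u << type)))
		return (r->mode & GR_INVERT) ? 2 : 1;
	return 0;
}

int main(void)
{
	struct ip_rule r = {
		.addr = htonl(0xC0A80000u), .netmask = 0xFFFF0000u, /* 192.168.0.0/16 */
		.low = 1024, .high = 65535, .mode = GR_CONNECT,
		.type_bm = 1u << 1,                                 /* SOCK_STREAM */
	};
	r.proto_bm[0] |= 1u << 6;                                   /* IPPROTO_TCP */

	printf("%d\n", match_rule(&r, htonl(0xC0A80101u), 8080, 6, 1, GR_CONNECT)); /* 1 */
	printf("%d\n", match_rule(&r, htonl(0x0A000001u), 8080, 6, 1, GR_CONNECT)); /* 0 */
	return 0;
}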
48709 diff -urNp linux-2.6.32.41/grsecurity/gracl_learn.c linux-2.6.32.41/grsecurity/gracl_learn.c
48710 --- linux-2.6.32.41/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
48711 +++ linux-2.6.32.41/grsecurity/gracl_learn.c 2011-04-17 15:56:46.000000000 -0400
48712 @@ -0,0 +1,211 @@
48713 +#include <linux/kernel.h>
48714 +#include <linux/mm.h>
48715 +#include <linux/sched.h>
48716 +#include <linux/poll.h>
48717 +#include <linux/smp_lock.h>
48718 +#include <linux/string.h>
48719 +#include <linux/file.h>
48720 +#include <linux/types.h>
48721 +#include <linux/vmalloc.h>
48722 +#include <linux/grinternal.h>
48723 +
48724 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
48725 + size_t count, loff_t *ppos);
48726 +extern int gr_acl_is_enabled(void);
48727 +
48728 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
48729 +static int gr_learn_attached;
48730 +
48731 +/* use a 512k buffer */
48732 +#define LEARN_BUFFER_SIZE (512 * 1024)
48733 +
48734 +static DEFINE_SPINLOCK(gr_learn_lock);
48735 +static DEFINE_MUTEX(gr_learn_user_mutex);
48736 +
48737 +/* we need to maintain two buffers: the kernel context feeding grlearn
48738 +   copies to userspace under a mutex, while the other kernel contexts
48739 +   append into the buffer under a spinlock, since they cannot sleep
48740 +*/
48741 +static char *learn_buffer;
48742 +static char *learn_buffer_user;
48743 +static int learn_buffer_len;
48744 +static int learn_buffer_user_len;
48745 +
48746 +static ssize_t
48747 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
48748 +{
48749 + DECLARE_WAITQUEUE(wait, current);
48750 + ssize_t retval = 0;
48751 +
48752 + add_wait_queue(&learn_wait, &wait);
48753 + set_current_state(TASK_INTERRUPTIBLE);
48754 + do {
48755 + mutex_lock(&gr_learn_user_mutex);
48756 + spin_lock(&gr_learn_lock);
48757 + if (learn_buffer_len)
48758 + break;
48759 + spin_unlock(&gr_learn_lock);
48760 + mutex_unlock(&gr_learn_user_mutex);
48761 + if (file->f_flags & O_NONBLOCK) {
48762 + retval = -EAGAIN;
48763 + goto out;
48764 + }
48765 + if (signal_pending(current)) {
48766 + retval = -ERESTARTSYS;
48767 + goto out;
48768 + }
48769 +
48770 + schedule();
48771 + } while (1);
48772 +
48773 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
48774 + learn_buffer_user_len = learn_buffer_len;
48775 + retval = learn_buffer_len;
48776 + learn_buffer_len = 0;
48777 +
48778 + spin_unlock(&gr_learn_lock);
48779 +
48780 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
48781 + retval = -EFAULT;
48782 +
48783 + mutex_unlock(&gr_learn_user_mutex);
48784 +out:
48785 + set_current_state(TASK_RUNNING);
48786 + remove_wait_queue(&learn_wait, &wait);
48787 + return retval;
48788 +}
48789 +
48790 +static unsigned int
48791 +poll_learn(struct file * file, poll_table * wait)
48792 +{
48793 + poll_wait(file, &learn_wait, wait);
48794 +
48795 + if (learn_buffer_len)
48796 + return (POLLIN | POLLRDNORM);
48797 +
48798 + return 0;
48799 +}
48800 +
48801 +void
48802 +gr_clear_learn_entries(void)
48803 +{
48804 + char *tmp;
48805 +
48806 + mutex_lock(&gr_learn_user_mutex);
48807 + if (learn_buffer != NULL) {
48808 + spin_lock(&gr_learn_lock);
48809 + tmp = learn_buffer;
48810 + learn_buffer = NULL;
48811 + spin_unlock(&gr_learn_lock);
48812 +		vfree(tmp);
48813 + }
48814 + if (learn_buffer_user != NULL) {
48815 + vfree(learn_buffer_user);
48816 + learn_buffer_user = NULL;
48817 + }
48818 + learn_buffer_len = 0;
48819 + mutex_unlock(&gr_learn_user_mutex);
48820 +
48821 + return;
48822 +}
48823 +
48824 +void
48825 +gr_add_learn_entry(const char *fmt, ...)
48826 +{
48827 + va_list args;
48828 + unsigned int len;
48829 +
48830 + if (!gr_learn_attached)
48831 + return;
48832 +
48833 + spin_lock(&gr_learn_lock);
48834 +
48835 + /* leave a gap at the end so we know when it's "full" but don't have to
48836 + compute the exact length of the string we're trying to append
48837 + */
48838 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
48839 + spin_unlock(&gr_learn_lock);
48840 + wake_up_interruptible(&learn_wait);
48841 + return;
48842 + }
48843 + if (learn_buffer == NULL) {
48844 + spin_unlock(&gr_learn_lock);
48845 + return;
48846 + }
48847 +
48848 + va_start(args, fmt);
48849 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
48850 + va_end(args);
48851 +
48852 + learn_buffer_len += len + 1;
48853 +
48854 + spin_unlock(&gr_learn_lock);
48855 + wake_up_interruptible(&learn_wait);
48856 +
48857 + return;
48858 +}
48859 +
48860 +static int
48861 +open_learn(struct inode *inode, struct file *file)
48862 +{
48863 + if (file->f_mode & FMODE_READ && gr_learn_attached)
48864 + return -EBUSY;
48865 + if (file->f_mode & FMODE_READ) {
48866 + int retval = 0;
48867 + mutex_lock(&gr_learn_user_mutex);
48868 + if (learn_buffer == NULL)
48869 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
48870 + if (learn_buffer_user == NULL)
48871 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
48872 + if (learn_buffer == NULL) {
48873 + retval = -ENOMEM;
48874 + goto out_error;
48875 + }
48876 + if (learn_buffer_user == NULL) {
48877 + retval = -ENOMEM;
48878 + goto out_error;
48879 + }
48880 + learn_buffer_len = 0;
48881 + learn_buffer_user_len = 0;
48882 + gr_learn_attached = 1;
48883 +out_error:
48884 + mutex_unlock(&gr_learn_user_mutex);
48885 + return retval;
48886 + }
48887 + return 0;
48888 +}
48889 +
48890 +static int
48891 +close_learn(struct inode *inode, struct file *file)
48892 +{
48893 + char *tmp;
48894 +
48895 + if (file->f_mode & FMODE_READ) {
48896 + mutex_lock(&gr_learn_user_mutex);
48897 + if (learn_buffer != NULL) {
48898 + spin_lock(&gr_learn_lock);
48899 + tmp = learn_buffer;
48900 + learn_buffer = NULL;
48901 + spin_unlock(&gr_learn_lock);
48902 + vfree(tmp);
48903 + }
48904 + if (learn_buffer_user != NULL) {
48905 + vfree(learn_buffer_user);
48906 + learn_buffer_user = NULL;
48907 + }
48908 + learn_buffer_len = 0;
48909 + learn_buffer_user_len = 0;
48910 + gr_learn_attached = 0;
48911 + mutex_unlock(&gr_learn_user_mutex);
48912 + }
48913 +
48914 + return 0;
48915 +}
48916 +
48917 +const struct file_operations grsec_fops = {
48918 + .read = read_learn,
48919 + .write = write_grsec_handler,
48920 + .open = open_learn,
48921 + .release = close_learn,
48922 + .poll = poll_learn,
48923 +};
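read_learn()/poll_learn() above give the learning interface ordinary blocking-read and poll semantics: read sleeps on learn_wait until gr_add_learn_entry() queues data (or returns -EAGAIN for O_NONBLOCK), and only one reader may attach at a time. Below is a minimal userspace consumer, roughly what gradm's learning daemon does; the /dev/grsec path is an assumption about where grsec_fops is exposed.

/* Hypothetical reader for the learning interface above. */
#include <stdio.h>
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

static char buf[512 * 1024];  /* matches LEARN_BUFFER_SIZE in the patch */

int main(void)
{
	int fd = open("/dev/grsec", O_RDONLY);  /* assumed device node */
	if (fd < 0) {
		perror("open");
		return 1;
	}
	for (;;) {
		struct pollfd pfd = { .fd = fd, .events = POLLIN };
		if (poll(&pfd, 1, -1) < 0)
			break;
		ssize_t n = read(fd, buf, sizeof(buf));
		if (n <= 0)
			break;
		fwrite(buf, 1, (size_t)n, stdout);  /* entries arrive as '\0'-terminated strings */
	}
	close(fd);
	return 0;
}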
48924 diff -urNp linux-2.6.32.41/grsecurity/gracl_res.c linux-2.6.32.41/grsecurity/gracl_res.c
48925 --- linux-2.6.32.41/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
48926 +++ linux-2.6.32.41/grsecurity/gracl_res.c 2011-04-17 15:56:46.000000000 -0400
48927 @@ -0,0 +1,67 @@
48928 +#include <linux/kernel.h>
48929 +#include <linux/sched.h>
48930 +#include <linux/gracl.h>
48931 +#include <linux/grinternal.h>
48932 +
48933 +static const char *restab_log[] = {
48934 + [RLIMIT_CPU] = "RLIMIT_CPU",
48935 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
48936 + [RLIMIT_DATA] = "RLIMIT_DATA",
48937 + [RLIMIT_STACK] = "RLIMIT_STACK",
48938 + [RLIMIT_CORE] = "RLIMIT_CORE",
48939 + [RLIMIT_RSS] = "RLIMIT_RSS",
48940 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
48941 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
48942 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
48943 + [RLIMIT_AS] = "RLIMIT_AS",
48944 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
48945 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
48946 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
48947 + [RLIMIT_NICE] = "RLIMIT_NICE",
48948 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
48949 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
48950 + [GR_CRASH_RES] = "RLIMIT_CRASH"
48951 +};
48952 +
48953 +void
48954 +gr_log_resource(const struct task_struct *task,
48955 + const int res, const unsigned long wanted, const int gt)
48956 +{
48957 + const struct cred *cred;
48958 + unsigned long rlim;
48959 +
48960 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
48961 + return;
48962 +
48963 + // not yet supported resource
48964 + if (unlikely(!restab_log[res]))
48965 + return;
48966 +
48967 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
48968 + rlim = task->signal->rlim[res].rlim_max;
48969 + else
48970 + rlim = task->signal->rlim[res].rlim_cur;
48971 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
48972 + return;
48973 +
48974 + rcu_read_lock();
48975 + cred = __task_cred(task);
48976 +
48977 + if (res == RLIMIT_NPROC &&
48978 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
48979 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
48980 + goto out_rcu_unlock;
48981 + else if (res == RLIMIT_MEMLOCK &&
48982 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
48983 + goto out_rcu_unlock;
48984 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
48985 + goto out_rcu_unlock;
48986 + rcu_read_unlock();
48987 +
48988 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
48989 +
48990 + return;
48991 +out_rcu_unlock:
48992 + rcu_read_unlock();
48993 + return;
48994 +}
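gr_log_resource() above only logs once a request actually exceeds the limit; the gt flag records whether the caller's own check treated the limit as exclusive, so the logging condition is the exact complement of the allowed range. A tiny sketch of that threshold test:

/* Sketch of the over-limit test in gr_log_resource(). */
#include <stdio.h>
#include <limits.h>

#define RLIM_INF ULONG_MAX  /* stand-in for RLIM_INFINITY */

static int over_limit(unsigned long wanted, unsigned long rlim, int gt)
{
	if (rlim == RLIM_INF)
		return 0;
	return gt ? wanted > rlim : wanted >= rlim;
}

int main(void)
{
	printf("%d %d %d\n",
	       over_limit(100, 100, 1),      /* 0: with gt, hitting the limit exactly is fine */
	       over_limit(100, 100, 0),      /* 1: without gt, equality already exceeds it   */
	       over_limit(5, RLIM_INF, 0));  /* 0: an infinite limit never logs              */
	return 0;
}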
48995 diff -urNp linux-2.6.32.41/grsecurity/gracl_segv.c linux-2.6.32.41/grsecurity/gracl_segv.c
48996 --- linux-2.6.32.41/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
48997 +++ linux-2.6.32.41/grsecurity/gracl_segv.c 2011-04-17 15:56:46.000000000 -0400
48998 @@ -0,0 +1,284 @@
48999 +#include <linux/kernel.h>
49000 +#include <linux/mm.h>
49001 +#include <asm/uaccess.h>
49002 +#include <asm/errno.h>
49003 +#include <asm/mman.h>
49004 +#include <net/sock.h>
49005 +#include <linux/file.h>
49006 +#include <linux/fs.h>
49007 +#include <linux/net.h>
49008 +#include <linux/in.h>
49009 +#include <linux/smp_lock.h>
49010 +#include <linux/slab.h>
49011 +#include <linux/types.h>
49012 +#include <linux/sched.h>
49013 +#include <linux/timer.h>
49014 +#include <linux/gracl.h>
49015 +#include <linux/grsecurity.h>
49016 +#include <linux/grinternal.h>
49017 +
49018 +static struct crash_uid *uid_set;
49019 +static unsigned short uid_used;
49020 +static DEFINE_SPINLOCK(gr_uid_lock);
49021 +extern rwlock_t gr_inode_lock;
49022 +extern struct acl_subject_label *
49023 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
49024 + struct acl_role_label *role);
49025 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
49026 +
49027 +int
49028 +gr_init_uidset(void)
49029 +{
49030 + uid_set =
49031 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
49032 + uid_used = 0;
49033 +
49034 + return uid_set ? 1 : 0;
49035 +}
49036 +
49037 +void
49038 +gr_free_uidset(void)
49039 +{
49040 + if (uid_set)
49041 + kfree(uid_set);
49042 +
49043 + return;
49044 +}
49045 +
49046 +int
49047 +gr_find_uid(const uid_t uid)
49048 +{
49049 + struct crash_uid *tmp = uid_set;
49050 + uid_t buid;
49051 + int low = 0, high = uid_used - 1, mid;
49052 +
49053 + while (high >= low) {
49054 + mid = (low + high) >> 1;
49055 + buid = tmp[mid].uid;
49056 + if (buid == uid)
49057 + return mid;
49058 + if (buid > uid)
49059 + high = mid - 1;
49060 + if (buid < uid)
49061 + low = mid + 1;
49062 + }
49063 +
49064 + return -1;
49065 +}
49066 +
49067 +static __inline__ void
49068 +gr_insertsort(void)
49069 +{
49070 + unsigned short i, j;
49071 + struct crash_uid index;
49072 +
49073 + for (i = 1; i < uid_used; i++) {
49074 + index = uid_set[i];
49075 + j = i;
49076 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
49077 + uid_set[j] = uid_set[j - 1];
49078 + j--;
49079 + }
49080 + uid_set[j] = index;
49081 + }
49082 +
49083 + return;
49084 +}
49085 +
49086 +static __inline__ void
49087 +gr_insert_uid(const uid_t uid, const unsigned long expires)
49088 +{
49089 + int loc;
49090 +
49091 + if (uid_used == GR_UIDTABLE_MAX)
49092 + return;
49093 +
49094 + loc = gr_find_uid(uid);
49095 +
49096 + if (loc >= 0) {
49097 + uid_set[loc].expires = expires;
49098 + return;
49099 + }
49100 +
49101 + uid_set[uid_used].uid = uid;
49102 + uid_set[uid_used].expires = expires;
49103 + uid_used++;
49104 +
49105 + gr_insertsort();
49106 +
49107 + return;
49108 +}
49109 +
49110 +void
49111 +gr_remove_uid(const unsigned short loc)
49112 +{
49113 + unsigned short i;
49114 +
49115 + for (i = loc + 1; i < uid_used; i++)
49116 + uid_set[i - 1] = uid_set[i];
49117 +
49118 + uid_used--;
49119 +
49120 + return;
49121 +}
49122 +
49123 +int
49124 +gr_check_crash_uid(const uid_t uid)
49125 +{
49126 + int loc;
49127 + int ret = 0;
49128 +
49129 + if (unlikely(!gr_acl_is_enabled()))
49130 + return 0;
49131 +
49132 + spin_lock(&gr_uid_lock);
49133 + loc = gr_find_uid(uid);
49134 +
49135 + if (loc < 0)
49136 + goto out_unlock;
49137 +
49138 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
49139 + gr_remove_uid(loc);
49140 + else
49141 + ret = 1;
49142 +
49143 +out_unlock:
49144 + spin_unlock(&gr_uid_lock);
49145 + return ret;
49146 +}
49147 +
49148 +static __inline__ int
49149 +proc_is_setxid(const struct cred *cred)
49150 +{
49151 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
49152 + cred->uid != cred->fsuid)
49153 + return 1;
49154 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
49155 + cred->gid != cred->fsgid)
49156 + return 1;
49157 +
49158 + return 0;
49159 +}
49160 +
49161 +void
49162 +gr_handle_crash(struct task_struct *task, const int sig)
49163 +{
49164 + struct acl_subject_label *curr;
49165 + struct acl_subject_label *curr2;
49166 + struct task_struct *tsk, *tsk2;
49167 + const struct cred *cred;
49168 + const struct cred *cred2;
49169 +
49170 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
49171 + return;
49172 +
49173 + if (unlikely(!gr_acl_is_enabled()))
49174 + return;
49175 +
49176 + curr = task->acl;
49177 +
49178 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
49179 + return;
49180 +
49181 + if (time_before_eq(curr->expires, get_seconds())) {
49182 + curr->expires = 0;
49183 + curr->crashes = 0;
49184 + }
49185 +
49186 + curr->crashes++;
49187 +
49188 + if (!curr->expires)
49189 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
49190 +
49191 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
49192 + time_after(curr->expires, get_seconds())) {
49193 + rcu_read_lock();
49194 + cred = __task_cred(task);
49195 + if (cred->uid && proc_is_setxid(cred)) {
49196 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
49197 + spin_lock(&gr_uid_lock);
49198 + gr_insert_uid(cred->uid, curr->expires);
49199 + spin_unlock(&gr_uid_lock);
49200 + curr->expires = 0;
49201 + curr->crashes = 0;
49202 + read_lock(&tasklist_lock);
49203 + do_each_thread(tsk2, tsk) {
49204 + cred2 = __task_cred(tsk);
49205 + if (tsk != task && cred2->uid == cred->uid)
49206 + gr_fake_force_sig(SIGKILL, tsk);
49207 + } while_each_thread(tsk2, tsk);
49208 + read_unlock(&tasklist_lock);
49209 + } else {
49210 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
49211 + read_lock(&tasklist_lock);
49212 + do_each_thread(tsk2, tsk) {
49213 + if (likely(tsk != task)) {
49214 + curr2 = tsk->acl;
49215 +
49216 + if (curr2->device == curr->device &&
49217 + curr2->inode == curr->inode)
49218 + gr_fake_force_sig(SIGKILL, tsk);
49219 + }
49220 + } while_each_thread(tsk2, tsk);
49221 + read_unlock(&tasklist_lock);
49222 + }
49223 + rcu_read_unlock();
49224 + }
49225 +
49226 + return;
49227 +}
49228 +
49229 +int
49230 +gr_check_crash_exec(const struct file *filp)
49231 +{
49232 + struct acl_subject_label *curr;
49233 +
49234 + if (unlikely(!gr_acl_is_enabled()))
49235 + return 0;
49236 +
49237 + read_lock(&gr_inode_lock);
49238 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
49239 + filp->f_path.dentry->d_inode->i_sb->s_dev,
49240 + current->role);
49241 + read_unlock(&gr_inode_lock);
49242 +
49243 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
49244 + (!curr->crashes && !curr->expires))
49245 + return 0;
49246 +
49247 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
49248 + time_after(curr->expires, get_seconds()))
49249 + return 1;
49250 + else if (time_before_eq(curr->expires, get_seconds())) {
49251 + curr->crashes = 0;
49252 + curr->expires = 0;
49253 + }
49254 +
49255 + return 0;
49256 +}
49257 +
49258 +void
49259 +gr_handle_alertkill(struct task_struct *task)
49260 +{
49261 + struct acl_subject_label *curracl;
49262 + __u32 curr_ip;
49263 + struct task_struct *p, *p2;
49264 +
49265 + if (unlikely(!gr_acl_is_enabled()))
49266 + return;
49267 +
49268 + curracl = task->acl;
49269 + curr_ip = task->signal->curr_ip;
49270 +
49271 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
49272 + read_lock(&tasklist_lock);
49273 + do_each_thread(p2, p) {
49274 + if (p->signal->curr_ip == curr_ip)
49275 + gr_fake_force_sig(SIGKILL, p);
49276 + } while_each_thread(p2, p);
49277 + read_unlock(&tasklist_lock);
49278 + } else if (curracl->mode & GR_KILLPROC)
49279 + gr_fake_force_sig(SIGKILL, task);
49280 +
49281 + return;
49282 +}
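The crash-uid table above is the heart of the anti-bruteforce response: gr_find_uid() binary-searches a sorted array, and gr_insert_uid() either refreshes an existing entry's expiry or appends and re-sorts with an insertion sort. A standalone sketch of that bookkeeping, with a simplified stand-in for struct crash_uid:

/* Userspace sketch of the sorted crash-uid table. */
#include <stdio.h>

#define TABLE_MAX 16

struct entry { unsigned int uid; unsigned long expires; };

static struct entry set[TABLE_MAX];
static int used;

static int find_uid(unsigned int uid)
{
	int low = 0, high = used - 1;

	while (high >= low) {
		int mid = (low + high) / 2;

		if (set[mid].uid == uid)
			return mid;
		if (set[mid].uid > uid)
			high = mid - 1;
		else
			low = mid + 1;
	}
	return -1;
}

static void insert_uid(unsigned int uid, unsigned long expires)
{
	int loc = find_uid(uid);

	if (loc >= 0) {                  /* refresh an existing ban, don't duplicate it */
		set[loc].expires = expires;
		return;
	}
	if (used == TABLE_MAX)
		return;
	set[used].uid = uid;
	set[used].expires = expires;
	used++;
	for (int i = 1; i < used; i++) { /* insertion sort keeps the array ordered */
		struct entry key = set[i];
		int j = i;

		while (j > 0 && set[j - 1].uid > key.uid) {
			set[j] = set[j - 1];
			j--;
		}
		set[j] = key;
	}
}

int main(void)
{
	insert_uid(1000, 60);
	insert_uid(500, 60);
	insert_uid(1000, 120);                   /* updates the expiry, not the size */
	printf("%d %d\n", used, find_uid(500));  /* prints: 2 0 */
	return 0;
}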
49283 diff -urNp linux-2.6.32.41/grsecurity/gracl_shm.c linux-2.6.32.41/grsecurity/gracl_shm.c
49284 --- linux-2.6.32.41/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
49285 +++ linux-2.6.32.41/grsecurity/gracl_shm.c 2011-04-17 15:56:46.000000000 -0400
49286 @@ -0,0 +1,40 @@
49287 +#include <linux/kernel.h>
49288 +#include <linux/mm.h>
49289 +#include <linux/sched.h>
49290 +#include <linux/file.h>
49291 +#include <linux/ipc.h>
49292 +#include <linux/gracl.h>
49293 +#include <linux/grsecurity.h>
49294 +#include <linux/grinternal.h>
49295 +
49296 +int
49297 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
49298 + const time_t shm_createtime, const uid_t cuid, const int shmid)
49299 +{
49300 + struct task_struct *task;
49301 +
49302 + if (!gr_acl_is_enabled())
49303 + return 1;
49304 +
49305 + rcu_read_lock();
49306 + read_lock(&tasklist_lock);
49307 +
49308 + task = find_task_by_vpid(shm_cprid);
49309 +
49310 + if (unlikely(!task))
49311 + task = find_task_by_vpid(shm_lapid);
49312 +
49313 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
49314 + (task->pid == shm_lapid)) &&
49315 + (task->acl->mode & GR_PROTSHM) &&
49316 + (task->acl != current->acl))) {
49317 + read_unlock(&tasklist_lock);
49318 + rcu_read_unlock();
49319 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
49320 + return 0;
49321 + }
49322 + read_unlock(&tasklist_lock);
49323 + rcu_read_unlock();
49324 +
49325 + return 1;
49326 +}
49327 diff -urNp linux-2.6.32.41/grsecurity/grsec_chdir.c linux-2.6.32.41/grsecurity/grsec_chdir.c
49328 --- linux-2.6.32.41/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
49329 +++ linux-2.6.32.41/grsecurity/grsec_chdir.c 2011-04-17 15:56:46.000000000 -0400
49330 @@ -0,0 +1,19 @@
49331 +#include <linux/kernel.h>
49332 +#include <linux/sched.h>
49333 +#include <linux/fs.h>
49334 +#include <linux/file.h>
49335 +#include <linux/grsecurity.h>
49336 +#include <linux/grinternal.h>
49337 +
49338 +void
49339 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
49340 +{
49341 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
49342 + if ((grsec_enable_chdir && grsec_enable_group &&
49343 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
49344 + !grsec_enable_group)) {
49345 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
49346 + }
49347 +#endif
49348 + return;
49349 +}
49350 diff -urNp linux-2.6.32.41/grsecurity/grsec_chroot.c linux-2.6.32.41/grsecurity/grsec_chroot.c
49351 --- linux-2.6.32.41/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
49352 +++ linux-2.6.32.41/grsecurity/grsec_chroot.c 2011-04-17 15:56:46.000000000 -0400
49353 @@ -0,0 +1,395 @@
49354 +#include <linux/kernel.h>
49355 +#include <linux/module.h>
49356 +#include <linux/sched.h>
49357 +#include <linux/file.h>
49358 +#include <linux/fs.h>
49359 +#include <linux/mount.h>
49360 +#include <linux/types.h>
49361 +#include <linux/pid_namespace.h>
49362 +#include <linux/grsecurity.h>
49363 +#include <linux/grinternal.h>
49364 +
49365 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
49366 +{
49367 +#ifdef CONFIG_GRKERNSEC
49368 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
49369 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
49370 + task->gr_is_chrooted = 1;
49371 + else
49372 + task->gr_is_chrooted = 0;
49373 +
49374 + task->gr_chroot_dentry = path->dentry;
49375 +#endif
49376 + return;
49377 +}
49378 +
49379 +void gr_clear_chroot_entries(struct task_struct *task)
49380 +{
49381 +#ifdef CONFIG_GRKERNSEC
49382 + task->gr_is_chrooted = 0;
49383 + task->gr_chroot_dentry = NULL;
49384 +#endif
49385 + return;
49386 +}
49387 +
49388 +int
49389 +gr_handle_chroot_unix(const pid_t pid)
49390 +{
49391 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
49392 + struct pid *spid = NULL;
49393 +
49394 + if (unlikely(!grsec_enable_chroot_unix))
49395 + return 1;
49396 +
49397 + if (likely(!proc_is_chrooted(current)))
49398 + return 1;
49399 +
49400 + rcu_read_lock();
49401 + read_lock(&tasklist_lock);
49402 +
49403 + spid = find_vpid(pid);
49404 + if (spid) {
49405 + struct task_struct *p;
49406 + p = pid_task(spid, PIDTYPE_PID);
49407 + if (unlikely(p && !have_same_root(current, p))) {
49408 + read_unlock(&tasklist_lock);
49409 + rcu_read_unlock();
49410 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
49411 + return 0;
49412 + }
49413 + }
49414 + read_unlock(&tasklist_lock);
49415 + rcu_read_unlock();
49416 +#endif
49417 + return 1;
49418 +}
49419 +
49420 +int
49421 +gr_handle_chroot_nice(void)
49422 +{
49423 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
49424 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
49425 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
49426 + return -EPERM;
49427 + }
49428 +#endif
49429 + return 0;
49430 +}
49431 +
49432 +int
49433 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
49434 +{
49435 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
49436 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
49437 + && proc_is_chrooted(current)) {
49438 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
49439 + return -EACCES;
49440 + }
49441 +#endif
49442 + return 0;
49443 +}
49444 +
49445 +int
49446 +gr_handle_chroot_rawio(const struct inode *inode)
49447 +{
49448 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
49449 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
49450 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
49451 + return 1;
49452 +#endif
49453 + return 0;
49454 +}
49455 +
49456 +int
49457 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
49458 +{
49459 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
49460 + struct task_struct *p;
49461 + int ret = 0;
49462 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
49463 + return ret;
49464 +
49465 + read_lock(&tasklist_lock);
49466 + do_each_pid_task(pid, type, p) {
49467 + if (!have_same_root(current, p)) {
49468 + ret = 1;
49469 + goto out;
49470 + }
49471 + } while_each_pid_task(pid, type, p);
49472 +out:
49473 + read_unlock(&tasklist_lock);
49474 + return ret;
49475 +#endif
49476 + return 0;
49477 +}
49478 +
49479 +int
49480 +gr_pid_is_chrooted(struct task_struct *p)
49481 +{
49482 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
49483 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
49484 + return 0;
49485 +
49486 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
49487 + !have_same_root(current, p)) {
49488 + return 1;
49489 + }
49490 +#endif
49491 + return 0;
49492 +}
49493 +
49494 +EXPORT_SYMBOL(gr_pid_is_chrooted);
49495 +
49496 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
49497 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
49498 +{
49499 + struct dentry *dentry = (struct dentry *)u_dentry;
49500 + struct vfsmount *mnt = (struct vfsmount *)u_mnt;
49501 + struct dentry *realroot;
49502 + struct vfsmount *realrootmnt;
49503 + struct dentry *currentroot;
49504 + struct vfsmount *currentmnt;
49505 + struct task_struct *reaper = &init_task;
49506 + int ret = 1;
49507 +
49508 + read_lock(&reaper->fs->lock);
49509 + realrootmnt = mntget(reaper->fs->root.mnt);
49510 + realroot = dget(reaper->fs->root.dentry);
49511 + read_unlock(&reaper->fs->lock);
49512 +
49513 + read_lock(&current->fs->lock);
49514 + currentmnt = mntget(current->fs->root.mnt);
49515 + currentroot = dget(current->fs->root.dentry);
49516 + read_unlock(&current->fs->lock);
49517 +
49518 + spin_lock(&dcache_lock);
49519 + for (;;) {
49520 + if (unlikely((dentry == realroot && mnt == realrootmnt)
49521 + || (dentry == currentroot && mnt == currentmnt)))
49522 + break;
49523 + if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
49524 + if (mnt->mnt_parent == mnt)
49525 + break;
49526 + dentry = mnt->mnt_mountpoint;
49527 + mnt = mnt->mnt_parent;
49528 + continue;
49529 + }
49530 + dentry = dentry->d_parent;
49531 + }
49532 + spin_unlock(&dcache_lock);
49533 +
49534 + dput(currentroot);
49535 + mntput(currentmnt);
49536 +
49537 + /* access is outside of chroot */
49538 + if (dentry == realroot && mnt == realrootmnt)
49539 + ret = 0;
49540 +
49541 + dput(realroot);
49542 + mntput(realrootmnt);
49543 + return ret;
49544 +}
49545 +#endif
49546 +
49547 +int
49548 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
49549 +{
49550 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
49551 + if (!grsec_enable_chroot_fchdir)
49552 + return 1;
49553 +
49554 + if (!proc_is_chrooted(current))
49555 + return 1;
49556 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
49557 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
49558 + return 0;
49559 + }
49560 +#endif
49561 + return 1;
49562 +}
49563 +
49564 +int
49565 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
49566 + const time_t shm_createtime)
49567 +{
49568 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
49569 + struct pid *pid = NULL;
49570 + time_t starttime;
49571 +
49572 + if (unlikely(!grsec_enable_chroot_shmat))
49573 + return 1;
49574 +
49575 + if (likely(!proc_is_chrooted(current)))
49576 + return 1;
49577 +
49578 + rcu_read_lock();
49579 + read_lock(&tasklist_lock);
49580 +
49581 + pid = find_vpid(shm_cprid);
49582 + if (pid) {
49583 + struct task_struct *p;
49584 + p = pid_task(pid, PIDTYPE_PID);
49585 + if (p == NULL)
49586 + goto unlock;
49587 + starttime = p->start_time.tv_sec;
49588 + if (unlikely(!have_same_root(current, p) &&
49589 + time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime))) {
49590 + read_unlock(&tasklist_lock);
49591 + rcu_read_unlock();
49592 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
49593 + return 0;
49594 + }
49595 + } else {
49596 + pid = find_vpid(shm_lapid);
49597 + if (pid) {
49598 + struct task_struct *p;
49599 + p = pid_task(pid, PIDTYPE_PID);
49600 + if (p == NULL)
49601 + goto unlock;
49602 + if (unlikely(!have_same_root(current, p))) {
49603 + read_unlock(&tasklist_lock);
49604 + rcu_read_unlock();
49605 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
49606 + return 0;
49607 + }
49608 + }
49609 + }
49610 +
49611 +unlock:
49612 + read_unlock(&tasklist_lock);
49613 + rcu_read_unlock();
49614 +#endif
49615 + return 1;
49616 +}
49617 +
49618 +void
49619 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
49620 +{
49621 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
49622 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
49623 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
49624 +#endif
49625 + return;
49626 +}
49627 +
49628 +int
49629 +gr_handle_chroot_mknod(const struct dentry *dentry,
49630 + const struct vfsmount *mnt, const int mode)
49631 +{
49632 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
49633 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
49634 + proc_is_chrooted(current)) {
49635 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
49636 + return -EPERM;
49637 + }
49638 +#endif
49639 + return 0;
49640 +}
49641 +
49642 +int
49643 +gr_handle_chroot_mount(const struct dentry *dentry,
49644 + const struct vfsmount *mnt, const char *dev_name)
49645 +{
49646 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
49647 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
49648 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name, dentry, mnt);
49649 + return -EPERM;
49650 + }
49651 +#endif
49652 + return 0;
49653 +}
49654 +
49655 +int
49656 +gr_handle_chroot_pivot(void)
49657 +{
49658 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
49659 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
49660 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
49661 + return -EPERM;
49662 + }
49663 +#endif
49664 + return 0;
49665 +}
49666 +
49667 +int
49668 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
49669 +{
49670 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
49671 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
49672 + !gr_is_outside_chroot(dentry, mnt)) {
49673 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
49674 + return -EPERM;
49675 + }
49676 +#endif
49677 + return 0;
49678 +}
49679 +
49680 +int
49681 +gr_handle_chroot_caps(struct path *path)
49682 +{
49683 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
49684 + if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
49685 + (init_task.fs->root.dentry != path->dentry) &&
49686 + (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
49687 +
49688 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
49689 + const struct cred *old = current_cred();
49690 + struct cred *new = prepare_creds();
49691 + if (new == NULL)
49692 + return 1;
49693 +
49694 + new->cap_permitted = cap_drop(old->cap_permitted,
49695 + chroot_caps);
49696 + new->cap_inheritable = cap_drop(old->cap_inheritable,
49697 + chroot_caps);
49698 + new->cap_effective = cap_drop(old->cap_effective,
49699 + chroot_caps);
49700 +
49701 + commit_creds(new);
49702 +
49703 + return 0;
49704 + }
49705 +#endif
49706 + return 0;
49707 +}
49708 +
49709 +int
49710 +gr_handle_chroot_sysctl(const int op)
49711 +{
49712 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
49713 + if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
49714 + && (op & MAY_WRITE))
49715 + return -EACCES;
49716 +#endif
49717 + return 0;
49718 +}
49719 +
49720 +void
49721 +gr_handle_chroot_chdir(struct path *path)
49722 +{
49723 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
49724 + if (grsec_enable_chroot_chdir)
49725 + set_fs_pwd(current->fs, path);
49726 +#endif
49727 + return;
49728 +}
49729 +
49730 +int
49731 +gr_handle_chroot_chmod(const struct dentry *dentry,
49732 + const struct vfsmount *mnt, const int mode)
49733 +{
49734 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
49735 + /* allow chmod +s on directories, but not on files */
49736 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
49737 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
49738 + proc_is_chrooted(current)) {
49739 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
49740 + return -EPERM;
49741 + }
49742 +#endif
49743 + return 0;
49744 +}
49745 +
49746 +#ifdef CONFIG_SECURITY
49747 +EXPORT_SYMBOL(gr_handle_chroot_caps);
49748 +#endif
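gr_handle_chroot_caps() above clears a fixed set of capabilities (GR_CHROOT_CAPS) from the permitted, inheritable and effective sets of a chrooted task; cap_drop() is simply an and-with-complement of the mask on each set. A small sketch of that operation using 64-bit masks; the mask contents here are illustrative, not the kernel's GR_CHROOT_CAPS definition.

/* Sketch of the capability-drop step, with 64-bit masks. */
#include <stdio.h>
#include <stdint.h>

#define CAP_MASK(c) (1ull << (c))  /* capability number -> bit */

struct cred_sketch { uint64_t permitted, inheritable, effective; };

static void drop_caps(struct cred_sketch *c, uint64_t drop)
{
	c->permitted   &= ~drop;
	c->inheritable &= ~drop;
	c->effective   &= ~drop;
}

int main(void)
{
	/* illustrative drop mask: CAP_SYS_MODULE (16), CAP_SYS_RAWIO (17), CAP_MKNOD (27) */
	uint64_t chroot_caps = CAP_MASK(16) | CAP_MASK(17) | CAP_MASK(27);
	struct cred_sketch c = { ~0ull, 0, ~0ull };  /* a fully-capable task */

	drop_caps(&c, chroot_caps);
	printf("%d %d\n",
	       (int)!!(c.effective & CAP_MASK(17)),  /* 0: CAP_SYS_RAWIO dropped */
	       (int)!!(c.effective & CAP_MASK(0)));  /* 1: bits outside the mask untouched */
	return 0;
}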
49749 diff -urNp linux-2.6.32.41/grsecurity/grsec_disabled.c linux-2.6.32.41/grsecurity/grsec_disabled.c
49750 --- linux-2.6.32.41/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
49751 +++ linux-2.6.32.41/grsecurity/grsec_disabled.c 2011-04-17 15:56:46.000000000 -0400
49752 @@ -0,0 +1,447 @@
49753 +#include <linux/kernel.h>
49754 +#include <linux/module.h>
49755 +#include <linux/sched.h>
49756 +#include <linux/file.h>
49757 +#include <linux/fs.h>
49758 +#include <linux/kdev_t.h>
49759 +#include <linux/net.h>
49760 +#include <linux/in.h>
49761 +#include <linux/ip.h>
49762 +#include <linux/skbuff.h>
49763 +#include <linux/sysctl.h>
49764 +
49765 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
49766 +void
49767 +pax_set_initial_flags(struct linux_binprm *bprm)
49768 +{
49769 + return;
49770 +}
49771 +#endif
49772 +
49773 +#ifdef CONFIG_SYSCTL
49774 +__u32
49775 +gr_handle_sysctl(const struct ctl_table * table, const int op)
49776 +{
49777 + return 0;
49778 +}
49779 +#endif
49780 +
49781 +#ifdef CONFIG_TASKSTATS
49782 +int gr_is_taskstats_denied(int pid)
49783 +{
49784 + return 0;
49785 +}
49786 +#endif
49787 +
49788 +int
49789 +gr_acl_is_enabled(void)
49790 +{
49791 + return 0;
49792 +}
49793 +
49794 +int
49795 +gr_handle_rawio(const struct inode *inode)
49796 +{
49797 + return 0;
49798 +}
49799 +
49800 +void
49801 +gr_acl_handle_psacct(struct task_struct *task, const long code)
49802 +{
49803 + return;
49804 +}
49805 +
49806 +int
49807 +gr_handle_ptrace(struct task_struct *task, const long request)
49808 +{
49809 + return 0;
49810 +}
49811 +
49812 +int
49813 +gr_handle_proc_ptrace(struct task_struct *task)
49814 +{
49815 + return 0;
49816 +}
49817 +
49818 +void
49819 +gr_learn_resource(const struct task_struct *task,
49820 + const int res, const unsigned long wanted, const int gt)
49821 +{
49822 + return;
49823 +}
49824 +
49825 +int
49826 +gr_set_acls(const int type)
49827 +{
49828 + return 0;
49829 +}
49830 +
49831 +int
49832 +gr_check_hidden_task(const struct task_struct *tsk)
49833 +{
49834 + return 0;
49835 +}
49836 +
49837 +int
49838 +gr_check_protected_task(const struct task_struct *task)
49839 +{
49840 + return 0;
49841 +}
49842 +
49843 +int
49844 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
49845 +{
49846 + return 0;
49847 +}
49848 +
49849 +void
49850 +gr_copy_label(struct task_struct *tsk)
49851 +{
49852 + return;
49853 +}
49854 +
49855 +void
49856 +gr_set_pax_flags(struct task_struct *task)
49857 +{
49858 + return;
49859 +}
49860 +
49861 +int
49862 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
49863 + const int unsafe_share)
49864 +{
49865 + return 0;
49866 +}
49867 +
49868 +void
49869 +gr_handle_delete(const ino_t ino, const dev_t dev)
49870 +{
49871 + return;
49872 +}
49873 +
49874 +void
49875 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
49876 +{
49877 + return;
49878 +}
49879 +
49880 +void
49881 +gr_handle_crash(struct task_struct *task, const int sig)
49882 +{
49883 + return;
49884 +}
49885 +
49886 +int
49887 +gr_check_crash_exec(const struct file *filp)
49888 +{
49889 + return 0;
49890 +}
49891 +
49892 +int
49893 +gr_check_crash_uid(const uid_t uid)
49894 +{
49895 + return 0;
49896 +}
49897 +
49898 +void
49899 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
49900 + struct dentry *old_dentry,
49901 + struct dentry *new_dentry,
49902 + struct vfsmount *mnt, const __u8 replace)
49903 +{
49904 + return;
49905 +}
49906 +
49907 +int
49908 +gr_search_socket(const int family, const int type, const int protocol)
49909 +{
49910 + return 1;
49911 +}
49912 +
49913 +int
49914 +gr_search_connectbind(const int mode, const struct socket *sock,
49915 + const struct sockaddr_in *addr)
49916 +{
49917 + return 0;
49918 +}
49919 +
49920 +int
49921 +gr_is_capable(const int cap)
49922 +{
49923 + return 1;
49924 +}
49925 +
49926 +int
49927 +gr_is_capable_nolog(const int cap)
49928 +{
49929 + return 1;
49930 +}
49931 +
49932 +void
49933 +gr_handle_alertkill(struct task_struct *task)
49934 +{
49935 + return;
49936 +}
49937 +
49938 +__u32
49939 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
49940 +{
49941 + return 1;
49942 +}
49943 +
49944 +__u32
49945 +gr_acl_handle_hidden_file(const struct dentry * dentry,
49946 + const struct vfsmount * mnt)
49947 +{
49948 + return 1;
49949 +}
49950 +
49951 +__u32
49952 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
49953 + const int fmode)
49954 +{
49955 + return 1;
49956 +}
49957 +
49958 +__u32
49959 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
49960 +{
49961 + return 1;
49962 +}
49963 +
49964 +__u32
49965 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
49966 +{
49967 + return 1;
49968 +}
49969 +
49970 +int
49971 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
49972 + unsigned int *vm_flags)
49973 +{
49974 + return 1;
49975 +}
49976 +
49977 +__u32
49978 +gr_acl_handle_truncate(const struct dentry * dentry,
49979 + const struct vfsmount * mnt)
49980 +{
49981 + return 1;
49982 +}
49983 +
49984 +__u32
49985 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
49986 +{
49987 + return 1;
49988 +}
49989 +
49990 +__u32
49991 +gr_acl_handle_access(const struct dentry * dentry,
49992 + const struct vfsmount * mnt, const int fmode)
49993 +{
49994 + return 1;
49995 +}
49996 +
49997 +__u32
49998 +gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
49999 + mode_t mode)
50000 +{
50001 + return 1;
50002 +}
50003 +
50004 +__u32
50005 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
50006 + mode_t mode)
50007 +{
50008 + return 1;
50009 +}
50010 +
50011 +__u32
50012 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
50013 +{
50014 + return 1;
50015 +}
50016 +
50017 +__u32
50018 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
50019 +{
50020 + return 1;
50021 +}
50022 +
50023 +void
50024 +grsecurity_init(void)
50025 +{
50026 + return;
50027 +}
50028 +
50029 +__u32
50030 +gr_acl_handle_mknod(const struct dentry * new_dentry,
50031 + const struct dentry * parent_dentry,
50032 + const struct vfsmount * parent_mnt,
50033 + const int mode)
50034 +{
50035 + return 1;
50036 +}
50037 +
50038 +__u32
50039 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
50040 + const struct dentry * parent_dentry,
50041 + const struct vfsmount * parent_mnt)
50042 +{
50043 + return 1;
50044 +}
50045 +
50046 +__u32
50047 +gr_acl_handle_symlink(const struct dentry * new_dentry,
50048 + const struct dentry * parent_dentry,
50049 + const struct vfsmount * parent_mnt, const char *from)
50050 +{
50051 + return 1;
50052 +}
50053 +
50054 +__u32
50055 +gr_acl_handle_link(const struct dentry * new_dentry,
50056 + const struct dentry * parent_dentry,
50057 + const struct vfsmount * parent_mnt,
50058 + const struct dentry * old_dentry,
50059 + const struct vfsmount * old_mnt, const char *to)
50060 +{
50061 + return 1;
50062 +}
50063 +
50064 +int
50065 +gr_acl_handle_rename(const struct dentry *new_dentry,
50066 + const struct dentry *parent_dentry,
50067 + const struct vfsmount *parent_mnt,
50068 + const struct dentry *old_dentry,
50069 + const struct inode *old_parent_inode,
50070 + const struct vfsmount *old_mnt, const char *newname)
50071 +{
50072 + return 0;
50073 +}
50074 +
50075 +int
50076 +gr_acl_handle_filldir(const struct file *file, const char *name,
50077 + const int namelen, const ino_t ino)
50078 +{
50079 + return 1;
50080 +}
50081 +
50082 +int
50083 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
50084 + const time_t shm_createtime, const uid_t cuid, const int shmid)
50085 +{
50086 + return 1;
50087 +}
50088 +
50089 +int
50090 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
50091 +{
50092 + return 0;
50093 +}
50094 +
50095 +int
50096 +gr_search_accept(const struct socket *sock)
50097 +{
50098 + return 0;
50099 +}
50100 +
50101 +int
50102 +gr_search_listen(const struct socket *sock)
50103 +{
50104 + return 0;
50105 +}
50106 +
50107 +int
50108 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
50109 +{
50110 + return 0;
50111 +}
50112 +
50113 +__u32
50114 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
50115 +{
50116 + return 1;
50117 +}
50118 +
50119 +__u32
50120 +gr_acl_handle_creat(const struct dentry * dentry,
50121 + const struct dentry * p_dentry,
50122 + const struct vfsmount * p_mnt, const int fmode,
50123 + const int imode)
50124 +{
50125 + return 1;
50126 +}
50127 +
50128 +void
50129 +gr_acl_handle_exit(void)
50130 +{
50131 + return;
50132 +}
50133 +
50134 +int
50135 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
50136 +{
50137 + return 1;
50138 +}
50139 +
50140 +void
50141 +gr_set_role_label(const uid_t uid, const gid_t gid)
50142 +{
50143 + return;
50144 +}
50145 +
50146 +int
50147 +gr_acl_handle_procpidmem(const struct task_struct *task)
50148 +{
50149 + return 0;
50150 +}
50151 +
50152 +int
50153 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
50154 +{
50155 + return 0;
50156 +}
50157 +
50158 +int
50159 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
50160 +{
50161 + return 0;
50162 +}
50163 +
50164 +void
50165 +gr_set_kernel_label(struct task_struct *task)
50166 +{
50167 + return;
50168 +}
50169 +
50170 +int
50171 +gr_check_user_change(int real, int effective, int fs)
50172 +{
50173 + return 0;
50174 +}
50175 +
50176 +int
50177 +gr_check_group_change(int real, int effective, int fs)
50178 +{
50179 + return 0;
50180 +}
50181 +
50182 +int gr_acl_enable_at_secure(void)
50183 +{
50184 + return 0;
50185 +}
50186 +
50187 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
50188 +{
50189 + return dentry->d_inode->i_sb->s_dev;
50190 +}
50191 +
50192 +EXPORT_SYMBOL(gr_is_capable);
50193 +EXPORT_SYMBOL(gr_is_capable_nolog);
50194 +EXPORT_SYMBOL(gr_learn_resource);
50195 +EXPORT_SYMBOL(gr_set_kernel_label);
50196 +#ifdef CONFIG_SECURITY
50197 +EXPORT_SYMBOL(gr_check_user_change);
50198 +EXPORT_SYMBOL(gr_check_group_change);
50199 +#endif
50200 diff -urNp linux-2.6.32.41/grsecurity/grsec_exec.c linux-2.6.32.41/grsecurity/grsec_exec.c
50201 --- linux-2.6.32.41/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
50202 +++ linux-2.6.32.41/grsecurity/grsec_exec.c 2011-04-17 15:56:46.000000000 -0400
50203 @@ -0,0 +1,148 @@
50204 +#include <linux/kernel.h>
50205 +#include <linux/sched.h>
50206 +#include <linux/file.h>
50207 +#include <linux/binfmts.h>
50208 +#include <linux/smp_lock.h>
50209 +#include <linux/fs.h>
50210 +#include <linux/types.h>
50211 +#include <linux/grdefs.h>
50212 +#include <linux/grinternal.h>
50213 +#include <linux/capability.h>
50214 +#include <linux/compat.h>
50215 +
50216 +#include <asm/uaccess.h>
50217 +
50218 +#ifdef CONFIG_GRKERNSEC_EXECLOG
50219 +static char gr_exec_arg_buf[132];
50220 +static DEFINE_MUTEX(gr_exec_arg_mutex);
50221 +#endif
50222 +
50223 +int
50224 +gr_handle_nproc(void)
50225 +{
50226 +#ifdef CONFIG_GRKERNSEC_EXECVE
50227 + const struct cred *cred = current_cred();
50228 + if (grsec_enable_execve && cred->user &&
50229 + (atomic_read(&cred->user->processes) >
50230 + current->signal->rlim[RLIMIT_NPROC].rlim_cur) &&
50231 + !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE)) {
50232 + gr_log_noargs(GR_DONT_AUDIT, GR_NPROC_MSG);
50233 + return -EAGAIN;
50234 + }
50235 +#endif
50236 + return 0;
50237 +}
50238 +
50239 +void
50240 +gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
50241 +{
50242 +#ifdef CONFIG_GRKERNSEC_EXECLOG
50243 + char *grarg = gr_exec_arg_buf;
50244 + unsigned int i, x, execlen = 0;
50245 + char c;
50246 +
50247 + if (!((grsec_enable_execlog && grsec_enable_group &&
50248 + in_group_p(grsec_audit_gid))
50249 + || (grsec_enable_execlog && !grsec_enable_group)))
50250 + return;
50251 +
50252 + mutex_lock(&gr_exec_arg_mutex);
50253 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
50254 +
50255 + if (unlikely(argv == NULL))
50256 + goto log;
50257 +
50258 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
50259 + const char __user *p;
50260 + unsigned int len;
50261 +
50262 + if (copy_from_user(&p, argv + i, sizeof(p)))
50263 + goto log;
50264 + if (!p)
50265 + goto log;
50266 + len = strnlen_user(p, 128 - execlen);
50267 + if (len > 128 - execlen)
50268 + len = 128 - execlen;
50269 + else if (len > 0)
50270 + len--;
50271 + if (copy_from_user(grarg + execlen, p, len))
50272 + goto log;
50273 +
50274 + /* rewrite unprintable characters */
50275 + for (x = 0; x < len; x++) {
50276 + c = *(grarg + execlen + x);
50277 + if (c < 32 || c > 126)
50278 + *(grarg + execlen + x) = ' ';
50279 + }
50280 +
50281 + execlen += len;
50282 + *(grarg + execlen) = ' ';
50283 + *(grarg + execlen + 1) = '\0';
50284 + execlen++;
50285 + }
50286 +
50287 + log:
50288 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
50289 + bprm->file->f_path.mnt, grarg);
50290 + mutex_unlock(&gr_exec_arg_mutex);
50291 +#endif
50292 + return;
50293 +}
50294 +
50295 +#ifdef CONFIG_COMPAT
50296 +void
50297 +gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
50298 +{
50299 +#ifdef CONFIG_GRKERNSEC_EXECLOG
50300 + char *grarg = gr_exec_arg_buf;
50301 + unsigned int i, x, execlen = 0;
50302 + char c;
50303 +
50304 + if (!((grsec_enable_execlog && grsec_enable_group &&
50305 + in_group_p(grsec_audit_gid))
50306 + || (grsec_enable_execlog && !grsec_enable_group)))
50307 + return;
50308 +
50309 + mutex_lock(&gr_exec_arg_mutex);
50310 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
50311 +
50312 + if (unlikely(argv == NULL))
50313 + goto log;
50314 +
50315 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
50316 + compat_uptr_t p;
50317 + unsigned int len;
50318 +
50319 + if (get_user(p, argv + i))
50320 + goto log;
50321 + len = strnlen_user(compat_ptr(p), 128 - execlen);
50322 + if (len > 128 - execlen)
50323 + len = 128 - execlen;
50324 + else if (len > 0)
50325 + len--;
50326 + else
50327 + goto log;
50328 + if (copy_from_user(grarg + execlen, compat_ptr(p), len))
50329 + goto log;
50330 +
50331 + /* rewrite unprintable characters */
50332 + for (x = 0; x < len; x++) {
50333 + c = *(grarg + execlen + x);
50334 + if (c < 32 || c > 126)
50335 + *(grarg + execlen + x) = ' ';
50336 + }
50337 +
50338 + execlen += len;
50339 + *(grarg + execlen) = ' ';
50340 + *(grarg + execlen + 1) = '\0';
50341 + execlen++;
50342 + }
50343 +
50344 + log:
50345 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
50346 + bprm->file->f_path.mnt, grarg);
50347 + mutex_unlock(&gr_exec_arg_mutex);
50348 +#endif
50349 + return;
50350 +}
50351 +#endif
50352 diff -urNp linux-2.6.32.41/grsecurity/grsec_fifo.c linux-2.6.32.41/grsecurity/grsec_fifo.c
50353 --- linux-2.6.32.41/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
50354 +++ linux-2.6.32.41/grsecurity/grsec_fifo.c 2011-04-17 15:56:46.000000000 -0400
50355 @@ -0,0 +1,24 @@
50356 +#include <linux/kernel.h>
50357 +#include <linux/sched.h>
50358 +#include <linux/fs.h>
50359 +#include <linux/file.h>
50360 +#include <linux/grinternal.h>
50361 +
50362 +int
50363 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
50364 + const struct dentry *dir, const int flag, const int acc_mode)
50365 +{
50366 +#ifdef CONFIG_GRKERNSEC_FIFO
50367 + const struct cred *cred = current_cred();
50368 +
50369 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
50370 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
50371 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
50372 + (cred->fsuid != dentry->d_inode->i_uid)) {
50373 + if (!inode_permission(dentry->d_inode, acc_mode))
50374 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
50375 + return -EACCES;
50376 + }
50377 +#endif
50378 + return 0;
50379 +}
50380 diff -urNp linux-2.6.32.41/grsecurity/grsec_fork.c linux-2.6.32.41/grsecurity/grsec_fork.c
50381 --- linux-2.6.32.41/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
50382 +++ linux-2.6.32.41/grsecurity/grsec_fork.c 2011-04-17 15:56:46.000000000 -0400
50383 @@ -0,0 +1,23 @@
50384 +#include <linux/kernel.h>
50385 +#include <linux/sched.h>
50386 +#include <linux/grsecurity.h>
50387 +#include <linux/grinternal.h>
50388 +#include <linux/errno.h>
50389 +
50390 +void
50391 +gr_log_forkfail(const int retval)
50392 +{
50393 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
50394 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
50395 + switch (retval) {
50396 + case -EAGAIN:
50397 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
50398 + break;
50399 + case -ENOMEM:
50400 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
50401 + break;
50402 + }
50403 + }
50404 +#endif
50405 + return;
50406 +}
50407 diff -urNp linux-2.6.32.41/grsecurity/grsec_init.c linux-2.6.32.41/grsecurity/grsec_init.c
50408 --- linux-2.6.32.41/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
50409 +++ linux-2.6.32.41/grsecurity/grsec_init.c 2011-04-17 15:56:46.000000000 -0400
50410 @@ -0,0 +1,270 @@
50411 +#include <linux/kernel.h>
50412 +#include <linux/sched.h>
50413 +#include <linux/mm.h>
50414 +#include <linux/smp_lock.h>
50415 +#include <linux/gracl.h>
50416 +#include <linux/slab.h>
50417 +#include <linux/vmalloc.h>
50418 +#include <linux/percpu.h>
50419 +#include <linux/module.h>
50420 +
50421 +int grsec_enable_link;
50422 +int grsec_enable_dmesg;
50423 +int grsec_enable_harden_ptrace;
50424 +int grsec_enable_fifo;
50425 +int grsec_enable_execve;
50426 +int grsec_enable_execlog;
50427 +int grsec_enable_signal;
50428 +int grsec_enable_forkfail;
50429 +int grsec_enable_audit_ptrace;
50430 +int grsec_enable_time;
50431 +int grsec_enable_audit_textrel;
50432 +int grsec_enable_group;
50433 +int grsec_audit_gid;
50434 +int grsec_enable_chdir;
50435 +int grsec_enable_mount;
50436 +int grsec_enable_rofs;
50437 +int grsec_enable_chroot_findtask;
50438 +int grsec_enable_chroot_mount;
50439 +int grsec_enable_chroot_shmat;
50440 +int grsec_enable_chroot_fchdir;
50441 +int grsec_enable_chroot_double;
50442 +int grsec_enable_chroot_pivot;
50443 +int grsec_enable_chroot_chdir;
50444 +int grsec_enable_chroot_chmod;
50445 +int grsec_enable_chroot_mknod;
50446 +int grsec_enable_chroot_nice;
50447 +int grsec_enable_chroot_execlog;
50448 +int grsec_enable_chroot_caps;
50449 +int grsec_enable_chroot_sysctl;
50450 +int grsec_enable_chroot_unix;
50451 +int grsec_enable_tpe;
50452 +int grsec_tpe_gid;
50453 +int grsec_enable_blackhole;
50454 +#ifdef CONFIG_IPV6_MODULE
50455 +EXPORT_SYMBOL(grsec_enable_blackhole);
50456 +#endif
50457 +int grsec_lastack_retries;
50458 +int grsec_enable_tpe_all;
50459 +int grsec_enable_tpe_invert;
50460 +int grsec_enable_socket_all;
50461 +int grsec_socket_all_gid;
50462 +int grsec_enable_socket_client;
50463 +int grsec_socket_client_gid;
50464 +int grsec_enable_socket_server;
50465 +int grsec_socket_server_gid;
50466 +int grsec_resource_logging;
50467 +int grsec_disable_privio;
50468 +int grsec_enable_log_rwxmaps;
50469 +int grsec_lock;
50470 +
50471 +DEFINE_SPINLOCK(grsec_alert_lock);
50472 +unsigned long grsec_alert_wtime = 0;
50473 +unsigned long grsec_alert_fyet = 0;
50474 +
50475 +DEFINE_SPINLOCK(grsec_audit_lock);
50476 +
50477 +DEFINE_RWLOCK(grsec_exec_file_lock);
50478 +
50479 +char *gr_shared_page[4];
50480 +
50481 +char *gr_alert_log_fmt;
50482 +char *gr_audit_log_fmt;
50483 +char *gr_alert_log_buf;
50484 +char *gr_audit_log_buf;
50485 +
50486 +extern struct gr_arg *gr_usermode;
50487 +extern unsigned char *gr_system_salt;
50488 +extern unsigned char *gr_system_sum;
50489 +
50490 +void __init
50491 +grsecurity_init(void)
50492 +{
50493 + int j;
50494 + /* create the per-cpu shared pages */
50495 +
50496 +#ifdef CONFIG_X86
50497 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
50498 +#endif
50499 +
50500 + for (j = 0; j < 4; j++) {
50501 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
50502 + if (gr_shared_page[j] == NULL) {
50503 + panic("Unable to allocate grsecurity shared page");
50504 + return;
50505 + }
50506 + }
50507 +
50508 + /* allocate log buffers */
50509 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
50510 + if (!gr_alert_log_fmt) {
50511 + panic("Unable to allocate grsecurity alert log format buffer");
50512 + return;
50513 + }
50514 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
50515 + if (!gr_audit_log_fmt) {
50516 + panic("Unable to allocate grsecurity audit log format buffer");
50517 + return;
50518 + }
50519 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
50520 + if (!gr_alert_log_buf) {
50521 + panic("Unable to allocate grsecurity alert log buffer");
50522 + return;
50523 + }
50524 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
50525 + if (!gr_audit_log_buf) {
50526 + panic("Unable to allocate grsecurity audit log buffer");
50527 + return;
50528 + }
50529 +
50530 + /* allocate memory for authentication structure */
50531 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
50532 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
50533 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
50534 +
50535 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
50536 + panic("Unable to allocate grsecurity authentication structure");
50537 + return;
50538 + }
50539 +
50540 +
50541 +#ifdef CONFIG_GRKERNSEC_IO
50542 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
50543 + grsec_disable_privio = 1;
50544 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
50545 + grsec_disable_privio = 1;
50546 +#else
50547 + grsec_disable_privio = 0;
50548 +#endif
50549 +#endif
50550 +
50551 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
50552 +	/* for backwards compatibility, tpe_invert always defaults to on
50553 +	   when CONFIG_GRKERNSEC_TPE_INVERT is enabled in the kernel config
50554 +	 */
50555 + grsec_enable_tpe_invert = 1;
50556 +#endif
50557 +
50558 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
50559 +#ifndef CONFIG_GRKERNSEC_SYSCTL
50560 + grsec_lock = 1;
50561 +#endif
50562 +
50563 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
50564 + grsec_enable_audit_textrel = 1;
50565 +#endif
50566 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
50567 + grsec_enable_log_rwxmaps = 1;
50568 +#endif
50569 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
50570 + grsec_enable_group = 1;
50571 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
50572 +#endif
50573 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
50574 + grsec_enable_chdir = 1;
50575 +#endif
50576 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
50577 + grsec_enable_harden_ptrace = 1;
50578 +#endif
50579 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
50580 + grsec_enable_mount = 1;
50581 +#endif
50582 +#ifdef CONFIG_GRKERNSEC_LINK
50583 + grsec_enable_link = 1;
50584 +#endif
50585 +#ifdef CONFIG_GRKERNSEC_DMESG
50586 + grsec_enable_dmesg = 1;
50587 +#endif
50588 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
50589 + grsec_enable_blackhole = 1;
50590 + grsec_lastack_retries = 4;
50591 +#endif
50592 +#ifdef CONFIG_GRKERNSEC_FIFO
50593 + grsec_enable_fifo = 1;
50594 +#endif
50595 +#ifdef CONFIG_GRKERNSEC_EXECVE
50596 + grsec_enable_execve = 1;
50597 +#endif
50598 +#ifdef CONFIG_GRKERNSEC_EXECLOG
50599 + grsec_enable_execlog = 1;
50600 +#endif
50601 +#ifdef CONFIG_GRKERNSEC_SIGNAL
50602 + grsec_enable_signal = 1;
50603 +#endif
50604 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
50605 + grsec_enable_forkfail = 1;
50606 +#endif
50607 +#ifdef CONFIG_GRKERNSEC_TIME
50608 + grsec_enable_time = 1;
50609 +#endif
50610 +#ifdef CONFIG_GRKERNSEC_RESLOG
50611 + grsec_resource_logging = 1;
50612 +#endif
50613 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
50614 + grsec_enable_chroot_findtask = 1;
50615 +#endif
50616 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
50617 + grsec_enable_chroot_unix = 1;
50618 +#endif
50619 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
50620 + grsec_enable_chroot_mount = 1;
50621 +#endif
50622 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
50623 + grsec_enable_chroot_fchdir = 1;
50624 +#endif
50625 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
50626 + grsec_enable_chroot_shmat = 1;
50627 +#endif
50628 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
50629 + grsec_enable_audit_ptrace = 1;
50630 +#endif
50631 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
50632 + grsec_enable_chroot_double = 1;
50633 +#endif
50634 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
50635 + grsec_enable_chroot_pivot = 1;
50636 +#endif
50637 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
50638 + grsec_enable_chroot_chdir = 1;
50639 +#endif
50640 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
50641 + grsec_enable_chroot_chmod = 1;
50642 +#endif
50643 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
50644 + grsec_enable_chroot_mknod = 1;
50645 +#endif
50646 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
50647 + grsec_enable_chroot_nice = 1;
50648 +#endif
50649 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
50650 + grsec_enable_chroot_execlog = 1;
50651 +#endif
50652 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50653 + grsec_enable_chroot_caps = 1;
50654 +#endif
50655 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
50656 + grsec_enable_chroot_sysctl = 1;
50657 +#endif
50658 +#ifdef CONFIG_GRKERNSEC_TPE
50659 + grsec_enable_tpe = 1;
50660 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
50661 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
50662 + grsec_enable_tpe_all = 1;
50663 +#endif
50664 +#endif
50665 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
50666 + grsec_enable_socket_all = 1;
50667 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
50668 +#endif
50669 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
50670 + grsec_enable_socket_client = 1;
50671 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
50672 +#endif
50673 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
50674 + grsec_enable_socket_server = 1;
50675 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
50676 +#endif
50677 +#endif
50678 +
50679 + return;
50680 +}
50681 diff -urNp linux-2.6.32.41/grsecurity/grsec_link.c linux-2.6.32.41/grsecurity/grsec_link.c
50682 --- linux-2.6.32.41/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
50683 +++ linux-2.6.32.41/grsecurity/grsec_link.c 2011-04-17 15:56:46.000000000 -0400
50684 @@ -0,0 +1,43 @@
50685 +#include <linux/kernel.h>
50686 +#include <linux/sched.h>
50687 +#include <linux/fs.h>
50688 +#include <linux/file.h>
50689 +#include <linux/grinternal.h>
50690 +
50691 +int
50692 +gr_handle_follow_link(const struct inode *parent,
50693 + const struct inode *inode,
50694 + const struct dentry *dentry, const struct vfsmount *mnt)
50695 +{
50696 +#ifdef CONFIG_GRKERNSEC_LINK
50697 + const struct cred *cred = current_cred();
50698 +
50699 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
50700 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
50701 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
50702 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
50703 + return -EACCES;
50704 + }
50705 +#endif
50706 + return 0;
50707 +}
50708 +
50709 +int
50710 +gr_handle_hardlink(const struct dentry *dentry,
50711 + const struct vfsmount *mnt,
50712 + struct inode *inode, const int mode, const char *to)
50713 +{
50714 +#ifdef CONFIG_GRKERNSEC_LINK
50715 + const struct cred *cred = current_cred();
50716 +
50717 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
50718 + (!S_ISREG(mode) || (mode & S_ISUID) ||
50719 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
50720 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
50721 + !capable(CAP_FOWNER) && cred->uid) {
50722 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
50723 + return -EPERM;
50724 + }
50725 +#endif
50726 + return 0;
50727 +}
50728 diff -urNp linux-2.6.32.41/grsecurity/grsec_log.c linux-2.6.32.41/grsecurity/grsec_log.c
50729 --- linux-2.6.32.41/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
50730 +++ linux-2.6.32.41/grsecurity/grsec_log.c 2011-05-10 21:58:49.000000000 -0400
50731 @@ -0,0 +1,310 @@
50732 +#include <linux/kernel.h>
50733 +#include <linux/sched.h>
50734 +#include <linux/file.h>
50735 +#include <linux/tty.h>
50736 +#include <linux/fs.h>
50737 +#include <linux/grinternal.h>
50738 +
50739 +#ifdef CONFIG_TREE_PREEMPT_RCU
50740 +#define DISABLE_PREEMPT() preempt_disable()
50741 +#define ENABLE_PREEMPT() preempt_enable()
50742 +#else
50743 +#define DISABLE_PREEMPT()
50744 +#define ENABLE_PREEMPT()
50745 +#endif
50746 +
50747 +#define BEGIN_LOCKS(x) \
50748 + DISABLE_PREEMPT(); \
50749 + rcu_read_lock(); \
50750 + read_lock(&tasklist_lock); \
50751 + read_lock(&grsec_exec_file_lock); \
50752 + if (x != GR_DO_AUDIT) \
50753 + spin_lock(&grsec_alert_lock); \
50754 + else \
50755 + spin_lock(&grsec_audit_lock)
50756 +
50757 +#define END_LOCKS(x) \
50758 + if (x != GR_DO_AUDIT) \
50759 + spin_unlock(&grsec_alert_lock); \
50760 + else \
50761 + spin_unlock(&grsec_audit_lock); \
50762 + read_unlock(&grsec_exec_file_lock); \
50763 + read_unlock(&tasklist_lock); \
50764 + rcu_read_unlock(); \
50765 + ENABLE_PREEMPT(); \
50766 + if (x == GR_DONT_AUDIT) \
50767 + gr_handle_alertkill(current)
50768 +
50769 +enum {
50770 + FLOODING,
50771 + NO_FLOODING
50772 +};
50773 +
50774 +extern char *gr_alert_log_fmt;
50775 +extern char *gr_audit_log_fmt;
50776 +extern char *gr_alert_log_buf;
50777 +extern char *gr_audit_log_buf;
50778 +
50779 +static int gr_log_start(int audit)
50780 +{
50781 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
50782 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
50783 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
50784 +
50785 + if (audit == GR_DO_AUDIT)
50786 + goto set_fmt;
50787 +
50788 + if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
50789 + grsec_alert_wtime = jiffies;
50790 + grsec_alert_fyet = 0;
50791 + } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
50792 + grsec_alert_fyet++;
50793 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
50794 + grsec_alert_wtime = jiffies;
50795 + grsec_alert_fyet++;
50796 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
50797 + return FLOODING;
50798 + } else return FLOODING;
50799 +
50800 +set_fmt:
50801 + memset(buf, 0, PAGE_SIZE);
50802 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
50803 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
50804 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
50805 + } else if (current->signal->curr_ip) {
50806 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
50807 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
50808 + } else if (gr_acl_is_enabled()) {
50809 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
50810 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
50811 + } else {
50812 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
50813 + strcpy(buf, fmt);
50814 + }
50815 +
50816 + return NO_FLOODING;
50817 +}
50818 +
50819 +static void gr_log_middle(int audit, const char *msg, va_list ap)
50820 + __attribute__ ((format (printf, 2, 0)));
50821 +
50822 +static void gr_log_middle(int audit, const char *msg, va_list ap)
50823 +{
50824 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
50825 + unsigned int len = strlen(buf);
50826 +
50827 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
50828 +
50829 + return;
50830 +}
50831 +
50832 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
50833 + __attribute__ ((format (printf, 2, 3)));
50834 +
50835 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
50836 +{
50837 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
50838 + unsigned int len = strlen(buf);
50839 + va_list ap;
50840 +
50841 + va_start(ap, msg);
50842 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
50843 + va_end(ap);
50844 +
50845 + return;
50846 +}
50847 +
50848 +static void gr_log_end(int audit)
50849 +{
50850 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
50851 + unsigned int len = strlen(buf);
50852 +
50853 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
50854 + printk("%s\n", buf);
50855 +
50856 + return;
50857 +}
50858 +
50859 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
50860 +{
50861 + int logtype;
50862 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
50863 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
50864 + void *voidptr = NULL;
50865 + int num1 = 0, num2 = 0;
50866 + unsigned long ulong1 = 0, ulong2 = 0;
50867 + struct dentry *dentry = NULL;
50868 + struct vfsmount *mnt = NULL;
50869 + struct file *file = NULL;
50870 + struct task_struct *task = NULL;
50871 + const struct cred *cred, *pcred;
50872 + va_list ap;
50873 +
50874 + BEGIN_LOCKS(audit);
50875 + logtype = gr_log_start(audit);
50876 + if (logtype == FLOODING) {
50877 + END_LOCKS(audit);
50878 + return;
50879 + }
50880 + va_start(ap, argtypes);
50881 + switch (argtypes) {
50882 + case GR_TTYSNIFF:
50883 + task = va_arg(ap, struct task_struct *);
50884 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
50885 + break;
50886 + case GR_SYSCTL_HIDDEN:
50887 + str1 = va_arg(ap, char *);
50888 + gr_log_middle_varargs(audit, msg, result, str1);
50889 + break;
50890 + case GR_RBAC:
50891 + dentry = va_arg(ap, struct dentry *);
50892 + mnt = va_arg(ap, struct vfsmount *);
50893 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
50894 + break;
50895 + case GR_RBAC_STR:
50896 + dentry = va_arg(ap, struct dentry *);
50897 + mnt = va_arg(ap, struct vfsmount *);
50898 + str1 = va_arg(ap, char *);
50899 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
50900 + break;
50901 + case GR_STR_RBAC:
50902 + str1 = va_arg(ap, char *);
50903 + dentry = va_arg(ap, struct dentry *);
50904 + mnt = va_arg(ap, struct vfsmount *);
50905 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
50906 + break;
50907 + case GR_RBAC_MODE2:
50908 + dentry = va_arg(ap, struct dentry *);
50909 + mnt = va_arg(ap, struct vfsmount *);
50910 + str1 = va_arg(ap, char *);
50911 + str2 = va_arg(ap, char *);
50912 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
50913 + break;
50914 + case GR_RBAC_MODE3:
50915 + dentry = va_arg(ap, struct dentry *);
50916 + mnt = va_arg(ap, struct vfsmount *);
50917 + str1 = va_arg(ap, char *);
50918 + str2 = va_arg(ap, char *);
50919 + str3 = va_arg(ap, char *);
50920 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
50921 + break;
50922 + case GR_FILENAME:
50923 + dentry = va_arg(ap, struct dentry *);
50924 + mnt = va_arg(ap, struct vfsmount *);
50925 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
50926 + break;
50927 + case GR_STR_FILENAME:
50928 + str1 = va_arg(ap, char *);
50929 + dentry = va_arg(ap, struct dentry *);
50930 + mnt = va_arg(ap, struct vfsmount *);
50931 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
50932 + break;
50933 + case GR_FILENAME_STR:
50934 + dentry = va_arg(ap, struct dentry *);
50935 + mnt = va_arg(ap, struct vfsmount *);
50936 + str1 = va_arg(ap, char *);
50937 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
50938 + break;
50939 + case GR_FILENAME_TWO_INT:
50940 + dentry = va_arg(ap, struct dentry *);
50941 + mnt = va_arg(ap, struct vfsmount *);
50942 + num1 = va_arg(ap, int);
50943 + num2 = va_arg(ap, int);
50944 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
50945 + break;
50946 + case GR_FILENAME_TWO_INT_STR:
50947 + dentry = va_arg(ap, struct dentry *);
50948 + mnt = va_arg(ap, struct vfsmount *);
50949 + num1 = va_arg(ap, int);
50950 + num2 = va_arg(ap, int);
50951 + str1 = va_arg(ap, char *);
50952 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
50953 + break;
50954 + case GR_TEXTREL:
50955 + file = va_arg(ap, struct file *);
50956 + ulong1 = va_arg(ap, unsigned long);
50957 + ulong2 = va_arg(ap, unsigned long);
50958 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
50959 + break;
50960 + case GR_PTRACE:
50961 + task = va_arg(ap, struct task_struct *);
50962 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
50963 + break;
50964 + case GR_RESOURCE:
50965 + task = va_arg(ap, struct task_struct *);
50966 + cred = __task_cred(task);
50967 + pcred = __task_cred(task->real_parent);
50968 + ulong1 = va_arg(ap, unsigned long);
50969 + str1 = va_arg(ap, char *);
50970 + ulong2 = va_arg(ap, unsigned long);
50971 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
50972 + break;
50973 + case GR_CAP:
50974 + task = va_arg(ap, struct task_struct *);
50975 + cred = __task_cred(task);
50976 + pcred = __task_cred(task->real_parent);
50977 + str1 = va_arg(ap, char *);
50978 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
50979 + break;
50980 + case GR_SIG:
50981 + str1 = va_arg(ap, char *);
50982 + voidptr = va_arg(ap, void *);
50983 + gr_log_middle_varargs(audit, msg, str1, voidptr);
50984 + break;
50985 + case GR_SIG2:
50986 + task = va_arg(ap, struct task_struct *);
50987 + cred = __task_cred(task);
50988 + pcred = __task_cred(task->real_parent);
50989 + num1 = va_arg(ap, int);
50990 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
50991 + break;
50992 + case GR_CRASH1:
50993 + task = va_arg(ap, struct task_struct *);
50994 + cred = __task_cred(task);
50995 + pcred = __task_cred(task->real_parent);
50996 + ulong1 = va_arg(ap, unsigned long);
50997 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
50998 + break;
50999 + case GR_CRASH2:
51000 + task = va_arg(ap, struct task_struct *);
51001 + cred = __task_cred(task);
51002 + pcred = __task_cred(task->real_parent);
51003 + ulong1 = va_arg(ap, unsigned long);
51004 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
51005 + break;
51006 + case GR_RWXMAP:
51007 + file = va_arg(ap, struct file *);
51008 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
51009 + break;
51010 + case GR_PSACCT:
51011 + {
51012 + unsigned int wday, cday;
51013 + __u8 whr, chr;
51014 + __u8 wmin, cmin;
51015 + __u8 wsec, csec;
51016 + char cur_tty[64] = { 0 };
51017 + char parent_tty[64] = { 0 };
51018 +
51019 + task = va_arg(ap, struct task_struct *);
51020 + wday = va_arg(ap, unsigned int);
51021 + cday = va_arg(ap, unsigned int);
51022 + whr = va_arg(ap, int);
51023 + chr = va_arg(ap, int);
51024 + wmin = va_arg(ap, int);
51025 + cmin = va_arg(ap, int);
51026 + wsec = va_arg(ap, int);
51027 + csec = va_arg(ap, int);
51028 + ulong1 = va_arg(ap, unsigned long);
51029 + cred = __task_cred(task);
51030 + pcred = __task_cred(task->real_parent);
51031 +
51032 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51033 + }
51034 + break;
51035 + default:
51036 + gr_log_middle(audit, msg, ap);
51037 + }
51038 + va_end(ap);
51039 + gr_log_end(audit);
51040 + END_LOCKS(audit);
51041 +}
51042 diff -urNp linux-2.6.32.41/grsecurity/grsec_mem.c linux-2.6.32.41/grsecurity/grsec_mem.c
51043 --- linux-2.6.32.41/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
51044 +++ linux-2.6.32.41/grsecurity/grsec_mem.c 2011-04-17 15:56:46.000000000 -0400
51045 @@ -0,0 +1,33 @@
51046 +#include <linux/kernel.h>
51047 +#include <linux/sched.h>
51048 +#include <linux/mm.h>
51049 +#include <linux/mman.h>
51050 +#include <linux/grinternal.h>
51051 +
51052 +void
51053 +gr_handle_ioperm(void)
51054 +{
51055 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
51056 + return;
51057 +}
51058 +
51059 +void
51060 +gr_handle_iopl(void)
51061 +{
51062 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
51063 + return;
51064 +}
51065 +
51066 +void
51067 +gr_handle_mem_readwrite(u64 from, u64 to)
51068 +{
51069 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
51070 + return;
51071 +}
51072 +
51073 +void
51074 +gr_handle_vm86(void)
51075 +{
51076 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
51077 + return;
51078 +}
51079 diff -urNp linux-2.6.32.41/grsecurity/grsec_mount.c linux-2.6.32.41/grsecurity/grsec_mount.c
51080 --- linux-2.6.32.41/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
51081 +++ linux-2.6.32.41/grsecurity/grsec_mount.c 2011-04-17 15:56:46.000000000 -0400
51082 @@ -0,0 +1,62 @@
51083 +#include <linux/kernel.h>
51084 +#include <linux/sched.h>
51085 +#include <linux/mount.h>
51086 +#include <linux/grsecurity.h>
51087 +#include <linux/grinternal.h>
51088 +
51089 +void
51090 +gr_log_remount(const char *devname, const int retval)
51091 +{
51092 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51093 + if (grsec_enable_mount && (retval >= 0))
51094 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
51095 +#endif
51096 + return;
51097 +}
51098 +
51099 +void
51100 +gr_log_unmount(const char *devname, const int retval)
51101 +{
51102 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51103 + if (grsec_enable_mount && (retval >= 0))
51104 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
51105 +#endif
51106 + return;
51107 +}
51108 +
51109 +void
51110 +gr_log_mount(const char *from, const char *to, const int retval)
51111 +{
51112 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51113 + if (grsec_enable_mount && (retval >= 0))
51114 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from, to);
51115 +#endif
51116 + return;
51117 +}
51118 +
51119 +int
51120 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
51121 +{
51122 +#ifdef CONFIG_GRKERNSEC_ROFS
51123 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
51124 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
51125 + return -EPERM;
51126 + } else
51127 + return 0;
51128 +#endif
51129 + return 0;
51130 +}
51131 +
51132 +int
51133 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
51134 +{
51135 +#ifdef CONFIG_GRKERNSEC_ROFS
51136 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
51137 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
51138 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
51139 + return -EPERM;
51140 + } else
51141 + return 0;
51142 +#endif
51143 + return 0;
51144 +}
51145 diff -urNp linux-2.6.32.41/grsecurity/grsec_pax.c linux-2.6.32.41/grsecurity/grsec_pax.c
51146 --- linux-2.6.32.41/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
51147 +++ linux-2.6.32.41/grsecurity/grsec_pax.c 2011-04-17 15:56:46.000000000 -0400
51148 @@ -0,0 +1,36 @@
51149 +#include <linux/kernel.h>
51150 +#include <linux/sched.h>
51151 +#include <linux/mm.h>
51152 +#include <linux/file.h>
51153 +#include <linux/grinternal.h>
51154 +#include <linux/grsecurity.h>
51155 +
51156 +void
51157 +gr_log_textrel(struct vm_area_struct * vma)
51158 +{
51159 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
51160 + if (grsec_enable_audit_textrel)
51161 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
51162 +#endif
51163 + return;
51164 +}
51165 +
51166 +void
51167 +gr_log_rwxmmap(struct file *file)
51168 +{
51169 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
51170 + if (grsec_enable_log_rwxmaps)
51171 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
51172 +#endif
51173 + return;
51174 +}
51175 +
51176 +void
51177 +gr_log_rwxmprotect(struct file *file)
51178 +{
51179 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
51180 + if (grsec_enable_log_rwxmaps)
51181 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
51182 +#endif
51183 + return;
51184 +}
51185 diff -urNp linux-2.6.32.41/grsecurity/grsec_ptrace.c linux-2.6.32.41/grsecurity/grsec_ptrace.c
51186 --- linux-2.6.32.41/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
51187 +++ linux-2.6.32.41/grsecurity/grsec_ptrace.c 2011-04-17 15:56:46.000000000 -0400
51188 @@ -0,0 +1,14 @@
51189 +#include <linux/kernel.h>
51190 +#include <linux/sched.h>
51191 +#include <linux/grinternal.h>
51192 +#include <linux/grsecurity.h>
51193 +
51194 +void
51195 +gr_audit_ptrace(struct task_struct *task)
51196 +{
51197 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
51198 + if (grsec_enable_audit_ptrace)
51199 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
51200 +#endif
51201 + return;
51202 +}
51203 diff -urNp linux-2.6.32.41/grsecurity/grsec_sig.c linux-2.6.32.41/grsecurity/grsec_sig.c
51204 --- linux-2.6.32.41/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
51205 +++ linux-2.6.32.41/grsecurity/grsec_sig.c 2011-05-17 17:30:04.000000000 -0400
51206 @@ -0,0 +1,202 @@
51207 +#include <linux/kernel.h>
51208 +#include <linux/sched.h>
51209 +#include <linux/delay.h>
51210 +#include <linux/grsecurity.h>
51211 +#include <linux/grinternal.h>
51212 +#include <linux/hardirq.h>
51213 +
51214 +char *signames[] = {
51215 + [SIGSEGV] = "Segmentation fault",
51216 + [SIGILL] = "Illegal instruction",
51217 + [SIGABRT] = "Abort",
51218 + [SIGBUS] = "Invalid alignment/Bus error"
51219 +};
51220 +
51221 +void
51222 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
51223 +{
51224 +#ifdef CONFIG_GRKERNSEC_SIGNAL
51225 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
51226 + (sig == SIGABRT) || (sig == SIGBUS))) {
51227 + if (t->pid == current->pid) {
51228 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
51229 + } else {
51230 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
51231 + }
51232 + }
51233 +#endif
51234 + return;
51235 +}
51236 +
51237 +int
51238 +gr_handle_signal(const struct task_struct *p, const int sig)
51239 +{
51240 +#ifdef CONFIG_GRKERNSEC
51241 + if (current->pid > 1 && gr_check_protected_task(p)) {
51242 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
51243 + return -EPERM;
51244 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
51245 + return -EPERM;
51246 + }
51247 +#endif
51248 + return 0;
51249 +}
51250 +
51251 +#ifdef CONFIG_GRKERNSEC
51252 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
51253 +
51254 +int gr_fake_force_sig(int sig, struct task_struct *t)
51255 +{
51256 + unsigned long int flags;
51257 + int ret, blocked, ignored;
51258 + struct k_sigaction *action;
51259 +
51260 + spin_lock_irqsave(&t->sighand->siglock, flags);
51261 + action = &t->sighand->action[sig-1];
51262 + ignored = action->sa.sa_handler == SIG_IGN;
51263 + blocked = sigismember(&t->blocked, sig);
51264 + if (blocked || ignored) {
51265 + action->sa.sa_handler = SIG_DFL;
51266 + if (blocked) {
51267 + sigdelset(&t->blocked, sig);
51268 + recalc_sigpending_and_wake(t);
51269 + }
51270 + }
51271 + if (action->sa.sa_handler == SIG_DFL)
51272 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
51273 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
51274 +
51275 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
51276 +
51277 + return ret;
51278 +}
51279 +#endif
51280 +
51281 +#ifdef CONFIG_GRKERNSEC_BRUTE
51282 +#define GR_USER_BAN_TIME (15 * 60)
51283 +
51284 +static int __get_dumpable(unsigned long mm_flags)
51285 +{
51286 + int ret;
51287 +
51288 + ret = mm_flags & MMF_DUMPABLE_MASK;
51289 + return (ret >= 2) ? 2 : ret;
51290 +}
51291 +#endif
51292 +
51293 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
51294 +{
51295 +#ifdef CONFIG_GRKERNSEC_BRUTE
51296 + uid_t uid = 0;
51297 +
51298 + rcu_read_lock();
51299 + read_lock(&tasklist_lock);
51300 + read_lock(&grsec_exec_file_lock);
51301 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
51302 + p->real_parent->brute = 1;
51303 + else {
51304 + const struct cred *cred = __task_cred(p), *cred2;
51305 + struct task_struct *tsk, *tsk2;
51306 +
51307 + if (!__get_dumpable(mm_flags) && cred->uid) {
51308 + struct user_struct *user;
51309 +
51310 + uid = cred->uid;
51311 +
51312 +			/* this reference is dropped (put) on the next exec after the ban expires */
51313 + user = find_user(uid);
51314 + if (user == NULL)
51315 + goto unlock;
51316 + user->banned = 1;
51317 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
51318 + if (user->ban_expires == ~0UL)
51319 + user->ban_expires--;
51320 +
51321 + do_each_thread(tsk2, tsk) {
51322 + cred2 = __task_cred(tsk);
51323 + if (tsk != p && cred2->uid == uid)
51324 + gr_fake_force_sig(SIGKILL, tsk);
51325 + } while_each_thread(tsk2, tsk);
51326 + }
51327 + }
51328 +unlock:
51329 + read_unlock(&grsec_exec_file_lock);
51330 + read_unlock(&tasklist_lock);
51331 + rcu_read_unlock();
51332 +
51333 + if (uid)
51334 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
51335 +#endif
51336 + return;
51337 +}
51338 +
51339 +void gr_handle_brute_check(void)
51340 +{
51341 +#ifdef CONFIG_GRKERNSEC_BRUTE
51342 + if (current->brute)
51343 + msleep(30 * 1000);
51344 +#endif
51345 + return;
51346 +}
51347 +
51348 +void gr_handle_kernel_exploit(void)
51349 +{
51350 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
51351 + const struct cred *cred;
51352 + struct task_struct *tsk, *tsk2;
51353 + struct user_struct *user;
51354 + uid_t uid;
51355 +
51356 + if (in_irq() || in_serving_softirq() || in_nmi())
51357 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
51358 +
51359 + uid = current_uid();
51360 +
51361 + if (uid == 0)
51362 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
51363 + else {
51364 +		/* kill all processes owned by this user, hold a reference
51365 +		   to their user_struct, and prevent them from creating
51366 +		   another process until system restart
51367 +		 */
51368 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
51369 + /* we intentionally leak this ref */
51370 + user = get_uid(current->cred->user);
51371 + if (user) {
51372 + user->banned = 1;
51373 + user->ban_expires = ~0UL;
51374 + }
51375 +
51376 + read_lock(&tasklist_lock);
51377 + do_each_thread(tsk2, tsk) {
51378 + cred = __task_cred(tsk);
51379 + if (cred->uid == uid)
51380 + gr_fake_force_sig(SIGKILL, tsk);
51381 + } while_each_thread(tsk2, tsk);
51382 + read_unlock(&tasklist_lock);
51383 + }
51384 +#endif
51385 +}
51386 +
51387 +int __gr_process_user_ban(struct user_struct *user)
51388 +{
51389 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
51390 + if (unlikely(user->banned)) {
51391 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
51392 + user->banned = 0;
51393 + user->ban_expires = 0;
51394 + free_uid(user);
51395 + } else
51396 + return -EPERM;
51397 + }
51398 +#endif
51399 + return 0;
51400 +}
51401 +
51402 +int gr_process_user_ban(void)
51403 +{
51404 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
51405 + return __gr_process_user_ban(current->cred->user);
51406 +#endif
51407 + return 0;
51408 +}
51409 diff -urNp linux-2.6.32.41/grsecurity/grsec_sock.c linux-2.6.32.41/grsecurity/grsec_sock.c
51410 --- linux-2.6.32.41/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
51411 +++ linux-2.6.32.41/grsecurity/grsec_sock.c 2011-04-17 15:56:46.000000000 -0400
51412 @@ -0,0 +1,275 @@
51413 +#include <linux/kernel.h>
51414 +#include <linux/module.h>
51415 +#include <linux/sched.h>
51416 +#include <linux/file.h>
51417 +#include <linux/net.h>
51418 +#include <linux/in.h>
51419 +#include <linux/ip.h>
51420 +#include <net/sock.h>
51421 +#include <net/inet_sock.h>
51422 +#include <linux/grsecurity.h>
51423 +#include <linux/grinternal.h>
51424 +#include <linux/gracl.h>
51425 +
51426 +kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
51427 +EXPORT_SYMBOL(gr_cap_rtnetlink);
51428 +
51429 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
51430 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
51431 +
51432 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
51433 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
51434 +
51435 +#ifdef CONFIG_UNIX_MODULE
51436 +EXPORT_SYMBOL(gr_acl_handle_unix);
51437 +EXPORT_SYMBOL(gr_acl_handle_mknod);
51438 +EXPORT_SYMBOL(gr_handle_chroot_unix);
51439 +EXPORT_SYMBOL(gr_handle_create);
51440 +#endif
51441 +
51442 +#ifdef CONFIG_GRKERNSEC
51443 +#define gr_conn_table_size 32749
51444 +struct conn_table_entry {
51445 + struct conn_table_entry *next;
51446 + struct signal_struct *sig;
51447 +};
51448 +
51449 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
51450 +DEFINE_SPINLOCK(gr_conn_table_lock);
51451 +
51452 +extern const char * gr_socktype_to_name(unsigned char type);
51453 +extern const char * gr_proto_to_name(unsigned char proto);
51454 +extern const char * gr_sockfamily_to_name(unsigned char family);
51455 +
51456 +static __inline__ int
51457 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
51458 +{
51459 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
51460 +}
51461 +
51462 +static __inline__ int
51463 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
51464 + __u16 sport, __u16 dport)
51465 +{
51466 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
51467 + sig->gr_sport == sport && sig->gr_dport == dport))
51468 + return 1;
51469 + else
51470 + return 0;
51471 +}
51472 +
51473 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
51474 +{
51475 + struct conn_table_entry **match;
51476 + unsigned int index;
51477 +
51478 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
51479 + sig->gr_sport, sig->gr_dport,
51480 + gr_conn_table_size);
51481 +
51482 + newent->sig = sig;
51483 +
51484 + match = &gr_conn_table[index];
51485 + newent->next = *match;
51486 + *match = newent;
51487 +
51488 + return;
51489 +}
51490 +
51491 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
51492 +{
51493 + struct conn_table_entry *match, *last = NULL;
51494 + unsigned int index;
51495 +
51496 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
51497 + sig->gr_sport, sig->gr_dport,
51498 + gr_conn_table_size);
51499 +
51500 + match = gr_conn_table[index];
51501 + while (match && !conn_match(match->sig,
51502 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
51503 + sig->gr_dport)) {
51504 + last = match;
51505 + match = match->next;
51506 + }
51507 +
51508 + if (match) {
51509 + if (last)
51510 + last->next = match->next;
51511 + else
51512 + gr_conn_table[index] = NULL;
51513 + kfree(match);
51514 + }
51515 +
51516 + return;
51517 +}
51518 +
51519 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
51520 + __u16 sport, __u16 dport)
51521 +{
51522 + struct conn_table_entry *match;
51523 + unsigned int index;
51524 +
51525 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
51526 +
51527 + match = gr_conn_table[index];
51528 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
51529 + match = match->next;
51530 +
51531 + if (match)
51532 + return match->sig;
51533 + else
51534 + return NULL;
51535 +}
51536 +
51537 +#endif
51538 +
51539 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
51540 +{
51541 +#ifdef CONFIG_GRKERNSEC
51542 + struct signal_struct *sig = task->signal;
51543 + struct conn_table_entry *newent;
51544 +
51545 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
51546 + if (newent == NULL)
51547 + return;
51548 + /* no bh lock needed since we are called with bh disabled */
51549 + spin_lock(&gr_conn_table_lock);
51550 + gr_del_task_from_ip_table_nolock(sig);
51551 + sig->gr_saddr = inet->rcv_saddr;
51552 + sig->gr_daddr = inet->daddr;
51553 + sig->gr_sport = inet->sport;
51554 + sig->gr_dport = inet->dport;
51555 + gr_add_to_task_ip_table_nolock(sig, newent);
51556 + spin_unlock(&gr_conn_table_lock);
51557 +#endif
51558 + return;
51559 +}
51560 +
51561 +void gr_del_task_from_ip_table(struct task_struct *task)
51562 +{
51563 +#ifdef CONFIG_GRKERNSEC
51564 + spin_lock_bh(&gr_conn_table_lock);
51565 + gr_del_task_from_ip_table_nolock(task->signal);
51566 + spin_unlock_bh(&gr_conn_table_lock);
51567 +#endif
51568 + return;
51569 +}
51570 +
51571 +void
51572 +gr_attach_curr_ip(const struct sock *sk)
51573 +{
51574 +#ifdef CONFIG_GRKERNSEC
51575 + struct signal_struct *p, *set;
51576 + const struct inet_sock *inet = inet_sk(sk);
51577 +
51578 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
51579 + return;
51580 +
51581 + set = current->signal;
51582 +
51583 + spin_lock_bh(&gr_conn_table_lock);
51584 + p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
51585 + inet->dport, inet->sport);
51586 + if (unlikely(p != NULL)) {
51587 + set->curr_ip = p->curr_ip;
51588 + set->used_accept = 1;
51589 + gr_del_task_from_ip_table_nolock(p);
51590 + spin_unlock_bh(&gr_conn_table_lock);
51591 + return;
51592 + }
51593 + spin_unlock_bh(&gr_conn_table_lock);
51594 +
51595 + set->curr_ip = inet->daddr;
51596 + set->used_accept = 1;
51597 +#endif
51598 + return;
51599 +}
51600 +
51601 +int
51602 +gr_handle_sock_all(const int family, const int type, const int protocol)
51603 +{
51604 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
51605 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
51606 + (family != AF_UNIX)) {
51607 + if (family == AF_INET)
51608 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
51609 + else
51610 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
51611 + return -EACCES;
51612 + }
51613 +#endif
51614 + return 0;
51615 +}
51616 +
51617 +int
51618 +gr_handle_sock_server(const struct sockaddr *sck)
51619 +{
51620 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
51621 + if (grsec_enable_socket_server &&
51622 + in_group_p(grsec_socket_server_gid) &&
51623 + sck && (sck->sa_family != AF_UNIX) &&
51624 + (sck->sa_family != AF_LOCAL)) {
51625 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
51626 + return -EACCES;
51627 + }
51628 +#endif
51629 + return 0;
51630 +}
51631 +
51632 +int
51633 +gr_handle_sock_server_other(const struct sock *sck)
51634 +{
51635 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
51636 + if (grsec_enable_socket_server &&
51637 + in_group_p(grsec_socket_server_gid) &&
51638 + sck && (sck->sk_family != AF_UNIX) &&
51639 + (sck->sk_family != AF_LOCAL)) {
51640 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
51641 + return -EACCES;
51642 + }
51643 +#endif
51644 + return 0;
51645 +}
51646 +
51647 +int
51648 +gr_handle_sock_client(const struct sockaddr *sck)
51649 +{
51650 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
51651 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
51652 + sck && (sck->sa_family != AF_UNIX) &&
51653 + (sck->sa_family != AF_LOCAL)) {
51654 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
51655 + return -EACCES;
51656 + }
51657 +#endif
51658 + return 0;
51659 +}
51660 +
51661 +kernel_cap_t
51662 +gr_cap_rtnetlink(struct sock *sock)
51663 +{
51664 +#ifdef CONFIG_GRKERNSEC
51665 + if (!gr_acl_is_enabled())
51666 + return current_cap();
51667 + else if (sock->sk_protocol == NETLINK_ISCSI &&
51668 + cap_raised(current_cap(), CAP_SYS_ADMIN) &&
51669 + gr_is_capable(CAP_SYS_ADMIN))
51670 + return current_cap();
51671 + else if (sock->sk_protocol == NETLINK_AUDIT &&
51672 + cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
51673 + gr_is_capable(CAP_AUDIT_WRITE) &&
51674 + cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
51675 + gr_is_capable(CAP_AUDIT_CONTROL))
51676 + return current_cap();
51677 + else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
51678 + ((sock->sk_protocol == NETLINK_ROUTE) ?
51679 + gr_is_capable_nolog(CAP_NET_ADMIN) :
51680 + gr_is_capable(CAP_NET_ADMIN)))
51681 + return current_cap();
51682 + else
51683 + return __cap_empty_set;
51684 +#else
51685 + return current_cap();
51686 +#endif
51687 +}
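As a minimal sketch (assuming only the helpers defined above; this is not a hunk from the patch, whose real call sites are in the socket syscall paths), the gr_handle_sock_*() functions follow a deny-by-errno convention: they return 0 to allow the operation and -EACCES to deny it, having already logged the denial, so a caller only has to propagate the result:

	/*
	 * Hypothetical, simplified call site for the socket-creation hook above.
	 * The helper returns 0 (allow) or -EACCES (deny, already logged).
	 */
	static int example_socket_create_check(int family, int type, int protocol)
	{
		int err = gr_handle_sock_all(family, type, protocol);
		if (err)
			return err;	/* denied for this group/family */
		return 0;		/* continue with normal socket creation */
	}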
51688 diff -urNp linux-2.6.32.41/grsecurity/grsec_sysctl.c linux-2.6.32.41/grsecurity/grsec_sysctl.c
51689 --- linux-2.6.32.41/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
51690 +++ linux-2.6.32.41/grsecurity/grsec_sysctl.c 2011-04-17 15:56:46.000000000 -0400
51691 @@ -0,0 +1,479 @@
51692 +#include <linux/kernel.h>
51693 +#include <linux/sched.h>
51694 +#include <linux/sysctl.h>
51695 +#include <linux/grsecurity.h>
51696 +#include <linux/grinternal.h>
51697 +
51698 +int
51699 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
51700 +{
51701 +#ifdef CONFIG_GRKERNSEC_SYSCTL
51702 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
51703 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
51704 + return -EACCES;
51705 + }
51706 +#endif
51707 + return 0;
51708 +}
51709 +
51710 +#ifdef CONFIG_GRKERNSEC_ROFS
51711 +static int __maybe_unused one = 1;
51712 +#endif
51713 +
51714 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
51715 +ctl_table grsecurity_table[] = {
51716 +#ifdef CONFIG_GRKERNSEC_SYSCTL
51717 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
51718 +#ifdef CONFIG_GRKERNSEC_IO
51719 + {
51720 + .ctl_name = CTL_UNNUMBERED,
51721 + .procname = "disable_priv_io",
51722 + .data = &grsec_disable_privio,
51723 + .maxlen = sizeof(int),
51724 + .mode = 0600,
51725 + .proc_handler = &proc_dointvec,
51726 + },
51727 +#endif
51728 +#endif
51729 +#ifdef CONFIG_GRKERNSEC_LINK
51730 + {
51731 + .ctl_name = CTL_UNNUMBERED,
51732 + .procname = "linking_restrictions",
51733 + .data = &grsec_enable_link,
51734 + .maxlen = sizeof(int),
51735 + .mode = 0600,
51736 + .proc_handler = &proc_dointvec,
51737 + },
51738 +#endif
51739 +#ifdef CONFIG_GRKERNSEC_FIFO
51740 + {
51741 + .ctl_name = CTL_UNNUMBERED,
51742 + .procname = "fifo_restrictions",
51743 + .data = &grsec_enable_fifo,
51744 + .maxlen = sizeof(int),
51745 + .mode = 0600,
51746 + .proc_handler = &proc_dointvec,
51747 + },
51748 +#endif
51749 +#ifdef CONFIG_GRKERNSEC_EXECVE
51750 + {
51751 + .ctl_name = CTL_UNNUMBERED,
51752 + .procname = "execve_limiting",
51753 + .data = &grsec_enable_execve,
51754 + .maxlen = sizeof(int),
51755 + .mode = 0600,
51756 + .proc_handler = &proc_dointvec,
51757 + },
51758 +#endif
51759 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
51760 + {
51761 + .ctl_name = CTL_UNNUMBERED,
51762 + .procname = "ip_blackhole",
51763 + .data = &grsec_enable_blackhole,
51764 + .maxlen = sizeof(int),
51765 + .mode = 0600,
51766 + .proc_handler = &proc_dointvec,
51767 + },
51768 + {
51769 + .ctl_name = CTL_UNNUMBERED,
51770 + .procname = "lastack_retries",
51771 + .data = &grsec_lastack_retries,
51772 + .maxlen = sizeof(int),
51773 + .mode = 0600,
51774 + .proc_handler = &proc_dointvec,
51775 + },
51776 +#endif
51777 +#ifdef CONFIG_GRKERNSEC_EXECLOG
51778 + {
51779 + .ctl_name = CTL_UNNUMBERED,
51780 + .procname = "exec_logging",
51781 + .data = &grsec_enable_execlog,
51782 + .maxlen = sizeof(int),
51783 + .mode = 0600,
51784 + .proc_handler = &proc_dointvec,
51785 + },
51786 +#endif
51787 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
51788 + {
51789 + .ctl_name = CTL_UNNUMBERED,
51790 + .procname = "rwxmap_logging",
51791 + .data = &grsec_enable_log_rwxmaps,
51792 + .maxlen = sizeof(int),
51793 + .mode = 0600,
51794 + .proc_handler = &proc_dointvec,
51795 + },
51796 +#endif
51797 +#ifdef CONFIG_GRKERNSEC_SIGNAL
51798 + {
51799 + .ctl_name = CTL_UNNUMBERED,
51800 + .procname = "signal_logging",
51801 + .data = &grsec_enable_signal,
51802 + .maxlen = sizeof(int),
51803 + .mode = 0600,
51804 + .proc_handler = &proc_dointvec,
51805 + },
51806 +#endif
51807 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
51808 + {
51809 + .ctl_name = CTL_UNNUMBERED,
51810 + .procname = "forkfail_logging",
51811 + .data = &grsec_enable_forkfail,
51812 + .maxlen = sizeof(int),
51813 + .mode = 0600,
51814 + .proc_handler = &proc_dointvec,
51815 + },
51816 +#endif
51817 +#ifdef CONFIG_GRKERNSEC_TIME
51818 + {
51819 + .ctl_name = CTL_UNNUMBERED,
51820 + .procname = "timechange_logging",
51821 + .data = &grsec_enable_time,
51822 + .maxlen = sizeof(int),
51823 + .mode = 0600,
51824 + .proc_handler = &proc_dointvec,
51825 + },
51826 +#endif
51827 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
51828 + {
51829 + .ctl_name = CTL_UNNUMBERED,
51830 + .procname = "chroot_deny_shmat",
51831 + .data = &grsec_enable_chroot_shmat,
51832 + .maxlen = sizeof(int),
51833 + .mode = 0600,
51834 + .proc_handler = &proc_dointvec,
51835 + },
51836 +#endif
51837 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
51838 + {
51839 + .ctl_name = CTL_UNNUMBERED,
51840 + .procname = "chroot_deny_unix",
51841 + .data = &grsec_enable_chroot_unix,
51842 + .maxlen = sizeof(int),
51843 + .mode = 0600,
51844 + .proc_handler = &proc_dointvec,
51845 + },
51846 +#endif
51847 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
51848 + {
51849 + .ctl_name = CTL_UNNUMBERED,
51850 + .procname = "chroot_deny_mount",
51851 + .data = &grsec_enable_chroot_mount,
51852 + .maxlen = sizeof(int),
51853 + .mode = 0600,
51854 + .proc_handler = &proc_dointvec,
51855 + },
51856 +#endif
51857 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
51858 + {
51859 + .ctl_name = CTL_UNNUMBERED,
51860 + .procname = "chroot_deny_fchdir",
51861 + .data = &grsec_enable_chroot_fchdir,
51862 + .maxlen = sizeof(int),
51863 + .mode = 0600,
51864 + .proc_handler = &proc_dointvec,
51865 + },
51866 +#endif
51867 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
51868 + {
51869 + .ctl_name = CTL_UNNUMBERED,
51870 + .procname = "chroot_deny_chroot",
51871 + .data = &grsec_enable_chroot_double,
51872 + .maxlen = sizeof(int),
51873 + .mode = 0600,
51874 + .proc_handler = &proc_dointvec,
51875 + },
51876 +#endif
51877 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
51878 + {
51879 + .ctl_name = CTL_UNNUMBERED,
51880 + .procname = "chroot_deny_pivot",
51881 + .data = &grsec_enable_chroot_pivot,
51882 + .maxlen = sizeof(int),
51883 + .mode = 0600,
51884 + .proc_handler = &proc_dointvec,
51885 + },
51886 +#endif
51887 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
51888 + {
51889 + .ctl_name = CTL_UNNUMBERED,
51890 + .procname = "chroot_enforce_chdir",
51891 + .data = &grsec_enable_chroot_chdir,
51892 + .maxlen = sizeof(int),
51893 + .mode = 0600,
51894 + .proc_handler = &proc_dointvec,
51895 + },
51896 +#endif
51897 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
51898 + {
51899 + .ctl_name = CTL_UNNUMBERED,
51900 + .procname = "chroot_deny_chmod",
51901 + .data = &grsec_enable_chroot_chmod,
51902 + .maxlen = sizeof(int),
51903 + .mode = 0600,
51904 + .proc_handler = &proc_dointvec,
51905 + },
51906 +#endif
51907 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
51908 + {
51909 + .ctl_name = CTL_UNNUMBERED,
51910 + .procname = "chroot_deny_mknod",
51911 + .data = &grsec_enable_chroot_mknod,
51912 + .maxlen = sizeof(int),
51913 + .mode = 0600,
51914 + .proc_handler = &proc_dointvec,
51915 + },
51916 +#endif
51917 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
51918 + {
51919 + .ctl_name = CTL_UNNUMBERED,
51920 + .procname = "chroot_restrict_nice",
51921 + .data = &grsec_enable_chroot_nice,
51922 + .maxlen = sizeof(int),
51923 + .mode = 0600,
51924 + .proc_handler = &proc_dointvec,
51925 + },
51926 +#endif
51927 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
51928 + {
51929 + .ctl_name = CTL_UNNUMBERED,
51930 + .procname = "chroot_execlog",
51931 + .data = &grsec_enable_chroot_execlog,
51932 + .maxlen = sizeof(int),
51933 + .mode = 0600,
51934 + .proc_handler = &proc_dointvec,
51935 + },
51936 +#endif
51937 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
51938 + {
51939 + .ctl_name = CTL_UNNUMBERED,
51940 + .procname = "chroot_caps",
51941 + .data = &grsec_enable_chroot_caps,
51942 + .maxlen = sizeof(int),
51943 + .mode = 0600,
51944 + .proc_handler = &proc_dointvec,
51945 + },
51946 +#endif
51947 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
51948 + {
51949 + .ctl_name = CTL_UNNUMBERED,
51950 + .procname = "chroot_deny_sysctl",
51951 + .data = &grsec_enable_chroot_sysctl,
51952 + .maxlen = sizeof(int),
51953 + .mode = 0600,
51954 + .proc_handler = &proc_dointvec,
51955 + },
51956 +#endif
51957 +#ifdef CONFIG_GRKERNSEC_TPE
51958 + {
51959 + .ctl_name = CTL_UNNUMBERED,
51960 + .procname = "tpe",
51961 + .data = &grsec_enable_tpe,
51962 + .maxlen = sizeof(int),
51963 + .mode = 0600,
51964 + .proc_handler = &proc_dointvec,
51965 + },
51966 + {
51967 + .ctl_name = CTL_UNNUMBERED,
51968 + .procname = "tpe_gid",
51969 + .data = &grsec_tpe_gid,
51970 + .maxlen = sizeof(int),
51971 + .mode = 0600,
51972 + .proc_handler = &proc_dointvec,
51973 + },
51974 +#endif
51975 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
51976 + {
51977 + .ctl_name = CTL_UNNUMBERED,
51978 + .procname = "tpe_invert",
51979 + .data = &grsec_enable_tpe_invert,
51980 + .maxlen = sizeof(int),
51981 + .mode = 0600,
51982 + .proc_handler = &proc_dointvec,
51983 + },
51984 +#endif
51985 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
51986 + {
51987 + .ctl_name = CTL_UNNUMBERED,
51988 + .procname = "tpe_restrict_all",
51989 + .data = &grsec_enable_tpe_all,
51990 + .maxlen = sizeof(int),
51991 + .mode = 0600,
51992 + .proc_handler = &proc_dointvec,
51993 + },
51994 +#endif
51995 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
51996 + {
51997 + .ctl_name = CTL_UNNUMBERED,
51998 + .procname = "socket_all",
51999 + .data = &grsec_enable_socket_all,
52000 + .maxlen = sizeof(int),
52001 + .mode = 0600,
52002 + .proc_handler = &proc_dointvec,
52003 + },
52004 + {
52005 + .ctl_name = CTL_UNNUMBERED,
52006 + .procname = "socket_all_gid",
52007 + .data = &grsec_socket_all_gid,
52008 + .maxlen = sizeof(int),
52009 + .mode = 0600,
52010 + .proc_handler = &proc_dointvec,
52011 + },
52012 +#endif
52013 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
52014 + {
52015 + .ctl_name = CTL_UNNUMBERED,
52016 + .procname = "socket_client",
52017 + .data = &grsec_enable_socket_client,
52018 + .maxlen = sizeof(int),
52019 + .mode = 0600,
52020 + .proc_handler = &proc_dointvec,
52021 + },
52022 + {
52023 + .ctl_name = CTL_UNNUMBERED,
52024 + .procname = "socket_client_gid",
52025 + .data = &grsec_socket_client_gid,
52026 + .maxlen = sizeof(int),
52027 + .mode = 0600,
52028 + .proc_handler = &proc_dointvec,
52029 + },
52030 +#endif
52031 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
52032 + {
52033 + .ctl_name = CTL_UNNUMBERED,
52034 + .procname = "socket_server",
52035 + .data = &grsec_enable_socket_server,
52036 + .maxlen = sizeof(int),
52037 + .mode = 0600,
52038 + .proc_handler = &proc_dointvec,
52039 + },
52040 + {
52041 + .ctl_name = CTL_UNNUMBERED,
52042 + .procname = "socket_server_gid",
52043 + .data = &grsec_socket_server_gid,
52044 + .maxlen = sizeof(int),
52045 + .mode = 0600,
52046 + .proc_handler = &proc_dointvec,
52047 + },
52048 +#endif
52049 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
52050 + {
52051 + .ctl_name = CTL_UNNUMBERED,
52052 + .procname = "audit_group",
52053 + .data = &grsec_enable_group,
52054 + .maxlen = sizeof(int),
52055 + .mode = 0600,
52056 + .proc_handler = &proc_dointvec,
52057 + },
52058 + {
52059 + .ctl_name = CTL_UNNUMBERED,
52060 + .procname = "audit_gid",
52061 + .data = &grsec_audit_gid,
52062 + .maxlen = sizeof(int),
52063 + .mode = 0600,
52064 + .proc_handler = &proc_dointvec,
52065 + },
52066 +#endif
52067 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
52068 + {
52069 + .ctl_name = CTL_UNNUMBERED,
52070 + .procname = "audit_chdir",
52071 + .data = &grsec_enable_chdir,
52072 + .maxlen = sizeof(int),
52073 + .mode = 0600,
52074 + .proc_handler = &proc_dointvec,
52075 + },
52076 +#endif
52077 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
52078 + {
52079 + .ctl_name = CTL_UNNUMBERED,
52080 + .procname = "audit_mount",
52081 + .data = &grsec_enable_mount,
52082 + .maxlen = sizeof(int),
52083 + .mode = 0600,
52084 + .proc_handler = &proc_dointvec,
52085 + },
52086 +#endif
52087 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
52088 + {
52089 + .ctl_name = CTL_UNNUMBERED,
52090 + .procname = "audit_textrel",
52091 + .data = &grsec_enable_audit_textrel,
52092 + .maxlen = sizeof(int),
52093 + .mode = 0600,
52094 + .proc_handler = &proc_dointvec,
52095 + },
52096 +#endif
52097 +#ifdef CONFIG_GRKERNSEC_DMESG
52098 + {
52099 + .ctl_name = CTL_UNNUMBERED,
52100 + .procname = "dmesg",
52101 + .data = &grsec_enable_dmesg,
52102 + .maxlen = sizeof(int),
52103 + .mode = 0600,
52104 + .proc_handler = &proc_dointvec,
52105 + },
52106 +#endif
52107 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52108 + {
52109 + .ctl_name = CTL_UNNUMBERED,
52110 + .procname = "chroot_findtask",
52111 + .data = &grsec_enable_chroot_findtask,
52112 + .maxlen = sizeof(int),
52113 + .mode = 0600,
52114 + .proc_handler = &proc_dointvec,
52115 + },
52116 +#endif
52117 +#ifdef CONFIG_GRKERNSEC_RESLOG
52118 + {
52119 + .ctl_name = CTL_UNNUMBERED,
52120 + .procname = "resource_logging",
52121 + .data = &grsec_resource_logging,
52122 + .maxlen = sizeof(int),
52123 + .mode = 0600,
52124 + .proc_handler = &proc_dointvec,
52125 + },
52126 +#endif
52127 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
52128 + {
52129 + .ctl_name = CTL_UNNUMBERED,
52130 + .procname = "audit_ptrace",
52131 + .data = &grsec_enable_audit_ptrace,
52132 + .maxlen = sizeof(int),
52133 + .mode = 0600,
52134 + .proc_handler = &proc_dointvec,
52135 + },
52136 +#endif
52137 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
52138 + {
52139 + .ctl_name = CTL_UNNUMBERED,
52140 + .procname = "harden_ptrace",
52141 + .data = &grsec_enable_harden_ptrace,
52142 + .maxlen = sizeof(int),
52143 + .mode = 0600,
52144 + .proc_handler = &proc_dointvec,
52145 + },
52146 +#endif
52147 + {
52148 + .ctl_name = CTL_UNNUMBERED,
52149 + .procname = "grsec_lock",
52150 + .data = &grsec_lock,
52151 + .maxlen = sizeof(int),
52152 + .mode = 0600,
52153 + .proc_handler = &proc_dointvec,
52154 + },
52155 +#endif
52156 +#ifdef CONFIG_GRKERNSEC_ROFS
52157 + {
52158 + .ctl_name = CTL_UNNUMBERED,
52159 + .procname = "romount_protect",
52160 + .data = &grsec_enable_rofs,
52161 + .maxlen = sizeof(int),
52162 + .mode = 0600,
52163 + .proc_handler = &proc_dointvec_minmax,
52164 + .extra1 = &one,
52165 + .extra2 = &one,
52166 + },
52167 +#endif
52168 + { .ctl_name = 0 }
52169 +};
52170 +#endif
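A minimal sketch of how a table like grsecurity_table is typically exposed with the 2.6.32-era sysctl API (assumed wiring for illustration; the actual hunk that registers it lives elsewhere in the patch): an entry in the kernel sysctl directory points at it through .child, which makes the knobs above appear under /proc/sys/kernel/grsecurity/:

	/* Hypothetical parent entry, e.g. in the kern_table of kernel/sysctl.c. */
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "grsecurity",
		.mode		= 0500,			/* root-only directory */
		.child		= grsecurity_table,	/* the table defined above */
	},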
52171 diff -urNp linux-2.6.32.41/grsecurity/grsec_time.c linux-2.6.32.41/grsecurity/grsec_time.c
52172 --- linux-2.6.32.41/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
52173 +++ linux-2.6.32.41/grsecurity/grsec_time.c 2011-04-17 15:56:46.000000000 -0400
52174 @@ -0,0 +1,16 @@
52175 +#include <linux/kernel.h>
52176 +#include <linux/sched.h>
52177 +#include <linux/grinternal.h>
52178 +#include <linux/module.h>
52179 +
52180 +void
52181 +gr_log_timechange(void)
52182 +{
52183 +#ifdef CONFIG_GRKERNSEC_TIME
52184 + if (grsec_enable_time)
52185 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
52186 +#endif
52187 + return;
52188 +}
52189 +
52190 +EXPORT_SYMBOL(gr_log_timechange);
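A minimal sketch of the intended use (hypothetical call site, not a hunk from this patch): the clock-setting paths call gr_log_timechange() once permission checks have passed, so every successful time change is audited when the "timechange_logging" sysctl is enabled:

	/* Hypothetical wrapper around the existing kernel helper do_settimeofday(). */
	static int example_settime(struct timespec *ts)
	{
		gr_log_timechange();		/* no-op unless grsec_enable_time is set */
		return do_settimeofday(ts);	/* apply the new wall-clock time */
	}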
52191 diff -urNp linux-2.6.32.41/grsecurity/grsec_tpe.c linux-2.6.32.41/grsecurity/grsec_tpe.c
52192 --- linux-2.6.32.41/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
52193 +++ linux-2.6.32.41/grsecurity/grsec_tpe.c 2011-04-17 15:56:46.000000000 -0400
52194 @@ -0,0 +1,39 @@
52195 +#include <linux/kernel.h>
52196 +#include <linux/sched.h>
52197 +#include <linux/file.h>
52198 +#include <linux/fs.h>
52199 +#include <linux/grinternal.h>
52200 +
52201 +extern int gr_acl_tpe_check(void);
52202 +
52203 +int
52204 +gr_tpe_allow(const struct file *file)
52205 +{
52206 +#ifdef CONFIG_GRKERNSEC
52207 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
52208 + const struct cred *cred = current_cred();
52209 +
52210 + if (cred->uid && ((grsec_enable_tpe &&
52211 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
52212 + ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
52213 + (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
52214 +#else
52215 + in_group_p(grsec_tpe_gid)
52216 +#endif
52217 + ) || gr_acl_tpe_check()) &&
52218 + (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
52219 + (inode->i_mode & S_IWOTH))))) {
52220 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
52221 + return 0;
52222 + }
52223 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
52224 + if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
52225 + ((inode->i_uid && (inode->i_uid != cred->uid)) ||
52226 + (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
52227 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
52228 + return 0;
52229 + }
52230 +#endif
52231 +#endif
52232 + return 1;
52233 +}
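A minimal sketch of the caller contract (hypothetical, simplified; not the exec-path hunk from this patch): gr_tpe_allow() returns 1 when execution of the file is permitted and 0 when TPE denies it, with the denial already logged inside the helper, so a caller only maps 0 onto -EACCES:

	/* Hypothetical check as it would sit in an exec path. */
	static int example_tpe_exec_check(struct file *file)
	{
		if (!gr_tpe_allow(file))
			return -EACCES;	/* untrusted user + untrusted directory */
		return 0;		/* trusted path, allow the exec */
	}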
52234 diff -urNp linux-2.6.32.41/grsecurity/grsum.c linux-2.6.32.41/grsecurity/grsum.c
52235 --- linux-2.6.32.41/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
52236 +++ linux-2.6.32.41/grsecurity/grsum.c 2011-04-17 15:56:46.000000000 -0400
52237 @@ -0,0 +1,61 @@
52238 +#include <linux/err.h>
52239 +#include <linux/kernel.h>
52240 +#include <linux/sched.h>
52241 +#include <linux/mm.h>
52242 +#include <linux/scatterlist.h>
52243 +#include <linux/crypto.h>
52244 +#include <linux/gracl.h>
52245 +
52246 +
52247 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
52248 +#error "crypto and sha256 must be built into the kernel"
52249 +#endif
52250 +
52251 +int
52252 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
52253 +{
52254 + char *p;
52255 + struct crypto_hash *tfm;
52256 + struct hash_desc desc;
52257 + struct scatterlist sg;
52258 + unsigned char temp_sum[GR_SHA_LEN];
52259 + volatile int retval = 0;
52260 + volatile int dummy = 0;
52261 + unsigned int i;
52262 +
52263 + sg_init_table(&sg, 1);
52264 +
52265 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
52266 + if (IS_ERR(tfm)) {
52267 + /* should never happen, since sha256 should be built in */
52268 + return 1;
52269 + }
52270 +
52271 + desc.tfm = tfm;
52272 + desc.flags = 0;
52273 +
52274 + crypto_hash_init(&desc);
52275 +
52276 + p = salt;
52277 + sg_set_buf(&sg, p, GR_SALT_LEN);
52278 + crypto_hash_update(&desc, &sg, sg.length);
52279 +
52280 + p = entry->pw;
52281 + sg_set_buf(&sg, p, strlen(p));
52282 +
52283 + crypto_hash_update(&desc, &sg, sg.length);
52284 +
52285 + crypto_hash_final(&desc, temp_sum);
52286 +
52287 + memset(entry->pw, 0, GR_PW_LEN);
52288 +
52289 + for (i = 0; i < GR_SHA_LEN; i++)
52290 + if (sum[i] != temp_sum[i])
52291 + retval = 1;
52292 + else
52293 + dummy = 1; // waste a cycle
52294 +
52295 + crypto_free_hash(tfm);
52296 +
52297 + return retval;
52298 +}
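A minimal sketch of how chkpw() is meant to be consumed (hypothetical caller; the real one is in the RBAC code elsewhere in the patch): it hashes salt||password with SHA-256, wipes the cleartext password in the gr_arg, and returns 0 only on a full-digest match, with the dummy branch above keeping the comparison time independent of where a mismatch occurs:

	/* Hypothetical authentication step for administering the RBAC system. */
	static int example_check_admin_password(struct gr_arg *entry,
						unsigned char *salt, unsigned char *sum)
	{
		if (chkpw(entry, salt, sum))
			return -EPERM;	/* digest mismatch: count toward the lockout limit */
		return 0;		/* authenticated */
	}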
52299 diff -urNp linux-2.6.32.41/grsecurity/Kconfig linux-2.6.32.41/grsecurity/Kconfig
52300 --- linux-2.6.32.41/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
52301 +++ linux-2.6.32.41/grsecurity/Kconfig 2011-04-17 15:56:46.000000000 -0400
52302 @@ -0,0 +1,1045 @@
52303 +#
52304 +# grsecurity configuration
52305 +#
52306 +
52307 +menu "Grsecurity"
52308 +
52309 +config GRKERNSEC
52310 + bool "Grsecurity"
52311 + select CRYPTO
52312 + select CRYPTO_SHA256
52313 + help
52314 + If you say Y here, you will be able to configure many features
52315 + that will enhance the security of your system. It is highly
52316 + recommended that you say Y here and read through the help
52317 + for each option so that you fully understand the features and
52318 + can evaluate their usefulness for your machine.
52319 +
52320 +choice
52321 + prompt "Security Level"
52322 + depends on GRKERNSEC
52323 + default GRKERNSEC_CUSTOM
52324 +
52325 +config GRKERNSEC_LOW
52326 + bool "Low"
52327 + select GRKERNSEC_LINK
52328 + select GRKERNSEC_FIFO
52329 + select GRKERNSEC_EXECVE
52330 + select GRKERNSEC_RANDNET
52331 + select GRKERNSEC_DMESG
52332 + select GRKERNSEC_CHROOT
52333 + select GRKERNSEC_CHROOT_CHDIR
52334 +
52335 + help
52336 + If you choose this option, several of the grsecurity options will
52337 + be enabled that will give you greater protection against a number
52338 + of attacks, while assuring that none of your software will have any
52339 + conflicts with the additional security measures. If you run a lot
52340 + of unusual software, or you are having problems with the higher
52341 + security levels, you should say Y here. With this option, the
52342 + following features are enabled:
52343 +
52344 + - Linking restrictions
52345 + - FIFO restrictions
52346 + - Enforcing RLIMIT_NPROC on execve
52347 + - Restricted dmesg
52348 + - Enforced chdir("/") on chroot
52349 + - Runtime module disabling
52350 +
52351 +config GRKERNSEC_MEDIUM
52352 + bool "Medium"
52353 + select PAX
52354 + select PAX_EI_PAX
52355 + select PAX_PT_PAX_FLAGS
52356 + select PAX_HAVE_ACL_FLAGS
52357 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
52358 + select GRKERNSEC_CHROOT
52359 + select GRKERNSEC_CHROOT_SYSCTL
52360 + select GRKERNSEC_LINK
52361 + select GRKERNSEC_FIFO
52362 + select GRKERNSEC_EXECVE
52363 + select GRKERNSEC_DMESG
52364 + select GRKERNSEC_RANDNET
52365 + select GRKERNSEC_FORKFAIL
52366 + select GRKERNSEC_TIME
52367 + select GRKERNSEC_SIGNAL
52368 + select GRKERNSEC_CHROOT
52369 + select GRKERNSEC_CHROOT_UNIX
52370 + select GRKERNSEC_CHROOT_MOUNT
52371 + select GRKERNSEC_CHROOT_PIVOT
52372 + select GRKERNSEC_CHROOT_DOUBLE
52373 + select GRKERNSEC_CHROOT_CHDIR
52374 + select GRKERNSEC_CHROOT_MKNOD
52375 + select GRKERNSEC_PROC
52376 + select GRKERNSEC_PROC_USERGROUP
52377 + select PAX_RANDUSTACK
52378 + select PAX_ASLR
52379 + select PAX_RANDMMAP
52380 + select PAX_REFCOUNT if (X86 || SPARC64)
52381 + select PAX_USERCOPY if ((X86 || SPARC32 || SPARC64 || PPC) && (SLAB || SLUB || SLOB))
52382 +
52383 + help
52384 + If you say Y here, several features in addition to those included
52385 + in the low additional security level will be enabled. These
52386 + features provide even more security to your system, though in rare
52387 + cases they may be incompatible with very old or poorly written
52388 + software. If you enable this option, make sure that your auth
52389 + service (identd) is running as gid 1001. With this option,
52390 + the following features (in addition to those provided in the
52391 + low additional security level) will be enabled:
52392 +
52393 + - Failed fork logging
52394 + - Time change logging
52395 + - Signal logging
52396 + - Deny mounts in chroot
52397 + - Deny double chrooting
52398 + - Deny sysctl writes in chroot
52399 + - Deny mknod in chroot
52400 + - Deny access to abstract AF_UNIX sockets out of chroot
52401 + - Deny pivot_root in chroot
52402 + - Denied writes of /dev/kmem, /dev/mem, and /dev/port
52403 + - /proc restrictions with special GID set to 10 (usually wheel)
52404 + - Address Space Layout Randomization (ASLR)
52405 + - Prevent exploitation of most refcount overflows
52406 + - Bounds checking of copying between the kernel and userland
52407 +
52408 +config GRKERNSEC_HIGH
52409 + bool "High"
52410 + select GRKERNSEC_LINK
52411 + select GRKERNSEC_FIFO
52412 + select GRKERNSEC_EXECVE
52413 + select GRKERNSEC_DMESG
52414 + select GRKERNSEC_FORKFAIL
52415 + select GRKERNSEC_TIME
52416 + select GRKERNSEC_SIGNAL
52417 + select GRKERNSEC_CHROOT
52418 + select GRKERNSEC_CHROOT_SHMAT
52419 + select GRKERNSEC_CHROOT_UNIX
52420 + select GRKERNSEC_CHROOT_MOUNT
52421 + select GRKERNSEC_CHROOT_FCHDIR
52422 + select GRKERNSEC_CHROOT_PIVOT
52423 + select GRKERNSEC_CHROOT_DOUBLE
52424 + select GRKERNSEC_CHROOT_CHDIR
52425 + select GRKERNSEC_CHROOT_MKNOD
52426 + select GRKERNSEC_CHROOT_CAPS
52427 + select GRKERNSEC_CHROOT_SYSCTL
52428 + select GRKERNSEC_CHROOT_FINDTASK
52429 + select GRKERNSEC_SYSFS_RESTRICT
52430 + select GRKERNSEC_PROC
52431 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
52432 + select GRKERNSEC_HIDESYM
52433 + select GRKERNSEC_BRUTE
52434 + select GRKERNSEC_PROC_USERGROUP
52435 + select GRKERNSEC_KMEM
52436 + select GRKERNSEC_RESLOG
52437 + select GRKERNSEC_RANDNET
52438 + select GRKERNSEC_PROC_ADD
52439 + select GRKERNSEC_CHROOT_CHMOD
52440 + select GRKERNSEC_CHROOT_NICE
52441 + select GRKERNSEC_AUDIT_MOUNT
52442 + select GRKERNSEC_MODHARDEN if (MODULES)
52443 + select GRKERNSEC_HARDEN_PTRACE
52444 + select GRKERNSEC_VM86 if (X86_32)
52445 + select GRKERNSEC_KERN_LOCKOUT if (X86)
52446 + select PAX
52447 + select PAX_RANDUSTACK
52448 + select PAX_ASLR
52449 + select PAX_RANDMMAP
52450 + select PAX_NOEXEC
52451 + select PAX_MPROTECT
52452 + select PAX_EI_PAX
52453 + select PAX_PT_PAX_FLAGS
52454 + select PAX_HAVE_ACL_FLAGS
52455 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
52456 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
52457 + select PAX_RANDKSTACK if (X86_TSC && X86)
52458 + select PAX_SEGMEXEC if (X86_32)
52459 + select PAX_PAGEEXEC
52460 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC32 || SPARC64)
52461 + select PAX_EMUTRAMP if (PARISC)
52462 + select PAX_EMUSIGRT if (PARISC)
52463 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
52464 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
52465 + select PAX_REFCOUNT if (X86 || SPARC64)
52466 + select PAX_USERCOPY if ((X86 || PPC || SPARC32 || SPARC64) && (SLAB || SLUB || SLOB))
52467 + help
52468 + If you say Y here, many of the features of grsecurity will be
52469 + enabled, which will protect you against many kinds of attacks
52470 + against your system. The heightened security comes at a cost
52471 + of an increased chance of incompatibilities with rare software
52472 + on your machine. Since this security level enables PaX, you should
52473 + view <http://pax.grsecurity.net> and read about the PaX
52474 + project. While you are there, download chpax and run it on
52475 + binaries that cause problems with PaX. Also remember that
52476 + since the /proc restrictions are enabled, you must run your
52477 + identd as gid 1001. This security level enables the following
52478 + features in addition to those listed in the low and medium
52479 + security levels:
52480 +
52481 + - Additional /proc restrictions
52482 + - Chmod restrictions in chroot
52483 + - No signals, ptrace, or viewing of processes outside of chroot
52484 + - Capability restrictions in chroot
52485 + - Deny fchdir out of chroot
52486 + - Priority restrictions in chroot
52487 + - Segmentation-based implementation of PaX
52488 + - Mprotect restrictions
52489 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
52490 + - Kernel stack randomization
52491 + - Mount/unmount/remount logging
52492 + - Kernel symbol hiding
52493 + - Prevention of memory exhaustion-based exploits
52494 + - Hardening of module auto-loading
52495 + - Ptrace restrictions
52496 + - Restricted vm86 mode
52497 + - Restricted sysfs/debugfs
52498 + - Active kernel exploit response
52499 +
52500 +config GRKERNSEC_CUSTOM
52501 + bool "Custom"
52502 + help
52503 + If you say Y here, you will be able to configure every grsecurity
52504 + option, which allows you to enable many more features that aren't
52505 + covered in the basic security levels. These additional features
52506 + include TPE, socket restrictions, and the sysctl system for
52507 + grsecurity. It is advised that you read through the help for
52508 + each option to determine its usefulness in your situation.
52509 +
52510 +endchoice
52511 +
52512 +menu "Address Space Protection"
52513 +depends on GRKERNSEC
52514 +
52515 +config GRKERNSEC_KMEM
52516 + bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
52517 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
52518 + help
52519 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
52520 + be written to via mmap or otherwise to modify the running kernel.
52521 + /dev/port will also not be allowed to be opened. If you have module
52522 + support disabled, enabling this will close up four ways that are
52523 + currently used to insert malicious code into the running kernel.
52524 + Even with all these features enabled, we still highly recommend that
52525 + you use the RBAC system, as it is still possible for an attacker to
52526 + modify the running kernel through privileged I/O granted by ioperm/iopl.
52527 + If you are not using XFree86, you may be able to stop this additional
52528 + case by enabling the 'Disable privileged I/O' option. Though nothing
52529 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
52530 + but only to video memory, which is the only writing we allow in this
52531 + case. If /dev/kmem or /dev/mem is mmapped without PROT_WRITE, the mapping
52532 + will not be allowed to be mprotected with PROT_WRITE later.
52533 + It is highly recommended that you say Y here if you meet all the
52534 + conditions above.
52535 +
52536 +config GRKERNSEC_VM86
52537 + bool "Restrict VM86 mode"
52538 + depends on X86_32
52539 +
52540 + help
52541 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
52542 + make use of a special execution mode on 32bit x86 processors called
52543 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
52544 + video cards and will still work with this option enabled. The purpose
52545 + of the option is to prevent exploitation of emulation errors in
52546 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
52547 + Nearly all users should be able to enable this option.
52548 +
52549 +config GRKERNSEC_IO
52550 + bool "Disable privileged I/O"
52551 + depends on X86
52552 + select RTC_CLASS
52553 + select RTC_INTF_DEV
52554 + select RTC_DRV_CMOS
52555 +
52556 + help
52557 + If you say Y here, all ioperm and iopl calls will return an error.
52558 + Ioperm and iopl can be used to modify the running kernel.
52559 + Unfortunately, some programs need this access to operate properly,
52560 + the most notable of which are XFree86 and hwclock. hwclock can be
52561 + remedied by having RTC support in the kernel, so real-time
52562 + clock support is enabled if this option is enabled, to ensure
52563 + that hwclock operates correctly. XFree86 still will not
52564 + operate correctly with this option enabled, so DO NOT CHOOSE Y
52565 + IF YOU USE XFree86. If you use XFree86 and you still want to
52566 + protect your kernel against modification, use the RBAC system.
52567 +
52568 +config GRKERNSEC_PROC_MEMMAP
52569 + bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
52570 + default y if (PAX_NOEXEC || PAX_ASLR)
52571 + depends on PAX_NOEXEC || PAX_ASLR
52572 + help
52573 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
52574 + give no information about the addresses of the task's mappings if
52575 + PaX features that rely on random addresses are enabled on the task.
52576 + If you use PaX it is greatly recommended that you say Y here as it
52577 + closes up a hole that makes the full ASLR useless for suid
52578 + binaries.
52579 +
52580 +config GRKERNSEC_BRUTE
52581 + bool "Deter exploit bruteforcing"
52582 + help
52583 + If you say Y here, attempts to bruteforce exploits against forking
52584 + daemons such as apache or sshd, as well as against suid/sgid binaries
52585 + will be deterred. When a child of a forking daemon is killed by PaX
52586 + or crashes due to an illegal instruction or other suspicious signal,
52587 + the parent process will be delayed 30 seconds upon every subsequent
52588 + fork until the administrator is able to assess the situation and
52589 + restart the daemon.
52590 + In the suid/sgid case, the attempt is logged, the user has all their
52591 + processes terminated, and they are prevented from executing any further
52592 + processes for 15 minutes.
52593 + It is recommended that you also enable signal logging in the auditing
52594 + section so that logs are generated when a process triggers a suspicious
52595 + signal.
52596 +
52597 +config GRKERNSEC_MODHARDEN
52598 + bool "Harden module auto-loading"
52599 + depends on MODULES
52600 + help
52601 + If you say Y here, module auto-loading in response to use of some
52602 + feature implemented by an unloaded module will be restricted to
52603 + root users. Enabling this option helps defend against attacks
52604 + by unprivileged users who abuse the auto-loading behavior to
52605 + cause a vulnerable module to load that is then exploited.
52606 +
52607 + If this option prevents a legitimate use of auto-loading for a
52608 + non-root user, the administrator can execute modprobe manually
52609 + with the exact name of the module mentioned in the alert log.
52610 + Alternatively, the administrator can add the module to the list
52611 + of modules loaded at boot by modifying init scripts.
52612 +
52613 + Modification of init scripts will most likely be needed on
52614 + Ubuntu servers with encrypted home directory support enabled,
52615 + as the first non-root user logging in will cause the ecb(aes),
52616 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
52617 +
52618 +config GRKERNSEC_HIDESYM
52619 + bool "Hide kernel symbols"
52620 + help
52621 + If you say Y here, getting information on loaded modules, and
52622 + displaying all kernel symbols through a syscall will be restricted
52623 + to users with CAP_SYS_MODULE. For software compatibility reasons,
52624 + /proc/kallsyms will be restricted to the root user. The RBAC
52625 + system can hide that entry even from root.
52626 +
52627 + This option also prevents leaking of kernel addresses through
52628 + several /proc entries.
52629 +
52630 + Note that this option is only effective provided the following
52631 + conditions are met:
52632 + 1) The kernel using grsecurity is not precompiled by some distribution
52633 + 2) You have also enabled GRKERNSEC_DMESG
52634 + 3) You are using the RBAC system and hiding other files such as your
52635 + kernel image and System.map. Alternatively, enabling this option
52636 + causes the permissions on /boot, /lib/modules, and the kernel
52637 + source directory to change at compile time to prevent
52638 + reading by non-root users.
52639 + If the above conditions are met, this option will aid in providing a
52640 + useful protection against local kernel exploitation of overflows
52641 + and arbitrary read/write vulnerabilities.
52642 +
52643 +config GRKERNSEC_KERN_LOCKOUT
52644 + bool "Active kernel exploit response"
52645 + depends on X86
52646 + help
52647 + If you say Y here, when a PaX alert is triggered due to suspicious
52648 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
52649 + or an OOPs occurs due to bad memory accesses, instead of just
52650 + terminating the offending process (and potentially allowing
52651 + a subsequent exploit from the same user), we will take one of two
52652 + actions:
52653 + If the user was root, we will panic the system
52654 + If the user was non-root, we will log the attempt, terminate
52655 + all processes owned by the user, then prevent them from creating
52656 + any new processes until the system is restarted
52657 + This deters repeated kernel exploitation/bruteforcing attempts
52658 + and is useful for later forensics.
52659 +
52660 +endmenu
52661 +menu "Role Based Access Control Options"
52662 +depends on GRKERNSEC
52663 +
52664 +config GRKERNSEC_RBAC_DEBUG
52665 + bool
52666 +
52667 +config GRKERNSEC_NO_RBAC
52668 + bool "Disable RBAC system"
52669 + help
52670 + If you say Y here, the /dev/grsec device will be removed from the kernel,
52671 + preventing the RBAC system from being enabled. You should only say Y
52672 + here if you have no intention of using the RBAC system, so as to prevent
52673 + an attacker with root access from misusing the RBAC system to hide files
52674 + and processes when loadable module support and /dev/[k]mem have been
52675 + locked down.
52676 +
52677 +config GRKERNSEC_ACL_HIDEKERN
52678 + bool "Hide kernel processes"
52679 + help
52680 + If you say Y here, all kernel threads will be hidden from all
52681 + processes but those whose subject has the "view hidden processes"
52682 + flag.
52683 +
52684 +config GRKERNSEC_ACL_MAXTRIES
52685 + int "Maximum tries before password lockout"
52686 + default 3
52687 + help
52688 + This option enforces the maximum number of times a user can attempt
52689 + to authorize themselves with the grsecurity RBAC system before being
52690 + denied the ability to attempt authorization again for a specified time.
52691 + The lower the number, the harder it will be to brute-force a password.
52692 +
52693 +config GRKERNSEC_ACL_TIMEOUT
52694 + int "Time to wait after max password tries, in seconds"
52695 + default 30
52696 + help
52697 + This option specifies the time the user must wait after attempting to
52698 + authorize to the RBAC system with the maximum number of invalid
52699 + passwords. The higher the number, the harder it will be to brute-force
52700 + a password.
52701 +
52702 +endmenu
52703 +menu "Filesystem Protections"
52704 +depends on GRKERNSEC
52705 +
52706 +config GRKERNSEC_PROC
52707 + bool "Proc restrictions"
52708 + help
52709 + If you say Y here, the permissions of the /proc filesystem
52710 + will be altered to enhance system security and privacy. You MUST
52711 + choose either a user only restriction or a user and group restriction.
52712 + Depending upon the option you choose, you can either restrict all users
52713 + to seeing only the processes they themselves run, or choose a special
52714 + group whose members can view all processes and files normally restricted
52715 + to root. NOTE: If you're running identd as
52716 + a non-root user, you will have to run it as the group you specify here.
52717 +
52718 +config GRKERNSEC_PROC_USER
52719 + bool "Restrict /proc to user only"
52720 + depends on GRKERNSEC_PROC
52721 + help
52722 + If you say Y here, non-root users will only be able to view their own
52723 + processes, and will be restricted from viewing network-related
52724 + information as well as kernel symbol and module information.
52725 +
52726 +config GRKERNSEC_PROC_USERGROUP
52727 + bool "Allow special group"
52728 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
52729 + help
52730 + If you say Y here, you will be able to select a group that will be
52731 + able to view all processes and network-related information. If you've
52732 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
52733 + remain hidden. This option is useful if you want to run identd as
52734 + a non-root user.
52735 +
52736 +config GRKERNSEC_PROC_GID
52737 + int "GID for special group"
52738 + depends on GRKERNSEC_PROC_USERGROUP
52739 + default 1001
52740 +
52741 +config GRKERNSEC_PROC_ADD
52742 + bool "Additional restrictions"
52743 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
52744 + help
52745 + If you say Y here, additional restrictions will be placed on
52746 + /proc that keep normal users from viewing device information and
52747 + slabinfo information that could be useful for exploits.
52748 +
52749 +config GRKERNSEC_LINK
52750 + bool "Linking restrictions"
52751 + help
52752 + If you say Y here, /tmp race exploits will be prevented, since users
52753 + will no longer be able to follow symlinks owned by other users in
52754 + world-writable +t directories (e.g. /tmp), unless the owner of the
52755 + symlink is the owner of the directory. Users will also not be
52756 + able to hardlink to files they do not own. If the sysctl option is
52757 + enabled, a sysctl option with name "linking_restrictions" is created.
52758 +
52759 +config GRKERNSEC_FIFO
52760 + bool "FIFO restrictions"
52761 + help
52762 + If you say Y here, users will not be able to write to FIFOs they don't
52763 + own in world-writable +t directories (e.g. /tmp), unless the owner of
52764 + the FIFO is the same as the owner of the directory it's held in. If the sysctl
52765 + option is enabled, a sysctl option with name "fifo_restrictions" is
52766 + created.
52767 +
52768 +config GRKERNSEC_SYSFS_RESTRICT
52769 + bool "Sysfs/debugfs restriction"
52770 + depends on SYSFS
52771 + help
52772 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
52773 + any filesystem normally mounted under it (e.g. debugfs) will only
52774 + be accessible by root. These filesystems generally provide access
52775 + to hardware and debug information that isn't appropriate for unprivileged
52776 + users of the system. Sysfs and debugfs have also become a large source
52777 + of new vulnerabilities, ranging from infoleaks to local compromise.
52778 + There has been very little oversight with an eye toward security involved
52779 + in adding new exporters of information to these filesystems, so their
52780 + use is discouraged.
52781 + This option is equivalent to a chmod 0700 of the mount paths.
52782 +
52783 +config GRKERNSEC_ROFS
52784 + bool "Runtime read-only mount protection"
52785 + help
52786 + If you say Y here, a sysctl option with name "romount_protect" will
52787 + be created. By setting this option to 1 at runtime, filesystems
52788 + will be protected in the following ways:
52789 + * No new writable mounts will be allowed
52790 + * Existing read-only mounts won't be able to be remounted read/write
52791 + * Write operations will be denied on all block devices
52792 + This option acts independently of grsec_lock: once it is set to 1,
52793 + it cannot be turned off. Therefore, please be mindful of the resulting
52794 + behavior if this option is enabled in an init script on a read-only
52795 + filesystem. This feature is mainly intended for secure embedded systems.
52796 +
52797 +config GRKERNSEC_CHROOT
52798 + bool "Chroot jail restrictions"
52799 + help
52800 + If you say Y here, you will be able to choose several options that will
52801 + make breaking out of a chrooted jail much more difficult. If you
52802 + encounter no software incompatibilities with the following options, it
52803 + is recommended that you enable each one.
52804 +
52805 +config GRKERNSEC_CHROOT_MOUNT
52806 + bool "Deny mounts"
52807 + depends on GRKERNSEC_CHROOT
52808 + help
52809 + If you say Y here, processes inside a chroot will not be able to
52810 + mount or remount filesystems. If the sysctl option is enabled, a
52811 + sysctl option with name "chroot_deny_mount" is created.
52812 +
52813 +config GRKERNSEC_CHROOT_DOUBLE
52814 + bool "Deny double-chroots"
52815 + depends on GRKERNSEC_CHROOT
52816 + help
52817 + If you say Y here, processes inside a chroot will not be able to chroot
52818 + again outside the chroot. This is a widely used method of breaking
52819 + out of a chroot jail and should not be allowed. If the sysctl
52820 + option is enabled, a sysctl option with name
52821 + "chroot_deny_chroot" is created.
52822 +
52823 +config GRKERNSEC_CHROOT_PIVOT
52824 + bool "Deny pivot_root in chroot"
52825 + depends on GRKERNSEC_CHROOT
52826 + help
52827 + If you say Y here, processes inside a chroot will not be able to use
52828 + a function called pivot_root() that was introduced in Linux 2.3.41. It
52829 + works similar to chroot in that it changes the root filesystem. This
52830 + function could be misused in a chrooted process to attempt to break out
52831 + of the chroot, and therefore should not be allowed. If the sysctl
52832 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
52833 + created.
52834 +
52835 +config GRKERNSEC_CHROOT_CHDIR
52836 + bool "Enforce chdir(\"/\") on all chroots"
52837 + depends on GRKERNSEC_CHROOT
52838 + help
52839 + If you say Y here, the current working directory of all newly-chrooted
52840 + applications will be set to the root directory of the chroot.
52841 + The man page on chroot(2) states:
52842 + Note that this call does not change the current working
52843 + directory, so that `.' can be outside the tree rooted at
52844 + `/'. In particular, the super-user can escape from a
52845 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
52846 +
52847 + It is recommended that you say Y here, since it's not known to break
52848 + any software. If the sysctl option is enabled, a sysctl option with
52849 + name "chroot_enforce_chdir" is created.
52850 +
52851 +config GRKERNSEC_CHROOT_CHMOD
52852 + bool "Deny (f)chmod +s"
52853 + depends on GRKERNSEC_CHROOT
52854 + help
52855 + If you say Y here, processes inside a chroot will not be able to chmod
52856 + or fchmod files to make them have suid or sgid bits. This protects
52857 + against another published method of breaking a chroot. If the sysctl
52858 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
52859 + created.
52860 +
52861 +config GRKERNSEC_CHROOT_FCHDIR
52862 + bool "Deny fchdir out of chroot"
52863 + depends on GRKERNSEC_CHROOT
52864 + help
52865 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
52866 + to a file descriptor of the chrooting process that points to a directory
52867 + outside the filesystem will be stopped. If the sysctl option
52868 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
52869 +
52870 +config GRKERNSEC_CHROOT_MKNOD
52871 + bool "Deny mknod"
52872 + depends on GRKERNSEC_CHROOT
52873 + help
52874 + If you say Y here, processes inside a chroot will not be allowed to
52875 + mknod. The problem with using mknod inside a chroot is that it
52876 + would allow an attacker to create a device entry that is the same
52877 + as one on the physical root of your system, which could range from
52878 + anything from the console device to a device for your harddrive (which
52879 + they could then use to wipe the drive or steal data). It is recommended
52880 + that you say Y here, unless you run into software incompatibilities.
52881 + If the sysctl option is enabled, a sysctl option with name
52882 + "chroot_deny_mknod" is created.
52883 +
52884 +config GRKERNSEC_CHROOT_SHMAT
52885 + bool "Deny shmat() out of chroot"
52886 + depends on GRKERNSEC_CHROOT
52887 + help
52888 + If you say Y here, processes inside a chroot will not be able to attach
52889 + to shared memory segments that were created outside of the chroot jail.
52890 + It is recommended that you say Y here. If the sysctl option is enabled,
52891 + a sysctl option with name "chroot_deny_shmat" is created.
52892 +
52893 +config GRKERNSEC_CHROOT_UNIX
52894 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
52895 + depends on GRKERNSEC_CHROOT
52896 + help
52897 + If you say Y here, processes inside a chroot will not be able to
52898 + connect to abstract (meaning not belonging to a filesystem) Unix
52899 + domain sockets that were bound outside of a chroot. It is recommended
52900 + that you say Y here. If the sysctl option is enabled, a sysctl option
52901 + with name "chroot_deny_unix" is created.
52902 +
52903 +config GRKERNSEC_CHROOT_FINDTASK
52904 + bool "Protect outside processes"
52905 + depends on GRKERNSEC_CHROOT
52906 + help
52907 + If you say Y here, processes inside a chroot will not be able to
52908 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
52909 + getsid, or view any process outside of the chroot. If the sysctl
52910 + option is enabled, a sysctl option with name "chroot_findtask" is
52911 + created.
52912 +
52913 +config GRKERNSEC_CHROOT_NICE
52914 + bool "Restrict priority changes"
52915 + depends on GRKERNSEC_CHROOT
52916 + help
52917 + If you say Y here, processes inside a chroot will not be able to raise
52918 + the priority of processes in the chroot, or alter the priority of
52919 + processes outside the chroot. This provides more security than simply
52920 + removing CAP_SYS_NICE from the process' capability set. If the
52921 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
52922 + is created.
52923 +
52924 +config GRKERNSEC_CHROOT_SYSCTL
52925 + bool "Deny sysctl writes"
52926 + depends on GRKERNSEC_CHROOT
52927 + help
52928 + If you say Y here, an attacker in a chroot will not be able to
52929 + write to sysctl entries, either by sysctl(2) or through a /proc
52930 + interface. It is strongly recommended that you say Y here. If the
52931 + sysctl option is enabled, a sysctl option with name
52932 + "chroot_deny_sysctl" is created.
52933 +
52934 +config GRKERNSEC_CHROOT_CAPS
52935 + bool "Capability restrictions"
52936 + depends on GRKERNSEC_CHROOT
52937 + help
52938 + If you say Y here, the capabilities on all root processes within a
52939 + chroot jail will be lowered to stop module insertion, raw i/o,
52940 + system and net admin tasks, rebooting the system, modifying immutable
52941 + files, modifying IPC owned by another, and changing the system time.
52942 + This is left as an option because it can break some apps. Disable this
52943 + if your chrooted apps are having problems performing those kinds of
52944 + tasks. If the sysctl option is enabled, a sysctl option with
52945 + name "chroot_caps" is created.
52946 +
52947 +endmenu
52948 +menu "Kernel Auditing"
52949 +depends on GRKERNSEC
52950 +
52951 +config GRKERNSEC_AUDIT_GROUP
52952 + bool "Single group for auditing"
52953 + help
52954 + If you say Y here, the exec, chdir, and (un)mount logging features
52955 + will only operate on a group you specify. This option is recommended
52956 + if you only want to watch certain users instead of having a large
52957 + amount of logs from the entire system. If the sysctl option is enabled,
52958 + a sysctl option with name "audit_group" is created.
52959 +
52960 +config GRKERNSEC_AUDIT_GID
52961 + int "GID for auditing"
52962 + depends on GRKERNSEC_AUDIT_GROUP
52963 + default 1007
52964 +
52965 +config GRKERNSEC_EXECLOG
52966 + bool "Exec logging"
52967 + help
52968 + If you say Y here, all execve() calls will be logged (since the
52969 + other exec*() calls are frontends to execve(), all execution
52970 + will be logged). Useful for shell-servers that like to keep track
52971 + of their users. If the sysctl option is enabled, a sysctl option with
52972 + name "exec_logging" is created.
52973 + WARNING: This option when enabled will produce a LOT of logs, especially
52974 + on an active system.
52975 +
52976 +config GRKERNSEC_RESLOG
52977 + bool "Resource logging"
52978 + help
52979 + If you say Y here, all attempts to overstep resource limits will
52980 + be logged with the resource name, the requested size, and the current
52981 + limit. It is highly recommended that you say Y here. If the sysctl
52982 + option is enabled, a sysctl option with name "resource_logging" is
52983 + created. If the RBAC system is enabled, the sysctl value is ignored.
52984 +
52985 +config GRKERNSEC_CHROOT_EXECLOG
52986 + bool "Log execs within chroot"
52987 + help
52988 + If you say Y here, all executions inside a chroot jail will be logged
52989 + to syslog. This can cause a large number of logs if certain
52990 + applications (e.g. djb's daemontools) are installed on the system, and
52991 + is therefore left as an option. If the sysctl option is enabled, a
52992 + sysctl option with name "chroot_execlog" is created.
52993 +
52994 +config GRKERNSEC_AUDIT_PTRACE
52995 + bool "Ptrace logging"
52996 + help
52997 + If you say Y here, all attempts to attach to a process via ptrace
52998 + will be logged. If the sysctl option is enabled, a sysctl option
52999 + with name "audit_ptrace" is created.
53000 +
53001 +config GRKERNSEC_AUDIT_CHDIR
53002 + bool "Chdir logging"
53003 + help
53004 + If you say Y here, all chdir() calls will be logged. If the sysctl
53005 + option is enabled, a sysctl option with name "audit_chdir" is created.
53006 +
53007 +config GRKERNSEC_AUDIT_MOUNT
53008 + bool "(Un)Mount logging"
53009 + help
53010 + If you say Y here, all mounts and unmounts will be logged. If the
53011 + sysctl option is enabled, a sysctl option with name "audit_mount" is
53012 + created.
53013 +
53014 +config GRKERNSEC_SIGNAL
53015 + bool "Signal logging"
53016 + help
53017 + If you say Y here, certain important signals will be logged, such as
53018 + SIGSEGV, which will as a result inform you when an error in a program
53019 + occurred, which in some cases could mean a possible exploit attempt.
53020 + If the sysctl option is enabled, a sysctl option with name
53021 + "signal_logging" is created.
53022 +
53023 +config GRKERNSEC_FORKFAIL
53024 + bool "Fork failure logging"
53025 + help
53026 + If you say Y here, all failed fork() attempts will be logged.
53027 + This could suggest a fork bomb, or someone attempting to overstep
53028 + their process limit. If the sysctl option is enabled, a sysctl option
53029 + with name "forkfail_logging" is created.
53030 +
53031 +config GRKERNSEC_TIME
53032 + bool "Time change logging"
53033 + help
53034 + If you say Y here, any changes of the system clock will be logged.
53035 + If the sysctl option is enabled, a sysctl option with name
53036 + "timechange_logging" is created.
53037 +
53038 +config GRKERNSEC_PROC_IPADDR
53039 + bool "/proc/<pid>/ipaddr support"
53040 + help
53041 + If you say Y here, a new entry will be added to each /proc/<pid>
53042 + directory that contains the IP address of the person using the task.
53043 + The IP is carried across local TCP and AF_UNIX stream sockets.
53044 + This information can be useful for IDS/IPSes to perform remote response
53045 + to a local attack. The entry is readable by only the owner of the
53046 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
53047 + the RBAC system), and thus does not create privacy concerns.
53048 +
53049 +config GRKERNSEC_RWXMAP_LOG
53050 + bool 'Denied RWX mmap/mprotect logging'
53051 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
53052 + help
53053 + If you say Y here, calls to mmap() and mprotect() with explicit
53054 + usage of PROT_WRITE and PROT_EXEC together will be logged when
53055 + denied by the PAX_MPROTECT feature. If the sysctl option is
53056 + enabled, a sysctl option with name "rwxmap_logging" is created.
53057 +
53058 +config GRKERNSEC_AUDIT_TEXTREL
53059 + bool 'ELF text relocations logging (READ HELP)'
53060 + depends on PAX_MPROTECT
53061 + help
53062 + If you say Y here, text relocations will be logged with the filename
53063 + of the offending library or binary. The purpose of the feature is
53064 + to help Linux distribution developers get rid of libraries and
53065 + binaries that need text relocations which hinder the future progress
53066 + of PaX. Only Linux distribution developers should say Y here, and
53067 + never on a production machine, as this option creates an information
53068 + leak that could aid an attacker in defeating the randomization of
53069 + a single memory region. If the sysctl option is enabled, a sysctl
53070 + option with name "audit_textrel" is created.
53071 +
53072 +endmenu
53073 +
53074 +menu "Executable Protections"
53075 +depends on GRKERNSEC
53076 +
53077 +config GRKERNSEC_EXECVE
53078 + bool "Enforce RLIMIT_NPROC on execs"
53079 + help
53080 + If you say Y here, users with a resource limit on processes will
53081 + have the value checked during execve() calls. The current system
53082 + only checks the system limit during fork() calls. If the sysctl option
53083 + is enabled, a sysctl option with name "execve_limiting" is created.
53084 +
53085 +config GRKERNSEC_DMESG
53086 + bool "Dmesg(8) restriction"
53087 + help
53088 + If you say Y here, non-root users will not be able to use dmesg(8)
53089 + to view up to the last 4kb of messages in the kernel's log buffer.
53090 + The kernel's log buffer often contains kernel addresses and other
53091 + identifying information useful to an attacker in fingerprinting a
53092 + system for a targeted exploit.
53093 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
53094 + created.
53095 +
53096 +config GRKERNSEC_HARDEN_PTRACE
53097 + bool "Deter ptrace-based process snooping"
53098 + help
53099 + If you say Y here, TTY sniffers and other malicious monitoring
53100 + programs implemented through ptrace will be defeated. If you
53101 + have been using the RBAC system, this option has already been
53102 + enabled for several years for all users, with the ability to make
53103 + fine-grained exceptions.
53104 +
53105 + This option only affects the ability of non-root users to ptrace
53106 + processes that are not a descendant of the ptracing process.
53107 + This means that strace ./binary and gdb ./binary will still work,
53108 + but attaching to arbitrary processes will not. If the sysctl
53109 + option is enabled, a sysctl option with name "harden_ptrace" is
53110 + created.
53111 +
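A quick way to observe the behaviour described above as an unprivileged user (the daemon name is hypothetical):

    # tracing a descendant of the current shell still works
    strace -f ./binary

    # attaching to an arbitrary, unrelated process is refused
    strace -p "$(pgrep -o some_daemon)"   # expected to fail with EPERM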
53112 +config GRKERNSEC_TPE
53113 + bool "Trusted Path Execution (TPE)"
53114 + help
53115 + If you say Y here, you will be able to choose a gid to add to the
53116 + supplementary groups of users you want to mark as "untrusted."
53117 + These users will not be able to execute any files that are not in
53118 + root-owned directories writable only by root. If the sysctl option
53119 + is enabled, a sysctl option with name "tpe" is created.
53120 +
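A minimal setup sketch, assuming the default GID of 1005 from GRKERNSEC_TPE_GID below and runtime sysctl support; the group and user names are hypothetical:

    # create the group whose members are treated as untrusted
    groupadd -g 1005 tpe-untrusted
    usermod -aG tpe-untrusted alice

    # with GRKERNSEC_SYSCTL enabled, point TPE at that GID and switch it on
    echo 1005 > /proc/sys/kernel/grsecurity/tpe_gid
    echo 1    > /proc/sys/kernel/grsecurity/tpe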
53121 +config GRKERNSEC_TPE_ALL
53122 + bool "Partially restrict all non-root users"
53123 + depends on GRKERNSEC_TPE
53124 + help
53125 + If you say Y here, all non-root users will be covered under
53126 + a weaker TPE restriction. This is separate from, and in addition to,
53127 + the main TPE options that you have selected elsewhere. Thus, if a
53128 + "trusted" GID is chosen, this restriction applies even to that GID.
53129 + Under this restriction, all non-root users will only be allowed to
53130 + execute files in directories they own that are not group or
53131 + world-writable, or in directories owned by root and writable only by
53132 + root. If the sysctl option is enabled, a sysctl option with name
53133 + "tpe_restrict_all" is created.
53134 +
53135 +config GRKERNSEC_TPE_INVERT
53136 + bool "Invert GID option"
53137 + depends on GRKERNSEC_TPE
53138 + help
53139 + If you say Y here, the group you specify in the TPE configuration will
53140 + decide what group TPE restrictions will be *disabled* for. This
53141 + option is useful if you want TPE restrictions to be applied to most
53142 + users on the system. If the sysctl option is enabled, a sysctl option
53143 + with name "tpe_invert" is created. Unlike other sysctl options, this
53144 + entry will default to on for backward-compatibility.
53145 +
53146 +config GRKERNSEC_TPE_GID
53147 + int "GID for untrusted users"
53148 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
53149 + default 1005
53150 + help
53151 + Setting this GID determines what group TPE restrictions will be
53152 + *enabled* for. If the sysctl option is enabled, a sysctl option
53153 + with name "tpe_gid" is created.
53154 +
53155 +config GRKERNSEC_TPE_GID
53156 + int "GID for trusted users"
53157 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
53158 + default 1005
53159 + help
53160 + Setting this GID determines what group TPE restrictions will be
53161 + *disabled* for. If the sysctl option is enabled, a sysctl option
53162 + with name "tpe_gid" is created.
53163 +
53164 +endmenu
53165 +menu "Network Protections"
53166 +depends on GRKERNSEC
53167 +
53168 +config GRKERNSEC_RANDNET
53169 + bool "Larger entropy pools"
53170 + help
53171 + If you say Y here, the entropy pools used for many features of Linux
53172 + and grsecurity will be doubled in size. Since several grsecurity
53173 + features use additional randomness, it is recommended that you say Y
53174 + here. Saying Y here has a similar effect to modifying
53175 + /proc/sys/kernel/random/poolsize.
53176 +
53177 +config GRKERNSEC_BLACKHOLE
53178 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
53179 + help
53180 + If you say Y here, neither TCP resets nor ICMP
53181 + destination-unreachable packets will be sent in response to packets
53182 + sent to ports for which no associated listening process exists.
53183 + This feature supports both IPV4 and IPV6 and exempts the
53184 + loopback interface from blackholing. Enabling this feature
53185 + makes a host more resilient to DoS attacks and reduces network
53186 + visibility against scanners.
53187 +
53188 + The blackhole feature as-implemented is equivalent to the FreeBSD
53189 + blackhole feature, as it prevents RST responses to all packets, not
53190 + just SYNs. Under most application behavior this causes no
53191 + problems, but applications (like haproxy) may not close certain
53192 + connections in a way that cleanly terminates them on the remote
53193 + end, leaving the remote host in LAST_ACK state. Because of this
53194 + side-effect and to prevent intentional LAST_ACK DoSes, this
53195 + feature also adds automatic mitigation against such attacks.
53196 + The mitigation drastically reduces the amount of time a socket
53197 + can spend in LAST_ACK state. If you're using haproxy and not
53198 + all servers it connects to have this option enabled, consider
53199 + disabling this feature on the haproxy host.
53200 +
53201 + If the sysctl option is enabled, two sysctl options with names
53202 + "ip_blackhole" and "lastack_retries" will be created.
53203 + While "ip_blackhole" takes the standard zero/non-zero on/off
53204 + toggle, "lastack_retries" uses the same kinds of values as
53205 + "tcp_retries1" and "tcp_retries2". The default value of 4
53206 + prevents a socket from lasting more than 45 seconds in LAST_ACK
53207 + state.
53208 +
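Assuming sysctl support is enabled, the two entries described above can be tuned at runtime, for example:

    # stop sending RST/ICMP-unreachable responses for ports with no listener
    echo 1 > /proc/sys/kernel/grsecurity/ip_blackhole

    # keep the default LAST_ACK mitigation of 4 retries (roughly 45 seconds)
    echo 4 > /proc/sys/kernel/grsecurity/lastack_retries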
53209 +config GRKERNSEC_SOCKET
53210 + bool "Socket restrictions"
53211 + help
53212 + If you say Y here, you will be able to choose from several options.
53213 + If you assign a GID on your system and add it to the supplementary
53214 + groups of users you want to restrict socket access to, this patch
53215 + will apply up to three restrictions, based on the option(s) you choose.
53216 +
53217 +config GRKERNSEC_SOCKET_ALL
53218 + bool "Deny any sockets to group"
53219 + depends on GRKERNSEC_SOCKET
53220 + help
53221 + If you say Y here, you will be able to choose a GID whose users will
53222 + be unable to connect to other hosts from your machine or run server
53223 + applications from your machine. If the sysctl option is enabled, a
53224 + sysctl option with name "socket_all" is created.
53225 +
53226 +config GRKERNSEC_SOCKET_ALL_GID
53227 + int "GID to deny all sockets for"
53228 + depends on GRKERNSEC_SOCKET_ALL
53229 + default 1004
53230 + help
53231 + Here you can choose the GID to disable socket access for. Remember to
53232 + add the users you want socket access disabled for to the GID
53233 + specified here. If the sysctl option is enabled, a sysctl option
53234 + with name "socket_all_gid" is created.
53235 +
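As with TPE, the restriction is keyed to group membership. A sketch using the default GID of 1004, with hypothetical group and user names and sysctl support assumed:

    # members of this group lose the ability to use sockets entirely
    groupadd -g 1004 nosocket
    usermod -aG nosocket alice

    echo 1004 > /proc/sys/kernel/grsecurity/socket_all_gid
    echo 1    > /proc/sys/kernel/grsecurity/socket_all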
53236 +config GRKERNSEC_SOCKET_CLIENT
53237 + bool "Deny client sockets to group"
53238 + depends on GRKERNSEC_SOCKET
53239 + help
53240 + If you say Y here, you will be able to choose a GID whose users will
53241 + be unable to connect to other hosts from your machine, but will be
53242 + able to run servers. If this option is enabled, all users in the group
53243 + you specify will have to use passive mode when initiating ftp transfers
53244 + from the shell on your machine. If the sysctl option is enabled, a
53245 + sysctl option with name "socket_client" is created.
53246 +
53247 +config GRKERNSEC_SOCKET_CLIENT_GID
53248 + int "GID to deny client sockets for"
53249 + depends on GRKERNSEC_SOCKET_CLIENT
53250 + default 1003
53251 + help
53252 + Here you can choose the GID to disable client socket access for.
53253 + Remember to add the users you want client socket access disabled for to
53254 + the GID specified here. If the sysctl option is enabled, a sysctl
53255 + option with name "socket_client_gid" is created.
53256 +
53257 +config GRKERNSEC_SOCKET_SERVER
53258 + bool "Deny server sockets to group"
53259 + depends on GRKERNSEC_SOCKET
53260 + help
53261 + If you say Y here, you will be able to choose a GID whose users will
53262 + be unable to run server applications from your machine. If the sysctl
53263 + option is enabled, a sysctl option with name "socket_server" is created.
53264 +
53265 +config GRKERNSEC_SOCKET_SERVER_GID
53266 + int "GID to deny server sockets for"
53267 + depends on GRKERNSEC_SOCKET_SERVER
53268 + default 1002
53269 + help
53270 + Here you can choose the GID to disable server socket access for.
53271 + Remember to add the users you want server socket access disabled for to
53272 + the GID specified here. If the sysctl option is enabled, a sysctl
53273 + option with name "socket_server_gid" is created.
53274 +
53275 +endmenu
53276 +menu "Sysctl support"
53277 +depends on GRKERNSEC && SYSCTL
53278 +
53279 +config GRKERNSEC_SYSCTL
53280 + bool "Sysctl support"
53281 + help
53282 + If you say Y here, you will be able to change the options that
53283 + grsecurity runs with at bootup, without having to recompile your
53284 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
53285 + to enable (1) or disable (0) various features. All the sysctl entries
53286 + are mutable until the "grsec_lock" entry is set to a non-zero value.
53287 + All features enabled in the kernel configuration are disabled at boot
53288 + if you do not say Y to the "Turn on features by default" option.
53289 + All options should be set at startup, and the grsec_lock entry should
53290 + be set to a non-zero value after all the options are set.
53291 + *THIS IS EXTREMELY IMPORTANT*
53292 +
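A typical boot script therefore sets every desired tunable first and writes grsec_lock last; the individual entries below are examples drawn from the options in this file:

    cd /proc/sys/kernel/grsecurity

    # enable the desired features ...
    echo 1 > dmesg
    echo 1 > harden_ptrace
    echo 1 > tpe

    # ... then lock the configuration against further changes
    echo 1 > grsec_lock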
53293 +config GRKERNSEC_SYSCTL_DISTRO
53294 + bool "Extra sysctl support for distro makers (READ HELP)"
53295 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
53296 + help
53297 + If you say Y here, additional sysctl options will be created
53298 + for features that affect processes running as root. Therefore,
53299 + it is critical when using this option that the grsec_lock entry be
53300 + enabled after boot. Only distros that ship prebuilt kernel packages
53301 + with this option enabled and that can ensure grsec_lock is enabled
53302 + after boot should use this option.
53303 + *Failure to set grsec_lock after boot makes all grsec features
53304 + this option covers useless*
53305 +
53306 + Currently this option creates the following sysctl entries:
53307 + "Disable Privileged I/O": "disable_priv_io"
53308 +
53309 +config GRKERNSEC_SYSCTL_ON
53310 + bool "Turn on features by default"
53311 + depends on GRKERNSEC_SYSCTL
53312 + help
53313 + If you say Y here, the features enabled in the kernel configuration
53314 + will also be enabled at boot time, rather than starting out
53315 + disabled. It is recommended you say Y here unless
53316 + there is some reason you would want all sysctl-tunable features to
53317 + be disabled by default. As mentioned elsewhere, it is important
53318 + to enable the grsec_lock entry once you have finished modifying
53319 + the sysctl entries.
53320 +
53321 +endmenu
53322 +menu "Logging Options"
53323 +depends on GRKERNSEC
53324 +
53325 +config GRKERNSEC_FLOODTIME
53326 + int "Seconds in between log messages (minimum)"
53327 + default 10
53328 + help
53329 + This option allows you to enforce a minimum number of seconds between
53330 + grsecurity log messages. The default should be suitable for most
53331 + people; however, if you choose to change it, pick a value small enough
53332 + to allow informative logs to be produced, but large enough to
53333 + prevent flooding.
53334 +
53335 +config GRKERNSEC_FLOODBURST
53336 + int "Number of messages in a burst (maximum)"
53337 + default 4
53338 + help
53339 + This option allows you to choose the maximum number of messages allowed
53340 + within the flood time interval you chose in a separate option. The
53341 + default should be suitable for most people; however, if you find that
53342 + many of your logs are being interpreted as flooding, you may want to
53343 + raise this value.
53344 +
53345 +endmenu
53346 +
53347 +endmenu
53348 diff -urNp linux-2.6.32.41/grsecurity/Makefile linux-2.6.32.41/grsecurity/Makefile
53349 --- linux-2.6.32.41/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
53350 +++ linux-2.6.32.41/grsecurity/Makefile 2011-05-24 20:27:46.000000000 -0400
53351 @@ -0,0 +1,33 @@
53352 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
53353 +# during 2001-2009 it was completely redesigned by Brad Spengler
53354 +# into an RBAC system
53355 +#
53356 +# All code in this directory and various hooks inserted throughout the kernel
53357 +# are copyright Brad Spengler - Open Source Security, Inc., and released
53358 +# under the GPL v2 or higher
53359 +
53360 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
53361 + grsec_mount.o grsec_sig.o grsec_sock.o grsec_sysctl.o \
53362 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
53363 +
53364 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
53365 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
53366 + gracl_learn.o grsec_log.o
53367 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
53368 +
53369 +ifdef CONFIG_NET
53370 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
53371 +endif
53372 +
53373 +ifndef CONFIG_GRKERNSEC
53374 +obj-y += grsec_disabled.o
53375 +endif
53376 +
53377 +ifdef CONFIG_GRKERNSEC_HIDESYM
53378 +extra-y := grsec_hidesym.o
53379 +$(obj)/grsec_hidesym.o:
53380 + @-chmod -f 500 /boot
53381 + @-chmod -f 500 /lib/modules
53382 + @-chmod -f 700 .
53383 + @echo ' grsec: protected kernel image paths'
53384 +endif
53385 diff -urNp linux-2.6.32.41/include/acpi/acpi_drivers.h linux-2.6.32.41/include/acpi/acpi_drivers.h
53386 --- linux-2.6.32.41/include/acpi/acpi_drivers.h 2011-03-27 14:31:47.000000000 -0400
53387 +++ linux-2.6.32.41/include/acpi/acpi_drivers.h 2011-04-17 15:56:46.000000000 -0400
53388 @@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acp
53389 Dock Station
53390 -------------------------------------------------------------------------- */
53391 struct acpi_dock_ops {
53392 - acpi_notify_handler handler;
53393 - acpi_notify_handler uevent;
53394 + const acpi_notify_handler handler;
53395 + const acpi_notify_handler uevent;
53396 };
53397
53398 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
53399 @@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle ha
53400 extern int register_dock_notifier(struct notifier_block *nb);
53401 extern void unregister_dock_notifier(struct notifier_block *nb);
53402 extern int register_hotplug_dock_device(acpi_handle handle,
53403 - struct acpi_dock_ops *ops,
53404 + const struct acpi_dock_ops *ops,
53405 void *context);
53406 extern void unregister_hotplug_dock_device(acpi_handle handle);
53407 #else
53408 @@ -144,7 +144,7 @@ static inline void unregister_dock_notif
53409 {
53410 }
53411 static inline int register_hotplug_dock_device(acpi_handle handle,
53412 - struct acpi_dock_ops *ops,
53413 + const struct acpi_dock_ops *ops,
53414 void *context)
53415 {
53416 return -ENODEV;
53417 diff -urNp linux-2.6.32.41/include/asm-generic/atomic-long.h linux-2.6.32.41/include/asm-generic/atomic-long.h
53418 --- linux-2.6.32.41/include/asm-generic/atomic-long.h 2011-03-27 14:31:47.000000000 -0400
53419 +++ linux-2.6.32.41/include/asm-generic/atomic-long.h 2011-05-16 21:46:57.000000000 -0400
53420 @@ -22,6 +22,12 @@
53421
53422 typedef atomic64_t atomic_long_t;
53423
53424 +#ifdef CONFIG_PAX_REFCOUNT
53425 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
53426 +#else
53427 +typedef atomic64_t atomic_long_unchecked_t;
53428 +#endif
53429 +
53430 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
53431
53432 static inline long atomic_long_read(atomic_long_t *l)
53433 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
53434 return (long)atomic64_read(v);
53435 }
53436
53437 +#ifdef CONFIG_PAX_REFCOUNT
53438 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
53439 +{
53440 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53441 +
53442 + return (long)atomic64_read_unchecked(v);
53443 +}
53444 +#endif
53445 +
53446 static inline void atomic_long_set(atomic_long_t *l, long i)
53447 {
53448 atomic64_t *v = (atomic64_t *)l;
53449 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
53450 atomic64_set(v, i);
53451 }
53452
53453 +#ifdef CONFIG_PAX_REFCOUNT
53454 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
53455 +{
53456 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53457 +
53458 + atomic64_set_unchecked(v, i);
53459 +}
53460 +#endif
53461 +
53462 static inline void atomic_long_inc(atomic_long_t *l)
53463 {
53464 atomic64_t *v = (atomic64_t *)l;
53465 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
53466 atomic64_inc(v);
53467 }
53468
53469 +#ifdef CONFIG_PAX_REFCOUNT
53470 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
53471 +{
53472 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53473 +
53474 + atomic64_inc_unchecked(v);
53475 +}
53476 +#endif
53477 +
53478 static inline void atomic_long_dec(atomic_long_t *l)
53479 {
53480 atomic64_t *v = (atomic64_t *)l;
53481 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
53482 atomic64_dec(v);
53483 }
53484
53485 +#ifdef CONFIG_PAX_REFCOUNT
53486 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
53487 +{
53488 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53489 +
53490 + atomic64_dec_unchecked(v);
53491 +}
53492 +#endif
53493 +
53494 static inline void atomic_long_add(long i, atomic_long_t *l)
53495 {
53496 atomic64_t *v = (atomic64_t *)l;
53497 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long
53498 atomic64_add(i, v);
53499 }
53500
53501 +#ifdef CONFIG_PAX_REFCOUNT
53502 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
53503 +{
53504 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53505 +
53506 + atomic64_add_unchecked(i, v);
53507 +}
53508 +#endif
53509 +
53510 static inline void atomic_long_sub(long i, atomic_long_t *l)
53511 {
53512 atomic64_t *v = (atomic64_t *)l;
53513 @@ -115,6 +166,15 @@ static inline long atomic_long_inc_retur
53514 return (long)atomic64_inc_return(v);
53515 }
53516
53517 +#ifdef CONFIG_PAX_REFCOUNT
53518 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
53519 +{
53520 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53521 +
53522 + return (long)atomic64_inc_return_unchecked(v);
53523 +}
53524 +#endif
53525 +
53526 static inline long atomic_long_dec_return(atomic_long_t *l)
53527 {
53528 atomic64_t *v = (atomic64_t *)l;
53529 @@ -140,6 +200,12 @@ static inline long atomic_long_add_unles
53530
53531 typedef atomic_t atomic_long_t;
53532
53533 +#ifdef CONFIG_PAX_REFCOUNT
53534 +typedef atomic_unchecked_t atomic_long_unchecked_t;
53535 +#else
53536 +typedef atomic_t atomic_long_unchecked_t;
53537 +#endif
53538 +
53539 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
53540 static inline long atomic_long_read(atomic_long_t *l)
53541 {
53542 @@ -148,6 +214,15 @@ static inline long atomic_long_read(atom
53543 return (long)atomic_read(v);
53544 }
53545
53546 +#ifdef CONFIG_PAX_REFCOUNT
53547 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
53548 +{
53549 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53550 +
53551 + return (long)atomic_read_unchecked(v);
53552 +}
53553 +#endif
53554 +
53555 static inline void atomic_long_set(atomic_long_t *l, long i)
53556 {
53557 atomic_t *v = (atomic_t *)l;
53558 @@ -155,6 +230,15 @@ static inline void atomic_long_set(atomi
53559 atomic_set(v, i);
53560 }
53561
53562 +#ifdef CONFIG_PAX_REFCOUNT
53563 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
53564 +{
53565 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53566 +
53567 + atomic_set_unchecked(v, i);
53568 +}
53569 +#endif
53570 +
53571 static inline void atomic_long_inc(atomic_long_t *l)
53572 {
53573 atomic_t *v = (atomic_t *)l;
53574 @@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomi
53575 atomic_inc(v);
53576 }
53577
53578 +#ifdef CONFIG_PAX_REFCOUNT
53579 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
53580 +{
53581 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53582 +
53583 + atomic_inc_unchecked(v);
53584 +}
53585 +#endif
53586 +
53587 static inline void atomic_long_dec(atomic_long_t *l)
53588 {
53589 atomic_t *v = (atomic_t *)l;
53590 @@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomi
53591 atomic_dec(v);
53592 }
53593
53594 +#ifdef CONFIG_PAX_REFCOUNT
53595 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
53596 +{
53597 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53598 +
53599 + atomic_dec_unchecked(v);
53600 +}
53601 +#endif
53602 +
53603 static inline void atomic_long_add(long i, atomic_long_t *l)
53604 {
53605 atomic_t *v = (atomic_t *)l;
53606 @@ -176,6 +278,15 @@ static inline void atomic_long_add(long
53607 atomic_add(i, v);
53608 }
53609
53610 +#ifdef CONFIG_PAX_REFCOUNT
53611 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
53612 +{
53613 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53614 +
53615 + atomic_add_unchecked(i, v);
53616 +}
53617 +#endif
53618 +
53619 static inline void atomic_long_sub(long i, atomic_long_t *l)
53620 {
53621 atomic_t *v = (atomic_t *)l;
53622 @@ -232,6 +343,15 @@ static inline long atomic_long_inc_retur
53623 return (long)atomic_inc_return(v);
53624 }
53625
53626 +#ifdef CONFIG_PAX_REFCOUNT
53627 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
53628 +{
53629 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53630 +
53631 + return (long)atomic_inc_return_unchecked(v);
53632 +}
53633 +#endif
53634 +
53635 static inline long atomic_long_dec_return(atomic_long_t *l)
53636 {
53637 atomic_t *v = (atomic_t *)l;
53638 @@ -255,4 +375,47 @@ static inline long atomic_long_add_unles
53639
53640 #endif /* BITS_PER_LONG == 64 */
53641
53642 +#ifdef CONFIG_PAX_REFCOUNT
53643 +static inline void pax_refcount_needs_these_functions(void)
53644 +{
53645 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
53646 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
53647 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
53648 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
53649 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
53650 + atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
53651 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
53652 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
53653 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
53654 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
53655 + atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
53656 +
53657 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
53658 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
53659 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
53660 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
53661 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
53662 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
53663 +}
53664 +#else
53665 +#define atomic_read_unchecked(v) atomic_read(v)
53666 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
53667 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
53668 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
53669 +#define atomic_inc_unchecked(v) atomic_inc(v)
53670 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
53671 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
53672 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
53673 +#define atomic_dec_unchecked(v) atomic_dec(v)
53674 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
53675 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
53676 +
53677 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
53678 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
53679 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
53680 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
53681 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
53682 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
53683 +#endif
53684 +
53685 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
53686 diff -urNp linux-2.6.32.41/include/asm-generic/cache.h linux-2.6.32.41/include/asm-generic/cache.h
53687 --- linux-2.6.32.41/include/asm-generic/cache.h 2011-03-27 14:31:47.000000000 -0400
53688 +++ linux-2.6.32.41/include/asm-generic/cache.h 2011-05-04 17:56:28.000000000 -0400
53689 @@ -6,7 +6,7 @@
53690 * cache lines need to provide their own cache.h.
53691 */
53692
53693 -#define L1_CACHE_SHIFT 5
53694 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
53695 +#define L1_CACHE_SHIFT 5U
53696 +#define L1_CACHE_BYTES (1U << L1_CACHE_SHIFT)
53697
53698 #endif /* __ASM_GENERIC_CACHE_H */
53699 diff -urNp linux-2.6.32.41/include/asm-generic/dma-mapping-common.h linux-2.6.32.41/include/asm-generic/dma-mapping-common.h
53700 --- linux-2.6.32.41/include/asm-generic/dma-mapping-common.h 2011-03-27 14:31:47.000000000 -0400
53701 +++ linux-2.6.32.41/include/asm-generic/dma-mapping-common.h 2011-04-17 15:56:46.000000000 -0400
53702 @@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_
53703 enum dma_data_direction dir,
53704 struct dma_attrs *attrs)
53705 {
53706 - struct dma_map_ops *ops = get_dma_ops(dev);
53707 + const struct dma_map_ops *ops = get_dma_ops(dev);
53708 dma_addr_t addr;
53709
53710 kmemcheck_mark_initialized(ptr, size);
53711 @@ -30,7 +30,7 @@ static inline void dma_unmap_single_attr
53712 enum dma_data_direction dir,
53713 struct dma_attrs *attrs)
53714 {
53715 - struct dma_map_ops *ops = get_dma_ops(dev);
53716 + const struct dma_map_ops *ops = get_dma_ops(dev);
53717
53718 BUG_ON(!valid_dma_direction(dir));
53719 if (ops->unmap_page)
53720 @@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struc
53721 int nents, enum dma_data_direction dir,
53722 struct dma_attrs *attrs)
53723 {
53724 - struct dma_map_ops *ops = get_dma_ops(dev);
53725 + const struct dma_map_ops *ops = get_dma_ops(dev);
53726 int i, ents;
53727 struct scatterlist *s;
53728
53729 @@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(st
53730 int nents, enum dma_data_direction dir,
53731 struct dma_attrs *attrs)
53732 {
53733 - struct dma_map_ops *ops = get_dma_ops(dev);
53734 + const struct dma_map_ops *ops = get_dma_ops(dev);
53735
53736 BUG_ON(!valid_dma_direction(dir));
53737 debug_dma_unmap_sg(dev, sg, nents, dir);
53738 @@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(st
53739 size_t offset, size_t size,
53740 enum dma_data_direction dir)
53741 {
53742 - struct dma_map_ops *ops = get_dma_ops(dev);
53743 + const struct dma_map_ops *ops = get_dma_ops(dev);
53744 dma_addr_t addr;
53745
53746 kmemcheck_mark_initialized(page_address(page) + offset, size);
53747 @@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(st
53748 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
53749 size_t size, enum dma_data_direction dir)
53750 {
53751 - struct dma_map_ops *ops = get_dma_ops(dev);
53752 + const struct dma_map_ops *ops = get_dma_ops(dev);
53753
53754 BUG_ON(!valid_dma_direction(dir));
53755 if (ops->unmap_page)
53756 @@ -97,7 +97,7 @@ static inline void dma_sync_single_for_c
53757 size_t size,
53758 enum dma_data_direction dir)
53759 {
53760 - struct dma_map_ops *ops = get_dma_ops(dev);
53761 + const struct dma_map_ops *ops = get_dma_ops(dev);
53762
53763 BUG_ON(!valid_dma_direction(dir));
53764 if (ops->sync_single_for_cpu)
53765 @@ -109,7 +109,7 @@ static inline void dma_sync_single_for_d
53766 dma_addr_t addr, size_t size,
53767 enum dma_data_direction dir)
53768 {
53769 - struct dma_map_ops *ops = get_dma_ops(dev);
53770 + const struct dma_map_ops *ops = get_dma_ops(dev);
53771
53772 BUG_ON(!valid_dma_direction(dir));
53773 if (ops->sync_single_for_device)
53774 @@ -123,7 +123,7 @@ static inline void dma_sync_single_range
53775 size_t size,
53776 enum dma_data_direction dir)
53777 {
53778 - struct dma_map_ops *ops = get_dma_ops(dev);
53779 + const struct dma_map_ops *ops = get_dma_ops(dev);
53780
53781 BUG_ON(!valid_dma_direction(dir));
53782 if (ops->sync_single_range_for_cpu) {
53783 @@ -140,7 +140,7 @@ static inline void dma_sync_single_range
53784 size_t size,
53785 enum dma_data_direction dir)
53786 {
53787 - struct dma_map_ops *ops = get_dma_ops(dev);
53788 + const struct dma_map_ops *ops = get_dma_ops(dev);
53789
53790 BUG_ON(!valid_dma_direction(dir));
53791 if (ops->sync_single_range_for_device) {
53792 @@ -155,7 +155,7 @@ static inline void
53793 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
53794 int nelems, enum dma_data_direction dir)
53795 {
53796 - struct dma_map_ops *ops = get_dma_ops(dev);
53797 + const struct dma_map_ops *ops = get_dma_ops(dev);
53798
53799 BUG_ON(!valid_dma_direction(dir));
53800 if (ops->sync_sg_for_cpu)
53801 @@ -167,7 +167,7 @@ static inline void
53802 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
53803 int nelems, enum dma_data_direction dir)
53804 {
53805 - struct dma_map_ops *ops = get_dma_ops(dev);
53806 + const struct dma_map_ops *ops = get_dma_ops(dev);
53807
53808 BUG_ON(!valid_dma_direction(dir));
53809 if (ops->sync_sg_for_device)
53810 diff -urNp linux-2.6.32.41/include/asm-generic/futex.h linux-2.6.32.41/include/asm-generic/futex.h
53811 --- linux-2.6.32.41/include/asm-generic/futex.h 2011-03-27 14:31:47.000000000 -0400
53812 +++ linux-2.6.32.41/include/asm-generic/futex.h 2011-04-17 15:56:46.000000000 -0400
53813 @@ -6,7 +6,7 @@
53814 #include <asm/errno.h>
53815
53816 static inline int
53817 -futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
53818 +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
53819 {
53820 int op = (encoded_op >> 28) & 7;
53821 int cmp = (encoded_op >> 24) & 15;
53822 @@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op,
53823 }
53824
53825 static inline int
53826 -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
53827 +futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
53828 {
53829 return -ENOSYS;
53830 }
53831 diff -urNp linux-2.6.32.41/include/asm-generic/int-l64.h linux-2.6.32.41/include/asm-generic/int-l64.h
53832 --- linux-2.6.32.41/include/asm-generic/int-l64.h 2011-03-27 14:31:47.000000000 -0400
53833 +++ linux-2.6.32.41/include/asm-generic/int-l64.h 2011-04-17 15:56:46.000000000 -0400
53834 @@ -46,6 +46,8 @@ typedef unsigned int u32;
53835 typedef signed long s64;
53836 typedef unsigned long u64;
53837
53838 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
53839 +
53840 #define S8_C(x) x
53841 #define U8_C(x) x ## U
53842 #define S16_C(x) x
53843 diff -urNp linux-2.6.32.41/include/asm-generic/int-ll64.h linux-2.6.32.41/include/asm-generic/int-ll64.h
53844 --- linux-2.6.32.41/include/asm-generic/int-ll64.h 2011-03-27 14:31:47.000000000 -0400
53845 +++ linux-2.6.32.41/include/asm-generic/int-ll64.h 2011-04-17 15:56:46.000000000 -0400
53846 @@ -51,6 +51,8 @@ typedef unsigned int u32;
53847 typedef signed long long s64;
53848 typedef unsigned long long u64;
53849
53850 +typedef unsigned long long intoverflow_t;
53851 +
53852 #define S8_C(x) x
53853 #define U8_C(x) x ## U
53854 #define S16_C(x) x
53855 diff -urNp linux-2.6.32.41/include/asm-generic/kmap_types.h linux-2.6.32.41/include/asm-generic/kmap_types.h
53856 --- linux-2.6.32.41/include/asm-generic/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
53857 +++ linux-2.6.32.41/include/asm-generic/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
53858 @@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
53859 KMAP_D(16) KM_IRQ_PTE,
53860 KMAP_D(17) KM_NMI,
53861 KMAP_D(18) KM_NMI_PTE,
53862 -KMAP_D(19) KM_TYPE_NR
53863 +KMAP_D(19) KM_CLEARPAGE,
53864 +KMAP_D(20) KM_TYPE_NR
53865 };
53866
53867 #undef KMAP_D
53868 diff -urNp linux-2.6.32.41/include/asm-generic/pgtable.h linux-2.6.32.41/include/asm-generic/pgtable.h
53869 --- linux-2.6.32.41/include/asm-generic/pgtable.h 2011-03-27 14:31:47.000000000 -0400
53870 +++ linux-2.6.32.41/include/asm-generic/pgtable.h 2011-04-17 15:56:46.000000000 -0400
53871 @@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_ar
53872 unsigned long size);
53873 #endif
53874
53875 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
53876 +static inline unsigned long pax_open_kernel(void) { return 0; }
53877 +#endif
53878 +
53879 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
53880 +static inline unsigned long pax_close_kernel(void) { return 0; }
53881 +#endif
53882 +
53883 #endif /* !__ASSEMBLY__ */
53884
53885 #endif /* _ASM_GENERIC_PGTABLE_H */
53886 diff -urNp linux-2.6.32.41/include/asm-generic/pgtable-nopmd.h linux-2.6.32.41/include/asm-generic/pgtable-nopmd.h
53887 --- linux-2.6.32.41/include/asm-generic/pgtable-nopmd.h 2011-03-27 14:31:47.000000000 -0400
53888 +++ linux-2.6.32.41/include/asm-generic/pgtable-nopmd.h 2011-04-17 15:56:46.000000000 -0400
53889 @@ -1,14 +1,19 @@
53890 #ifndef _PGTABLE_NOPMD_H
53891 #define _PGTABLE_NOPMD_H
53892
53893 -#ifndef __ASSEMBLY__
53894 -
53895 #include <asm-generic/pgtable-nopud.h>
53896
53897 -struct mm_struct;
53898 -
53899 #define __PAGETABLE_PMD_FOLDED
53900
53901 +#define PMD_SHIFT PUD_SHIFT
53902 +#define PTRS_PER_PMD 1
53903 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
53904 +#define PMD_MASK (~(PMD_SIZE-1))
53905 +
53906 +#ifndef __ASSEMBLY__
53907 +
53908 +struct mm_struct;
53909 +
53910 /*
53911 * Having the pmd type consist of a pud gets the size right, and allows
53912 * us to conceptually access the pud entry that this pmd is folded into
53913 @@ -16,11 +21,6 @@ struct mm_struct;
53914 */
53915 typedef struct { pud_t pud; } pmd_t;
53916
53917 -#define PMD_SHIFT PUD_SHIFT
53918 -#define PTRS_PER_PMD 1
53919 -#define PMD_SIZE (1UL << PMD_SHIFT)
53920 -#define PMD_MASK (~(PMD_SIZE-1))
53921 -
53922 /*
53923 * The "pud_xxx()" functions here are trivial for a folded two-level
53924 * setup: the pmd is never bad, and a pmd always exists (as it's folded
53925 diff -urNp linux-2.6.32.41/include/asm-generic/pgtable-nopud.h linux-2.6.32.41/include/asm-generic/pgtable-nopud.h
53926 --- linux-2.6.32.41/include/asm-generic/pgtable-nopud.h 2011-03-27 14:31:47.000000000 -0400
53927 +++ linux-2.6.32.41/include/asm-generic/pgtable-nopud.h 2011-04-17 15:56:46.000000000 -0400
53928 @@ -1,10 +1,15 @@
53929 #ifndef _PGTABLE_NOPUD_H
53930 #define _PGTABLE_NOPUD_H
53931
53932 -#ifndef __ASSEMBLY__
53933 -
53934 #define __PAGETABLE_PUD_FOLDED
53935
53936 +#define PUD_SHIFT PGDIR_SHIFT
53937 +#define PTRS_PER_PUD 1
53938 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
53939 +#define PUD_MASK (~(PUD_SIZE-1))
53940 +
53941 +#ifndef __ASSEMBLY__
53942 +
53943 /*
53944 * Having the pud type consist of a pgd gets the size right, and allows
53945 * us to conceptually access the pgd entry that this pud is folded into
53946 @@ -12,11 +17,6 @@
53947 */
53948 typedef struct { pgd_t pgd; } pud_t;
53949
53950 -#define PUD_SHIFT PGDIR_SHIFT
53951 -#define PTRS_PER_PUD 1
53952 -#define PUD_SIZE (1UL << PUD_SHIFT)
53953 -#define PUD_MASK (~(PUD_SIZE-1))
53954 -
53955 /*
53956 * The "pgd_xxx()" functions here are trivial for a folded two-level
53957 * setup: the pud is never bad, and a pud always exists (as it's folded
53958 diff -urNp linux-2.6.32.41/include/asm-generic/vmlinux.lds.h linux-2.6.32.41/include/asm-generic/vmlinux.lds.h
53959 --- linux-2.6.32.41/include/asm-generic/vmlinux.lds.h 2011-03-27 14:31:47.000000000 -0400
53960 +++ linux-2.6.32.41/include/asm-generic/vmlinux.lds.h 2011-04-17 15:56:46.000000000 -0400
53961 @@ -199,6 +199,7 @@
53962 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
53963 VMLINUX_SYMBOL(__start_rodata) = .; \
53964 *(.rodata) *(.rodata.*) \
53965 + *(.data.read_only) \
53966 *(__vermagic) /* Kernel version magic */ \
53967 *(__markers_strings) /* Markers: strings */ \
53968 *(__tracepoints_strings)/* Tracepoints: strings */ \
53969 @@ -656,22 +657,24 @@
53970 * section in the linker script will go there too. @phdr should have
53971 * a leading colon.
53972 *
53973 - * Note that this macros defines __per_cpu_load as an absolute symbol.
53974 + * Note that this macros defines per_cpu_load as an absolute symbol.
53975 * If there is no need to put the percpu section at a predetermined
53976 * address, use PERCPU().
53977 */
53978 #define PERCPU_VADDR(vaddr, phdr) \
53979 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
53980 - .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
53981 + per_cpu_load = .; \
53982 + .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
53983 - LOAD_OFFSET) { \
53984 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
53985 VMLINUX_SYMBOL(__per_cpu_start) = .; \
53986 *(.data.percpu.first) \
53987 - *(.data.percpu.page_aligned) \
53988 *(.data.percpu) \
53989 + . = ALIGN(PAGE_SIZE); \
53990 + *(.data.percpu.page_aligned) \
53991 *(.data.percpu.shared_aligned) \
53992 VMLINUX_SYMBOL(__per_cpu_end) = .; \
53993 } phdr \
53994 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
53995 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
53996
53997 /**
53998 * PERCPU - define output section for percpu area, simple version
53999 diff -urNp linux-2.6.32.41/include/drm/drmP.h linux-2.6.32.41/include/drm/drmP.h
54000 --- linux-2.6.32.41/include/drm/drmP.h 2011-03-27 14:31:47.000000000 -0400
54001 +++ linux-2.6.32.41/include/drm/drmP.h 2011-04-17 15:56:46.000000000 -0400
54002 @@ -71,6 +71,7 @@
54003 #include <linux/workqueue.h>
54004 #include <linux/poll.h>
54005 #include <asm/pgalloc.h>
54006 +#include <asm/local.h>
54007 #include "drm.h"
54008
54009 #include <linux/idr.h>
54010 @@ -814,7 +815,7 @@ struct drm_driver {
54011 void (*vgaarb_irq)(struct drm_device *dev, bool state);
54012
54013 /* Driver private ops for this object */
54014 - struct vm_operations_struct *gem_vm_ops;
54015 + const struct vm_operations_struct *gem_vm_ops;
54016
54017 int major;
54018 int minor;
54019 @@ -917,7 +918,7 @@ struct drm_device {
54020
54021 /** \name Usage Counters */
54022 /*@{ */
54023 - int open_count; /**< Outstanding files open */
54024 + local_t open_count; /**< Outstanding files open */
54025 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
54026 atomic_t vma_count; /**< Outstanding vma areas open */
54027 int buf_use; /**< Buffers in use -- cannot alloc */
54028 @@ -928,7 +929,7 @@ struct drm_device {
54029 /*@{ */
54030 unsigned long counters;
54031 enum drm_stat_type types[15];
54032 - atomic_t counts[15];
54033 + atomic_unchecked_t counts[15];
54034 /*@} */
54035
54036 struct list_head filelist;
54037 @@ -1016,7 +1017,7 @@ struct drm_device {
54038 struct pci_controller *hose;
54039 #endif
54040 struct drm_sg_mem *sg; /**< Scatter gather memory */
54041 - unsigned int num_crtcs; /**< Number of CRTCs on this device */
54042 + unsigned int num_crtcs; /**< Number of CRTCs on this device */
54043 void *dev_private; /**< device private data */
54044 void *mm_private;
54045 struct address_space *dev_mapping;
54046 @@ -1042,11 +1043,11 @@ struct drm_device {
54047 spinlock_t object_name_lock;
54048 struct idr object_name_idr;
54049 atomic_t object_count;
54050 - atomic_t object_memory;
54051 + atomic_unchecked_t object_memory;
54052 atomic_t pin_count;
54053 - atomic_t pin_memory;
54054 + atomic_unchecked_t pin_memory;
54055 atomic_t gtt_count;
54056 - atomic_t gtt_memory;
54057 + atomic_unchecked_t gtt_memory;
54058 uint32_t gtt_total;
54059 uint32_t invalidate_domains; /* domains pending invalidation */
54060 uint32_t flush_domains; /* domains pending flush */
54061 diff -urNp linux-2.6.32.41/include/linux/a.out.h linux-2.6.32.41/include/linux/a.out.h
54062 --- linux-2.6.32.41/include/linux/a.out.h 2011-03-27 14:31:47.000000000 -0400
54063 +++ linux-2.6.32.41/include/linux/a.out.h 2011-04-17 15:56:46.000000000 -0400
54064 @@ -39,6 +39,14 @@ enum machine_type {
54065 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
54066 };
54067
54068 +/* Constants for the N_FLAGS field */
54069 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
54070 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
54071 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
54072 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
54073 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
54074 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
54075 +
54076 #if !defined (N_MAGIC)
54077 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
54078 #endif
54079 diff -urNp linux-2.6.32.41/include/linux/atmdev.h linux-2.6.32.41/include/linux/atmdev.h
54080 --- linux-2.6.32.41/include/linux/atmdev.h 2011-03-27 14:31:47.000000000 -0400
54081 +++ linux-2.6.32.41/include/linux/atmdev.h 2011-04-17 15:56:46.000000000 -0400
54082 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
54083 #endif
54084
54085 struct k_atm_aal_stats {
54086 -#define __HANDLE_ITEM(i) atomic_t i
54087 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
54088 __AAL_STAT_ITEMS
54089 #undef __HANDLE_ITEM
54090 };
54091 diff -urNp linux-2.6.32.41/include/linux/backlight.h linux-2.6.32.41/include/linux/backlight.h
54092 --- linux-2.6.32.41/include/linux/backlight.h 2011-03-27 14:31:47.000000000 -0400
54093 +++ linux-2.6.32.41/include/linux/backlight.h 2011-04-17 15:56:46.000000000 -0400
54094 @@ -36,18 +36,18 @@ struct backlight_device;
54095 struct fb_info;
54096
54097 struct backlight_ops {
54098 - unsigned int options;
54099 + const unsigned int options;
54100
54101 #define BL_CORE_SUSPENDRESUME (1 << 0)
54102
54103 /* Notify the backlight driver some property has changed */
54104 - int (*update_status)(struct backlight_device *);
54105 + int (* const update_status)(struct backlight_device *);
54106 /* Return the current backlight brightness (accounting for power,
54107 fb_blank etc.) */
54108 - int (*get_brightness)(struct backlight_device *);
54109 + int (* const get_brightness)(struct backlight_device *);
54110 /* Check if given framebuffer device is the one bound to this backlight;
54111 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
54112 - int (*check_fb)(struct fb_info *);
54113 + int (* const check_fb)(struct fb_info *);
54114 };
54115
54116 /* This structure defines all the properties of a backlight */
54117 @@ -86,7 +86,7 @@ struct backlight_device {
54118 registered this device has been unloaded, and if class_get_devdata()
54119 points to something in the body of that driver, it is also invalid. */
54120 struct mutex ops_lock;
54121 - struct backlight_ops *ops;
54122 + const struct backlight_ops *ops;
54123
54124 /* The framebuffer notifier block */
54125 struct notifier_block fb_notif;
54126 @@ -103,7 +103,7 @@ static inline void backlight_update_stat
54127 }
54128
54129 extern struct backlight_device *backlight_device_register(const char *name,
54130 - struct device *dev, void *devdata, struct backlight_ops *ops);
54131 + struct device *dev, void *devdata, const struct backlight_ops *ops);
54132 extern void backlight_device_unregister(struct backlight_device *bd);
54133 extern void backlight_force_update(struct backlight_device *bd,
54134 enum backlight_update_reason reason);
54135 diff -urNp linux-2.6.32.41/include/linux/binfmts.h linux-2.6.32.41/include/linux/binfmts.h
54136 --- linux-2.6.32.41/include/linux/binfmts.h 2011-04-17 17:00:52.000000000 -0400
54137 +++ linux-2.6.32.41/include/linux/binfmts.h 2011-04-17 15:56:46.000000000 -0400
54138 @@ -83,6 +83,7 @@ struct linux_binfmt {
54139 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
54140 int (*load_shlib)(struct file *);
54141 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
54142 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
54143 unsigned long min_coredump; /* minimal dump size */
54144 int hasvdso;
54145 };
54146 diff -urNp linux-2.6.32.41/include/linux/blkdev.h linux-2.6.32.41/include/linux/blkdev.h
54147 --- linux-2.6.32.41/include/linux/blkdev.h 2011-03-27 14:31:47.000000000 -0400
54148 +++ linux-2.6.32.41/include/linux/blkdev.h 2011-04-17 15:56:46.000000000 -0400
54149 @@ -1265,19 +1265,19 @@ static inline int blk_integrity_rq(struc
54150 #endif /* CONFIG_BLK_DEV_INTEGRITY */
54151
54152 struct block_device_operations {
54153 - int (*open) (struct block_device *, fmode_t);
54154 - int (*release) (struct gendisk *, fmode_t);
54155 - int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54156 - int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54157 - int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54158 - int (*direct_access) (struct block_device *, sector_t,
54159 + int (* const open) (struct block_device *, fmode_t);
54160 + int (* const release) (struct gendisk *, fmode_t);
54161 + int (* const locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54162 + int (* const ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54163 + int (* const compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54164 + int (* const direct_access) (struct block_device *, sector_t,
54165 void **, unsigned long *);
54166 - int (*media_changed) (struct gendisk *);
54167 - unsigned long long (*set_capacity) (struct gendisk *,
54168 + int (* const media_changed) (struct gendisk *);
54169 + unsigned long long (* const set_capacity) (struct gendisk *,
54170 unsigned long long);
54171 - int (*revalidate_disk) (struct gendisk *);
54172 - int (*getgeo)(struct block_device *, struct hd_geometry *);
54173 - struct module *owner;
54174 + int (* const revalidate_disk) (struct gendisk *);
54175 + int (*const getgeo)(struct block_device *, struct hd_geometry *);
54176 + struct module * const owner;
54177 };
54178
54179 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
54180 diff -urNp linux-2.6.32.41/include/linux/blktrace_api.h linux-2.6.32.41/include/linux/blktrace_api.h
54181 --- linux-2.6.32.41/include/linux/blktrace_api.h 2011-03-27 14:31:47.000000000 -0400
54182 +++ linux-2.6.32.41/include/linux/blktrace_api.h 2011-05-04 17:56:28.000000000 -0400
54183 @@ -160,7 +160,7 @@ struct blk_trace {
54184 struct dentry *dir;
54185 struct dentry *dropped_file;
54186 struct dentry *msg_file;
54187 - atomic_t dropped;
54188 + atomic_unchecked_t dropped;
54189 };
54190
54191 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
54192 diff -urNp linux-2.6.32.41/include/linux/byteorder/little_endian.h linux-2.6.32.41/include/linux/byteorder/little_endian.h
54193 --- linux-2.6.32.41/include/linux/byteorder/little_endian.h 2011-03-27 14:31:47.000000000 -0400
54194 +++ linux-2.6.32.41/include/linux/byteorder/little_endian.h 2011-04-17 15:56:46.000000000 -0400
54195 @@ -42,51 +42,51 @@
54196
54197 static inline __le64 __cpu_to_le64p(const __u64 *p)
54198 {
54199 - return (__force __le64)*p;
54200 + return (__force const __le64)*p;
54201 }
54202 static inline __u64 __le64_to_cpup(const __le64 *p)
54203 {
54204 - return (__force __u64)*p;
54205 + return (__force const __u64)*p;
54206 }
54207 static inline __le32 __cpu_to_le32p(const __u32 *p)
54208 {
54209 - return (__force __le32)*p;
54210 + return (__force const __le32)*p;
54211 }
54212 static inline __u32 __le32_to_cpup(const __le32 *p)
54213 {
54214 - return (__force __u32)*p;
54215 + return (__force const __u32)*p;
54216 }
54217 static inline __le16 __cpu_to_le16p(const __u16 *p)
54218 {
54219 - return (__force __le16)*p;
54220 + return (__force const __le16)*p;
54221 }
54222 static inline __u16 __le16_to_cpup(const __le16 *p)
54223 {
54224 - return (__force __u16)*p;
54225 + return (__force const __u16)*p;
54226 }
54227 static inline __be64 __cpu_to_be64p(const __u64 *p)
54228 {
54229 - return (__force __be64)__swab64p(p);
54230 + return (__force const __be64)__swab64p(p);
54231 }
54232 static inline __u64 __be64_to_cpup(const __be64 *p)
54233 {
54234 - return __swab64p((__u64 *)p);
54235 + return __swab64p((const __u64 *)p);
54236 }
54237 static inline __be32 __cpu_to_be32p(const __u32 *p)
54238 {
54239 - return (__force __be32)__swab32p(p);
54240 + return (__force const __be32)__swab32p(p);
54241 }
54242 static inline __u32 __be32_to_cpup(const __be32 *p)
54243 {
54244 - return __swab32p((__u32 *)p);
54245 + return __swab32p((const __u32 *)p);
54246 }
54247 static inline __be16 __cpu_to_be16p(const __u16 *p)
54248 {
54249 - return (__force __be16)__swab16p(p);
54250 + return (__force const __be16)__swab16p(p);
54251 }
54252 static inline __u16 __be16_to_cpup(const __be16 *p)
54253 {
54254 - return __swab16p((__u16 *)p);
54255 + return __swab16p((const __u16 *)p);
54256 }
54257 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
54258 #define __le64_to_cpus(x) do { (void)(x); } while (0)
54259 diff -urNp linux-2.6.32.41/include/linux/cache.h linux-2.6.32.41/include/linux/cache.h
54260 --- linux-2.6.32.41/include/linux/cache.h 2011-03-27 14:31:47.000000000 -0400
54261 +++ linux-2.6.32.41/include/linux/cache.h 2011-04-17 15:56:46.000000000 -0400
54262 @@ -16,6 +16,10 @@
54263 #define __read_mostly
54264 #endif
54265
54266 +#ifndef __read_only
54267 +#define __read_only __read_mostly
54268 +#endif
54269 +
54270 #ifndef ____cacheline_aligned
54271 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
54272 #endif
54273 diff -urNp linux-2.6.32.41/include/linux/capability.h linux-2.6.32.41/include/linux/capability.h
54274 --- linux-2.6.32.41/include/linux/capability.h 2011-03-27 14:31:47.000000000 -0400
54275 +++ linux-2.6.32.41/include/linux/capability.h 2011-04-17 15:56:46.000000000 -0400
54276 @@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff
54277 (security_real_capable_noaudit((t), (cap)) == 0)
54278
54279 extern int capable(int cap);
54280 +int capable_nolog(int cap);
54281
54282 /* audit system wants to get cap info from files as well */
54283 struct dentry;
54284 diff -urNp linux-2.6.32.41/include/linux/compiler-gcc4.h linux-2.6.32.41/include/linux/compiler-gcc4.h
54285 --- linux-2.6.32.41/include/linux/compiler-gcc4.h 2011-03-27 14:31:47.000000000 -0400
54286 +++ linux-2.6.32.41/include/linux/compiler-gcc4.h 2011-04-17 15:56:46.000000000 -0400
54287 @@ -36,4 +36,8 @@
54288 the kernel context */
54289 #define __cold __attribute__((__cold__))
54290
54291 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
54292 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
54293 +#define __bos0(ptr) __bos((ptr), 0)
54294 +#define __bos1(ptr) __bos((ptr), 1)
54295 #endif
54296 diff -urNp linux-2.6.32.41/include/linux/compiler.h linux-2.6.32.41/include/linux/compiler.h
54297 --- linux-2.6.32.41/include/linux/compiler.h 2011-03-27 14:31:47.000000000 -0400
54298 +++ linux-2.6.32.41/include/linux/compiler.h 2011-04-17 15:56:46.000000000 -0400
54299 @@ -256,6 +256,22 @@ void ftrace_likely_update(struct ftrace_
54300 #define __cold
54301 #endif
54302
54303 +#ifndef __alloc_size
54304 +#define __alloc_size
54305 +#endif
54306 +
54307 +#ifndef __bos
54308 +#define __bos
54309 +#endif
54310 +
54311 +#ifndef __bos0
54312 +#define __bos0
54313 +#endif
54314 +
54315 +#ifndef __bos1
54316 +#define __bos1
54317 +#endif
54318 +
54319 /* Simple shorthand for a section definition */
54320 #ifndef __section
54321 # define __section(S) __attribute__ ((__section__(#S)))
54322 @@ -278,6 +294,7 @@ void ftrace_likely_update(struct ftrace_
54323 * use is to mediate communication between process-level code and irq/NMI
54324 * handlers, all running on the same CPU.
54325 */
54326 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
54327 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
54328 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
54329
54330 #endif /* __LINUX_COMPILER_H */
54331 diff -urNp linux-2.6.32.41/include/linux/dcache.h linux-2.6.32.41/include/linux/dcache.h
54332 --- linux-2.6.32.41/include/linux/dcache.h 2011-03-27 14:31:47.000000000 -0400
54333 +++ linux-2.6.32.41/include/linux/dcache.h 2011-04-23 13:34:46.000000000 -0400
54334 @@ -119,6 +119,8 @@ struct dentry {
54335 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
54336 };
54337
54338 +#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
54339 +
54340 /*
54341 * dentry->d_lock spinlock nesting subclasses:
54342 *
54343 diff -urNp linux-2.6.32.41/include/linux/decompress/mm.h linux-2.6.32.41/include/linux/decompress/mm.h
54344 --- linux-2.6.32.41/include/linux/decompress/mm.h 2011-03-27 14:31:47.000000000 -0400
54345 +++ linux-2.6.32.41/include/linux/decompress/mm.h 2011-04-17 15:56:46.000000000 -0400
54346 @@ -78,7 +78,7 @@ static void free(void *where)
54347 * warnings when not needed (indeed large_malloc / large_free are not
54348 * needed by inflate */
54349
54350 -#define malloc(a) kmalloc(a, GFP_KERNEL)
54351 +#define malloc(a) kmalloc((a), GFP_KERNEL)
54352 #define free(a) kfree(a)
54353
54354 #define large_malloc(a) vmalloc(a)
54355 diff -urNp linux-2.6.32.41/include/linux/dma-mapping.h linux-2.6.32.41/include/linux/dma-mapping.h
54356 --- linux-2.6.32.41/include/linux/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
54357 +++ linux-2.6.32.41/include/linux/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
54358 @@ -16,50 +16,50 @@ enum dma_data_direction {
54359 };
54360
54361 struct dma_map_ops {
54362 - void* (*alloc_coherent)(struct device *dev, size_t size,
54363 + void* (* const alloc_coherent)(struct device *dev, size_t size,
54364 dma_addr_t *dma_handle, gfp_t gfp);
54365 - void (*free_coherent)(struct device *dev, size_t size,
54366 + void (* const free_coherent)(struct device *dev, size_t size,
54367 void *vaddr, dma_addr_t dma_handle);
54368 - dma_addr_t (*map_page)(struct device *dev, struct page *page,
54369 + dma_addr_t (* const map_page)(struct device *dev, struct page *page,
54370 unsigned long offset, size_t size,
54371 enum dma_data_direction dir,
54372 struct dma_attrs *attrs);
54373 - void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
54374 + void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
54375 size_t size, enum dma_data_direction dir,
54376 struct dma_attrs *attrs);
54377 - int (*map_sg)(struct device *dev, struct scatterlist *sg,
54378 + int (* const map_sg)(struct device *dev, struct scatterlist *sg,
54379 int nents, enum dma_data_direction dir,
54380 struct dma_attrs *attrs);
54381 - void (*unmap_sg)(struct device *dev,
54382 + void (* const unmap_sg)(struct device *dev,
54383 struct scatterlist *sg, int nents,
54384 enum dma_data_direction dir,
54385 struct dma_attrs *attrs);
54386 - void (*sync_single_for_cpu)(struct device *dev,
54387 + void (* const sync_single_for_cpu)(struct device *dev,
54388 dma_addr_t dma_handle, size_t size,
54389 enum dma_data_direction dir);
54390 - void (*sync_single_for_device)(struct device *dev,
54391 + void (* const sync_single_for_device)(struct device *dev,
54392 dma_addr_t dma_handle, size_t size,
54393 enum dma_data_direction dir);
54394 - void (*sync_single_range_for_cpu)(struct device *dev,
54395 + void (* const sync_single_range_for_cpu)(struct device *dev,
54396 dma_addr_t dma_handle,
54397 unsigned long offset,
54398 size_t size,
54399 enum dma_data_direction dir);
54400 - void (*sync_single_range_for_device)(struct device *dev,
54401 + void (* const sync_single_range_for_device)(struct device *dev,
54402 dma_addr_t dma_handle,
54403 unsigned long offset,
54404 size_t size,
54405 enum dma_data_direction dir);
54406 - void (*sync_sg_for_cpu)(struct device *dev,
54407 + void (* const sync_sg_for_cpu)(struct device *dev,
54408 struct scatterlist *sg, int nents,
54409 enum dma_data_direction dir);
54410 - void (*sync_sg_for_device)(struct device *dev,
54411 + void (* const sync_sg_for_device)(struct device *dev,
54412 struct scatterlist *sg, int nents,
54413 enum dma_data_direction dir);
54414 - int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
54415 - int (*dma_supported)(struct device *dev, u64 mask);
54416 + int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
54417 + int (* const dma_supported)(struct device *dev, u64 mask);
54418 int (*set_dma_mask)(struct device *dev, u64 mask);
54419 - int is_phys;
54420 + const int is_phys;
54421 };
54422
54423 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
54424 diff -urNp linux-2.6.32.41/include/linux/dst.h linux-2.6.32.41/include/linux/dst.h
54425 --- linux-2.6.32.41/include/linux/dst.h 2011-03-27 14:31:47.000000000 -0400
54426 +++ linux-2.6.32.41/include/linux/dst.h 2011-04-17 15:56:46.000000000 -0400
54427 @@ -380,7 +380,7 @@ struct dst_node
54428 struct thread_pool *pool;
54429
54430 /* Transaction IDs live here */
54431 - atomic_long_t gen;
54432 + atomic_long_unchecked_t gen;
54433
54434 /*
54435 * How frequently and how many times transaction
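The dst.h change above swaps the transaction-ID generator from atomic_long_t to atomic_long_unchecked_t. Under the PaX reference-count hardening, ordinary atomic types detect overflow and treat it as an exploit attempt; counters that are mere ID or statistics generators, where wraparound is harmless, are converted to the _unchecked variants so they skip that check. A rough, deliberately non-atomic userspace sketch of the two behaviours (function names are invented for the sketch):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* checked: treat hitting the top of the range as a bug, the way a
 * refcount-hardened atomic would */
static long inc_checked(long *v)
{
        if (*v == LONG_MAX) {
                fprintf(stderr, "counter overflow detected\n");
                abort();
        }
        return ++*v;
}

/* unchecked: plain wrapping increment, acceptable for IDs and statistics */
static unsigned long inc_unchecked(unsigned long *v)
{
        return ++*v;
}

int main(void)
{
        long refs = 0;
        unsigned long txid = ULONG_MAX;     /* about to wrap, intentionally */

        printf("refs=%ld\n", inc_checked(&refs));
        printf("txid=%lu\n", inc_unchecked(&txid));
        return 0;
}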
54436 diff -urNp linux-2.6.32.41/include/linux/elf.h linux-2.6.32.41/include/linux/elf.h
54437 --- linux-2.6.32.41/include/linux/elf.h 2011-03-27 14:31:47.000000000 -0400
54438 +++ linux-2.6.32.41/include/linux/elf.h 2011-04-17 15:56:46.000000000 -0400
54439 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
54440 #define PT_GNU_EH_FRAME 0x6474e550
54441
54442 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
54443 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
54444 +
54445 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
54446 +
54447 +/* Constants for the e_flags field */
54448 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
54449 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
54450 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
54451 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
54452 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
54453 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
54454
54455 /* These constants define the different elf file types */
54456 #define ET_NONE 0
54457 @@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
54458 #define DT_DEBUG 21
54459 #define DT_TEXTREL 22
54460 #define DT_JMPREL 23
54461 +#define DT_FLAGS 30
54462 + #define DF_TEXTREL 0x00000004
54463 #define DT_ENCODING 32
54464 #define OLD_DT_LOOS 0x60000000
54465 #define DT_LOOS 0x6000000d
54466 @@ -230,6 +243,19 @@ typedef struct elf64_hdr {
54467 #define PF_W 0x2
54468 #define PF_X 0x1
54469
54470 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
54471 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
54472 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
54473 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
54474 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
54475 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
54476 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
54477 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
54478 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
54479 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
54480 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
54481 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
54482 +
54483 typedef struct elf32_phdr{
54484 Elf32_Word p_type;
54485 Elf32_Off p_offset;
54486 @@ -322,6 +348,8 @@ typedef struct elf64_shdr {
54487 #define EI_OSABI 7
54488 #define EI_PAD 8
54489
54490 +#define EI_PAX 14
54491 +
54492 #define ELFMAG0 0x7f /* EI_MAG */
54493 #define ELFMAG1 'E'
54494 #define ELFMAG2 'L'
54495 @@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
54496 #define elf_phdr elf32_phdr
54497 #define elf_note elf32_note
54498 #define elf_addr_t Elf32_Off
54499 +#define elf_dyn Elf32_Dyn
54500
54501 #else
54502
54503 @@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
54504 #define elf_phdr elf64_phdr
54505 #define elf_note elf64_note
54506 #define elf_addr_t Elf64_Off
54507 +#define elf_dyn Elf64_Dyn
54508
54509 #endif
54510
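The elf.h hunk above adds the PT_PAX_FLAGS program-header type, the PF_*/PF_NO* soft-mode flag bits, and the EI_PAX e_ident byte used by the legacy EI-PAX marking scheme. A small hypothetical userspace reader that dumps two of those flags from a 64-bit ELF file; only the macro values are taken from the hunk, everything else is illustrative and assumes a well-formed ELF64 image:

#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>

#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)   /* value from the hunk above */
#define PF_MPROTECT  (1U << 8)
#define PF_RANDMMAP  (1U << 14)

int main(int argc, char **argv)
{
        struct stat st;
        const Elf64_Ehdr *eh;
        const Elf64_Phdr *ph;
        int fd, i;

        if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0 || fstat(fd, &st) < 0)
                return 1;
        eh = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
        if (eh == MAP_FAILED)
                return 1;
        ph = (const Elf64_Phdr *)((const char *)eh + eh->e_phoff);
        for (i = 0; i < eh->e_phnum; i++) {
                if (ph[i].p_type != PT_PAX_FLAGS)
                        continue;
                printf("MPROTECT %s, RANDMMAP %s\n",
                       (ph[i].p_flags & PF_MPROTECT) ? "enabled" : "not forced",
                       (ph[i].p_flags & PF_RANDMMAP) ? "enabled" : "not forced");
                return 0;
        }
        puts("no PT_PAX_FLAGS program header");
        return 0;
}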
54511 diff -urNp linux-2.6.32.41/include/linux/fscache-cache.h linux-2.6.32.41/include/linux/fscache-cache.h
54512 --- linux-2.6.32.41/include/linux/fscache-cache.h 2011-03-27 14:31:47.000000000 -0400
54513 +++ linux-2.6.32.41/include/linux/fscache-cache.h 2011-05-04 17:56:28.000000000 -0400
54514 @@ -116,7 +116,7 @@ struct fscache_operation {
54515 #endif
54516 };
54517
54518 -extern atomic_t fscache_op_debug_id;
54519 +extern atomic_unchecked_t fscache_op_debug_id;
54520 extern const struct slow_work_ops fscache_op_slow_work_ops;
54521
54522 extern void fscache_enqueue_operation(struct fscache_operation *);
54523 @@ -134,7 +134,7 @@ static inline void fscache_operation_ini
54524 fscache_operation_release_t release)
54525 {
54526 atomic_set(&op->usage, 1);
54527 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
54528 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
54529 op->release = release;
54530 INIT_LIST_HEAD(&op->pend_link);
54531 fscache_set_op_state(op, "Init");
54532 diff -urNp linux-2.6.32.41/include/linux/fs.h linux-2.6.32.41/include/linux/fs.h
54533 --- linux-2.6.32.41/include/linux/fs.h 2011-03-27 14:31:47.000000000 -0400
54534 +++ linux-2.6.32.41/include/linux/fs.h 2011-04-17 15:56:46.000000000 -0400
54535 @@ -90,6 +90,11 @@ struct inodes_stat_t {
54536 /* Expect random access pattern */
54537 #define FMODE_RANDOM ((__force fmode_t)4096)
54538
54539 +/* Hack for grsec so as not to require read permission simply to execute
54540 + * a binary
54541 + */
54542 +#define FMODE_GREXEC ((__force fmode_t)0x2000000)
54543 +
54544 /*
54545 * The below are the various read and write types that we support. Some of
54546 * them include behavioral modifiers that send information down to the
54547 @@ -568,41 +573,41 @@ typedef int (*read_actor_t)(read_descrip
54548 unsigned long, unsigned long);
54549
54550 struct address_space_operations {
54551 - int (*writepage)(struct page *page, struct writeback_control *wbc);
54552 - int (*readpage)(struct file *, struct page *);
54553 - void (*sync_page)(struct page *);
54554 + int (* const writepage)(struct page *page, struct writeback_control *wbc);
54555 + int (* const readpage)(struct file *, struct page *);
54556 + void (* const sync_page)(struct page *);
54557
54558 /* Write back some dirty pages from this mapping. */
54559 - int (*writepages)(struct address_space *, struct writeback_control *);
54560 + int (* const writepages)(struct address_space *, struct writeback_control *);
54561
54562 /* Set a page dirty. Return true if this dirtied it */
54563 - int (*set_page_dirty)(struct page *page);
54564 + int (* const set_page_dirty)(struct page *page);
54565
54566 - int (*readpages)(struct file *filp, struct address_space *mapping,
54567 + int (* const readpages)(struct file *filp, struct address_space *mapping,
54568 struct list_head *pages, unsigned nr_pages);
54569
54570 - int (*write_begin)(struct file *, struct address_space *mapping,
54571 + int (* const write_begin)(struct file *, struct address_space *mapping,
54572 loff_t pos, unsigned len, unsigned flags,
54573 struct page **pagep, void **fsdata);
54574 - int (*write_end)(struct file *, struct address_space *mapping,
54575 + int (* const write_end)(struct file *, struct address_space *mapping,
54576 loff_t pos, unsigned len, unsigned copied,
54577 struct page *page, void *fsdata);
54578
54579 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
54580 - sector_t (*bmap)(struct address_space *, sector_t);
54581 - void (*invalidatepage) (struct page *, unsigned long);
54582 - int (*releasepage) (struct page *, gfp_t);
54583 - ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
54584 + sector_t (* const bmap)(struct address_space *, sector_t);
54585 + void (* const invalidatepage) (struct page *, unsigned long);
54586 + int (* const releasepage) (struct page *, gfp_t);
54587 + ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
54588 loff_t offset, unsigned long nr_segs);
54589 - int (*get_xip_mem)(struct address_space *, pgoff_t, int,
54590 + int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
54591 void **, unsigned long *);
54592 /* migrate the contents of a page to the specified target */
54593 - int (*migratepage) (struct address_space *,
54594 + int (* const migratepage) (struct address_space *,
54595 struct page *, struct page *);
54596 - int (*launder_page) (struct page *);
54597 - int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
54598 + int (* const launder_page) (struct page *);
54599 + int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
54600 unsigned long);
54601 - int (*error_remove_page)(struct address_space *, struct page *);
54602 + int (* const error_remove_page)(struct address_space *, struct page *);
54603 };
54604
54605 /*
54606 @@ -1030,19 +1035,19 @@ static inline int file_check_writeable(s
54607 typedef struct files_struct *fl_owner_t;
54608
54609 struct file_lock_operations {
54610 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
54611 - void (*fl_release_private)(struct file_lock *);
54612 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
54613 + void (* const fl_release_private)(struct file_lock *);
54614 };
54615
54616 struct lock_manager_operations {
54617 - int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
54618 - void (*fl_notify)(struct file_lock *); /* unblock callback */
54619 - int (*fl_grant)(struct file_lock *, struct file_lock *, int);
54620 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
54621 - void (*fl_release_private)(struct file_lock *);
54622 - void (*fl_break)(struct file_lock *);
54623 - int (*fl_mylease)(struct file_lock *, struct file_lock *);
54624 - int (*fl_change)(struct file_lock **, int);
54625 + int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
54626 + void (* const fl_notify)(struct file_lock *); /* unblock callback */
54627 + int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
54628 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
54629 + void (* const fl_release_private)(struct file_lock *);
54630 + void (* const fl_break)(struct file_lock *);
54631 + int (* const fl_mylease)(struct file_lock *, struct file_lock *);
54632 + int (* const fl_change)(struct file_lock **, int);
54633 };
54634
54635 struct lock_manager {
54636 @@ -1441,7 +1446,7 @@ struct fiemap_extent_info {
54637 unsigned int fi_flags; /* Flags as passed from user */
54638 unsigned int fi_extents_mapped; /* Number of mapped extents */
54639 unsigned int fi_extents_max; /* Size of fiemap_extent array */
54640 - struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
54641 + struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
54642 * array */
54643 };
54644 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
54645 @@ -1558,30 +1563,30 @@ extern ssize_t vfs_writev(struct file *,
54646 unsigned long, loff_t *);
54647
54648 struct super_operations {
54649 - struct inode *(*alloc_inode)(struct super_block *sb);
54650 - void (*destroy_inode)(struct inode *);
54651 + struct inode *(* const alloc_inode)(struct super_block *sb);
54652 + void (* const destroy_inode)(struct inode *);
54653
54654 - void (*dirty_inode) (struct inode *);
54655 - int (*write_inode) (struct inode *, int);
54656 - void (*drop_inode) (struct inode *);
54657 - void (*delete_inode) (struct inode *);
54658 - void (*put_super) (struct super_block *);
54659 - void (*write_super) (struct super_block *);
54660 - int (*sync_fs)(struct super_block *sb, int wait);
54661 - int (*freeze_fs) (struct super_block *);
54662 - int (*unfreeze_fs) (struct super_block *);
54663 - int (*statfs) (struct dentry *, struct kstatfs *);
54664 - int (*remount_fs) (struct super_block *, int *, char *);
54665 - void (*clear_inode) (struct inode *);
54666 - void (*umount_begin) (struct super_block *);
54667 + void (* const dirty_inode) (struct inode *);
54668 + int (* const write_inode) (struct inode *, int);
54669 + void (* const drop_inode) (struct inode *);
54670 + void (* const delete_inode) (struct inode *);
54671 + void (* const put_super) (struct super_block *);
54672 + void (* const write_super) (struct super_block *);
54673 + int (* const sync_fs)(struct super_block *sb, int wait);
54674 + int (* const freeze_fs) (struct super_block *);
54675 + int (* const unfreeze_fs) (struct super_block *);
54676 + int (* const statfs) (struct dentry *, struct kstatfs *);
54677 + int (* const remount_fs) (struct super_block *, int *, char *);
54678 + void (* const clear_inode) (struct inode *);
54679 + void (* const umount_begin) (struct super_block *);
54680
54681 - int (*show_options)(struct seq_file *, struct vfsmount *);
54682 - int (*show_stats)(struct seq_file *, struct vfsmount *);
54683 + int (* const show_options)(struct seq_file *, struct vfsmount *);
54684 + int (* const show_stats)(struct seq_file *, struct vfsmount *);
54685 #ifdef CONFIG_QUOTA
54686 - ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
54687 - ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
54688 + ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
54689 + ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
54690 #endif
54691 - int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
54692 + int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
54693 };
54694
54695 /*
54696 diff -urNp linux-2.6.32.41/include/linux/fs_struct.h linux-2.6.32.41/include/linux/fs_struct.h
54697 --- linux-2.6.32.41/include/linux/fs_struct.h 2011-03-27 14:31:47.000000000 -0400
54698 +++ linux-2.6.32.41/include/linux/fs_struct.h 2011-04-17 15:56:46.000000000 -0400
54699 @@ -4,7 +4,7 @@
54700 #include <linux/path.h>
54701
54702 struct fs_struct {
54703 - int users;
54704 + atomic_t users;
54705 rwlock_t lock;
54706 int umask;
54707 int in_exec;
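The fs_struct hunk above converts the plain int users count to an atomic_t so that taking and dropping a reference on the shared fs_struct no longer needs the rwlock just to adjust the count. A simplified userspace analogue using C11 atomics; the structure and helper names are invented for the sketch:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* stand-in for a shared object whose user count is updated locklessly */
struct shared {
        atomic_int users;
        int umask;
};

static struct shared *shared_get(struct shared *s)
{
        atomic_fetch_add(&s->users, 1);        /* no lock taken for the count */
        return s;
}

static void shared_put(struct shared *s)
{
        if (atomic_fetch_sub(&s->users, 1) == 1)
                free(s);                        /* last user releases the object */
}

int main(void)
{
        struct shared *s = calloc(1, sizeof(*s));

        if (!s)
                return 1;
        atomic_init(&s->users, 1);
        shared_get(s);
        shared_put(s);
        printf("users now %d\n", atomic_load(&s->users));
        shared_put(s);                          /* drops the last reference */
        return 0;
}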
54708 diff -urNp linux-2.6.32.41/include/linux/ftrace_event.h linux-2.6.32.41/include/linux/ftrace_event.h
54709 --- linux-2.6.32.41/include/linux/ftrace_event.h 2011-03-27 14:31:47.000000000 -0400
54710 +++ linux-2.6.32.41/include/linux/ftrace_event.h 2011-05-04 17:56:28.000000000 -0400
54711 @@ -163,7 +163,7 @@ extern int trace_define_field(struct ftr
54712 int filter_type);
54713 extern int trace_define_common_fields(struct ftrace_event_call *call);
54714
54715 -#define is_signed_type(type) (((type)(-1)) < 0)
54716 +#define is_signed_type(type) (((type)(-1)) < (type)1)
54717
54718 int trace_set_clr_event(const char *system, const char *event, int set);
54719
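The ftrace_event.h change above rewrites is_signed_type() so the expansion no longer contains a literal "unsigned < 0" comparison, which compilers flag as always-false (and which breaks -Werror builds); comparing (type)(-1) against (type)1 produces the same truth value for every integer type. A standalone demonstration, with macro names local to this sketch:

#include <stdio.h>

#define is_signed_type_old(type) (((type)(-1)) < 0)          /* warns on unsigned types */
#define is_signed_type_new(type) (((type)(-1)) < (type)1)    /* same result, no warning */

int main(void)
{
        printf("int          old=%d new=%d\n",
               is_signed_type_old(int), is_signed_type_new(int));
        printf("unsigned int old=%d new=%d\n",
               is_signed_type_old(unsigned int), is_signed_type_new(unsigned int));
        return 0;
}

For a signed type (type)(-1) is -1, which is below both 0 and 1; for an unsigned type it is the maximum value, which is below neither, so the two forms agree everywhere.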
54720 diff -urNp linux-2.6.32.41/include/linux/genhd.h linux-2.6.32.41/include/linux/genhd.h
54721 --- linux-2.6.32.41/include/linux/genhd.h 2011-03-27 14:31:47.000000000 -0400
54722 +++ linux-2.6.32.41/include/linux/genhd.h 2011-04-17 15:56:46.000000000 -0400
54723 @@ -161,7 +161,7 @@ struct gendisk {
54724
54725 struct timer_rand_state *random;
54726
54727 - atomic_t sync_io; /* RAID */
54728 + atomic_unchecked_t sync_io; /* RAID */
54729 struct work_struct async_notify;
54730 #ifdef CONFIG_BLK_DEV_INTEGRITY
54731 struct blk_integrity *integrity;
54732 diff -urNp linux-2.6.32.41/include/linux/gracl.h linux-2.6.32.41/include/linux/gracl.h
54733 --- linux-2.6.32.41/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
54734 +++ linux-2.6.32.41/include/linux/gracl.h 2011-04-17 15:56:46.000000000 -0400
54735 @@ -0,0 +1,317 @@
54736 +#ifndef GR_ACL_H
54737 +#define GR_ACL_H
54738 +
54739 +#include <linux/grdefs.h>
54740 +#include <linux/resource.h>
54741 +#include <linux/capability.h>
54742 +#include <linux/dcache.h>
54743 +#include <asm/resource.h>
54744 +
54745 +/* Major status information */
54746 +
54747 +#define GR_VERSION "grsecurity 2.2.2"
54748 +#define GRSECURITY_VERSION 0x2202
54749 +
54750 +enum {
54751 + GR_SHUTDOWN = 0,
54752 + GR_ENABLE = 1,
54753 + GR_SPROLE = 2,
54754 + GR_RELOAD = 3,
54755 + GR_SEGVMOD = 4,
54756 + GR_STATUS = 5,
54757 + GR_UNSPROLE = 6,
54758 + GR_PASSSET = 7,
54759 + GR_SPROLEPAM = 8,
54760 +};
54761 +
54762 +/* Password setup definitions
54763 + * kernel/grhash.c */
54764 +enum {
54765 + GR_PW_LEN = 128,
54766 + GR_SALT_LEN = 16,
54767 + GR_SHA_LEN = 32,
54768 +};
54769 +
54770 +enum {
54771 + GR_SPROLE_LEN = 64,
54772 +};
54773 +
54774 +enum {
54775 + GR_NO_GLOB = 0,
54776 + GR_REG_GLOB,
54777 + GR_CREATE_GLOB
54778 +};
54779 +
54780 +#define GR_NLIMITS 32
54781 +
54782 +/* Begin Data Structures */
54783 +
54784 +struct sprole_pw {
54785 + unsigned char *rolename;
54786 + unsigned char salt[GR_SALT_LEN];
54787 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
54788 +};
54789 +
54790 +struct name_entry {
54791 + __u32 key;
54792 + ino_t inode;
54793 + dev_t device;
54794 + char *name;
54795 + __u16 len;
54796 + __u8 deleted;
54797 + struct name_entry *prev;
54798 + struct name_entry *next;
54799 +};
54800 +
54801 +struct inodev_entry {
54802 + struct name_entry *nentry;
54803 + struct inodev_entry *prev;
54804 + struct inodev_entry *next;
54805 +};
54806 +
54807 +struct acl_role_db {
54808 + struct acl_role_label **r_hash;
54809 + __u32 r_size;
54810 +};
54811 +
54812 +struct inodev_db {
54813 + struct inodev_entry **i_hash;
54814 + __u32 i_size;
54815 +};
54816 +
54817 +struct name_db {
54818 + struct name_entry **n_hash;
54819 + __u32 n_size;
54820 +};
54821 +
54822 +struct crash_uid {
54823 + uid_t uid;
54824 + unsigned long expires;
54825 +};
54826 +
54827 +struct gr_hash_struct {
54828 + void **table;
54829 + void **nametable;
54830 + void *first;
54831 + __u32 table_size;
54832 + __u32 used_size;
54833 + int type;
54834 +};
54835 +
54836 +/* Userspace Grsecurity ACL data structures */
54837 +
54838 +struct acl_subject_label {
54839 + char *filename;
54840 + ino_t inode;
54841 + dev_t device;
54842 + __u32 mode;
54843 + kernel_cap_t cap_mask;
54844 + kernel_cap_t cap_lower;
54845 + kernel_cap_t cap_invert_audit;
54846 +
54847 + struct rlimit res[GR_NLIMITS];
54848 + __u32 resmask;
54849 +
54850 + __u8 user_trans_type;
54851 + __u8 group_trans_type;
54852 + uid_t *user_transitions;
54853 + gid_t *group_transitions;
54854 + __u16 user_trans_num;
54855 + __u16 group_trans_num;
54856 +
54857 + __u32 sock_families[2];
54858 + __u32 ip_proto[8];
54859 + __u32 ip_type;
54860 + struct acl_ip_label **ips;
54861 + __u32 ip_num;
54862 + __u32 inaddr_any_override;
54863 +
54864 + __u32 crashes;
54865 + unsigned long expires;
54866 +
54867 + struct acl_subject_label *parent_subject;
54868 + struct gr_hash_struct *hash;
54869 + struct acl_subject_label *prev;
54870 + struct acl_subject_label *next;
54871 +
54872 + struct acl_object_label **obj_hash;
54873 + __u32 obj_hash_size;
54874 + __u16 pax_flags;
54875 +};
54876 +
54877 +struct role_allowed_ip {
54878 + __u32 addr;
54879 + __u32 netmask;
54880 +
54881 + struct role_allowed_ip *prev;
54882 + struct role_allowed_ip *next;
54883 +};
54884 +
54885 +struct role_transition {
54886 + char *rolename;
54887 +
54888 + struct role_transition *prev;
54889 + struct role_transition *next;
54890 +};
54891 +
54892 +struct acl_role_label {
54893 + char *rolename;
54894 + uid_t uidgid;
54895 + __u16 roletype;
54896 +
54897 + __u16 auth_attempts;
54898 + unsigned long expires;
54899 +
54900 + struct acl_subject_label *root_label;
54901 + struct gr_hash_struct *hash;
54902 +
54903 + struct acl_role_label *prev;
54904 + struct acl_role_label *next;
54905 +
54906 + struct role_transition *transitions;
54907 + struct role_allowed_ip *allowed_ips;
54908 + uid_t *domain_children;
54909 + __u16 domain_child_num;
54910 +
54911 + struct acl_subject_label **subj_hash;
54912 + __u32 subj_hash_size;
54913 +};
54914 +
54915 +struct user_acl_role_db {
54916 + struct acl_role_label **r_table;
54917 + __u32 num_pointers; /* Number of allocations to track */
54918 + __u32 num_roles; /* Number of roles */
54919 + __u32 num_domain_children; /* Number of domain children */
54920 + __u32 num_subjects; /* Number of subjects */
54921 + __u32 num_objects; /* Number of objects */
54922 +};
54923 +
54924 +struct acl_object_label {
54925 + char *filename;
54926 + ino_t inode;
54927 + dev_t device;
54928 + __u32 mode;
54929 +
54930 + struct acl_subject_label *nested;
54931 + struct acl_object_label *globbed;
54932 +
54933 + /* next two structures not used */
54934 +
54935 + struct acl_object_label *prev;
54936 + struct acl_object_label *next;
54937 +};
54938 +
54939 +struct acl_ip_label {
54940 + char *iface;
54941 + __u32 addr;
54942 + __u32 netmask;
54943 + __u16 low, high;
54944 + __u8 mode;
54945 + __u32 type;
54946 + __u32 proto[8];
54947 +
54948 + /* next two structures not used */
54949 +
54950 + struct acl_ip_label *prev;
54951 + struct acl_ip_label *next;
54952 +};
54953 +
54954 +struct gr_arg {
54955 + struct user_acl_role_db role_db;
54956 + unsigned char pw[GR_PW_LEN];
54957 + unsigned char salt[GR_SALT_LEN];
54958 + unsigned char sum[GR_SHA_LEN];
54959 + unsigned char sp_role[GR_SPROLE_LEN];
54960 + struct sprole_pw *sprole_pws;
54961 + dev_t segv_device;
54962 + ino_t segv_inode;
54963 + uid_t segv_uid;
54964 + __u16 num_sprole_pws;
54965 + __u16 mode;
54966 +};
54967 +
54968 +struct gr_arg_wrapper {
54969 + struct gr_arg *arg;
54970 + __u32 version;
54971 + __u32 size;
54972 +};
54973 +
54974 +struct subject_map {
54975 + struct acl_subject_label *user;
54976 + struct acl_subject_label *kernel;
54977 + struct subject_map *prev;
54978 + struct subject_map *next;
54979 +};
54980 +
54981 +struct acl_subj_map_db {
54982 + struct subject_map **s_hash;
54983 + __u32 s_size;
54984 +};
54985 +
54986 +/* End Data Structures Section */
54987 +
54988 +/* Hash functions generated by empirical testing by Brad Spengler
54989 + Makes good use of the low bits of the inode. Generally 0-1 times
54990 + in loop for successful match. 0-3 for unsuccessful match.
54991 + Shift/add algorithm with modulus of table size and an XOR*/
54992 +
54993 +static __inline__ unsigned int
54994 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
54995 +{
54996 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
54997 +}
54998 +
54999 +static __inline__ unsigned int
55000 +shash(const struct acl_subject_label *userp, const unsigned int sz)
55001 +{
55002 + return ((const unsigned long)userp % sz);
55003 +}
55004 +
55005 +static __inline__ unsigned int
55006 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
55007 +{
55008 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
55009 +}
55010 +
55011 +static __inline__ unsigned int
55012 +nhash(const char *name, const __u16 len, const unsigned int sz)
55013 +{
55014 + return full_name_hash((const unsigned char *)name, len) % sz;
55015 +}
55016 +
55017 +#define FOR_EACH_ROLE_START(role) \
55018 + role = role_list; \
55019 + while (role) {
55020 +
55021 +#define FOR_EACH_ROLE_END(role) \
55022 + role = role->prev; \
55023 + }
55024 +
55025 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
55026 + subj = NULL; \
55027 + iter = 0; \
55028 + while (iter < role->subj_hash_size) { \
55029 + if (subj == NULL) \
55030 + subj = role->subj_hash[iter]; \
55031 + if (subj == NULL) { \
55032 + iter++; \
55033 + continue; \
55034 + }
55035 +
55036 +#define FOR_EACH_SUBJECT_END(subj,iter) \
55037 + subj = subj->next; \
55038 + if (subj == NULL) \
55039 + iter++; \
55040 + }
55041 +
55042 +
55043 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
55044 + subj = role->hash->first; \
55045 + while (subj != NULL) {
55046 +
55047 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
55048 + subj = subj->next; \
55049 + }
55050 +
55051 +#endif
55052 +
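The inline helpers at the end of gracl.h above hash uids, subject pointers, inode/device pairs and pathnames onto RBAC hash-table buckets; per the comment they were tuned empirically by the author. A tiny harness exercising the inode/device variant, with an arbitrary device number and a toy bucket count:

#include <stdio.h>
#include <sys/types.h>

/* same shift/add/XOR scheme as fhash() in the hunk above */
static unsigned int fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
{
        return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
}

int main(void)
{
        const unsigned int buckets = 64;     /* toy table size */
        ino_t ino;

        /* consecutive inodes on one device should spread across the table */
        for (ino = 1000; ino < 1008; ino++)
                printf("inode %lu -> bucket %u\n",
                       (unsigned long)ino, fhash(ino, 8, buckets));
        return 0;
}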
55053 diff -urNp linux-2.6.32.41/include/linux/gralloc.h linux-2.6.32.41/include/linux/gralloc.h
55054 --- linux-2.6.32.41/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
55055 +++ linux-2.6.32.41/include/linux/gralloc.h 2011-04-17 15:56:46.000000000 -0400
55056 @@ -0,0 +1,9 @@
55057 +#ifndef __GRALLOC_H
55058 +#define __GRALLOC_H
55059 +
55060 +void acl_free_all(void);
55061 +int acl_alloc_stack_init(unsigned long size);
55062 +void *acl_alloc(unsigned long len);
55063 +void *acl_alloc_num(unsigned long num, unsigned long len);
55064 +
55065 +#endif
55066 diff -urNp linux-2.6.32.41/include/linux/grdefs.h linux-2.6.32.41/include/linux/grdefs.h
55067 --- linux-2.6.32.41/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
55068 +++ linux-2.6.32.41/include/linux/grdefs.h 2011-06-11 16:20:26.000000000 -0400
55069 @@ -0,0 +1,140 @@
55070 +#ifndef GRDEFS_H
55071 +#define GRDEFS_H
55072 +
55073 +/* Begin grsecurity status declarations */
55074 +
55075 +enum {
55076 + GR_READY = 0x01,
55077 + GR_STATUS_INIT = 0x00 // disabled state
55078 +};
55079 +
55080 +/* Begin ACL declarations */
55081 +
55082 +/* Role flags */
55083 +
55084 +enum {
55085 + GR_ROLE_USER = 0x0001,
55086 + GR_ROLE_GROUP = 0x0002,
55087 + GR_ROLE_DEFAULT = 0x0004,
55088 + GR_ROLE_SPECIAL = 0x0008,
55089 + GR_ROLE_AUTH = 0x0010,
55090 + GR_ROLE_NOPW = 0x0020,
55091 + GR_ROLE_GOD = 0x0040,
55092 + GR_ROLE_LEARN = 0x0080,
55093 + GR_ROLE_TPE = 0x0100,
55094 + GR_ROLE_DOMAIN = 0x0200,
55095 + GR_ROLE_PAM = 0x0400,
55096 + GR_ROLE_PERSIST = 0x800
55097 +};
55098 +
55099 +/* ACL Subject and Object mode flags */
55100 +enum {
55101 + GR_DELETED = 0x80000000
55102 +};
55103 +
55104 +/* ACL Object-only mode flags */
55105 +enum {
55106 + GR_READ = 0x00000001,
55107 + GR_APPEND = 0x00000002,
55108 + GR_WRITE = 0x00000004,
55109 + GR_EXEC = 0x00000008,
55110 + GR_FIND = 0x00000010,
55111 + GR_INHERIT = 0x00000020,
55112 + GR_SETID = 0x00000040,
55113 + GR_CREATE = 0x00000080,
55114 + GR_DELETE = 0x00000100,
55115 + GR_LINK = 0x00000200,
55116 + GR_AUDIT_READ = 0x00000400,
55117 + GR_AUDIT_APPEND = 0x00000800,
55118 + GR_AUDIT_WRITE = 0x00001000,
55119 + GR_AUDIT_EXEC = 0x00002000,
55120 + GR_AUDIT_FIND = 0x00004000,
55121 + GR_AUDIT_INHERIT= 0x00008000,
55122 + GR_AUDIT_SETID = 0x00010000,
55123 + GR_AUDIT_CREATE = 0x00020000,
55124 + GR_AUDIT_DELETE = 0x00040000,
55125 + GR_AUDIT_LINK = 0x00080000,
55126 + GR_PTRACERD = 0x00100000,
55127 + GR_NOPTRACE = 0x00200000,
55128 + GR_SUPPRESS = 0x00400000,
55129 + GR_NOLEARN = 0x00800000,
55130 + GR_INIT_TRANSFER= 0x01000000
55131 +};
55132 +
55133 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
55134 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
55135 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
55136 +
55137 +/* ACL subject-only mode flags */
55138 +enum {
55139 + GR_KILL = 0x00000001,
55140 + GR_VIEW = 0x00000002,
55141 + GR_PROTECTED = 0x00000004,
55142 + GR_LEARN = 0x00000008,
55143 + GR_OVERRIDE = 0x00000010,
55144 + /* just a placeholder, this mode is only used in userspace */
55145 + GR_DUMMY = 0x00000020,
55146 + GR_PROTSHM = 0x00000040,
55147 + GR_KILLPROC = 0x00000080,
55148 + GR_KILLIPPROC = 0x00000100,
55149 + /* just a placeholder, this mode is only used in userspace */
55150 + GR_NOTROJAN = 0x00000200,
55151 + GR_PROTPROCFD = 0x00000400,
55152 + GR_PROCACCT = 0x00000800,
55153 + GR_RELAXPTRACE = 0x00001000,
55154 + GR_NESTED = 0x00002000,
55155 + GR_INHERITLEARN = 0x00004000,
55156 + GR_PROCFIND = 0x00008000,
55157 + GR_POVERRIDE = 0x00010000,
55158 + GR_KERNELAUTH = 0x00020000,
55159 + GR_ATSECURE = 0x00040000,
55160 + GR_SHMEXEC = 0x00080000
55161 +};
55162 +
55163 +enum {
55164 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
55165 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
55166 + GR_PAX_ENABLE_MPROTECT = 0x0004,
55167 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
55168 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
55169 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
55170 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
55171 + GR_PAX_DISABLE_MPROTECT = 0x0400,
55172 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
55173 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
55174 +};
55175 +
55176 +enum {
55177 + GR_ID_USER = 0x01,
55178 + GR_ID_GROUP = 0x02,
55179 +};
55180 +
55181 +enum {
55182 + GR_ID_ALLOW = 0x01,
55183 + GR_ID_DENY = 0x02,
55184 +};
55185 +
55186 +#define GR_CRASH_RES 31
55187 +#define GR_UIDTABLE_MAX 500
55188 +
55189 +/* begin resource learning section */
55190 +enum {
55191 + GR_RLIM_CPU_BUMP = 60,
55192 + GR_RLIM_FSIZE_BUMP = 50000,
55193 + GR_RLIM_DATA_BUMP = 10000,
55194 + GR_RLIM_STACK_BUMP = 1000,
55195 + GR_RLIM_CORE_BUMP = 10000,
55196 + GR_RLIM_RSS_BUMP = 500000,
55197 + GR_RLIM_NPROC_BUMP = 1,
55198 + GR_RLIM_NOFILE_BUMP = 5,
55199 + GR_RLIM_MEMLOCK_BUMP = 50000,
55200 + GR_RLIM_AS_BUMP = 500000,
55201 + GR_RLIM_LOCKS_BUMP = 2,
55202 + GR_RLIM_SIGPENDING_BUMP = 5,
55203 + GR_RLIM_MSGQUEUE_BUMP = 10000,
55204 + GR_RLIM_NICE_BUMP = 1,
55205 + GR_RLIM_RTPRIO_BUMP = 1,
55206 + GR_RLIM_RTTIME_BUMP = 1000000
55207 +};
55208 +
55209 +#endif
55210 diff -urNp linux-2.6.32.41/include/linux/grinternal.h linux-2.6.32.41/include/linux/grinternal.h
55211 --- linux-2.6.32.41/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
55212 +++ linux-2.6.32.41/include/linux/grinternal.h 2011-04-17 15:56:46.000000000 -0400
55213 @@ -0,0 +1,218 @@
55214 +#ifndef __GRINTERNAL_H
55215 +#define __GRINTERNAL_H
55216 +
55217 +#ifdef CONFIG_GRKERNSEC
55218 +
55219 +#include <linux/fs.h>
55220 +#include <linux/mnt_namespace.h>
55221 +#include <linux/nsproxy.h>
55222 +#include <linux/gracl.h>
55223 +#include <linux/grdefs.h>
55224 +#include <linux/grmsg.h>
55225 +
55226 +void gr_add_learn_entry(const char *fmt, ...)
55227 + __attribute__ ((format (printf, 1, 2)));
55228 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
55229 + const struct vfsmount *mnt);
55230 +__u32 gr_check_create(const struct dentry *new_dentry,
55231 + const struct dentry *parent,
55232 + const struct vfsmount *mnt, const __u32 mode);
55233 +int gr_check_protected_task(const struct task_struct *task);
55234 +__u32 to_gr_audit(const __u32 reqmode);
55235 +int gr_set_acls(const int type);
55236 +int gr_apply_subject_to_task(struct task_struct *task);
55237 +int gr_acl_is_enabled(void);
55238 +char gr_roletype_to_char(void);
55239 +
55240 +void gr_handle_alertkill(struct task_struct *task);
55241 +char *gr_to_filename(const struct dentry *dentry,
55242 + const struct vfsmount *mnt);
55243 +char *gr_to_filename1(const struct dentry *dentry,
55244 + const struct vfsmount *mnt);
55245 +char *gr_to_filename2(const struct dentry *dentry,
55246 + const struct vfsmount *mnt);
55247 +char *gr_to_filename3(const struct dentry *dentry,
55248 + const struct vfsmount *mnt);
55249 +
55250 +extern int grsec_enable_harden_ptrace;
55251 +extern int grsec_enable_link;
55252 +extern int grsec_enable_fifo;
55253 +extern int grsec_enable_execve;
55254 +extern int grsec_enable_shm;
55255 +extern int grsec_enable_execlog;
55256 +extern int grsec_enable_signal;
55257 +extern int grsec_enable_audit_ptrace;
55258 +extern int grsec_enable_forkfail;
55259 +extern int grsec_enable_time;
55260 +extern int grsec_enable_rofs;
55261 +extern int grsec_enable_chroot_shmat;
55262 +extern int grsec_enable_chroot_findtask;
55263 +extern int grsec_enable_chroot_mount;
55264 +extern int grsec_enable_chroot_double;
55265 +extern int grsec_enable_chroot_pivot;
55266 +extern int grsec_enable_chroot_chdir;
55267 +extern int grsec_enable_chroot_chmod;
55268 +extern int grsec_enable_chroot_mknod;
55269 +extern int grsec_enable_chroot_fchdir;
55270 +extern int grsec_enable_chroot_nice;
55271 +extern int grsec_enable_chroot_execlog;
55272 +extern int grsec_enable_chroot_caps;
55273 +extern int grsec_enable_chroot_sysctl;
55274 +extern int grsec_enable_chroot_unix;
55275 +extern int grsec_enable_tpe;
55276 +extern int grsec_tpe_gid;
55277 +extern int grsec_enable_tpe_all;
55278 +extern int grsec_enable_tpe_invert;
55279 +extern int grsec_enable_socket_all;
55280 +extern int grsec_socket_all_gid;
55281 +extern int grsec_enable_socket_client;
55282 +extern int grsec_socket_client_gid;
55283 +extern int grsec_enable_socket_server;
55284 +extern int grsec_socket_server_gid;
55285 +extern int grsec_audit_gid;
55286 +extern int grsec_enable_group;
55287 +extern int grsec_enable_audit_textrel;
55288 +extern int grsec_enable_log_rwxmaps;
55289 +extern int grsec_enable_mount;
55290 +extern int grsec_enable_chdir;
55291 +extern int grsec_resource_logging;
55292 +extern int grsec_enable_blackhole;
55293 +extern int grsec_lastack_retries;
55294 +extern int grsec_lock;
55295 +
55296 +extern spinlock_t grsec_alert_lock;
55297 +extern unsigned long grsec_alert_wtime;
55298 +extern unsigned long grsec_alert_fyet;
55299 +
55300 +extern spinlock_t grsec_audit_lock;
55301 +
55302 +extern rwlock_t grsec_exec_file_lock;
55303 +
55304 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
55305 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
55306 + (tsk)->exec_file->f_vfsmnt) : "/")
55307 +
55308 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
55309 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
55310 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
55311 +
55312 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
55313 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
55314 + (tsk)->exec_file->f_vfsmnt) : "/")
55315 +
55316 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
55317 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
55318 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
55319 +
55320 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
55321 +
55322 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
55323 +
55324 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
55325 + (task)->pid, (cred)->uid, \
55326 + (cred)->euid, (cred)->gid, (cred)->egid, \
55327 + gr_parent_task_fullpath(task), \
55328 + (task)->real_parent->comm, (task)->real_parent->pid, \
55329 + (pcred)->uid, (pcred)->euid, \
55330 + (pcred)->gid, (pcred)->egid
55331 +
55332 +#define GR_CHROOT_CAPS {{ \
55333 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
55334 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
55335 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
55336 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
55337 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
55338 + CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
55339 +
55340 +#define security_learn(normal_msg,args...) \
55341 +({ \
55342 + read_lock(&grsec_exec_file_lock); \
55343 + gr_add_learn_entry(normal_msg "\n", ## args); \
55344 + read_unlock(&grsec_exec_file_lock); \
55345 +})
55346 +
55347 +enum {
55348 + GR_DO_AUDIT,
55349 + GR_DONT_AUDIT,
55350 + GR_DONT_AUDIT_GOOD
55351 +};
55352 +
55353 +enum {
55354 + GR_TTYSNIFF,
55355 + GR_RBAC,
55356 + GR_RBAC_STR,
55357 + GR_STR_RBAC,
55358 + GR_RBAC_MODE2,
55359 + GR_RBAC_MODE3,
55360 + GR_FILENAME,
55361 + GR_SYSCTL_HIDDEN,
55362 + GR_NOARGS,
55363 + GR_ONE_INT,
55364 + GR_ONE_INT_TWO_STR,
55365 + GR_ONE_STR,
55366 + GR_STR_INT,
55367 + GR_TWO_STR_INT,
55368 + GR_TWO_INT,
55369 + GR_TWO_U64,
55370 + GR_THREE_INT,
55371 + GR_FIVE_INT_TWO_STR,
55372 + GR_TWO_STR,
55373 + GR_THREE_STR,
55374 + GR_FOUR_STR,
55375 + GR_STR_FILENAME,
55376 + GR_FILENAME_STR,
55377 + GR_FILENAME_TWO_INT,
55378 + GR_FILENAME_TWO_INT_STR,
55379 + GR_TEXTREL,
55380 + GR_PTRACE,
55381 + GR_RESOURCE,
55382 + GR_CAP,
55383 + GR_SIG,
55384 + GR_SIG2,
55385 + GR_CRASH1,
55386 + GR_CRASH2,
55387 + GR_PSACCT,
55388 + GR_RWXMAP
55389 +};
55390 +
55391 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
55392 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
55393 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
55394 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
55395 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
55396 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
55397 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
55398 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
55399 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
55400 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
55401 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
55402 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
55403 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
55404 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
55405 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
55406 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
55407 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
55408 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
55409 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
55410 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
55411 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
55412 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
55413 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
55414 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
55415 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
55416 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
55417 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
55418 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
55419 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
55420 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
55421 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
55422 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
55423 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
55424 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
55425 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
55426 +
55427 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
55428 +
55429 +#endif
55430 +
55431 +#endif
55432 diff -urNp linux-2.6.32.41/include/linux/grmsg.h linux-2.6.32.41/include/linux/grmsg.h
55433 --- linux-2.6.32.41/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
55434 +++ linux-2.6.32.41/include/linux/grmsg.h 2011-04-17 15:56:46.000000000 -0400
55435 @@ -0,0 +1,108 @@
55436 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
55437 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
55438 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
55439 +#define GR_STOPMOD_MSG "denied modification of module state by "
55440 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
55441 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
55442 +#define GR_IOPERM_MSG "denied use of ioperm() by "
55443 +#define GR_IOPL_MSG "denied use of iopl() by "
55444 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
55445 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
55446 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
55447 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
55448 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
55449 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
55450 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
55451 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
55452 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
55453 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
55454 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
55455 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
55456 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
55457 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
55458 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
55459 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
55460 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
55461 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
55462 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
55463 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
55464 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
55465 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
55466 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
55467 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
55468 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
55469 +#define GR_NPROC_MSG "denied overstep of process limit by "
55470 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
55471 +#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
55472 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
55473 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
55474 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
55475 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
55476 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
55477 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
55478 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
55479 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
55480 +#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
55481 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
55482 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
55483 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
55484 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
55485 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
55486 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
55487 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
55488 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
55489 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
55490 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
55491 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
55492 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
55493 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
55494 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
55495 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
55496 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
55497 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
55498 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
55499 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
55500 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
55501 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
55502 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
55503 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
55504 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
55505 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
55506 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
55507 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
55508 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
55509 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
55510 +#define GR_NICE_CHROOT_MSG "denied priority change by "
55511 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
55512 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
55513 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
55514 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
55515 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
55516 +#define GR_TIME_MSG "time set by "
55517 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
55518 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
55519 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
55520 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
55521 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
55522 +#define GR_BIND_MSG "denied bind() by "
55523 +#define GR_CONNECT_MSG "denied connect() by "
55524 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
55525 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
55526 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
55527 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
55528 +#define GR_CAP_ACL_MSG "use of %s denied for "
55529 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
55530 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
55531 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
55532 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
55533 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
55534 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
55535 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
55536 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
55537 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
55538 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
55539 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
55540 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
55541 +#define GR_VM86_MSG "denied use of vm86 by "
55542 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
55543 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
55544 diff -urNp linux-2.6.32.41/include/linux/grsecurity.h linux-2.6.32.41/include/linux/grsecurity.h
55545 --- linux-2.6.32.41/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
55546 +++ linux-2.6.32.41/include/linux/grsecurity.h 2011-04-17 15:56:46.000000000 -0400
55547 @@ -0,0 +1,212 @@
55548 +#ifndef GR_SECURITY_H
55549 +#define GR_SECURITY_H
55550 +#include <linux/fs.h>
55551 +#include <linux/fs_struct.h>
55552 +#include <linux/binfmts.h>
55553 +#include <linux/gracl.h>
55554 +#include <linux/compat.h>
55555 +
55556 +/* notify of brain-dead configs */
55557 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
55558 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
55559 +#endif
55560 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
55561 +#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
55562 +#endif
55563 +#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
55564 +#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
55565 +#endif
55566 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
55567 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
55568 +#endif
55569 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
55570 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
55571 +#endif
55572 +
55573 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
55574 +void gr_handle_brute_check(void);
55575 +void gr_handle_kernel_exploit(void);
55576 +int gr_process_user_ban(void);
55577 +
55578 +char gr_roletype_to_char(void);
55579 +
55580 +int gr_acl_enable_at_secure(void);
55581 +
55582 +int gr_check_user_change(int real, int effective, int fs);
55583 +int gr_check_group_change(int real, int effective, int fs);
55584 +
55585 +void gr_del_task_from_ip_table(struct task_struct *p);
55586 +
55587 +int gr_pid_is_chrooted(struct task_struct *p);
55588 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
55589 +int gr_handle_chroot_nice(void);
55590 +int gr_handle_chroot_sysctl(const int op);
55591 +int gr_handle_chroot_setpriority(struct task_struct *p,
55592 + const int niceval);
55593 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
55594 +int gr_handle_chroot_chroot(const struct dentry *dentry,
55595 + const struct vfsmount *mnt);
55596 +int gr_handle_chroot_caps(struct path *path);
55597 +void gr_handle_chroot_chdir(struct path *path);
55598 +int gr_handle_chroot_chmod(const struct dentry *dentry,
55599 + const struct vfsmount *mnt, const int mode);
55600 +int gr_handle_chroot_mknod(const struct dentry *dentry,
55601 + const struct vfsmount *mnt, const int mode);
55602 +int gr_handle_chroot_mount(const struct dentry *dentry,
55603 + const struct vfsmount *mnt,
55604 + const char *dev_name);
55605 +int gr_handle_chroot_pivot(void);
55606 +int gr_handle_chroot_unix(const pid_t pid);
55607 +
55608 +int gr_handle_rawio(const struct inode *inode);
55609 +int gr_handle_nproc(void);
55610 +
55611 +void gr_handle_ioperm(void);
55612 +void gr_handle_iopl(void);
55613 +
55614 +int gr_tpe_allow(const struct file *file);
55615 +
55616 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
55617 +void gr_clear_chroot_entries(struct task_struct *task);
55618 +
55619 +void gr_log_forkfail(const int retval);
55620 +void gr_log_timechange(void);
55621 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
55622 +void gr_log_chdir(const struct dentry *dentry,
55623 + const struct vfsmount *mnt);
55624 +void gr_log_chroot_exec(const struct dentry *dentry,
55625 + const struct vfsmount *mnt);
55626 +void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
55627 +#ifdef CONFIG_COMPAT
55628 +void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
55629 +#endif
55630 +void gr_log_remount(const char *devname, const int retval);
55631 +void gr_log_unmount(const char *devname, const int retval);
55632 +void gr_log_mount(const char *from, const char *to, const int retval);
55633 +void gr_log_textrel(struct vm_area_struct *vma);
55634 +void gr_log_rwxmmap(struct file *file);
55635 +void gr_log_rwxmprotect(struct file *file);
55636 +
55637 +int gr_handle_follow_link(const struct inode *parent,
55638 + const struct inode *inode,
55639 + const struct dentry *dentry,
55640 + const struct vfsmount *mnt);
55641 +int gr_handle_fifo(const struct dentry *dentry,
55642 + const struct vfsmount *mnt,
55643 + const struct dentry *dir, const int flag,
55644 + const int acc_mode);
55645 +int gr_handle_hardlink(const struct dentry *dentry,
55646 + const struct vfsmount *mnt,
55647 + struct inode *inode,
55648 + const int mode, const char *to);
55649 +
55650 +int gr_is_capable(const int cap);
55651 +int gr_is_capable_nolog(const int cap);
55652 +void gr_learn_resource(const struct task_struct *task, const int limit,
55653 + const unsigned long wanted, const int gt);
55654 +void gr_copy_label(struct task_struct *tsk);
55655 +void gr_handle_crash(struct task_struct *task, const int sig);
55656 +int gr_handle_signal(const struct task_struct *p, const int sig);
55657 +int gr_check_crash_uid(const uid_t uid);
55658 +int gr_check_protected_task(const struct task_struct *task);
55659 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
55660 +int gr_acl_handle_mmap(const struct file *file,
55661 + const unsigned long prot);
55662 +int gr_acl_handle_mprotect(const struct file *file,
55663 + const unsigned long prot);
55664 +int gr_check_hidden_task(const struct task_struct *tsk);
55665 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
55666 + const struct vfsmount *mnt);
55667 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
55668 + const struct vfsmount *mnt);
55669 +__u32 gr_acl_handle_access(const struct dentry *dentry,
55670 + const struct vfsmount *mnt, const int fmode);
55671 +__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
55672 + const struct vfsmount *mnt, mode_t mode);
55673 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
55674 + const struct vfsmount *mnt, mode_t mode);
55675 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
55676 + const struct vfsmount *mnt);
55677 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
55678 + const struct vfsmount *mnt);
55679 +int gr_handle_ptrace(struct task_struct *task, const long request);
55680 +int gr_handle_proc_ptrace(struct task_struct *task);
55681 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
55682 + const struct vfsmount *mnt);
55683 +int gr_check_crash_exec(const struct file *filp);
55684 +int gr_acl_is_enabled(void);
55685 +void gr_set_kernel_label(struct task_struct *task);
55686 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
55687 + const gid_t gid);
55688 +int gr_set_proc_label(const struct dentry *dentry,
55689 + const struct vfsmount *mnt,
55690 + const int unsafe_share);
55691 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
55692 + const struct vfsmount *mnt);
55693 +__u32 gr_acl_handle_open(const struct dentry *dentry,
55694 + const struct vfsmount *mnt, const int fmode);
55695 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
55696 + const struct dentry *p_dentry,
55697 + const struct vfsmount *p_mnt, const int fmode,
55698 + const int imode);
55699 +void gr_handle_create(const struct dentry *dentry,
55700 + const struct vfsmount *mnt);
55701 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
55702 + const struct dentry *parent_dentry,
55703 + const struct vfsmount *parent_mnt,
55704 + const int mode);
55705 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
55706 + const struct dentry *parent_dentry,
55707 + const struct vfsmount *parent_mnt);
55708 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
55709 + const struct vfsmount *mnt);
55710 +void gr_handle_delete(const ino_t ino, const dev_t dev);
55711 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
55712 + const struct vfsmount *mnt);
55713 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
55714 + const struct dentry *parent_dentry,
55715 + const struct vfsmount *parent_mnt,
55716 + const char *from);
55717 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
55718 + const struct dentry *parent_dentry,
55719 + const struct vfsmount *parent_mnt,
55720 + const struct dentry *old_dentry,
55721 + const struct vfsmount *old_mnt, const char *to);
55722 +int gr_acl_handle_rename(struct dentry *new_dentry,
55723 + struct dentry *parent_dentry,
55724 + const struct vfsmount *parent_mnt,
55725 + struct dentry *old_dentry,
55726 + struct inode *old_parent_inode,
55727 + struct vfsmount *old_mnt, const char *newname);
55728 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
55729 + struct dentry *old_dentry,
55730 + struct dentry *new_dentry,
55731 + struct vfsmount *mnt, const __u8 replace);
55732 +__u32 gr_check_link(const struct dentry *new_dentry,
55733 + const struct dentry *parent_dentry,
55734 + const struct vfsmount *parent_mnt,
55735 + const struct dentry *old_dentry,
55736 + const struct vfsmount *old_mnt);
55737 +int gr_acl_handle_filldir(const struct file *file, const char *name,
55738 + const unsigned int namelen, const ino_t ino);
55739 +
55740 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
55741 + const struct vfsmount *mnt);
55742 +void gr_acl_handle_exit(void);
55743 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
55744 +int gr_acl_handle_procpidmem(const struct task_struct *task);
55745 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
55746 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
55747 +void gr_audit_ptrace(struct task_struct *task);
55748 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
55749 +
55750 +#ifdef CONFIG_GRKERNSEC
55751 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
55752 +void gr_handle_vm86(void);
55753 +void gr_handle_mem_readwrite(u64 from, u64 to);
55754 +
55755 +extern int grsec_enable_dmesg;
55756 +extern int grsec_disable_privio;
55757 +#endif
55758 +
55759 +#endif
55760 diff -urNp linux-2.6.32.41/include/linux/hdpu_features.h linux-2.6.32.41/include/linux/hdpu_features.h
55761 --- linux-2.6.32.41/include/linux/hdpu_features.h 2011-03-27 14:31:47.000000000 -0400
55762 +++ linux-2.6.32.41/include/linux/hdpu_features.h 2011-04-17 15:56:46.000000000 -0400
55763 @@ -3,7 +3,7 @@
55764 struct cpustate_t {
55765 spinlock_t lock;
55766 int excl;
55767 - int open_count;
55768 + atomic_t open_count;
55769 unsigned char cached_val;
55770 int inited;
55771 unsigned long *set_addr;
55772 diff -urNp linux-2.6.32.41/include/linux/highmem.h linux-2.6.32.41/include/linux/highmem.h
55773 --- linux-2.6.32.41/include/linux/highmem.h 2011-03-27 14:31:47.000000000 -0400
55774 +++ linux-2.6.32.41/include/linux/highmem.h 2011-04-17 15:56:46.000000000 -0400
55775 @@ -137,6 +137,18 @@ static inline void clear_highpage(struct
55776 kunmap_atomic(kaddr, KM_USER0);
55777 }
55778
55779 +static inline void sanitize_highpage(struct page *page)
55780 +{
55781 + void *kaddr;
55782 + unsigned long flags;
55783 +
55784 + local_irq_save(flags);
55785 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
55786 + clear_page(kaddr);
55787 + kunmap_atomic(kaddr, KM_CLEARPAGE);
55788 + local_irq_restore(flags);
55789 +}
55790 +
55791 static inline void zero_user_segments(struct page *page,
55792 unsigned start1, unsigned end1,
55793 unsigned start2, unsigned end2)
55794 diff -urNp linux-2.6.32.41/include/linux/i2o.h linux-2.6.32.41/include/linux/i2o.h
55795 --- linux-2.6.32.41/include/linux/i2o.h 2011-03-27 14:31:47.000000000 -0400
55796 +++ linux-2.6.32.41/include/linux/i2o.h 2011-05-04 17:56:28.000000000 -0400
55797 @@ -564,7 +564,7 @@ struct i2o_controller {
55798 struct i2o_device *exec; /* Executive */
55799 #if BITS_PER_LONG == 64
55800 spinlock_t context_list_lock; /* lock for context_list */
55801 - atomic_t context_list_counter; /* needed for unique contexts */
55802 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
55803 struct list_head context_list; /* list of context id's
55804 and pointers */
55805 #endif
55806 diff -urNp linux-2.6.32.41/include/linux/init_task.h linux-2.6.32.41/include/linux/init_task.h
55807 --- linux-2.6.32.41/include/linux/init_task.h 2011-03-27 14:31:47.000000000 -0400
55808 +++ linux-2.6.32.41/include/linux/init_task.h 2011-05-18 20:44:59.000000000 -0400
55809 @@ -83,6 +83,12 @@ extern struct group_info init_groups;
55810 #define INIT_IDS
55811 #endif
55812
55813 +#ifdef CONFIG_X86
55814 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
55815 +#else
55816 +#define INIT_TASK_THREAD_INFO
55817 +#endif
55818 +
55819 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
55820 /*
55821 * Because of the reduced scope of CAP_SETPCAP when filesystem
55822 @@ -156,6 +162,7 @@ extern struct cred init_cred;
55823 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
55824 .comm = "swapper", \
55825 .thread = INIT_THREAD, \
55826 + INIT_TASK_THREAD_INFO \
55827 .fs = &init_fs, \
55828 .files = &init_files, \
55829 .signal = &init_signals, \
55830 diff -urNp linux-2.6.32.41/include/linux/interrupt.h linux-2.6.32.41/include/linux/interrupt.h
55831 --- linux-2.6.32.41/include/linux/interrupt.h 2011-03-27 14:31:47.000000000 -0400
55832 +++ linux-2.6.32.41/include/linux/interrupt.h 2011-04-17 15:56:46.000000000 -0400
55833 @@ -362,7 +362,7 @@ enum
55834 /* map softirq index to softirq name. update 'softirq_to_name' in
55835 * kernel/softirq.c when adding a new softirq.
55836 */
55837 -extern char *softirq_to_name[NR_SOFTIRQS];
55838 +extern const char * const softirq_to_name[NR_SOFTIRQS];
55839
55840 /* softirq mask and active fields moved to irq_cpustat_t in
55841 * asm/hardirq.h to get better cache usage. KAO
55842 @@ -370,12 +370,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
55843
55844 struct softirq_action
55845 {
55846 - void (*action)(struct softirq_action *);
55847 + void (*action)(void);
55848 };
55849
55850 asmlinkage void do_softirq(void);
55851 asmlinkage void __do_softirq(void);
55852 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
55853 +extern void open_softirq(int nr, void (*action)(void));
55854 extern void softirq_init(void);
55855 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
55856 extern void raise_softirq_irqoff(unsigned int nr);
55857 diff -urNp linux-2.6.32.41/include/linux/irq.h linux-2.6.32.41/include/linux/irq.h
55858 --- linux-2.6.32.41/include/linux/irq.h 2011-03-27 14:31:47.000000000 -0400
55859 +++ linux-2.6.32.41/include/linux/irq.h 2011-04-17 15:56:46.000000000 -0400
55860 @@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq,
55861 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
55862 bool boot)
55863 {
55864 +#ifdef CONFIG_CPUMASK_OFFSTACK
55865 gfp_t gfp = GFP_ATOMIC;
55866
55867 if (boot)
55868 gfp = GFP_NOWAIT;
55869
55870 -#ifdef CONFIG_CPUMASK_OFFSTACK
55871 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
55872 return false;
55873
55874 diff -urNp linux-2.6.32.41/include/linux/kallsyms.h linux-2.6.32.41/include/linux/kallsyms.h
55875 --- linux-2.6.32.41/include/linux/kallsyms.h 2011-03-27 14:31:47.000000000 -0400
55876 +++ linux-2.6.32.41/include/linux/kallsyms.h 2011-04-17 15:56:46.000000000 -0400
55877 @@ -15,7 +15,8 @@
55878
55879 struct module;
55880
55881 -#ifdef CONFIG_KALLSYMS
55882 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
55883 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55884 /* Lookup the address for a symbol. Returns 0 if not found. */
55885 unsigned long kallsyms_lookup_name(const char *name);
55886
55887 @@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(un
55888 /* Stupid that this does nothing, but I didn't create this mess. */
55889 #define __print_symbol(fmt, addr)
55890 #endif /*CONFIG_KALLSYMS*/
55891 +#else /* when included by kallsyms.c, vsnprintf.c, or
55892 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
55893 +extern void __print_symbol(const char *fmt, unsigned long address);
55894 +extern int sprint_symbol(char *buffer, unsigned long address);
55895 +const char *kallsyms_lookup(unsigned long addr,
55896 + unsigned long *symbolsize,
55897 + unsigned long *offset,
55898 + char **modname, char *namebuf);
55899 +#endif
55900
55901 /* This macro allows us to keep printk typechecking */
55902 static void __check_printsym_format(const char *fmt, ...)
55903 diff -urNp linux-2.6.32.41/include/linux/kgdb.h linux-2.6.32.41/include/linux/kgdb.h
55904 --- linux-2.6.32.41/include/linux/kgdb.h 2011-03-27 14:31:47.000000000 -0400
55905 +++ linux-2.6.32.41/include/linux/kgdb.h 2011-05-04 17:56:20.000000000 -0400
55906 @@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
55907
55908 extern int kgdb_connected;
55909
55910 -extern atomic_t kgdb_setting_breakpoint;
55911 -extern atomic_t kgdb_cpu_doing_single_step;
55912 +extern atomic_unchecked_t kgdb_setting_breakpoint;
55913 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
55914
55915 extern struct task_struct *kgdb_usethread;
55916 extern struct task_struct *kgdb_contthread;
55917 @@ -251,20 +251,20 @@ struct kgdb_arch {
55918 */
55919 struct kgdb_io {
55920 const char *name;
55921 - int (*read_char) (void);
55922 - void (*write_char) (u8);
55923 - void (*flush) (void);
55924 - int (*init) (void);
55925 - void (*pre_exception) (void);
55926 - void (*post_exception) (void);
55927 + int (* const read_char) (void);
55928 + void (* const write_char) (u8);
55929 + void (* const flush) (void);
55930 + int (* const init) (void);
55931 + void (* const pre_exception) (void);
55932 + void (* const post_exception) (void);
55933 };
55934
55935 -extern struct kgdb_arch arch_kgdb_ops;
55936 +extern const struct kgdb_arch arch_kgdb_ops;
55937
55938 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
55939
55940 -extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
55941 -extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
55942 +extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
55943 +extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
55944
55945 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
55946 extern int kgdb_mem2hex(char *mem, char *buf, int count);
55947 diff -urNp linux-2.6.32.41/include/linux/kmod.h linux-2.6.32.41/include/linux/kmod.h
55948 --- linux-2.6.32.41/include/linux/kmod.h 2011-03-27 14:31:47.000000000 -0400
55949 +++ linux-2.6.32.41/include/linux/kmod.h 2011-04-17 15:56:46.000000000 -0400
55950 @@ -31,6 +31,8 @@
55951 * usually useless though. */
55952 extern int __request_module(bool wait, const char *name, ...) \
55953 __attribute__((format(printf, 2, 3)));
55954 +extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
55955 + __attribute__((format(printf, 3, 4)));
55956 #define request_module(mod...) __request_module(true, mod)
55957 #define request_module_nowait(mod...) __request_module(false, mod)
55958 #define try_then_request_module(x, mod...) \
55959 diff -urNp linux-2.6.32.41/include/linux/kobject.h linux-2.6.32.41/include/linux/kobject.h
55960 --- linux-2.6.32.41/include/linux/kobject.h 2011-03-27 14:31:47.000000000 -0400
55961 +++ linux-2.6.32.41/include/linux/kobject.h 2011-04-17 15:56:46.000000000 -0400
55962 @@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kob
55963
55964 struct kobj_type {
55965 void (*release)(struct kobject *kobj);
55966 - struct sysfs_ops *sysfs_ops;
55967 + const struct sysfs_ops *sysfs_ops;
55968 struct attribute **default_attrs;
55969 };
55970
55971 @@ -118,9 +118,9 @@ struct kobj_uevent_env {
55972 };
55973
55974 struct kset_uevent_ops {
55975 - int (*filter)(struct kset *kset, struct kobject *kobj);
55976 - const char *(*name)(struct kset *kset, struct kobject *kobj);
55977 - int (*uevent)(struct kset *kset, struct kobject *kobj,
55978 + int (* const filter)(struct kset *kset, struct kobject *kobj);
55979 + const char *(* const name)(struct kset *kset, struct kobject *kobj);
55980 + int (* const uevent)(struct kset *kset, struct kobject *kobj,
55981 struct kobj_uevent_env *env);
55982 };
55983
55984 @@ -132,7 +132,7 @@ struct kobj_attribute {
55985 const char *buf, size_t count);
55986 };
55987
55988 -extern struct sysfs_ops kobj_sysfs_ops;
55989 +extern const struct sysfs_ops kobj_sysfs_ops;
55990
55991 /**
55992 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
55993 @@ -155,14 +155,14 @@ struct kset {
55994 struct list_head list;
55995 spinlock_t list_lock;
55996 struct kobject kobj;
55997 - struct kset_uevent_ops *uevent_ops;
55998 + const struct kset_uevent_ops *uevent_ops;
55999 };
56000
56001 extern void kset_init(struct kset *kset);
56002 extern int __must_check kset_register(struct kset *kset);
56003 extern void kset_unregister(struct kset *kset);
56004 extern struct kset * __must_check kset_create_and_add(const char *name,
56005 - struct kset_uevent_ops *u,
56006 + const struct kset_uevent_ops *u,
56007 struct kobject *parent_kobj);
56008
56009 static inline struct kset *to_kset(struct kobject *kobj)
56010 diff -urNp linux-2.6.32.41/include/linux/kvm_host.h linux-2.6.32.41/include/linux/kvm_host.h
56011 --- linux-2.6.32.41/include/linux/kvm_host.h 2011-03-27 14:31:47.000000000 -0400
56012 +++ linux-2.6.32.41/include/linux/kvm_host.h 2011-04-17 15:56:46.000000000 -0400
56013 @@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
56014 void vcpu_load(struct kvm_vcpu *vcpu);
56015 void vcpu_put(struct kvm_vcpu *vcpu);
56016
56017 -int kvm_init(void *opaque, unsigned int vcpu_size,
56018 +int kvm_init(const void *opaque, unsigned int vcpu_size,
56019 struct module *module);
56020 void kvm_exit(void);
56021
56022 @@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
56023 struct kvm_guest_debug *dbg);
56024 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
56025
56026 -int kvm_arch_init(void *opaque);
56027 +int kvm_arch_init(const void *opaque);
56028 void kvm_arch_exit(void);
56029
56030 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
56031 diff -urNp linux-2.6.32.41/include/linux/libata.h linux-2.6.32.41/include/linux/libata.h
56032 --- linux-2.6.32.41/include/linux/libata.h 2011-03-27 14:31:47.000000000 -0400
56033 +++ linux-2.6.32.41/include/linux/libata.h 2011-04-23 12:56:11.000000000 -0400
56034 @@ -525,11 +525,11 @@ struct ata_ioports {
56035
56036 struct ata_host {
56037 spinlock_t lock;
56038 - struct device *dev;
56039 + struct device *dev;
56040 void __iomem * const *iomap;
56041 unsigned int n_ports;
56042 void *private_data;
56043 - struct ata_port_operations *ops;
56044 + const struct ata_port_operations *ops;
56045 unsigned long flags;
56046 #ifdef CONFIG_ATA_ACPI
56047 acpi_handle acpi_handle;
56048 @@ -710,7 +710,7 @@ struct ata_link {
56049
56050 struct ata_port {
56051 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
56052 - struct ata_port_operations *ops;
56053 + const struct ata_port_operations *ops;
56054 spinlock_t *lock;
56055 /* Flags owned by the EH context. Only EH should touch these once the
56056 port is active */
56057 @@ -892,7 +892,7 @@ struct ata_port_info {
56058 unsigned long pio_mask;
56059 unsigned long mwdma_mask;
56060 unsigned long udma_mask;
56061 - struct ata_port_operations *port_ops;
56062 + const struct ata_port_operations *port_ops;
56063 void *private_data;
56064 };
56065
56066 @@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timi
56067 extern const unsigned long sata_deb_timing_hotplug[];
56068 extern const unsigned long sata_deb_timing_long[];
56069
56070 -extern struct ata_port_operations ata_dummy_port_ops;
56071 +extern const struct ata_port_operations ata_dummy_port_ops;
56072 extern const struct ata_port_info ata_dummy_port_info;
56073
56074 static inline const unsigned long *
56075 @@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_
56076 struct scsi_host_template *sht);
56077 extern void ata_host_detach(struct ata_host *host);
56078 extern void ata_host_init(struct ata_host *, struct device *,
56079 - unsigned long, struct ata_port_operations *);
56080 + unsigned long, const struct ata_port_operations *);
56081 extern int ata_scsi_detect(struct scsi_host_template *sht);
56082 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
56083 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
56084 diff -urNp linux-2.6.32.41/include/linux/lockd/bind.h linux-2.6.32.41/include/linux/lockd/bind.h
56085 --- linux-2.6.32.41/include/linux/lockd/bind.h 2011-03-27 14:31:47.000000000 -0400
56086 +++ linux-2.6.32.41/include/linux/lockd/bind.h 2011-04-17 15:56:46.000000000 -0400
56087 @@ -23,13 +23,13 @@ struct svc_rqst;
56088 * This is the set of functions for lockd->nfsd communication
56089 */
56090 struct nlmsvc_binding {
56091 - __be32 (*fopen)(struct svc_rqst *,
56092 + __be32 (* const fopen)(struct svc_rqst *,
56093 struct nfs_fh *,
56094 struct file **);
56095 - void (*fclose)(struct file *);
56096 + void (* const fclose)(struct file *);
56097 };
56098
56099 -extern struct nlmsvc_binding * nlmsvc_ops;
56100 +extern const struct nlmsvc_binding * nlmsvc_ops;
56101
56102 /*
56103 * Similar to nfs_client_initdata, but without the NFS-specific
56104 diff -urNp linux-2.6.32.41/include/linux/mm.h linux-2.6.32.41/include/linux/mm.h
56105 --- linux-2.6.32.41/include/linux/mm.h 2011-03-27 14:31:47.000000000 -0400
56106 +++ linux-2.6.32.41/include/linux/mm.h 2011-04-17 15:56:46.000000000 -0400
56107 @@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void
56108
56109 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
56110 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
56111 +
56112 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
56113 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
56114 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
56115 +#else
56116 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
56117 +#endif
56118 +
56119 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
56120 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
56121
56122 @@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
56123 int set_page_dirty_lock(struct page *page);
56124 int clear_page_dirty_for_io(struct page *page);
56125
56126 -/* Is the vma a continuation of the stack vma above it? */
56127 -static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
56128 -{
56129 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
56130 -}
56131 -
56132 extern unsigned long move_page_tables(struct vm_area_struct *vma,
56133 unsigned long old_addr, struct vm_area_struct *new_vma,
56134 unsigned long new_addr, unsigned long len);
56135 @@ -890,6 +891,8 @@ struct shrinker {
56136 extern void register_shrinker(struct shrinker *);
56137 extern void unregister_shrinker(struct shrinker *);
56138
56139 +pgprot_t vm_get_page_prot(unsigned long vm_flags);
56140 +
56141 int vma_wants_writenotify(struct vm_area_struct *vma);
56142
56143 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
56144 @@ -1162,6 +1165,7 @@ out:
56145 }
56146
56147 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
56148 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
56149
56150 extern unsigned long do_brk(unsigned long, unsigned long);
56151
56152 @@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(
56153 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
56154 struct vm_area_struct **pprev);
56155
56156 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
56157 +extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
56158 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
56159 +
56160 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
56161 NULL if none. Assume start_addr < end_addr. */
56162 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
56163 @@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(st
56164 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
56165 }
56166
56167 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
56168 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
56169 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
56170 unsigned long pfn, unsigned long size, pgprot_t);
56171 @@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long
56172 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
56173 extern int sysctl_memory_failure_early_kill;
56174 extern int sysctl_memory_failure_recovery;
56175 -extern atomic_long_t mce_bad_pages;
56176 +extern atomic_long_unchecked_t mce_bad_pages;
56177 +
56178 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
56179 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
56180 +#else
56181 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
56182 +#endif
56183
56184 #endif /* __KERNEL__ */
56185 #endif /* _LINUX_MM_H */
56186 diff -urNp linux-2.6.32.41/include/linux/mm_types.h linux-2.6.32.41/include/linux/mm_types.h
56187 --- linux-2.6.32.41/include/linux/mm_types.h 2011-03-27 14:31:47.000000000 -0400
56188 +++ linux-2.6.32.41/include/linux/mm_types.h 2011-04-17 15:56:46.000000000 -0400
56189 @@ -186,6 +186,8 @@ struct vm_area_struct {
56190 #ifdef CONFIG_NUMA
56191 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
56192 #endif
56193 +
56194 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
56195 };
56196
56197 struct core_thread {
56198 @@ -287,6 +289,24 @@ struct mm_struct {
56199 #ifdef CONFIG_MMU_NOTIFIER
56200 struct mmu_notifier_mm *mmu_notifier_mm;
56201 #endif
56202 +
56203 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
56204 + unsigned long pax_flags;
56205 +#endif
56206 +
56207 +#ifdef CONFIG_PAX_DLRESOLVE
56208 + unsigned long call_dl_resolve;
56209 +#endif
56210 +
56211 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
56212 + unsigned long call_syscall;
56213 +#endif
56214 +
56215 +#ifdef CONFIG_PAX_ASLR
56216 + unsigned long delta_mmap; /* randomized offset */
56217 + unsigned long delta_stack; /* randomized offset */
56218 +#endif
56219 +
56220 };
56221
56222 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
56223 diff -urNp linux-2.6.32.41/include/linux/mmu_notifier.h linux-2.6.32.41/include/linux/mmu_notifier.h
56224 --- linux-2.6.32.41/include/linux/mmu_notifier.h 2011-03-27 14:31:47.000000000 -0400
56225 +++ linux-2.6.32.41/include/linux/mmu_notifier.h 2011-04-17 15:56:46.000000000 -0400
56226 @@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destr
56227 */
56228 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
56229 ({ \
56230 - pte_t __pte; \
56231 + pte_t ___pte; \
56232 struct vm_area_struct *___vma = __vma; \
56233 unsigned long ___address = __address; \
56234 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
56235 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
56236 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
56237 - __pte; \
56238 + ___pte; \
56239 })
56240
56241 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
56242 diff -urNp linux-2.6.32.41/include/linux/mmzone.h linux-2.6.32.41/include/linux/mmzone.h
56243 --- linux-2.6.32.41/include/linux/mmzone.h 2011-03-27 14:31:47.000000000 -0400
56244 +++ linux-2.6.32.41/include/linux/mmzone.h 2011-04-17 15:56:46.000000000 -0400
56245 @@ -350,7 +350,7 @@ struct zone {
56246 unsigned long flags; /* zone flags, see below */
56247
56248 /* Zone statistics */
56249 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
56250 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
56251
56252 /*
56253 * prev_priority holds the scanning priority for this zone. It is
56254 diff -urNp linux-2.6.32.41/include/linux/mod_devicetable.h linux-2.6.32.41/include/linux/mod_devicetable.h
56255 --- linux-2.6.32.41/include/linux/mod_devicetable.h 2011-03-27 14:31:47.000000000 -0400
56256 +++ linux-2.6.32.41/include/linux/mod_devicetable.h 2011-04-17 15:56:46.000000000 -0400
56257 @@ -12,7 +12,7 @@
56258 typedef unsigned long kernel_ulong_t;
56259 #endif
56260
56261 -#define PCI_ANY_ID (~0)
56262 +#define PCI_ANY_ID ((__u16)~0)
56263
56264 struct pci_device_id {
56265 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
56266 @@ -131,7 +131,7 @@ struct usb_device_id {
56267 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
56268 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
56269
56270 -#define HID_ANY_ID (~0)
56271 +#define HID_ANY_ID (~0U)
56272
56273 struct hid_device_id {
56274 __u16 bus;
56275 diff -urNp linux-2.6.32.41/include/linux/module.h linux-2.6.32.41/include/linux/module.h
56276 --- linux-2.6.32.41/include/linux/module.h 2011-03-27 14:31:47.000000000 -0400
56277 +++ linux-2.6.32.41/include/linux/module.h 2011-04-17 15:56:46.000000000 -0400
56278 @@ -287,16 +287,16 @@ struct module
56279 int (*init)(void);
56280
56281 /* If this is non-NULL, vfree after init() returns */
56282 - void *module_init;
56283 + void *module_init_rx, *module_init_rw;
56284
56285 /* Here is the actual code + data, vfree'd on unload. */
56286 - void *module_core;
56287 + void *module_core_rx, *module_core_rw;
56288
56289 /* Here are the sizes of the init and core sections */
56290 - unsigned int init_size, core_size;
56291 + unsigned int init_size_rw, core_size_rw;
56292
56293 /* The size of the executable code in each section. */
56294 - unsigned int init_text_size, core_text_size;
56295 + unsigned int init_size_rx, core_size_rx;
56296
56297 /* Arch-specific module values */
56298 struct mod_arch_specific arch;
56299 @@ -393,16 +393,46 @@ struct module *__module_address(unsigned
56300 bool is_module_address(unsigned long addr);
56301 bool is_module_text_address(unsigned long addr);
56302
56303 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
56304 +{
56305 +
56306 +#ifdef CONFIG_PAX_KERNEXEC
56307 + if (ktla_ktva(addr) >= (unsigned long)start &&
56308 + ktla_ktva(addr) < (unsigned long)start + size)
56309 + return 1;
56310 +#endif
56311 +
56312 + return ((void *)addr >= start && (void *)addr < start + size);
56313 +}
56314 +
56315 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
56316 +{
56317 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
56318 +}
56319 +
56320 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
56321 +{
56322 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
56323 +}
56324 +
56325 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
56326 +{
56327 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
56328 +}
56329 +
56330 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
56331 +{
56332 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
56333 +}
56334 +
56335 static inline int within_module_core(unsigned long addr, struct module *mod)
56336 {
56337 - return (unsigned long)mod->module_core <= addr &&
56338 - addr < (unsigned long)mod->module_core + mod->core_size;
56339 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
56340 }
56341
56342 static inline int within_module_init(unsigned long addr, struct module *mod)
56343 {
56344 - return (unsigned long)mod->module_init <= addr &&
56345 - addr < (unsigned long)mod->module_init + mod->init_size;
56346 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
56347 }
56348
56349 /* Search for module by name: must hold module_mutex. */
56350 diff -urNp linux-2.6.32.41/include/linux/moduleloader.h linux-2.6.32.41/include/linux/moduleloader.h
56351 --- linux-2.6.32.41/include/linux/moduleloader.h 2011-03-27 14:31:47.000000000 -0400
56352 +++ linux-2.6.32.41/include/linux/moduleloader.h 2011-04-17 15:56:46.000000000 -0400
56353 @@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
56354 sections. Returns NULL on failure. */
56355 void *module_alloc(unsigned long size);
56356
56357 +#ifdef CONFIG_PAX_KERNEXEC
56358 +void *module_alloc_exec(unsigned long size);
56359 +#else
56360 +#define module_alloc_exec(x) module_alloc(x)
56361 +#endif
56362 +
56363 /* Free memory returned from module_alloc. */
56364 void module_free(struct module *mod, void *module_region);
56365
56366 +#ifdef CONFIG_PAX_KERNEXEC
56367 +void module_free_exec(struct module *mod, void *module_region);
56368 +#else
56369 +#define module_free_exec(x, y) module_free((x), (y))
56370 +#endif
56371 +
56372 /* Apply the given relocation to the (simplified) ELF. Return -error
56373 or 0. */
56374 int apply_relocate(Elf_Shdr *sechdrs,
56375 diff -urNp linux-2.6.32.41/include/linux/moduleparam.h linux-2.6.32.41/include/linux/moduleparam.h
56376 --- linux-2.6.32.41/include/linux/moduleparam.h 2011-03-27 14:31:47.000000000 -0400
56377 +++ linux-2.6.32.41/include/linux/moduleparam.h 2011-04-17 15:56:46.000000000 -0400
56378 @@ -132,7 +132,7 @@ struct kparam_array
56379
56380 /* Actually copy string: maxlen param is usually sizeof(string). */
56381 #define module_param_string(name, string, len, perm) \
56382 - static const struct kparam_string __param_string_##name \
56383 + static const struct kparam_string __param_string_##name __used \
56384 = { len, string }; \
56385 __module_param_call(MODULE_PARAM_PREFIX, name, \
56386 param_set_copystring, param_get_string, \
56387 @@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffe
56388
56389 /* Comma-separated array: *nump is set to number they actually specified. */
56390 #define module_param_array_named(name, array, type, nump, perm) \
56391 - static const struct kparam_array __param_arr_##name \
56392 + static const struct kparam_array __param_arr_##name __used \
56393 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
56394 sizeof(array[0]), array }; \
56395 __module_param_call(MODULE_PARAM_PREFIX, name, \
56396 diff -urNp linux-2.6.32.41/include/linux/mutex.h linux-2.6.32.41/include/linux/mutex.h
56397 --- linux-2.6.32.41/include/linux/mutex.h 2011-03-27 14:31:47.000000000 -0400
56398 +++ linux-2.6.32.41/include/linux/mutex.h 2011-04-17 15:56:46.000000000 -0400
56399 @@ -51,7 +51,7 @@ struct mutex {
56400 spinlock_t wait_lock;
56401 struct list_head wait_list;
56402 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
56403 - struct thread_info *owner;
56404 + struct task_struct *owner;
56405 #endif
56406 #ifdef CONFIG_DEBUG_MUTEXES
56407 const char *name;
56408 diff -urNp linux-2.6.32.41/include/linux/namei.h linux-2.6.32.41/include/linux/namei.h
56409 --- linux-2.6.32.41/include/linux/namei.h 2011-03-27 14:31:47.000000000 -0400
56410 +++ linux-2.6.32.41/include/linux/namei.h 2011-04-17 15:56:46.000000000 -0400
56411 @@ -22,7 +22,7 @@ struct nameidata {
56412 unsigned int flags;
56413 int last_type;
56414 unsigned depth;
56415 - char *saved_names[MAX_NESTED_LINKS + 1];
56416 + const char *saved_names[MAX_NESTED_LINKS + 1];
56417
56418 /* Intent data */
56419 union {
56420 @@ -84,12 +84,12 @@ extern int follow_up(struct path *);
56421 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
56422 extern void unlock_rename(struct dentry *, struct dentry *);
56423
56424 -static inline void nd_set_link(struct nameidata *nd, char *path)
56425 +static inline void nd_set_link(struct nameidata *nd, const char *path)
56426 {
56427 nd->saved_names[nd->depth] = path;
56428 }
56429
56430 -static inline char *nd_get_link(struct nameidata *nd)
56431 +static inline const char *nd_get_link(const struct nameidata *nd)
56432 {
56433 return nd->saved_names[nd->depth];
56434 }
56435 diff -urNp linux-2.6.32.41/include/linux/netfilter/xt_gradm.h linux-2.6.32.41/include/linux/netfilter/xt_gradm.h
56436 --- linux-2.6.32.41/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
56437 +++ linux-2.6.32.41/include/linux/netfilter/xt_gradm.h 2011-04-17 15:56:46.000000000 -0400
56438 @@ -0,0 +1,9 @@
56439 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
56440 +#define _LINUX_NETFILTER_XT_GRADM_H 1
56441 +
56442 +struct xt_gradm_mtinfo {
56443 + __u16 flags;
56444 + __u16 invflags;
56445 +};
56446 +
56447 +#endif
56448 diff -urNp linux-2.6.32.41/include/linux/nodemask.h linux-2.6.32.41/include/linux/nodemask.h
56449 --- linux-2.6.32.41/include/linux/nodemask.h 2011-03-27 14:31:47.000000000 -0400
56450 +++ linux-2.6.32.41/include/linux/nodemask.h 2011-04-17 15:56:46.000000000 -0400
56451 @@ -464,11 +464,11 @@ static inline int num_node_state(enum no
56452
56453 #define any_online_node(mask) \
56454 ({ \
56455 - int node; \
56456 - for_each_node_mask(node, (mask)) \
56457 - if (node_online(node)) \
56458 + int __node; \
56459 + for_each_node_mask(__node, (mask)) \
56460 + if (node_online(__node)) \
56461 break; \
56462 - node; \
56463 + __node; \
56464 })
56465
56466 #define num_online_nodes() num_node_state(N_ONLINE)
56467 diff -urNp linux-2.6.32.41/include/linux/oprofile.h linux-2.6.32.41/include/linux/oprofile.h
56468 --- linux-2.6.32.41/include/linux/oprofile.h 2011-03-27 14:31:47.000000000 -0400
56469 +++ linux-2.6.32.41/include/linux/oprofile.h 2011-04-17 15:56:46.000000000 -0400
56470 @@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super
56471 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
56472 char const * name, ulong * val);
56473
56474 -/** Create a file for read-only access to an atomic_t. */
56475 +/** Create a file for read-only access to an atomic_unchecked_t. */
56476 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
56477 - char const * name, atomic_t * val);
56478 + char const * name, atomic_unchecked_t * val);
56479
56480 /** create a directory */
56481 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
56482 diff -urNp linux-2.6.32.41/include/linux/perf_event.h linux-2.6.32.41/include/linux/perf_event.h
56483 --- linux-2.6.32.41/include/linux/perf_event.h 2011-03-27 14:31:47.000000000 -0400
56484 +++ linux-2.6.32.41/include/linux/perf_event.h 2011-05-04 17:56:28.000000000 -0400
56485 @@ -476,7 +476,7 @@ struct hw_perf_event {
56486 struct hrtimer hrtimer;
56487 };
56488 };
56489 - atomic64_t prev_count;
56490 + atomic64_unchecked_t prev_count;
56491 u64 sample_period;
56492 u64 last_period;
56493 atomic64_t period_left;
56494 @@ -557,7 +557,7 @@ struct perf_event {
56495 const struct pmu *pmu;
56496
56497 enum perf_event_active_state state;
56498 - atomic64_t count;
56499 + atomic64_unchecked_t count;
56500
56501 /*
56502 * These are the total time in nanoseconds that the event
56503 @@ -595,8 +595,8 @@ struct perf_event {
56504 * These accumulate total time (in nanoseconds) that children
56505 * events have been enabled and running, respectively.
56506 */
56507 - atomic64_t child_total_time_enabled;
56508 - atomic64_t child_total_time_running;
56509 + atomic64_unchecked_t child_total_time_enabled;
56510 + atomic64_unchecked_t child_total_time_running;
56511
56512 /*
56513 * Protect attach/detach and child_list:
56514 diff -urNp linux-2.6.32.41/include/linux/pipe_fs_i.h linux-2.6.32.41/include/linux/pipe_fs_i.h
56515 --- linux-2.6.32.41/include/linux/pipe_fs_i.h 2011-03-27 14:31:47.000000000 -0400
56516 +++ linux-2.6.32.41/include/linux/pipe_fs_i.h 2011-04-17 15:56:46.000000000 -0400
56517 @@ -46,9 +46,9 @@ struct pipe_inode_info {
56518 wait_queue_head_t wait;
56519 unsigned int nrbufs, curbuf;
56520 struct page *tmp_page;
56521 - unsigned int readers;
56522 - unsigned int writers;
56523 - unsigned int waiting_writers;
56524 + atomic_t readers;
56525 + atomic_t writers;
56526 + atomic_t waiting_writers;
56527 unsigned int r_counter;
56528 unsigned int w_counter;
56529 struct fasync_struct *fasync_readers;
56530 diff -urNp linux-2.6.32.41/include/linux/poison.h linux-2.6.32.41/include/linux/poison.h
56531 --- linux-2.6.32.41/include/linux/poison.h 2011-03-27 14:31:47.000000000 -0400
56532 +++ linux-2.6.32.41/include/linux/poison.h 2011-04-17 15:56:46.000000000 -0400
56533 @@ -19,8 +19,8 @@
56534 * under normal circumstances, used to verify that nobody uses
56535 * non-initialized list entries.
56536 */
56537 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
56538 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
56539 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
56540 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
56541
56542 /********** include/linux/timer.h **********/
56543 /*
56544 diff -urNp linux-2.6.32.41/include/linux/proc_fs.h linux-2.6.32.41/include/linux/proc_fs.h
56545 --- linux-2.6.32.41/include/linux/proc_fs.h 2011-03-27 14:31:47.000000000 -0400
56546 +++ linux-2.6.32.41/include/linux/proc_fs.h 2011-04-17 15:56:46.000000000 -0400
56547 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
56548 return proc_create_data(name, mode, parent, proc_fops, NULL);
56549 }
56550
56551 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
56552 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
56553 +{
56554 +#ifdef CONFIG_GRKERNSEC_PROC_USER
56555 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
56556 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
56557 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
56558 +#else
56559 + return proc_create_data(name, mode, parent, proc_fops, NULL);
56560 +#endif
56561 +}
56562 +
56563 +
56564 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
56565 mode_t mode, struct proc_dir_entry *base,
56566 read_proc_t *read_proc, void * data)
56567 diff -urNp linux-2.6.32.41/include/linux/ptrace.h linux-2.6.32.41/include/linux/ptrace.h
56568 --- linux-2.6.32.41/include/linux/ptrace.h 2011-03-27 14:31:47.000000000 -0400
56569 +++ linux-2.6.32.41/include/linux/ptrace.h 2011-04-17 15:56:46.000000000 -0400
56570 @@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_
56571 extern void exit_ptrace(struct task_struct *tracer);
56572 #define PTRACE_MODE_READ 1
56573 #define PTRACE_MODE_ATTACH 2
56574 -/* Returns 0 on success, -errno on denial. */
56575 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
56576 /* Returns true on success, false on denial. */
56577 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
56578 +/* Returns true on success, false on denial. */
56579 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
56580
56581 static inline int ptrace_reparented(struct task_struct *child)
56582 {
56583 diff -urNp linux-2.6.32.41/include/linux/random.h linux-2.6.32.41/include/linux/random.h
56584 --- linux-2.6.32.41/include/linux/random.h 2011-03-27 14:31:47.000000000 -0400
56585 +++ linux-2.6.32.41/include/linux/random.h 2011-04-17 15:56:46.000000000 -0400
56586 @@ -74,6 +74,11 @@ unsigned long randomize_range(unsigned l
56587 u32 random32(void);
56588 void srandom32(u32 seed);
56589
56590 +static inline unsigned long pax_get_random_long(void)
56591 +{
56592 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
56593 +}
56594 +
56595 #endif /* __KERNEL___ */
56596
56597 #endif /* _LINUX_RANDOM_H */
56598 diff -urNp linux-2.6.32.41/include/linux/reboot.h linux-2.6.32.41/include/linux/reboot.h
56599 --- linux-2.6.32.41/include/linux/reboot.h 2011-03-27 14:31:47.000000000 -0400
56600 +++ linux-2.6.32.41/include/linux/reboot.h 2011-05-22 23:02:06.000000000 -0400
56601 @@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
56602 * Architecture-specific implementations of sys_reboot commands.
56603 */
56604
56605 -extern void machine_restart(char *cmd);
56606 -extern void machine_halt(void);
56607 -extern void machine_power_off(void);
56608 +extern void machine_restart(char *cmd) __noreturn;
56609 +extern void machine_halt(void) __noreturn;
56610 +extern void machine_power_off(void) __noreturn;
56611
56612 extern void machine_shutdown(void);
56613 struct pt_regs;
56614 @@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
56615 */
56616
56617 extern void kernel_restart_prepare(char *cmd);
56618 -extern void kernel_restart(char *cmd);
56619 -extern void kernel_halt(void);
56620 -extern void kernel_power_off(void);
56621 +extern void kernel_restart(char *cmd) __noreturn;
56622 +extern void kernel_halt(void) __noreturn;
56623 +extern void kernel_power_off(void) __noreturn;
56624
56625 void ctrl_alt_del(void);
56626
56627 @@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
56628 * Emergency restart, callable from an interrupt handler.
56629 */
56630
56631 -extern void emergency_restart(void);
56632 +extern void emergency_restart(void) __noreturn;
56633 #include <asm/emergency-restart.h>
56634
56635 #endif
56636 diff -urNp linux-2.6.32.41/include/linux/reiserfs_fs.h linux-2.6.32.41/include/linux/reiserfs_fs.h
56637 --- linux-2.6.32.41/include/linux/reiserfs_fs.h 2011-03-27 14:31:47.000000000 -0400
56638 +++ linux-2.6.32.41/include/linux/reiserfs_fs.h 2011-04-17 15:56:46.000000000 -0400
56639 @@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset
56640 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
56641
56642 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
56643 -#define get_generation(s) atomic_read (&fs_generation(s))
56644 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
56645 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
56646 #define __fs_changed(gen,s) (gen != get_generation (s))
56647 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
56648 @@ -1534,24 +1534,24 @@ static inline struct super_block *sb_fro
56649 */
56650
56651 struct item_operations {
56652 - int (*bytes_number) (struct item_head * ih, int block_size);
56653 - void (*decrement_key) (struct cpu_key *);
56654 - int (*is_left_mergeable) (struct reiserfs_key * ih,
56655 + int (* const bytes_number) (struct item_head * ih, int block_size);
56656 + void (* const decrement_key) (struct cpu_key *);
56657 + int (* const is_left_mergeable) (struct reiserfs_key * ih,
56658 unsigned long bsize);
56659 - void (*print_item) (struct item_head *, char *item);
56660 - void (*check_item) (struct item_head *, char *item);
56661 + void (* const print_item) (struct item_head *, char *item);
56662 + void (* const check_item) (struct item_head *, char *item);
56663
56664 - int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
56665 + int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
56666 int is_affected, int insert_size);
56667 - int (*check_left) (struct virtual_item * vi, int free,
56668 + int (* const check_left) (struct virtual_item * vi, int free,
56669 int start_skip, int end_skip);
56670 - int (*check_right) (struct virtual_item * vi, int free);
56671 - int (*part_size) (struct virtual_item * vi, int from, int to);
56672 - int (*unit_num) (struct virtual_item * vi);
56673 - void (*print_vi) (struct virtual_item * vi);
56674 + int (* const check_right) (struct virtual_item * vi, int free);
56675 + int (* const part_size) (struct virtual_item * vi, int from, int to);
56676 + int (* const unit_num) (struct virtual_item * vi);
56677 + void (* const print_vi) (struct virtual_item * vi);
56678 };
56679
56680 -extern struct item_operations *item_ops[TYPE_ANY + 1];
56681 +extern const struct item_operations * const item_ops[TYPE_ANY + 1];
56682
56683 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
56684 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
56685 diff -urNp linux-2.6.32.41/include/linux/reiserfs_fs_sb.h linux-2.6.32.41/include/linux/reiserfs_fs_sb.h
56686 --- linux-2.6.32.41/include/linux/reiserfs_fs_sb.h 2011-03-27 14:31:47.000000000 -0400
56687 +++ linux-2.6.32.41/include/linux/reiserfs_fs_sb.h 2011-04-17 15:56:46.000000000 -0400
56688 @@ -377,7 +377,7 @@ struct reiserfs_sb_info {
56689 /* Comment? -Hans */
56690 wait_queue_head_t s_wait;
56691 /* To be obsoleted soon by per buffer seals.. -Hans */
56692 - atomic_t s_generation_counter; // increased by one every time the
56693 + atomic_unchecked_t s_generation_counter; // increased by one every time the
56694 // tree gets re-balanced
56695 unsigned long s_properties; /* File system properties. Currently holds
56696 on-disk FS format */
56697 diff -urNp linux-2.6.32.41/include/linux/sched.h linux-2.6.32.41/include/linux/sched.h
56698 --- linux-2.6.32.41/include/linux/sched.h 2011-03-27 14:31:47.000000000 -0400
56699 +++ linux-2.6.32.41/include/linux/sched.h 2011-06-04 20:42:54.000000000 -0400
56700 @@ -101,6 +101,7 @@ struct bio;
56701 struct fs_struct;
56702 struct bts_context;
56703 struct perf_event_context;
56704 +struct linux_binprm;
56705
56706 /*
56707 * List of flags we want to share for kernel threads,
56708 @@ -350,7 +351,7 @@ extern signed long schedule_timeout_kill
56709 extern signed long schedule_timeout_uninterruptible(signed long timeout);
56710 asmlinkage void __schedule(void);
56711 asmlinkage void schedule(void);
56712 -extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
56713 +extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
56714
56715 struct nsproxy;
56716 struct user_namespace;
56717 @@ -371,9 +372,12 @@ struct user_namespace;
56718 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
56719
56720 extern int sysctl_max_map_count;
56721 +extern unsigned long sysctl_heap_stack_gap;
56722
56723 #include <linux/aio.h>
56724
56725 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
56726 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
56727 extern unsigned long
56728 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
56729 unsigned long, unsigned long);
56730 @@ -666,6 +670,16 @@ struct signal_struct {
56731 struct tty_audit_buf *tty_audit_buf;
56732 #endif
56733
56734 +#ifdef CONFIG_GRKERNSEC
56735 + u32 curr_ip;
56736 + u32 saved_ip;
56737 + u32 gr_saddr;
56738 + u32 gr_daddr;
56739 + u16 gr_sport;
56740 + u16 gr_dport;
56741 + u8 used_accept:1;
56742 +#endif
56743 +
56744 int oom_adj; /* OOM kill score adjustment (bit shift) */
56745 };
56746
56747 @@ -723,6 +737,11 @@ struct user_struct {
56748 struct key *session_keyring; /* UID's default session keyring */
56749 #endif
56750
56751 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
56752 + unsigned int banned;
56753 + unsigned long ban_expires;
56754 +#endif
56755 +
56756 /* Hash table maintenance information */
56757 struct hlist_node uidhash_node;
56758 uid_t uid;
56759 @@ -1328,8 +1347,8 @@ struct task_struct {
56760 struct list_head thread_group;
56761
56762 struct completion *vfork_done; /* for vfork() */
56763 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
56764 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
56765 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
56766 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
56767
56768 cputime_t utime, stime, utimescaled, stimescaled;
56769 cputime_t gtime;
56770 @@ -1343,16 +1362,6 @@ struct task_struct {
56771 struct task_cputime cputime_expires;
56772 struct list_head cpu_timers[3];
56773
56774 -/* process credentials */
56775 - const struct cred *real_cred; /* objective and real subjective task
56776 - * credentials (COW) */
56777 - const struct cred *cred; /* effective (overridable) subjective task
56778 - * credentials (COW) */
56779 - struct mutex cred_guard_mutex; /* guard against foreign influences on
56780 - * credential calculations
56781 - * (notably. ptrace) */
56782 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
56783 -
56784 char comm[TASK_COMM_LEN]; /* executable name excluding path
56785 - access with [gs]et_task_comm (which lock
56786 it with task_lock())
56787 @@ -1369,6 +1378,10 @@ struct task_struct {
56788 #endif
56789 /* CPU-specific state of this task */
56790 struct thread_struct thread;
56791 +/* thread_info moved to task_struct */
56792 +#ifdef CONFIG_X86
56793 + struct thread_info tinfo;
56794 +#endif
56795 /* filesystem information */
56796 struct fs_struct *fs;
56797 /* open file information */
56798 @@ -1436,6 +1449,15 @@ struct task_struct {
56799 int hardirq_context;
56800 int softirq_context;
56801 #endif
56802 +
56803 +/* process credentials */
56804 + const struct cred *real_cred; /* objective and real subjective task
56805 + * credentials (COW) */
56806 + struct mutex cred_guard_mutex; /* guard against foreign influences on
56807 + * credential calculations
56808 + * (notably. ptrace) */
56809 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
56810 +
56811 #ifdef CONFIG_LOCKDEP
56812 # define MAX_LOCK_DEPTH 48UL
56813 u64 curr_chain_key;
56814 @@ -1456,6 +1478,9 @@ struct task_struct {
56815
56816 struct backing_dev_info *backing_dev_info;
56817
56818 + const struct cred *cred; /* effective (overridable) subjective task
56819 + * credentials (COW) */
56820 +
56821 struct io_context *io_context;
56822
56823 unsigned long ptrace_message;
56824 @@ -1519,6 +1544,21 @@ struct task_struct {
56825 unsigned long default_timer_slack_ns;
56826
56827 struct list_head *scm_work_list;
56828 +
56829 +#ifdef CONFIG_GRKERNSEC
56830 + /* grsecurity */
56831 + struct dentry *gr_chroot_dentry;
56832 + struct acl_subject_label *acl;
56833 + struct acl_role_label *role;
56834 + struct file *exec_file;
56835 + u16 acl_role_id;
56836 + /* is this the task that authenticated to the special role */
56837 + u8 acl_sp_role;
56838 + u8 is_writable;
56839 + u8 brute;
56840 + u8 gr_is_chrooted;
56841 +#endif
56842 +
56843 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
56844 /* Index of current stored adress in ret_stack */
56845 int curr_ret_stack;
56846 @@ -1542,6 +1582,57 @@ struct task_struct {
56847 #endif /* CONFIG_TRACING */
56848 };
56849
56850 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
56851 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
56852 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
56853 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
56854 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
56855 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
56856 +
56857 +#ifdef CONFIG_PAX_SOFTMODE
56858 +extern unsigned int pax_softmode;
56859 +#endif
56860 +
56861 +extern int pax_check_flags(unsigned long *);
56862 +
56863 +/* if tsk != current then task_lock must be held on it */
56864 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
56865 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
56866 +{
56867 + if (likely(tsk->mm))
56868 + return tsk->mm->pax_flags;
56869 + else
56870 + return 0UL;
56871 +}
56872 +
56873 +/* if tsk != current then task_lock must be held on it */
56874 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
56875 +{
56876 + if (likely(tsk->mm)) {
56877 + tsk->mm->pax_flags = flags;
56878 + return 0;
56879 + }
56880 + return -EINVAL;
56881 +}
56882 +#endif
56883 +
56884 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
56885 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
56886 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
56887 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
56888 +#endif
56889 +
56890 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
56891 +void pax_report_insns(void *pc, void *sp);
56892 +void pax_report_refcount_overflow(struct pt_regs *regs);
56893 +void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type);
56894 +
56895 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
56896 +extern void pax_track_stack(void);
56897 +#else
56898 +static inline void pax_track_stack(void) {}
56899 +#endif
56900 +
56901 /* Future-safe accessor for struct task_struct's cpus_allowed. */
56902 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
56903
56904 @@ -1978,7 +2069,9 @@ void yield(void);
56905 extern struct exec_domain default_exec_domain;
56906
56907 union thread_union {
56908 +#ifndef CONFIG_X86
56909 struct thread_info thread_info;
56910 +#endif
56911 unsigned long stack[THREAD_SIZE/sizeof(long)];
56912 };
56913
56914 @@ -2155,7 +2248,7 @@ extern void __cleanup_sighand(struct sig
56915 extern void exit_itimers(struct signal_struct *);
56916 extern void flush_itimer_signals(void);
56917
56918 -extern NORET_TYPE void do_group_exit(int);
56919 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
56920
56921 extern void daemonize(const char *, ...);
56922 extern int allow_signal(int);
56923 @@ -2284,13 +2377,17 @@ static inline unsigned long *end_of_stac
56924
56925 #endif
56926
56927 -static inline int object_is_on_stack(void *obj)
56928 +static inline int object_starts_on_stack(void *obj)
56929 {
56930 - void *stack = task_stack_page(current);
56931 + const void *stack = task_stack_page(current);
56932
56933 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
56934 }
56935
56936 +#ifdef CONFIG_PAX_USERCOPY
56937 +extern int object_is_on_stack(const void *obj, unsigned long len);
56938 +#endif
56939 +
56940 extern void thread_info_cache_init(void);
56941
56942 #ifdef CONFIG_DEBUG_STACK_USAGE
56943 diff -urNp linux-2.6.32.41/include/linux/screen_info.h linux-2.6.32.41/include/linux/screen_info.h
56944 --- linux-2.6.32.41/include/linux/screen_info.h 2011-03-27 14:31:47.000000000 -0400
56945 +++ linux-2.6.32.41/include/linux/screen_info.h 2011-04-17 15:56:46.000000000 -0400
56946 @@ -42,7 +42,8 @@ struct screen_info {
56947 __u16 pages; /* 0x32 */
56948 __u16 vesa_attributes; /* 0x34 */
56949 __u32 capabilities; /* 0x36 */
56950 - __u8 _reserved[6]; /* 0x3a */
56951 + __u16 vesapm_size; /* 0x3a */
56952 + __u8 _reserved[4]; /* 0x3c */
56953 } __attribute__((packed));
56954
56955 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
56956 diff -urNp linux-2.6.32.41/include/linux/security.h linux-2.6.32.41/include/linux/security.h
56957 --- linux-2.6.32.41/include/linux/security.h 2011-03-27 14:31:47.000000000 -0400
56958 +++ linux-2.6.32.41/include/linux/security.h 2011-04-17 15:56:46.000000000 -0400
56959 @@ -34,6 +34,7 @@
56960 #include <linux/key.h>
56961 #include <linux/xfrm.h>
56962 #include <linux/gfp.h>
56963 +#include <linux/grsecurity.h>
56964 #include <net/flow.h>
56965
56966 /* Maximum number of letters for an LSM name string */
56967 diff -urNp linux-2.6.32.41/include/linux/shm.h linux-2.6.32.41/include/linux/shm.h
56968 --- linux-2.6.32.41/include/linux/shm.h 2011-03-27 14:31:47.000000000 -0400
56969 +++ linux-2.6.32.41/include/linux/shm.h 2011-04-17 15:56:46.000000000 -0400
56970 @@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
56971 pid_t shm_cprid;
56972 pid_t shm_lprid;
56973 struct user_struct *mlock_user;
56974 +#ifdef CONFIG_GRKERNSEC
56975 + time_t shm_createtime;
56976 + pid_t shm_lapid;
56977 +#endif
56978 };
56979
56980 /* shm_mode upper byte flags */
56981 diff -urNp linux-2.6.32.41/include/linux/skbuff.h linux-2.6.32.41/include/linux/skbuff.h
56982 --- linux-2.6.32.41/include/linux/skbuff.h 2011-03-27 14:31:47.000000000 -0400
56983 +++ linux-2.6.32.41/include/linux/skbuff.h 2011-05-04 17:56:20.000000000 -0400
56984 @@ -544,7 +544,7 @@ static inline union skb_shared_tx *skb_t
56985 */
56986 static inline int skb_queue_empty(const struct sk_buff_head *list)
56987 {
56988 - return list->next == (struct sk_buff *)list;
56989 + return list->next == (const struct sk_buff *)list;
56990 }
56991
56992 /**
56993 @@ -557,7 +557,7 @@ static inline int skb_queue_empty(const
56994 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
56995 const struct sk_buff *skb)
56996 {
56997 - return (skb->next == (struct sk_buff *) list);
56998 + return (skb->next == (const struct sk_buff *) list);
56999 }
57000
57001 /**
57002 @@ -570,7 +570,7 @@ static inline bool skb_queue_is_last(con
57003 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
57004 const struct sk_buff *skb)
57005 {
57006 - return (skb->prev == (struct sk_buff *) list);
57007 + return (skb->prev == (const struct sk_buff *) list);
57008 }
57009
57010 /**
57011 @@ -1367,7 +1367,7 @@ static inline int skb_network_offset(con
57012 * headroom, you should not reduce this.
57013 */
57014 #ifndef NET_SKB_PAD
57015 -#define NET_SKB_PAD 32
57016 +#define NET_SKB_PAD (_AC(32,U))
57017 #endif
57018
57019 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
57020 diff -urNp linux-2.6.32.41/include/linux/slab_def.h linux-2.6.32.41/include/linux/slab_def.h
57021 --- linux-2.6.32.41/include/linux/slab_def.h 2011-03-27 14:31:47.000000000 -0400
57022 +++ linux-2.6.32.41/include/linux/slab_def.h 2011-05-04 17:56:28.000000000 -0400
57023 @@ -69,10 +69,10 @@ struct kmem_cache {
57024 unsigned long node_allocs;
57025 unsigned long node_frees;
57026 unsigned long node_overflow;
57027 - atomic_t allochit;
57028 - atomic_t allocmiss;
57029 - atomic_t freehit;
57030 - atomic_t freemiss;
57031 + atomic_unchecked_t allochit;
57032 + atomic_unchecked_t allocmiss;
57033 + atomic_unchecked_t freehit;
57034 + atomic_unchecked_t freemiss;
57035
57036 /*
57037 * If debugging is enabled, then the allocator can add additional
57038 diff -urNp linux-2.6.32.41/include/linux/slab.h linux-2.6.32.41/include/linux/slab.h
57039 --- linux-2.6.32.41/include/linux/slab.h 2011-03-27 14:31:47.000000000 -0400
57040 +++ linux-2.6.32.41/include/linux/slab.h 2011-04-17 15:56:46.000000000 -0400
57041 @@ -11,12 +11,20 @@
57042
57043 #include <linux/gfp.h>
57044 #include <linux/types.h>
57045 +#include <linux/err.h>
57046
57047 /*
57048 * Flags to pass to kmem_cache_create().
57049 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
57050 */
57051 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
57052 +
57053 +#ifdef CONFIG_PAX_USERCOPY
57054 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
57055 +#else
57056 +#define SLAB_USERCOPY 0x00000000UL
57057 +#endif
57058 +
57059 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
57060 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
57061 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
57062 @@ -82,10 +90,13 @@
57063 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
57064 * Both make kfree a no-op.
57065 */
57066 -#define ZERO_SIZE_PTR ((void *)16)
57067 +#define ZERO_SIZE_PTR \
57068 +({ \
57069 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
57070 + (void *)(-MAX_ERRNO-1L); \
57071 +})
57072
57073 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
57074 - (unsigned long)ZERO_SIZE_PTR)
57075 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
57076
57077 /*
57078 * struct kmem_cache related prototypes
57079 @@ -138,6 +149,7 @@ void * __must_check krealloc(const void
57080 void kfree(const void *);
57081 void kzfree(const void *);
57082 size_t ksize(const void *);
57083 +void check_object_size(const void *ptr, unsigned long n, bool to);
57084
57085 /*
57086 * Allocator specific definitions. These are mainly used to establish optimized
57087 @@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t
57088
57089 void __init kmem_cache_init_late(void);
57090
57091 +#define kmalloc(x, y) \
57092 +({ \
57093 + void *___retval; \
57094 + intoverflow_t ___x = (intoverflow_t)x; \
57095 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
57096 + ___retval = NULL; \
57097 + else \
57098 + ___retval = kmalloc((size_t)___x, (y)); \
57099 + ___retval; \
57100 +})
57101 +
57102 +#define kmalloc_node(x, y, z) \
57103 +({ \
57104 + void *___retval; \
57105 + intoverflow_t ___x = (intoverflow_t)x; \
57106 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
57107 + ___retval = NULL; \
57108 + else \
57109 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
57110 + ___retval; \
57111 +})
57112 +
57113 +#define kzalloc(x, y) \
57114 +({ \
57115 + void *___retval; \
57116 + intoverflow_t ___x = (intoverflow_t)x; \
57117 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
57118 + ___retval = NULL; \
57119 + else \
57120 + ___retval = kzalloc((size_t)___x, (y)); \
57121 + ___retval; \
57122 +})
57123 +
57124 #endif /* _LINUX_SLAB_H */
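The kmalloc/kzalloc wrappers above all follow one guard pattern: widen the requested size to intoverflow_t, refuse any value the underlying allocator's size argument could not represent, and only then call through to the real function. A rough userspace analogue, for illustration only (checked_malloc and ALLOC_MAX are invented names, and intoverflow_t is assumed to be an integer type wider than the allocator's size type, defined elsewhere in this patch):

#include <stdio.h>
#include <stdlib.h>

/* Pretend the real allocator only accepts 32-bit sizes, so an overflowed
 * size computation becomes visible when held in a 64-bit temporary. */
#define ALLOC_MAX 0xffffffffUL

/* GNU C statement expression, mirroring the wrappers in the hunk above:
 * oversized requests short-circuit to NULL instead of being truncated. */
#define checked_malloc(x)                                       \
({                                                              \
        void *___retval;                                        \
        unsigned long long ___x = (unsigned long long)(x);      \
        if (___x > ALLOC_MAX) {                                 \
                fprintf(stderr, "malloc size overflow\n");      \
                ___retval = NULL;                               \
        } else                                                  \
                ___retval = malloc((size_t)___x);               \
        ___retval;                                              \
})

int main(void)
{
        void *p = checked_malloc(128);                           /* accepted */
        void *q = checked_malloc(8ULL * 1024 * 1024 * 1024);     /* rejected: 8 GiB > ALLOC_MAX */

        printf("p=%p q=%p\n", p, q);
        free(p);
        return 0;
}

The statement-expression form is what lets the wrapper keep the original call-site syntax while still returning NULL on overflow.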
57125 diff -urNp linux-2.6.32.41/include/linux/slub_def.h linux-2.6.32.41/include/linux/slub_def.h
57126 --- linux-2.6.32.41/include/linux/slub_def.h 2011-03-27 14:31:47.000000000 -0400
57127 +++ linux-2.6.32.41/include/linux/slub_def.h 2011-04-17 15:56:46.000000000 -0400
57128 @@ -86,7 +86,7 @@ struct kmem_cache {
57129 struct kmem_cache_order_objects max;
57130 struct kmem_cache_order_objects min;
57131 gfp_t allocflags; /* gfp flags to use on each alloc */
57132 - int refcount; /* Refcount for slab cache destroy */
57133 + atomic_t refcount; /* Refcount for slab cache destroy */
57134 void (*ctor)(void *);
57135 int inuse; /* Offset to metadata */
57136 int align; /* Alignment */
57137 diff -urNp linux-2.6.32.41/include/linux/sonet.h linux-2.6.32.41/include/linux/sonet.h
57138 --- linux-2.6.32.41/include/linux/sonet.h 2011-03-27 14:31:47.000000000 -0400
57139 +++ linux-2.6.32.41/include/linux/sonet.h 2011-04-17 15:56:46.000000000 -0400
57140 @@ -61,7 +61,7 @@ struct sonet_stats {
57141 #include <asm/atomic.h>
57142
57143 struct k_sonet_stats {
57144 -#define __HANDLE_ITEM(i) atomic_t i
57145 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
57146 __SONET_ITEMS
57147 #undef __HANDLE_ITEM
57148 };
57149 diff -urNp linux-2.6.32.41/include/linux/sunrpc/clnt.h linux-2.6.32.41/include/linux/sunrpc/clnt.h
57150 --- linux-2.6.32.41/include/linux/sunrpc/clnt.h 2011-03-27 14:31:47.000000000 -0400
57151 +++ linux-2.6.32.41/include/linux/sunrpc/clnt.h 2011-04-17 15:56:46.000000000 -0400
57152 @@ -167,9 +167,9 @@ static inline unsigned short rpc_get_por
57153 {
57154 switch (sap->sa_family) {
57155 case AF_INET:
57156 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
57157 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
57158 case AF_INET6:
57159 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
57160 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
57161 }
57162 return 0;
57163 }
57164 @@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const
57165 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
57166 const struct sockaddr *src)
57167 {
57168 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
57169 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
57170 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
57171
57172 dsin->sin_family = ssin->sin_family;
57173 @@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const
57174 if (sa->sa_family != AF_INET6)
57175 return 0;
57176
57177 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
57178 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
57179 }
57180
57181 #endif /* __KERNEL__ */
57182 diff -urNp linux-2.6.32.41/include/linux/sunrpc/svc_rdma.h linux-2.6.32.41/include/linux/sunrpc/svc_rdma.h
57183 --- linux-2.6.32.41/include/linux/sunrpc/svc_rdma.h 2011-03-27 14:31:47.000000000 -0400
57184 +++ linux-2.6.32.41/include/linux/sunrpc/svc_rdma.h 2011-05-04 17:56:28.000000000 -0400
57185 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
57186 extern unsigned int svcrdma_max_requests;
57187 extern unsigned int svcrdma_max_req_size;
57188
57189 -extern atomic_t rdma_stat_recv;
57190 -extern atomic_t rdma_stat_read;
57191 -extern atomic_t rdma_stat_write;
57192 -extern atomic_t rdma_stat_sq_starve;
57193 -extern atomic_t rdma_stat_rq_starve;
57194 -extern atomic_t rdma_stat_rq_poll;
57195 -extern atomic_t rdma_stat_rq_prod;
57196 -extern atomic_t rdma_stat_sq_poll;
57197 -extern atomic_t rdma_stat_sq_prod;
57198 +extern atomic_unchecked_t rdma_stat_recv;
57199 +extern atomic_unchecked_t rdma_stat_read;
57200 +extern atomic_unchecked_t rdma_stat_write;
57201 +extern atomic_unchecked_t rdma_stat_sq_starve;
57202 +extern atomic_unchecked_t rdma_stat_rq_starve;
57203 +extern atomic_unchecked_t rdma_stat_rq_poll;
57204 +extern atomic_unchecked_t rdma_stat_rq_prod;
57205 +extern atomic_unchecked_t rdma_stat_sq_poll;
57206 +extern atomic_unchecked_t rdma_stat_sq_prod;
57207
57208 #define RPCRDMA_VERSION 1
57209
57210 diff -urNp linux-2.6.32.41/include/linux/suspend.h linux-2.6.32.41/include/linux/suspend.h
57211 --- linux-2.6.32.41/include/linux/suspend.h 2011-03-27 14:31:47.000000000 -0400
57212 +++ linux-2.6.32.41/include/linux/suspend.h 2011-04-17 15:56:46.000000000 -0400
57213 @@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
57214 * which require special recovery actions in that situation.
57215 */
57216 struct platform_suspend_ops {
57217 - int (*valid)(suspend_state_t state);
57218 - int (*begin)(suspend_state_t state);
57219 - int (*prepare)(void);
57220 - int (*prepare_late)(void);
57221 - int (*enter)(suspend_state_t state);
57222 - void (*wake)(void);
57223 - void (*finish)(void);
57224 - void (*end)(void);
57225 - void (*recover)(void);
57226 + int (* const valid)(suspend_state_t state);
57227 + int (* const begin)(suspend_state_t state);
57228 + int (* const prepare)(void);
57229 + int (* const prepare_late)(void);
57230 + int (* const enter)(suspend_state_t state);
57231 + void (* const wake)(void);
57232 + void (* const finish)(void);
57233 + void (* const end)(void);
57234 + void (* const recover)(void);
57235 };
57236
57237 #ifdef CONFIG_SUSPEND
57238 @@ -120,7 +120,7 @@ struct platform_suspend_ops {
57239 * suspend_set_ops - set platform dependent suspend operations
57240 * @ops: The new suspend operations to set.
57241 */
57242 -extern void suspend_set_ops(struct platform_suspend_ops *ops);
57243 +extern void suspend_set_ops(const struct platform_suspend_ops *ops);
57244 extern int suspend_valid_only_mem(suspend_state_t state);
57245
57246 /**
57247 @@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t st
57248 #else /* !CONFIG_SUSPEND */
57249 #define suspend_valid_only_mem NULL
57250
57251 -static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
57252 +static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
57253 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
57254 #endif /* !CONFIG_SUSPEND */
57255
57256 @@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone
57257 * platforms which require special recovery actions in that situation.
57258 */
57259 struct platform_hibernation_ops {
57260 - int (*begin)(void);
57261 - void (*end)(void);
57262 - int (*pre_snapshot)(void);
57263 - void (*finish)(void);
57264 - int (*prepare)(void);
57265 - int (*enter)(void);
57266 - void (*leave)(void);
57267 - int (*pre_restore)(void);
57268 - void (*restore_cleanup)(void);
57269 - void (*recover)(void);
57270 + int (* const begin)(void);
57271 + void (* const end)(void);
57272 + int (* const pre_snapshot)(void);
57273 + void (* const finish)(void);
57274 + int (* const prepare)(void);
57275 + int (* const enter)(void);
57276 + void (* const leave)(void);
57277 + int (* const pre_restore)(void);
57278 + void (* const restore_cleanup)(void);
57279 + void (* const recover)(void);
57280 };
57281
57282 #ifdef CONFIG_HIBERNATION
57283 @@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct
57284 extern void swsusp_unset_page_free(struct page *);
57285 extern unsigned long get_safe_page(gfp_t gfp_mask);
57286
57287 -extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
57288 +extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
57289 extern int hibernate(void);
57290 extern bool system_entering_hibernation(void);
57291 #else /* CONFIG_HIBERNATION */
57292 @@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidd
57293 static inline void swsusp_set_page_free(struct page *p) {}
57294 static inline void swsusp_unset_page_free(struct page *p) {}
57295
57296 -static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
57297 +static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
57298 static inline int hibernate(void) { return -ENOSYS; }
57299 static inline bool system_entering_hibernation(void) { return false; }
57300 #endif /* CONFIG_HIBERNATION */
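Const-qualifying the suspend and hibernation callbacks above is the same hardening move made throughout this patch: once every member of an ops structure is const, the structure can be defined as a const object and end up in read-only memory, so its function pointers cannot be retargeted after initialization. A minimal sketch of the pattern (struct and function names are illustrative, not taken from the kernel):

#include <stdio.h>

struct ops {
        int  (* const prepare)(void);
        void (* const finish)(void);
};

static int my_prepare(void) { printf("prepare\n"); return 0; }
static void my_finish(void)  { printf("finish\n"); }

/* A const object whose members are also const can live in .rodata;
 * neither the object nor the individual pointer slots are writable. */
static const struct ops my_ops = {
        .prepare = my_prepare,
        .finish  = my_finish,
};

int main(void)
{
        if (my_ops.prepare() == 0)
                my_ops.finish();
        /* my_ops.prepare = NULL;  would not compile: the member is const */
        return 0;
}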
57301 diff -urNp linux-2.6.32.41/include/linux/sysctl.h linux-2.6.32.41/include/linux/sysctl.h
57302 --- linux-2.6.32.41/include/linux/sysctl.h 2011-03-27 14:31:47.000000000 -0400
57303 +++ linux-2.6.32.41/include/linux/sysctl.h 2011-04-17 15:56:46.000000000 -0400
57304 @@ -164,7 +164,11 @@ enum
57305 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
57306 };
57307
57308 -
57309 +#ifdef CONFIG_PAX_SOFTMODE
57310 +enum {
57311 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
57312 +};
57313 +#endif
57314
57315 /* CTL_VM names: */
57316 enum
57317 @@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_tab
57318
57319 extern int proc_dostring(struct ctl_table *, int,
57320 void __user *, size_t *, loff_t *);
57321 +extern int proc_dostring_modpriv(struct ctl_table *, int,
57322 + void __user *, size_t *, loff_t *);
57323 extern int proc_dointvec(struct ctl_table *, int,
57324 void __user *, size_t *, loff_t *);
57325 extern int proc_dointvec_minmax(struct ctl_table *, int,
57326 @@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name,
57327
57328 extern ctl_handler sysctl_data;
57329 extern ctl_handler sysctl_string;
57330 +extern ctl_handler sysctl_string_modpriv;
57331 extern ctl_handler sysctl_intvec;
57332 extern ctl_handler sysctl_jiffies;
57333 extern ctl_handler sysctl_ms_jiffies;
57334 diff -urNp linux-2.6.32.41/include/linux/sysfs.h linux-2.6.32.41/include/linux/sysfs.h
57335 --- linux-2.6.32.41/include/linux/sysfs.h 2011-03-27 14:31:47.000000000 -0400
57336 +++ linux-2.6.32.41/include/linux/sysfs.h 2011-04-17 15:56:46.000000000 -0400
57337 @@ -75,8 +75,8 @@ struct bin_attribute {
57338 };
57339
57340 struct sysfs_ops {
57341 - ssize_t (*show)(struct kobject *, struct attribute *,char *);
57342 - ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
57343 + ssize_t (* const show)(struct kobject *, struct attribute *,char *);
57344 + ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
57345 };
57346
57347 struct sysfs_dirent;
57348 diff -urNp linux-2.6.32.41/include/linux/thread_info.h linux-2.6.32.41/include/linux/thread_info.h
57349 --- linux-2.6.32.41/include/linux/thread_info.h 2011-03-27 14:31:47.000000000 -0400
57350 +++ linux-2.6.32.41/include/linux/thread_info.h 2011-04-17 15:56:46.000000000 -0400
57351 @@ -23,7 +23,7 @@ struct restart_block {
57352 };
57353 /* For futex_wait and futex_wait_requeue_pi */
57354 struct {
57355 - u32 *uaddr;
57356 + u32 __user *uaddr;
57357 u32 val;
57358 u32 flags;
57359 u32 bitset;
57360 diff -urNp linux-2.6.32.41/include/linux/tty.h linux-2.6.32.41/include/linux/tty.h
57361 --- linux-2.6.32.41/include/linux/tty.h 2011-03-27 14:31:47.000000000 -0400
57362 +++ linux-2.6.32.41/include/linux/tty.h 2011-04-17 15:56:46.000000000 -0400
57363 @@ -13,6 +13,7 @@
57364 #include <linux/tty_driver.h>
57365 #include <linux/tty_ldisc.h>
57366 #include <linux/mutex.h>
57367 +#include <linux/poll.h>
57368
57369 #include <asm/system.h>
57370
57371 @@ -443,7 +444,6 @@ extern int tty_perform_flush(struct tty_
57372 extern dev_t tty_devnum(struct tty_struct *tty);
57373 extern void proc_clear_tty(struct task_struct *p);
57374 extern struct tty_struct *get_current_tty(void);
57375 -extern void tty_default_fops(struct file_operations *fops);
57376 extern struct tty_struct *alloc_tty_struct(void);
57377 extern void free_tty_struct(struct tty_struct *tty);
57378 extern void initialize_tty_struct(struct tty_struct *tty,
57379 @@ -493,6 +493,18 @@ extern void tty_ldisc_begin(void);
57380 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
57381 extern void tty_ldisc_enable(struct tty_struct *tty);
57382
57383 +/* tty_io.c */
57384 +extern ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
57385 +extern ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
57386 +extern unsigned int tty_poll(struct file *, poll_table *);
57387 +#ifdef CONFIG_COMPAT
57388 +extern long tty_compat_ioctl(struct file *file, unsigned int cmd,
57389 + unsigned long arg);
57390 +#else
57391 +#define tty_compat_ioctl NULL
57392 +#endif
57393 +extern int tty_release(struct inode *, struct file *);
57394 +extern int tty_fasync(int fd, struct file *filp, int on);
57395
57396 /* n_tty.c */
57397 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
57398 diff -urNp linux-2.6.32.41/include/linux/tty_ldisc.h linux-2.6.32.41/include/linux/tty_ldisc.h
57399 --- linux-2.6.32.41/include/linux/tty_ldisc.h 2011-03-27 14:31:47.000000000 -0400
57400 +++ linux-2.6.32.41/include/linux/tty_ldisc.h 2011-04-17 15:56:46.000000000 -0400
57401 @@ -139,7 +139,7 @@ struct tty_ldisc_ops {
57402
57403 struct module *owner;
57404
57405 - int refcount;
57406 + atomic_t refcount;
57407 };
57408
57409 struct tty_ldisc {
57410 diff -urNp linux-2.6.32.41/include/linux/types.h linux-2.6.32.41/include/linux/types.h
57411 --- linux-2.6.32.41/include/linux/types.h 2011-03-27 14:31:47.000000000 -0400
57412 +++ linux-2.6.32.41/include/linux/types.h 2011-04-17 15:56:46.000000000 -0400
57413 @@ -191,10 +191,26 @@ typedef struct {
57414 volatile int counter;
57415 } atomic_t;
57416
57417 +#ifdef CONFIG_PAX_REFCOUNT
57418 +typedef struct {
57419 + volatile int counter;
57420 +} atomic_unchecked_t;
57421 +#else
57422 +typedef atomic_t atomic_unchecked_t;
57423 +#endif
57424 +
57425 #ifdef CONFIG_64BIT
57426 typedef struct {
57427 volatile long counter;
57428 } atomic64_t;
57429 +
57430 +#ifdef CONFIG_PAX_REFCOUNT
57431 +typedef struct {
57432 + volatile long counter;
57433 +} atomic64_unchecked_t;
57434 +#else
57435 +typedef atomic64_t atomic64_unchecked_t;
57436 +#endif
57437 #endif
57438
57439 struct ustat {
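The second typedef above exists because, with PAX_REFCOUNT enabled, the ordinary atomic_t operations are instrumented to catch reference-count overflows, while atomic_unchecked_t keeps plain wrapping arithmetic for counters (statistics, generation numbers) where wraparound is harmless. A loose userspace sketch of that split, using C11 atomics instead of the kernel's per-architecture implementations (the function names are invented):

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_int counter; } checked_t;    /* analogue of atomic_t under PAX_REFCOUNT */
typedef struct { atomic_int counter; } unchecked_t;  /* analogue of atomic_unchecked_t */

/* Checked increment: refuse to wrap a reference count past INT_MAX. */
static int checked_inc(checked_t *v)
{
        int old = atomic_load(&v->counter);

        do {
                if (old == INT_MAX) {
                        fprintf(stderr, "refcount overflow detected\n");
                        return -1;      /* the in-kernel version reports this instead of wrapping */
                }
        } while (!atomic_compare_exchange_weak(&v->counter, &old, old + 1));

        return 0;
}

/* Unchecked increment: plain wrap-around semantics, fine for statistics. */
static void unchecked_inc(unchecked_t *v)
{
        atomic_fetch_add(&v->counter, 1);
}

int main(void)
{
        checked_t ref = { 0 };
        unchecked_t hits = { 0 };

        checked_inc(&ref);
        unchecked_inc(&hits);
        printf("ref=%d hits=%d\n", atomic_load(&ref.counter), atomic_load(&hits.counter));
        return 0;
}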
57440 diff -urNp linux-2.6.32.41/include/linux/uaccess.h linux-2.6.32.41/include/linux/uaccess.h
57441 --- linux-2.6.32.41/include/linux/uaccess.h 2011-03-27 14:31:47.000000000 -0400
57442 +++ linux-2.6.32.41/include/linux/uaccess.h 2011-04-17 15:56:46.000000000 -0400
57443 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
57444 long ret; \
57445 mm_segment_t old_fs = get_fs(); \
57446 \
57447 - set_fs(KERNEL_DS); \
57448 pagefault_disable(); \
57449 + set_fs(KERNEL_DS); \
57450 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
57451 - pagefault_enable(); \
57452 set_fs(old_fs); \
57453 + pagefault_enable(); \
57454 ret; \
57455 })
57456
57457 @@ -93,7 +93,7 @@ static inline unsigned long __copy_from_
57458 * Safely read from address @src to the buffer at @dst. If a kernel fault
57459 * happens, handle that and return -EFAULT.
57460 */
57461 -extern long probe_kernel_read(void *dst, void *src, size_t size);
57462 +extern long probe_kernel_read(void *dst, const void *src, size_t size);
57463
57464 /*
57465 * probe_kernel_write(): safely attempt to write to a location
57466 @@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst,
57467 * Safely write to address @dst from the buffer at @src. If a kernel fault
57468 * happens, handle that and return -EFAULT.
57469 */
57470 -extern long probe_kernel_write(void *dst, void *src, size_t size);
57471 +extern long probe_kernel_write(void *dst, const void *src, size_t size);
57472
57473 #endif /* __LINUX_UACCESS_H__ */
57474 diff -urNp linux-2.6.32.41/include/linux/unaligned/access_ok.h linux-2.6.32.41/include/linux/unaligned/access_ok.h
57475 --- linux-2.6.32.41/include/linux/unaligned/access_ok.h 2011-03-27 14:31:47.000000000 -0400
57476 +++ linux-2.6.32.41/include/linux/unaligned/access_ok.h 2011-04-17 15:56:46.000000000 -0400
57477 @@ -6,32 +6,32 @@
57478
57479 static inline u16 get_unaligned_le16(const void *p)
57480 {
57481 - return le16_to_cpup((__le16 *)p);
57482 + return le16_to_cpup((const __le16 *)p);
57483 }
57484
57485 static inline u32 get_unaligned_le32(const void *p)
57486 {
57487 - return le32_to_cpup((__le32 *)p);
57488 + return le32_to_cpup((const __le32 *)p);
57489 }
57490
57491 static inline u64 get_unaligned_le64(const void *p)
57492 {
57493 - return le64_to_cpup((__le64 *)p);
57494 + return le64_to_cpup((const __le64 *)p);
57495 }
57496
57497 static inline u16 get_unaligned_be16(const void *p)
57498 {
57499 - return be16_to_cpup((__be16 *)p);
57500 + return be16_to_cpup((const __be16 *)p);
57501 }
57502
57503 static inline u32 get_unaligned_be32(const void *p)
57504 {
57505 - return be32_to_cpup((__be32 *)p);
57506 + return be32_to_cpup((const __be32 *)p);
57507 }
57508
57509 static inline u64 get_unaligned_be64(const void *p)
57510 {
57511 - return be64_to_cpup((__be64 *)p);
57512 + return be64_to_cpup((const __be64 *)p);
57513 }
57514
57515 static inline void put_unaligned_le16(u16 val, void *p)
57516 diff -urNp linux-2.6.32.41/include/linux/vmalloc.h linux-2.6.32.41/include/linux/vmalloc.h
57517 --- linux-2.6.32.41/include/linux/vmalloc.h 2011-03-27 14:31:47.000000000 -0400
57518 +++ linux-2.6.32.41/include/linux/vmalloc.h 2011-04-17 15:56:46.000000000 -0400
57519 @@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
57520 #define VM_MAP 0x00000004 /* vmap()ed pages */
57521 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
57522 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
57523 +
57524 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
57525 +#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
57526 +#endif
57527 +
57528 /* bits [20..32] reserved for arch specific ioremap internals */
57529
57530 /*
57531 @@ -123,4 +128,81 @@ struct vm_struct **pcpu_get_vm_areas(con
57532
57533 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
57534
57535 +#define vmalloc(x) \
57536 +({ \
57537 + void *___retval; \
57538 + intoverflow_t ___x = (intoverflow_t)x; \
57539 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
57540 + ___retval = NULL; \
57541 + else \
57542 + ___retval = vmalloc((unsigned long)___x); \
57543 + ___retval; \
57544 +})
57545 +
57546 +#define __vmalloc(x, y, z) \
57547 +({ \
57548 + void *___retval; \
57549 + intoverflow_t ___x = (intoverflow_t)x; \
57550 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
57551 + ___retval = NULL; \
57552 + else \
57553 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
57554 + ___retval; \
57555 +})
57556 +
57557 +#define vmalloc_user(x) \
57558 +({ \
57559 + void *___retval; \
57560 + intoverflow_t ___x = (intoverflow_t)x; \
57561 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
57562 + ___retval = NULL; \
57563 + else \
57564 + ___retval = vmalloc_user((unsigned long)___x); \
57565 + ___retval; \
57566 +})
57567 +
57568 +#define vmalloc_exec(x) \
57569 +({ \
57570 + void *___retval; \
57571 + intoverflow_t ___x = (intoverflow_t)x; \
57572 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
57573 + ___retval = NULL; \
57574 + else \
57575 + ___retval = vmalloc_exec((unsigned long)___x); \
57576 + ___retval; \
57577 +})
57578 +
57579 +#define vmalloc_node(x, y) \
57580 +({ \
57581 + void *___retval; \
57582 + intoverflow_t ___x = (intoverflow_t)x; \
57583 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
57584 + ___retval = NULL; \
57585 + else \
57586 + ___retval = vmalloc_node((unsigned long)___x, (y));\
57587 + ___retval; \
57588 +})
57589 +
57590 +#define vmalloc_32(x) \
57591 +({ \
57592 + void *___retval; \
57593 + intoverflow_t ___x = (intoverflow_t)x; \
57594 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
57595 + ___retval = NULL; \
57596 + else \
57597 + ___retval = vmalloc_32((unsigned long)___x); \
57598 + ___retval; \
57599 +})
57600 +
57601 +#define vmalloc_32_user(x) \
57602 +({ \
57603 + void *___retval; \
57604 + intoverflow_t ___x = (intoverflow_t)x; \
57605 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
57606 + ___retval = NULL; \
57607 + else \
57608 + ___retval = vmalloc_32_user((unsigned long)___x);\
57609 + ___retval; \
57610 +})
57611 +
57612 #endif /* _LINUX_VMALLOC_H */
57613 diff -urNp linux-2.6.32.41/include/linux/vmstat.h linux-2.6.32.41/include/linux/vmstat.h
57614 --- linux-2.6.32.41/include/linux/vmstat.h 2011-03-27 14:31:47.000000000 -0400
57615 +++ linux-2.6.32.41/include/linux/vmstat.h 2011-04-17 15:56:46.000000000 -0400
57616 @@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(in
57617 /*
57618 * Zone based page accounting with per cpu differentials.
57619 */
57620 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
57621 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
57622
57623 static inline void zone_page_state_add(long x, struct zone *zone,
57624 enum zone_stat_item item)
57625 {
57626 - atomic_long_add(x, &zone->vm_stat[item]);
57627 - atomic_long_add(x, &vm_stat[item]);
57628 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
57629 + atomic_long_add_unchecked(x, &vm_stat[item]);
57630 }
57631
57632 static inline unsigned long global_page_state(enum zone_stat_item item)
57633 {
57634 - long x = atomic_long_read(&vm_stat[item]);
57635 + long x = atomic_long_read_unchecked(&vm_stat[item]);
57636 #ifdef CONFIG_SMP
57637 if (x < 0)
57638 x = 0;
57639 @@ -158,7 +158,7 @@ static inline unsigned long global_page_
57640 static inline unsigned long zone_page_state(struct zone *zone,
57641 enum zone_stat_item item)
57642 {
57643 - long x = atomic_long_read(&zone->vm_stat[item]);
57644 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
57645 #ifdef CONFIG_SMP
57646 if (x < 0)
57647 x = 0;
57648 @@ -175,7 +175,7 @@ static inline unsigned long zone_page_st
57649 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
57650 enum zone_stat_item item)
57651 {
57652 - long x = atomic_long_read(&zone->vm_stat[item]);
57653 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
57654
57655 #ifdef CONFIG_SMP
57656 int cpu;
57657 @@ -264,8 +264,8 @@ static inline void __mod_zone_page_state
57658
57659 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
57660 {
57661 - atomic_long_inc(&zone->vm_stat[item]);
57662 - atomic_long_inc(&vm_stat[item]);
57663 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
57664 + atomic_long_inc_unchecked(&vm_stat[item]);
57665 }
57666
57667 static inline void __inc_zone_page_state(struct page *page,
57668 @@ -276,8 +276,8 @@ static inline void __inc_zone_page_state
57669
57670 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
57671 {
57672 - atomic_long_dec(&zone->vm_stat[item]);
57673 - atomic_long_dec(&vm_stat[item]);
57674 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
57675 + atomic_long_dec_unchecked(&vm_stat[item]);
57676 }
57677
57678 static inline void __dec_zone_page_state(struct page *page,
57679 diff -urNp linux-2.6.32.41/include/media/v4l2-device.h linux-2.6.32.41/include/media/v4l2-device.h
57680 --- linux-2.6.32.41/include/media/v4l2-device.h 2011-03-27 14:31:47.000000000 -0400
57681 +++ linux-2.6.32.41/include/media/v4l2-device.h 2011-05-04 17:56:28.000000000 -0400
57682 @@ -71,7 +71,7 @@ int __must_check v4l2_device_register(st
57683 this function returns 0. If the name ends with a digit (e.g. cx18),
57684 then the name will be set to cx18-0 since cx180 looks really odd. */
57685 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
57686 - atomic_t *instance);
57687 + atomic_unchecked_t *instance);
57688
57689 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
57690 Since the parent disappears this ensures that v4l2_dev doesn't have an
57691 diff -urNp linux-2.6.32.41/include/net/flow.h linux-2.6.32.41/include/net/flow.h
57692 --- linux-2.6.32.41/include/net/flow.h 2011-03-27 14:31:47.000000000 -0400
57693 +++ linux-2.6.32.41/include/net/flow.h 2011-05-04 17:56:28.000000000 -0400
57694 @@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net
57695 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
57696 u8 dir, flow_resolve_t resolver);
57697 extern void flow_cache_flush(void);
57698 -extern atomic_t flow_cache_genid;
57699 +extern atomic_unchecked_t flow_cache_genid;
57700
57701 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
57702 {
57703 diff -urNp linux-2.6.32.41/include/net/inetpeer.h linux-2.6.32.41/include/net/inetpeer.h
57704 --- linux-2.6.32.41/include/net/inetpeer.h 2011-03-27 14:31:47.000000000 -0400
57705 +++ linux-2.6.32.41/include/net/inetpeer.h 2011-04-17 15:56:46.000000000 -0400
57706 @@ -24,7 +24,7 @@ struct inet_peer
57707 __u32 dtime; /* the time of last use of not
57708 * referenced entries */
57709 atomic_t refcnt;
57710 - atomic_t rid; /* Frag reception counter */
57711 + atomic_unchecked_t rid; /* Frag reception counter */
57712 __u32 tcp_ts;
57713 unsigned long tcp_ts_stamp;
57714 };
57715 diff -urNp linux-2.6.32.41/include/net/ip_vs.h linux-2.6.32.41/include/net/ip_vs.h
57716 --- linux-2.6.32.41/include/net/ip_vs.h 2011-03-27 14:31:47.000000000 -0400
57717 +++ linux-2.6.32.41/include/net/ip_vs.h 2011-05-04 17:56:28.000000000 -0400
57718 @@ -365,7 +365,7 @@ struct ip_vs_conn {
57719 struct ip_vs_conn *control; /* Master control connection */
57720 atomic_t n_control; /* Number of controlled ones */
57721 struct ip_vs_dest *dest; /* real server */
57722 - atomic_t in_pkts; /* incoming packet counter */
57723 + atomic_unchecked_t in_pkts; /* incoming packet counter */
57724
57725 /* packet transmitter for different forwarding methods. If it
57726 mangles the packet, it must return NF_DROP or better NF_STOLEN,
57727 @@ -466,7 +466,7 @@ struct ip_vs_dest {
57728 union nf_inet_addr addr; /* IP address of the server */
57729 __be16 port; /* port number of the server */
57730 volatile unsigned flags; /* dest status flags */
57731 - atomic_t conn_flags; /* flags to copy to conn */
57732 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
57733 atomic_t weight; /* server weight */
57734
57735 atomic_t refcnt; /* reference counter */
57736 diff -urNp linux-2.6.32.41/include/net/irda/ircomm_tty.h linux-2.6.32.41/include/net/irda/ircomm_tty.h
57737 --- linux-2.6.32.41/include/net/irda/ircomm_tty.h 2011-03-27 14:31:47.000000000 -0400
57738 +++ linux-2.6.32.41/include/net/irda/ircomm_tty.h 2011-04-17 15:56:46.000000000 -0400
57739 @@ -35,6 +35,7 @@
57740 #include <linux/termios.h>
57741 #include <linux/timer.h>
57742 #include <linux/tty.h> /* struct tty_struct */
57743 +#include <asm/local.h>
57744
57745 #include <net/irda/irias_object.h>
57746 #include <net/irda/ircomm_core.h>
57747 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
57748 unsigned short close_delay;
57749 unsigned short closing_wait; /* time to wait before closing */
57750
57751 - int open_count;
57752 - int blocked_open; /* # of blocked opens */
57753 + local_t open_count;
57754 + local_t blocked_open; /* # of blocked opens */
57755
57756 /* Protect concurent access to :
57757 * o self->open_count
57758 diff -urNp linux-2.6.32.41/include/net/iucv/af_iucv.h linux-2.6.32.41/include/net/iucv/af_iucv.h
57759 --- linux-2.6.32.41/include/net/iucv/af_iucv.h 2011-03-27 14:31:47.000000000 -0400
57760 +++ linux-2.6.32.41/include/net/iucv/af_iucv.h 2011-05-04 17:56:28.000000000 -0400
57761 @@ -87,7 +87,7 @@ struct iucv_sock {
57762 struct iucv_sock_list {
57763 struct hlist_head head;
57764 rwlock_t lock;
57765 - atomic_t autobind_name;
57766 + atomic_unchecked_t autobind_name;
57767 };
57768
57769 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
57770 diff -urNp linux-2.6.32.41/include/net/neighbour.h linux-2.6.32.41/include/net/neighbour.h
57771 --- linux-2.6.32.41/include/net/neighbour.h 2011-03-27 14:31:47.000000000 -0400
57772 +++ linux-2.6.32.41/include/net/neighbour.h 2011-04-17 15:56:46.000000000 -0400
57773 @@ -125,12 +125,12 @@ struct neighbour
57774 struct neigh_ops
57775 {
57776 int family;
57777 - void (*solicit)(struct neighbour *, struct sk_buff*);
57778 - void (*error_report)(struct neighbour *, struct sk_buff*);
57779 - int (*output)(struct sk_buff*);
57780 - int (*connected_output)(struct sk_buff*);
57781 - int (*hh_output)(struct sk_buff*);
57782 - int (*queue_xmit)(struct sk_buff*);
57783 + void (* const solicit)(struct neighbour *, struct sk_buff*);
57784 + void (* const error_report)(struct neighbour *, struct sk_buff*);
57785 + int (* const output)(struct sk_buff*);
57786 + int (* const connected_output)(struct sk_buff*);
57787 + int (* const hh_output)(struct sk_buff*);
57788 + int (* const queue_xmit)(struct sk_buff*);
57789 };
57790
57791 struct pneigh_entry
57792 diff -urNp linux-2.6.32.41/include/net/netlink.h linux-2.6.32.41/include/net/netlink.h
57793 --- linux-2.6.32.41/include/net/netlink.h 2011-03-27 14:31:47.000000000 -0400
57794 +++ linux-2.6.32.41/include/net/netlink.h 2011-04-17 15:56:46.000000000 -0400
57795 @@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct
57796 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
57797 {
57798 if (mark)
57799 - skb_trim(skb, (unsigned char *) mark - skb->data);
57800 + skb_trim(skb, (const unsigned char *) mark - skb->data);
57801 }
57802
57803 /**
57804 diff -urNp linux-2.6.32.41/include/net/netns/ipv4.h linux-2.6.32.41/include/net/netns/ipv4.h
57805 --- linux-2.6.32.41/include/net/netns/ipv4.h 2011-03-27 14:31:47.000000000 -0400
57806 +++ linux-2.6.32.41/include/net/netns/ipv4.h 2011-05-04 17:56:28.000000000 -0400
57807 @@ -54,7 +54,7 @@ struct netns_ipv4 {
57808 int current_rt_cache_rebuild_count;
57809
57810 struct timer_list rt_secret_timer;
57811 - atomic_t rt_genid;
57812 + atomic_unchecked_t rt_genid;
57813
57814 #ifdef CONFIG_IP_MROUTE
57815 struct sock *mroute_sk;
57816 diff -urNp linux-2.6.32.41/include/net/sctp/sctp.h linux-2.6.32.41/include/net/sctp/sctp.h
57817 --- linux-2.6.32.41/include/net/sctp/sctp.h 2011-03-27 14:31:47.000000000 -0400
57818 +++ linux-2.6.32.41/include/net/sctp/sctp.h 2011-04-17 15:56:46.000000000 -0400
57819 @@ -305,8 +305,8 @@ extern int sctp_debug_flag;
57820
57821 #else /* SCTP_DEBUG */
57822
57823 -#define SCTP_DEBUG_PRINTK(whatever...)
57824 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
57825 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
57826 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
57827 #define SCTP_ENABLE_DEBUG
57828 #define SCTP_DISABLE_DEBUG
57829 #define SCTP_ASSERT(expr, str, func)
57830 diff -urNp linux-2.6.32.41/include/net/sock.h linux-2.6.32.41/include/net/sock.h
57831 --- linux-2.6.32.41/include/net/sock.h 2011-03-27 14:31:47.000000000 -0400
57832 +++ linux-2.6.32.41/include/net/sock.h 2011-05-04 17:56:28.000000000 -0400
57833 @@ -272,7 +272,7 @@ struct sock {
57834 rwlock_t sk_callback_lock;
57835 int sk_err,
57836 sk_err_soft;
57837 - atomic_t sk_drops;
57838 + atomic_unchecked_t sk_drops;
57839 unsigned short sk_ack_backlog;
57840 unsigned short sk_max_ack_backlog;
57841 __u32 sk_priority;
57842 diff -urNp linux-2.6.32.41/include/net/tcp.h linux-2.6.32.41/include/net/tcp.h
57843 --- linux-2.6.32.41/include/net/tcp.h 2011-03-27 14:31:47.000000000 -0400
57844 +++ linux-2.6.32.41/include/net/tcp.h 2011-04-17 15:56:46.000000000 -0400
57845 @@ -1444,6 +1444,7 @@ enum tcp_seq_states {
57846 struct tcp_seq_afinfo {
57847 char *name;
57848 sa_family_t family;
57849 + /* cannot be const */
57850 struct file_operations seq_fops;
57851 struct seq_operations seq_ops;
57852 };
57853 diff -urNp linux-2.6.32.41/include/net/udp.h linux-2.6.32.41/include/net/udp.h
57854 --- linux-2.6.32.41/include/net/udp.h 2011-03-27 14:31:47.000000000 -0400
57855 +++ linux-2.6.32.41/include/net/udp.h 2011-04-17 15:56:46.000000000 -0400
57856 @@ -187,6 +187,7 @@ struct udp_seq_afinfo {
57857 char *name;
57858 sa_family_t family;
57859 struct udp_table *udp_table;
57860 + /* cannot be const */
57861 struct file_operations seq_fops;
57862 struct seq_operations seq_ops;
57863 };
57864 diff -urNp linux-2.6.32.41/include/scsi/scsi_device.h linux-2.6.32.41/include/scsi/scsi_device.h
57865 --- linux-2.6.32.41/include/scsi/scsi_device.h 2011-04-17 17:00:52.000000000 -0400
57866 +++ linux-2.6.32.41/include/scsi/scsi_device.h 2011-05-04 17:56:28.000000000 -0400
57867 @@ -156,9 +156,9 @@ struct scsi_device {
57868 unsigned int max_device_blocked; /* what device_blocked counts down from */
57869 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
57870
57871 - atomic_t iorequest_cnt;
57872 - atomic_t iodone_cnt;
57873 - atomic_t ioerr_cnt;
57874 + atomic_unchecked_t iorequest_cnt;
57875 + atomic_unchecked_t iodone_cnt;
57876 + atomic_unchecked_t ioerr_cnt;
57877
57878 struct device sdev_gendev,
57879 sdev_dev;
57880 diff -urNp linux-2.6.32.41/include/sound/ac97_codec.h linux-2.6.32.41/include/sound/ac97_codec.h
57881 --- linux-2.6.32.41/include/sound/ac97_codec.h 2011-03-27 14:31:47.000000000 -0400
57882 +++ linux-2.6.32.41/include/sound/ac97_codec.h 2011-04-17 15:56:46.000000000 -0400
57883 @@ -419,15 +419,15 @@
57884 struct snd_ac97;
57885
57886 struct snd_ac97_build_ops {
57887 - int (*build_3d) (struct snd_ac97 *ac97);
57888 - int (*build_specific) (struct snd_ac97 *ac97);
57889 - int (*build_spdif) (struct snd_ac97 *ac97);
57890 - int (*build_post_spdif) (struct snd_ac97 *ac97);
57891 + int (* const build_3d) (struct snd_ac97 *ac97);
57892 + int (* const build_specific) (struct snd_ac97 *ac97);
57893 + int (* const build_spdif) (struct snd_ac97 *ac97);
57894 + int (* const build_post_spdif) (struct snd_ac97 *ac97);
57895 #ifdef CONFIG_PM
57896 - void (*suspend) (struct snd_ac97 *ac97);
57897 - void (*resume) (struct snd_ac97 *ac97);
57898 + void (* const suspend) (struct snd_ac97 *ac97);
57899 + void (* const resume) (struct snd_ac97 *ac97);
57900 #endif
57901 - void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
57902 + void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
57903 };
57904
57905 struct snd_ac97_bus_ops {
57906 @@ -477,7 +477,7 @@ struct snd_ac97_template {
57907
57908 struct snd_ac97 {
57909 /* -- lowlevel (hardware) driver specific -- */
57910 - struct snd_ac97_build_ops * build_ops;
57911 + const struct snd_ac97_build_ops * build_ops;
57912 void *private_data;
57913 void (*private_free) (struct snd_ac97 *ac97);
57914 /* --- */
57915 diff -urNp linux-2.6.32.41/include/sound/ymfpci.h linux-2.6.32.41/include/sound/ymfpci.h
57916 --- linux-2.6.32.41/include/sound/ymfpci.h 2011-03-27 14:31:47.000000000 -0400
57917 +++ linux-2.6.32.41/include/sound/ymfpci.h 2011-05-04 17:56:28.000000000 -0400
57918 @@ -358,7 +358,7 @@ struct snd_ymfpci {
57919 spinlock_t reg_lock;
57920 spinlock_t voice_lock;
57921 wait_queue_head_t interrupt_sleep;
57922 - atomic_t interrupt_sleep_count;
57923 + atomic_unchecked_t interrupt_sleep_count;
57924 struct snd_info_entry *proc_entry;
57925 const struct firmware *dsp_microcode;
57926 const struct firmware *controller_microcode;
57927 diff -urNp linux-2.6.32.41/include/trace/events/irq.h linux-2.6.32.41/include/trace/events/irq.h
57928 --- linux-2.6.32.41/include/trace/events/irq.h 2011-03-27 14:31:47.000000000 -0400
57929 +++ linux-2.6.32.41/include/trace/events/irq.h 2011-04-17 15:56:46.000000000 -0400
57930 @@ -34,7 +34,7 @@
57931 */
57932 TRACE_EVENT(irq_handler_entry,
57933
57934 - TP_PROTO(int irq, struct irqaction *action),
57935 + TP_PROTO(int irq, const struct irqaction *action),
57936
57937 TP_ARGS(irq, action),
57938
57939 @@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
57940 */
57941 TRACE_EVENT(irq_handler_exit,
57942
57943 - TP_PROTO(int irq, struct irqaction *action, int ret),
57944 + TP_PROTO(int irq, const struct irqaction *action, int ret),
57945
57946 TP_ARGS(irq, action, ret),
57947
57948 @@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
57949 */
57950 TRACE_EVENT(softirq_entry,
57951
57952 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
57953 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
57954
57955 TP_ARGS(h, vec),
57956
57957 @@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
57958 */
57959 TRACE_EVENT(softirq_exit,
57960
57961 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
57962 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
57963
57964 TP_ARGS(h, vec),
57965
57966 diff -urNp linux-2.6.32.41/include/video/uvesafb.h linux-2.6.32.41/include/video/uvesafb.h
57967 --- linux-2.6.32.41/include/video/uvesafb.h 2011-03-27 14:31:47.000000000 -0400
57968 +++ linux-2.6.32.41/include/video/uvesafb.h 2011-04-17 15:56:46.000000000 -0400
57969 @@ -177,6 +177,7 @@ struct uvesafb_par {
57970 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
57971 u8 pmi_setpal; /* PMI for palette changes */
57972 u16 *pmi_base; /* protected mode interface location */
57973 + u8 *pmi_code; /* protected mode code location */
57974 void *pmi_start;
57975 void *pmi_pal;
57976 u8 *vbe_state_orig; /*
57977 diff -urNp linux-2.6.32.41/init/do_mounts.c linux-2.6.32.41/init/do_mounts.c
57978 --- linux-2.6.32.41/init/do_mounts.c 2011-03-27 14:31:47.000000000 -0400
57979 +++ linux-2.6.32.41/init/do_mounts.c 2011-04-17 15:56:46.000000000 -0400
57980 @@ -216,11 +216,11 @@ static void __init get_fs_names(char *pa
57981
57982 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
57983 {
57984 - int err = sys_mount(name, "/root", fs, flags, data);
57985 + int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
57986 if (err)
57987 return err;
57988
57989 - sys_chdir("/root");
57990 + sys_chdir((__force const char __user *)"/root");
57991 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
57992 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
57993 current->fs->pwd.mnt->mnt_sb->s_type->name,
57994 @@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...
57995 va_start(args, fmt);
57996 vsprintf(buf, fmt, args);
57997 va_end(args);
57998 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
57999 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
58000 if (fd >= 0) {
58001 sys_ioctl(fd, FDEJECT, 0);
58002 sys_close(fd);
58003 }
58004 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
58005 - fd = sys_open("/dev/console", O_RDWR, 0);
58006 + fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
58007 if (fd >= 0) {
58008 sys_ioctl(fd, TCGETS, (long)&termios);
58009 termios.c_lflag &= ~ICANON;
58010 sys_ioctl(fd, TCSETSF, (long)&termios);
58011 - sys_read(fd, &c, 1);
58012 + sys_read(fd, (char __user *)&c, 1);
58013 termios.c_lflag |= ICANON;
58014 sys_ioctl(fd, TCSETSF, (long)&termios);
58015 sys_close(fd);
58016 @@ -416,6 +416,6 @@ void __init prepare_namespace(void)
58017 mount_root();
58018 out:
58019 devtmpfs_mount("dev");
58020 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
58021 - sys_chroot(".");
58022 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
58023 + sys_chroot((__force char __user *)".");
58024 }
58025 diff -urNp linux-2.6.32.41/init/do_mounts.h linux-2.6.32.41/init/do_mounts.h
58026 --- linux-2.6.32.41/init/do_mounts.h 2011-03-27 14:31:47.000000000 -0400
58027 +++ linux-2.6.32.41/init/do_mounts.h 2011-04-17 15:56:46.000000000 -0400
58028 @@ -15,15 +15,15 @@ extern int root_mountflags;
58029
58030 static inline int create_dev(char *name, dev_t dev)
58031 {
58032 - sys_unlink(name);
58033 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
58034 + sys_unlink((__force char __user *)name);
58035 + return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
58036 }
58037
58038 #if BITS_PER_LONG == 32
58039 static inline u32 bstat(char *name)
58040 {
58041 struct stat64 stat;
58042 - if (sys_stat64(name, &stat) != 0)
58043 + if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
58044 return 0;
58045 if (!S_ISBLK(stat.st_mode))
58046 return 0;
58047 diff -urNp linux-2.6.32.41/init/do_mounts_initrd.c linux-2.6.32.41/init/do_mounts_initrd.c
58048 --- linux-2.6.32.41/init/do_mounts_initrd.c 2011-03-27 14:31:47.000000000 -0400
58049 +++ linux-2.6.32.41/init/do_mounts_initrd.c 2011-04-17 15:56:46.000000000 -0400
58050 @@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shel
58051 sys_close(old_fd);sys_close(root_fd);
58052 sys_close(0);sys_close(1);sys_close(2);
58053 sys_setsid();
58054 - (void) sys_open("/dev/console",O_RDWR,0);
58055 + (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
58056 (void) sys_dup(0);
58057 (void) sys_dup(0);
58058 return kernel_execve(shell, argv, envp_init);
58059 @@ -47,13 +47,13 @@ static void __init handle_initrd(void)
58060 create_dev("/dev/root.old", Root_RAM0);
58061 /* mount initrd on rootfs' /root */
58062 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
58063 - sys_mkdir("/old", 0700);
58064 - root_fd = sys_open("/", 0, 0);
58065 - old_fd = sys_open("/old", 0, 0);
58066 + sys_mkdir((__force const char __user *)"/old", 0700);
58067 + root_fd = sys_open((__force const char __user *)"/", 0, 0);
58068 + old_fd = sys_open((__force const char __user *)"/old", 0, 0);
58069 /* move initrd over / and chdir/chroot in initrd root */
58070 - sys_chdir("/root");
58071 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
58072 - sys_chroot(".");
58073 + sys_chdir((__force const char __user *)"/root");
58074 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
58075 + sys_chroot((__force const char __user *)".");
58076
58077 /*
58078 * In case that a resume from disk is carried out by linuxrc or one of
58079 @@ -70,15 +70,15 @@ static void __init handle_initrd(void)
58080
58081 /* move initrd to rootfs' /old */
58082 sys_fchdir(old_fd);
58083 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
58084 + sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
58085 /* switch root and cwd back to / of rootfs */
58086 sys_fchdir(root_fd);
58087 - sys_chroot(".");
58088 + sys_chroot((__force const char __user *)".");
58089 sys_close(old_fd);
58090 sys_close(root_fd);
58091
58092 if (new_decode_dev(real_root_dev) == Root_RAM0) {
58093 - sys_chdir("/old");
58094 + sys_chdir((__force const char __user *)"/old");
58095 return;
58096 }
58097
58098 @@ -86,17 +86,17 @@ static void __init handle_initrd(void)
58099 mount_root();
58100
58101 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
58102 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
58103 + error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
58104 if (!error)
58105 printk("okay\n");
58106 else {
58107 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
58108 + int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
58109 if (error == -ENOENT)
58110 printk("/initrd does not exist. Ignored.\n");
58111 else
58112 printk("failed\n");
58113 printk(KERN_NOTICE "Unmounting old root\n");
58114 - sys_umount("/old", MNT_DETACH);
58115 + sys_umount((__force char __user *)"/old", MNT_DETACH);
58116 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
58117 if (fd < 0) {
58118 error = fd;
58119 @@ -119,11 +119,11 @@ int __init initrd_load(void)
58120 * mounted in the normal path.
58121 */
58122 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
58123 - sys_unlink("/initrd.image");
58124 + sys_unlink((__force const char __user *)"/initrd.image");
58125 handle_initrd();
58126 return 1;
58127 }
58128 }
58129 - sys_unlink("/initrd.image");
58130 + sys_unlink((__force const char __user *)"/initrd.image");
58131 return 0;
58132 }
58133 diff -urNp linux-2.6.32.41/init/do_mounts_md.c linux-2.6.32.41/init/do_mounts_md.c
58134 --- linux-2.6.32.41/init/do_mounts_md.c 2011-03-27 14:31:47.000000000 -0400
58135 +++ linux-2.6.32.41/init/do_mounts_md.c 2011-04-17 15:56:46.000000000 -0400
58136 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
58137 partitioned ? "_d" : "", minor,
58138 md_setup_args[ent].device_names);
58139
58140 - fd = sys_open(name, 0, 0);
58141 + fd = sys_open((__force char __user *)name, 0, 0);
58142 if (fd < 0) {
58143 printk(KERN_ERR "md: open failed - cannot start "
58144 "array %s\n", name);
58145 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
58146 * array without it
58147 */
58148 sys_close(fd);
58149 - fd = sys_open(name, 0, 0);
58150 + fd = sys_open((__force char __user *)name, 0, 0);
58151 sys_ioctl(fd, BLKRRPART, 0);
58152 }
58153 sys_close(fd);
58154 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
58155
58156 wait_for_device_probe();
58157
58158 - fd = sys_open("/dev/md0", 0, 0);
58159 + fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
58160 if (fd >= 0) {
58161 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
58162 sys_close(fd);
58163 diff -urNp linux-2.6.32.41/init/initramfs.c linux-2.6.32.41/init/initramfs.c
58164 --- linux-2.6.32.41/init/initramfs.c 2011-03-27 14:31:47.000000000 -0400
58165 +++ linux-2.6.32.41/init/initramfs.c 2011-04-17 15:56:46.000000000 -0400
58166 @@ -74,7 +74,7 @@ static void __init free_hash(void)
58167 }
58168 }
58169
58170 -static long __init do_utime(char __user *filename, time_t mtime)
58171 +static long __init do_utime(__force char __user *filename, time_t mtime)
58172 {
58173 struct timespec t[2];
58174
58175 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
58176 struct dir_entry *de, *tmp;
58177 list_for_each_entry_safe(de, tmp, &dir_list, list) {
58178 list_del(&de->list);
58179 - do_utime(de->name, de->mtime);
58180 + do_utime((__force char __user *)de->name, de->mtime);
58181 kfree(de->name);
58182 kfree(de);
58183 }
58184 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
58185 if (nlink >= 2) {
58186 char *old = find_link(major, minor, ino, mode, collected);
58187 if (old)
58188 - return (sys_link(old, collected) < 0) ? -1 : 1;
58189 + return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
58190 }
58191 return 0;
58192 }
58193 @@ -280,11 +280,11 @@ static void __init clean_path(char *path
58194 {
58195 struct stat st;
58196
58197 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
58198 + if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
58199 if (S_ISDIR(st.st_mode))
58200 - sys_rmdir(path);
58201 + sys_rmdir((__force char __user *)path);
58202 else
58203 - sys_unlink(path);
58204 + sys_unlink((__force char __user *)path);
58205 }
58206 }
58207
58208 @@ -305,7 +305,7 @@ static int __init do_name(void)
58209 int openflags = O_WRONLY|O_CREAT;
58210 if (ml != 1)
58211 openflags |= O_TRUNC;
58212 - wfd = sys_open(collected, openflags, mode);
58213 + wfd = sys_open((__force char __user *)collected, openflags, mode);
58214
58215 if (wfd >= 0) {
58216 sys_fchown(wfd, uid, gid);
58217 @@ -317,17 +317,17 @@ static int __init do_name(void)
58218 }
58219 }
58220 } else if (S_ISDIR(mode)) {
58221 - sys_mkdir(collected, mode);
58222 - sys_chown(collected, uid, gid);
58223 - sys_chmod(collected, mode);
58224 + sys_mkdir((__force char __user *)collected, mode);
58225 + sys_chown((__force char __user *)collected, uid, gid);
58226 + sys_chmod((__force char __user *)collected, mode);
58227 dir_add(collected, mtime);
58228 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
58229 S_ISFIFO(mode) || S_ISSOCK(mode)) {
58230 if (maybe_link() == 0) {
58231 - sys_mknod(collected, mode, rdev);
58232 - sys_chown(collected, uid, gid);
58233 - sys_chmod(collected, mode);
58234 - do_utime(collected, mtime);
58235 + sys_mknod((__force char __user *)collected, mode, rdev);
58236 + sys_chown((__force char __user *)collected, uid, gid);
58237 + sys_chmod((__force char __user *)collected, mode);
58238 + do_utime((__force char __user *)collected, mtime);
58239 }
58240 }
58241 return 0;
58242 @@ -336,15 +336,15 @@ static int __init do_name(void)
58243 static int __init do_copy(void)
58244 {
58245 if (count >= body_len) {
58246 - sys_write(wfd, victim, body_len);
58247 + sys_write(wfd, (__force char __user *)victim, body_len);
58248 sys_close(wfd);
58249 - do_utime(vcollected, mtime);
58250 + do_utime((__force char __user *)vcollected, mtime);
58251 kfree(vcollected);
58252 eat(body_len);
58253 state = SkipIt;
58254 return 0;
58255 } else {
58256 - sys_write(wfd, victim, count);
58257 + sys_write(wfd, (__force char __user *)victim, count);
58258 body_len -= count;
58259 eat(count);
58260 return 1;
58261 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
58262 {
58263 collected[N_ALIGN(name_len) + body_len] = '\0';
58264 clean_path(collected, 0);
58265 - sys_symlink(collected + N_ALIGN(name_len), collected);
58266 - sys_lchown(collected, uid, gid);
58267 - do_utime(collected, mtime);
58268 + sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
58269 + sys_lchown((__force char __user *)collected, uid, gid);
58270 + do_utime((__force char __user *)collected, mtime);
58271 state = SkipIt;
58272 next_state = Reset;
58273 return 0;
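The __force casts threaded through the init/ changes exist for the sparse static checker: in-kernel strings and buffers live in the kernel address space, the sys_* prototypes take __user pointers, and only a __force cast may cross that address-space boundary without triggering a warning. A self-contained sketch of how the annotations behave, with simplified versions of the definitions from include/linux/compiler.h (fake_sys_unlink is an invented stand-in):

/* Check with: sparse file.c -- with plain gcc the attributes vanish. */
#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

/* A syscall-style prototype that only accepts userspace pointers. */
static long fake_sys_unlink(const char __user *pathname)
{
        (void)pathname;
        return 0;
}

int main(void)
{
        const char *kernel_name = "/initrd.image";

        /* fake_sys_unlink(kernel_name);  sparse: warning, incorrect address space */
        fake_sys_unlink((__force const char __user *)kernel_name);  /* accepted */
        return 0;
}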
58274 diff -urNp linux-2.6.32.41/init/Kconfig linux-2.6.32.41/init/Kconfig
58275 --- linux-2.6.32.41/init/Kconfig 2011-05-10 22:12:01.000000000 -0400
58276 +++ linux-2.6.32.41/init/Kconfig 2011-05-10 22:12:34.000000000 -0400
58277 @@ -1004,7 +1004,7 @@ config SLUB_DEBUG
58278
58279 config COMPAT_BRK
58280 bool "Disable heap randomization"
58281 - default y
58282 + default n
58283 help
58284 Randomizing heap placement makes heap exploits harder, but it
58285 also breaks ancient binaries (including anything libc5 based).
58286 diff -urNp linux-2.6.32.41/init/main.c linux-2.6.32.41/init/main.c
58287 --- linux-2.6.32.41/init/main.c 2011-05-10 22:12:01.000000000 -0400
58288 +++ linux-2.6.32.41/init/main.c 2011-05-22 23:02:06.000000000 -0400
58289 @@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void)
58290 #ifdef CONFIG_TC
58291 extern void tc_init(void);
58292 #endif
58293 +extern void grsecurity_init(void);
58294
58295 enum system_states system_state __read_mostly;
58296 EXPORT_SYMBOL(system_state);
58297 @@ -183,6 +184,49 @@ static int __init set_reset_devices(char
58298
58299 __setup("reset_devices", set_reset_devices);
58300
58301 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
58302 +extern char pax_enter_kernel_user[];
58303 +extern char pax_exit_kernel_user[];
58304 +extern pgdval_t clone_pgd_mask;
58305 +#endif
58306 +
58307 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
58308 +static int __init setup_pax_nouderef(char *str)
58309 +{
58310 +#ifdef CONFIG_X86_32
58311 + unsigned int cpu;
58312 + struct desc_struct *gdt;
58313 +
58314 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
58315 + gdt = get_cpu_gdt_table(cpu);
58316 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
58317 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
58318 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
58319 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
58320 + }
58321 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
58322 +#else
58323 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
58324 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
58325 + clone_pgd_mask = ~(pgdval_t)0UL;
58326 +#endif
58327 +
58328 + return 0;
58329 +}
58330 +early_param("pax_nouderef", setup_pax_nouderef);
58331 +#endif
58332 +
58333 +#ifdef CONFIG_PAX_SOFTMODE
58334 +unsigned int pax_softmode;
58335 +
58336 +static int __init setup_pax_softmode(char *str)
58337 +{
58338 + get_option(&str, &pax_softmode);
58339 + return 1;
58340 +}
58341 +__setup("pax_softmode=", setup_pax_softmode);
58342 +#endif
58343 +
58344 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
58345 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
58346 static const char *panic_later, *panic_param;
58347 @@ -705,52 +749,53 @@ int initcall_debug;
58348 core_param(initcall_debug, initcall_debug, bool, 0644);
58349
58350 static char msgbuf[64];
58351 -static struct boot_trace_call call;
58352 -static struct boot_trace_ret ret;
58353 +static struct boot_trace_call trace_call;
58354 +static struct boot_trace_ret trace_ret;
58355
58356 int do_one_initcall(initcall_t fn)
58357 {
58358 int count = preempt_count();
58359 ktime_t calltime, delta, rettime;
58360 + const char *msg1 = "", *msg2 = "";
58361
58362 if (initcall_debug) {
58363 - call.caller = task_pid_nr(current);
58364 - printk("calling %pF @ %i\n", fn, call.caller);
58365 + trace_call.caller = task_pid_nr(current);
58366 + printk("calling %pF @ %i\n", fn, trace_call.caller);
58367 calltime = ktime_get();
58368 - trace_boot_call(&call, fn);
58369 + trace_boot_call(&trace_call, fn);
58370 enable_boot_trace();
58371 }
58372
58373 - ret.result = fn();
58374 + trace_ret.result = fn();
58375
58376 if (initcall_debug) {
58377 disable_boot_trace();
58378 rettime = ktime_get();
58379 delta = ktime_sub(rettime, calltime);
58380 - ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
58381 - trace_boot_ret(&ret, fn);
58382 + trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
58383 + trace_boot_ret(&trace_ret, fn);
58384 printk("initcall %pF returned %d after %Ld usecs\n", fn,
58385 - ret.result, ret.duration);
58386 + trace_ret.result, trace_ret.duration);
58387 }
58388
58389 msgbuf[0] = 0;
58390
58391 - if (ret.result && ret.result != -ENODEV && initcall_debug)
58392 - sprintf(msgbuf, "error code %d ", ret.result);
58393 + if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
58394 + sprintf(msgbuf, "error code %d ", trace_ret.result);
58395
58396 if (preempt_count() != count) {
58397 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
58398 + msg1 = " preemption imbalance";
58399 preempt_count() = count;
58400 }
58401 if (irqs_disabled()) {
58402 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
58403 + msg2 = " disabled interrupts";
58404 local_irq_enable();
58405 }
58406 - if (msgbuf[0]) {
58407 - printk("initcall %pF returned with %s\n", fn, msgbuf);
58408 + if (msgbuf[0] || *msg1 || *msg2) {
58409 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
58410 }
58411
58412 - return ret.result;
58413 + return trace_ret.result;
58414 }
58415
58416
58417 @@ -893,11 +938,13 @@ static int __init kernel_init(void * unu
58418 if (!ramdisk_execute_command)
58419 ramdisk_execute_command = "/init";
58420
58421 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
58422 + if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
58423 ramdisk_execute_command = NULL;
58424 prepare_namespace();
58425 }
58426
58427 + grsecurity_init();
58428 +
58429 /*
58430 * Ok, we have completed the initial bootup, and
58431 * we're essentially up and running. Get rid of the
58432 diff -urNp linux-2.6.32.41/init/noinitramfs.c linux-2.6.32.41/init/noinitramfs.c
58433 --- linux-2.6.32.41/init/noinitramfs.c 2011-03-27 14:31:47.000000000 -0400
58434 +++ linux-2.6.32.41/init/noinitramfs.c 2011-04-17 15:56:46.000000000 -0400
58435 @@ -29,7 +29,7 @@ static int __init default_rootfs(void)
58436 {
58437 int err;
58438
58439 - err = sys_mkdir("/dev", 0755);
58440 + err = sys_mkdir((const char __user *)"/dev", 0755);
58441 if (err < 0)
58442 goto out;
58443
58444 @@ -39,7 +39,7 @@ static int __init default_rootfs(void)
58445 if (err < 0)
58446 goto out;
58447
58448 - err = sys_mkdir("/root", 0700);
58449 + err = sys_mkdir((const char __user *)"/root", 0700);
58450 if (err < 0)
58451 goto out;
58452
58453 diff -urNp linux-2.6.32.41/ipc/mqueue.c linux-2.6.32.41/ipc/mqueue.c
58454 --- linux-2.6.32.41/ipc/mqueue.c 2011-03-27 14:31:47.000000000 -0400
58455 +++ linux-2.6.32.41/ipc/mqueue.c 2011-04-17 15:56:46.000000000 -0400
58456 @@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(st
58457 mq_bytes = (mq_msg_tblsz +
58458 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
58459
58460 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
58461 spin_lock(&mq_lock);
58462 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
58463 u->mq_bytes + mq_bytes >
58464 diff -urNp linux-2.6.32.41/ipc/sem.c linux-2.6.32.41/ipc/sem.c
58465 --- linux-2.6.32.41/ipc/sem.c 2011-03-27 14:31:47.000000000 -0400
58466 +++ linux-2.6.32.41/ipc/sem.c 2011-05-16 21:46:57.000000000 -0400
58467 @@ -671,6 +671,8 @@ static int semctl_main(struct ipc_namesp
58468 ushort* sem_io = fast_sem_io;
58469 int nsems;
58470
58471 + pax_track_stack();
58472 +
58473 sma = sem_lock_check(ns, semid);
58474 if (IS_ERR(sma))
58475 return PTR_ERR(sma);
58476 @@ -1071,6 +1073,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
58477 unsigned long jiffies_left = 0;
58478 struct ipc_namespace *ns;
58479
58480 + pax_track_stack();
58481 +
58482 ns = current->nsproxy->ipc_ns;
58483
58484 if (nsops < 1 || semid < 0)
58485 diff -urNp linux-2.6.32.41/ipc/shm.c linux-2.6.32.41/ipc/shm.c
58486 --- linux-2.6.32.41/ipc/shm.c 2011-03-27 14:31:47.000000000 -0400
58487 +++ linux-2.6.32.41/ipc/shm.c 2011-04-17 15:56:46.000000000 -0400
58488 @@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_name
58489 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
58490 #endif
58491
58492 +#ifdef CONFIG_GRKERNSEC
58493 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
58494 + const time_t shm_createtime, const uid_t cuid,
58495 + const int shmid);
58496 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
58497 + const time_t shm_createtime);
58498 +#endif
58499 +
58500 void shm_init_ns(struct ipc_namespace *ns)
58501 {
58502 ns->shm_ctlmax = SHMMAX;
58503 @@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *
58504 shp->shm_lprid = 0;
58505 shp->shm_atim = shp->shm_dtim = 0;
58506 shp->shm_ctim = get_seconds();
58507 +#ifdef CONFIG_GRKERNSEC
58508 + {
58509 + struct timespec timeval;
58510 + do_posix_clock_monotonic_gettime(&timeval);
58511 +
58512 + shp->shm_createtime = timeval.tv_sec;
58513 + }
58514 +#endif
58515 shp->shm_segsz = size;
58516 shp->shm_nattch = 0;
58517 shp->shm_file = file;
58518 @@ -880,9 +896,21 @@ long do_shmat(int shmid, char __user *sh
58519 if (err)
58520 goto out_unlock;
58521
58522 +#ifdef CONFIG_GRKERNSEC
58523 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
58524 + shp->shm_perm.cuid, shmid) ||
58525 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
58526 + err = -EACCES;
58527 + goto out_unlock;
58528 + }
58529 +#endif
58530 +
58531 path.dentry = dget(shp->shm_file->f_path.dentry);
58532 path.mnt = shp->shm_file->f_path.mnt;
58533 shp->shm_nattch++;
58534 +#ifdef CONFIG_GRKERNSEC
58535 + shp->shm_lapid = current->pid;
58536 +#endif
58537 size = i_size_read(path.dentry->d_inode);
58538 shm_unlock(shp);
58539
58540 diff -urNp linux-2.6.32.41/kernel/acct.c linux-2.6.32.41/kernel/acct.c
58541 --- linux-2.6.32.41/kernel/acct.c 2011-03-27 14:31:47.000000000 -0400
58542 +++ linux-2.6.32.41/kernel/acct.c 2011-04-17 15:56:46.000000000 -0400
58543 @@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_a
58544 */
58545 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
58546 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
58547 - file->f_op->write(file, (char *)&ac,
58548 + file->f_op->write(file, (__force char __user *)&ac,
58549 sizeof(acct_t), &file->f_pos);
58550 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
58551 set_fs(fs);
58552 diff -urNp linux-2.6.32.41/kernel/audit.c linux-2.6.32.41/kernel/audit.c
58553 --- linux-2.6.32.41/kernel/audit.c 2011-03-27 14:31:47.000000000 -0400
58554 +++ linux-2.6.32.41/kernel/audit.c 2011-05-04 17:56:28.000000000 -0400
58555 @@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
58556 3) suppressed due to audit_rate_limit
58557 4) suppressed due to audit_backlog_limit
58558 */
58559 -static atomic_t audit_lost = ATOMIC_INIT(0);
58560 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
58561
58562 /* The netlink socket. */
58563 static struct sock *audit_sock;
58564 @@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
58565 unsigned long now;
58566 int print;
58567
58568 - atomic_inc(&audit_lost);
58569 + atomic_inc_unchecked(&audit_lost);
58570
58571 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
58572
58573 @@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
58574 printk(KERN_WARNING
58575 "audit: audit_lost=%d audit_rate_limit=%d "
58576 "audit_backlog_limit=%d\n",
58577 - atomic_read(&audit_lost),
58578 + atomic_read_unchecked(&audit_lost),
58579 audit_rate_limit,
58580 audit_backlog_limit);
58581 audit_panic(message);
58582 @@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_b
58583 status_set.pid = audit_pid;
58584 status_set.rate_limit = audit_rate_limit;
58585 status_set.backlog_limit = audit_backlog_limit;
58586 - status_set.lost = atomic_read(&audit_lost);
58587 + status_set.lost = atomic_read_unchecked(&audit_lost);
58588 status_set.backlog = skb_queue_len(&audit_skb_queue);
58589 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
58590 &status_set, sizeof(status_set));
58591 @@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_b
58592 spin_unlock_irq(&tsk->sighand->siglock);
58593 }
58594 read_unlock(&tasklist_lock);
58595 - audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
58596 - &s, sizeof(s));
58597 +
58598 + if (!err)
58599 + audit_send_reply(NETLINK_CB(skb).pid, seq,
58600 + AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
58601 break;
58602 }
58603 case AUDIT_TTY_SET: {
58604 diff -urNp linux-2.6.32.41/kernel/auditsc.c linux-2.6.32.41/kernel/auditsc.c
58605 --- linux-2.6.32.41/kernel/auditsc.c 2011-03-27 14:31:47.000000000 -0400
58606 +++ linux-2.6.32.41/kernel/auditsc.c 2011-05-04 17:56:28.000000000 -0400
58607 @@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_conte
58608 }
58609
58610 /* global counter which is incremented every time something logs in */
58611 -static atomic_t session_id = ATOMIC_INIT(0);
58612 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
58613
58614 /**
58615 * audit_set_loginuid - set a task's audit_context loginuid
58616 @@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT
58617 */
58618 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
58619 {
58620 - unsigned int sessionid = atomic_inc_return(&session_id);
58621 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
58622 struct audit_context *context = task->audit_context;
58623
58624 if (context && context->in_syscall) {
58625 diff -urNp linux-2.6.32.41/kernel/capability.c linux-2.6.32.41/kernel/capability.c
58626 --- linux-2.6.32.41/kernel/capability.c 2011-03-27 14:31:47.000000000 -0400
58627 +++ linux-2.6.32.41/kernel/capability.c 2011-04-17 15:56:46.000000000 -0400
58628 @@ -305,10 +305,26 @@ int capable(int cap)
58629 BUG();
58630 }
58631
58632 - if (security_capable(cap) == 0) {
58633 + if (security_capable(cap) == 0 && gr_is_capable(cap)) {
58634 current->flags |= PF_SUPERPRIV;
58635 return 1;
58636 }
58637 return 0;
58638 }
58639 +
58640 +int capable_nolog(int cap)
58641 +{
58642 + if (unlikely(!cap_valid(cap))) {
58643 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
58644 + BUG();
58645 + }
58646 +
58647 + if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
58648 + current->flags |= PF_SUPERPRIV;
58649 + return 1;
58650 + }
58651 + return 0;
58652 +}
58653 +
58654 EXPORT_SYMBOL(capable);
58655 +EXPORT_SYMBOL(capable_nolog);
58656 diff -urNp linux-2.6.32.41/kernel/cgroup.c linux-2.6.32.41/kernel/cgroup.c
58657 --- linux-2.6.32.41/kernel/cgroup.c 2011-03-27 14:31:47.000000000 -0400
58658 +++ linux-2.6.32.41/kernel/cgroup.c 2011-05-16 21:46:57.000000000 -0400
58659 @@ -536,6 +536,8 @@ static struct css_set *find_css_set(
58660 struct hlist_head *hhead;
58661 struct cg_cgroup_link *link;
58662
58663 + pax_track_stack();
58664 +
58665 /* First see if we already have a cgroup group that matches
58666 * the desired set */
58667 read_lock(&css_set_lock);
58668 diff -urNp linux-2.6.32.41/kernel/configs.c linux-2.6.32.41/kernel/configs.c
58669 --- linux-2.6.32.41/kernel/configs.c 2011-03-27 14:31:47.000000000 -0400
58670 +++ linux-2.6.32.41/kernel/configs.c 2011-04-17 15:56:46.000000000 -0400
58671 @@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
58672 struct proc_dir_entry *entry;
58673
58674 /* create the current config file */
58675 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
58676 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
58677 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
58678 + &ikconfig_file_ops);
58679 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58680 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
58681 + &ikconfig_file_ops);
58682 +#endif
58683 +#else
58684 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
58685 &ikconfig_file_ops);
58686 +#endif
58687 +
58688 if (!entry)
58689 return -ENOMEM;
58690
58691 diff -urNp linux-2.6.32.41/kernel/cpu.c linux-2.6.32.41/kernel/cpu.c
58692 --- linux-2.6.32.41/kernel/cpu.c 2011-03-27 14:31:47.000000000 -0400
58693 +++ linux-2.6.32.41/kernel/cpu.c 2011-04-17 15:56:46.000000000 -0400
58694 @@ -19,7 +19,7 @@
58695 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
58696 static DEFINE_MUTEX(cpu_add_remove_lock);
58697
58698 -static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
58699 +static RAW_NOTIFIER_HEAD(cpu_chain);
58700
58701 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
58702 * Should always be manipulated under cpu_add_remove_lock
58703 diff -urNp linux-2.6.32.41/kernel/cred.c linux-2.6.32.41/kernel/cred.c
58704 --- linux-2.6.32.41/kernel/cred.c 2011-03-27 14:31:47.000000000 -0400
58705 +++ linux-2.6.32.41/kernel/cred.c 2011-05-17 19:26:34.000000000 -0400
58706 @@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head
58707 */
58708 void __put_cred(struct cred *cred)
58709 {
58710 + pax_track_stack();
58711 +
58712 kdebug("__put_cred(%p{%d,%d})", cred,
58713 atomic_read(&cred->usage),
58714 read_cred_subscribers(cred));
58715 @@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
58716 {
58717 struct cred *cred;
58718
58719 + pax_track_stack();
58720 +
58721 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
58722 atomic_read(&tsk->cred->usage),
58723 read_cred_subscribers(tsk->cred));
58724 @@ -222,6 +226,8 @@ const struct cred *get_task_cred(struct
58725 {
58726 const struct cred *cred;
58727
58728 + pax_track_stack();
58729 +
58730 rcu_read_lock();
58731
58732 do {
58733 @@ -241,6 +247,8 @@ struct cred *cred_alloc_blank(void)
58734 {
58735 struct cred *new;
58736
58737 + pax_track_stack();
58738 +
58739 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
58740 if (!new)
58741 return NULL;
58742 @@ -289,6 +297,8 @@ struct cred *prepare_creds(void)
58743 const struct cred *old;
58744 struct cred *new;
58745
58746 + pax_track_stack();
58747 +
58748 validate_process_creds();
58749
58750 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
58751 @@ -335,6 +345,8 @@ struct cred *prepare_exec_creds(void)
58752 struct thread_group_cred *tgcred = NULL;
58753 struct cred *new;
58754
58755 + pax_track_stack();
58756 +
58757 #ifdef CONFIG_KEYS
58758 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
58759 if (!tgcred)
58760 @@ -441,6 +453,8 @@ int copy_creds(struct task_struct *p, un
58761 struct cred *new;
58762 int ret;
58763
58764 + pax_track_stack();
58765 +
58766 mutex_init(&p->cred_guard_mutex);
58767
58768 if (
58769 @@ -528,6 +542,8 @@ int commit_creds(struct cred *new)
58770 struct task_struct *task = current;
58771 const struct cred *old = task->real_cred;
58772
58773 + pax_track_stack();
58774 +
58775 kdebug("commit_creds(%p{%d,%d})", new,
58776 atomic_read(&new->usage),
58777 read_cred_subscribers(new));
58778 @@ -544,6 +560,8 @@ int commit_creds(struct cred *new)
58779
58780 get_cred(new); /* we will require a ref for the subj creds too */
58781
58782 + gr_set_role_label(task, new->uid, new->gid);
58783 +
58784 /* dumpability changes */
58785 if (old->euid != new->euid ||
58786 old->egid != new->egid ||
58787 @@ -606,6 +624,8 @@ EXPORT_SYMBOL(commit_creds);
58788 */
58789 void abort_creds(struct cred *new)
58790 {
58791 + pax_track_stack();
58792 +
58793 kdebug("abort_creds(%p{%d,%d})", new,
58794 atomic_read(&new->usage),
58795 read_cred_subscribers(new));
58796 @@ -629,6 +649,8 @@ const struct cred *override_creds(const
58797 {
58798 const struct cred *old = current->cred;
58799
58800 + pax_track_stack();
58801 +
58802 kdebug("override_creds(%p{%d,%d})", new,
58803 atomic_read(&new->usage),
58804 read_cred_subscribers(new));
58805 @@ -658,6 +680,8 @@ void revert_creds(const struct cred *old
58806 {
58807 const struct cred *override = current->cred;
58808
58809 + pax_track_stack();
58810 +
58811 kdebug("revert_creds(%p{%d,%d})", old,
58812 atomic_read(&old->usage),
58813 read_cred_subscribers(old));
58814 @@ -704,6 +728,8 @@ struct cred *prepare_kernel_cred(struct
58815 const struct cred *old;
58816 struct cred *new;
58817
58818 + pax_track_stack();
58819 +
58820 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
58821 if (!new)
58822 return NULL;
58823 @@ -758,6 +784,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
58824 */
58825 int set_security_override(struct cred *new, u32 secid)
58826 {
58827 + pax_track_stack();
58828 +
58829 return security_kernel_act_as(new, secid);
58830 }
58831 EXPORT_SYMBOL(set_security_override);
58832 @@ -777,6 +805,8 @@ int set_security_override_from_ctx(struc
58833 u32 secid;
58834 int ret;
58835
58836 + pax_track_stack();
58837 +
58838 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
58839 if (ret < 0)
58840 return ret;
58841 diff -urNp linux-2.6.32.41/kernel/exit.c linux-2.6.32.41/kernel/exit.c
58842 --- linux-2.6.32.41/kernel/exit.c 2011-03-27 14:31:47.000000000 -0400
58843 +++ linux-2.6.32.41/kernel/exit.c 2011-04-17 15:56:46.000000000 -0400
58844 @@ -55,6 +55,10 @@
58845 #include <asm/pgtable.h>
58846 #include <asm/mmu_context.h>
58847
58848 +#ifdef CONFIG_GRKERNSEC
58849 +extern rwlock_t grsec_exec_file_lock;
58850 +#endif
58851 +
58852 static void exit_mm(struct task_struct * tsk);
58853
58854 static void __unhash_process(struct task_struct *p)
58855 @@ -174,6 +178,8 @@ void release_task(struct task_struct * p
58856 struct task_struct *leader;
58857 int zap_leader;
58858 repeat:
58859 + gr_del_task_from_ip_table(p);
58860 +
58861 tracehook_prepare_release_task(p);
58862 /* don't need to get the RCU readlock here - the process is dead and
58863 * can't be modifying its own credentials */
58864 @@ -341,11 +347,22 @@ static void reparent_to_kthreadd(void)
58865 {
58866 write_lock_irq(&tasklist_lock);
58867
58868 +#ifdef CONFIG_GRKERNSEC
58869 + write_lock(&grsec_exec_file_lock);
58870 + if (current->exec_file) {
58871 + fput(current->exec_file);
58872 + current->exec_file = NULL;
58873 + }
58874 + write_unlock(&grsec_exec_file_lock);
58875 +#endif
58876 +
58877 ptrace_unlink(current);
58878 /* Reparent to init */
58879 current->real_parent = current->parent = kthreadd_task;
58880 list_move_tail(&current->sibling, &current->real_parent->children);
58881
58882 + gr_set_kernel_label(current);
58883 +
58884 /* Set the exit signal to SIGCHLD so we signal init on exit */
58885 current->exit_signal = SIGCHLD;
58886
58887 @@ -397,7 +414,7 @@ int allow_signal(int sig)
58888 * know it'll be handled, so that they don't get converted to
58889 * SIGKILL or just silently dropped.
58890 */
58891 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
58892 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
58893 recalc_sigpending();
58894 spin_unlock_irq(&current->sighand->siglock);
58895 return 0;
58896 @@ -433,6 +450,17 @@ void daemonize(const char *name, ...)
58897 vsnprintf(current->comm, sizeof(current->comm), name, args);
58898 va_end(args);
58899
58900 +#ifdef CONFIG_GRKERNSEC
58901 + write_lock(&grsec_exec_file_lock);
58902 + if (current->exec_file) {
58903 + fput(current->exec_file);
58904 + current->exec_file = NULL;
58905 + }
58906 + write_unlock(&grsec_exec_file_lock);
58907 +#endif
58908 +
58909 + gr_set_kernel_label(current);
58910 +
58911 /*
58912 * If we were started as result of loading a module, close all of the
58913 * user space pages. We don't need them, and if we didn't close them
58914 @@ -897,17 +925,17 @@ NORET_TYPE void do_exit(long code)
58915 struct task_struct *tsk = current;
58916 int group_dead;
58917
58918 - profile_task_exit(tsk);
58919 -
58920 - WARN_ON(atomic_read(&tsk->fs_excl));
58921 -
58922 + /*
58923 + * Check this first since set_fs() below depends on
58924 + * current_thread_info(), which we better not access when we're in
58925 + * interrupt context. Other than that, we want to do the set_fs()
58926 + * as early as possible.
58927 + */
58928 if (unlikely(in_interrupt()))
58929 panic("Aiee, killing interrupt handler!");
58930 - if (unlikely(!tsk->pid))
58931 - panic("Attempted to kill the idle task!");
58932
58933 /*
58934 - * If do_exit is called because this processes oopsed, it's possible
58935 + * If do_exit is called because this processes Oops'ed, it's possible
58936 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
58937 * continuing. Amongst other possible reasons, this is to prevent
58938 * mm_release()->clear_child_tid() from writing to a user-controlled
58939 @@ -915,6 +943,13 @@ NORET_TYPE void do_exit(long code)
58940 */
58941 set_fs(USER_DS);
58942
58943 + profile_task_exit(tsk);
58944 +
58945 + WARN_ON(atomic_read(&tsk->fs_excl));
58946 +
58947 + if (unlikely(!tsk->pid))
58948 + panic("Attempted to kill the idle task!");
58949 +
58950 tracehook_report_exit(&code);
58951
58952 validate_creds_for_do_exit(tsk);
58953 @@ -973,6 +1008,9 @@ NORET_TYPE void do_exit(long code)
58954 tsk->exit_code = code;
58955 taskstats_exit(tsk, group_dead);
58956
58957 + gr_acl_handle_psacct(tsk, code);
58958 + gr_acl_handle_exit();
58959 +
58960 exit_mm(tsk);
58961
58962 if (group_dead)
58963 @@ -1188,7 +1226,7 @@ static int wait_task_zombie(struct wait_
58964
58965 if (unlikely(wo->wo_flags & WNOWAIT)) {
58966 int exit_code = p->exit_code;
58967 - int why, status;
58968 + int why;
58969
58970 get_task_struct(p);
58971 read_unlock(&tasklist_lock);
58972 diff -urNp linux-2.6.32.41/kernel/fork.c linux-2.6.32.41/kernel/fork.c
58973 --- linux-2.6.32.41/kernel/fork.c 2011-03-27 14:31:47.000000000 -0400
58974 +++ linux-2.6.32.41/kernel/fork.c 2011-04-17 15:56:46.000000000 -0400
58975 @@ -253,7 +253,7 @@ static struct task_struct *dup_task_stru
58976 *stackend = STACK_END_MAGIC; /* for overflow detection */
58977
58978 #ifdef CONFIG_CC_STACKPROTECTOR
58979 - tsk->stack_canary = get_random_int();
58980 + tsk->stack_canary = pax_get_random_long();
58981 #endif
58982
58983 /* One for us, one for whoever does the "release_task()" (usually parent) */
58984 @@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm
58985 mm->locked_vm = 0;
58986 mm->mmap = NULL;
58987 mm->mmap_cache = NULL;
58988 - mm->free_area_cache = oldmm->mmap_base;
58989 - mm->cached_hole_size = ~0UL;
58990 + mm->free_area_cache = oldmm->free_area_cache;
58991 + mm->cached_hole_size = oldmm->cached_hole_size;
58992 mm->map_count = 0;
58993 cpumask_clear(mm_cpumask(mm));
58994 mm->mm_rb = RB_ROOT;
58995 @@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm
58996 tmp->vm_flags &= ~VM_LOCKED;
58997 tmp->vm_mm = mm;
58998 tmp->vm_next = tmp->vm_prev = NULL;
58999 + tmp->vm_mirror = NULL;
59000 anon_vma_link(tmp);
59001 file = tmp->vm_file;
59002 if (file) {
59003 @@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm
59004 if (retval)
59005 goto out;
59006 }
59007 +
59008 +#ifdef CONFIG_PAX_SEGMEXEC
59009 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
59010 + struct vm_area_struct *mpnt_m;
59011 +
59012 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
59013 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
59014 +
59015 + if (!mpnt->vm_mirror)
59016 + continue;
59017 +
59018 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
59019 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
59020 + mpnt->vm_mirror = mpnt_m;
59021 + } else {
59022 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
59023 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
59024 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
59025 + mpnt->vm_mirror->vm_mirror = mpnt;
59026 + }
59027 + }
59028 + BUG_ON(mpnt_m);
59029 + }
59030 +#endif
59031 +
59032 /* a new mm has just been created */
59033 arch_dup_mmap(oldmm, mm);
59034 retval = 0;
59035 @@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_f
59036 write_unlock(&fs->lock);
59037 return -EAGAIN;
59038 }
59039 - fs->users++;
59040 + atomic_inc(&fs->users);
59041 write_unlock(&fs->lock);
59042 return 0;
59043 }
59044 tsk->fs = copy_fs_struct(fs);
59045 if (!tsk->fs)
59046 return -ENOMEM;
59047 + gr_set_chroot_entries(tsk, &tsk->fs->root);
59048 return 0;
59049 }
59050
59051 @@ -1033,10 +1060,13 @@ static struct task_struct *copy_process(
59052 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
59053 #endif
59054 retval = -EAGAIN;
59055 +
59056 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
59057 +
59058 if (atomic_read(&p->real_cred->user->processes) >=
59059 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
59060 - if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
59061 - p->real_cred->user != INIT_USER)
59062 + if (p->real_cred->user != INIT_USER &&
59063 + !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
59064 goto bad_fork_free;
59065 }
59066
59067 @@ -1183,6 +1213,8 @@ static struct task_struct *copy_process(
59068 goto bad_fork_free_pid;
59069 }
59070
59071 + gr_copy_label(p);
59072 +
59073 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
59074 /*
59075 * Clear TID on mm_release()?
59076 @@ -1333,6 +1365,8 @@ bad_fork_cleanup_count:
59077 bad_fork_free:
59078 free_task(p);
59079 fork_out:
59080 + gr_log_forkfail(retval);
59081 +
59082 return ERR_PTR(retval);
59083 }
59084
59085 @@ -1426,6 +1460,8 @@ long do_fork(unsigned long clone_flags,
59086 if (clone_flags & CLONE_PARENT_SETTID)
59087 put_user(nr, parent_tidptr);
59088
59089 + gr_handle_brute_check();
59090 +
59091 if (clone_flags & CLONE_VFORK) {
59092 p->vfork_done = &vfork;
59093 init_completion(&vfork);
59094 @@ -1558,7 +1594,7 @@ static int unshare_fs(unsigned long unsh
59095 return 0;
59096
59097 /* don't need lock here; in the worst case we'll do useless copy */
59098 - if (fs->users == 1)
59099 + if (atomic_read(&fs->users) == 1)
59100 return 0;
59101
59102 *new_fsp = copy_fs_struct(fs);
59103 @@ -1681,7 +1717,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
59104 fs = current->fs;
59105 write_lock(&fs->lock);
59106 current->fs = new_fs;
59107 - if (--fs->users)
59108 + gr_set_chroot_entries(current, &current->fs->root);
59109 + if (atomic_dec_return(&fs->users))
59110 new_fs = NULL;
59111 else
59112 new_fs = fs;
59113 diff -urNp linux-2.6.32.41/kernel/futex.c linux-2.6.32.41/kernel/futex.c
59114 --- linux-2.6.32.41/kernel/futex.c 2011-03-27 14:31:47.000000000 -0400
59115 +++ linux-2.6.32.41/kernel/futex.c 2011-05-16 21:46:57.000000000 -0400
59116 @@ -54,6 +54,7 @@
59117 #include <linux/mount.h>
59118 #include <linux/pagemap.h>
59119 #include <linux/syscalls.h>
59120 +#include <linux/ptrace.h>
59121 #include <linux/signal.h>
59122 #include <linux/module.h>
59123 #include <linux/magic.h>
59124 @@ -221,6 +222,11 @@ get_futex_key(u32 __user *uaddr, int fsh
59125 struct page *page;
59126 int err;
59127
59128 +#ifdef CONFIG_PAX_SEGMEXEC
59129 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
59130 + return -EFAULT;
59131 +#endif
59132 +
59133 /*
59134 * The futex address must be "naturally" aligned.
59135 */
59136 @@ -1789,6 +1795,8 @@ static int futex_wait(u32 __user *uaddr,
59137 struct futex_q q;
59138 int ret;
59139
59140 + pax_track_stack();
59141 +
59142 if (!bitset)
59143 return -EINVAL;
59144
59145 @@ -1841,7 +1849,7 @@ retry:
59146
59147 restart = &current_thread_info()->restart_block;
59148 restart->fn = futex_wait_restart;
59149 - restart->futex.uaddr = (u32 *)uaddr;
59150 + restart->futex.uaddr = uaddr;
59151 restart->futex.val = val;
59152 restart->futex.time = abs_time->tv64;
59153 restart->futex.bitset = bitset;
59154 @@ -2203,6 +2211,8 @@ static int futex_wait_requeue_pi(u32 __u
59155 struct futex_q q;
59156 int res, ret;
59157
59158 + pax_track_stack();
59159 +
59160 if (!bitset)
59161 return -EINVAL;
59162
59163 @@ -2377,7 +2387,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
59164 {
59165 struct robust_list_head __user *head;
59166 unsigned long ret;
59167 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
59168 const struct cred *cred = current_cred(), *pcred;
59169 +#endif
59170
59171 if (!futex_cmpxchg_enabled)
59172 return -ENOSYS;
59173 @@ -2393,11 +2405,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
59174 if (!p)
59175 goto err_unlock;
59176 ret = -EPERM;
59177 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59178 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
59179 + goto err_unlock;
59180 +#else
59181 pcred = __task_cred(p);
59182 if (cred->euid != pcred->euid &&
59183 cred->euid != pcred->uid &&
59184 !capable(CAP_SYS_PTRACE))
59185 goto err_unlock;
59186 +#endif
59187 head = p->robust_list;
59188 rcu_read_unlock();
59189 }
59190 @@ -2459,7 +2476,7 @@ retry:
59191 */
59192 static inline int fetch_robust_entry(struct robust_list __user **entry,
59193 struct robust_list __user * __user *head,
59194 - int *pi)
59195 + unsigned int *pi)
59196 {
59197 unsigned long uentry;
59198
59199 @@ -2640,6 +2657,7 @@ static int __init futex_init(void)
59200 {
59201 u32 curval;
59202 int i;
59203 + mm_segment_t oldfs;
59204
59205 /*
59206 * This will fail and we want it. Some arch implementations do
59207 @@ -2651,7 +2669,10 @@ static int __init futex_init(void)
59208 * implementation, the non functional ones will return
59209 * -ENOSYS.
59210 */
59211 + oldfs = get_fs();
59212 + set_fs(USER_DS);
59213 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
59214 + set_fs(oldfs);
59215 if (curval == -EFAULT)
59216 futex_cmpxchg_enabled = 1;
59217
59218 diff -urNp linux-2.6.32.41/kernel/futex_compat.c linux-2.6.32.41/kernel/futex_compat.c
59219 --- linux-2.6.32.41/kernel/futex_compat.c 2011-03-27 14:31:47.000000000 -0400
59220 +++ linux-2.6.32.41/kernel/futex_compat.c 2011-04-17 15:56:46.000000000 -0400
59221 @@ -10,6 +10,7 @@
59222 #include <linux/compat.h>
59223 #include <linux/nsproxy.h>
59224 #include <linux/futex.h>
59225 +#include <linux/ptrace.h>
59226
59227 #include <asm/uaccess.h>
59228
59229 @@ -135,7 +136,10 @@ compat_sys_get_robust_list(int pid, comp
59230 {
59231 struct compat_robust_list_head __user *head;
59232 unsigned long ret;
59233 - const struct cred *cred = current_cred(), *pcred;
59234 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
59235 + const struct cred *cred = current_cred();
59236 + const struct cred *pcred;
59237 +#endif
59238
59239 if (!futex_cmpxchg_enabled)
59240 return -ENOSYS;
59241 @@ -151,11 +155,16 @@ compat_sys_get_robust_list(int pid, comp
59242 if (!p)
59243 goto err_unlock;
59244 ret = -EPERM;
59245 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59246 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
59247 + goto err_unlock;
59248 +#else
59249 pcred = __task_cred(p);
59250 if (cred->euid != pcred->euid &&
59251 cred->euid != pcred->uid &&
59252 !capable(CAP_SYS_PTRACE))
59253 goto err_unlock;
59254 +#endif
59255 head = p->compat_robust_list;
59256 read_unlock(&tasklist_lock);
59257 }
59258 diff -urNp linux-2.6.32.41/kernel/gcov/base.c linux-2.6.32.41/kernel/gcov/base.c
59259 --- linux-2.6.32.41/kernel/gcov/base.c 2011-03-27 14:31:47.000000000 -0400
59260 +++ linux-2.6.32.41/kernel/gcov/base.c 2011-04-17 15:56:46.000000000 -0400
59261 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
59262 }
59263
59264 #ifdef CONFIG_MODULES
59265 -static inline int within(void *addr, void *start, unsigned long size)
59266 -{
59267 - return ((addr >= start) && (addr < start + size));
59268 -}
59269 -
59270 /* Update list and generate events when modules are unloaded. */
59271 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
59272 void *data)
59273 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
59274 prev = NULL;
59275 /* Remove entries located in module from linked list. */
59276 for (info = gcov_info_head; info; info = info->next) {
59277 - if (within(info, mod->module_core, mod->core_size)) {
59278 + if (within_module_core_rw((unsigned long)info, mod)) {
59279 if (prev)
59280 prev->next = info->next;
59281 else
59282 diff -urNp linux-2.6.32.41/kernel/hrtimer.c linux-2.6.32.41/kernel/hrtimer.c
59283 --- linux-2.6.32.41/kernel/hrtimer.c 2011-03-27 14:31:47.000000000 -0400
59284 +++ linux-2.6.32.41/kernel/hrtimer.c 2011-04-17 15:56:46.000000000 -0400
59285 @@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
59286 local_irq_restore(flags);
59287 }
59288
59289 -static void run_hrtimer_softirq(struct softirq_action *h)
59290 +static void run_hrtimer_softirq(void)
59291 {
59292 hrtimer_peek_ahead_timers();
59293 }
59294 diff -urNp linux-2.6.32.41/kernel/kallsyms.c linux-2.6.32.41/kernel/kallsyms.c
59295 --- linux-2.6.32.41/kernel/kallsyms.c 2011-03-27 14:31:47.000000000 -0400
59296 +++ linux-2.6.32.41/kernel/kallsyms.c 2011-04-17 15:56:46.000000000 -0400
59297 @@ -11,6 +11,9 @@
59298 * Changed the compression method from stem compression to "table lookup"
59299 * compression (see scripts/kallsyms.c for a more complete description)
59300 */
59301 +#ifdef CONFIG_GRKERNSEC_HIDESYM
59302 +#define __INCLUDED_BY_HIDESYM 1
59303 +#endif
59304 #include <linux/kallsyms.h>
59305 #include <linux/module.h>
59306 #include <linux/init.h>
59307 @@ -51,12 +54,33 @@ extern const unsigned long kallsyms_mark
59308
59309 static inline int is_kernel_inittext(unsigned long addr)
59310 {
59311 + if (system_state != SYSTEM_BOOTING)
59312 + return 0;
59313 +
59314 if (addr >= (unsigned long)_sinittext
59315 && addr <= (unsigned long)_einittext)
59316 return 1;
59317 return 0;
59318 }
59319
59320 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
59321 +#ifdef CONFIG_MODULES
59322 +static inline int is_module_text(unsigned long addr)
59323 +{
59324 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
59325 + return 1;
59326 +
59327 + addr = ktla_ktva(addr);
59328 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
59329 +}
59330 +#else
59331 +static inline int is_module_text(unsigned long addr)
59332 +{
59333 + return 0;
59334 +}
59335 +#endif
59336 +#endif
59337 +
59338 static inline int is_kernel_text(unsigned long addr)
59339 {
59340 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
59341 @@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigne
59342
59343 static inline int is_kernel(unsigned long addr)
59344 {
59345 +
59346 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
59347 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
59348 + return 1;
59349 +
59350 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
59351 +#else
59352 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
59353 +#endif
59354 +
59355 return 1;
59356 return in_gate_area_no_task(addr);
59357 }
59358
59359 static int is_ksym_addr(unsigned long addr)
59360 {
59361 +
59362 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
59363 + if (is_module_text(addr))
59364 + return 0;
59365 +#endif
59366 +
59367 if (all_var)
59368 return is_kernel(addr);
59369
59370 @@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(st
59371
59372 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
59373 {
59374 - iter->name[0] = '\0';
59375 iter->nameoff = get_symbol_offset(new_pos);
59376 iter->pos = new_pos;
59377 }
59378 @@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, vo
59379 {
59380 struct kallsym_iter *iter = m->private;
59381
59382 +#ifdef CONFIG_GRKERNSEC_HIDESYM
59383 + if (current_uid())
59384 + return 0;
59385 +#endif
59386 +
59387 /* Some debugging symbols have no name. Ignore them. */
59388 if (!iter->name[0])
59389 return 0;
59390 @@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *i
59391 struct kallsym_iter *iter;
59392 int ret;
59393
59394 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
59395 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
59396 if (!iter)
59397 return -ENOMEM;
59398 reset_iter(iter, 0);
59399 diff -urNp linux-2.6.32.41/kernel/kgdb.c linux-2.6.32.41/kernel/kgdb.c
59400 --- linux-2.6.32.41/kernel/kgdb.c 2011-04-17 17:00:52.000000000 -0400
59401 +++ linux-2.6.32.41/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
59402 @@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
59403 /* Guard for recursive entry */
59404 static int exception_level;
59405
59406 -static struct kgdb_io *kgdb_io_ops;
59407 +static const struct kgdb_io *kgdb_io_ops;
59408 static DEFINE_SPINLOCK(kgdb_registration_lock);
59409
59410 /* kgdb console driver is loaded */
59411 @@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1)
59412 */
59413 static atomic_t passive_cpu_wait[NR_CPUS];
59414 static atomic_t cpu_in_kgdb[NR_CPUS];
59415 -atomic_t kgdb_setting_breakpoint;
59416 +atomic_unchecked_t kgdb_setting_breakpoint;
59417
59418 struct task_struct *kgdb_usethread;
59419 struct task_struct *kgdb_contthread;
59420 @@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBY
59421 sizeof(unsigned long)];
59422
59423 /* to keep track of the CPU which is doing the single stepping*/
59424 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
59425 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
59426
59427 /*
59428 * If you are debugging a problem where roundup (the collection of
59429 @@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
59430 return 0;
59431 if (kgdb_connected)
59432 return 1;
59433 - if (atomic_read(&kgdb_setting_breakpoint))
59434 + if (atomic_read_unchecked(&kgdb_setting_breakpoint))
59435 return 1;
59436 if (print_wait)
59437 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
59438 @@ -1426,8 +1426,8 @@ acquirelock:
59439 * instance of the exception handler wanted to come into the
59440 * debugger on a different CPU via a single step
59441 */
59442 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
59443 - atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
59444 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
59445 + atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
59446
59447 atomic_set(&kgdb_active, -1);
59448 touch_softlockup_watchdog();
59449 @@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void
59450 *
59451 * Register it with the KGDB core.
59452 */
59453 -int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
59454 +int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
59455 {
59456 int err;
59457
59458 @@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_modul
59459 *
59460 * Unregister it with the KGDB core.
59461 */
59462 -void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
59463 +void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
59464 {
59465 BUG_ON(kgdb_connected);
59466
59467 @@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_mod
59468 */
59469 void kgdb_breakpoint(void)
59470 {
59471 - atomic_set(&kgdb_setting_breakpoint, 1);
59472 + atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
59473 wmb(); /* Sync point before breakpoint */
59474 arch_kgdb_breakpoint();
59475 wmb(); /* Sync point after breakpoint */
59476 - atomic_set(&kgdb_setting_breakpoint, 0);
59477 + atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
59478 }
59479 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
59480
59481 diff -urNp linux-2.6.32.41/kernel/kmod.c linux-2.6.32.41/kernel/kmod.c
59482 --- linux-2.6.32.41/kernel/kmod.c 2011-03-27 14:31:47.000000000 -0400
59483 +++ linux-2.6.32.41/kernel/kmod.c 2011-04-17 15:56:46.000000000 -0400
59484 @@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
59485 * If module auto-loading support is disabled then this function
59486 * becomes a no-operation.
59487 */
59488 -int __request_module(bool wait, const char *fmt, ...)
59489 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
59490 {
59491 - va_list args;
59492 char module_name[MODULE_NAME_LEN];
59493 unsigned int max_modprobes;
59494 int ret;
59495 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
59496 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
59497 static char *envp[] = { "HOME=/",
59498 "TERM=linux",
59499 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
59500 @@ -84,12 +83,24 @@ int __request_module(bool wait, const ch
59501 if (ret)
59502 return ret;
59503
59504 - va_start(args, fmt);
59505 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
59506 - va_end(args);
59507 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
59508 if (ret >= MODULE_NAME_LEN)
59509 return -ENAMETOOLONG;
59510
59511 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
59512 + if (!current_uid()) {
59513 + /* hack to workaround consolekit/udisks stupidity */
59514 + read_lock(&tasklist_lock);
59515 + if (!strcmp(current->comm, "mount") &&
59516 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
59517 + read_unlock(&tasklist_lock);
59518 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
59519 + return -EPERM;
59520 + }
59521 + read_unlock(&tasklist_lock);
59522 + }
59523 +#endif
59524 +
59525 /* If modprobe needs a service that is in a module, we get a recursive
59526 * loop. Limit the number of running kmod threads to max_threads/2 or
59527 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
59528 @@ -121,6 +132,48 @@ int __request_module(bool wait, const ch
59529 atomic_dec(&kmod_concurrent);
59530 return ret;
59531 }
59532 +
59533 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
59534 +{
59535 + va_list args;
59536 + int ret;
59537 +
59538 + va_start(args, fmt);
59539 + ret = ____request_module(wait, module_param, fmt, args);
59540 + va_end(args);
59541 +
59542 + return ret;
59543 +}
59544 +
59545 +int __request_module(bool wait, const char *fmt, ...)
59546 +{
59547 + va_list args;
59548 + int ret;
59549 +
59550 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
59551 + if (current_uid()) {
59552 + char module_param[MODULE_NAME_LEN];
59553 +
59554 + memset(module_param, 0, sizeof(module_param));
59555 +
59556 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
59557 +
59558 + va_start(args, fmt);
59559 + ret = ____request_module(wait, module_param, fmt, args);
59560 + va_end(args);
59561 +
59562 + return ret;
59563 + }
59564 +#endif
59565 +
59566 + va_start(args, fmt);
59567 + ret = ____request_module(wait, NULL, fmt, args);
59568 + va_end(args);
59569 +
59570 + return ret;
59571 +}
59572 +
59573 +
59574 EXPORT_SYMBOL(__request_module);
59575 #endif /* CONFIG_MODULES */
59576
59577 diff -urNp linux-2.6.32.41/kernel/kprobes.c linux-2.6.32.41/kernel/kprobes.c
59578 --- linux-2.6.32.41/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
59579 +++ linux-2.6.32.41/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
59580 @@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_
59581 * kernel image and loaded module images reside. This is required
59582 * so x86_64 can correctly handle the %rip-relative fixups.
59583 */
59584 - kip->insns = module_alloc(PAGE_SIZE);
59585 + kip->insns = module_alloc_exec(PAGE_SIZE);
59586 if (!kip->insns) {
59587 kfree(kip);
59588 return NULL;
59589 @@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(st
59590 */
59591 if (!list_is_singular(&kprobe_insn_pages)) {
59592 list_del(&kip->list);
59593 - module_free(NULL, kip->insns);
59594 + module_free_exec(NULL, kip->insns);
59595 kfree(kip);
59596 }
59597 return 1;
59598 @@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
59599 {
59600 int i, err = 0;
59601 unsigned long offset = 0, size = 0;
59602 - char *modname, namebuf[128];
59603 + char *modname, namebuf[KSYM_NAME_LEN];
59604 const char *symbol_name;
59605 void *addr;
59606 struct kprobe_blackpoint *kb;
59607 @@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(st
59608 const char *sym = NULL;
59609 unsigned int i = *(loff_t *) v;
59610 unsigned long offset = 0;
59611 - char *modname, namebuf[128];
59612 + char *modname, namebuf[KSYM_NAME_LEN];
59613
59614 head = &kprobe_table[i];
59615 preempt_disable();
59616 diff -urNp linux-2.6.32.41/kernel/lockdep.c linux-2.6.32.41/kernel/lockdep.c
59617 --- linux-2.6.32.41/kernel/lockdep.c 2011-03-27 14:31:47.000000000 -0400
59618 +++ linux-2.6.32.41/kernel/lockdep.c 2011-04-17 15:56:46.000000000 -0400
59619 @@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_t
59620 /*
59621 * Various lockdep statistics:
59622 */
59623 -atomic_t chain_lookup_hits;
59624 -atomic_t chain_lookup_misses;
59625 -atomic_t hardirqs_on_events;
59626 -atomic_t hardirqs_off_events;
59627 -atomic_t redundant_hardirqs_on;
59628 -atomic_t redundant_hardirqs_off;
59629 -atomic_t softirqs_on_events;
59630 -atomic_t softirqs_off_events;
59631 -atomic_t redundant_softirqs_on;
59632 -atomic_t redundant_softirqs_off;
59633 -atomic_t nr_unused_locks;
59634 -atomic_t nr_cyclic_checks;
59635 -atomic_t nr_find_usage_forwards_checks;
59636 -atomic_t nr_find_usage_backwards_checks;
59637 +atomic_unchecked_t chain_lookup_hits;
59638 +atomic_unchecked_t chain_lookup_misses;
59639 +atomic_unchecked_t hardirqs_on_events;
59640 +atomic_unchecked_t hardirqs_off_events;
59641 +atomic_unchecked_t redundant_hardirqs_on;
59642 +atomic_unchecked_t redundant_hardirqs_off;
59643 +atomic_unchecked_t softirqs_on_events;
59644 +atomic_unchecked_t softirqs_off_events;
59645 +atomic_unchecked_t redundant_softirqs_on;
59646 +atomic_unchecked_t redundant_softirqs_off;
59647 +atomic_unchecked_t nr_unused_locks;
59648 +atomic_unchecked_t nr_cyclic_checks;
59649 +atomic_unchecked_t nr_find_usage_forwards_checks;
59650 +atomic_unchecked_t nr_find_usage_backwards_checks;
59651 #endif
59652
59653 /*
59654 @@ -577,6 +577,10 @@ static int static_obj(void *obj)
59655 int i;
59656 #endif
59657
59658 +#ifdef CONFIG_PAX_KERNEXEC
59659 + start = ktla_ktva(start);
59660 +#endif
59661 +
59662 /*
59663 * static variable?
59664 */
59665 @@ -592,8 +596,7 @@ static int static_obj(void *obj)
59666 */
59667 for_each_possible_cpu(i) {
59668 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
59669 - end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
59670 - + per_cpu_offset(i);
59671 + end = start + PERCPU_ENOUGH_ROOM;
59672
59673 if ((addr >= start) && (addr < end))
59674 return 1;
59675 @@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *
59676 if (!static_obj(lock->key)) {
59677 debug_locks_off();
59678 printk("INFO: trying to register non-static key.\n");
59679 + printk("lock:%pS key:%pS.\n", lock, lock->key);
59680 printk("the code is fine but needs lockdep annotation.\n");
59681 printk("turning off the locking correctness validator.\n");
59682 dump_stack();
59683 @@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep
59684 if (!class)
59685 return 0;
59686 }
59687 - debug_atomic_inc((atomic_t *)&class->ops);
59688 + debug_atomic_inc((atomic_unchecked_t *)&class->ops);
59689 if (very_verbose(class)) {
59690 printk("\nacquire class [%p] %s", class->key, class->name);
59691 if (class->name_version > 1)
59692 diff -urNp linux-2.6.32.41/kernel/lockdep_internals.h linux-2.6.32.41/kernel/lockdep_internals.h
59693 --- linux-2.6.32.41/kernel/lockdep_internals.h 2011-03-27 14:31:47.000000000 -0400
59694 +++ linux-2.6.32.41/kernel/lockdep_internals.h 2011-04-17 15:56:46.000000000 -0400
59695 @@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_
59696 /*
59697 * Various lockdep statistics:
59698 */
59699 -extern atomic_t chain_lookup_hits;
59700 -extern atomic_t chain_lookup_misses;
59701 -extern atomic_t hardirqs_on_events;
59702 -extern atomic_t hardirqs_off_events;
59703 -extern atomic_t redundant_hardirqs_on;
59704 -extern atomic_t redundant_hardirqs_off;
59705 -extern atomic_t softirqs_on_events;
59706 -extern atomic_t softirqs_off_events;
59707 -extern atomic_t redundant_softirqs_on;
59708 -extern atomic_t redundant_softirqs_off;
59709 -extern atomic_t nr_unused_locks;
59710 -extern atomic_t nr_cyclic_checks;
59711 -extern atomic_t nr_cyclic_check_recursions;
59712 -extern atomic_t nr_find_usage_forwards_checks;
59713 -extern atomic_t nr_find_usage_forwards_recursions;
59714 -extern atomic_t nr_find_usage_backwards_checks;
59715 -extern atomic_t nr_find_usage_backwards_recursions;
59716 -# define debug_atomic_inc(ptr) atomic_inc(ptr)
59717 -# define debug_atomic_dec(ptr) atomic_dec(ptr)
59718 -# define debug_atomic_read(ptr) atomic_read(ptr)
59719 +extern atomic_unchecked_t chain_lookup_hits;
59720 +extern atomic_unchecked_t chain_lookup_misses;
59721 +extern atomic_unchecked_t hardirqs_on_events;
59722 +extern atomic_unchecked_t hardirqs_off_events;
59723 +extern atomic_unchecked_t redundant_hardirqs_on;
59724 +extern atomic_unchecked_t redundant_hardirqs_off;
59725 +extern atomic_unchecked_t softirqs_on_events;
59726 +extern atomic_unchecked_t softirqs_off_events;
59727 +extern atomic_unchecked_t redundant_softirqs_on;
59728 +extern atomic_unchecked_t redundant_softirqs_off;
59729 +extern atomic_unchecked_t nr_unused_locks;
59730 +extern atomic_unchecked_t nr_cyclic_checks;
59731 +extern atomic_unchecked_t nr_cyclic_check_recursions;
59732 +extern atomic_unchecked_t nr_find_usage_forwards_checks;
59733 +extern atomic_unchecked_t nr_find_usage_forwards_recursions;
59734 +extern atomic_unchecked_t nr_find_usage_backwards_checks;
59735 +extern atomic_unchecked_t nr_find_usage_backwards_recursions;
59736 +# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
59737 +# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
59738 +# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
59739 #else
59740 # define debug_atomic_inc(ptr) do { } while (0)
59741 # define debug_atomic_dec(ptr) do { } while (0)
59742 diff -urNp linux-2.6.32.41/kernel/lockdep_proc.c linux-2.6.32.41/kernel/lockdep_proc.c
59743 --- linux-2.6.32.41/kernel/lockdep_proc.c 2011-03-27 14:31:47.000000000 -0400
59744 +++ linux-2.6.32.41/kernel/lockdep_proc.c 2011-04-17 15:56:46.000000000 -0400
59745 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
59746
59747 static void print_name(struct seq_file *m, struct lock_class *class)
59748 {
59749 - char str[128];
59750 + char str[KSYM_NAME_LEN];
59751 const char *name = class->name;
59752
59753 if (!name) {
59754 diff -urNp linux-2.6.32.41/kernel/module.c linux-2.6.32.41/kernel/module.c
59755 --- linux-2.6.32.41/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
59756 +++ linux-2.6.32.41/kernel/module.c 2011-04-29 18:52:40.000000000 -0400
59757 @@ -55,6 +55,7 @@
59758 #include <linux/async.h>
59759 #include <linux/percpu.h>
59760 #include <linux/kmemleak.h>
59761 +#include <linux/grsecurity.h>
59762
59763 #define CREATE_TRACE_POINTS
59764 #include <trace/events/module.h>
59765 @@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq
59766 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
59767
59768 /* Bounds of module allocation, for speeding __module_address */
59769 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
59770 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
59771 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
59772
59773 int register_module_notifier(struct notifier_block * nb)
59774 {
59775 @@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct
59776 return true;
59777
59778 list_for_each_entry_rcu(mod, &modules, list) {
59779 - struct symsearch arr[] = {
59780 + struct symsearch modarr[] = {
59781 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
59782 NOT_GPL_ONLY, false },
59783 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
59784 @@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct
59785 #endif
59786 };
59787
59788 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
59789 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
59790 return true;
59791 }
59792 return false;
59793 @@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned lo
59794 void *ptr;
59795 int cpu;
59796
59797 - if (align > PAGE_SIZE) {
59798 + if (align-1 >= PAGE_SIZE) {
59799 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
59800 name, align, PAGE_SIZE);
59801 align = PAGE_SIZE;
59802 @@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resol
59803 * /sys/module/foo/sections stuff
59804 * J. Corbet <corbet@lwn.net>
59805 */
59806 -#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
59807 +#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59808
59809 static inline bool sect_empty(const Elf_Shdr *sect)
59810 {
59811 @@ -1545,7 +1547,8 @@ static void free_module(struct module *m
59812 destroy_params(mod->kp, mod->num_kp);
59813
59814 /* This may be NULL, but that's OK */
59815 - module_free(mod, mod->module_init);
59816 + module_free(mod, mod->module_init_rw);
59817 + module_free_exec(mod, mod->module_init_rx);
59818 kfree(mod->args);
59819 if (mod->percpu)
59820 percpu_modfree(mod->percpu);
59821 @@ -1554,10 +1557,12 @@ static void free_module(struct module *m
59822 percpu_modfree(mod->refptr);
59823 #endif
59824 /* Free lock-classes: */
59825 - lockdep_free_key_range(mod->module_core, mod->core_size);
59826 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
59827 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
59828
59829 /* Finally, free the core (containing the module structure) */
59830 - module_free(mod, mod->module_core);
59831 + module_free_exec(mod, mod->module_core_rx);
59832 + module_free(mod, mod->module_core_rw);
59833
59834 #ifdef CONFIG_MPU
59835 update_protections(current->mm);
59836 @@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *se
59837 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
59838 int ret = 0;
59839 const struct kernel_symbol *ksym;
59840 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
59841 + int is_fs_load = 0;
59842 + int register_filesystem_found = 0;
59843 + char *p;
59844 +
59845 + p = strstr(mod->args, "grsec_modharden_fs");
59846 +
59847 + if (p) {
59848 + char *endptr = p + strlen("grsec_modharden_fs");
59849 + /* copy \0 as well */
59850 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
59851 + is_fs_load = 1;
59852 + }
59853 +#endif
59854 +
59855
59856 for (i = 1; i < n; i++) {
59857 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
59858 + const char *name = strtab + sym[i].st_name;
59859 +
59860 + /* it's a real shame this will never get ripped and copied
59861 + upstream! ;(
59862 + */
59863 + if (is_fs_load && !strcmp(name, "register_filesystem"))
59864 + register_filesystem_found = 1;
59865 +#endif
59866 switch (sym[i].st_shndx) {
59867 case SHN_COMMON:
59868 /* We compiled with -fno-common. These are not
59869 @@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *se
59870 strtab + sym[i].st_name, mod);
59871 /* Ok if resolved. */
59872 if (ksym) {
59873 + pax_open_kernel();
59874 sym[i].st_value = ksym->value;
59875 + pax_close_kernel();
59876 break;
59877 }
59878
59879 @@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *se
59880 secbase = (unsigned long)mod->percpu;
59881 else
59882 secbase = sechdrs[sym[i].st_shndx].sh_addr;
59883 + pax_open_kernel();
59884 sym[i].st_value += secbase;
59885 + pax_close_kernel();
59886 break;
59887 }
59888 }
59889
59890 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
59891 + if (is_fs_load && !register_filesystem_found) {
59892 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
59893 + ret = -EPERM;
59894 + }
59895 +#endif
59896 +
59897 return ret;
59898 }
59899
59900 @@ -1731,11 +1771,12 @@ static void layout_sections(struct modul
59901 || s->sh_entsize != ~0UL
59902 || strstarts(secstrings + s->sh_name, ".init"))
59903 continue;
59904 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
59905 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
59906 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
59907 + else
59908 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
59909 DEBUGP("\t%s\n", secstrings + s->sh_name);
59910 }
59911 - if (m == 0)
59912 - mod->core_text_size = mod->core_size;
59913 }
59914
59915 DEBUGP("Init section allocation order:\n");
59916 @@ -1748,12 +1789,13 @@ static void layout_sections(struct modul
59917 || s->sh_entsize != ~0UL
59918 || !strstarts(secstrings + s->sh_name, ".init"))
59919 continue;
59920 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
59921 - | INIT_OFFSET_MASK);
59922 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
59923 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
59924 + else
59925 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
59926 + s->sh_entsize |= INIT_OFFSET_MASK;
59927 DEBUGP("\t%s\n", secstrings + s->sh_name);
59928 }
59929 - if (m == 0)
59930 - mod->init_text_size = mod->init_size;
59931 }
59932 }
59933
59934 @@ -1857,9 +1899,8 @@ static int is_exported(const char *name,
59935
59936 /* As per nm */
59937 static char elf_type(const Elf_Sym *sym,
59938 - Elf_Shdr *sechdrs,
59939 - const char *secstrings,
59940 - struct module *mod)
59941 + const Elf_Shdr *sechdrs,
59942 + const char *secstrings)
59943 {
59944 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
59945 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
59946 @@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struc
59947
59948 /* Put symbol section at end of init part of module. */
59949 symsect->sh_flags |= SHF_ALLOC;
59950 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
59951 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
59952 symindex) | INIT_OFFSET_MASK;
59953 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
59954
59955 @@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struc
59956 }
59957
59958 /* Append room for core symbols at end of core part. */
59959 - symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
59960 - mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
59961 + symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
59962 + mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
59963
59964 /* Put string table section at end of init part of module. */
59965 strsect->sh_flags |= SHF_ALLOC;
59966 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
59967 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
59968 strindex) | INIT_OFFSET_MASK;
59969 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
59970
59971 /* Append room for core symbols' strings at end of core part. */
59972 - *pstroffs = mod->core_size;
59973 + *pstroffs = mod->core_size_rx;
59974 __set_bit(0, strmap);
59975 - mod->core_size += bitmap_weight(strmap, strsect->sh_size);
59976 + mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
59977
59978 return symoffs;
59979 }
59980 @@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *
59981 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
59982 mod->strtab = (void *)sechdrs[strindex].sh_addr;
59983
59984 + pax_open_kernel();
59985 +
59986 /* Set types up while we still have access to sections. */
59987 for (i = 0; i < mod->num_symtab; i++)
59988 mod->symtab[i].st_info
59989 - = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
59990 + = elf_type(&mod->symtab[i], sechdrs, secstrings);
59991
59992 - mod->core_symtab = dst = mod->module_core + symoffs;
59993 + mod->core_symtab = dst = mod->module_core_rx + symoffs;
59994 src = mod->symtab;
59995 *dst = *src;
59996 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
59997 @@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *
59998 }
59999 mod->core_num_syms = ndst;
60000
60001 - mod->core_strtab = s = mod->module_core + stroffs;
60002 + mod->core_strtab = s = mod->module_core_rx + stroffs;
60003 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
60004 if (test_bit(i, strmap))
60005 *++s = mod->strtab[i];
60006 +
60007 + pax_close_kernel();
60008 }
60009 #else
60010 static inline unsigned long layout_symtab(struct module *mod,
60011 @@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _
60012 #endif
60013 }
60014
60015 -static void *module_alloc_update_bounds(unsigned long size)
60016 +static void *module_alloc_update_bounds_rw(unsigned long size)
60017 {
60018 void *ret = module_alloc(size);
60019
60020 if (ret) {
60021 /* Update module bounds. */
60022 - if ((unsigned long)ret < module_addr_min)
60023 - module_addr_min = (unsigned long)ret;
60024 - if ((unsigned long)ret + size > module_addr_max)
60025 - module_addr_max = (unsigned long)ret + size;
60026 + if ((unsigned long)ret < module_addr_min_rw)
60027 + module_addr_min_rw = (unsigned long)ret;
60028 + if ((unsigned long)ret + size > module_addr_max_rw)
60029 + module_addr_max_rw = (unsigned long)ret + size;
60030 + }
60031 + return ret;
60032 +}
60033 +
60034 +static void *module_alloc_update_bounds_rx(unsigned long size)
60035 +{
60036 + void *ret = module_alloc_exec(size);
60037 +
60038 + if (ret) {
60039 + /* Update module bounds. */
60040 + if ((unsigned long)ret < module_addr_min_rx)
60041 + module_addr_min_rx = (unsigned long)ret;
60042 + if ((unsigned long)ret + size > module_addr_max_rx)
60043 + module_addr_max_rx = (unsigned long)ret + size;
60044 }
60045 return ret;
60046 }
60047 @@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct
60048 unsigned int i;
60049
60050 /* only scan the sections containing data */
60051 - kmemleak_scan_area(mod->module_core, (unsigned long)mod -
60052 - (unsigned long)mod->module_core,
60053 + kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
60054 + (unsigned long)mod->module_core_rw,
60055 sizeof(struct module), GFP_KERNEL);
60056
60057 for (i = 1; i < hdr->e_shnum; i++) {
60058 @@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct
60059 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
60060 continue;
60061
60062 - kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
60063 - (unsigned long)mod->module_core,
60064 + kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
60065 + (unsigned long)mod->module_core_rw,
60066 sechdrs[i].sh_size, GFP_KERNEL);
60067 }
60068 }
60069 @@ -2263,7 +2322,7 @@ static noinline struct module *load_modu
60070 secstrings, &stroffs, strmap);
60071
60072 /* Do the allocs. */
60073 - ptr = module_alloc_update_bounds(mod->core_size);
60074 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
60075 /*
60076 * The pointer to this block is stored in the module structure
60077 * which is inside the block. Just mark it as not being a
60078 @@ -2274,23 +2333,47 @@ static noinline struct module *load_modu
60079 err = -ENOMEM;
60080 goto free_percpu;
60081 }
60082 - memset(ptr, 0, mod->core_size);
60083 - mod->module_core = ptr;
60084 + memset(ptr, 0, mod->core_size_rw);
60085 + mod->module_core_rw = ptr;
60086
60087 - ptr = module_alloc_update_bounds(mod->init_size);
60088 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
60089 /*
60090 * The pointer to this block is stored in the module structure
60091 * which is inside the block. This block doesn't need to be
60092 * scanned as it contains data and code that will be freed
60093 * after the module is initialized.
60094 */
60095 - kmemleak_ignore(ptr);
60096 - if (!ptr && mod->init_size) {
60097 + kmemleak_not_leak(ptr);
60098 + if (!ptr && mod->init_size_rw) {
60099 + err = -ENOMEM;
60100 + goto free_core_rw;
60101 + }
60102 + memset(ptr, 0, mod->init_size_rw);
60103 + mod->module_init_rw = ptr;
60104 +
60105 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
60106 + kmemleak_not_leak(ptr);
60107 + if (!ptr) {
60108 err = -ENOMEM;
60109 - goto free_core;
60110 + goto free_init_rw;
60111 }
60112 - memset(ptr, 0, mod->init_size);
60113 - mod->module_init = ptr;
60114 +
60115 + pax_open_kernel();
60116 + memset(ptr, 0, mod->core_size_rx);
60117 + pax_close_kernel();
60118 + mod->module_core_rx = ptr;
60119 +
60120 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
60121 + kmemleak_not_leak(ptr);
60122 + if (!ptr && mod->init_size_rx) {
60123 + err = -ENOMEM;
60124 + goto free_core_rx;
60125 + }
60126 +
60127 + pax_open_kernel();
60128 + memset(ptr, 0, mod->init_size_rx);
60129 + pax_close_kernel();
60130 + mod->module_init_rx = ptr;
60131
60132 /* Transfer each section which specifies SHF_ALLOC */
60133 DEBUGP("final section addresses:\n");
60134 @@ -2300,17 +2383,45 @@ static noinline struct module *load_modu
60135 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
60136 continue;
60137
60138 - if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
60139 - dest = mod->module_init
60140 - + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
60141 - else
60142 - dest = mod->module_core + sechdrs[i].sh_entsize;
60143 + if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
60144 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
60145 + dest = mod->module_init_rw
60146 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
60147 + else
60148 + dest = mod->module_init_rx
60149 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
60150 + } else {
60151 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
60152 + dest = mod->module_core_rw + sechdrs[i].sh_entsize;
60153 + else
60154 + dest = mod->module_core_rx + sechdrs[i].sh_entsize;
60155 + }
60156 +
60157 + if (sechdrs[i].sh_type != SHT_NOBITS) {
60158
60159 - if (sechdrs[i].sh_type != SHT_NOBITS)
60160 - memcpy(dest, (void *)sechdrs[i].sh_addr,
60161 - sechdrs[i].sh_size);
60162 +#ifdef CONFIG_PAX_KERNEXEC
60163 +#ifdef CONFIG_X86_64
60164 + if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
60165 + set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
60166 +#endif
60167 + if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
60168 + pax_open_kernel();
60169 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
60170 + pax_close_kernel();
60171 + } else
60172 +#endif
60173 +
60174 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
60175 + }
60176 /* Update sh_addr to point to copy in image. */
60177 - sechdrs[i].sh_addr = (unsigned long)dest;
60178 +
60179 +#ifdef CONFIG_PAX_KERNEXEC
60180 + if (sechdrs[i].sh_flags & SHF_EXECINSTR)
60181 + sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
60182 + else
60183 +#endif
60184 +
60185 + sechdrs[i].sh_addr = (unsigned long)dest;
60186 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
60187 }
60188 /* Module has been moved. */
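The pax_open_kernel()/pax_close_kernel() pairs above bracket writes into memory that PaX KERNEXEC otherwise keeps read-only, such as the memcpy of read-only and executable sections into the RX mapping. As a rough userspace analogy (not the kernel implementation, which toggles the CPU's write protection instead), the pattern is the same as briefly unprotecting a read-only page around a patch and re-protecting it immediately afterwards:

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	char *page = mmap(NULL, pagesz, PROT_READ,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (page == MAP_FAILED)
		return 1;

	mprotect(page, pagesz, PROT_READ | PROT_WRITE);	/* "pax_open_kernel"  */
	strcpy(page, "section copied while briefly writable");
	mprotect(page, pagesz, PROT_READ);		/* "pax_close_kernel" */

	puts(page);
	return 0;
}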
60189 @@ -2322,7 +2433,7 @@ static noinline struct module *load_modu
60190 mod->name);
60191 if (!mod->refptr) {
60192 err = -ENOMEM;
60193 - goto free_init;
60194 + goto free_init_rx;
60195 }
60196 #endif
60197 /* Now we've moved module, initialize linked lists, etc. */
60198 @@ -2351,6 +2462,31 @@ static noinline struct module *load_modu
60199 /* Set up MODINFO_ATTR fields */
60200 setup_modinfo(mod, sechdrs, infoindex);
60201
60202 + mod->args = args;
60203 +
60204 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
60205 + {
60206 + char *p, *p2;
60207 +
60208 + if (strstr(mod->args, "grsec_modharden_netdev")) {
60209 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.\n", mod->name);
60210 + err = -EPERM;
60211 + goto cleanup;
60212 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
60213 + p += strlen("grsec_modharden_normal");
60214 + p2 = strstr(p, "_");
60215 + if (p2) {
60216 + *p2 = '\0';
60217 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
60218 + *p2 = '_';
60219 + }
60220 + err = -EPERM;
60221 + goto cleanup;
60222 + }
60223 + }
60224 +#endif
60225 +
60226 +
60227 /* Fix up syms, so that st_value is a pointer to location. */
60228 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
60229 mod);
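The MODHARDEN block above rejects module auto-loads that were requested on behalf of an unprivileged process: the request carries a marker in the module arguments, and in the "normal" case the requesting uid sits between the marker and a trailing underscore. The stand-alone sketch below mirrors the strstr()/strlen() parsing from the patch; the exact argument layout ("grsec_modharden_normal<uid>_") is inferred from that parsing and should be treated as an assumption here.

#include <stdio.h>
#include <string.h>

int main(void)
{
	char args[] = "grsec_modharden_normal1000_";
	char *p = strstr(args, "grsec_modharden_normal");
	char *p2;

	if (p) {
		p += strlen("grsec_modharden_normal");
		p2 = strstr(p, "_");
		if (p2) {
			*p2 = '\0';
			printf("denied module auto-load requested by uid %.9s\n", p);
			*p2 = '_';
		}
	}
	return 0;
}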
60230 @@ -2431,8 +2567,8 @@ static noinline struct module *load_modu
60231
60232 /* Now do relocations. */
60233 for (i = 1; i < hdr->e_shnum; i++) {
60234 - const char *strtab = (char *)sechdrs[strindex].sh_addr;
60235 unsigned int info = sechdrs[i].sh_info;
60236 + strtab = (char *)sechdrs[strindex].sh_addr;
60237
60238 /* Not a valid relocation section? */
60239 if (info >= hdr->e_shnum)
60240 @@ -2493,16 +2629,15 @@ static noinline struct module *load_modu
60241 * Do it before processing of module parameters, so the module
60242 * can provide parameter accessor functions of its own.
60243 */
60244 - if (mod->module_init)
60245 - flush_icache_range((unsigned long)mod->module_init,
60246 - (unsigned long)mod->module_init
60247 - + mod->init_size);
60248 - flush_icache_range((unsigned long)mod->module_core,
60249 - (unsigned long)mod->module_core + mod->core_size);
60250 + if (mod->module_init_rx)
60251 + flush_icache_range((unsigned long)mod->module_init_rx,
60252 + (unsigned long)mod->module_init_rx
60253 + + mod->init_size_rx);
60254 + flush_icache_range((unsigned long)mod->module_core_rx,
60255 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
60256
60257 set_fs(old_fs);
60258
60259 - mod->args = args;
60260 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
60261 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
60262 mod->name);
60263 @@ -2546,12 +2681,16 @@ static noinline struct module *load_modu
60264 free_unload:
60265 module_unload_free(mod);
60266 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
60267 + free_init_rx:
60268 percpu_modfree(mod->refptr);
60269 - free_init:
60270 #endif
60271 - module_free(mod, mod->module_init);
60272 - free_core:
60273 - module_free(mod, mod->module_core);
60274 + module_free_exec(mod, mod->module_init_rx);
60275 + free_core_rx:
60276 + module_free_exec(mod, mod->module_core_rx);
60277 + free_init_rw:
60278 + module_free(mod, mod->module_init_rw);
60279 + free_core_rw:
60280 + module_free(mod, mod->module_core_rw);
60281 /* mod will be freed with core. Don't access it beyond this line! */
60282 free_percpu:
60283 if (percpu)
60284 @@ -2653,10 +2792,12 @@ SYSCALL_DEFINE3(init_module, void __user
60285 mod->symtab = mod->core_symtab;
60286 mod->strtab = mod->core_strtab;
60287 #endif
60288 - module_free(mod, mod->module_init);
60289 - mod->module_init = NULL;
60290 - mod->init_size = 0;
60291 - mod->init_text_size = 0;
60292 + module_free(mod, mod->module_init_rw);
60293 + module_free_exec(mod, mod->module_init_rx);
60294 + mod->module_init_rw = NULL;
60295 + mod->module_init_rx = NULL;
60296 + mod->init_size_rw = 0;
60297 + mod->init_size_rx = 0;
60298 mutex_unlock(&module_mutex);
60299
60300 return 0;
60301 @@ -2687,10 +2828,16 @@ static const char *get_ksymbol(struct mo
60302 unsigned long nextval;
60303
60304 /* At worse, next value is at end of module */
60305 - if (within_module_init(addr, mod))
60306 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
60307 + if (within_module_init_rx(addr, mod))
60308 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
60309 + else if (within_module_init_rw(addr, mod))
60310 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
60311 + else if (within_module_core_rx(addr, mod))
60312 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
60313 + else if (within_module_core_rw(addr, mod))
60314 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
60315 else
60316 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
60317 + return NULL;
60318
60319 /* Scan for closest preceeding symbol, and next symbol. (ELF
60320 starts real symbols at 1). */
60321 @@ -2936,7 +3083,7 @@ static int m_show(struct seq_file *m, vo
60322 char buf[8];
60323
60324 seq_printf(m, "%s %u",
60325 - mod->name, mod->init_size + mod->core_size);
60326 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
60327 print_unload_info(m, mod);
60328
60329 /* Informative for users. */
60330 @@ -2945,7 +3092,7 @@ static int m_show(struct seq_file *m, vo
60331 mod->state == MODULE_STATE_COMING ? "Loading":
60332 "Live");
60333 /* Used by oprofile and other similar tools. */
60334 - seq_printf(m, " 0x%p", mod->module_core);
60335 + seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
60336
60337 /* Taints info */
60338 if (mod->taints)
60339 @@ -2981,7 +3128,17 @@ static const struct file_operations proc
60340
60341 static int __init proc_modules_init(void)
60342 {
60343 +#ifndef CONFIG_GRKERNSEC_HIDESYM
60344 +#ifdef CONFIG_GRKERNSEC_PROC_USER
60345 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
60346 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60347 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
60348 +#else
60349 proc_create("modules", 0, NULL, &proc_modules_operations);
60350 +#endif
60351 +#else
60352 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
60353 +#endif
60354 return 0;
60355 }
60356 module_init(proc_modules_init);
60357 @@ -3040,12 +3197,12 @@ struct module *__module_address(unsigned
60358 {
60359 struct module *mod;
60360
60361 - if (addr < module_addr_min || addr > module_addr_max)
60362 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
60363 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
60364 return NULL;
60365
60366 list_for_each_entry_rcu(mod, &modules, list)
60367 - if (within_module_core(addr, mod)
60368 - || within_module_init(addr, mod))
60369 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
60370 return mod;
60371 return NULL;
60372 }
60373 @@ -3079,11 +3236,20 @@ bool is_module_text_address(unsigned lon
60374 */
60375 struct module *__module_text_address(unsigned long addr)
60376 {
60377 - struct module *mod = __module_address(addr);
60378 + struct module *mod;
60379 +
60380 +#ifdef CONFIG_X86_32
60381 + addr = ktla_ktva(addr);
60382 +#endif
60383 +
60384 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
60385 + return NULL;
60386 +
60387 + mod = __module_address(addr);
60388 +
60389 if (mod) {
60390 /* Make sure it's within the text section. */
60391 - if (!within(addr, mod->module_init, mod->init_text_size)
60392 - && !within(addr, mod->module_core, mod->core_text_size))
60393 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
60394 mod = NULL;
60395 }
60396 return mod;
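Taken together, the kernel/module.c changes above split every module into two mappings: an RX region (code plus read-only data, obtained via module_alloc_exec() and written only inside pax_open_kernel() sections) and an RW region (writable data, obtained via the ordinary module_alloc()), each with its own size fields, global min/max address bounds, and within_module_*_rx/rw() range helpers. The sketch below summarizes the bookkeeping this implies; the stand-alone struct and helpers are illustrative only, not the kernel's struct module.

#include <stdbool.h>
#include <stdio.h>

struct module_layout {
	void *module_core_rx;		/* code + rodata, read-execute */
	unsigned long core_size_rx;
	void *module_core_rw;		/* writable data, read-write   */
	unsigned long core_size_rw;
};

static bool within(unsigned long addr, void *base, unsigned long size)
{
	return addr >= (unsigned long)base && addr < (unsigned long)base + size;
}

static bool within_module_core_rx(unsigned long addr, struct module_layout *m)
{
	return within(addr, m->module_core_rx, m->core_size_rx);
}

static bool within_module_core_rw(unsigned long addr, struct module_layout *m)
{
	return within(addr, m->module_core_rw, m->core_size_rw);
}

int main(void)
{
	static char rx[64], rw[64];
	struct module_layout m = { rx, sizeof(rx), rw, sizeof(rw) };

	printf("rx+8: rx=%d rw=%d\n",
	       within_module_core_rx((unsigned long)rx + 8, &m),
	       within_module_core_rw((unsigned long)rx + 8, &m));
	printf("rw+8: rx=%d rw=%d\n",
	       within_module_core_rx((unsigned long)rw + 8, &m),
	       within_module_core_rw((unsigned long)rw + 8, &m));
	return 0;
}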
60397 diff -urNp linux-2.6.32.41/kernel/mutex.c linux-2.6.32.41/kernel/mutex.c
60398 --- linux-2.6.32.41/kernel/mutex.c 2011-03-27 14:31:47.000000000 -0400
60399 +++ linux-2.6.32.41/kernel/mutex.c 2011-04-17 15:56:46.000000000 -0400
60400 @@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock,
60401 */
60402
60403 for (;;) {
60404 - struct thread_info *owner;
60405 + struct task_struct *owner;
60406
60407 /*
60408 * If we own the BKL, then don't spin. The owner of
60409 @@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock,
60410 spin_lock_mutex(&lock->wait_lock, flags);
60411
60412 debug_mutex_lock_common(lock, &waiter);
60413 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
60414 + debug_mutex_add_waiter(lock, &waiter, task);
60415
60416 /* add waiting tasks to the end of the waitqueue (FIFO): */
60417 list_add_tail(&waiter.list, &lock->wait_list);
60418 @@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock,
60419 * TASK_UNINTERRUPTIBLE case.)
60420 */
60421 if (unlikely(signal_pending_state(state, task))) {
60422 - mutex_remove_waiter(lock, &waiter,
60423 - task_thread_info(task));
60424 + mutex_remove_waiter(lock, &waiter, task);
60425 mutex_release(&lock->dep_map, 1, ip);
60426 spin_unlock_mutex(&lock->wait_lock, flags);
60427
60428 @@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock,
60429 done:
60430 lock_acquired(&lock->dep_map, ip);
60431 /* got the lock - rejoice! */
60432 - mutex_remove_waiter(lock, &waiter, current_thread_info());
60433 + mutex_remove_waiter(lock, &waiter, task);
60434 mutex_set_owner(lock);
60435
60436 /* set it to 0 if there are no waiters left: */
60437 diff -urNp linux-2.6.32.41/kernel/mutex-debug.c linux-2.6.32.41/kernel/mutex-debug.c
60438 --- linux-2.6.32.41/kernel/mutex-debug.c 2011-03-27 14:31:47.000000000 -0400
60439 +++ linux-2.6.32.41/kernel/mutex-debug.c 2011-04-17 15:56:46.000000000 -0400
60440 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
60441 }
60442
60443 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
60444 - struct thread_info *ti)
60445 + struct task_struct *task)
60446 {
60447 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
60448
60449 /* Mark the current thread as blocked on the lock: */
60450 - ti->task->blocked_on = waiter;
60451 + task->blocked_on = waiter;
60452 }
60453
60454 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
60455 - struct thread_info *ti)
60456 + struct task_struct *task)
60457 {
60458 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
60459 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
60460 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
60461 - ti->task->blocked_on = NULL;
60462 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
60463 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
60464 + task->blocked_on = NULL;
60465
60466 list_del_init(&waiter->list);
60467 waiter->task = NULL;
60468 @@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lo
60469 return;
60470
60471 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
60472 - DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
60473 + DEBUG_LOCKS_WARN_ON(lock->owner != current);
60474 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
60475 mutex_clear_owner(lock);
60476 }
60477 diff -urNp linux-2.6.32.41/kernel/mutex-debug.h linux-2.6.32.41/kernel/mutex-debug.h
60478 --- linux-2.6.32.41/kernel/mutex-debug.h 2011-03-27 14:31:47.000000000 -0400
60479 +++ linux-2.6.32.41/kernel/mutex-debug.h 2011-04-17 15:56:46.000000000 -0400
60480 @@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(stru
60481 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
60482 extern void debug_mutex_add_waiter(struct mutex *lock,
60483 struct mutex_waiter *waiter,
60484 - struct thread_info *ti);
60485 + struct task_struct *task);
60486 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
60487 - struct thread_info *ti);
60488 + struct task_struct *task);
60489 extern void debug_mutex_unlock(struct mutex *lock);
60490 extern void debug_mutex_init(struct mutex *lock, const char *name,
60491 struct lock_class_key *key);
60492
60493 static inline void mutex_set_owner(struct mutex *lock)
60494 {
60495 - lock->owner = current_thread_info();
60496 + lock->owner = current;
60497 }
60498
60499 static inline void mutex_clear_owner(struct mutex *lock)
60500 diff -urNp linux-2.6.32.41/kernel/mutex.h linux-2.6.32.41/kernel/mutex.h
60501 --- linux-2.6.32.41/kernel/mutex.h 2011-03-27 14:31:47.000000000 -0400
60502 +++ linux-2.6.32.41/kernel/mutex.h 2011-04-17 15:56:46.000000000 -0400
60503 @@ -19,7 +19,7 @@
60504 #ifdef CONFIG_SMP
60505 static inline void mutex_set_owner(struct mutex *lock)
60506 {
60507 - lock->owner = current_thread_info();
60508 + lock->owner = current;
60509 }
60510
60511 static inline void mutex_clear_owner(struct mutex *lock)
60512 diff -urNp linux-2.6.32.41/kernel/panic.c linux-2.6.32.41/kernel/panic.c
60513 --- linux-2.6.32.41/kernel/panic.c 2011-03-27 14:31:47.000000000 -0400
60514 +++ linux-2.6.32.41/kernel/panic.c 2011-04-17 15:56:46.000000000 -0400
60515 @@ -352,7 +352,7 @@ static void warn_slowpath_common(const c
60516 const char *board;
60517
60518 printk(KERN_WARNING "------------[ cut here ]------------\n");
60519 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
60520 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
60521 board = dmi_get_system_info(DMI_PRODUCT_NAME);
60522 if (board)
60523 printk(KERN_WARNING "Hardware name: %s\n", board);
60524 @@ -392,7 +392,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
60525 */
60526 void __stack_chk_fail(void)
60527 {
60528 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
60529 + dump_stack();
60530 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
60531 __builtin_return_address(0));
60532 }
60533 EXPORT_SYMBOL(__stack_chk_fail);
60534 diff -urNp linux-2.6.32.41/kernel/params.c linux-2.6.32.41/kernel/params.c
60535 --- linux-2.6.32.41/kernel/params.c 2011-03-27 14:31:47.000000000 -0400
60536 +++ linux-2.6.32.41/kernel/params.c 2011-04-17 15:56:46.000000000 -0400
60537 @@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct
60538 return ret;
60539 }
60540
60541 -static struct sysfs_ops module_sysfs_ops = {
60542 +static const struct sysfs_ops module_sysfs_ops = {
60543 .show = module_attr_show,
60544 .store = module_attr_store,
60545 };
60546 @@ -739,7 +739,7 @@ static int uevent_filter(struct kset *ks
60547 return 0;
60548 }
60549
60550 -static struct kset_uevent_ops module_uevent_ops = {
60551 +static const struct kset_uevent_ops module_uevent_ops = {
60552 .filter = uevent_filter,
60553 };
60554
60555 diff -urNp linux-2.6.32.41/kernel/perf_event.c linux-2.6.32.41/kernel/perf_event.c
60556 --- linux-2.6.32.41/kernel/perf_event.c 2011-04-17 17:00:52.000000000 -0400
60557 +++ linux-2.6.32.41/kernel/perf_event.c 2011-05-04 17:56:28.000000000 -0400
60558 @@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostl
60559 */
60560 int sysctl_perf_event_sample_rate __read_mostly = 100000;
60561
60562 -static atomic64_t perf_event_id;
60563 +static atomic64_unchecked_t perf_event_id;
60564
60565 /*
60566 * Lock for (sysadmin-configurable) event reservations:
60567 @@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struc
60568 * In order to keep per-task stats reliable we need to flip the event
60569 * values when we flip the contexts.
60570 */
60571 - value = atomic64_read(&next_event->count);
60572 - value = atomic64_xchg(&event->count, value);
60573 - atomic64_set(&next_event->count, value);
60574 + value = atomic64_read_unchecked(&next_event->count);
60575 + value = atomic64_xchg_unchecked(&event->count, value);
60576 + atomic64_set_unchecked(&next_event->count, value);
60577
60578 swap(event->total_time_enabled, next_event->total_time_enabled);
60579 swap(event->total_time_running, next_event->total_time_running);
60580 @@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_e
60581 update_event_times(event);
60582 }
60583
60584 - return atomic64_read(&event->count);
60585 + return atomic64_read_unchecked(&event->count);
60586 }
60587
60588 /*
60589 @@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct
60590 values[n++] = 1 + leader->nr_siblings;
60591 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
60592 values[n++] = leader->total_time_enabled +
60593 - atomic64_read(&leader->child_total_time_enabled);
60594 + atomic64_read_unchecked(&leader->child_total_time_enabled);
60595 }
60596 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
60597 values[n++] = leader->total_time_running +
60598 - atomic64_read(&leader->child_total_time_running);
60599 + atomic64_read_unchecked(&leader->child_total_time_running);
60600 }
60601
60602 size = n * sizeof(u64);
60603 @@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct pe
60604 values[n++] = perf_event_read_value(event);
60605 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
60606 values[n++] = event->total_time_enabled +
60607 - atomic64_read(&event->child_total_time_enabled);
60608 + atomic64_read_unchecked(&event->child_total_time_enabled);
60609 }
60610 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
60611 values[n++] = event->total_time_running +
60612 - atomic64_read(&event->child_total_time_running);
60613 + atomic64_read_unchecked(&event->child_total_time_running);
60614 }
60615 if (read_format & PERF_FORMAT_ID)
60616 values[n++] = primary_event_id(event);
60617 @@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct fil
60618 static void perf_event_reset(struct perf_event *event)
60619 {
60620 (void)perf_event_read(event);
60621 - atomic64_set(&event->count, 0);
60622 + atomic64_set_unchecked(&event->count, 0);
60623 perf_event_update_userpage(event);
60624 }
60625
60626 @@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct p
60627 ++userpg->lock;
60628 barrier();
60629 userpg->index = perf_event_index(event);
60630 - userpg->offset = atomic64_read(&event->count);
60631 + userpg->offset = atomic64_read_unchecked(&event->count);
60632 if (event->state == PERF_EVENT_STATE_ACTIVE)
60633 - userpg->offset -= atomic64_read(&event->hw.prev_count);
60634 + userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
60635
60636 userpg->time_enabled = event->total_time_enabled +
60637 - atomic64_read(&event->child_total_time_enabled);
60638 + atomic64_read_unchecked(&event->child_total_time_enabled);
60639
60640 userpg->time_running = event->total_time_running +
60641 - atomic64_read(&event->child_total_time_running);
60642 + atomic64_read_unchecked(&event->child_total_time_running);
60643
60644 barrier();
60645 ++userpg->lock;
60646 @@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct
60647 u64 values[4];
60648 int n = 0;
60649
60650 - values[n++] = atomic64_read(&event->count);
60651 + values[n++] = atomic64_read_unchecked(&event->count);
60652 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
60653 values[n++] = event->total_time_enabled +
60654 - atomic64_read(&event->child_total_time_enabled);
60655 + atomic64_read_unchecked(&event->child_total_time_enabled);
60656 }
60657 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
60658 values[n++] = event->total_time_running +
60659 - atomic64_read(&event->child_total_time_running);
60660 + atomic64_read_unchecked(&event->child_total_time_running);
60661 }
60662 if (read_format & PERF_FORMAT_ID)
60663 values[n++] = primary_event_id(event);
60664 @@ -2940,7 +2940,7 @@ static void perf_output_read_group(struc
60665 if (leader != event)
60666 leader->pmu->read(leader);
60667
60668 - values[n++] = atomic64_read(&leader->count);
60669 + values[n++] = atomic64_read_unchecked(&leader->count);
60670 if (read_format & PERF_FORMAT_ID)
60671 values[n++] = primary_event_id(leader);
60672
60673 @@ -2952,7 +2952,7 @@ static void perf_output_read_group(struc
60674 if (sub != event)
60675 sub->pmu->read(sub);
60676
60677 - values[n++] = atomic64_read(&sub->count);
60678 + values[n++] = atomic64_read_unchecked(&sub->count);
60679 if (read_format & PERF_FORMAT_ID)
60680 values[n++] = primary_event_id(sub);
60681
60682 @@ -3787,7 +3787,7 @@ static void perf_swevent_add(struct perf
60683 {
60684 struct hw_perf_event *hwc = &event->hw;
60685
60686 - atomic64_add(nr, &event->count);
60687 + atomic64_add_unchecked(nr, &event->count);
60688
60689 if (!hwc->sample_period)
60690 return;
60691 @@ -4044,9 +4044,9 @@ static void cpu_clock_perf_event_update(
60692 u64 now;
60693
60694 now = cpu_clock(cpu);
60695 - prev = atomic64_read(&event->hw.prev_count);
60696 - atomic64_set(&event->hw.prev_count, now);
60697 - atomic64_add(now - prev, &event->count);
60698 + prev = atomic64_read_unchecked(&event->hw.prev_count);
60699 + atomic64_set_unchecked(&event->hw.prev_count, now);
60700 + atomic64_add_unchecked(now - prev, &event->count);
60701 }
60702
60703 static int cpu_clock_perf_event_enable(struct perf_event *event)
60704 @@ -4054,7 +4054,7 @@ static int cpu_clock_perf_event_enable(s
60705 struct hw_perf_event *hwc = &event->hw;
60706 int cpu = raw_smp_processor_id();
60707
60708 - atomic64_set(&hwc->prev_count, cpu_clock(cpu));
60709 + atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
60710 perf_swevent_start_hrtimer(event);
60711
60712 return 0;
60713 @@ -4086,9 +4086,9 @@ static void task_clock_perf_event_update
60714 u64 prev;
60715 s64 delta;
60716
60717 - prev = atomic64_xchg(&event->hw.prev_count, now);
60718 + prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
60719 delta = now - prev;
60720 - atomic64_add(delta, &event->count);
60721 + atomic64_add_unchecked(delta, &event->count);
60722 }
60723
60724 static int task_clock_perf_event_enable(struct perf_event *event)
60725 @@ -4098,7 +4098,7 @@ static int task_clock_perf_event_enable(
60726
60727 now = event->ctx->time;
60728
60729 - atomic64_set(&hwc->prev_count, now);
60730 + atomic64_set_unchecked(&hwc->prev_count, now);
60731
60732 perf_swevent_start_hrtimer(event);
60733
60734 @@ -4293,7 +4293,7 @@ perf_event_alloc(struct perf_event_attr
60735 event->parent = parent_event;
60736
60737 event->ns = get_pid_ns(current->nsproxy->pid_ns);
60738 - event->id = atomic64_inc_return(&perf_event_id);
60739 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
60740
60741 event->state = PERF_EVENT_STATE_INACTIVE;
60742
60743 @@ -4724,15 +4724,15 @@ static void sync_child_event(struct perf
60744 if (child_event->attr.inherit_stat)
60745 perf_event_read_event(child_event, child);
60746
60747 - child_val = atomic64_read(&child_event->count);
60748 + child_val = atomic64_read_unchecked(&child_event->count);
60749
60750 /*
60751 * Add back the child's count to the parent's count:
60752 */
60753 - atomic64_add(child_val, &parent_event->count);
60754 - atomic64_add(child_event->total_time_enabled,
60755 + atomic64_add_unchecked(child_val, &parent_event->count);
60756 + atomic64_add_unchecked(child_event->total_time_enabled,
60757 &parent_event->child_total_time_enabled);
60758 - atomic64_add(child_event->total_time_running,
60759 + atomic64_add_unchecked(child_event->total_time_running,
60760 &parent_event->child_total_time_running);
60761
60762 /*
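The perf counters above are switched to atomic64_unchecked_t. Under PaX's reference-counter overflow protection the plain atomic types detect and refuse wraparound; the *_unchecked variants opt out, which suits statistics counters such as event->count that may legitimately grow without bound. The stand-alone sketch below illustrates the distinction in userspace; the checked add is a deliberate simplification (a load followed by an add rather than a single atomic operation) and is not the kernel's implementation.

#include <limits.h>
#include <stdio.h>

typedef struct { long long counter; } atomic64_t;
typedef struct { long long counter; } atomic64_unchecked_t;

/* checked flavour: refuse an add that would overflow (stand-in for the
 * PaX overflow trap on reference counters; not a true atomic RMW) */
static void atomic64_add(long long i, atomic64_t *v)
{
	long long old = __atomic_load_n(&v->counter, __ATOMIC_RELAXED);

	if (i > 0 && old > LLONG_MAX - i) {
		fprintf(stderr, "refcount overflow detected, add refused\n");
		return;
	}
	__atomic_add_fetch(&v->counter, i, __ATOMIC_RELAXED);
}

/* unchecked flavour: plain atomic add, fine for statistics counters */
static void atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
{
	__atomic_add_fetch(&v->counter, i, __ATOMIC_RELAXED);
}

int main(void)
{
	atomic64_t ref = { LLONG_MAX - 1 };
	atomic64_unchecked_t stat = { 5 };

	atomic64_add(2, &ref);			/* detected and refused */
	atomic64_add_unchecked(2, &stat);	/* just counts          */

	printf("ref=%lld stat=%lld\n", ref.counter, stat.counter);
	return 0;
}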
60763 diff -urNp linux-2.6.32.41/kernel/pid.c linux-2.6.32.41/kernel/pid.c
60764 --- linux-2.6.32.41/kernel/pid.c 2011-04-22 19:16:29.000000000 -0400
60765 +++ linux-2.6.32.41/kernel/pid.c 2011-04-18 19:22:38.000000000 -0400
60766 @@ -33,6 +33,7 @@
60767 #include <linux/rculist.h>
60768 #include <linux/bootmem.h>
60769 #include <linux/hash.h>
60770 +#include <linux/security.h>
60771 #include <linux/pid_namespace.h>
60772 #include <linux/init_task.h>
60773 #include <linux/syscalls.h>
60774 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
60775
60776 int pid_max = PID_MAX_DEFAULT;
60777
60778 -#define RESERVED_PIDS 300
60779 +#define RESERVED_PIDS 500
60780
60781 int pid_max_min = RESERVED_PIDS + 1;
60782 int pid_max_max = PID_MAX_LIMIT;
60783 @@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
60784 */
60785 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
60786 {
60787 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
60788 + struct task_struct *task;
60789 +
60790 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
60791 +
60792 + if (gr_pid_is_chrooted(task))
60793 + return NULL;
60794 +
60795 + return task;
60796 }
60797
60798 struct task_struct *find_task_by_vpid(pid_t vnr)
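find_task_by_pid_ns() above gains a policy filter: after the normal lookup, gr_pid_is_chrooted() hides tasks that live outside the caller's chroot by returning NULL as if no such pid existed. Below is a toy stand-alone illustration of that lookup-then-filter pattern; the task table and the visibility rule are invented stand-ins for the real check.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct task { int pid; bool outside_callers_chroot; };

static struct task table[] = {
	{ 1,    true  },	/* init: outside the caller's chroot */
	{ 1234, false },	/* a task inside the chroot          */
};

static struct task *raw_lookup(int pid)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].pid == pid)
			return &table[i];
	return NULL;
}

static struct task *find_task_filtered(int pid)
{
	struct task *t = raw_lookup(pid);

	if (t && t->outside_callers_chroot)
		return NULL;	/* hide it, as gr_pid_is_chrooted() would */
	return t;
}

int main(void)
{
	printf("pid 1    -> %s\n", find_task_filtered(1)    ? "visible" : "hidden");
	printf("pid 1234 -> %s\n", find_task_filtered(1234) ? "visible" : "hidden");
	return 0;
}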
60799 diff -urNp linux-2.6.32.41/kernel/posix-cpu-timers.c linux-2.6.32.41/kernel/posix-cpu-timers.c
60800 --- linux-2.6.32.41/kernel/posix-cpu-timers.c 2011-03-27 14:31:47.000000000 -0400
60801 +++ linux-2.6.32.41/kernel/posix-cpu-timers.c 2011-04-17 15:56:46.000000000 -0400
60802 @@ -6,6 +6,7 @@
60803 #include <linux/posix-timers.h>
60804 #include <linux/errno.h>
60805 #include <linux/math64.h>
60806 +#include <linux/security.h>
60807 #include <asm/uaccess.h>
60808 #include <linux/kernel_stat.h>
60809 #include <trace/events/timer.h>
60810 diff -urNp linux-2.6.32.41/kernel/posix-timers.c linux-2.6.32.41/kernel/posix-timers.c
60811 --- linux-2.6.32.41/kernel/posix-timers.c 2011-03-27 14:31:47.000000000 -0400
60812 +++ linux-2.6.32.41/kernel/posix-timers.c 2011-05-16 21:46:57.000000000 -0400
60813 @@ -42,6 +42,7 @@
60814 #include <linux/compiler.h>
60815 #include <linux/idr.h>
60816 #include <linux/posix-timers.h>
60817 +#include <linux/grsecurity.h>
60818 #include <linux/syscalls.h>
60819 #include <linux/wait.h>
60820 #include <linux/workqueue.h>
60821 @@ -296,6 +297,8 @@ static __init int init_posix_timers(void
60822 .nsleep = no_nsleep,
60823 };
60824
60825 + pax_track_stack();
60826 +
60827 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
60828 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
60829 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
60830 @@ -948,6 +951,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
60831 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
60832 return -EFAULT;
60833
60834 + /* Only the CLOCK_REALTIME clock can be set; all other clocks
60835 + have their clock_set fptr pointing at a dummy "nosettime" function.
60836 + CLOCK_REALTIME has a NULL clock_set fptr, which makes the dispatch
60837 + fall through to common_clock_set, which calls do_sys_settimeofday,
60838 + which we hook.
60839 + */
60840 +
60841 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
60842 }
60843
60844 diff -urNp linux-2.6.32.41/kernel/power/hibernate.c linux-2.6.32.41/kernel/power/hibernate.c
60845 --- linux-2.6.32.41/kernel/power/hibernate.c 2011-03-27 14:31:47.000000000 -0400
60846 +++ linux-2.6.32.41/kernel/power/hibernate.c 2011-04-17 15:56:46.000000000 -0400
60847 @@ -48,14 +48,14 @@ enum {
60848
60849 static int hibernation_mode = HIBERNATION_SHUTDOWN;
60850
60851 -static struct platform_hibernation_ops *hibernation_ops;
60852 +static const struct platform_hibernation_ops *hibernation_ops;
60853
60854 /**
60855 * hibernation_set_ops - set the global hibernate operations
60856 * @ops: the hibernation operations to use in subsequent hibernation transitions
60857 */
60858
60859 -void hibernation_set_ops(struct platform_hibernation_ops *ops)
60860 +void hibernation_set_ops(const struct platform_hibernation_ops *ops)
60861 {
60862 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
60863 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
60864 diff -urNp linux-2.6.32.41/kernel/power/poweroff.c linux-2.6.32.41/kernel/power/poweroff.c
60865 --- linux-2.6.32.41/kernel/power/poweroff.c 2011-03-27 14:31:47.000000000 -0400
60866 +++ linux-2.6.32.41/kernel/power/poweroff.c 2011-04-17 15:56:46.000000000 -0400
60867 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
60868 .enable_mask = SYSRQ_ENABLE_BOOT,
60869 };
60870
60871 -static int pm_sysrq_init(void)
60872 +static int __init pm_sysrq_init(void)
60873 {
60874 register_sysrq_key('o', &sysrq_poweroff_op);
60875 return 0;
60876 diff -urNp linux-2.6.32.41/kernel/power/process.c linux-2.6.32.41/kernel/power/process.c
60877 --- linux-2.6.32.41/kernel/power/process.c 2011-03-27 14:31:47.000000000 -0400
60878 +++ linux-2.6.32.41/kernel/power/process.c 2011-04-17 15:56:46.000000000 -0400
60879 @@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_
60880 struct timeval start, end;
60881 u64 elapsed_csecs64;
60882 unsigned int elapsed_csecs;
60883 + bool timedout = false;
60884
60885 do_gettimeofday(&start);
60886
60887 end_time = jiffies + TIMEOUT;
60888 do {
60889 todo = 0;
60890 + if (time_after(jiffies, end_time))
60891 + timedout = true;
60892 read_lock(&tasklist_lock);
60893 do_each_thread(g, p) {
60894 if (frozen(p) || !freezeable(p))
60895 @@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_
60896 * It is "frozen enough". If the task does wake
60897 * up, it will immediately call try_to_freeze.
60898 */
60899 - if (!task_is_stopped_or_traced(p) &&
60900 - !freezer_should_skip(p))
60901 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
60902 todo++;
60903 + if (timedout) {
60904 + printk(KERN_ERR "Task refusing to freeze:\n");
60905 + sched_show_task(p);
60906 + }
60907 + }
60908 } while_each_thread(g, p);
60909 read_unlock(&tasklist_lock);
60910 yield(); /* Yield is okay here */
60911 - if (time_after(jiffies, end_time))
60912 - break;
60913 - } while (todo);
60914 + } while (todo && !timedout);
60915
60916 do_gettimeofday(&end);
60917 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
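The try_to_freeze_tasks() change above alters the failure path: instead of silently breaking out when the timeout expires, the loop flags the timeout, makes one final pass, and prints every task that is still refusing to freeze before giving up. Below is a compact stand-alone illustration of that control flow (it busy-waits for a second or two); the task names and the fake cooperation are made up for the demo.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	const char *name[] = { "kjournald", "stubborn_task" };
	bool frozen[] = { false, false };
	time_t end_time = time(NULL) + 1;	/* stand-in for TIMEOUT */
	bool timedout = false;
	int todo;

	do {
		todo = 0;
		if (time(NULL) > end_time)
			timedout = true;
		for (int i = 0; i < 2; i++) {
			if (i == 0)
				frozen[i] = true;	/* this task cooperates */
			if (!frozen[i]) {
				todo++;
				if (timedout)
					printf("Task refusing to freeze: %s\n",
					       name[i]);
			}
		}
	} while (todo && !timedout);

	return todo ? 1 : 0;
}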
60918 diff -urNp linux-2.6.32.41/kernel/power/suspend.c linux-2.6.32.41/kernel/power/suspend.c
60919 --- linux-2.6.32.41/kernel/power/suspend.c 2011-03-27 14:31:47.000000000 -0400
60920 +++ linux-2.6.32.41/kernel/power/suspend.c 2011-04-17 15:56:46.000000000 -0400
60921 @@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_M
60922 [PM_SUSPEND_MEM] = "mem",
60923 };
60924
60925 -static struct platform_suspend_ops *suspend_ops;
60926 +static const struct platform_suspend_ops *suspend_ops;
60927
60928 /**
60929 * suspend_set_ops - Set the global suspend method table.
60930 * @ops: Pointer to ops structure.
60931 */
60932 -void suspend_set_ops(struct platform_suspend_ops *ops)
60933 +void suspend_set_ops(const struct platform_suspend_ops *ops)
60934 {
60935 mutex_lock(&pm_mutex);
60936 suspend_ops = ops;
60937 diff -urNp linux-2.6.32.41/kernel/printk.c linux-2.6.32.41/kernel/printk.c
60938 --- linux-2.6.32.41/kernel/printk.c 2011-03-27 14:31:47.000000000 -0400
60939 +++ linux-2.6.32.41/kernel/printk.c 2011-04-17 15:56:46.000000000 -0400
60940 @@ -278,6 +278,11 @@ int do_syslog(int type, char __user *buf
60941 char c;
60942 int error = 0;
60943
60944 +#ifdef CONFIG_GRKERNSEC_DMESG
60945 + if (grsec_enable_dmesg && !capable(CAP_SYS_ADMIN))
60946 + return -EPERM;
60947 +#endif
60948 +
60949 error = security_syslog(type);
60950 if (error)
60951 return error;
60952 diff -urNp linux-2.6.32.41/kernel/profile.c linux-2.6.32.41/kernel/profile.c
60953 --- linux-2.6.32.41/kernel/profile.c 2011-03-27 14:31:47.000000000 -0400
60954 +++ linux-2.6.32.41/kernel/profile.c 2011-05-04 17:56:28.000000000 -0400
60955 @@ -39,7 +39,7 @@ struct profile_hit {
60956 /* Oprofile timer tick hook */
60957 static int (*timer_hook)(struct pt_regs *) __read_mostly;
60958
60959 -static atomic_t *prof_buffer;
60960 +static atomic_unchecked_t *prof_buffer;
60961 static unsigned long prof_len, prof_shift;
60962
60963 int prof_on __read_mostly;
60964 @@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
60965 hits[i].pc = 0;
60966 continue;
60967 }
60968 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
60969 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
60970 hits[i].hits = hits[i].pc = 0;
60971 }
60972 }
60973 @@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc,
60974 * Add the current hit(s) and flush the write-queue out
60975 * to the global buffer:
60976 */
60977 - atomic_add(nr_hits, &prof_buffer[pc]);
60978 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
60979 for (i = 0; i < NR_PROFILE_HIT; ++i) {
60980 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
60981 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
60982 hits[i].pc = hits[i].hits = 0;
60983 }
60984 out:
60985 @@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc,
60986 if (prof_on != type || !prof_buffer)
60987 return;
60988 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
60989 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
60990 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
60991 }
60992 #endif /* !CONFIG_SMP */
60993 EXPORT_SYMBOL_GPL(profile_hits);
60994 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
60995 return -EFAULT;
60996 buf++; p++; count--; read++;
60997 }
60998 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
60999 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
61000 if (copy_to_user(buf, (void *)pnt, count))
61001 return -EFAULT;
61002 read += count;
61003 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
61004 }
61005 #endif
61006 profile_discard_flip_buffers();
61007 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
61008 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
61009 return count;
61010 }
61011
61012 diff -urNp linux-2.6.32.41/kernel/ptrace.c linux-2.6.32.41/kernel/ptrace.c
61013 --- linux-2.6.32.41/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
61014 +++ linux-2.6.32.41/kernel/ptrace.c 2011-05-22 23:02:06.000000000 -0400
61015 @@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_stru
61016 return ret;
61017 }
61018
61019 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
61020 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
61021 + unsigned int log)
61022 {
61023 const struct cred *cred = current_cred(), *tcred;
61024
61025 @@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_stru
61026 cred->gid != tcred->egid ||
61027 cred->gid != tcred->sgid ||
61028 cred->gid != tcred->gid) &&
61029 - !capable(CAP_SYS_PTRACE)) {
61030 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
61031 + (log && !capable(CAP_SYS_PTRACE)))
61032 + ) {
61033 rcu_read_unlock();
61034 return -EPERM;
61035 }
61036 @@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_stru
61037 smp_rmb();
61038 if (task->mm)
61039 dumpable = get_dumpable(task->mm);
61040 - if (!dumpable && !capable(CAP_SYS_PTRACE))
61041 + if (!dumpable &&
61042 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
61043 + (log && !capable(CAP_SYS_PTRACE))))
61044 return -EPERM;
61045
61046 return security_ptrace_access_check(task, mode);
61047 @@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struc
61048 {
61049 int err;
61050 task_lock(task);
61051 - err = __ptrace_may_access(task, mode);
61052 + err = __ptrace_may_access(task, mode, 0);
61053 + task_unlock(task);
61054 + return !err;
61055 +}
61056 +
61057 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
61058 +{
61059 + int err;
61060 + task_lock(task);
61061 + err = __ptrace_may_access(task, mode, 1);
61062 task_unlock(task);
61063 return !err;
61064 }
61065 @@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *ta
61066 goto out;
61067
61068 task_lock(task);
61069 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
61070 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
61071 task_unlock(task);
61072 if (retval)
61073 goto unlock_creds;
61074 @@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *ta
61075 goto unlock_tasklist;
61076
61077 task->ptrace = PT_PTRACED;
61078 - if (capable(CAP_SYS_PTRACE))
61079 + if (capable_nolog(CAP_SYS_PTRACE))
61080 task->ptrace |= PT_PTRACE_CAP;
61081
61082 __ptrace_link(task, current);
61083 @@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *
61084 {
61085 int copied = 0;
61086
61087 + pax_track_stack();
61088 +
61089 while (len > 0) {
61090 char buf[128];
61091 int this_len, retval;
61092 @@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct
61093 {
61094 int copied = 0;
61095
61096 + pax_track_stack();
61097 +
61098 while (len > 0) {
61099 char buf[128];
61100 int this_len, retval;
61101 @@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *c
61102 int ret = -EIO;
61103 siginfo_t siginfo;
61104
61105 + pax_track_stack();
61106 +
61107 switch (request) {
61108 case PTRACE_PEEKTEXT:
61109 case PTRACE_PEEKDATA:
61110 @@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *c
61111 ret = ptrace_setoptions(child, data);
61112 break;
61113 case PTRACE_GETEVENTMSG:
61114 - ret = put_user(child->ptrace_message, (unsigned long __user *) data);
61115 + ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
61116 break;
61117
61118 case PTRACE_GETSIGINFO:
61119 ret = ptrace_getsiginfo(child, &siginfo);
61120 if (!ret)
61121 - ret = copy_siginfo_to_user((siginfo_t __user *) data,
61122 + ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
61123 &siginfo);
61124 break;
61125
61126 case PTRACE_SETSIGINFO:
61127 - if (copy_from_user(&siginfo, (siginfo_t __user *) data,
61128 + if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
61129 sizeof siginfo))
61130 ret = -EFAULT;
61131 else
61132 @@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
61133 goto out;
61134 }
61135
61136 + if (gr_handle_ptrace(child, request)) {
61137 + ret = -EPERM;
61138 + goto out_put_task_struct;
61139 + }
61140 +
61141 if (request == PTRACE_ATTACH) {
61142 ret = ptrace_attach(child);
61143 /*
61144 * Some architectures need to do book-keeping after
61145 * a ptrace attach.
61146 */
61147 - if (!ret)
61148 + if (!ret) {
61149 arch_ptrace_attach(child);
61150 + gr_audit_ptrace(child);
61151 + }
61152 goto out_put_task_struct;
61153 }
61154
61155 @@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_
61156 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
61157 if (copied != sizeof(tmp))
61158 return -EIO;
61159 - return put_user(tmp, (unsigned long __user *)data);
61160 + return put_user(tmp, (__force unsigned long __user *)data);
61161 }
61162
61163 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
61164 @@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_st
61165 siginfo_t siginfo;
61166 int ret;
61167
61168 + pax_track_stack();
61169 +
61170 switch (request) {
61171 case PTRACE_PEEKTEXT:
61172 case PTRACE_PEEKDATA:
61173 @@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat
61174 goto out;
61175 }
61176
61177 + if (gr_handle_ptrace(child, request)) {
61178 + ret = -EPERM;
61179 + goto out_put_task_struct;
61180 + }
61181 +
61182 if (request == PTRACE_ATTACH) {
61183 ret = ptrace_attach(child);
61184 /*
61185 * Some architectures need to do book-keeping after
61186 * a ptrace attach.
61187 */
61188 - if (!ret)
61189 + if (!ret) {
61190 arch_ptrace_attach(child);
61191 + gr_audit_ptrace(child);
61192 + }
61193 goto out_put_task_struct;
61194 }
61195
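The ptrace changes above split the permission check into a logged and an unlogged variant: speculative callers (ptrace_may_access(), e.g. from /proc) go through capable_nolog() so they do not generate audit noise, while a real PTRACE_ATTACH takes the logging path and additionally runs gr_handle_ptrace()/gr_audit_ptrace(). Below is a generic stand-alone sketch of the check-with-optional-audit pattern; capable_stub() and the messages are invented for the demo.

#include <stdbool.h>
#include <stdio.h>

/* hypothetical stand-in for capable()/capable_nolog() */
static bool capable_stub(void)
{
	return false;		/* pretend the caller lacks CAP_SYS_PTRACE */
}

static int may_access(bool log)
{
	if (!capable_stub()) {
		if (log)
			fprintf(stderr, "audit: denied ptrace-style access\n");
		return -1;	/* -EPERM */
	}
	return 0;
}

int main(void)
{
	may_access(false);	/* informational query: denied silently  */
	may_access(true);	/* real attach attempt: denial is logged */
	return 0;
}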
61196 diff -urNp linux-2.6.32.41/kernel/rcutorture.c linux-2.6.32.41/kernel/rcutorture.c
61197 --- linux-2.6.32.41/kernel/rcutorture.c 2011-03-27 14:31:47.000000000 -0400
61198 +++ linux-2.6.32.41/kernel/rcutorture.c 2011-05-04 17:56:28.000000000 -0400
61199 @@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
61200 { 0 };
61201 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
61202 { 0 };
61203 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
61204 -static atomic_t n_rcu_torture_alloc;
61205 -static atomic_t n_rcu_torture_alloc_fail;
61206 -static atomic_t n_rcu_torture_free;
61207 -static atomic_t n_rcu_torture_mberror;
61208 -static atomic_t n_rcu_torture_error;
61209 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
61210 +static atomic_unchecked_t n_rcu_torture_alloc;
61211 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
61212 +static atomic_unchecked_t n_rcu_torture_free;
61213 +static atomic_unchecked_t n_rcu_torture_mberror;
61214 +static atomic_unchecked_t n_rcu_torture_error;
61215 static long n_rcu_torture_timers;
61216 static struct list_head rcu_torture_removed;
61217 static cpumask_var_t shuffle_tmp_mask;
61218 @@ -187,11 +187,11 @@ rcu_torture_alloc(void)
61219
61220 spin_lock_bh(&rcu_torture_lock);
61221 if (list_empty(&rcu_torture_freelist)) {
61222 - atomic_inc(&n_rcu_torture_alloc_fail);
61223 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
61224 spin_unlock_bh(&rcu_torture_lock);
61225 return NULL;
61226 }
61227 - atomic_inc(&n_rcu_torture_alloc);
61228 + atomic_inc_unchecked(&n_rcu_torture_alloc);
61229 p = rcu_torture_freelist.next;
61230 list_del_init(p);
61231 spin_unlock_bh(&rcu_torture_lock);
61232 @@ -204,7 +204,7 @@ rcu_torture_alloc(void)
61233 static void
61234 rcu_torture_free(struct rcu_torture *p)
61235 {
61236 - atomic_inc(&n_rcu_torture_free);
61237 + atomic_inc_unchecked(&n_rcu_torture_free);
61238 spin_lock_bh(&rcu_torture_lock);
61239 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
61240 spin_unlock_bh(&rcu_torture_lock);
61241 @@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
61242 i = rp->rtort_pipe_count;
61243 if (i > RCU_TORTURE_PIPE_LEN)
61244 i = RCU_TORTURE_PIPE_LEN;
61245 - atomic_inc(&rcu_torture_wcount[i]);
61246 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
61247 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
61248 rp->rtort_mbtest = 0;
61249 rcu_torture_free(rp);
61250 @@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_fr
61251 i = rp->rtort_pipe_count;
61252 if (i > RCU_TORTURE_PIPE_LEN)
61253 i = RCU_TORTURE_PIPE_LEN;
61254 - atomic_inc(&rcu_torture_wcount[i]);
61255 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
61256 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
61257 rp->rtort_mbtest = 0;
61258 list_del(&rp->rtort_free);
61259 @@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
61260 i = old_rp->rtort_pipe_count;
61261 if (i > RCU_TORTURE_PIPE_LEN)
61262 i = RCU_TORTURE_PIPE_LEN;
61263 - atomic_inc(&rcu_torture_wcount[i]);
61264 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
61265 old_rp->rtort_pipe_count++;
61266 cur_ops->deferred_free(old_rp);
61267 }
61268 @@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned l
61269 return;
61270 }
61271 if (p->rtort_mbtest == 0)
61272 - atomic_inc(&n_rcu_torture_mberror);
61273 + atomic_inc_unchecked(&n_rcu_torture_mberror);
61274 spin_lock(&rand_lock);
61275 cur_ops->read_delay(&rand);
61276 n_rcu_torture_timers++;
61277 @@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
61278 continue;
61279 }
61280 if (p->rtort_mbtest == 0)
61281 - atomic_inc(&n_rcu_torture_mberror);
61282 + atomic_inc_unchecked(&n_rcu_torture_mberror);
61283 cur_ops->read_delay(&rand);
61284 preempt_disable();
61285 pipe_count = p->rtort_pipe_count;
61286 @@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
61287 rcu_torture_current,
61288 rcu_torture_current_version,
61289 list_empty(&rcu_torture_freelist),
61290 - atomic_read(&n_rcu_torture_alloc),
61291 - atomic_read(&n_rcu_torture_alloc_fail),
61292 - atomic_read(&n_rcu_torture_free),
61293 - atomic_read(&n_rcu_torture_mberror),
61294 + atomic_read_unchecked(&n_rcu_torture_alloc),
61295 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
61296 + atomic_read_unchecked(&n_rcu_torture_free),
61297 + atomic_read_unchecked(&n_rcu_torture_mberror),
61298 n_rcu_torture_timers);
61299 - if (atomic_read(&n_rcu_torture_mberror) != 0)
61300 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
61301 cnt += sprintf(&page[cnt], " !!!");
61302 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
61303 if (i > 1) {
61304 cnt += sprintf(&page[cnt], "!!! ");
61305 - atomic_inc(&n_rcu_torture_error);
61306 + atomic_inc_unchecked(&n_rcu_torture_error);
61307 WARN_ON_ONCE(1);
61308 }
61309 cnt += sprintf(&page[cnt], "Reader Pipe: ");
61310 @@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
61311 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
61312 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
61313 cnt += sprintf(&page[cnt], " %d",
61314 - atomic_read(&rcu_torture_wcount[i]));
61315 + atomic_read_unchecked(&rcu_torture_wcount[i]));
61316 }
61317 cnt += sprintf(&page[cnt], "\n");
61318 if (cur_ops->stats)
61319 @@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
61320
61321 if (cur_ops->cleanup)
61322 cur_ops->cleanup();
61323 - if (atomic_read(&n_rcu_torture_error))
61324 + if (atomic_read_unchecked(&n_rcu_torture_error))
61325 rcu_torture_print_module_parms("End of test: FAILURE");
61326 else
61327 rcu_torture_print_module_parms("End of test: SUCCESS");
61328 @@ -1138,13 +1138,13 @@ rcu_torture_init(void)
61329
61330 rcu_torture_current = NULL;
61331 rcu_torture_current_version = 0;
61332 - atomic_set(&n_rcu_torture_alloc, 0);
61333 - atomic_set(&n_rcu_torture_alloc_fail, 0);
61334 - atomic_set(&n_rcu_torture_free, 0);
61335 - atomic_set(&n_rcu_torture_mberror, 0);
61336 - atomic_set(&n_rcu_torture_error, 0);
61337 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
61338 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
61339 + atomic_set_unchecked(&n_rcu_torture_free, 0);
61340 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
61341 + atomic_set_unchecked(&n_rcu_torture_error, 0);
61342 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
61343 - atomic_set(&rcu_torture_wcount[i], 0);
61344 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
61345 for_each_possible_cpu(cpu) {
61346 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
61347 per_cpu(rcu_torture_count, cpu)[i] = 0;
61348 diff -urNp linux-2.6.32.41/kernel/rcutree.c linux-2.6.32.41/kernel/rcutree.c
61349 --- linux-2.6.32.41/kernel/rcutree.c 2011-03-27 14:31:47.000000000 -0400
61350 +++ linux-2.6.32.41/kernel/rcutree.c 2011-04-17 15:56:46.000000000 -0400
61351 @@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state
61352 /*
61353 * Do softirq processing for the current CPU.
61354 */
61355 -static void rcu_process_callbacks(struct softirq_action *unused)
61356 +static void rcu_process_callbacks(void)
61357 {
61358 /*
61359 * Memory references from any prior RCU read-side critical sections
61360 diff -urNp linux-2.6.32.41/kernel/rcutree_plugin.h linux-2.6.32.41/kernel/rcutree_plugin.h
61361 --- linux-2.6.32.41/kernel/rcutree_plugin.h 2011-03-27 14:31:47.000000000 -0400
61362 +++ linux-2.6.32.41/kernel/rcutree_plugin.h 2011-04-17 15:56:46.000000000 -0400
61363 @@ -145,7 +145,7 @@ static void rcu_preempt_note_context_swi
61364 */
61365 void __rcu_read_lock(void)
61366 {
61367 - ACCESS_ONCE(current->rcu_read_lock_nesting)++;
61368 + ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
61369 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
61370 }
61371 EXPORT_SYMBOL_GPL(__rcu_read_lock);
61372 @@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
61373 struct task_struct *t = current;
61374
61375 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
61376 - if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
61377 + if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
61378 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
61379 rcu_read_unlock_special(t);
61380 }
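The rcutree_plugin.h hunks above write the RCU nesting counter through ACCESS_ONCE_RW() rather than ACCESS_ONCE(). In the PaX portion of this patch the plain ACCESS_ONCE() is const-qualified, so stray writes through it fail to compile under constification, while ACCESS_ONCE_RW() keeps the writable cast for intentional updates. The definitions below follow that idea and are meant as an illustration, not a verbatim copy of the patch.

#include <stdio.h>

#define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

int main(void)
{
	int rcu_read_lock_nesting = 0;

	ACCESS_ONCE_RW(rcu_read_lock_nesting)++;	/* intentional write */
	printf("%d\n", ACCESS_ONCE(rcu_read_lock_nesting));
	/* ACCESS_ONCE(rcu_read_lock_nesting)++;  -- would not compile:
	 * increment of a read-only (const-qualified) lvalue */
	return 0;
}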
61381 diff -urNp linux-2.6.32.41/kernel/relay.c linux-2.6.32.41/kernel/relay.c
61382 --- linux-2.6.32.41/kernel/relay.c 2011-03-27 14:31:47.000000000 -0400
61383 +++ linux-2.6.32.41/kernel/relay.c 2011-05-16 21:46:57.000000000 -0400
61384 @@ -1222,7 +1222,7 @@ static int subbuf_splice_actor(struct fi
61385 unsigned int flags,
61386 int *nonpad_ret)
61387 {
61388 - unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
61389 + unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
61390 struct rchan_buf *rbuf = in->private_data;
61391 unsigned int subbuf_size = rbuf->chan->subbuf_size;
61392 uint64_t pos = (uint64_t) *ppos;
61393 @@ -1241,6 +1241,9 @@ static int subbuf_splice_actor(struct fi
61394 .ops = &relay_pipe_buf_ops,
61395 .spd_release = relay_page_release,
61396 };
61397 + ssize_t ret;
61398 +
61399 + pax_track_stack();
61400
61401 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
61402 return 0;
61403 diff -urNp linux-2.6.32.41/kernel/resource.c linux-2.6.32.41/kernel/resource.c
61404 --- linux-2.6.32.41/kernel/resource.c 2011-03-27 14:31:47.000000000 -0400
61405 +++ linux-2.6.32.41/kernel/resource.c 2011-04-17 15:56:46.000000000 -0400
61406 @@ -132,8 +132,18 @@ static const struct file_operations proc
61407
61408 static int __init ioresources_init(void)
61409 {
61410 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
61411 +#ifdef CONFIG_GRKERNSEC_PROC_USER
61412 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
61413 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
61414 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61415 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
61416 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
61417 +#endif
61418 +#else
61419 proc_create("ioports", 0, NULL, &proc_ioports_operations);
61420 proc_create("iomem", 0, NULL, &proc_iomem_operations);
61421 +#endif
61422 return 0;
61423 }
61424 __initcall(ioresources_init);
61425 diff -urNp linux-2.6.32.41/kernel/rtmutex.c linux-2.6.32.41/kernel/rtmutex.c
61426 --- linux-2.6.32.41/kernel/rtmutex.c 2011-03-27 14:31:47.000000000 -0400
61427 +++ linux-2.6.32.41/kernel/rtmutex.c 2011-04-17 15:56:46.000000000 -0400
61428 @@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt
61429 */
61430 spin_lock_irqsave(&pendowner->pi_lock, flags);
61431
61432 - WARN_ON(!pendowner->pi_blocked_on);
61433 + BUG_ON(!pendowner->pi_blocked_on);
61434 WARN_ON(pendowner->pi_blocked_on != waiter);
61435 WARN_ON(pendowner->pi_blocked_on->lock != lock);
61436
61437 diff -urNp linux-2.6.32.41/kernel/rtmutex-tester.c linux-2.6.32.41/kernel/rtmutex-tester.c
61438 --- linux-2.6.32.41/kernel/rtmutex-tester.c 2011-03-27 14:31:47.000000000 -0400
61439 +++ linux-2.6.32.41/kernel/rtmutex-tester.c 2011-05-04 17:56:28.000000000 -0400
61440 @@ -21,7 +21,7 @@
61441 #define MAX_RT_TEST_MUTEXES 8
61442
61443 static spinlock_t rttest_lock;
61444 -static atomic_t rttest_event;
61445 +static atomic_unchecked_t rttest_event;
61446
61447 struct test_thread_data {
61448 int opcode;
61449 @@ -64,7 +64,7 @@ static int handle_op(struct test_thread_
61450
61451 case RTTEST_LOCKCONT:
61452 td->mutexes[td->opdata] = 1;
61453 - td->event = atomic_add_return(1, &rttest_event);
61454 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61455 return 0;
61456
61457 case RTTEST_RESET:
61458 @@ -82,7 +82,7 @@ static int handle_op(struct test_thread_
61459 return 0;
61460
61461 case RTTEST_RESETEVENT:
61462 - atomic_set(&rttest_event, 0);
61463 + atomic_set_unchecked(&rttest_event, 0);
61464 return 0;
61465
61466 default:
61467 @@ -99,9 +99,9 @@ static int handle_op(struct test_thread_
61468 return ret;
61469
61470 td->mutexes[id] = 1;
61471 - td->event = atomic_add_return(1, &rttest_event);
61472 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61473 rt_mutex_lock(&mutexes[id]);
61474 - td->event = atomic_add_return(1, &rttest_event);
61475 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61476 td->mutexes[id] = 4;
61477 return 0;
61478
61479 @@ -112,9 +112,9 @@ static int handle_op(struct test_thread_
61480 return ret;
61481
61482 td->mutexes[id] = 1;
61483 - td->event = atomic_add_return(1, &rttest_event);
61484 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61485 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
61486 - td->event = atomic_add_return(1, &rttest_event);
61487 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61488 td->mutexes[id] = ret ? 0 : 4;
61489 return ret ? -EINTR : 0;
61490
61491 @@ -123,9 +123,9 @@ static int handle_op(struct test_thread_
61492 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
61493 return ret;
61494
61495 - td->event = atomic_add_return(1, &rttest_event);
61496 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61497 rt_mutex_unlock(&mutexes[id]);
61498 - td->event = atomic_add_return(1, &rttest_event);
61499 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61500 td->mutexes[id] = 0;
61501 return 0;
61502
61503 @@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mu
61504 break;
61505
61506 td->mutexes[dat] = 2;
61507 - td->event = atomic_add_return(1, &rttest_event);
61508 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61509 break;
61510
61511 case RTTEST_LOCKBKL:
61512 @@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mu
61513 return;
61514
61515 td->mutexes[dat] = 3;
61516 - td->event = atomic_add_return(1, &rttest_event);
61517 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61518 break;
61519
61520 case RTTEST_LOCKNOWAIT:
61521 @@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mu
61522 return;
61523
61524 td->mutexes[dat] = 1;
61525 - td->event = atomic_add_return(1, &rttest_event);
61526 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61527 return;
61528
61529 case RTTEST_LOCKBKL:
61530 diff -urNp linux-2.6.32.41/kernel/sched.c linux-2.6.32.41/kernel/sched.c
61531 --- linux-2.6.32.41/kernel/sched.c 2011-03-27 14:31:47.000000000 -0400
61532 +++ linux-2.6.32.41/kernel/sched.c 2011-05-22 23:02:06.000000000 -0400
61533 @@ -5043,7 +5043,7 @@ out:
61534 * In CONFIG_NO_HZ case, the idle load balance owner will do the
61535 * rebalancing for all the cpus for whom scheduler ticks are stopped.
61536 */
61537 -static void run_rebalance_domains(struct softirq_action *h)
61538 +static void run_rebalance_domains(void)
61539 {
61540 int this_cpu = smp_processor_id();
61541 struct rq *this_rq = cpu_rq(this_cpu);
61542 @@ -5700,6 +5700,8 @@ asmlinkage void __sched schedule(void)
61543 struct rq *rq;
61544 int cpu;
61545
61546 + pax_track_stack();
61547 +
61548 need_resched:
61549 preempt_disable();
61550 cpu = smp_processor_id();
61551 @@ -5770,7 +5772,7 @@ EXPORT_SYMBOL(schedule);
61552 * Look out! "owner" is an entirely speculative pointer
61553 * access and not reliable.
61554 */
61555 -int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
61556 +int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
61557 {
61558 unsigned int cpu;
61559 struct rq *rq;
61560 @@ -5784,10 +5786,10 @@ int mutex_spin_on_owner(struct mutex *lo
61561 * DEBUG_PAGEALLOC could have unmapped it if
61562 * the mutex owner just released it and exited.
61563 */
61564 - if (probe_kernel_address(&owner->cpu, cpu))
61565 + if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
61566 return 0;
61567 #else
61568 - cpu = owner->cpu;
61569 + cpu = task_thread_info(owner)->cpu;
61570 #endif
61571
61572 /*
61573 @@ -5816,7 +5818,7 @@ int mutex_spin_on_owner(struct mutex *lo
61574 /*
61575 * Is that owner really running on that cpu?
61576 */
61577 - if (task_thread_info(rq->curr) != owner || need_resched())
61578 + if (rq->curr != owner || need_resched())
61579 return 0;
61580
61581 cpu_relax();
61582 @@ -6359,6 +6361,8 @@ int can_nice(const struct task_struct *p
61583 /* convert nice value [19,-20] to rlimit style value [1,40] */
61584 int nice_rlim = 20 - nice;
61585
61586 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
61587 +
61588 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
61589 capable(CAP_SYS_NICE));
61590 }
61591 @@ -6392,7 +6396,8 @@ SYSCALL_DEFINE1(nice, int, increment)
61592 if (nice > 19)
61593 nice = 19;
61594
61595 - if (increment < 0 && !can_nice(current, nice))
61596 + if (increment < 0 && (!can_nice(current, nice) ||
61597 + gr_handle_chroot_nice()))
61598 return -EPERM;
61599
61600 retval = security_task_setnice(current, nice);
61601 @@ -8774,7 +8779,7 @@ static void init_sched_groups_power(int
61602 long power;
61603 int weight;
61604
61605 - WARN_ON(!sd || !sd->groups);
61606 + BUG_ON(!sd || !sd->groups);
61607
61608 if (cpu != group_first_cpu(sd->groups))
61609 return;
61610 diff -urNp linux-2.6.32.41/kernel/signal.c linux-2.6.32.41/kernel/signal.c
61611 --- linux-2.6.32.41/kernel/signal.c 2011-04-17 17:00:52.000000000 -0400
61612 +++ linux-2.6.32.41/kernel/signal.c 2011-05-22 23:02:06.000000000 -0400
61613 @@ -41,12 +41,12 @@
61614
61615 static struct kmem_cache *sigqueue_cachep;
61616
61617 -static void __user *sig_handler(struct task_struct *t, int sig)
61618 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
61619 {
61620 return t->sighand->action[sig - 1].sa.sa_handler;
61621 }
61622
61623 -static int sig_handler_ignored(void __user *handler, int sig)
61624 +static int sig_handler_ignored(__sighandler_t handler, int sig)
61625 {
61626 /* Is it explicitly or implicitly ignored? */
61627 return handler == SIG_IGN ||
61628 @@ -56,7 +56,7 @@ static int sig_handler_ignored(void __us
61629 static int sig_task_ignored(struct task_struct *t, int sig,
61630 int from_ancestor_ns)
61631 {
61632 - void __user *handler;
61633 + __sighandler_t handler;
61634
61635 handler = sig_handler(t, sig);
61636
61637 @@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc
61638 */
61639 user = get_uid(__task_cred(t)->user);
61640 atomic_inc(&user->sigpending);
61641 +
61642 + if (!override_rlimit)
61643 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
61644 if (override_rlimit ||
61645 atomic_read(&user->sigpending) <=
61646 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
61647 @@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct
61648
61649 int unhandled_signal(struct task_struct *tsk, int sig)
61650 {
61651 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
61652 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
61653 if (is_global_init(tsk))
61654 return 1;
61655 if (handler != SIG_IGN && handler != SIG_DFL)
61656 @@ -627,6 +630,9 @@ static int check_kill_permission(int sig
61657 }
61658 }
61659
61660 + if (gr_handle_signal(t, sig))
61661 + return -EPERM;
61662 +
61663 return security_task_kill(t, info, sig, 0);
61664 }
61665
61666 @@ -968,7 +974,7 @@ __group_send_sig_info(int sig, struct si
61667 return send_signal(sig, info, p, 1);
61668 }
61669
61670 -static int
61671 +int
61672 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
61673 {
61674 return send_signal(sig, info, t, 0);
61675 @@ -1005,6 +1011,7 @@ force_sig_info(int sig, struct siginfo *
61676 unsigned long int flags;
61677 int ret, blocked, ignored;
61678 struct k_sigaction *action;
61679 + int is_unhandled = 0;
61680
61681 spin_lock_irqsave(&t->sighand->siglock, flags);
61682 action = &t->sighand->action[sig-1];
61683 @@ -1019,9 +1026,18 @@ force_sig_info(int sig, struct siginfo *
61684 }
61685 if (action->sa.sa_handler == SIG_DFL)
61686 t->signal->flags &= ~SIGNAL_UNKILLABLE;
61687 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
61688 + is_unhandled = 1;
61689 ret = specific_send_sig_info(sig, info, t);
61690 spin_unlock_irqrestore(&t->sighand->siglock, flags);
61691
61692 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
61693 + normal operation */
61694 + if (is_unhandled) {
61695 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
61696 + gr_handle_crash(t, sig);
61697 + }
61698 +
61699 return ret;
61700 }
61701
61702 @@ -1081,8 +1097,11 @@ int group_send_sig_info(int sig, struct
61703 {
61704 int ret = check_kill_permission(sig, info, p);
61705
61706 - if (!ret && sig)
61707 + if (!ret && sig) {
61708 ret = do_send_sig_info(sig, info, p, true);
61709 + if (!ret)
61710 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
61711 + }
61712
61713 return ret;
61714 }
61715 @@ -1644,6 +1663,8 @@ void ptrace_notify(int exit_code)
61716 {
61717 siginfo_t info;
61718
61719 + pax_track_stack();
61720 +
61721 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
61722
61723 memset(&info, 0, sizeof info);
61724 diff -urNp linux-2.6.32.41/kernel/smp.c linux-2.6.32.41/kernel/smp.c
61725 --- linux-2.6.32.41/kernel/smp.c 2011-03-27 14:31:47.000000000 -0400
61726 +++ linux-2.6.32.41/kernel/smp.c 2011-04-17 15:56:46.000000000 -0400
61727 @@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void
61728 }
61729 EXPORT_SYMBOL(smp_call_function);
61730
61731 -void ipi_call_lock(void)
61732 +void ipi_call_lock(void) __acquires(call_function.lock)
61733 {
61734 spin_lock(&call_function.lock);
61735 }
61736
61737 -void ipi_call_unlock(void)
61738 +void ipi_call_unlock(void) __releases(call_function.lock)
61739 {
61740 spin_unlock(&call_function.lock);
61741 }
61742
61743 -void ipi_call_lock_irq(void)
61744 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
61745 {
61746 spin_lock_irq(&call_function.lock);
61747 }
61748
61749 -void ipi_call_unlock_irq(void)
61750 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
61751 {
61752 spin_unlock_irq(&call_function.lock);
61753 }
61754 diff -urNp linux-2.6.32.41/kernel/softirq.c linux-2.6.32.41/kernel/softirq.c
61755 --- linux-2.6.32.41/kernel/softirq.c 2011-03-27 14:31:47.000000000 -0400
61756 +++ linux-2.6.32.41/kernel/softirq.c 2011-04-17 15:56:46.000000000 -0400
61757 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
61758
61759 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
61760
61761 -char *softirq_to_name[NR_SOFTIRQS] = {
61762 +const char * const softirq_to_name[NR_SOFTIRQS] = {
61763 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
61764 "TASKLET", "SCHED", "HRTIMER", "RCU"
61765 };
61766 @@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
61767
61768 asmlinkage void __do_softirq(void)
61769 {
61770 - struct softirq_action *h;
61771 + const struct softirq_action *h;
61772 __u32 pending;
61773 int max_restart = MAX_SOFTIRQ_RESTART;
61774 int cpu;
61775 @@ -233,7 +233,7 @@ restart:
61776 kstat_incr_softirqs_this_cpu(h - softirq_vec);
61777
61778 trace_softirq_entry(h, softirq_vec);
61779 - h->action(h);
61780 + h->action();
61781 trace_softirq_exit(h, softirq_vec);
61782 if (unlikely(prev_count != preempt_count())) {
61783 printk(KERN_ERR "huh, entered softirq %td %s %p"
61784 @@ -363,7 +363,7 @@ void raise_softirq(unsigned int nr)
61785 local_irq_restore(flags);
61786 }
61787
61788 -void open_softirq(int nr, void (*action)(struct softirq_action *))
61789 +void open_softirq(int nr, void (*action)(void))
61790 {
61791 softirq_vec[nr].action = action;
61792 }
61793 @@ -419,7 +419,7 @@ void __tasklet_hi_schedule_first(struct
61794
61795 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
61796
61797 -static void tasklet_action(struct softirq_action *a)
61798 +static void tasklet_action(void)
61799 {
61800 struct tasklet_struct *list;
61801
61802 @@ -454,7 +454,7 @@ static void tasklet_action(struct softir
61803 }
61804 }
61805
61806 -static void tasklet_hi_action(struct softirq_action *a)
61807 +static void tasklet_hi_action(void)
61808 {
61809 struct tasklet_struct *list;
61810
61811 diff -urNp linux-2.6.32.41/kernel/sys.c linux-2.6.32.41/kernel/sys.c
61812 --- linux-2.6.32.41/kernel/sys.c 2011-03-27 14:31:47.000000000 -0400
61813 +++ linux-2.6.32.41/kernel/sys.c 2011-04-17 15:56:46.000000000 -0400
61814 @@ -133,6 +133,12 @@ static int set_one_prio(struct task_stru
61815 error = -EACCES;
61816 goto out;
61817 }
61818 +
61819 + if (gr_handle_chroot_setpriority(p, niceval)) {
61820 + error = -EACCES;
61821 + goto out;
61822 + }
61823 +
61824 no_nice = security_task_setnice(p, niceval);
61825 if (no_nice) {
61826 error = no_nice;
61827 @@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which,
61828 !(user = find_user(who)))
61829 goto out_unlock; /* No processes for this user */
61830
61831 - do_each_thread(g, p)
61832 + do_each_thread(g, p) {
61833 if (__task_cred(p)->uid == who)
61834 error = set_one_prio(p, niceval, error);
61835 - while_each_thread(g, p);
61836 + } while_each_thread(g, p);
61837 if (who != cred->uid)
61838 free_uid(user); /* For find_user() */
61839 break;
61840 @@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which,
61841 !(user = find_user(who)))
61842 goto out_unlock; /* No processes for this user */
61843
61844 - do_each_thread(g, p)
61845 + do_each_thread(g, p) {
61846 if (__task_cred(p)->uid == who) {
61847 niceval = 20 - task_nice(p);
61848 if (niceval > retval)
61849 retval = niceval;
61850 }
61851 - while_each_thread(g, p);
61852 + } while_each_thread(g, p);
61853 if (who != cred->uid)
61854 free_uid(user); /* for find_user() */
61855 break;
61856 @@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
61857 goto error;
61858 }
61859
61860 + if (gr_check_group_change(new->gid, new->egid, -1))
61861 + goto error;
61862 +
61863 if (rgid != (gid_t) -1 ||
61864 (egid != (gid_t) -1 && egid != old->gid))
61865 new->sgid = new->egid;
61866 @@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
61867 goto error;
61868
61869 retval = -EPERM;
61870 +
61871 + if (gr_check_group_change(gid, gid, gid))
61872 + goto error;
61873 +
61874 if (capable(CAP_SETGID))
61875 new->gid = new->egid = new->sgid = new->fsgid = gid;
61876 else if (gid == old->gid || gid == old->sgid)
61877 @@ -627,6 +640,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
61878 goto error;
61879 }
61880
61881 + if (gr_check_user_change(new->uid, new->euid, -1))
61882 + goto error;
61883 +
61884 if (new->uid != old->uid) {
61885 retval = set_user(new);
61886 if (retval < 0)
61887 @@ -675,6 +691,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
61888 goto error;
61889
61890 retval = -EPERM;
61891 +
61892 + if (gr_check_crash_uid(uid))
61893 + goto error;
61894 + if (gr_check_user_change(uid, uid, uid))
61895 + goto error;
61896 +
61897 if (capable(CAP_SETUID)) {
61898 new->suid = new->uid = uid;
61899 if (uid != old->uid) {
61900 @@ -732,6 +754,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
61901 goto error;
61902 }
61903
61904 + if (gr_check_user_change(ruid, euid, -1))
61905 + goto error;
61906 +
61907 if (ruid != (uid_t) -1) {
61908 new->uid = ruid;
61909 if (ruid != old->uid) {
61910 @@ -800,6 +825,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
61911 goto error;
61912 }
61913
61914 + if (gr_check_group_change(rgid, egid, -1))
61915 + goto error;
61916 +
61917 if (rgid != (gid_t) -1)
61918 new->gid = rgid;
61919 if (egid != (gid_t) -1)
61920 @@ -849,6 +877,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
61921 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
61922 goto error;
61923
61924 + if (gr_check_user_change(-1, -1, uid))
61925 + goto error;
61926 +
61927 if (uid == old->uid || uid == old->euid ||
61928 uid == old->suid || uid == old->fsuid ||
61929 capable(CAP_SETUID)) {
61930 @@ -889,6 +920,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
61931 if (gid == old->gid || gid == old->egid ||
61932 gid == old->sgid || gid == old->fsgid ||
61933 capable(CAP_SETGID)) {
61934 + if (gr_check_group_change(-1, -1, gid))
61935 + goto error;
61936 +
61937 if (gid != old_fsgid) {
61938 new->fsgid = gid;
61939 goto change_okay;
61940 @@ -1454,7 +1488,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
61941 error = get_dumpable(me->mm);
61942 break;
61943 case PR_SET_DUMPABLE:
61944 - if (arg2 < 0 || arg2 > 1) {
61945 + if (arg2 > 1) {
61946 error = -EINVAL;
61947 break;
61948 }
61949 diff -urNp linux-2.6.32.41/kernel/sysctl.c linux-2.6.32.41/kernel/sysctl.c
61950 --- linux-2.6.32.41/kernel/sysctl.c 2011-03-27 14:31:47.000000000 -0400
61951 +++ linux-2.6.32.41/kernel/sysctl.c 2011-04-17 15:56:46.000000000 -0400
61952 @@ -63,6 +63,13 @@
61953 static int deprecated_sysctl_warning(struct __sysctl_args *args);
61954
61955 #if defined(CONFIG_SYSCTL)
61956 +#include <linux/grsecurity.h>
61957 +#include <linux/grinternal.h>
61958 +
61959 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
61960 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
61961 + const int op);
61962 +extern int gr_handle_chroot_sysctl(const int op);
61963
61964 /* External variables not in a header file. */
61965 extern int C_A_D;
61966 @@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_ta
61967 static int proc_taint(struct ctl_table *table, int write,
61968 void __user *buffer, size_t *lenp, loff_t *ppos);
61969 #endif
61970 +extern ctl_table grsecurity_table[];
61971
61972 static struct ctl_table root_table[];
61973 static struct ctl_table_root sysctl_table_root;
61974 @@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
61975 int sysctl_legacy_va_layout;
61976 #endif
61977
61978 +#ifdef CONFIG_PAX_SOFTMODE
61979 +static ctl_table pax_table[] = {
61980 + {
61981 + .ctl_name = CTL_UNNUMBERED,
61982 + .procname = "softmode",
61983 + .data = &pax_softmode,
61984 + .maxlen = sizeof(unsigned int),
61985 + .mode = 0600,
61986 + .proc_handler = &proc_dointvec,
61987 + },
61988 +
61989 + { .ctl_name = 0 }
61990 +};
61991 +#endif
61992 +
61993 extern int prove_locking;
61994 extern int lock_stat;
61995
61996 @@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = N
61997 #endif
61998
61999 static struct ctl_table kern_table[] = {
62000 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
62001 + {
62002 + .ctl_name = CTL_UNNUMBERED,
62003 + .procname = "grsecurity",
62004 + .mode = 0500,
62005 + .child = grsecurity_table,
62006 + },
62007 +#endif
62008 +
62009 +#ifdef CONFIG_PAX_SOFTMODE
62010 + {
62011 + .ctl_name = CTL_UNNUMBERED,
62012 + .procname = "pax",
62013 + .mode = 0500,
62014 + .child = pax_table,
62015 + },
62016 +#endif
62017 +
62018 {
62019 .ctl_name = CTL_UNNUMBERED,
62020 .procname = "sched_child_runs_first",
62021 @@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
62022 .data = &modprobe_path,
62023 .maxlen = KMOD_PATH_LEN,
62024 .mode = 0644,
62025 - .proc_handler = &proc_dostring,
62026 - .strategy = &sysctl_string,
62027 + .proc_handler = &proc_dostring_modpriv,
62028 + .strategy = &sysctl_string_modpriv,
62029 },
62030 {
62031 .ctl_name = CTL_UNNUMBERED,
62032 @@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
62033 .mode = 0644,
62034 .proc_handler = &proc_dointvec
62035 },
62036 + {
62037 + .procname = "heap_stack_gap",
62038 + .data = &sysctl_heap_stack_gap,
62039 + .maxlen = sizeof(sysctl_heap_stack_gap),
62040 + .mode = 0644,
62041 + .proc_handler = proc_doulongvec_minmax,
62042 + },
62043 #else
62044 {
62045 .ctl_name = CTL_UNNUMBERED,
62046 @@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl
62047 return 0;
62048 }
62049
62050 +static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
62051 +
62052 static int parse_table(int __user *name, int nlen,
62053 void __user *oldval, size_t __user *oldlenp,
62054 void __user *newval, size_t newlen,
62055 @@ -1821,7 +1871,7 @@ repeat:
62056 if (n == table->ctl_name) {
62057 int error;
62058 if (table->child) {
62059 - if (sysctl_perm(root, table, MAY_EXEC))
62060 + if (sysctl_perm_nochk(root, table, MAY_EXEC))
62061 return -EPERM;
62062 name++;
62063 nlen--;
62064 @@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *r
62065 int error;
62066 int mode;
62067
62068 + if (table->parent != NULL && table->parent->procname != NULL &&
62069 + table->procname != NULL &&
62070 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
62071 + return -EACCES;
62072 + if (gr_handle_chroot_sysctl(op))
62073 + return -EACCES;
62074 + error = gr_handle_sysctl(table, op);
62075 + if (error)
62076 + return error;
62077 +
62078 + error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
62079 + if (error)
62080 + return error;
62081 +
62082 + if (root->permissions)
62083 + mode = root->permissions(root, current->nsproxy, table);
62084 + else
62085 + mode = table->mode;
62086 +
62087 + return test_perm(mode, op);
62088 +}
62089 +
62090 +int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
62091 +{
62092 + int error;
62093 + int mode;
62094 +
62095 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
62096 if (error)
62097 return error;
62098 @@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *tabl
62099 buffer, lenp, ppos);
62100 }
62101
62102 +int proc_dostring_modpriv(struct ctl_table *table, int write,
62103 + void __user *buffer, size_t *lenp, loff_t *ppos)
62104 +{
62105 + if (write && !capable(CAP_SYS_MODULE))
62106 + return -EPERM;
62107 +
62108 + return _proc_do_string(table->data, table->maxlen, write,
62109 + buffer, lenp, ppos);
62110 +}
62111 +
62112
62113 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
62114 int *valp,
62115 @@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(v
62116 vleft = table->maxlen / sizeof(unsigned long);
62117 left = *lenp;
62118
62119 - for (; left && vleft--; i++, min++, max++, first=0) {
62120 + for (; left && vleft--; i++, first=0) {
62121 if (write) {
62122 while (left) {
62123 char c;
62124 @@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *tabl
62125 return -ENOSYS;
62126 }
62127
62128 +int proc_dostring_modpriv(struct ctl_table *table, int write,
62129 + void __user *buffer, size_t *lenp, loff_t *ppos)
62130 +{
62131 + return -ENOSYS;
62132 +}
62133 +
62134 int proc_dointvec(struct ctl_table *table, int write,
62135 void __user *buffer, size_t *lenp, loff_t *ppos)
62136 {
62137 @@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *tabl
62138 return 1;
62139 }
62140
62141 +int sysctl_string_modpriv(struct ctl_table *table,
62142 + void __user *oldval, size_t __user *oldlenp,
62143 + void __user *newval, size_t newlen)
62144 +{
62145 + if (newval && newlen && !capable(CAP_SYS_MODULE))
62146 + return -EPERM;
62147 +
62148 + return sysctl_string(table, oldval, oldlenp, newval, newlen);
62149 +}
62150 +
62151 /*
62152 * This function makes sure that all of the integers in the vector
62153 * are between the minimum and maximum values given in the arrays
62154 @@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *tabl
62155 return -ENOSYS;
62156 }
62157
62158 +int sysctl_string_modpriv(struct ctl_table *table,
62159 + void __user *oldval, size_t __user *oldlenp,
62160 + void __user *newval, size_t newlen)
62161 +{
62162 + return -ENOSYS;
62163 +}
62164 +
62165 int sysctl_intvec(struct ctl_table *table,
62166 void __user *oldval, size_t __user *oldlenp,
62167 void __user *newval, size_t newlen)
62168 @@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
62169 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
62170 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
62171 EXPORT_SYMBOL(proc_dostring);
62172 +EXPORT_SYMBOL(proc_dostring_modpriv);
62173 EXPORT_SYMBOL(proc_doulongvec_minmax);
62174 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
62175 EXPORT_SYMBOL(register_sysctl_table);
62176 @@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
62177 EXPORT_SYMBOL(sysctl_jiffies);
62178 EXPORT_SYMBOL(sysctl_ms_jiffies);
62179 EXPORT_SYMBOL(sysctl_string);
62180 +EXPORT_SYMBOL(sysctl_string_modpriv);
62181 EXPORT_SYMBOL(sysctl_data);
62182 EXPORT_SYMBOL(unregister_sysctl_table);
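Note on proc_dostring_modpriv()/sysctl_string_modpriv(): both wrappers reject writes from tasks lacking CAP_SYS_MODULE and otherwise defer to the stock string handlers; they are wired up for the modprobe_path entry earlier in this file so only module-loading-capable tasks can retarget the usermode helper. Any other string sysctl could be guarded the same way; a hypothetical entry (the procname and data symbol are invented for illustration):

	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "my_helper_path",
		.data		= &my_helper_path,
		.maxlen		= KMOD_PATH_LEN,
		.mode		= 0644,
		.proc_handler	= &proc_dostring_modpriv,
		.strategy	= &sysctl_string_modpriv,
	},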
62183 diff -urNp linux-2.6.32.41/kernel/sysctl_check.c linux-2.6.32.41/kernel/sysctl_check.c
62184 --- linux-2.6.32.41/kernel/sysctl_check.c 2011-03-27 14:31:47.000000000 -0400
62185 +++ linux-2.6.32.41/kernel/sysctl_check.c 2011-04-17 15:56:46.000000000 -0400
62186 @@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *n
62187 } else {
62188 if ((table->strategy == sysctl_data) ||
62189 (table->strategy == sysctl_string) ||
62190 + (table->strategy == sysctl_string_modpriv) ||
62191 (table->strategy == sysctl_intvec) ||
62192 (table->strategy == sysctl_jiffies) ||
62193 (table->strategy == sysctl_ms_jiffies) ||
62194 (table->proc_handler == proc_dostring) ||
62195 + (table->proc_handler == proc_dostring_modpriv) ||
62196 (table->proc_handler == proc_dointvec) ||
62197 (table->proc_handler == proc_dointvec_minmax) ||
62198 (table->proc_handler == proc_dointvec_jiffies) ||
62199 diff -urNp linux-2.6.32.41/kernel/taskstats.c linux-2.6.32.41/kernel/taskstats.c
62200 --- linux-2.6.32.41/kernel/taskstats.c 2011-03-27 14:31:47.000000000 -0400
62201 +++ linux-2.6.32.41/kernel/taskstats.c 2011-04-17 15:56:46.000000000 -0400
62202 @@ -26,9 +26,12 @@
62203 #include <linux/cgroup.h>
62204 #include <linux/fs.h>
62205 #include <linux/file.h>
62206 +#include <linux/grsecurity.h>
62207 #include <net/genetlink.h>
62208 #include <asm/atomic.h>
62209
62210 +extern int gr_is_taskstats_denied(int pid);
62211 +
62212 /*
62213 * Maximum length of a cpumask that can be specified in
62214 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
62215 @@ -433,6 +436,9 @@ static int taskstats_user_cmd(struct sk_
62216 size_t size;
62217 cpumask_var_t mask;
62218
62219 + if (gr_is_taskstats_denied(current->pid))
62220 + return -EACCES;
62221 +
62222 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
62223 return -ENOMEM;
62224
62225 diff -urNp linux-2.6.32.41/kernel/time/tick-broadcast.c linux-2.6.32.41/kernel/time/tick-broadcast.c
62226 --- linux-2.6.32.41/kernel/time/tick-broadcast.c 2011-05-23 16:56:59.000000000 -0400
62227 +++ linux-2.6.32.41/kernel/time/tick-broadcast.c 2011-05-23 16:57:13.000000000 -0400
62228 @@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct cl
62229 * then clear the broadcast bit.
62230 */
62231 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
62232 - int cpu = smp_processor_id();
62233 + cpu = smp_processor_id();
62234
62235 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
62236 tick_broadcast_clear_oneshot(cpu);
62237 diff -urNp linux-2.6.32.41/kernel/time/timekeeping.c linux-2.6.32.41/kernel/time/timekeeping.c
62238 --- linux-2.6.32.41/kernel/time/timekeeping.c 2011-05-23 16:56:59.000000000 -0400
62239 +++ linux-2.6.32.41/kernel/time/timekeeping.c 2011-05-23 19:09:33.000000000 -0400
62240 @@ -14,6 +14,7 @@
62241 #include <linux/init.h>
62242 #include <linux/mm.h>
62243 #include <linux/sched.h>
62244 +#include <linux/grsecurity.h>
62245 #include <linux/sysdev.h>
62246 #include <linux/clocksource.h>
62247 #include <linux/jiffies.h>
62248 @@ -176,7 +177,7 @@ void update_xtime_cache(u64 nsec)
62249 */
62250 struct timespec ts = xtime;
62251 timespec_add_ns(&ts, nsec);
62252 - ACCESS_ONCE(xtime_cache) = ts;
62253 + ACCESS_ONCE_RW(xtime_cache) = ts;
62254 }
62255
62256 /* must hold xtime_lock */
62257 @@ -329,6 +330,8 @@ int do_settimeofday(struct timespec *tv)
62258 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
62259 return -EINVAL;
62260
62261 + gr_log_timechange();
62262 +
62263 write_seqlock_irqsave(&xtime_lock, flags);
62264
62265 timekeeping_forward_now();
62266 diff -urNp linux-2.6.32.41/kernel/time/timer_list.c linux-2.6.32.41/kernel/time/timer_list.c
62267 --- linux-2.6.32.41/kernel/time/timer_list.c 2011-03-27 14:31:47.000000000 -0400
62268 +++ linux-2.6.32.41/kernel/time/timer_list.c 2011-04-17 15:56:46.000000000 -0400
62269 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
62270
62271 static void print_name_offset(struct seq_file *m, void *sym)
62272 {
62273 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62274 + SEQ_printf(m, "<%p>", NULL);
62275 +#else
62276 char symname[KSYM_NAME_LEN];
62277
62278 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
62279 SEQ_printf(m, "<%p>", sym);
62280 else
62281 SEQ_printf(m, "%s", symname);
62282 +#endif
62283 }
62284
62285 static void
62286 @@ -112,7 +116,11 @@ next_one:
62287 static void
62288 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
62289 {
62290 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62291 + SEQ_printf(m, " .base: %p\n", NULL);
62292 +#else
62293 SEQ_printf(m, " .base: %p\n", base);
62294 +#endif
62295 SEQ_printf(m, " .index: %d\n",
62296 base->index);
62297 SEQ_printf(m, " .resolution: %Lu nsecs\n",
62298 @@ -289,7 +297,11 @@ static int __init init_timer_list_procfs
62299 {
62300 struct proc_dir_entry *pe;
62301
62302 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
62303 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
62304 +#else
62305 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
62306 +#endif
62307 if (!pe)
62308 return -ENOMEM;
62309 return 0;
62310 diff -urNp linux-2.6.32.41/kernel/time/timer_stats.c linux-2.6.32.41/kernel/time/timer_stats.c
62311 --- linux-2.6.32.41/kernel/time/timer_stats.c 2011-03-27 14:31:47.000000000 -0400
62312 +++ linux-2.6.32.41/kernel/time/timer_stats.c 2011-05-04 17:56:28.000000000 -0400
62313 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
62314 static unsigned long nr_entries;
62315 static struct entry entries[MAX_ENTRIES];
62316
62317 -static atomic_t overflow_count;
62318 +static atomic_unchecked_t overflow_count;
62319
62320 /*
62321 * The entries are in a hash-table, for fast lookup:
62322 @@ -140,7 +140,7 @@ static void reset_entries(void)
62323 nr_entries = 0;
62324 memset(entries, 0, sizeof(entries));
62325 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
62326 - atomic_set(&overflow_count, 0);
62327 + atomic_set_unchecked(&overflow_count, 0);
62328 }
62329
62330 static struct entry *alloc_entry(void)
62331 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
62332 if (likely(entry))
62333 entry->count++;
62334 else
62335 - atomic_inc(&overflow_count);
62336 + atomic_inc_unchecked(&overflow_count);
62337
62338 out_unlock:
62339 spin_unlock_irqrestore(lock, flags);
62340 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
62341
62342 static void print_name_offset(struct seq_file *m, unsigned long addr)
62343 {
62344 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62345 + seq_printf(m, "<%p>", NULL);
62346 +#else
62347 char symname[KSYM_NAME_LEN];
62348
62349 if (lookup_symbol_name(addr, symname) < 0)
62350 seq_printf(m, "<%p>", (void *)addr);
62351 else
62352 seq_printf(m, "%s", symname);
62353 +#endif
62354 }
62355
62356 static int tstats_show(struct seq_file *m, void *v)
62357 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
62358
62359 seq_puts(m, "Timer Stats Version: v0.2\n");
62360 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
62361 - if (atomic_read(&overflow_count))
62362 + if (atomic_read_unchecked(&overflow_count))
62363 seq_printf(m, "Overflow: %d entries\n",
62364 - atomic_read(&overflow_count));
62365 + atomic_read_unchecked(&overflow_count));
62366
62367 for (i = 0; i < nr_entries; i++) {
62368 entry = entries + i;
62369 @@ -415,7 +419,11 @@ static int __init init_tstats_procfs(voi
62370 {
62371 struct proc_dir_entry *pe;
62372
62373 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
62374 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
62375 +#else
62376 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
62377 +#endif
62378 if (!pe)
62379 return -ENOMEM;
62380 return 0;
62381 diff -urNp linux-2.6.32.41/kernel/time.c linux-2.6.32.41/kernel/time.c
62382 --- linux-2.6.32.41/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
62383 +++ linux-2.6.32.41/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
62384 @@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec
62385 return error;
62386
62387 if (tz) {
62388 + /* we log in do_settimeofday called below, so don't log twice
62389 + */
62390 + if (!tv)
62391 + gr_log_timechange();
62392 +
62393 /* SMP safe, global irq locking makes it work. */
62394 sys_tz = *tz;
62395 update_vsyscall_tz();
62396 @@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
62397 * Avoid unnecessary multiplications/divisions in the
62398 * two most common HZ cases:
62399 */
62400 -unsigned int inline jiffies_to_msecs(const unsigned long j)
62401 +inline unsigned int jiffies_to_msecs(const unsigned long j)
62402 {
62403 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
62404 return (MSEC_PER_SEC / HZ) * j;
62405 @@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(con
62406 }
62407 EXPORT_SYMBOL(jiffies_to_msecs);
62408
62409 -unsigned int inline jiffies_to_usecs(const unsigned long j)
62410 +inline unsigned int jiffies_to_usecs(const unsigned long j)
62411 {
62412 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
62413 return (USEC_PER_SEC / HZ) * j;
62414 diff -urNp linux-2.6.32.41/kernel/timer.c linux-2.6.32.41/kernel/timer.c
62415 --- linux-2.6.32.41/kernel/timer.c 2011-03-27 14:31:47.000000000 -0400
62416 +++ linux-2.6.32.41/kernel/timer.c 2011-04-17 15:56:46.000000000 -0400
62417 @@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
62418 /*
62419 * This function runs timers and the timer-tq in bottom half context.
62420 */
62421 -static void run_timer_softirq(struct softirq_action *h)
62422 +static void run_timer_softirq(void)
62423 {
62424 struct tvec_base *base = __get_cpu_var(tvec_bases);
62425
62426 diff -urNp linux-2.6.32.41/kernel/trace/blktrace.c linux-2.6.32.41/kernel/trace/blktrace.c
62427 --- linux-2.6.32.41/kernel/trace/blktrace.c 2011-03-27 14:31:47.000000000 -0400
62428 +++ linux-2.6.32.41/kernel/trace/blktrace.c 2011-05-04 17:56:28.000000000 -0400
62429 @@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct f
62430 struct blk_trace *bt = filp->private_data;
62431 char buf[16];
62432
62433 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
62434 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
62435
62436 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
62437 }
62438 @@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(str
62439 return 1;
62440
62441 bt = buf->chan->private_data;
62442 - atomic_inc(&bt->dropped);
62443 + atomic_inc_unchecked(&bt->dropped);
62444 return 0;
62445 }
62446
62447 @@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_qu
62448
62449 bt->dir = dir;
62450 bt->dev = dev;
62451 - atomic_set(&bt->dropped, 0);
62452 + atomic_set_unchecked(&bt->dropped, 0);
62453
62454 ret = -EIO;
62455 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
62456 diff -urNp linux-2.6.32.41/kernel/trace/ftrace.c linux-2.6.32.41/kernel/trace/ftrace.c
62457 --- linux-2.6.32.41/kernel/trace/ftrace.c 2011-03-27 14:31:47.000000000 -0400
62458 +++ linux-2.6.32.41/kernel/trace/ftrace.c 2011-04-17 15:56:46.000000000 -0400
62459 @@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod,
62460
62461 ip = rec->ip;
62462
62463 + ret = ftrace_arch_code_modify_prepare();
62464 + FTRACE_WARN_ON(ret);
62465 + if (ret)
62466 + return 0;
62467 +
62468 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
62469 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
62470 if (ret) {
62471 ftrace_bug(ret, ip);
62472 rec->flags |= FTRACE_FL_FAILED;
62473 - return 0;
62474 }
62475 - return 1;
62476 + return ret ? 0 : 1;
62477 }
62478
62479 /*
62480 diff -urNp linux-2.6.32.41/kernel/trace/ring_buffer.c linux-2.6.32.41/kernel/trace/ring_buffer.c
62481 --- linux-2.6.32.41/kernel/trace/ring_buffer.c 2011-03-27 14:31:47.000000000 -0400
62482 +++ linux-2.6.32.41/kernel/trace/ring_buffer.c 2011-04-17 15:56:46.000000000 -0400
62483 @@ -606,7 +606,7 @@ static struct list_head *rb_list_head(st
62484 * the reader page). But if the next page is a header page,
62485 * its flags will be non zero.
62486 */
62487 -static int inline
62488 +static inline int
62489 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
62490 struct buffer_page *page, struct list_head *list)
62491 {
62492 diff -urNp linux-2.6.32.41/kernel/trace/trace.c linux-2.6.32.41/kernel/trace/trace.c
62493 --- linux-2.6.32.41/kernel/trace/trace.c 2011-03-27 14:31:47.000000000 -0400
62494 +++ linux-2.6.32.41/kernel/trace/trace.c 2011-05-16 21:46:57.000000000 -0400
62495 @@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(
62496 size_t rem;
62497 unsigned int i;
62498
62499 + pax_track_stack();
62500 +
62501 /* copy the tracer to avoid using a global lock all around */
62502 mutex_lock(&trace_types_lock);
62503 if (unlikely(old_tracer != current_trace && current_trace)) {
62504 @@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file
62505 int entries, size, i;
62506 size_t ret;
62507
62508 + pax_track_stack();
62509 +
62510 if (*ppos & (PAGE_SIZE - 1)) {
62511 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
62512 return -EINVAL;
62513 @@ -3816,10 +3820,9 @@ static const struct file_operations trac
62514 };
62515 #endif
62516
62517 -static struct dentry *d_tracer;
62518 -
62519 struct dentry *tracing_init_dentry(void)
62520 {
62521 + static struct dentry *d_tracer;
62522 static int once;
62523
62524 if (d_tracer)
62525 @@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
62526 return d_tracer;
62527 }
62528
62529 -static struct dentry *d_percpu;
62530 -
62531 struct dentry *tracing_dentry_percpu(void)
62532 {
62533 + static struct dentry *d_percpu;
62534 static int once;
62535 struct dentry *d_tracer;
62536
62537 diff -urNp linux-2.6.32.41/kernel/trace/trace_events.c linux-2.6.32.41/kernel/trace/trace_events.c
62538 --- linux-2.6.32.41/kernel/trace/trace_events.c 2011-03-27 14:31:47.000000000 -0400
62539 +++ linux-2.6.32.41/kernel/trace/trace_events.c 2011-04-17 15:56:46.000000000 -0400
62540 @@ -951,6 +951,8 @@ static LIST_HEAD(ftrace_module_file_list
62541 * Modules must own their file_operations to keep up with
62542 * reference counting.
62543 */
62544 +
62545 +/* cannot be const */
62546 struct ftrace_module_file_ops {
62547 struct list_head list;
62548 struct module *mod;
62549 diff -urNp linux-2.6.32.41/kernel/trace/trace_mmiotrace.c linux-2.6.32.41/kernel/trace/trace_mmiotrace.c
62550 --- linux-2.6.32.41/kernel/trace/trace_mmiotrace.c 2011-03-27 14:31:47.000000000 -0400
62551 +++ linux-2.6.32.41/kernel/trace/trace_mmiotrace.c 2011-05-04 17:56:28.000000000 -0400
62552 @@ -23,7 +23,7 @@ struct header_iter {
62553 static struct trace_array *mmio_trace_array;
62554 static bool overrun_detected;
62555 static unsigned long prev_overruns;
62556 -static atomic_t dropped_count;
62557 +static atomic_unchecked_t dropped_count;
62558
62559 static void mmio_reset_data(struct trace_array *tr)
62560 {
62561 @@ -126,7 +126,7 @@ static void mmio_close(struct trace_iter
62562
62563 static unsigned long count_overruns(struct trace_iterator *iter)
62564 {
62565 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
62566 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
62567 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
62568
62569 if (over > prev_overruns)
62570 @@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct
62571 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
62572 sizeof(*entry), 0, pc);
62573 if (!event) {
62574 - atomic_inc(&dropped_count);
62575 + atomic_inc_unchecked(&dropped_count);
62576 return;
62577 }
62578 entry = ring_buffer_event_data(event);
62579 @@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct
62580 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
62581 sizeof(*entry), 0, pc);
62582 if (!event) {
62583 - atomic_inc(&dropped_count);
62584 + atomic_inc_unchecked(&dropped_count);
62585 return;
62586 }
62587 entry = ring_buffer_event_data(event);
62588 diff -urNp linux-2.6.32.41/kernel/trace/trace_output.c linux-2.6.32.41/kernel/trace/trace_output.c
62589 --- linux-2.6.32.41/kernel/trace/trace_output.c 2011-03-27 14:31:47.000000000 -0400
62590 +++ linux-2.6.32.41/kernel/trace/trace_output.c 2011-04-17 15:56:46.000000000 -0400
62591 @@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s,
62592 return 0;
62593 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
62594 if (!IS_ERR(p)) {
62595 - p = mangle_path(s->buffer + s->len, p, "\n");
62596 + p = mangle_path(s->buffer + s->len, p, "\n\\");
62597 if (p) {
62598 s->len = p - s->buffer;
62599 return 1;
62600 diff -urNp linux-2.6.32.41/kernel/trace/trace_stack.c linux-2.6.32.41/kernel/trace/trace_stack.c
62601 --- linux-2.6.32.41/kernel/trace/trace_stack.c 2011-03-27 14:31:47.000000000 -0400
62602 +++ linux-2.6.32.41/kernel/trace/trace_stack.c 2011-04-17 15:56:46.000000000 -0400
62603 @@ -50,7 +50,7 @@ static inline void check_stack(void)
62604 return;
62605
62606 /* we do not handle interrupt stacks yet */
62607 - if (!object_is_on_stack(&this_size))
62608 + if (!object_starts_on_stack(&this_size))
62609 return;
62610
62611 local_irq_save(flags);
62612 diff -urNp linux-2.6.32.41/kernel/trace/trace_workqueue.c linux-2.6.32.41/kernel/trace/trace_workqueue.c
62613 --- linux-2.6.32.41/kernel/trace/trace_workqueue.c 2011-03-27 14:31:47.000000000 -0400
62614 +++ linux-2.6.32.41/kernel/trace/trace_workqueue.c 2011-04-17 15:56:46.000000000 -0400
62615 @@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
62616 int cpu;
62617 pid_t pid;
62618 /* Can be inserted from interrupt or user context, need to be atomic */
62619 - atomic_t inserted;
62620 + atomic_unchecked_t inserted;
62621 /*
62622 * Don't need to be atomic, works are serialized in a single workqueue thread
62623 * on a single CPU.
62624 @@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_st
62625 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
62626 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
62627 if (node->pid == wq_thread->pid) {
62628 - atomic_inc(&node->inserted);
62629 + atomic_inc_unchecked(&node->inserted);
62630 goto found;
62631 }
62632 }
62633 @@ -205,7 +205,7 @@ static int workqueue_stat_show(struct se
62634 tsk = get_pid_task(pid, PIDTYPE_PID);
62635 if (tsk) {
62636 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
62637 - atomic_read(&cws->inserted), cws->executed,
62638 + atomic_read_unchecked(&cws->inserted), cws->executed,
62639 tsk->comm);
62640 put_task_struct(tsk);
62641 }
62642 diff -urNp linux-2.6.32.41/kernel/user.c linux-2.6.32.41/kernel/user.c
62643 --- linux-2.6.32.41/kernel/user.c 2011-03-27 14:31:47.000000000 -0400
62644 +++ linux-2.6.32.41/kernel/user.c 2011-04-17 15:56:46.000000000 -0400
62645 @@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct use
62646 spin_lock_irq(&uidhash_lock);
62647 up = uid_hash_find(uid, hashent);
62648 if (up) {
62649 + put_user_ns(ns);
62650 key_put(new->uid_keyring);
62651 key_put(new->session_keyring);
62652 kmem_cache_free(uid_cachep, new);
62653 diff -urNp linux-2.6.32.41/lib/bug.c linux-2.6.32.41/lib/bug.c
62654 --- linux-2.6.32.41/lib/bug.c 2011-03-27 14:31:47.000000000 -0400
62655 +++ linux-2.6.32.41/lib/bug.c 2011-04-17 15:56:46.000000000 -0400
62656 @@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned l
62657 return BUG_TRAP_TYPE_NONE;
62658
62659 bug = find_bug(bugaddr);
62660 + if (!bug)
62661 + return BUG_TRAP_TYPE_NONE;
62662
62663 printk(KERN_EMERG "------------[ cut here ]------------\n");
62664
62665 diff -urNp linux-2.6.32.41/lib/debugobjects.c linux-2.6.32.41/lib/debugobjects.c
62666 --- linux-2.6.32.41/lib/debugobjects.c 2011-03-27 14:31:47.000000000 -0400
62667 +++ linux-2.6.32.41/lib/debugobjects.c 2011-04-17 15:56:46.000000000 -0400
62668 @@ -277,7 +277,7 @@ static void debug_object_is_on_stack(voi
62669 if (limit > 4)
62670 return;
62671
62672 - is_on_stack = object_is_on_stack(addr);
62673 + is_on_stack = object_starts_on_stack(addr);
62674 if (is_on_stack == onstack)
62675 return;
62676
62677 diff -urNp linux-2.6.32.41/lib/dma-debug.c linux-2.6.32.41/lib/dma-debug.c
62678 --- linux-2.6.32.41/lib/dma-debug.c 2011-03-27 14:31:47.000000000 -0400
62679 +++ linux-2.6.32.41/lib/dma-debug.c 2011-04-17 15:56:46.000000000 -0400
62680 @@ -861,7 +861,7 @@ out:
62681
62682 static void check_for_stack(struct device *dev, void *addr)
62683 {
62684 - if (object_is_on_stack(addr))
62685 + if (object_starts_on_stack(addr))
62686 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
62687 "stack [addr=%p]\n", addr);
62688 }
62689 diff -urNp linux-2.6.32.41/lib/idr.c linux-2.6.32.41/lib/idr.c
62690 --- linux-2.6.32.41/lib/idr.c 2011-03-27 14:31:47.000000000 -0400
62691 +++ linux-2.6.32.41/lib/idr.c 2011-04-17 15:56:46.000000000 -0400
62692 @@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, in
62693 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
62694
62695 /* if already at the top layer, we need to grow */
62696 - if (id >= 1 << (idp->layers * IDR_BITS)) {
62697 + if (id >= (1 << (idp->layers * IDR_BITS))) {
62698 *starting_id = id;
62699 return IDR_NEED_TO_GROW;
62700 }
62701 diff -urNp linux-2.6.32.41/lib/inflate.c linux-2.6.32.41/lib/inflate.c
62702 --- linux-2.6.32.41/lib/inflate.c 2011-03-27 14:31:47.000000000 -0400
62703 +++ linux-2.6.32.41/lib/inflate.c 2011-04-17 15:56:46.000000000 -0400
62704 @@ -266,7 +266,7 @@ static void free(void *where)
62705 malloc_ptr = free_mem_ptr;
62706 }
62707 #else
62708 -#define malloc(a) kmalloc(a, GFP_KERNEL)
62709 +#define malloc(a) kmalloc((a), GFP_KERNEL)
62710 #define free(a) kfree(a)
62711 #endif
62712
62713 diff -urNp linux-2.6.32.41/lib/Kconfig.debug linux-2.6.32.41/lib/Kconfig.debug
62714 --- linux-2.6.32.41/lib/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
62715 +++ linux-2.6.32.41/lib/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
62716 @@ -905,7 +905,7 @@ config LATENCYTOP
62717 select STACKTRACE
62718 select SCHEDSTATS
62719 select SCHED_DEBUG
62720 - depends on HAVE_LATENCYTOP_SUPPORT
62721 + depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
62722 help
62723 Enable this option if you want to use the LatencyTOP tool
62724 to find out which userspace is blocking on what kernel operations.
62725 diff -urNp linux-2.6.32.41/lib/kobject.c linux-2.6.32.41/lib/kobject.c
62726 --- linux-2.6.32.41/lib/kobject.c 2011-03-27 14:31:47.000000000 -0400
62727 +++ linux-2.6.32.41/lib/kobject.c 2011-04-17 15:56:46.000000000 -0400
62728 @@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct ko
62729 return ret;
62730 }
62731
62732 -struct sysfs_ops kobj_sysfs_ops = {
62733 +const struct sysfs_ops kobj_sysfs_ops = {
62734 .show = kobj_attr_show,
62735 .store = kobj_attr_store,
62736 };
62737 @@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
62738 * If the kset was not able to be created, NULL will be returned.
62739 */
62740 static struct kset *kset_create(const char *name,
62741 - struct kset_uevent_ops *uevent_ops,
62742 + const struct kset_uevent_ops *uevent_ops,
62743 struct kobject *parent_kobj)
62744 {
62745 struct kset *kset;
62746 @@ -832,7 +832,7 @@ static struct kset *kset_create(const ch
62747 * If the kset was not able to be created, NULL will be returned.
62748 */
62749 struct kset *kset_create_and_add(const char *name,
62750 - struct kset_uevent_ops *uevent_ops,
62751 + const struct kset_uevent_ops *uevent_ops,
62752 struct kobject *parent_kobj)
62753 {
62754 struct kset *kset;
62755 diff -urNp linux-2.6.32.41/lib/kobject_uevent.c linux-2.6.32.41/lib/kobject_uevent.c
62756 --- linux-2.6.32.41/lib/kobject_uevent.c 2011-03-27 14:31:47.000000000 -0400
62757 +++ linux-2.6.32.41/lib/kobject_uevent.c 2011-04-17 15:56:46.000000000 -0400
62758 @@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *k
62759 const char *subsystem;
62760 struct kobject *top_kobj;
62761 struct kset *kset;
62762 - struct kset_uevent_ops *uevent_ops;
62763 + const struct kset_uevent_ops *uevent_ops;
62764 u64 seq;
62765 int i = 0;
62766 int retval = 0;
62767 diff -urNp linux-2.6.32.41/lib/kref.c linux-2.6.32.41/lib/kref.c
62768 --- linux-2.6.32.41/lib/kref.c 2011-03-27 14:31:47.000000000 -0400
62769 +++ linux-2.6.32.41/lib/kref.c 2011-04-17 15:56:46.000000000 -0400
62770 @@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
62771 */
62772 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
62773 {
62774 - WARN_ON(release == NULL);
62775 + BUG_ON(release == NULL);
62776 WARN_ON(release == (void (*)(struct kref *))kfree);
62777
62778 if (atomic_dec_and_test(&kref->refcount)) {
62779 diff -urNp linux-2.6.32.41/lib/parser.c linux-2.6.32.41/lib/parser.c
62780 --- linux-2.6.32.41/lib/parser.c 2011-03-27 14:31:47.000000000 -0400
62781 +++ linux-2.6.32.41/lib/parser.c 2011-04-17 15:56:46.000000000 -0400
62782 @@ -126,7 +126,7 @@ static int match_number(substring_t *s,
62783 char *buf;
62784 int ret;
62785
62786 - buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
62787 + buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
62788 if (!buf)
62789 return -ENOMEM;
62790 memcpy(buf, s->from, s->to - s->from);
62791 diff -urNp linux-2.6.32.41/lib/radix-tree.c linux-2.6.32.41/lib/radix-tree.c
62792 --- linux-2.6.32.41/lib/radix-tree.c 2011-03-27 14:31:47.000000000 -0400
62793 +++ linux-2.6.32.41/lib/radix-tree.c 2011-04-17 15:56:46.000000000 -0400
62794 @@ -81,7 +81,7 @@ struct radix_tree_preload {
62795 int nr;
62796 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
62797 };
62798 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
62799 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
62800
62801 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
62802 {
62803 diff -urNp linux-2.6.32.41/lib/random32.c linux-2.6.32.41/lib/random32.c
62804 --- linux-2.6.32.41/lib/random32.c 2011-03-27 14:31:47.000000000 -0400
62805 +++ linux-2.6.32.41/lib/random32.c 2011-04-17 15:56:46.000000000 -0400
62806 @@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *
62807 */
62808 static inline u32 __seed(u32 x, u32 m)
62809 {
62810 - return (x < m) ? x + m : x;
62811 + return (x <= m) ? x + m + 1 : x;
62812 }
62813
62814 /**
62815 diff -urNp linux-2.6.32.41/lib/vsprintf.c linux-2.6.32.41/lib/vsprintf.c
62816 --- linux-2.6.32.41/lib/vsprintf.c 2011-03-27 14:31:47.000000000 -0400
62817 +++ linux-2.6.32.41/lib/vsprintf.c 2011-04-17 15:56:46.000000000 -0400
62818 @@ -16,6 +16,9 @@
62819 * - scnprintf and vscnprintf
62820 */
62821
62822 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62823 +#define __INCLUDED_BY_HIDESYM 1
62824 +#endif
62825 #include <stdarg.h>
62826 #include <linux/module.h>
62827 #include <linux/types.h>
62828 @@ -546,12 +549,12 @@ static char *number(char *buf, char *end
62829 return buf;
62830 }
62831
62832 -static char *string(char *buf, char *end, char *s, struct printf_spec spec)
62833 +static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
62834 {
62835 int len, i;
62836
62837 if ((unsigned long)s < PAGE_SIZE)
62838 - s = "<NULL>";
62839 + s = "(null)";
62840
62841 len = strnlen(s, spec.precision);
62842
62843 @@ -581,7 +584,7 @@ static char *symbol_string(char *buf, ch
62844 unsigned long value = (unsigned long) ptr;
62845 #ifdef CONFIG_KALLSYMS
62846 char sym[KSYM_SYMBOL_LEN];
62847 - if (ext != 'f' && ext != 's')
62848 + if (ext != 'f' && ext != 's' && ext != 'a')
62849 sprint_symbol(sym, value);
62850 else
62851 kallsyms_lookup(value, NULL, NULL, NULL, sym);
62852 @@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf,
62853 * - 'f' For simple symbolic function names without offset
62854 * - 'S' For symbolic direct pointers with offset
62855 * - 's' For symbolic direct pointers without offset
62856 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
62857 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
62858 * - 'R' For a struct resource pointer, it prints the range of
62859 * addresses (not the name nor the flags)
62860 * - 'M' For a 6-byte MAC address, it prints the address in the
62861 @@ -822,7 +827,7 @@ static char *pointer(const char *fmt, ch
62862 struct printf_spec spec)
62863 {
62864 if (!ptr)
62865 - return string(buf, end, "(null)", spec);
62866 + return string(buf, end, "(nil)", spec);
62867
62868 switch (*fmt) {
62869 case 'F':
62870 @@ -831,6 +836,14 @@ static char *pointer(const char *fmt, ch
62871 case 's':
62872 /* Fallthrough */
62873 case 'S':
62874 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62875 + break;
62876 +#else
62877 + return symbol_string(buf, end, ptr, spec, *fmt);
62878 +#endif
62879 + case 'a':
62880 + /* Fallthrough */
62881 + case 'A':
62882 return symbol_string(buf, end, ptr, spec, *fmt);
62883 case 'R':
62884 return resource_string(buf, end, ptr, spec);
62885 @@ -1445,7 +1458,7 @@ do { \
62886 size_t len;
62887 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
62888 || (unsigned long)save_str < PAGE_SIZE)
62889 - save_str = "<NULL>";
62890 + save_str = "(null)";
62891 len = strlen(save_str);
62892 if (str + len + 1 < end)
62893 memcpy(str, save_str, len + 1);
62894 @@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size,
62895 typeof(type) value; \
62896 if (sizeof(type) == 8) { \
62897 args = PTR_ALIGN(args, sizeof(u32)); \
62898 - *(u32 *)&value = *(u32 *)args; \
62899 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
62900 + *(u32 *)&value = *(const u32 *)args; \
62901 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
62902 } else { \
62903 args = PTR_ALIGN(args, sizeof(type)); \
62904 - value = *(typeof(type) *)args; \
62905 + value = *(const typeof(type) *)args; \
62906 } \
62907 args += sizeof(type); \
62908 value; \
62909 @@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size,
62910 const char *str_arg = args;
62911 size_t len = strlen(str_arg);
62912 args += len + 1;
62913 - str = string(str, end, (char *)str_arg, spec);
62914 + str = string(str, end, str_arg, spec);
62915 break;
62916 }
62917
62918 diff -urNp linux-2.6.32.41/localversion-grsec linux-2.6.32.41/localversion-grsec
62919 --- linux-2.6.32.41/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
62920 +++ linux-2.6.32.41/localversion-grsec 2011-04-17 15:56:46.000000000 -0400
62921 @@ -0,0 +1 @@
62922 +-grsec
62923 diff -urNp linux-2.6.32.41/Makefile linux-2.6.32.41/Makefile
62924 --- linux-2.6.32.41/Makefile 2011-05-23 16:56:59.000000000 -0400
62925 +++ linux-2.6.32.41/Makefile 2011-06-07 18:06:04.000000000 -0400
62926 @@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
62927
62928 HOSTCC = gcc
62929 HOSTCXX = g++
62930 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
62931 -HOSTCXXFLAGS = -O2
62932 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
62933 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
62934 +HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
62935
62936 # Decide whether to build built-in, modular, or both.
62937 # Normally, just do built-in.
62938 @@ -342,10 +343,12 @@ LINUXINCLUDE := -Iinclude \
62939 KBUILD_CPPFLAGS := -D__KERNEL__
62940
62941 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
62942 + -W -Wno-unused-parameter -Wno-missing-field-initializers \
62943 -fno-strict-aliasing -fno-common \
62944 -Werror-implicit-function-declaration \
62945 -Wno-format-security \
62946 -fno-delete-null-pointer-checks
62947 +KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
62948 KBUILD_AFLAGS := -D__ASSEMBLY__
62949
62950 # Read KERNELRELEASE from include/config/kernel.release (if it exists)
62951 @@ -403,7 +406,7 @@ endif
62952 # of make so .config is not included in this case either (for *config).
62953
62954 no-dot-config-targets := clean mrproper distclean \
62955 - cscope TAGS tags help %docs check% \
62956 + cscope gtags TAGS tags help %docs check% \
62957 include/linux/version.h headers_% \
62958 kernelrelease kernelversion
62959
62960 @@ -644,7 +647,7 @@ export mod_strip_cmd
62961
62962
62963 ifeq ($(KBUILD_EXTMOD),)
62964 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
62965 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
62966
62967 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
62968 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
62969 @@ -949,7 +952,19 @@ include/config/kernel.release: include/c
62970 # version.h and scripts_basic is processed / created.
62971
62972 # Listed in dependency order
62973 -PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3
62974 +PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3 pax-plugin
62975 +
62976 +ifeq ($(CONFIG_PAX_MEMORY_STACKLEAK),y)
62977 +KBUILD_CFLAGS += $(call cc-ifversion, -ge, 0405, -fplugin=$(objtree)/tools/gcc/pax_plugin.so -fplugin-arg-pax_plugin-track-lowest-sp=100)
62978 +endif
62979 +pax-plugin:
62980 +ifneq (,$(findstring pax_plugin, $(KBUILD_CFLAGS)))
62981 + $(Q)$(MAKE) $(build)=tools/gcc
62982 +else
62983 +ifeq ($(CONFIG_PAX_MEMORY_STACKLEAK),y)
62984 + $(Q)echo "warning, your gcc does not support plugins, PAX_MEMORY_STACKLEAK will be less secure"
62985 +endif
62986 +endif
62987
62988 # prepare3 is used to check if we are building in a separate output directory,
62989 # and if so do:
62990 @@ -970,7 +985,7 @@ ifneq ($(KBUILD_SRC),)
62991 endif
62992
62993 # prepare2 creates a makefile if using a separate output directory
62994 -prepare2: prepare3 outputmakefile
62995 +prepare2: prepare3 outputmakefile pax-plugin
62996
62997 prepare1: prepare2 include/linux/version.h include/linux/utsrelease.h \
62998 include/asm include/config/auto.conf
62999 @@ -1198,7 +1213,7 @@ MRPROPER_FILES += .config .config.old in
63000 include/linux/autoconf.h include/linux/version.h \
63001 include/linux/utsrelease.h \
63002 include/linux/bounds.h include/asm*/asm-offsets.h \
63003 - Module.symvers Module.markers tags TAGS cscope*
63004 + Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
63005
63006 # clean - Delete most, but leave enough to build external modules
63007 #
63008 @@ -1289,6 +1304,7 @@ help:
63009 @echo ' modules_prepare - Set up for building external modules'
63010 @echo ' tags/TAGS - Generate tags file for editors'
63011 @echo ' cscope - Generate cscope index'
63012 + @echo ' gtags - Generate GNU GLOBAL index'
63013 @echo ' kernelrelease - Output the release version string'
63014 @echo ' kernelversion - Output the version stored in Makefile'
63015 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
63016 @@ -1445,7 +1461,7 @@ endif # KBUILD_EXTMOD
63017 quiet_cmd_tags = GEN $@
63018 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
63019
63020 -tags TAGS cscope: FORCE
63021 +tags TAGS cscope gtags: FORCE
63022 $(call cmd,tags)
63023
63024 # Scripts to check various things for consistency
63025 diff -urNp linux-2.6.32.41/mm/backing-dev.c linux-2.6.32.41/mm/backing-dev.c
63026 --- linux-2.6.32.41/mm/backing-dev.c 2011-03-27 14:31:47.000000000 -0400
63027 +++ linux-2.6.32.41/mm/backing-dev.c 2011-05-04 17:56:28.000000000 -0400
63028 @@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rc
63029 * Add the default flusher task that gets created for any bdi
63030 * that has dirty data pending writeout
63031 */
63032 -void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
63033 +static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
63034 {
63035 if (!bdi_cap_writeback_dirty(bdi))
63036 return;
63037 diff -urNp linux-2.6.32.41/mm/filemap.c linux-2.6.32.41/mm/filemap.c
63038 --- linux-2.6.32.41/mm/filemap.c 2011-03-27 14:31:47.000000000 -0400
63039 +++ linux-2.6.32.41/mm/filemap.c 2011-04-17 15:56:46.000000000 -0400
63040 @@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file
63041 struct address_space *mapping = file->f_mapping;
63042
63043 if (!mapping->a_ops->readpage)
63044 - return -ENOEXEC;
63045 + return -ENODEV;
63046 file_accessed(file);
63047 vma->vm_ops = &generic_file_vm_ops;
63048 vma->vm_flags |= VM_CAN_NONLINEAR;
63049 @@ -2027,6 +2027,7 @@ inline int generic_write_checks(struct f
63050 *pos = i_size_read(inode);
63051
63052 if (limit != RLIM_INFINITY) {
63053 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
63054 if (*pos >= limit) {
63055 send_sig(SIGXFSZ, current, 0);
63056 return -EFBIG;
63057 diff -urNp linux-2.6.32.41/mm/fremap.c linux-2.6.32.41/mm/fremap.c
63058 --- linux-2.6.32.41/mm/fremap.c 2011-03-27 14:31:47.000000000 -0400
63059 +++ linux-2.6.32.41/mm/fremap.c 2011-04-17 15:56:46.000000000 -0400
63060 @@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
63061 retry:
63062 vma = find_vma(mm, start);
63063
63064 +#ifdef CONFIG_PAX_SEGMEXEC
63065 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
63066 + goto out;
63067 +#endif
63068 +
63069 /*
63070 * Make sure the vma is shared, that it supports prefaulting,
63071 * and that the remapped range is valid and fully within
63072 @@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
63073 /*
63074 * drop PG_Mlocked flag for over-mapped range
63075 */
63076 - unsigned int saved_flags = vma->vm_flags;
63077 + unsigned long saved_flags = vma->vm_flags;
63078 munlock_vma_pages_range(vma, start, start + size);
63079 vma->vm_flags = saved_flags;
63080 }
63081 diff -urNp linux-2.6.32.41/mm/highmem.c linux-2.6.32.41/mm/highmem.c
63082 --- linux-2.6.32.41/mm/highmem.c 2011-03-27 14:31:47.000000000 -0400
63083 +++ linux-2.6.32.41/mm/highmem.c 2011-04-17 15:56:46.000000000 -0400
63084 @@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
63085 * So no dangers, even with speculative execution.
63086 */
63087 page = pte_page(pkmap_page_table[i]);
63088 + pax_open_kernel();
63089 pte_clear(&init_mm, (unsigned long)page_address(page),
63090 &pkmap_page_table[i]);
63091 -
63092 + pax_close_kernel();
63093 set_page_address(page, NULL);
63094 need_flush = 1;
63095 }
63096 @@ -177,9 +178,11 @@ start:
63097 }
63098 }
63099 vaddr = PKMAP_ADDR(last_pkmap_nr);
63100 +
63101 + pax_open_kernel();
63102 set_pte_at(&init_mm, vaddr,
63103 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
63104 -
63105 + pax_close_kernel();
63106 pkmap_count[last_pkmap_nr] = 1;
63107 set_page_address(page, (void *)vaddr);
63108
63109 diff -urNp linux-2.6.32.41/mm/hugetlb.c linux-2.6.32.41/mm/hugetlb.c
63110 --- linux-2.6.32.41/mm/hugetlb.c 2011-03-27 14:31:47.000000000 -0400
63111 +++ linux-2.6.32.41/mm/hugetlb.c 2011-04-17 15:56:46.000000000 -0400
63112 @@ -1925,6 +1925,26 @@ static int unmap_ref_private(struct mm_s
63113 return 1;
63114 }
63115
63116 +#ifdef CONFIG_PAX_SEGMEXEC
63117 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
63118 +{
63119 + struct mm_struct *mm = vma->vm_mm;
63120 + struct vm_area_struct *vma_m;
63121 + unsigned long address_m;
63122 + pte_t *ptep_m;
63123 +
63124 + vma_m = pax_find_mirror_vma(vma);
63125 + if (!vma_m)
63126 + return;
63127 +
63128 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63129 + address_m = address + SEGMEXEC_TASK_SIZE;
63130 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
63131 + get_page(page_m);
63132 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
63133 +}
63134 +#endif
63135 +
63136 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
63137 unsigned long address, pte_t *ptep, pte_t pte,
63138 struct page *pagecache_page)
63139 @@ -1996,6 +2016,11 @@ retry_avoidcopy:
63140 huge_ptep_clear_flush(vma, address, ptep);
63141 set_huge_pte_at(mm, address, ptep,
63142 make_huge_pte(vma, new_page, 1));
63143 +
63144 +#ifdef CONFIG_PAX_SEGMEXEC
63145 + pax_mirror_huge_pte(vma, address, new_page);
63146 +#endif
63147 +
63148 /* Make the old page be freed below */
63149 new_page = old_page;
63150 }
63151 @@ -2127,6 +2152,10 @@ retry:
63152 && (vma->vm_flags & VM_SHARED)));
63153 set_huge_pte_at(mm, address, ptep, new_pte);
63154
63155 +#ifdef CONFIG_PAX_SEGMEXEC
63156 + pax_mirror_huge_pte(vma, address, page);
63157 +#endif
63158 +
63159 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
63160 /* Optimization, do the COW without a second fault */
63161 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
63162 @@ -2155,6 +2184,28 @@ int hugetlb_fault(struct mm_struct *mm,
63163 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
63164 struct hstate *h = hstate_vma(vma);
63165
63166 +#ifdef CONFIG_PAX_SEGMEXEC
63167 + struct vm_area_struct *vma_m;
63168 +
63169 + vma_m = pax_find_mirror_vma(vma);
63170 + if (vma_m) {
63171 + unsigned long address_m;
63172 +
63173 + if (vma->vm_start > vma_m->vm_start) {
63174 + address_m = address;
63175 + address -= SEGMEXEC_TASK_SIZE;
63176 + vma = vma_m;
63177 + h = hstate_vma(vma);
63178 + } else
63179 + address_m = address + SEGMEXEC_TASK_SIZE;
63180 +
63181 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
63182 + return VM_FAULT_OOM;
63183 + address_m &= HPAGE_MASK;
63184 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
63185 + }
63186 +#endif
63187 +
63188 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
63189 if (!ptep)
63190 return VM_FAULT_OOM;
63191 diff -urNp linux-2.6.32.41/mm/Kconfig linux-2.6.32.41/mm/Kconfig
63192 --- linux-2.6.32.41/mm/Kconfig 2011-03-27 14:31:47.000000000 -0400
63193 +++ linux-2.6.32.41/mm/Kconfig 2011-04-17 15:56:46.000000000 -0400
63194 @@ -228,7 +228,7 @@ config KSM
63195 config DEFAULT_MMAP_MIN_ADDR
63196 int "Low address space to protect from user allocation"
63197 depends on MMU
63198 - default 4096
63199 + default 65536
63200 help
63201 This is the portion of low virtual memory which should be protected
63202 from userspace allocation. Keeping a user from writing to low pages
63203 diff -urNp linux-2.6.32.41/mm/kmemleak.c linux-2.6.32.41/mm/kmemleak.c
63204 --- linux-2.6.32.41/mm/kmemleak.c 2011-03-27 14:31:47.000000000 -0400
63205 +++ linux-2.6.32.41/mm/kmemleak.c 2011-04-17 15:56:46.000000000 -0400
63206 @@ -358,7 +358,7 @@ static void print_unreferenced(struct se
63207
63208 for (i = 0; i < object->trace_len; i++) {
63209 void *ptr = (void *)object->trace[i];
63210 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
63211 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
63212 }
63213 }
63214
63215 diff -urNp linux-2.6.32.41/mm/maccess.c linux-2.6.32.41/mm/maccess.c
63216 --- linux-2.6.32.41/mm/maccess.c 2011-03-27 14:31:47.000000000 -0400
63217 +++ linux-2.6.32.41/mm/maccess.c 2011-04-17 15:56:46.000000000 -0400
63218 @@ -14,7 +14,7 @@
63219 * Safely read from address @src to the buffer at @dst. If a kernel fault
63220 * happens, handle that and return -EFAULT.
63221 */
63222 -long probe_kernel_read(void *dst, void *src, size_t size)
63223 +long probe_kernel_read(void *dst, const void *src, size_t size)
63224 {
63225 long ret;
63226 mm_segment_t old_fs = get_fs();
63227 @@ -39,7 +39,7 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
63228 * Safely write to address @dst from the buffer at @src. If a kernel fault
63229 * happens, handle that and return -EFAULT.
63230 */
63231 -long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
63232 +long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
63233 {
63234 long ret;
63235 mm_segment_t old_fs = get_fs();
63236 diff -urNp linux-2.6.32.41/mm/madvise.c linux-2.6.32.41/mm/madvise.c
63237 --- linux-2.6.32.41/mm/madvise.c 2011-03-27 14:31:47.000000000 -0400
63238 +++ linux-2.6.32.41/mm/madvise.c 2011-04-17 15:56:46.000000000 -0400
63239 @@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_a
63240 pgoff_t pgoff;
63241 unsigned long new_flags = vma->vm_flags;
63242
63243 +#ifdef CONFIG_PAX_SEGMEXEC
63244 + struct vm_area_struct *vma_m;
63245 +#endif
63246 +
63247 switch (behavior) {
63248 case MADV_NORMAL:
63249 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
63250 @@ -103,6 +107,13 @@ success:
63251 /*
63252 * vm_flags is protected by the mmap_sem held in write mode.
63253 */
63254 +
63255 +#ifdef CONFIG_PAX_SEGMEXEC
63256 + vma_m = pax_find_mirror_vma(vma);
63257 + if (vma_m)
63258 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
63259 +#endif
63260 +
63261 vma->vm_flags = new_flags;
63262
63263 out:
63264 @@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_a
63265 struct vm_area_struct ** prev,
63266 unsigned long start, unsigned long end)
63267 {
63268 +
63269 +#ifdef CONFIG_PAX_SEGMEXEC
63270 + struct vm_area_struct *vma_m;
63271 +#endif
63272 +
63273 *prev = vma;
63274 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
63275 return -EINVAL;
63276 @@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_a
63277 zap_page_range(vma, start, end - start, &details);
63278 } else
63279 zap_page_range(vma, start, end - start, NULL);
63280 +
63281 +#ifdef CONFIG_PAX_SEGMEXEC
63282 + vma_m = pax_find_mirror_vma(vma);
63283 + if (vma_m) {
63284 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
63285 + struct zap_details details = {
63286 + .nonlinear_vma = vma_m,
63287 + .last_index = ULONG_MAX,
63288 + };
63289 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
63290 + } else
63291 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
63292 + }
63293 +#endif
63294 +
63295 return 0;
63296 }
63297
63298 @@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
63299 if (end < start)
63300 goto out;
63301
63302 +#ifdef CONFIG_PAX_SEGMEXEC
63303 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
63304 + if (end > SEGMEXEC_TASK_SIZE)
63305 + goto out;
63306 + } else
63307 +#endif
63308 +
63309 + if (end > TASK_SIZE)
63310 + goto out;
63311 +
63312 error = 0;
63313 if (end == start)
63314 goto out;
63315 diff -urNp linux-2.6.32.41/mm/memory.c linux-2.6.32.41/mm/memory.c
63316 --- linux-2.6.32.41/mm/memory.c 2011-03-27 14:31:47.000000000 -0400
63317 +++ linux-2.6.32.41/mm/memory.c 2011-04-17 15:56:46.000000000 -0400
63318 @@ -187,8 +187,12 @@ static inline void free_pmd_range(struct
63319 return;
63320
63321 pmd = pmd_offset(pud, start);
63322 +
63323 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
63324 pud_clear(pud);
63325 pmd_free_tlb(tlb, pmd, start);
63326 +#endif
63327 +
63328 }
63329
63330 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
63331 @@ -219,9 +223,12 @@ static inline void free_pud_range(struct
63332 if (end - 1 > ceiling - 1)
63333 return;
63334
63335 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
63336 pud = pud_offset(pgd, start);
63337 pgd_clear(pgd);
63338 pud_free_tlb(tlb, pud, start);
63339 +#endif
63340 +
63341 }
63342
63343 /*
63344 @@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct
63345 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
63346 i = 0;
63347
63348 - do {
63349 + while (nr_pages) {
63350 struct vm_area_struct *vma;
63351
63352 - vma = find_extend_vma(mm, start);
63353 + vma = find_vma(mm, start);
63354 if (!vma && in_gate_area(tsk, start)) {
63355 unsigned long pg = start & PAGE_MASK;
63356 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
63357 @@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct
63358 continue;
63359 }
63360
63361 - if (!vma ||
63362 + if (!vma || start < vma->vm_start ||
63363 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
63364 !(vm_flags & vma->vm_flags))
63365 return i ? : -EFAULT;
63366 @@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct
63367 start += PAGE_SIZE;
63368 nr_pages--;
63369 } while (nr_pages && start < vma->vm_end);
63370 - } while (nr_pages);
63371 + }
63372 return i;
63373 }
63374
63375 @@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_st
63376 page_add_file_rmap(page);
63377 set_pte_at(mm, addr, pte, mk_pte(page, prot));
63378
63379 +#ifdef CONFIG_PAX_SEGMEXEC
63380 + pax_mirror_file_pte(vma, addr, page, ptl);
63381 +#endif
63382 +
63383 retval = 0;
63384 pte_unmap_unlock(pte, ptl);
63385 return retval;
63386 @@ -1560,10 +1571,22 @@ out:
63387 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
63388 struct page *page)
63389 {
63390 +
63391 +#ifdef CONFIG_PAX_SEGMEXEC
63392 + struct vm_area_struct *vma_m;
63393 +#endif
63394 +
63395 if (addr < vma->vm_start || addr >= vma->vm_end)
63396 return -EFAULT;
63397 if (!page_count(page))
63398 return -EINVAL;
63399 +
63400 +#ifdef CONFIG_PAX_SEGMEXEC
63401 + vma_m = pax_find_mirror_vma(vma);
63402 + if (vma_m)
63403 + vma_m->vm_flags |= VM_INSERTPAGE;
63404 +#endif
63405 +
63406 vma->vm_flags |= VM_INSERTPAGE;
63407 return insert_page(vma, addr, page, vma->vm_page_prot);
63408 }
63409 @@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struc
63410 unsigned long pfn)
63411 {
63412 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
63413 + BUG_ON(vma->vm_mirror);
63414
63415 if (addr < vma->vm_start || addr >= vma->vm_end)
63416 return -EFAULT;
63417 @@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct
63418 copy_user_highpage(dst, src, va, vma);
63419 }
63420
63421 +#ifdef CONFIG_PAX_SEGMEXEC
63422 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
63423 +{
63424 + struct mm_struct *mm = vma->vm_mm;
63425 + spinlock_t *ptl;
63426 + pte_t *pte, entry;
63427 +
63428 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
63429 + entry = *pte;
63430 + if (!pte_present(entry)) {
63431 + if (!pte_none(entry)) {
63432 + BUG_ON(pte_file(entry));
63433 + free_swap_and_cache(pte_to_swp_entry(entry));
63434 + pte_clear_not_present_full(mm, address, pte, 0);
63435 + }
63436 + } else {
63437 + struct page *page;
63438 +
63439 + flush_cache_page(vma, address, pte_pfn(entry));
63440 + entry = ptep_clear_flush(vma, address, pte);
63441 + BUG_ON(pte_dirty(entry));
63442 + page = vm_normal_page(vma, address, entry);
63443 + if (page) {
63444 + update_hiwater_rss(mm);
63445 + if (PageAnon(page))
63446 + dec_mm_counter(mm, anon_rss);
63447 + else
63448 + dec_mm_counter(mm, file_rss);
63449 + page_remove_rmap(page);
63450 + page_cache_release(page);
63451 + }
63452 + }
63453 + pte_unmap_unlock(pte, ptl);
63454 +}
63455 +
63456 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
63457 + *
63458 + * the ptl of the lower mapped page is held on entry and is not released on exit
63459 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
63460 + */
63461 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
63462 +{
63463 + struct mm_struct *mm = vma->vm_mm;
63464 + unsigned long address_m;
63465 + spinlock_t *ptl_m;
63466 + struct vm_area_struct *vma_m;
63467 + pmd_t *pmd_m;
63468 + pte_t *pte_m, entry_m;
63469 +
63470 + BUG_ON(!page_m || !PageAnon(page_m));
63471 +
63472 + vma_m = pax_find_mirror_vma(vma);
63473 + if (!vma_m)
63474 + return;
63475 +
63476 + BUG_ON(!PageLocked(page_m));
63477 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63478 + address_m = address + SEGMEXEC_TASK_SIZE;
63479 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
63480 + pte_m = pte_offset_map_nested(pmd_m, address_m);
63481 + ptl_m = pte_lockptr(mm, pmd_m);
63482 + if (ptl != ptl_m) {
63483 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
63484 + if (!pte_none(*pte_m))
63485 + goto out;
63486 + }
63487 +
63488 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
63489 + page_cache_get(page_m);
63490 + page_add_anon_rmap(page_m, vma_m, address_m);
63491 + inc_mm_counter(mm, anon_rss);
63492 + set_pte_at(mm, address_m, pte_m, entry_m);
63493 + update_mmu_cache(vma_m, address_m, entry_m);
63494 +out:
63495 + if (ptl != ptl_m)
63496 + spin_unlock(ptl_m);
63497 + pte_unmap_nested(pte_m);
63498 + unlock_page(page_m);
63499 +}
63500 +
63501 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
63502 +{
63503 + struct mm_struct *mm = vma->vm_mm;
63504 + unsigned long address_m;
63505 + spinlock_t *ptl_m;
63506 + struct vm_area_struct *vma_m;
63507 + pmd_t *pmd_m;
63508 + pte_t *pte_m, entry_m;
63509 +
63510 + BUG_ON(!page_m || PageAnon(page_m));
63511 +
63512 + vma_m = pax_find_mirror_vma(vma);
63513 + if (!vma_m)
63514 + return;
63515 +
63516 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63517 + address_m = address + SEGMEXEC_TASK_SIZE;
63518 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
63519 + pte_m = pte_offset_map_nested(pmd_m, address_m);
63520 + ptl_m = pte_lockptr(mm, pmd_m);
63521 + if (ptl != ptl_m) {
63522 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
63523 + if (!pte_none(*pte_m))
63524 + goto out;
63525 + }
63526 +
63527 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
63528 + page_cache_get(page_m);
63529 + page_add_file_rmap(page_m);
63530 + inc_mm_counter(mm, file_rss);
63531 + set_pte_at(mm, address_m, pte_m, entry_m);
63532 + update_mmu_cache(vma_m, address_m, entry_m);
63533 +out:
63534 + if (ptl != ptl_m)
63535 + spin_unlock(ptl_m);
63536 + pte_unmap_nested(pte_m);
63537 +}
63538 +
63539 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
63540 +{
63541 + struct mm_struct *mm = vma->vm_mm;
63542 + unsigned long address_m;
63543 + spinlock_t *ptl_m;
63544 + struct vm_area_struct *vma_m;
63545 + pmd_t *pmd_m;
63546 + pte_t *pte_m, entry_m;
63547 +
63548 + vma_m = pax_find_mirror_vma(vma);
63549 + if (!vma_m)
63550 + return;
63551 +
63552 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63553 + address_m = address + SEGMEXEC_TASK_SIZE;
63554 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
63555 + pte_m = pte_offset_map_nested(pmd_m, address_m);
63556 + ptl_m = pte_lockptr(mm, pmd_m);
63557 + if (ptl != ptl_m) {
63558 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
63559 + if (!pte_none(*pte_m))
63560 + goto out;
63561 + }
63562 +
63563 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
63564 + set_pte_at(mm, address_m, pte_m, entry_m);
63565 +out:
63566 + if (ptl != ptl_m)
63567 + spin_unlock(ptl_m);
63568 + pte_unmap_nested(pte_m);
63569 +}
63570 +
63571 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
63572 +{
63573 + struct page *page_m;
63574 + pte_t entry;
63575 +
63576 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
63577 + goto out;
63578 +
63579 + entry = *pte;
63580 + page_m = vm_normal_page(vma, address, entry);
63581 + if (!page_m)
63582 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
63583 + else if (PageAnon(page_m)) {
63584 + if (pax_find_mirror_vma(vma)) {
63585 + pte_unmap_unlock(pte, ptl);
63586 + lock_page(page_m);
63587 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
63588 + if (pte_same(entry, *pte))
63589 + pax_mirror_anon_pte(vma, address, page_m, ptl);
63590 + else
63591 + unlock_page(page_m);
63592 + }
63593 + } else
63594 + pax_mirror_file_pte(vma, address, page_m, ptl);
63595 +
63596 +out:
63597 + pte_unmap_unlock(pte, ptl);
63598 +}
63599 +#endif
63600 +
63601 /*
63602 * This routine handles present pages, when users try to write
63603 * to a shared page. It is done by copying the page to a new address
63604 @@ -2156,6 +2360,12 @@ gotten:
63605 */
63606 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
63607 if (likely(pte_same(*page_table, orig_pte))) {
63608 +
63609 +#ifdef CONFIG_PAX_SEGMEXEC
63610 + if (pax_find_mirror_vma(vma))
63611 + BUG_ON(!trylock_page(new_page));
63612 +#endif
63613 +
63614 if (old_page) {
63615 if (!PageAnon(old_page)) {
63616 dec_mm_counter(mm, file_rss);
63617 @@ -2207,6 +2417,10 @@ gotten:
63618 page_remove_rmap(old_page);
63619 }
63620
63621 +#ifdef CONFIG_PAX_SEGMEXEC
63622 + pax_mirror_anon_pte(vma, address, new_page, ptl);
63623 +#endif
63624 +
63625 /* Free the old page.. */
63626 new_page = old_page;
63627 ret |= VM_FAULT_WRITE;
63628 @@ -2604,6 +2818,11 @@ static int do_swap_page(struct mm_struct
63629 swap_free(entry);
63630 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
63631 try_to_free_swap(page);
63632 +
63633 +#ifdef CONFIG_PAX_SEGMEXEC
63634 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
63635 +#endif
63636 +
63637 unlock_page(page);
63638
63639 if (flags & FAULT_FLAG_WRITE) {
63640 @@ -2615,6 +2834,11 @@ static int do_swap_page(struct mm_struct
63641
63642 /* No need to invalidate - it was non-present before */
63643 update_mmu_cache(vma, address, pte);
63644 +
63645 +#ifdef CONFIG_PAX_SEGMEXEC
63646 + pax_mirror_anon_pte(vma, address, page, ptl);
63647 +#endif
63648 +
63649 unlock:
63650 pte_unmap_unlock(page_table, ptl);
63651 out:
63652 @@ -2630,40 +2854,6 @@ out_release:
63653 }
63654
63655 /*
63656 - * This is like a special single-page "expand_{down|up}wards()",
63657 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
63658 - * doesn't hit another vma.
63659 - */
63660 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
63661 -{
63662 - address &= PAGE_MASK;
63663 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
63664 - struct vm_area_struct *prev = vma->vm_prev;
63665 -
63666 - /*
63667 - * Is there a mapping abutting this one below?
63668 - *
63669 - * That's only ok if it's the same stack mapping
63670 - * that has gotten split..
63671 - */
63672 - if (prev && prev->vm_end == address)
63673 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
63674 -
63675 - expand_stack(vma, address - PAGE_SIZE);
63676 - }
63677 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
63678 - struct vm_area_struct *next = vma->vm_next;
63679 -
63680 - /* As VM_GROWSDOWN but s/below/above/ */
63681 - if (next && next->vm_start == address + PAGE_SIZE)
63682 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
63683 -
63684 - expand_upwards(vma, address + PAGE_SIZE);
63685 - }
63686 - return 0;
63687 -}
63688 -
63689 -/*
63690 * We enter with non-exclusive mmap_sem (to exclude vma changes,
63691 * but allow concurrent faults), and pte mapped but not yet locked.
63692 * We return with mmap_sem still held, but pte unmapped and unlocked.
63693 @@ -2672,27 +2862,23 @@ static int do_anonymous_page(struct mm_s
63694 unsigned long address, pte_t *page_table, pmd_t *pmd,
63695 unsigned int flags)
63696 {
63697 - struct page *page;
63698 + struct page *page = NULL;
63699 spinlock_t *ptl;
63700 pte_t entry;
63701
63702 - pte_unmap(page_table);
63703 -
63704 - /* Check if we need to add a guard page to the stack */
63705 - if (check_stack_guard_page(vma, address) < 0)
63706 - return VM_FAULT_SIGBUS;
63707 -
63708 - /* Use the zero-page for reads */
63709 if (!(flags & FAULT_FLAG_WRITE)) {
63710 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
63711 vma->vm_page_prot));
63712 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
63713 + ptl = pte_lockptr(mm, pmd);
63714 + spin_lock(ptl);
63715 if (!pte_none(*page_table))
63716 goto unlock;
63717 goto setpte;
63718 }
63719
63720 /* Allocate our own private page. */
63721 + pte_unmap(page_table);
63722 +
63723 if (unlikely(anon_vma_prepare(vma)))
63724 goto oom;
63725 page = alloc_zeroed_user_highpage_movable(vma, address);
63726 @@ -2711,6 +2897,11 @@ static int do_anonymous_page(struct mm_s
63727 if (!pte_none(*page_table))
63728 goto release;
63729
63730 +#ifdef CONFIG_PAX_SEGMEXEC
63731 + if (pax_find_mirror_vma(vma))
63732 + BUG_ON(!trylock_page(page));
63733 +#endif
63734 +
63735 inc_mm_counter(mm, anon_rss);
63736 page_add_new_anon_rmap(page, vma, address);
63737 setpte:
63738 @@ -2718,6 +2909,12 @@ setpte:
63739
63740 /* No need to invalidate - it was non-present before */
63741 update_mmu_cache(vma, address, entry);
63742 +
63743 +#ifdef CONFIG_PAX_SEGMEXEC
63744 + if (page)
63745 + pax_mirror_anon_pte(vma, address, page, ptl);
63746 +#endif
63747 +
63748 unlock:
63749 pte_unmap_unlock(page_table, ptl);
63750 return 0;
63751 @@ -2860,6 +3057,12 @@ static int __do_fault(struct mm_struct *
63752 */
63753 /* Only go through if we didn't race with anybody else... */
63754 if (likely(pte_same(*page_table, orig_pte))) {
63755 +
63756 +#ifdef CONFIG_PAX_SEGMEXEC
63757 + if (anon && pax_find_mirror_vma(vma))
63758 + BUG_ON(!trylock_page(page));
63759 +#endif
63760 +
63761 flush_icache_page(vma, page);
63762 entry = mk_pte(page, vma->vm_page_prot);
63763 if (flags & FAULT_FLAG_WRITE)
63764 @@ -2879,6 +3082,14 @@ static int __do_fault(struct mm_struct *
63765
63766 /* no need to invalidate: a not-present page won't be cached */
63767 update_mmu_cache(vma, address, entry);
63768 +
63769 +#ifdef CONFIG_PAX_SEGMEXEC
63770 + if (anon)
63771 + pax_mirror_anon_pte(vma, address, page, ptl);
63772 + else
63773 + pax_mirror_file_pte(vma, address, page, ptl);
63774 +#endif
63775 +
63776 } else {
63777 if (charged)
63778 mem_cgroup_uncharge_page(page);
63779 @@ -3026,6 +3237,12 @@ static inline int handle_pte_fault(struc
63780 if (flags & FAULT_FLAG_WRITE)
63781 flush_tlb_page(vma, address);
63782 }
63783 +
63784 +#ifdef CONFIG_PAX_SEGMEXEC
63785 + pax_mirror_pte(vma, address, pte, pmd, ptl);
63786 + return 0;
63787 +#endif
63788 +
63789 unlock:
63790 pte_unmap_unlock(pte, ptl);
63791 return 0;
63792 @@ -3042,6 +3259,10 @@ int handle_mm_fault(struct mm_struct *mm
63793 pmd_t *pmd;
63794 pte_t *pte;
63795
63796 +#ifdef CONFIG_PAX_SEGMEXEC
63797 + struct vm_area_struct *vma_m;
63798 +#endif
63799 +
63800 __set_current_state(TASK_RUNNING);
63801
63802 count_vm_event(PGFAULT);
63803 @@ -3049,6 +3270,34 @@ int handle_mm_fault(struct mm_struct *mm
63804 if (unlikely(is_vm_hugetlb_page(vma)))
63805 return hugetlb_fault(mm, vma, address, flags);
63806
63807 +#ifdef CONFIG_PAX_SEGMEXEC
63808 + vma_m = pax_find_mirror_vma(vma);
63809 + if (vma_m) {
63810 + unsigned long address_m;
63811 + pgd_t *pgd_m;
63812 + pud_t *pud_m;
63813 + pmd_t *pmd_m;
63814 +
63815 + if (vma->vm_start > vma_m->vm_start) {
63816 + address_m = address;
63817 + address -= SEGMEXEC_TASK_SIZE;
63818 + vma = vma_m;
63819 + } else
63820 + address_m = address + SEGMEXEC_TASK_SIZE;
63821 +
63822 + pgd_m = pgd_offset(mm, address_m);
63823 + pud_m = pud_alloc(mm, pgd_m, address_m);
63824 + if (!pud_m)
63825 + return VM_FAULT_OOM;
63826 + pmd_m = pmd_alloc(mm, pud_m, address_m);
63827 + if (!pmd_m)
63828 + return VM_FAULT_OOM;
63829 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
63830 + return VM_FAULT_OOM;
63831 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
63832 + }
63833 +#endif
63834 +
63835 pgd = pgd_offset(mm, address);
63836 pud = pud_alloc(mm, pgd, address);
63837 if (!pud)
63838 @@ -3146,7 +3395,7 @@ static int __init gate_vma_init(void)
63839 gate_vma.vm_start = FIXADDR_USER_START;
63840 gate_vma.vm_end = FIXADDR_USER_END;
63841 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
63842 - gate_vma.vm_page_prot = __P101;
63843 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
63844 /*
63845 * Make sure the vDSO gets into every core dump.
63846 * Dumping its contents makes post-mortem fully interpretable later
63847 diff -urNp linux-2.6.32.41/mm/memory-failure.c linux-2.6.32.41/mm/memory-failure.c
63848 --- linux-2.6.32.41/mm/memory-failure.c 2011-03-27 14:31:47.000000000 -0400
63849 +++ linux-2.6.32.41/mm/memory-failure.c 2011-04-17 15:56:46.000000000 -0400
63850 @@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __r
63851
63852 int sysctl_memory_failure_recovery __read_mostly = 1;
63853
63854 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
63855 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
63856
63857 /*
63858 * Send all the processes who have the page mapped an ``action optional''
63859 @@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn,
63860 return 0;
63861 }
63862
63863 - atomic_long_add(1, &mce_bad_pages);
63864 + atomic_long_add_unchecked(1, &mce_bad_pages);
63865
63866 /*
63867 * We need/can do nothing about count=0 pages.
63868 diff -urNp linux-2.6.32.41/mm/mempolicy.c linux-2.6.32.41/mm/mempolicy.c
63869 --- linux-2.6.32.41/mm/mempolicy.c 2011-03-27 14:31:47.000000000 -0400
63870 +++ linux-2.6.32.41/mm/mempolicy.c 2011-04-17 15:56:46.000000000 -0400
63871 @@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_st
63872 struct vm_area_struct *next;
63873 int err;
63874
63875 +#ifdef CONFIG_PAX_SEGMEXEC
63876 + struct vm_area_struct *vma_m;
63877 +#endif
63878 +
63879 err = 0;
63880 for (; vma && vma->vm_start < end; vma = next) {
63881 next = vma->vm_next;
63882 @@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_st
63883 err = policy_vma(vma, new);
63884 if (err)
63885 break;
63886 +
63887 +#ifdef CONFIG_PAX_SEGMEXEC
63888 + vma_m = pax_find_mirror_vma(vma);
63889 + if (vma_m) {
63890 + err = policy_vma(vma_m, new);
63891 + if (err)
63892 + break;
63893 + }
63894 +#endif
63895 +
63896 }
63897 return err;
63898 }
63899 @@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start
63900
63901 if (end < start)
63902 return -EINVAL;
63903 +
63904 +#ifdef CONFIG_PAX_SEGMEXEC
63905 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
63906 + if (end > SEGMEXEC_TASK_SIZE)
63907 + return -EINVAL;
63908 + } else
63909 +#endif
63910 +
63911 + if (end > TASK_SIZE)
63912 + return -EINVAL;
63913 +
63914 if (end == start)
63915 return 0;
63916
63917 @@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
63918 if (!mm)
63919 return -EINVAL;
63920
63921 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63922 + if (mm != current->mm &&
63923 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
63924 + err = -EPERM;
63925 + goto out;
63926 + }
63927 +#endif
63928 +
63929 /*
63930 * Check if this process has the right to modify the specified
63931 * process. The right exists if the process has administrative
63932 @@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
63933 rcu_read_lock();
63934 tcred = __task_cred(task);
63935 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
63936 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
63937 - !capable(CAP_SYS_NICE)) {
63938 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
63939 rcu_read_unlock();
63940 err = -EPERM;
63941 goto out;
63942 @@ -2396,7 +2428,7 @@ int show_numa_map(struct seq_file *m, vo
63943
63944 if (file) {
63945 seq_printf(m, " file=");
63946 - seq_path(m, &file->f_path, "\n\t= ");
63947 + seq_path(m, &file->f_path, "\n\t\\= ");
63948 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
63949 seq_printf(m, " heap");
63950 } else if (vma->vm_start <= mm->start_stack &&
63951 diff -urNp linux-2.6.32.41/mm/migrate.c linux-2.6.32.41/mm/migrate.c
63952 --- linux-2.6.32.41/mm/migrate.c 2011-03-27 14:31:47.000000000 -0400
63953 +++ linux-2.6.32.41/mm/migrate.c 2011-05-16 21:46:57.000000000 -0400
63954 @@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struc
63955 unsigned long chunk_start;
63956 int err;
63957
63958 + pax_track_stack();
63959 +
63960 task_nodes = cpuset_mems_allowed(task);
63961
63962 err = -ENOMEM;
63963 @@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
63964 if (!mm)
63965 return -EINVAL;
63966
63967 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63968 + if (mm != current->mm &&
63969 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
63970 + err = -EPERM;
63971 + goto out;
63972 + }
63973 +#endif
63974 +
63975 /*
63976 * Check if this process has the right to modify the specified
63977 * process. The right exists if the process has administrative
63978 @@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
63979 rcu_read_lock();
63980 tcred = __task_cred(task);
63981 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
63982 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
63983 - !capable(CAP_SYS_NICE)) {
63984 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
63985 rcu_read_unlock();
63986 err = -EPERM;
63987 goto out;
63988 diff -urNp linux-2.6.32.41/mm/mlock.c linux-2.6.32.41/mm/mlock.c
63989 --- linux-2.6.32.41/mm/mlock.c 2011-03-27 14:31:47.000000000 -0400
63990 +++ linux-2.6.32.41/mm/mlock.c 2011-04-17 15:56:46.000000000 -0400
63991 @@ -13,6 +13,7 @@
63992 #include <linux/pagemap.h>
63993 #include <linux/mempolicy.h>
63994 #include <linux/syscalls.h>
63995 +#include <linux/security.h>
63996 #include <linux/sched.h>
63997 #include <linux/module.h>
63998 #include <linux/rmap.h>
63999 @@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
64000 }
64001 }
64002
64003 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
64004 -{
64005 - return (vma->vm_flags & VM_GROWSDOWN) &&
64006 - (vma->vm_start == addr) &&
64007 - !vma_stack_continue(vma->vm_prev, addr);
64008 -}
64009 -
64010 /**
64011 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
64012 * @vma: target vma
64013 @@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(stru
64014 if (vma->vm_flags & VM_WRITE)
64015 gup_flags |= FOLL_WRITE;
64016
64017 - /* We don't try to access the guard page of a stack vma */
64018 - if (stack_guard_page(vma, start)) {
64019 - addr += PAGE_SIZE;
64020 - nr_pages--;
64021 - }
64022 -
64023 while (nr_pages > 0) {
64024 int i;
64025
64026 @@ -440,7 +428,7 @@ static int do_mlock(unsigned long start,
64027 {
64028 unsigned long nstart, end, tmp;
64029 struct vm_area_struct * vma, * prev;
64030 - int error;
64031 + int error = -EINVAL;
64032
64033 len = PAGE_ALIGN(len);
64034 end = start + len;
64035 @@ -448,6 +436,9 @@ static int do_mlock(unsigned long start,
64036 return -EINVAL;
64037 if (end == start)
64038 return 0;
64039 + if (end > TASK_SIZE)
64040 + return -EINVAL;
64041 +
64042 vma = find_vma_prev(current->mm, start, &prev);
64043 if (!vma || vma->vm_start > start)
64044 return -ENOMEM;
64045 @@ -458,6 +449,11 @@ static int do_mlock(unsigned long start,
64046 for (nstart = start ; ; ) {
64047 unsigned int newflags;
64048
64049 +#ifdef CONFIG_PAX_SEGMEXEC
64050 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
64051 + break;
64052 +#endif
64053 +
64054 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
64055
64056 newflags = vma->vm_flags | VM_LOCKED;
64057 @@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
64058 lock_limit >>= PAGE_SHIFT;
64059
64060 /* check against resource limits */
64061 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
64062 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
64063 error = do_mlock(start, len, 1);
64064 up_write(&current->mm->mmap_sem);
64065 @@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
64066 static int do_mlockall(int flags)
64067 {
64068 struct vm_area_struct * vma, * prev = NULL;
64069 - unsigned int def_flags = 0;
64070
64071 if (flags & MCL_FUTURE)
64072 - def_flags = VM_LOCKED;
64073 - current->mm->def_flags = def_flags;
64074 + current->mm->def_flags |= VM_LOCKED;
64075 + else
64076 + current->mm->def_flags &= ~VM_LOCKED;
64077 if (flags == MCL_FUTURE)
64078 goto out;
64079
64080 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
64081 - unsigned int newflags;
64082 + unsigned long newflags;
64083 +
64084 +#ifdef CONFIG_PAX_SEGMEXEC
64085 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
64086 + break;
64087 +#endif
64088
64089 + BUG_ON(vma->vm_end > TASK_SIZE);
64090 newflags = vma->vm_flags | VM_LOCKED;
64091 if (!(flags & MCL_CURRENT))
64092 newflags &= ~VM_LOCKED;
64093 @@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
64094 lock_limit >>= PAGE_SHIFT;
64095
64096 ret = -ENOMEM;
64097 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
64098 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
64099 capable(CAP_IPC_LOCK))
64100 ret = do_mlockall(flags);
64101 diff -urNp linux-2.6.32.41/mm/mmap.c linux-2.6.32.41/mm/mmap.c
64102 --- linux-2.6.32.41/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
64103 +++ linux-2.6.32.41/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
64104 @@ -45,6 +45,16 @@
64105 #define arch_rebalance_pgtables(addr, len) (addr)
64106 #endif
64107
64108 +static inline void verify_mm_writelocked(struct mm_struct *mm)
64109 +{
64110 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
64111 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
64112 + up_read(&mm->mmap_sem);
64113 + BUG();
64114 + }
64115 +#endif
64116 +}
64117 +
64118 static void unmap_region(struct mm_struct *mm,
64119 struct vm_area_struct *vma, struct vm_area_struct *prev,
64120 unsigned long start, unsigned long end);
64121 @@ -70,22 +80,32 @@ static void unmap_region(struct mm_struc
64122 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
64123 *
64124 */
64125 -pgprot_t protection_map[16] = {
64126 +pgprot_t protection_map[16] __read_only = {
64127 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
64128 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
64129 };
64130
64131 pgprot_t vm_get_page_prot(unsigned long vm_flags)
64132 {
64133 - return __pgprot(pgprot_val(protection_map[vm_flags &
64134 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
64135 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
64136 pgprot_val(arch_vm_get_page_prot(vm_flags)));
64137 +
64138 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
64139 + if (!nx_enabled &&
64140 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
64141 + (vm_flags & (VM_READ | VM_WRITE)))
64142 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
64143 +#endif
64144 +
64145 + return prot;
64146 }
64147 EXPORT_SYMBOL(vm_get_page_prot);
64148
64149 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
64150 int sysctl_overcommit_ratio = 50; /* default is 50% */
64151 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
64152 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
64153 struct percpu_counter vm_committed_as;
64154
64155 /*
64156 @@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma
64157 struct vm_area_struct *next = vma->vm_next;
64158
64159 might_sleep();
64160 + BUG_ON(vma->vm_mirror);
64161 if (vma->vm_ops && vma->vm_ops->close)
64162 vma->vm_ops->close(vma);
64163 if (vma->vm_file) {
64164 @@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
64165 * not page aligned -Ram Gupta
64166 */
64167 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
64168 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
64169 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
64170 (mm->end_data - mm->start_data) > rlim)
64171 goto out;
64172 @@ -704,6 +726,12 @@ static int
64173 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
64174 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
64175 {
64176 +
64177 +#ifdef CONFIG_PAX_SEGMEXEC
64178 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
64179 + return 0;
64180 +#endif
64181 +
64182 if (is_mergeable_vma(vma, file, vm_flags) &&
64183 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
64184 if (vma->vm_pgoff == vm_pgoff)
64185 @@ -723,6 +751,12 @@ static int
64186 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
64187 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
64188 {
64189 +
64190 +#ifdef CONFIG_PAX_SEGMEXEC
64191 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
64192 + return 0;
64193 +#endif
64194 +
64195 if (is_mergeable_vma(vma, file, vm_flags) &&
64196 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
64197 pgoff_t vm_pglen;
64198 @@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struc
64199 struct vm_area_struct *vma_merge(struct mm_struct *mm,
64200 struct vm_area_struct *prev, unsigned long addr,
64201 unsigned long end, unsigned long vm_flags,
64202 - struct anon_vma *anon_vma, struct file *file,
64203 + struct anon_vma *anon_vma, struct file *file,
64204 pgoff_t pgoff, struct mempolicy *policy)
64205 {
64206 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
64207 struct vm_area_struct *area, *next;
64208
64209 +#ifdef CONFIG_PAX_SEGMEXEC
64210 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
64211 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
64212 +
64213 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
64214 +#endif
64215 +
64216 /*
64217 * We later require that vma->vm_flags == vm_flags,
64218 * so this tests vma->vm_flags & VM_SPECIAL, too.
64219 @@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct
64220 if (next && next->vm_end == end) /* cases 6, 7, 8 */
64221 next = next->vm_next;
64222
64223 +#ifdef CONFIG_PAX_SEGMEXEC
64224 + if (prev)
64225 + prev_m = pax_find_mirror_vma(prev);
64226 + if (area)
64227 + area_m = pax_find_mirror_vma(area);
64228 + if (next)
64229 + next_m = pax_find_mirror_vma(next);
64230 +#endif
64231 +
64232 /*
64233 * Can it merge with the predecessor?
64234 */
64235 @@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct
64236 /* cases 1, 6 */
64237 vma_adjust(prev, prev->vm_start,
64238 next->vm_end, prev->vm_pgoff, NULL);
64239 - } else /* cases 2, 5, 7 */
64240 +
64241 +#ifdef CONFIG_PAX_SEGMEXEC
64242 + if (prev_m)
64243 + vma_adjust(prev_m, prev_m->vm_start,
64244 + next_m->vm_end, prev_m->vm_pgoff, NULL);
64245 +#endif
64246 +
64247 + } else { /* cases 2, 5, 7 */
64248 vma_adjust(prev, prev->vm_start,
64249 end, prev->vm_pgoff, NULL);
64250 +
64251 +#ifdef CONFIG_PAX_SEGMEXEC
64252 + if (prev_m)
64253 + vma_adjust(prev_m, prev_m->vm_start,
64254 + end_m, prev_m->vm_pgoff, NULL);
64255 +#endif
64256 +
64257 + }
64258 return prev;
64259 }
64260
64261 @@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct
64262 mpol_equal(policy, vma_policy(next)) &&
64263 can_vma_merge_before(next, vm_flags,
64264 anon_vma, file, pgoff+pglen)) {
64265 - if (prev && addr < prev->vm_end) /* case 4 */
64266 + if (prev && addr < prev->vm_end) { /* case 4 */
64267 vma_adjust(prev, prev->vm_start,
64268 addr, prev->vm_pgoff, NULL);
64269 - else /* cases 3, 8 */
64270 +
64271 +#ifdef CONFIG_PAX_SEGMEXEC
64272 + if (prev_m)
64273 + vma_adjust(prev_m, prev_m->vm_start,
64274 + addr_m, prev_m->vm_pgoff, NULL);
64275 +#endif
64276 +
64277 + } else { /* cases 3, 8 */
64278 vma_adjust(area, addr, next->vm_end,
64279 next->vm_pgoff - pglen, NULL);
64280 +
64281 +#ifdef CONFIG_PAX_SEGMEXEC
64282 + if (area_m)
64283 + vma_adjust(area_m, addr_m, next_m->vm_end,
64284 + next_m->vm_pgoff - pglen, NULL);
64285 +#endif
64286 +
64287 + }
64288 return area;
64289 }
64290
64291 @@ -898,14 +978,11 @@ none:
64292 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
64293 struct file *file, long pages)
64294 {
64295 - const unsigned long stack_flags
64296 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
64297 -
64298 if (file) {
64299 mm->shared_vm += pages;
64300 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
64301 mm->exec_vm += pages;
64302 - } else if (flags & stack_flags)
64303 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
64304 mm->stack_vm += pages;
64305 if (flags & (VM_RESERVED|VM_IO))
64306 mm->reserved_vm += pages;
64307 @@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file
64308 * (the exception is when the underlying filesystem is noexec
64309 * mounted, in which case we dont add PROT_EXEC.)
64310 */
64311 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
64312 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
64313 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
64314 prot |= PROT_EXEC;
64315
64316 @@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file
64317 /* Obtain the address to map to. we verify (or select) it and ensure
64318 * that it represents a valid section of the address space.
64319 */
64320 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
64321 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
64322 if (addr & ~PAGE_MASK)
64323 return addr;
64324
64325 @@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file
64326 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
64327 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
64328
64329 +#ifdef CONFIG_PAX_MPROTECT
64330 + if (mm->pax_flags & MF_PAX_MPROTECT) {
64331 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
64332 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
64333 + gr_log_rwxmmap(file);
64334 +
64335 +#ifdef CONFIG_PAX_EMUPLT
64336 + vm_flags &= ~VM_EXEC;
64337 +#else
64338 + return -EPERM;
64339 +#endif
64340 +
64341 + }
64342 +
64343 + if (!(vm_flags & VM_EXEC))
64344 + vm_flags &= ~VM_MAYEXEC;
64345 +#else
64346 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
64347 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
64348 +#endif
64349 + else
64350 + vm_flags &= ~VM_MAYWRITE;
64351 + }
64352 +#endif
64353 +
64354 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
64355 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
64356 + vm_flags &= ~VM_PAGEEXEC;
64357 +#endif
64358 +
64359 if (flags & MAP_LOCKED)
64360 if (!can_do_mlock())
64361 return -EPERM;
64362 @@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file
64363 locked += mm->locked_vm;
64364 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
64365 lock_limit >>= PAGE_SHIFT;
64366 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
64367 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
64368 return -EAGAIN;
64369 }
64370 @@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file
64371 if (error)
64372 return error;
64373
64374 + if (!gr_acl_handle_mmap(file, prot))
64375 + return -EACCES;
64376 +
64377 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
64378 }
64379 EXPORT_SYMBOL(do_mmap_pgoff);
64380 @@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
64381 */
64382 int vma_wants_writenotify(struct vm_area_struct *vma)
64383 {
64384 - unsigned int vm_flags = vma->vm_flags;
64385 + unsigned long vm_flags = vma->vm_flags;
64386
64387 /* If it was private or non-writable, the write bit is already clear */
64388 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
64389 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
64390 return 0;
64391
64392 /* The backer wishes to know when pages are first written to? */
64393 @@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *f
64394 unsigned long charged = 0;
64395 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
64396
64397 +#ifdef CONFIG_PAX_SEGMEXEC
64398 + struct vm_area_struct *vma_m = NULL;
64399 +#endif
64400 +
64401 + /*
64402 + * mm->mmap_sem is required to protect against another thread
64403 + * changing the mappings in case we sleep.
64404 + */
64405 + verify_mm_writelocked(mm);
64406 +
64407 /* Clear old maps */
64408 error = -ENOMEM;
64409 -munmap_back:
64410 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
64411 if (vma && vma->vm_start < addr + len) {
64412 if (do_munmap(mm, addr, len))
64413 return -ENOMEM;
64414 - goto munmap_back;
64415 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
64416 + BUG_ON(vma && vma->vm_start < addr + len);
64417 }
64418
64419 /* Check against address space limit. */
64420 @@ -1173,6 +1294,16 @@ munmap_back:
64421 goto unacct_error;
64422 }
64423
64424 +#ifdef CONFIG_PAX_SEGMEXEC
64425 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
64426 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
64427 + if (!vma_m) {
64428 + error = -ENOMEM;
64429 + goto free_vma;
64430 + }
64431 + }
64432 +#endif
64433 +
64434 vma->vm_mm = mm;
64435 vma->vm_start = addr;
64436 vma->vm_end = addr + len;
64437 @@ -1195,6 +1326,19 @@ munmap_back:
64438 error = file->f_op->mmap(file, vma);
64439 if (error)
64440 goto unmap_and_free_vma;
64441 +
64442 +#ifdef CONFIG_PAX_SEGMEXEC
64443 + if (vma_m && (vm_flags & VM_EXECUTABLE))
64444 + added_exe_file_vma(mm);
64445 +#endif
64446 +
64447 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
64448 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
64449 + vma->vm_flags |= VM_PAGEEXEC;
64450 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
64451 + }
64452 +#endif
64453 +
64454 if (vm_flags & VM_EXECUTABLE)
64455 added_exe_file_vma(mm);
64456
64457 @@ -1218,6 +1362,11 @@ munmap_back:
64458 vma_link(mm, vma, prev, rb_link, rb_parent);
64459 file = vma->vm_file;
64460
64461 +#ifdef CONFIG_PAX_SEGMEXEC
64462 + if (vma_m)
64463 + pax_mirror_vma(vma_m, vma);
64464 +#endif
64465 +
64466 /* Once vma denies write, undo our temporary denial count */
64467 if (correct_wcount)
64468 atomic_inc(&inode->i_writecount);
64469 @@ -1226,6 +1375,7 @@ out:
64470
64471 mm->total_vm += len >> PAGE_SHIFT;
64472 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
64473 + track_exec_limit(mm, addr, addr + len, vm_flags);
64474 if (vm_flags & VM_LOCKED) {
64475 /*
64476 * makes pages present; downgrades, drops, reacquires mmap_sem
64477 @@ -1248,6 +1398,12 @@ unmap_and_free_vma:
64478 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
64479 charged = 0;
64480 free_vma:
64481 +
64482 +#ifdef CONFIG_PAX_SEGMEXEC
64483 + if (vma_m)
64484 + kmem_cache_free(vm_area_cachep, vma_m);
64485 +#endif
64486 +
64487 kmem_cache_free(vm_area_cachep, vma);
64488 unacct_error:
64489 if (charged)
64490 @@ -1255,6 +1411,44 @@ unacct_error:
64491 return error;
64492 }
64493
64494 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
64495 +{
64496 + if (!vma) {
64497 +#ifdef CONFIG_STACK_GROWSUP
64498 + if (addr > sysctl_heap_stack_gap)
64499 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
64500 + else
64501 + vma = find_vma(current->mm, 0);
64502 + if (vma && (vma->vm_flags & VM_GROWSUP))
64503 + return false;
64504 +#endif
64505 + return true;
64506 + }
64507 +
64508 + if (addr + len > vma->vm_start)
64509 + return false;
64510 +
64511 + if (vma->vm_flags & VM_GROWSDOWN)
64512 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
64513 +#ifdef CONFIG_STACK_GROWSUP
64514 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
64515 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
64516 +#endif
64517 +
64518 + return true;
64519 +}
64520 +
64521 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
64522 +{
64523 + if (vma->vm_start < len)
64524 + return -ENOMEM;
64525 + if (!(vma->vm_flags & VM_GROWSDOWN))
64526 + return vma->vm_start - len;
64527 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
64528 + return vma->vm_start - len - sysctl_heap_stack_gap;
64529 + return -ENOMEM;
64530 +}
64531 +
64532 /* Get an address range which is currently unmapped.
64533 * For shmat() with addr=0.
64534 *
64535 @@ -1281,18 +1475,23 @@ arch_get_unmapped_area(struct file *filp
64536 if (flags & MAP_FIXED)
64537 return addr;
64538
64539 +#ifdef CONFIG_PAX_RANDMMAP
64540 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
64541 +#endif
64542 +
64543 if (addr) {
64544 addr = PAGE_ALIGN(addr);
64545 - vma = find_vma(mm, addr);
64546 - if (TASK_SIZE - len >= addr &&
64547 - (!vma || addr + len <= vma->vm_start))
64548 - return addr;
64549 + if (TASK_SIZE - len >= addr) {
64550 + vma = find_vma(mm, addr);
64551 + if (check_heap_stack_gap(vma, addr, len))
64552 + return addr;
64553 + }
64554 }
64555 if (len > mm->cached_hole_size) {
64556 - start_addr = addr = mm->free_area_cache;
64557 + start_addr = addr = mm->free_area_cache;
64558 } else {
64559 - start_addr = addr = TASK_UNMAPPED_BASE;
64560 - mm->cached_hole_size = 0;
64561 + start_addr = addr = mm->mmap_base;
64562 + mm->cached_hole_size = 0;
64563 }
64564
64565 full_search:
64566 @@ -1303,34 +1502,40 @@ full_search:
64567 * Start a new search - just in case we missed
64568 * some holes.
64569 */
64570 - if (start_addr != TASK_UNMAPPED_BASE) {
64571 - addr = TASK_UNMAPPED_BASE;
64572 - start_addr = addr;
64573 + if (start_addr != mm->mmap_base) {
64574 + start_addr = addr = mm->mmap_base;
64575 mm->cached_hole_size = 0;
64576 goto full_search;
64577 }
64578 return -ENOMEM;
64579 }
64580 - if (!vma || addr + len <= vma->vm_start) {
64581 - /*
64582 - * Remember the place where we stopped the search:
64583 - */
64584 - mm->free_area_cache = addr + len;
64585 - return addr;
64586 - }
64587 + if (check_heap_stack_gap(vma, addr, len))
64588 + break;
64589 if (addr + mm->cached_hole_size < vma->vm_start)
64590 mm->cached_hole_size = vma->vm_start - addr;
64591 addr = vma->vm_end;
64592 }
64593 +
64594 + /*
64595 + * Remember the place where we stopped the search:
64596 + */
64597 + mm->free_area_cache = addr + len;
64598 + return addr;
64599 }
64600 #endif
64601
64602 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
64603 {
64604 +
64605 +#ifdef CONFIG_PAX_SEGMEXEC
64606 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
64607 + return;
64608 +#endif
64609 +
64610 /*
64611 * Is this a new hole at the lowest possible address?
64612 */
64613 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
64614 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
64615 mm->free_area_cache = addr;
64616 mm->cached_hole_size = ~0UL;
64617 }
64618 @@ -1348,7 +1553,7 @@ arch_get_unmapped_area_topdown(struct fi
64619 {
64620 struct vm_area_struct *vma;
64621 struct mm_struct *mm = current->mm;
64622 - unsigned long addr = addr0;
64623 + unsigned long base = mm->mmap_base, addr = addr0;
64624
64625 /* requested length too big for entire address space */
64626 if (len > TASK_SIZE)
64627 @@ -1357,13 +1562,18 @@ arch_get_unmapped_area_topdown(struct fi
64628 if (flags & MAP_FIXED)
64629 return addr;
64630
64631 +#ifdef CONFIG_PAX_RANDMMAP
64632 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
64633 +#endif
64634 +
64635 /* requesting a specific address */
64636 if (addr) {
64637 addr = PAGE_ALIGN(addr);
64638 - vma = find_vma(mm, addr);
64639 - if (TASK_SIZE - len >= addr &&
64640 - (!vma || addr + len <= vma->vm_start))
64641 - return addr;
64642 + if (TASK_SIZE - len >= addr) {
64643 + vma = find_vma(mm, addr);
64644 + if (check_heap_stack_gap(vma, addr, len))
64645 + return addr;
64646 + }
64647 }
64648
64649 /* check if free_area_cache is useful for us */
64650 @@ -1378,7 +1588,7 @@ arch_get_unmapped_area_topdown(struct fi
64651 /* make sure it can fit in the remaining address space */
64652 if (addr > len) {
64653 vma = find_vma(mm, addr-len);
64654 - if (!vma || addr <= vma->vm_start)
64655 + if (check_heap_stack_gap(vma, addr - len, len))
64656 /* remember the address as a hint for next time */
64657 return (mm->free_area_cache = addr-len);
64658 }
64659 @@ -1395,7 +1605,7 @@ arch_get_unmapped_area_topdown(struct fi
64660 * return with success:
64661 */
64662 vma = find_vma(mm, addr);
64663 - if (!vma || addr+len <= vma->vm_start)
64664 + if (check_heap_stack_gap(vma, addr, len))
64665 /* remember the address as a hint for next time */
64666 return (mm->free_area_cache = addr);
64667
64668 @@ -1404,8 +1614,8 @@ arch_get_unmapped_area_topdown(struct fi
64669 mm->cached_hole_size = vma->vm_start - addr;
64670
64671 /* try just below the current vma->vm_start */
64672 - addr = vma->vm_start-len;
64673 - } while (len < vma->vm_start);
64674 + addr = skip_heap_stack_gap(vma, len);
64675 + } while (!IS_ERR_VALUE(addr));
64676
64677 bottomup:
64678 /*
64679 @@ -1414,13 +1624,21 @@ bottomup:
64680 * can happen with large stack limits and large mmap()
64681 * allocations.
64682 */
64683 + mm->mmap_base = TASK_UNMAPPED_BASE;
64684 +
64685 +#ifdef CONFIG_PAX_RANDMMAP
64686 + if (mm->pax_flags & MF_PAX_RANDMMAP)
64687 + mm->mmap_base += mm->delta_mmap;
64688 +#endif
64689 +
64690 + mm->free_area_cache = mm->mmap_base;
64691 mm->cached_hole_size = ~0UL;
64692 - mm->free_area_cache = TASK_UNMAPPED_BASE;
64693 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
64694 /*
64695 * Restore the topdown base:
64696 */
64697 - mm->free_area_cache = mm->mmap_base;
64698 + mm->mmap_base = base;
64699 + mm->free_area_cache = base;
64700 mm->cached_hole_size = ~0UL;
64701
64702 return addr;
64703 @@ -1429,6 +1647,12 @@ bottomup:
64704
64705 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
64706 {
64707 +
64708 +#ifdef CONFIG_PAX_SEGMEXEC
64709 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
64710 + return;
64711 +#endif
64712 +
64713 /*
64714 * Is this a new hole at the highest possible address?
64715 */
64716 @@ -1436,8 +1660,10 @@ void arch_unmap_area_topdown(struct mm_s
64717 mm->free_area_cache = addr;
64718
64719 /* dont allow allocations above current base */
64720 - if (mm->free_area_cache > mm->mmap_base)
64721 + if (mm->free_area_cache > mm->mmap_base) {
64722 mm->free_area_cache = mm->mmap_base;
64723 + mm->cached_hole_size = ~0UL;
64724 + }
64725 }
64726
64727 unsigned long
64728 @@ -1545,6 +1771,27 @@ out:
64729 return prev ? prev->vm_next : vma;
64730 }
64731
64732 +#ifdef CONFIG_PAX_SEGMEXEC
64733 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
64734 +{
64735 + struct vm_area_struct *vma_m;
64736 +
64737 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
64738 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
64739 + BUG_ON(vma->vm_mirror);
64740 + return NULL;
64741 + }
64742 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
64743 + vma_m = vma->vm_mirror;
64744 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
64745 + BUG_ON(vma->vm_file != vma_m->vm_file);
64746 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
64747 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
64748 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
64749 + return vma_m;
64750 +}
64751 +#endif
64752 +
64753 /*
64754 * Verify that the stack growth is acceptable and
64755 * update accounting. This is shared with both the
64756 @@ -1561,6 +1808,7 @@ static int acct_stack_growth(struct vm_a
64757 return -ENOMEM;
64758
64759 /* Stack limit test */
64760 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
64761 if (size > rlim[RLIMIT_STACK].rlim_cur)
64762 return -ENOMEM;
64763
64764 @@ -1570,6 +1818,7 @@ static int acct_stack_growth(struct vm_a
64765 unsigned long limit;
64766 locked = mm->locked_vm + grow;
64767 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
64768 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
64769 if (locked > limit && !capable(CAP_IPC_LOCK))
64770 return -ENOMEM;
64771 }
64772 @@ -1600,37 +1849,48 @@ static int acct_stack_growth(struct vm_a
64773 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
64774 * vma is the last one with address > vma->vm_end. Have to extend vma.
64775 */
64776 +#ifndef CONFIG_IA64
64777 +static
64778 +#endif
64779 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
64780 {
64781 int error;
64782 + bool locknext;
64783
64784 if (!(vma->vm_flags & VM_GROWSUP))
64785 return -EFAULT;
64786
64787 + /* Also guard against wrapping around to address 0. */
64788 + if (address < PAGE_ALIGN(address+1))
64789 + address = PAGE_ALIGN(address+1);
64790 + else
64791 + return -ENOMEM;
64792 +
64793 /*
64794 * We must make sure the anon_vma is allocated
64795 * so that the anon_vma locking is not a noop.
64796 */
64797 if (unlikely(anon_vma_prepare(vma)))
64798 return -ENOMEM;
64799 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
64800 + if (locknext && anon_vma_prepare(vma->vm_next))
64801 + return -ENOMEM;
64802 anon_vma_lock(vma);
64803 + if (locknext)
64804 + anon_vma_lock(vma->vm_next);
64805
64806 /*
64807 * vma->vm_start/vm_end cannot change under us because the caller
64808 * is required to hold the mmap_sem in read mode. We need the
64809 - * anon_vma lock to serialize against concurrent expand_stacks.
64810 - * Also guard against wrapping around to address 0.
64811 + * anon_vma locks to serialize against concurrent expand_stacks
64812 + * and expand_upwards.
64813 */
64814 - if (address < PAGE_ALIGN(address+4))
64815 - address = PAGE_ALIGN(address+4);
64816 - else {
64817 - anon_vma_unlock(vma);
64818 - return -ENOMEM;
64819 - }
64820 error = 0;
64821
64822 /* Somebody else might have raced and expanded it already */
64823 - if (address > vma->vm_end) {
64824 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
64825 + error = -ENOMEM;
64826 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
64827 unsigned long size, grow;
64828
64829 size = address - vma->vm_start;
64830 @@ -1640,6 +1900,8 @@ int expand_upwards(struct vm_area_struct
64831 if (!error)
64832 vma->vm_end = address;
64833 }
64834 + if (locknext)
64835 + anon_vma_unlock(vma->vm_next);
64836 anon_vma_unlock(vma);
64837 return error;
64838 }
64839 @@ -1652,6 +1914,8 @@ static int expand_downwards(struct vm_ar
64840 unsigned long address)
64841 {
64842 int error;
64843 + bool lockprev = false;
64844 + struct vm_area_struct *prev;
64845
64846 /*
64847 * We must make sure the anon_vma is allocated
64848 @@ -1665,6 +1929,15 @@ static int expand_downwards(struct vm_ar
64849 if (error)
64850 return error;
64851
64852 + prev = vma->vm_prev;
64853 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
64854 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
64855 +#endif
64856 + if (lockprev && anon_vma_prepare(prev))
64857 + return -ENOMEM;
64858 + if (lockprev)
64859 + anon_vma_lock(prev);
64860 +
64861 anon_vma_lock(vma);
64862
64863 /*
64864 @@ -1674,9 +1947,17 @@ static int expand_downwards(struct vm_ar
64865 */
64866
64867 /* Somebody else might have raced and expanded it already */
64868 - if (address < vma->vm_start) {
64869 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
64870 + error = -ENOMEM;
64871 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
64872 unsigned long size, grow;
64873
64874 +#ifdef CONFIG_PAX_SEGMEXEC
64875 + struct vm_area_struct *vma_m;
64876 +
64877 + vma_m = pax_find_mirror_vma(vma);
64878 +#endif
64879 +
64880 size = vma->vm_end - address;
64881 grow = (vma->vm_start - address) >> PAGE_SHIFT;
64882
64883 @@ -1684,9 +1965,20 @@ static int expand_downwards(struct vm_ar
64884 if (!error) {
64885 vma->vm_start = address;
64886 vma->vm_pgoff -= grow;
64887 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
64888 +
64889 +#ifdef CONFIG_PAX_SEGMEXEC
64890 + if (vma_m) {
64891 + vma_m->vm_start -= grow << PAGE_SHIFT;
64892 + vma_m->vm_pgoff -= grow;
64893 + }
64894 +#endif
64895 +
64896 }
64897 }
64898 anon_vma_unlock(vma);
64899 + if (lockprev)
64900 + anon_vma_unlock(prev);
64901 return error;
64902 }
64903
64904 @@ -1762,6 +2054,13 @@ static void remove_vma_list(struct mm_st
64905 do {
64906 long nrpages = vma_pages(vma);
64907
64908 +#ifdef CONFIG_PAX_SEGMEXEC
64909 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
64910 + vma = remove_vma(vma);
64911 + continue;
64912 + }
64913 +#endif
64914 +
64915 mm->total_vm -= nrpages;
64916 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
64917 vma = remove_vma(vma);
64918 @@ -1807,6 +2106,16 @@ detach_vmas_to_be_unmapped(struct mm_str
64919 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
64920 vma->vm_prev = NULL;
64921 do {
64922 +
64923 +#ifdef CONFIG_PAX_SEGMEXEC
64924 + if (vma->vm_mirror) {
64925 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
64926 + vma->vm_mirror->vm_mirror = NULL;
64927 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
64928 + vma->vm_mirror = NULL;
64929 + }
64930 +#endif
64931 +
64932 rb_erase(&vma->vm_rb, &mm->mm_rb);
64933 mm->map_count--;
64934 tail_vma = vma;
64935 @@ -1834,10 +2143,25 @@ int split_vma(struct mm_struct * mm, str
64936 struct mempolicy *pol;
64937 struct vm_area_struct *new;
64938
64939 +#ifdef CONFIG_PAX_SEGMEXEC
64940 + struct vm_area_struct *vma_m, *new_m = NULL;
64941 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
64942 +#endif
64943 +
64944 if (is_vm_hugetlb_page(vma) && (addr &
64945 ~(huge_page_mask(hstate_vma(vma)))))
64946 return -EINVAL;
64947
64948 +#ifdef CONFIG_PAX_SEGMEXEC
64949 + vma_m = pax_find_mirror_vma(vma);
64950 +
64951 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
64952 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
64953 + if (mm->map_count >= sysctl_max_map_count-1)
64954 + return -ENOMEM;
64955 + } else
64956 +#endif
64957 +
64958 if (mm->map_count >= sysctl_max_map_count)
64959 return -ENOMEM;
64960
64961 @@ -1845,6 +2169,16 @@ int split_vma(struct mm_struct * mm, str
64962 if (!new)
64963 return -ENOMEM;
64964
64965 +#ifdef CONFIG_PAX_SEGMEXEC
64966 + if (vma_m) {
64967 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
64968 + if (!new_m) {
64969 + kmem_cache_free(vm_area_cachep, new);
64970 + return -ENOMEM;
64971 + }
64972 + }
64973 +#endif
64974 +
64975 /* most fields are the same, copy all, and then fixup */
64976 *new = *vma;
64977
64978 @@ -1855,8 +2189,29 @@ int split_vma(struct mm_struct * mm, str
64979 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
64980 }
64981
64982 +#ifdef CONFIG_PAX_SEGMEXEC
64983 + if (vma_m) {
64984 + *new_m = *vma_m;
64985 + new_m->vm_mirror = new;
64986 + new->vm_mirror = new_m;
64987 +
64988 + if (new_below)
64989 + new_m->vm_end = addr_m;
64990 + else {
64991 + new_m->vm_start = addr_m;
64992 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
64993 + }
64994 + }
64995 +#endif
64996 +
64997 pol = mpol_dup(vma_policy(vma));
64998 if (IS_ERR(pol)) {
64999 +
65000 +#ifdef CONFIG_PAX_SEGMEXEC
65001 + if (new_m)
65002 + kmem_cache_free(vm_area_cachep, new_m);
65003 +#endif
65004 +
65005 kmem_cache_free(vm_area_cachep, new);
65006 return PTR_ERR(pol);
65007 }
65008 @@ -1877,6 +2232,28 @@ int split_vma(struct mm_struct * mm, str
65009 else
65010 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
65011
65012 +#ifdef CONFIG_PAX_SEGMEXEC
65013 + if (vma_m) {
65014 + mpol_get(pol);
65015 + vma_set_policy(new_m, pol);
65016 +
65017 + if (new_m->vm_file) {
65018 + get_file(new_m->vm_file);
65019 + if (vma_m->vm_flags & VM_EXECUTABLE)
65020 + added_exe_file_vma(mm);
65021 + }
65022 +
65023 + if (new_m->vm_ops && new_m->vm_ops->open)
65024 + new_m->vm_ops->open(new_m);
65025 +
65026 + if (new_below)
65027 + vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
65028 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
65029 + else
65030 + vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
65031 + }
65032 +#endif
65033 +
65034 return 0;
65035 }
65036
65037 @@ -1885,11 +2262,30 @@ int split_vma(struct mm_struct * mm, str
65038 * work. This now handles partial unmappings.
65039 * Jeremy Fitzhardinge <jeremy@goop.org>
65040 */
65041 +#ifdef CONFIG_PAX_SEGMEXEC
65042 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
65043 +{
65044 + int ret = __do_munmap(mm, start, len);
65045 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
65046 + return ret;
65047 +
65048 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
65049 +}
65050 +
65051 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
65052 +#else
65053 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
65054 +#endif
65055 {
65056 unsigned long end;
65057 struct vm_area_struct *vma, *prev, *last;
65058
65059 + /*
65060 + * mm->mmap_sem is required to protect against another thread
65061 + * changing the mappings in case we sleep.
65062 + */
65063 + verify_mm_writelocked(mm);
65064 +
65065 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
65066 return -EINVAL;
65067
65068 @@ -1953,6 +2349,8 @@ int do_munmap(struct mm_struct *mm, unsi
65069 /* Fix up all other VM information */
65070 remove_vma_list(mm, vma);
65071
65072 + track_exec_limit(mm, start, end, 0UL);
65073 +
65074 return 0;
65075 }
65076
65077 @@ -1965,22 +2363,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
65078
65079 profile_munmap(addr);
65080
65081 +#ifdef CONFIG_PAX_SEGMEXEC
65082 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
65083 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
65084 + return -EINVAL;
65085 +#endif
65086 +
65087 down_write(&mm->mmap_sem);
65088 ret = do_munmap(mm, addr, len);
65089 up_write(&mm->mmap_sem);
65090 return ret;
65091 }
65092
65093 -static inline void verify_mm_writelocked(struct mm_struct *mm)
65094 -{
65095 -#ifdef CONFIG_DEBUG_VM
65096 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
65097 - WARN_ON(1);
65098 - up_read(&mm->mmap_sem);
65099 - }
65100 -#endif
65101 -}
65102 -
65103 /*
65104 * this is really a simplified "do_mmap". it only handles
65105 * anonymous maps. eventually we may be able to do some
65106 @@ -1994,6 +2388,7 @@ unsigned long do_brk(unsigned long addr,
65107 struct rb_node ** rb_link, * rb_parent;
65108 pgoff_t pgoff = addr >> PAGE_SHIFT;
65109 int error;
65110 + unsigned long charged;
65111
65112 len = PAGE_ALIGN(len);
65113 if (!len)
65114 @@ -2005,16 +2400,30 @@ unsigned long do_brk(unsigned long addr,
65115
65116 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
65117
65118 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
65119 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
65120 + flags &= ~VM_EXEC;
65121 +
65122 +#ifdef CONFIG_PAX_MPROTECT
65123 + if (mm->pax_flags & MF_PAX_MPROTECT)
65124 + flags &= ~VM_MAYEXEC;
65125 +#endif
65126 +
65127 + }
65128 +#endif
65129 +
65130 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
65131 if (error & ~PAGE_MASK)
65132 return error;
65133
65134 + charged = len >> PAGE_SHIFT;
65135 +
65136 /*
65137 * mlock MCL_FUTURE?
65138 */
65139 if (mm->def_flags & VM_LOCKED) {
65140 unsigned long locked, lock_limit;
65141 - locked = len >> PAGE_SHIFT;
65142 + locked = charged;
65143 locked += mm->locked_vm;
65144 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
65145 lock_limit >>= PAGE_SHIFT;
65146 @@ -2031,22 +2440,22 @@ unsigned long do_brk(unsigned long addr,
65147 /*
65148 * Clear old maps. this also does some error checking for us
65149 */
65150 - munmap_back:
65151 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
65152 if (vma && vma->vm_start < addr + len) {
65153 if (do_munmap(mm, addr, len))
65154 return -ENOMEM;
65155 - goto munmap_back;
65156 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
65157 + BUG_ON(vma && vma->vm_start < addr + len);
65158 }
65159
65160 /* Check against address space limits *after* clearing old maps... */
65161 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
65162 + if (!may_expand_vm(mm, charged))
65163 return -ENOMEM;
65164
65165 if (mm->map_count > sysctl_max_map_count)
65166 return -ENOMEM;
65167
65168 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
65169 + if (security_vm_enough_memory(charged))
65170 return -ENOMEM;
65171
65172 /* Can we just expand an old private anonymous mapping? */
65173 @@ -2060,7 +2469,7 @@ unsigned long do_brk(unsigned long addr,
65174 */
65175 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
65176 if (!vma) {
65177 - vm_unacct_memory(len >> PAGE_SHIFT);
65178 + vm_unacct_memory(charged);
65179 return -ENOMEM;
65180 }
65181
65182 @@ -2072,11 +2481,12 @@ unsigned long do_brk(unsigned long addr,
65183 vma->vm_page_prot = vm_get_page_prot(flags);
65184 vma_link(mm, vma, prev, rb_link, rb_parent);
65185 out:
65186 - mm->total_vm += len >> PAGE_SHIFT;
65187 + mm->total_vm += charged;
65188 if (flags & VM_LOCKED) {
65189 if (!mlock_vma_pages_range(vma, addr, addr + len))
65190 - mm->locked_vm += (len >> PAGE_SHIFT);
65191 + mm->locked_vm += charged;
65192 }
65193 + track_exec_limit(mm, addr, addr + len, flags);
65194 return addr;
65195 }
65196
65197 @@ -2123,8 +2533,10 @@ void exit_mmap(struct mm_struct *mm)
65198 * Walk the list again, actually closing and freeing it,
65199 * with preemption enabled, without holding any MM locks.
65200 */
65201 - while (vma)
65202 + while (vma) {
65203 + vma->vm_mirror = NULL;
65204 vma = remove_vma(vma);
65205 + }
65206
65207 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
65208 }
65209 @@ -2138,6 +2550,10 @@ int insert_vm_struct(struct mm_struct *
65210 struct vm_area_struct * __vma, * prev;
65211 struct rb_node ** rb_link, * rb_parent;
65212
65213 +#ifdef CONFIG_PAX_SEGMEXEC
65214 + struct vm_area_struct *vma_m = NULL;
65215 +#endif
65216 +
65217 /*
65218 * The vm_pgoff of a purely anonymous vma should be irrelevant
65219 * until its first write fault, when page's anon_vma and index
65220 @@ -2160,7 +2576,22 @@ int insert_vm_struct(struct mm_struct *
65221 if ((vma->vm_flags & VM_ACCOUNT) &&
65222 security_vm_enough_memory_mm(mm, vma_pages(vma)))
65223 return -ENOMEM;
65224 +
65225 +#ifdef CONFIG_PAX_SEGMEXEC
65226 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
65227 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
65228 + if (!vma_m)
65229 + return -ENOMEM;
65230 + }
65231 +#endif
65232 +
65233 vma_link(mm, vma, prev, rb_link, rb_parent);
65234 +
65235 +#ifdef CONFIG_PAX_SEGMEXEC
65236 + if (vma_m)
65237 + pax_mirror_vma(vma_m, vma);
65238 +#endif
65239 +
65240 return 0;
65241 }
65242
65243 @@ -2178,6 +2609,8 @@ struct vm_area_struct *copy_vma(struct v
65244 struct rb_node **rb_link, *rb_parent;
65245 struct mempolicy *pol;
65246
65247 + BUG_ON(vma->vm_mirror);
65248 +
65249 /*
65250 * If anonymous vma has not yet been faulted, update new pgoff
65251 * to match new location, to increase its chance of merging.
65252 @@ -2221,6 +2654,35 @@ struct vm_area_struct *copy_vma(struct v
65253 return new_vma;
65254 }
65255
65256 +#ifdef CONFIG_PAX_SEGMEXEC
65257 +void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
65258 +{
65259 + struct vm_area_struct *prev_m;
65260 + struct rb_node **rb_link_m, *rb_parent_m;
65261 + struct mempolicy *pol_m;
65262 +
65263 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
65264 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
65265 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
65266 + *vma_m = *vma;
65267 + pol_m = vma_policy(vma_m);
65268 + mpol_get(pol_m);
65269 + vma_set_policy(vma_m, pol_m);
65270 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
65271 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
65272 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
65273 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
65274 + if (vma_m->vm_file)
65275 + get_file(vma_m->vm_file);
65276 + if (vma_m->vm_ops && vma_m->vm_ops->open)
65277 + vma_m->vm_ops->open(vma_m);
65278 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
65279 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
65280 + vma_m->vm_mirror = vma;
65281 + vma->vm_mirror = vma_m;
65282 +}
65283 +#endif
65284 +
65285 /*
65286 * Return true if the calling process may expand its vm space by the passed
65287 * number of pages
65288 @@ -2231,7 +2693,7 @@ int may_expand_vm(struct mm_struct *mm,
65289 unsigned long lim;
65290
65291 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
65292 -
65293 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
65294 if (cur + npages > lim)
65295 return 0;
65296 return 1;
65297 @@ -2301,6 +2763,22 @@ int install_special_mapping(struct mm_st
65298 vma->vm_start = addr;
65299 vma->vm_end = addr + len;
65300
65301 +#ifdef CONFIG_PAX_MPROTECT
65302 + if (mm->pax_flags & MF_PAX_MPROTECT) {
65303 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
65304 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
65305 + return -EPERM;
65306 + if (!(vm_flags & VM_EXEC))
65307 + vm_flags &= ~VM_MAYEXEC;
65308 +#else
65309 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
65310 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
65311 +#endif
65312 + else
65313 + vm_flags &= ~VM_MAYWRITE;
65314 + }
65315 +#endif
65316 +
65317 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
65318 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
65319
65320 diff -urNp linux-2.6.32.41/mm/mprotect.c linux-2.6.32.41/mm/mprotect.c
65321 --- linux-2.6.32.41/mm/mprotect.c 2011-03-27 14:31:47.000000000 -0400
65322 +++ linux-2.6.32.41/mm/mprotect.c 2011-04-17 15:56:46.000000000 -0400
65323 @@ -24,10 +24,16 @@
65324 #include <linux/mmu_notifier.h>
65325 #include <linux/migrate.h>
65326 #include <linux/perf_event.h>
65327 +
65328 +#ifdef CONFIG_PAX_MPROTECT
65329 +#include <linux/elf.h>
65330 +#endif
65331 +
65332 #include <asm/uaccess.h>
65333 #include <asm/pgtable.h>
65334 #include <asm/cacheflush.h>
65335 #include <asm/tlbflush.h>
65336 +#include <asm/mmu_context.h>
65337
65338 #ifndef pgprot_modify
65339 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
65340 @@ -132,6 +138,48 @@ static void change_protection(struct vm_
65341 flush_tlb_range(vma, start, end);
65342 }
65343
65344 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
65345 +/* called while holding the mmap semaphore for writing except stack expansion */
65346 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
65347 +{
65348 + unsigned long oldlimit, newlimit = 0UL;
65349 +
65350 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
65351 + return;
65352 +
65353 + spin_lock(&mm->page_table_lock);
65354 + oldlimit = mm->context.user_cs_limit;
65355 + if ((prot & VM_EXEC) && oldlimit < end)
65356 + /* USER_CS limit moved up */
65357 + newlimit = end;
65358 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
65359 + /* USER_CS limit moved down */
65360 + newlimit = start;
65361 +
65362 + if (newlimit) {
65363 + mm->context.user_cs_limit = newlimit;
65364 +
65365 +#ifdef CONFIG_SMP
65366 + wmb();
65367 + cpus_clear(mm->context.cpu_user_cs_mask);
65368 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
65369 +#endif
65370 +
65371 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
65372 + }
65373 + spin_unlock(&mm->page_table_lock);
65374 + if (newlimit == end) {
65375 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
65376 +
65377 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
65378 + if (is_vm_hugetlb_page(vma))
65379 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
65380 + else
65381 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
65382 + }
65383 +}
65384 +#endif
65385 +
65386 int
65387 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
65388 unsigned long start, unsigned long end, unsigned long newflags)
65389 @@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vm
65390 int error;
65391 int dirty_accountable = 0;
65392
65393 +#ifdef CONFIG_PAX_SEGMEXEC
65394 + struct vm_area_struct *vma_m = NULL;
65395 + unsigned long start_m, end_m;
65396 +
65397 + start_m = start + SEGMEXEC_TASK_SIZE;
65398 + end_m = end + SEGMEXEC_TASK_SIZE;
65399 +#endif
65400 +
65401 if (newflags == oldflags) {
65402 *pprev = vma;
65403 return 0;
65404 }
65405
65406 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
65407 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
65408 +
65409 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
65410 + return -ENOMEM;
65411 +
65412 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
65413 + return -ENOMEM;
65414 + }
65415 +
65416 /*
65417 * If we make a private mapping writable we increase our commit;
65418 * but (without finer accounting) cannot reduce our commit if we
65419 @@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vm
65420 }
65421 }
65422
65423 +#ifdef CONFIG_PAX_SEGMEXEC
65424 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
65425 + if (start != vma->vm_start) {
65426 + error = split_vma(mm, vma, start, 1);
65427 + if (error)
65428 + goto fail;
65429 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
65430 + *pprev = (*pprev)->vm_next;
65431 + }
65432 +
65433 + if (end != vma->vm_end) {
65434 + error = split_vma(mm, vma, end, 0);
65435 + if (error)
65436 + goto fail;
65437 + }
65438 +
65439 + if (pax_find_mirror_vma(vma)) {
65440 + error = __do_munmap(mm, start_m, end_m - start_m);
65441 + if (error)
65442 + goto fail;
65443 + } else {
65444 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
65445 + if (!vma_m) {
65446 + error = -ENOMEM;
65447 + goto fail;
65448 + }
65449 + vma->vm_flags = newflags;
65450 + pax_mirror_vma(vma_m, vma);
65451 + }
65452 + }
65453 +#endif
65454 +
65455 /*
65456 * First try to merge with previous and/or next vma.
65457 */
65458 @@ -195,9 +293,21 @@ success:
65459 * vm_flags and vm_page_prot are protected by the mmap_sem
65460 * held in write mode.
65461 */
65462 +
65463 +#ifdef CONFIG_PAX_SEGMEXEC
65464 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
65465 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
65466 +#endif
65467 +
65468 vma->vm_flags = newflags;
65469 +
65470 +#ifdef CONFIG_PAX_MPROTECT
65471 + if (mm->binfmt && mm->binfmt->handle_mprotect)
65472 + mm->binfmt->handle_mprotect(vma, newflags);
65473 +#endif
65474 +
65475 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
65476 - vm_get_page_prot(newflags));
65477 + vm_get_page_prot(vma->vm_flags));
65478
65479 if (vma_wants_writenotify(vma)) {
65480 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
65481 @@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65482 end = start + len;
65483 if (end <= start)
65484 return -ENOMEM;
65485 +
65486 +#ifdef CONFIG_PAX_SEGMEXEC
65487 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
65488 + if (end > SEGMEXEC_TASK_SIZE)
65489 + return -EINVAL;
65490 + } else
65491 +#endif
65492 +
65493 + if (end > TASK_SIZE)
65494 + return -EINVAL;
65495 +
65496 if (!arch_validate_prot(prot))
65497 return -EINVAL;
65498
65499 @@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65500 /*
65501 * Does the application expect PROT_READ to imply PROT_EXEC:
65502 */
65503 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
65504 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
65505 prot |= PROT_EXEC;
65506
65507 vm_flags = calc_vm_prot_bits(prot);
65508 @@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65509 if (start > vma->vm_start)
65510 prev = vma;
65511
65512 +#ifdef CONFIG_PAX_MPROTECT
65513 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
65514 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
65515 +#endif
65516 +
65517 for (nstart = start ; ; ) {
65518 unsigned long newflags;
65519
65520 @@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65521
65522 /* newflags >> 4 shift VM_MAY% in place of VM_% */
65523 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
65524 + if (prot & (PROT_WRITE | PROT_EXEC))
65525 + gr_log_rwxmprotect(vma->vm_file);
65526 +
65527 + error = -EACCES;
65528 + goto out;
65529 + }
65530 +
65531 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
65532 error = -EACCES;
65533 goto out;
65534 }
65535 @@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65536 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
65537 if (error)
65538 goto out;
65539 +
65540 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
65541 +
65542 nstart = tmp;
65543
65544 if (nstart < prev->vm_end)
65545 diff -urNp linux-2.6.32.41/mm/mremap.c linux-2.6.32.41/mm/mremap.c
65546 --- linux-2.6.32.41/mm/mremap.c 2011-04-17 17:00:52.000000000 -0400
65547 +++ linux-2.6.32.41/mm/mremap.c 2011-04-17 17:03:58.000000000 -0400
65548 @@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_str
65549 continue;
65550 pte = ptep_clear_flush(vma, old_addr, old_pte);
65551 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
65552 +
65553 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
65554 + if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
65555 + pte = pte_exprotect(pte);
65556 +#endif
65557 +
65558 set_pte_at(mm, new_addr, new_pte, pte);
65559 }
65560
65561 @@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_res
65562 if (is_vm_hugetlb_page(vma))
65563 goto Einval;
65564
65565 +#ifdef CONFIG_PAX_SEGMEXEC
65566 + if (pax_find_mirror_vma(vma))
65567 + goto Einval;
65568 +#endif
65569 +
65570 /* We can't remap across vm area boundaries */
65571 if (old_len > vma->vm_end - addr)
65572 goto Efault;
65573 @@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned
65574 unsigned long ret = -EINVAL;
65575 unsigned long charged = 0;
65576 unsigned long map_flags;
65577 + unsigned long pax_task_size = TASK_SIZE;
65578
65579 if (new_addr & ~PAGE_MASK)
65580 goto out;
65581
65582 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
65583 +#ifdef CONFIG_PAX_SEGMEXEC
65584 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
65585 + pax_task_size = SEGMEXEC_TASK_SIZE;
65586 +#endif
65587 +
65588 + pax_task_size -= PAGE_SIZE;
65589 +
65590 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
65591 goto out;
65592
65593 /* Check if the location we're moving into overlaps the
65594 * old location at all, and fail if it does.
65595 */
65596 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
65597 - goto out;
65598 -
65599 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
65600 + if (addr + old_len > new_addr && new_addr + new_len > addr)
65601 goto out;
65602
65603 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
65604 @@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long ad
65605 struct vm_area_struct *vma;
65606 unsigned long ret = -EINVAL;
65607 unsigned long charged = 0;
65608 + unsigned long pax_task_size = TASK_SIZE;
65609
65610 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
65611 goto out;
65612 @@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long ad
65613 if (!new_len)
65614 goto out;
65615
65616 +#ifdef CONFIG_PAX_SEGMEXEC
65617 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
65618 + pax_task_size = SEGMEXEC_TASK_SIZE;
65619 +#endif
65620 +
65621 + pax_task_size -= PAGE_SIZE;
65622 +
65623 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
65624 + old_len > pax_task_size || addr > pax_task_size-old_len)
65625 + goto out;
65626 +
65627 if (flags & MREMAP_FIXED) {
65628 if (flags & MREMAP_MAYMOVE)
65629 ret = mremap_to(addr, old_len, new_addr, new_len);
65630 @@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long ad
65631 addr + new_len);
65632 }
65633 ret = addr;
65634 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
65635 goto out;
65636 }
65637 }
65638 @@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long ad
65639 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
65640 if (ret)
65641 goto out;
65642 +
65643 + map_flags = vma->vm_flags;
65644 ret = move_vma(vma, addr, old_len, new_len, new_addr);
65645 + if (!(ret & ~PAGE_MASK)) {
65646 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
65647 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
65648 + }
65649 }
65650 out:
65651 if (ret & ~PAGE_MASK)
65652 diff -urNp linux-2.6.32.41/mm/nommu.c linux-2.6.32.41/mm/nommu.c
65653 --- linux-2.6.32.41/mm/nommu.c 2011-03-27 14:31:47.000000000 -0400
65654 +++ linux-2.6.32.41/mm/nommu.c 2011-04-17 15:56:46.000000000 -0400
65655 @@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMI
65656 int sysctl_overcommit_ratio = 50; /* default is 50% */
65657 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
65658 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
65659 -int heap_stack_gap = 0;
65660
65661 atomic_long_t mmap_pages_allocated;
65662
65663 @@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct m
65664 EXPORT_SYMBOL(find_vma);
65665
65666 /*
65667 - * find a VMA
65668 - * - we don't extend stack VMAs under NOMMU conditions
65669 - */
65670 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
65671 -{
65672 - return find_vma(mm, addr);
65673 -}
65674 -
65675 -/*
65676 * expand a stack to a given address
65677 * - not supported under NOMMU conditions
65678 */
65679 diff -urNp linux-2.6.32.41/mm/page_alloc.c linux-2.6.32.41/mm/page_alloc.c
65680 --- linux-2.6.32.41/mm/page_alloc.c 2011-03-27 14:31:47.000000000 -0400
65681 +++ linux-2.6.32.41/mm/page_alloc.c 2011-05-16 21:46:57.000000000 -0400
65682 @@ -587,6 +587,10 @@ static void __free_pages_ok(struct page
65683 int bad = 0;
65684 int wasMlocked = __TestClearPageMlocked(page);
65685
65686 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
65687 + unsigned long index = 1UL << order;
65688 +#endif
65689 +
65690 kmemcheck_free_shadow(page, order);
65691
65692 for (i = 0 ; i < (1 << order) ; ++i)
65693 @@ -599,6 +603,12 @@ static void __free_pages_ok(struct page
65694 debug_check_no_obj_freed(page_address(page),
65695 PAGE_SIZE << order);
65696 }
65697 +
65698 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
65699 + for (; index; --index)
65700 + sanitize_highpage(page + index - 1);
65701 +#endif
65702 +
65703 arch_free_page(page, order);
65704 kernel_map_pages(page, 1 << order, 0);
65705
65706 @@ -702,8 +712,10 @@ static int prep_new_page(struct page *pa
65707 arch_alloc_page(page, order);
65708 kernel_map_pages(page, 1 << order, 1);
65709
65710 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
65711 if (gfp_flags & __GFP_ZERO)
65712 prep_zero_page(page, order, gfp_flags);
65713 +#endif
65714
65715 if (order && (gfp_flags & __GFP_COMP))
65716 prep_compound_page(page, order);
65717 @@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct pa
65718 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
65719 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
65720 }
65721 +
65722 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
65723 + sanitize_highpage(page);
65724 +#endif
65725 +
65726 arch_free_page(page, 0);
65727 kernel_map_pages(page, 1, 0);
65728
65729 @@ -2179,6 +2196,8 @@ void show_free_areas(void)
65730 int cpu;
65731 struct zone *zone;
65732
65733 + pax_track_stack();
65734 +
65735 for_each_populated_zone(zone) {
65736 show_node(zone);
65737 printk("%s per-cpu:\n", zone->name);
65738 @@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct p
65739 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
65740 }
65741 #else
65742 -static void inline setup_usemap(struct pglist_data *pgdat,
65743 +static inline void setup_usemap(struct pglist_data *pgdat,
65744 struct zone *zone, unsigned long zonesize) {}
65745 #endif /* CONFIG_SPARSEMEM */
65746
65747 diff -urNp linux-2.6.32.41/mm/percpu.c linux-2.6.32.41/mm/percpu.c
65748 --- linux-2.6.32.41/mm/percpu.c 2011-03-27 14:31:47.000000000 -0400
65749 +++ linux-2.6.32.41/mm/percpu.c 2011-04-17 15:56:46.000000000 -0400
65750 @@ -115,7 +115,7 @@ static unsigned int pcpu_first_unit_cpu
65751 static unsigned int pcpu_last_unit_cpu __read_mostly;
65752
65753 /* the address of the first chunk which starts with the kernel static area */
65754 -void *pcpu_base_addr __read_mostly;
65755 +void *pcpu_base_addr __read_only;
65756 EXPORT_SYMBOL_GPL(pcpu_base_addr);
65757
65758 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
65759 diff -urNp linux-2.6.32.41/mm/rmap.c linux-2.6.32.41/mm/rmap.c
65760 --- linux-2.6.32.41/mm/rmap.c 2011-03-27 14:31:47.000000000 -0400
65761 +++ linux-2.6.32.41/mm/rmap.c 2011-04-17 15:56:46.000000000 -0400
65762 @@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_stru
65763 /* page_table_lock to protect against threads */
65764 spin_lock(&mm->page_table_lock);
65765 if (likely(!vma->anon_vma)) {
65766 +
65767 +#ifdef CONFIG_PAX_SEGMEXEC
65768 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
65769 +
65770 + if (vma_m) {
65771 + BUG_ON(vma_m->anon_vma);
65772 + vma_m->anon_vma = anon_vma;
65773 + list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
65774 + }
65775 +#endif
65776 +
65777 vma->anon_vma = anon_vma;
65778 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
65779 allocated = NULL;
65780 diff -urNp linux-2.6.32.41/mm/shmem.c linux-2.6.32.41/mm/shmem.c
65781 --- linux-2.6.32.41/mm/shmem.c 2011-03-27 14:31:47.000000000 -0400
65782 +++ linux-2.6.32.41/mm/shmem.c 2011-05-18 20:09:37.000000000 -0400
65783 @@ -31,7 +31,7 @@
65784 #include <linux/swap.h>
65785 #include <linux/ima.h>
65786
65787 -static struct vfsmount *shm_mnt;
65788 +struct vfsmount *shm_mnt;
65789
65790 #ifdef CONFIG_SHMEM
65791 /*
65792 @@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *
65793 goto unlock;
65794 }
65795 entry = shmem_swp_entry(info, index, NULL);
65796 + if (!entry)
65797 + goto unlock;
65798 if (entry->val) {
65799 /*
65800 * The more uptodate page coming down from a stacked
65801 @@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_ent
65802 struct vm_area_struct pvma;
65803 struct page *page;
65804
65805 + pax_track_stack();
65806 +
65807 spol = mpol_cond_copy(&mpol,
65808 mpol_shared_policy_lookup(&info->policy, idx));
65809
65810 @@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *d
65811
65812 info = SHMEM_I(inode);
65813 inode->i_size = len-1;
65814 - if (len <= (char *)inode - (char *)info) {
65815 + if (len <= (char *)inode - (char *)info && len <= 64) {
65816 /* do it inline */
65817 memcpy(info, symname, len);
65818 inode->i_op = &shmem_symlink_inline_operations;
65819 @@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block
65820 int err = -ENOMEM;
65821
65822 /* Round up to L1_CACHE_BYTES to resist false sharing */
65823 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
65824 - L1_CACHE_BYTES), GFP_KERNEL);
65825 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
65826 if (!sbinfo)
65827 return -ENOMEM;
65828
65829 diff -urNp linux-2.6.32.41/mm/slab.c linux-2.6.32.41/mm/slab.c
65830 --- linux-2.6.32.41/mm/slab.c 2011-03-27 14:31:47.000000000 -0400
65831 +++ linux-2.6.32.41/mm/slab.c 2011-05-04 17:56:20.000000000 -0400
65832 @@ -174,7 +174,7 @@
65833
65834 /* Legal flag mask for kmem_cache_create(). */
65835 #if DEBUG
65836 -# define CREATE_MASK (SLAB_RED_ZONE | \
65837 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
65838 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
65839 SLAB_CACHE_DMA | \
65840 SLAB_STORE_USER | \
65841 @@ -182,7 +182,7 @@
65842 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
65843 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
65844 #else
65845 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
65846 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
65847 SLAB_CACHE_DMA | \
65848 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
65849 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
65850 @@ -308,7 +308,7 @@ struct kmem_list3 {
65851 * Need this for bootstrapping a per node allocator.
65852 */
65853 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
65854 -struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
65855 +struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
65856 #define CACHE_CACHE 0
65857 #define SIZE_AC MAX_NUMNODES
65858 #define SIZE_L3 (2 * MAX_NUMNODES)
65859 @@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_
65860 if ((x)->max_freeable < i) \
65861 (x)->max_freeable = i; \
65862 } while (0)
65863 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
65864 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
65865 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
65866 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
65867 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
65868 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
65869 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
65870 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
65871 #else
65872 #define STATS_INC_ACTIVE(x) do { } while (0)
65873 #define STATS_DEC_ACTIVE(x) do { } while (0)
65874 @@ -558,7 +558,7 @@ static inline void *index_to_obj(struct
65875 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
65876 */
65877 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
65878 - const struct slab *slab, void *obj)
65879 + const struct slab *slab, const void *obj)
65880 {
65881 u32 offset = (obj - slab->s_mem);
65882 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
65883 @@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
65884 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
65885 sizes[INDEX_AC].cs_size,
65886 ARCH_KMALLOC_MINALIGN,
65887 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
65888 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
65889 NULL);
65890
65891 if (INDEX_AC != INDEX_L3) {
65892 @@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
65893 kmem_cache_create(names[INDEX_L3].name,
65894 sizes[INDEX_L3].cs_size,
65895 ARCH_KMALLOC_MINALIGN,
65896 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
65897 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
65898 NULL);
65899 }
65900
65901 @@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
65902 sizes->cs_cachep = kmem_cache_create(names->name,
65903 sizes->cs_size,
65904 ARCH_KMALLOC_MINALIGN,
65905 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
65906 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
65907 NULL);
65908 }
65909 #ifdef CONFIG_ZONE_DMA
65910 @@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, vo
65911 }
65912 /* cpu stats */
65913 {
65914 - unsigned long allochit = atomic_read(&cachep->allochit);
65915 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
65916 - unsigned long freehit = atomic_read(&cachep->freehit);
65917 - unsigned long freemiss = atomic_read(&cachep->freemiss);
65918 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
65919 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
65920 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
65921 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
65922
65923 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
65924 allochit, allocmiss, freehit, freemiss);
65925 @@ -4471,15 +4471,66 @@ static const struct file_operations proc
65926
65927 static int __init slab_proc_init(void)
65928 {
65929 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
65930 + mode_t gr_mode = S_IRUGO;
65931 +
65932 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
65933 + gr_mode = S_IRUSR;
65934 +#endif
65935 +
65936 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
65937 #ifdef CONFIG_DEBUG_SLAB_LEAK
65938 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
65939 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
65940 #endif
65941 return 0;
65942 }
65943 module_init(slab_proc_init);
65944 #endif
65945
65946 +void check_object_size(const void *ptr, unsigned long n, bool to)
65947 +{
65948 +
65949 +#ifdef CONFIG_PAX_USERCOPY
65950 + struct page *page;
65951 + struct kmem_cache *cachep = NULL;
65952 + struct slab *slabp;
65953 + unsigned int objnr;
65954 + unsigned long offset;
65955 +
65956 + if (!n)
65957 + return;
65958 +
65959 + if (ZERO_OR_NULL_PTR(ptr))
65960 + goto report;
65961 +
65962 + if (!virt_addr_valid(ptr))
65963 + return;
65964 +
65965 + page = virt_to_head_page(ptr);
65966 +
65967 + if (!PageSlab(page)) {
65968 + if (object_is_on_stack(ptr, n) == -1)
65969 + goto report;
65970 + return;
65971 + }
65972 +
65973 + cachep = page_get_cache(page);
65974 + if (!(cachep->flags & SLAB_USERCOPY))
65975 + goto report;
65976 +
65977 + slabp = page_get_slab(page);
65978 + objnr = obj_to_index(cachep, slabp, ptr);
65979 + BUG_ON(objnr >= cachep->num);
65980 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
65981 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
65982 + return;
65983 +
65984 +report:
65985 + pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
65986 +#endif
65987 +
65988 +}
65989 +EXPORT_SYMBOL(check_object_size);
65990 +
65991 /**
65992 * ksize - get the actual amount of memory allocated for a given object
65993 * @objp: Pointer to the object
65994 diff -urNp linux-2.6.32.41/mm/slob.c linux-2.6.32.41/mm/slob.c
65995 --- linux-2.6.32.41/mm/slob.c 2011-03-27 14:31:47.000000000 -0400
65996 +++ linux-2.6.32.41/mm/slob.c 2011-04-17 15:56:46.000000000 -0400
65997 @@ -29,7 +29,7 @@
65998 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
65999 * alloc_pages() directly, allocating compound pages so the page order
66000 * does not have to be separately tracked, and also stores the exact
66001 - * allocation size in page->private so that it can be used to accurately
66002 + * allocation size in slob_page->size so that it can be used to accurately
66003 * provide ksize(). These objects are detected in kfree() because slob_page()
66004 * is false for them.
66005 *
66006 @@ -58,6 +58,7 @@
66007 */
66008
66009 #include <linux/kernel.h>
66010 +#include <linux/sched.h>
66011 #include <linux/slab.h>
66012 #include <linux/mm.h>
66013 #include <linux/swap.h> /* struct reclaim_state */
66014 @@ -100,7 +101,8 @@ struct slob_page {
66015 unsigned long flags; /* mandatory */
66016 atomic_t _count; /* mandatory */
66017 slobidx_t units; /* free units left in page */
66018 - unsigned long pad[2];
66019 + unsigned long pad[1];
66020 + unsigned long size; /* size when >=PAGE_SIZE */
66021 slob_t *free; /* first free slob_t in page */
66022 struct list_head list; /* linked list of free pages */
66023 };
66024 @@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
66025 */
66026 static inline int is_slob_page(struct slob_page *sp)
66027 {
66028 - return PageSlab((struct page *)sp);
66029 + return PageSlab((struct page *)sp) && !sp->size;
66030 }
66031
66032 static inline void set_slob_page(struct slob_page *sp)
66033 @@ -148,7 +150,7 @@ static inline void clear_slob_page(struc
66034
66035 static inline struct slob_page *slob_page(const void *addr)
66036 {
66037 - return (struct slob_page *)virt_to_page(addr);
66038 + return (struct slob_page *)virt_to_head_page(addr);
66039 }
66040
66041 /*
66042 @@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_
66043 /*
66044 * Return the size of a slob block.
66045 */
66046 -static slobidx_t slob_units(slob_t *s)
66047 +static slobidx_t slob_units(const slob_t *s)
66048 {
66049 if (s->units > 0)
66050 return s->units;
66051 @@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
66052 /*
66053 * Return the next free slob block pointer after this one.
66054 */
66055 -static slob_t *slob_next(slob_t *s)
66056 +static slob_t *slob_next(const slob_t *s)
66057 {
66058 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
66059 slobidx_t next;
66060 @@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
66061 /*
66062 * Returns true if s is the last free block in its page.
66063 */
66064 -static int slob_last(slob_t *s)
66065 +static int slob_last(const slob_t *s)
66066 {
66067 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
66068 }
66069 @@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, i
66070 if (!page)
66071 return NULL;
66072
66073 + set_slob_page(page);
66074 return page_address(page);
66075 }
66076
66077 @@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp
66078 if (!b)
66079 return NULL;
66080 sp = slob_page(b);
66081 - set_slob_page(sp);
66082
66083 spin_lock_irqsave(&slob_lock, flags);
66084 sp->units = SLOB_UNITS(PAGE_SIZE);
66085 sp->free = b;
66086 + sp->size = 0;
66087 INIT_LIST_HEAD(&sp->list);
66088 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
66089 set_slob_page_free(sp, slob_list);
66090 @@ -475,10 +478,9 @@ out:
66091 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
66092 #endif
66093
66094 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
66095 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
66096 {
66097 - unsigned int *m;
66098 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66099 + slob_t *m;
66100 void *ret;
66101
66102 lockdep_trace_alloc(gfp);
66103 @@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t
66104
66105 if (!m)
66106 return NULL;
66107 - *m = size;
66108 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
66109 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
66110 + m[0].units = size;
66111 + m[1].units = align;
66112 ret = (void *)m + align;
66113
66114 trace_kmalloc_node(_RET_IP_, ret,
66115 @@ -501,9 +506,9 @@ void *__kmalloc_node(size_t size, gfp_t
66116
66117 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
66118 if (ret) {
66119 - struct page *page;
66120 - page = virt_to_page(ret);
66121 - page->private = size;
66122 + struct slob_page *sp;
66123 + sp = slob_page(ret);
66124 + sp->size = size;
66125 }
66126
66127 trace_kmalloc_node(_RET_IP_, ret,
66128 @@ -513,6 +518,13 @@ void *__kmalloc_node(size_t size, gfp_t
66129 kmemleak_alloc(ret, size, 1, gfp);
66130 return ret;
66131 }
66132 +
66133 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
66134 +{
66135 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66136 +
66137 + return __kmalloc_node_align(size, gfp, node, align);
66138 +}
66139 EXPORT_SYMBOL(__kmalloc_node);
66140
66141 void kfree(const void *block)
66142 @@ -528,13 +540,81 @@ void kfree(const void *block)
66143 sp = slob_page(block);
66144 if (is_slob_page(sp)) {
66145 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66146 - unsigned int *m = (unsigned int *)(block - align);
66147 - slob_free(m, *m + align);
66148 - } else
66149 + slob_t *m = (slob_t *)(block - align);
66150 + slob_free(m, m[0].units + align);
66151 + } else {
66152 + clear_slob_page(sp);
66153 + free_slob_page(sp);
66154 + sp->size = 0;
66155 put_page(&sp->page);
66156 + }
66157 }
66158 EXPORT_SYMBOL(kfree);
66159
66160 +void check_object_size(const void *ptr, unsigned long n, bool to)
66161 +{
66162 +
66163 +#ifdef CONFIG_PAX_USERCOPY
66164 + struct slob_page *sp;
66165 + const slob_t *free;
66166 + const void *base;
66167 +
66168 + if (!n)
66169 + return;
66170 +
66171 + if (ZERO_OR_NULL_PTR(ptr))
66172 + goto report;
66173 +
66174 + if (!virt_addr_valid(ptr))
66175 + return;
66176 +
66177 + sp = slob_page(ptr);
66178 + if (!PageSlab((struct page*)sp)) {
66179 + if (object_is_on_stack(ptr, n) == -1)
66180 + goto report;
66181 + return;
66182 + }
66183 +
66184 + if (sp->size) {
66185 + base = page_address(&sp->page);
66186 + if (base <= ptr && n <= sp->size - (ptr - base))
66187 + return;
66188 + goto report;
66189 + }
66190 +
66191 + /* some tricky double walking to find the chunk */
66192 + base = (void *)((unsigned long)ptr & PAGE_MASK);
66193 + free = sp->free;
66194 +
66195 + while (!slob_last(free) && (void *)free <= ptr) {
66196 + base = free + slob_units(free);
66197 + free = slob_next(free);
66198 + }
66199 +
66200 + while (base < (void *)free) {
66201 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
66202 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
66203 + int offset;
66204 +
66205 + if (ptr < base + align)
66206 + goto report;
66207 +
66208 + offset = ptr - base - align;
66209 + if (offset < m) {
66210 + if (n <= m - offset)
66211 + return;
66212 + goto report;
66213 + }
66214 + base += size;
66215 + }
66216 +
66217 +report:
66218 + pax_report_usercopy(ptr, n, to, NULL);
66219 +#endif
66220 +
66221 +}
66222 +EXPORT_SYMBOL(check_object_size);
66223 +
66224 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
66225 size_t ksize(const void *block)
66226 {
66227 @@ -547,10 +627,10 @@ size_t ksize(const void *block)
66228 sp = slob_page(block);
66229 if (is_slob_page(sp)) {
66230 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66231 - unsigned int *m = (unsigned int *)(block - align);
66232 - return SLOB_UNITS(*m) * SLOB_UNIT;
66233 + slob_t *m = (slob_t *)(block - align);
66234 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
66235 } else
66236 - return sp->page.private;
66237 + return sp->size;
66238 }
66239 EXPORT_SYMBOL(ksize);
66240
66241 @@ -605,17 +685,25 @@ void *kmem_cache_alloc_node(struct kmem_
66242 {
66243 void *b;
66244
66245 +#ifdef CONFIG_PAX_USERCOPY
66246 + b = __kmalloc_node_align(c->size, flags, node, c->align);
66247 +#else
66248 if (c->size < PAGE_SIZE) {
66249 b = slob_alloc(c->size, flags, c->align, node);
66250 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
66251 SLOB_UNITS(c->size) * SLOB_UNIT,
66252 flags, node);
66253 } else {
66254 + struct slob_page *sp;
66255 +
66256 b = slob_new_pages(flags, get_order(c->size), node);
66257 + sp = slob_page(b);
66258 + sp->size = c->size;
66259 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
66260 PAGE_SIZE << get_order(c->size),
66261 flags, node);
66262 }
66263 +#endif
66264
66265 if (c->ctor)
66266 c->ctor(b);
66267 @@ -627,10 +715,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
66268
66269 static void __kmem_cache_free(void *b, int size)
66270 {
66271 - if (size < PAGE_SIZE)
66272 + struct slob_page *sp = slob_page(b);
66273 +
66274 + if (is_slob_page(sp))
66275 slob_free(b, size);
66276 - else
66277 + else {
66278 + clear_slob_page(sp);
66279 + free_slob_page(sp);
66280 + sp->size = 0;
66281 slob_free_pages(b, get_order(size));
66282 + }
66283 }
66284
66285 static void kmem_rcu_free(struct rcu_head *head)
66286 @@ -643,15 +737,24 @@ static void kmem_rcu_free(struct rcu_hea
66287
66288 void kmem_cache_free(struct kmem_cache *c, void *b)
66289 {
66290 + int size = c->size;
66291 +
66292 +#ifdef CONFIG_PAX_USERCOPY
66293 + if (size + c->align < PAGE_SIZE) {
66294 + size += c->align;
66295 + b -= c->align;
66296 + }
66297 +#endif
66298 +
66299 kmemleak_free_recursive(b, c->flags);
66300 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
66301 struct slob_rcu *slob_rcu;
66302 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
66303 + slob_rcu = b + (size - sizeof(struct slob_rcu));
66304 INIT_RCU_HEAD(&slob_rcu->head);
66305 - slob_rcu->size = c->size;
66306 + slob_rcu->size = size;
66307 call_rcu(&slob_rcu->head, kmem_rcu_free);
66308 } else {
66309 - __kmem_cache_free(b, c->size);
66310 + __kmem_cache_free(b, size);
66311 }
66312
66313 trace_kmem_cache_free(_RET_IP_, b);
66314 diff -urNp linux-2.6.32.41/mm/slub.c linux-2.6.32.41/mm/slub.c
66315 --- linux-2.6.32.41/mm/slub.c 2011-03-27 14:31:47.000000000 -0400
66316 +++ linux-2.6.32.41/mm/slub.c 2011-04-17 15:56:46.000000000 -0400
66317 @@ -410,7 +410,7 @@ static void print_track(const char *s, s
66318 if (!t->addr)
66319 return;
66320
66321 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
66322 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
66323 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
66324 }
66325
66326 @@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *
66327
66328 page = virt_to_head_page(x);
66329
66330 + BUG_ON(!PageSlab(page));
66331 +
66332 slab_free(s, page, x, _RET_IP_);
66333
66334 trace_kmem_cache_free(_RET_IP_, x);
66335 @@ -1937,7 +1939,7 @@ static int slub_min_objects;
66336 * Merge control. If this is set then no merging of slab caches will occur.
66337 * (Could be removed. This was introduced to pacify the merge skeptics.)
66338 */
66339 -static int slub_nomerge;
66340 +static int slub_nomerge = 1;
66341
66342 /*
66343 * Calculate the order of allocation given an slab object size.
66344 @@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_c
66345 * list to avoid pounding the page allocator excessively.
66346 */
66347 set_min_partial(s, ilog2(s->size));
66348 - s->refcount = 1;
66349 + atomic_set(&s->refcount, 1);
66350 #ifdef CONFIG_NUMA
66351 s->remote_node_defrag_ratio = 1000;
66352 #endif
66353 @@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struc
66354 void kmem_cache_destroy(struct kmem_cache *s)
66355 {
66356 down_write(&slub_lock);
66357 - s->refcount--;
66358 - if (!s->refcount) {
66359 + if (atomic_dec_and_test(&s->refcount)) {
66360 list_del(&s->list);
66361 up_write(&slub_lock);
66362 if (kmem_cache_close(s)) {
66363 @@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(cha
66364 __setup("slub_nomerge", setup_slub_nomerge);
66365
66366 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
66367 - const char *name, int size, gfp_t gfp_flags)
66368 + const char *name, int size, gfp_t gfp_flags, unsigned int flags)
66369 {
66370 - unsigned int flags = 0;
66371 -
66372 if (gfp_flags & SLUB_DMA)
66373 - flags = SLAB_CACHE_DMA;
66374 + flags |= SLAB_CACHE_DMA;
66375
66376 /*
66377 * This function is called with IRQs disabled during early-boot on
66378 @@ -2915,6 +2914,46 @@ void *__kmalloc_node(size_t size, gfp_t
66379 EXPORT_SYMBOL(__kmalloc_node);
66380 #endif
66381
66382 +void check_object_size(const void *ptr, unsigned long n, bool to)
66383 +{
66384 +
66385 +#ifdef CONFIG_PAX_USERCOPY
66386 + struct page *page;
66387 + struct kmem_cache *s = NULL;
66388 + unsigned long offset;
66389 +
66390 + if (!n)
66391 + return;
66392 +
66393 + if (ZERO_OR_NULL_PTR(ptr))
66394 + goto report;
66395 +
66396 + if (!virt_addr_valid(ptr))
66397 + return;
66398 +
66399 + page = get_object_page(ptr);
66400 +
66401 + if (!page) {
66402 + if (object_is_on_stack(ptr, n) == -1)
66403 + goto report;
66404 + return;
66405 + }
66406 +
66407 + s = page->slab;
66408 + if (!(s->flags & SLAB_USERCOPY))
66409 + goto report;
66410 +
66411 + offset = (ptr - page_address(page)) % s->size;
66412 + if (offset <= s->objsize && n <= s->objsize - offset)
66413 + return;
66414 +
66415 +report:
66416 + pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
66417 +#endif
66418 +
66419 +}
66420 +EXPORT_SYMBOL(check_object_size);
66421 +
66422 size_t ksize(const void *object)
66423 {
66424 struct page *page;
66425 @@ -3185,8 +3224,8 @@ void __init kmem_cache_init(void)
66426 * kmem_cache_open for slab_state == DOWN.
66427 */
66428 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
66429 - sizeof(struct kmem_cache_node), GFP_NOWAIT);
66430 - kmalloc_caches[0].refcount = -1;
66431 + sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
66432 + atomic_set(&kmalloc_caches[0].refcount, -1);
66433 caches++;
66434
66435 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
66436 @@ -3198,18 +3237,18 @@ void __init kmem_cache_init(void)
66437 /* Caches that are not of the two-to-the-power-of size */
66438 if (KMALLOC_MIN_SIZE <= 32) {
66439 create_kmalloc_cache(&kmalloc_caches[1],
66440 - "kmalloc-96", 96, GFP_NOWAIT);
66441 + "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
66442 caches++;
66443 }
66444 if (KMALLOC_MIN_SIZE <= 64) {
66445 create_kmalloc_cache(&kmalloc_caches[2],
66446 - "kmalloc-192", 192, GFP_NOWAIT);
66447 + "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
66448 caches++;
66449 }
66450
66451 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
66452 create_kmalloc_cache(&kmalloc_caches[i],
66453 - "kmalloc", 1 << i, GFP_NOWAIT);
66454 + "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
66455 caches++;
66456 }
66457
66458 @@ -3293,7 +3332,7 @@ static int slab_unmergeable(struct kmem_
66459 /*
66460 * We may have set a slab to be unmergeable during bootstrap.
66461 */
66462 - if (s->refcount < 0)
66463 + if (atomic_read(&s->refcount) < 0)
66464 return 1;
66465
66466 return 0;
66467 @@ -3353,7 +3392,7 @@ struct kmem_cache *kmem_cache_create(con
66468 if (s) {
66469 int cpu;
66470
66471 - s->refcount++;
66472 + atomic_inc(&s->refcount);
66473 /*
66474 * Adjust the object sizes so that we clear
66475 * the complete object on kzalloc.
66476 @@ -3372,7 +3411,7 @@ struct kmem_cache *kmem_cache_create(con
66477
66478 if (sysfs_slab_alias(s, name)) {
66479 down_write(&slub_lock);
66480 - s->refcount--;
66481 + atomic_dec(&s->refcount);
66482 up_write(&slub_lock);
66483 goto err;
66484 }
66485 @@ -4101,7 +4140,7 @@ SLAB_ATTR_RO(ctor);
66486
66487 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
66488 {
66489 - return sprintf(buf, "%d\n", s->refcount - 1);
66490 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
66491 }
66492 SLAB_ATTR_RO(aliases);
66493
66494 @@ -4503,7 +4542,7 @@ static void kmem_cache_release(struct ko
66495 kfree(s);
66496 }
66497
66498 -static struct sysfs_ops slab_sysfs_ops = {
66499 +static const struct sysfs_ops slab_sysfs_ops = {
66500 .show = slab_attr_show,
66501 .store = slab_attr_store,
66502 };
66503 @@ -4522,7 +4561,7 @@ static int uevent_filter(struct kset *ks
66504 return 0;
66505 }
66506
66507 -static struct kset_uevent_ops slab_uevent_ops = {
66508 +static const struct kset_uevent_ops slab_uevent_ops = {
66509 .filter = uevent_filter,
66510 };
66511
66512 @@ -4785,7 +4824,13 @@ static const struct file_operations proc
66513
66514 static int __init slab_proc_init(void)
66515 {
66516 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
66517 + mode_t gr_mode = S_IRUGO;
66518 +
66519 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66520 + gr_mode = S_IRUSR;
66521 +#endif
66522 +
66523 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
66524 return 0;
66525 }
66526 module_init(slab_proc_init);
66527 diff -urNp linux-2.6.32.41/mm/util.c linux-2.6.32.41/mm/util.c
66528 --- linux-2.6.32.41/mm/util.c 2011-03-27 14:31:47.000000000 -0400
66529 +++ linux-2.6.32.41/mm/util.c 2011-04-17 15:56:46.000000000 -0400
66530 @@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
66531 void arch_pick_mmap_layout(struct mm_struct *mm)
66532 {
66533 mm->mmap_base = TASK_UNMAPPED_BASE;
66534 +
66535 +#ifdef CONFIG_PAX_RANDMMAP
66536 + if (mm->pax_flags & MF_PAX_RANDMMAP)
66537 + mm->mmap_base += mm->delta_mmap;
66538 +#endif
66539 +
66540 mm->get_unmapped_area = arch_get_unmapped_area;
66541 mm->unmap_area = arch_unmap_area;
66542 }
66543 diff -urNp linux-2.6.32.41/mm/vmalloc.c linux-2.6.32.41/mm/vmalloc.c
66544 --- linux-2.6.32.41/mm/vmalloc.c 2011-03-27 14:31:47.000000000 -0400
66545 +++ linux-2.6.32.41/mm/vmalloc.c 2011-04-17 15:56:46.000000000 -0400
66546 @@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd,
66547
66548 pte = pte_offset_kernel(pmd, addr);
66549 do {
66550 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
66551 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
66552 +
66553 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66554 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
66555 + BUG_ON(!pte_exec(*pte));
66556 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
66557 + continue;
66558 + }
66559 +#endif
66560 +
66561 + {
66562 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
66563 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
66564 + }
66565 } while (pte++, addr += PAGE_SIZE, addr != end);
66566 }
66567
66568 @@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, un
66569 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
66570 {
66571 pte_t *pte;
66572 + int ret = -ENOMEM;
66573
66574 /*
66575 * nr is a running index into the array which helps higher level
66576 @@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, un
66577 pte = pte_alloc_kernel(pmd, addr);
66578 if (!pte)
66579 return -ENOMEM;
66580 +
66581 + pax_open_kernel();
66582 do {
66583 struct page *page = pages[*nr];
66584
66585 - if (WARN_ON(!pte_none(*pte)))
66586 - return -EBUSY;
66587 - if (WARN_ON(!page))
66588 - return -ENOMEM;
66589 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66590 + if (!(pgprot_val(prot) & _PAGE_NX))
66591 + BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
66592 + else
66593 +#endif
66594 +
66595 + if (WARN_ON(!pte_none(*pte))) {
66596 + ret = -EBUSY;
66597 + goto out;
66598 + }
66599 + if (WARN_ON(!page)) {
66600 + ret = -ENOMEM;
66601 + goto out;
66602 + }
66603 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
66604 (*nr)++;
66605 } while (pte++, addr += PAGE_SIZE, addr != end);
66606 - return 0;
66607 + ret = 0;
66608 +out:
66609 + pax_close_kernel();
66610 + return ret;
66611 }
66612
66613 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
66614 @@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void
66615 * and fall back on vmalloc() if that fails. Others
66616 * just put it in the vmalloc space.
66617 */
66618 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
66619 +#ifdef CONFIG_MODULES
66620 +#ifdef MODULES_VADDR
66621 unsigned long addr = (unsigned long)x;
66622 if (addr >= MODULES_VADDR && addr < MODULES_END)
66623 return 1;
66624 #endif
66625 +
66626 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66627 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
66628 + return 1;
66629 +#endif
66630 +
66631 +#endif
66632 +
66633 return is_vmalloc_addr(x);
66634 }
66635
66636 @@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void
66637
66638 if (!pgd_none(*pgd)) {
66639 pud_t *pud = pud_offset(pgd, addr);
66640 +#ifdef CONFIG_X86
66641 + if (!pud_large(*pud))
66642 +#endif
66643 if (!pud_none(*pud)) {
66644 pmd_t *pmd = pmd_offset(pud, addr);
66645 +#ifdef CONFIG_X86
66646 + if (!pmd_large(*pmd))
66647 +#endif
66648 if (!pmd_none(*pmd)) {
66649 pte_t *ptep, pte;
66650
66651 @@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vm
66652 struct rb_node *tmp;
66653
66654 while (*p) {
66655 - struct vmap_area *tmp;
66656 + struct vmap_area *varea;
66657
66658 parent = *p;
66659 - tmp = rb_entry(parent, struct vmap_area, rb_node);
66660 - if (va->va_start < tmp->va_end)
66661 + varea = rb_entry(parent, struct vmap_area, rb_node);
66662 + if (va->va_start < varea->va_end)
66663 p = &(*p)->rb_left;
66664 - else if (va->va_end > tmp->va_start)
66665 + else if (va->va_end > varea->va_start)
66666 p = &(*p)->rb_right;
66667 else
66668 BUG();
66669 @@ -1232,6 +1274,16 @@ static struct vm_struct *__get_vm_area_n
66670 struct vm_struct *area;
66671
66672 BUG_ON(in_interrupt());
66673 +
66674 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
66675 + if (flags & VM_KERNEXEC) {
66676 + if (start != VMALLOC_START || end != VMALLOC_END)
66677 + return NULL;
66678 + start = (unsigned long)MODULES_EXEC_VADDR;
66679 + end = (unsigned long)MODULES_EXEC_END;
66680 + }
66681 +#endif
66682 +
66683 if (flags & VM_IOREMAP) {
66684 int bit = fls(size);
66685
66686 @@ -1457,6 +1509,11 @@ void *vmap(struct page **pages, unsigned
66687 if (count > totalram_pages)
66688 return NULL;
66689
66690 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
66691 + if (!(pgprot_val(prot) & _PAGE_NX))
66692 + flags |= VM_KERNEXEC;
66693 +#endif
66694 +
66695 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
66696 __builtin_return_address(0));
66697 if (!area)
66698 @@ -1567,6 +1624,13 @@ static void *__vmalloc_node(unsigned lon
66699 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
66700 return NULL;
66701
66702 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
66703 + if (!(pgprot_val(prot) & _PAGE_NX))
66704 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
66705 + node, gfp_mask, caller);
66706 + else
66707 +#endif
66708 +
66709 area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
66710 VMALLOC_END, node, gfp_mask, caller);
66711
66712 @@ -1585,6 +1649,7 @@ static void *__vmalloc_node(unsigned lon
66713 return addr;
66714 }
66715
66716 +#undef __vmalloc
66717 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
66718 {
66719 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
66720 @@ -1601,6 +1666,7 @@ EXPORT_SYMBOL(__vmalloc);
66721 * For tight control over page level allocator and protection flags
66722 * use __vmalloc() instead.
66723 */
66724 +#undef vmalloc
66725 void *vmalloc(unsigned long size)
66726 {
66727 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
66728 @@ -1615,6 +1681,7 @@ EXPORT_SYMBOL(vmalloc);
66729 * The resulting memory area is zeroed so it can be mapped to userspace
66730 * without leaking data.
66731 */
66732 +#undef vmalloc_user
66733 void *vmalloc_user(unsigned long size)
66734 {
66735 struct vm_struct *area;
66736 @@ -1642,6 +1709,7 @@ EXPORT_SYMBOL(vmalloc_user);
66737 * For tight control over page level allocator and protection flags
66738 * use __vmalloc() instead.
66739 */
66740 +#undef vmalloc_node
66741 void *vmalloc_node(unsigned long size, int node)
66742 {
66743 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
66744 @@ -1664,10 +1732,10 @@ EXPORT_SYMBOL(vmalloc_node);
66745 * For tight control over page level allocator and protection flags
66746 * use __vmalloc() instead.
66747 */
66748 -
66749 +#undef vmalloc_exec
66750 void *vmalloc_exec(unsigned long size)
66751 {
66752 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
66753 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
66754 -1, __builtin_return_address(0));
66755 }
66756
66757 @@ -1686,6 +1754,7 @@ void *vmalloc_exec(unsigned long size)
66758 * Allocate enough 32bit PA addressable pages to cover @size from the
66759 * page level allocator and map them into contiguous kernel virtual space.
66760 */
66761 +#undef vmalloc_32
66762 void *vmalloc_32(unsigned long size)
66763 {
66764 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
66765 @@ -1700,6 +1769,7 @@ EXPORT_SYMBOL(vmalloc_32);
66766 * The resulting memory area is 32bit addressable and zeroed so it can be
66767 * mapped to userspace without leaking data.
66768 */
66769 +#undef vmalloc_32_user
66770 void *vmalloc_32_user(unsigned long size)
66771 {
66772 struct vm_struct *area;
66773 @@ -1964,6 +2034,8 @@ int remap_vmalloc_range(struct vm_area_s
66774 unsigned long uaddr = vma->vm_start;
66775 unsigned long usize = vma->vm_end - vma->vm_start;
66776
66777 + BUG_ON(vma->vm_mirror);
66778 +
66779 if ((PAGE_SIZE-1) & (unsigned long)addr)
66780 return -EINVAL;
66781
66782 diff -urNp linux-2.6.32.41/mm/vmstat.c linux-2.6.32.41/mm/vmstat.c
66783 --- linux-2.6.32.41/mm/vmstat.c 2011-03-27 14:31:47.000000000 -0400
66784 +++ linux-2.6.32.41/mm/vmstat.c 2011-04-17 15:56:46.000000000 -0400
66785 @@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
66786 *
66787 * vm_stat contains the global counters
66788 */
66789 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
66790 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
66791 EXPORT_SYMBOL(vm_stat);
66792
66793 #ifdef CONFIG_SMP
66794 @@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
66795 v = p->vm_stat_diff[i];
66796 p->vm_stat_diff[i] = 0;
66797 local_irq_restore(flags);
66798 - atomic_long_add(v, &zone->vm_stat[i]);
66799 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
66800 global_diff[i] += v;
66801 #ifdef CONFIG_NUMA
66802 /* 3 seconds idle till flush */
66803 @@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
66804
66805 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
66806 if (global_diff[i])
66807 - atomic_long_add(global_diff[i], &vm_stat[i]);
66808 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
66809 }
66810
66811 #endif
66812 @@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
66813 start_cpu_timer(cpu);
66814 #endif
66815 #ifdef CONFIG_PROC_FS
66816 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
66817 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
66818 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
66819 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
66820 + {
66821 + mode_t gr_mode = S_IRUGO;
66822 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66823 + gr_mode = S_IRUSR;
66824 +#endif
66825 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
66826 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
66827 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66828 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
66829 +#else
66830 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
66831 +#endif
66832 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
66833 + }
66834 #endif
66835 return 0;
66836 }
66837 diff -urNp linux-2.6.32.41/net/8021q/vlan.c linux-2.6.32.41/net/8021q/vlan.c
66838 --- linux-2.6.32.41/net/8021q/vlan.c 2011-03-27 14:31:47.000000000 -0400
66839 +++ linux-2.6.32.41/net/8021q/vlan.c 2011-04-17 15:56:46.000000000 -0400
66840 @@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net
66841 err = -EPERM;
66842 if (!capable(CAP_NET_ADMIN))
66843 break;
66844 - if ((args.u.name_type >= 0) &&
66845 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
66846 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
66847 struct vlan_net *vn;
66848
66849 vn = net_generic(net, vlan_net_id);
66850 diff -urNp linux-2.6.32.41/net/atm/atm_misc.c linux-2.6.32.41/net/atm/atm_misc.c
66851 --- linux-2.6.32.41/net/atm/atm_misc.c 2011-03-27 14:31:47.000000000 -0400
66852 +++ linux-2.6.32.41/net/atm/atm_misc.c 2011-04-17 15:56:46.000000000 -0400
66853 @@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int t
66854 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
66855 return 1;
66856 atm_return(vcc,truesize);
66857 - atomic_inc(&vcc->stats->rx_drop);
66858 + atomic_inc_unchecked(&vcc->stats->rx_drop);
66859 return 0;
66860 }
66861
66862 @@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct
66863 }
66864 }
66865 atm_return(vcc,guess);
66866 - atomic_inc(&vcc->stats->rx_drop);
66867 + atomic_inc_unchecked(&vcc->stats->rx_drop);
66868 return NULL;
66869 }
66870
66871 @@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafpr
66872
66873 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
66874 {
66875 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
66876 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
66877 __SONET_ITEMS
66878 #undef __HANDLE_ITEM
66879 }
66880 @@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_sta
66881
66882 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
66883 {
66884 -#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
66885 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
66886 __SONET_ITEMS
66887 #undef __HANDLE_ITEM
66888 }
66889 diff -urNp linux-2.6.32.41/net/atm/mpoa_caches.c linux-2.6.32.41/net/atm/mpoa_caches.c
66890 --- linux-2.6.32.41/net/atm/mpoa_caches.c 2011-03-27 14:31:47.000000000 -0400
66891 +++ linux-2.6.32.41/net/atm/mpoa_caches.c 2011-05-16 21:46:57.000000000 -0400
66892 @@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_cl
66893 struct timeval now;
66894 struct k_message msg;
66895
66896 + pax_track_stack();
66897 +
66898 do_gettimeofday(&now);
66899
66900 write_lock_irq(&client->egress_lock);
66901 diff -urNp linux-2.6.32.41/net/atm/proc.c linux-2.6.32.41/net/atm/proc.c
66902 --- linux-2.6.32.41/net/atm/proc.c 2011-03-27 14:31:47.000000000 -0400
66903 +++ linux-2.6.32.41/net/atm/proc.c 2011-04-17 15:56:46.000000000 -0400
66904 @@ -43,9 +43,9 @@ static void add_stats(struct seq_file *s
66905 const struct k_atm_aal_stats *stats)
66906 {
66907 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
66908 - atomic_read(&stats->tx),atomic_read(&stats->tx_err),
66909 - atomic_read(&stats->rx),atomic_read(&stats->rx_err),
66910 - atomic_read(&stats->rx_drop));
66911 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
66912 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
66913 + atomic_read_unchecked(&stats->rx_drop));
66914 }
66915
66916 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
66917 @@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *se
66918 {
66919 struct sock *sk = sk_atm(vcc);
66920
66921 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66922 + seq_printf(seq, "%p ", NULL);
66923 +#else
66924 seq_printf(seq, "%p ", vcc);
66925 +#endif
66926 +
66927 if (!vcc->dev)
66928 seq_printf(seq, "Unassigned ");
66929 else
66930 @@ -214,7 +219,11 @@ static void svc_info(struct seq_file *se
66931 {
66932 if (!vcc->dev)
66933 seq_printf(seq, sizeof(void *) == 4 ?
66934 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66935 + "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
66936 +#else
66937 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
66938 +#endif
66939 else
66940 seq_printf(seq, "%3d %3d %5d ",
66941 vcc->dev->number, vcc->vpi, vcc->vci);
66942 diff -urNp linux-2.6.32.41/net/atm/resources.c linux-2.6.32.41/net/atm/resources.c
66943 --- linux-2.6.32.41/net/atm/resources.c 2011-03-27 14:31:47.000000000 -0400
66944 +++ linux-2.6.32.41/net/atm/resources.c 2011-04-17 15:56:46.000000000 -0400
66945 @@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *
66946 static void copy_aal_stats(struct k_atm_aal_stats *from,
66947 struct atm_aal_stats *to)
66948 {
66949 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
66950 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
66951 __AAL_STAT_ITEMS
66952 #undef __HANDLE_ITEM
66953 }
66954 @@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_
66955 static void subtract_aal_stats(struct k_atm_aal_stats *from,
66956 struct atm_aal_stats *to)
66957 {
66958 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
66959 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
66960 __AAL_STAT_ITEMS
66961 #undef __HANDLE_ITEM
66962 }
66963 diff -urNp linux-2.6.32.41/net/bluetooth/l2cap.c linux-2.6.32.41/net/bluetooth/l2cap.c
66964 --- linux-2.6.32.41/net/bluetooth/l2cap.c 2011-03-27 14:31:47.000000000 -0400
66965 +++ linux-2.6.32.41/net/bluetooth/l2cap.c 2011-06-12 06:34:08.000000000 -0400
66966 @@ -1885,7 +1885,7 @@ static int l2cap_sock_getsockopt_old(str
66967 err = -ENOTCONN;
66968 break;
66969 }
66970 -
66971 + memset(&cinfo, 0, sizeof(cinfo));
66972 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
66973 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
66974
66975 diff -urNp linux-2.6.32.41/net/bluetooth/rfcomm/sock.c linux-2.6.32.41/net/bluetooth/rfcomm/sock.c
66976 --- linux-2.6.32.41/net/bluetooth/rfcomm/sock.c 2011-03-27 14:31:47.000000000 -0400
66977 +++ linux-2.6.32.41/net/bluetooth/rfcomm/sock.c 2011-06-12 06:35:00.000000000 -0400
66978 @@ -878,6 +878,7 @@ static int rfcomm_sock_getsockopt_old(st
66979
66980 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
66981
66982 + memset(&cinfo, 0, sizeof(cinfo));
66983 cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle;
66984 memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3);
66985
66986 diff -urNp linux-2.6.32.41/net/bridge/br_private.h linux-2.6.32.41/net/bridge/br_private.h
66987 --- linux-2.6.32.41/net/bridge/br_private.h 2011-03-27 14:31:47.000000000 -0400
66988 +++ linux-2.6.32.41/net/bridge/br_private.h 2011-04-17 15:56:46.000000000 -0400
66989 @@ -254,7 +254,7 @@ extern void br_ifinfo_notify(int event,
66990
66991 #ifdef CONFIG_SYSFS
66992 /* br_sysfs_if.c */
66993 -extern struct sysfs_ops brport_sysfs_ops;
66994 +extern const struct sysfs_ops brport_sysfs_ops;
66995 extern int br_sysfs_addif(struct net_bridge_port *p);
66996
66997 /* br_sysfs_br.c */
66998 diff -urNp linux-2.6.32.41/net/bridge/br_stp_if.c linux-2.6.32.41/net/bridge/br_stp_if.c
66999 --- linux-2.6.32.41/net/bridge/br_stp_if.c 2011-03-27 14:31:47.000000000 -0400
67000 +++ linux-2.6.32.41/net/bridge/br_stp_if.c 2011-04-17 15:56:46.000000000 -0400
67001 @@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridg
67002 char *envp[] = { NULL };
67003
67004 if (br->stp_enabled == BR_USER_STP) {
67005 - r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
67006 + r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
67007 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
67008 br->dev->name, r);
67009
67010 diff -urNp linux-2.6.32.41/net/bridge/br_sysfs_if.c linux-2.6.32.41/net/bridge/br_sysfs_if.c
67011 --- linux-2.6.32.41/net/bridge/br_sysfs_if.c 2011-03-27 14:31:47.000000000 -0400
67012 +++ linux-2.6.32.41/net/bridge/br_sysfs_if.c 2011-04-17 15:56:46.000000000 -0400
67013 @@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobje
67014 return ret;
67015 }
67016
67017 -struct sysfs_ops brport_sysfs_ops = {
67018 +const struct sysfs_ops brport_sysfs_ops = {
67019 .show = brport_show,
67020 .store = brport_store,
67021 };
67022 diff -urNp linux-2.6.32.41/net/bridge/netfilter/ebtables.c linux-2.6.32.41/net/bridge/netfilter/ebtables.c
67023 --- linux-2.6.32.41/net/bridge/netfilter/ebtables.c 2011-04-17 17:00:52.000000000 -0400
67024 +++ linux-2.6.32.41/net/bridge/netfilter/ebtables.c 2011-05-16 21:46:57.000000000 -0400
67025 @@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struc
67026 unsigned int entries_size, nentries;
67027 char *entries;
67028
67029 + pax_track_stack();
67030 +
67031 if (cmd == EBT_SO_GET_ENTRIES) {
67032 entries_size = t->private->entries_size;
67033 nentries = t->private->nentries;
67034 diff -urNp linux-2.6.32.41/net/can/bcm.c linux-2.6.32.41/net/can/bcm.c
67035 --- linux-2.6.32.41/net/can/bcm.c 2011-05-10 22:12:01.000000000 -0400
67036 +++ linux-2.6.32.41/net/can/bcm.c 2011-05-10 22:12:34.000000000 -0400
67037 @@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file
67038 struct bcm_sock *bo = bcm_sk(sk);
67039 struct bcm_op *op;
67040
67041 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67042 + seq_printf(m, ">>> socket %p", NULL);
67043 + seq_printf(m, " / sk %p", NULL);
67044 + seq_printf(m, " / bo %p", NULL);
67045 +#else
67046 seq_printf(m, ">>> socket %p", sk->sk_socket);
67047 seq_printf(m, " / sk %p", sk);
67048 seq_printf(m, " / bo %p", bo);
67049 +#endif
67050 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
67051 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
67052 seq_printf(m, " <<<\n");
67053 diff -urNp linux-2.6.32.41/net/core/dev.c linux-2.6.32.41/net/core/dev.c
67054 --- linux-2.6.32.41/net/core/dev.c 2011-04-17 17:00:52.000000000 -0400
67055 +++ linux-2.6.32.41/net/core/dev.c 2011-04-17 17:04:18.000000000 -0400
67056 @@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const cha
67057 if (no_module && capable(CAP_NET_ADMIN))
67058 no_module = request_module("netdev-%s", name);
67059 if (no_module && capable(CAP_SYS_MODULE)) {
67060 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
67061 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
67062 +#else
67063 if (!request_module("%s", name))
67064 pr_err("Loading kernel module for a network device "
67065 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
67066 "instead\n", name);
67067 +#endif
67068 }
67069 }
67070 EXPORT_SYMBOL(dev_load);
67071 @@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
67072 }
67073 EXPORT_SYMBOL(netif_rx_ni);
67074
67075 -static void net_tx_action(struct softirq_action *h)
67076 +static void net_tx_action(void)
67077 {
67078 struct softnet_data *sd = &__get_cpu_var(softnet_data);
67079
67080 @@ -2826,7 +2830,7 @@ void netif_napi_del(struct napi_struct *
67081 EXPORT_SYMBOL(netif_napi_del);
67082
67083
67084 -static void net_rx_action(struct softirq_action *h)
67085 +static void net_rx_action(void)
67086 {
67087 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
67088 unsigned long time_limit = jiffies + 2;
67089 diff -urNp linux-2.6.32.41/net/core/flow.c linux-2.6.32.41/net/core/flow.c
67090 --- linux-2.6.32.41/net/core/flow.c 2011-03-27 14:31:47.000000000 -0400
67091 +++ linux-2.6.32.41/net/core/flow.c 2011-05-04 17:56:20.000000000 -0400
67092 @@ -35,11 +35,11 @@ struct flow_cache_entry {
67093 atomic_t *object_ref;
67094 };
67095
67096 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
67097 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
67098
67099 static u32 flow_hash_shift;
67100 #define flow_hash_size (1 << flow_hash_shift)
67101 -static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
67102 +static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
67103
67104 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
67105
67106 @@ -52,7 +52,7 @@ struct flow_percpu_info {
67107 u32 hash_rnd;
67108 int count;
67109 };
67110 -static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
67111 +static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
67112
67113 #define flow_hash_rnd_recalc(cpu) \
67114 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
67115 @@ -69,7 +69,7 @@ struct flow_flush_info {
67116 atomic_t cpuleft;
67117 struct completion completion;
67118 };
67119 -static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
67120 +static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
67121
67122 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
67123
67124 @@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net,
67125 if (fle->family == family &&
67126 fle->dir == dir &&
67127 flow_key_compare(key, &fle->key) == 0) {
67128 - if (fle->genid == atomic_read(&flow_cache_genid)) {
67129 + if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
67130 void *ret = fle->object;
67131
67132 if (ret)
67133 @@ -228,7 +228,7 @@ nocache:
67134 err = resolver(net, key, family, dir, &obj, &obj_ref);
67135
67136 if (fle && !err) {
67137 - fle->genid = atomic_read(&flow_cache_genid);
67138 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
67139
67140 if (fle->object)
67141 atomic_dec(fle->object_ref);
67142 @@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(uns
67143
67144 fle = flow_table(cpu)[i];
67145 for (; fle; fle = fle->next) {
67146 - unsigned genid = atomic_read(&flow_cache_genid);
67147 + unsigned genid = atomic_read_unchecked(&flow_cache_genid);
67148
67149 if (!fle->object || fle->genid == genid)
67150 continue;
67151 diff -urNp linux-2.6.32.41/net/core/skbuff.c linux-2.6.32.41/net/core/skbuff.c
67152 --- linux-2.6.32.41/net/core/skbuff.c 2011-03-27 14:31:47.000000000 -0400
67153 +++ linux-2.6.32.41/net/core/skbuff.c 2011-05-16 21:46:57.000000000 -0400
67154 @@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb,
67155 struct sk_buff *frag_iter;
67156 struct sock *sk = skb->sk;
67157
67158 + pax_track_stack();
67159 +
67160 /*
67161 * __skb_splice_bits() only fails if the output has no room left,
67162 * so no point in going over the frag_list for the error case.
67163 diff -urNp linux-2.6.32.41/net/core/sock.c linux-2.6.32.41/net/core/sock.c
67164 --- linux-2.6.32.41/net/core/sock.c 2011-03-27 14:31:47.000000000 -0400
67165 +++ linux-2.6.32.41/net/core/sock.c 2011-05-04 17:56:20.000000000 -0400
67166 @@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock,
67167 break;
67168
67169 case SO_PEERCRED:
67170 + {
67171 + struct ucred peercred;
67172 if (len > sizeof(sk->sk_peercred))
67173 len = sizeof(sk->sk_peercred);
67174 - if (copy_to_user(optval, &sk->sk_peercred, len))
67175 + peercred = sk->sk_peercred;
67176 + if (copy_to_user(optval, &peercred, len))
67177 return -EFAULT;
67178 goto lenout;
67179 + }
67180
67181 case SO_PEERNAME:
67182 {
67183 @@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock,
67184 */
67185 smp_wmb();
67186 atomic_set(&sk->sk_refcnt, 1);
67187 - atomic_set(&sk->sk_drops, 0);
67188 + atomic_set_unchecked(&sk->sk_drops, 0);
67189 }
67190 EXPORT_SYMBOL(sock_init_data);
67191
67192 diff -urNp linux-2.6.32.41/net/decnet/sysctl_net_decnet.c linux-2.6.32.41/net/decnet/sysctl_net_decnet.c
67193 --- linux-2.6.32.41/net/decnet/sysctl_net_decnet.c 2011-03-27 14:31:47.000000000 -0400
67194 +++ linux-2.6.32.41/net/decnet/sysctl_net_decnet.c 2011-04-17 15:56:46.000000000 -0400
67195 @@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_t
67196
67197 if (len > *lenp) len = *lenp;
67198
67199 - if (copy_to_user(buffer, addr, len))
67200 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
67201 return -EFAULT;
67202
67203 *lenp = len;
67204 @@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table
67205
67206 if (len > *lenp) len = *lenp;
67207
67208 - if (copy_to_user(buffer, devname, len))
67209 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
67210 return -EFAULT;
67211
67212 *lenp = len;
67213 diff -urNp linux-2.6.32.41/net/econet/Kconfig linux-2.6.32.41/net/econet/Kconfig
67214 --- linux-2.6.32.41/net/econet/Kconfig 2011-03-27 14:31:47.000000000 -0400
67215 +++ linux-2.6.32.41/net/econet/Kconfig 2011-04-17 15:56:46.000000000 -0400
67216 @@ -4,7 +4,7 @@
67217
67218 config ECONET
67219 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
67220 - depends on EXPERIMENTAL && INET
67221 + depends on EXPERIMENTAL && INET && BROKEN
67222 ---help---
67223 Econet is a fairly old and slow networking protocol mainly used by
67224 Acorn computers to access file and print servers. It uses native
67225 diff -urNp linux-2.6.32.41/net/ieee802154/dgram.c linux-2.6.32.41/net/ieee802154/dgram.c
67226 --- linux-2.6.32.41/net/ieee802154/dgram.c 2011-03-27 14:31:47.000000000 -0400
67227 +++ linux-2.6.32.41/net/ieee802154/dgram.c 2011-05-04 17:56:28.000000000 -0400
67228 @@ -318,7 +318,7 @@ out:
67229 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
67230 {
67231 if (sock_queue_rcv_skb(sk, skb) < 0) {
67232 - atomic_inc(&sk->sk_drops);
67233 + atomic_inc_unchecked(&sk->sk_drops);
67234 kfree_skb(skb);
67235 return NET_RX_DROP;
67236 }
67237 diff -urNp linux-2.6.32.41/net/ieee802154/raw.c linux-2.6.32.41/net/ieee802154/raw.c
67238 --- linux-2.6.32.41/net/ieee802154/raw.c 2011-03-27 14:31:47.000000000 -0400
67239 +++ linux-2.6.32.41/net/ieee802154/raw.c 2011-05-04 17:56:28.000000000 -0400
67240 @@ -206,7 +206,7 @@ out:
67241 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
67242 {
67243 if (sock_queue_rcv_skb(sk, skb) < 0) {
67244 - atomic_inc(&sk->sk_drops);
67245 + atomic_inc_unchecked(&sk->sk_drops);
67246 kfree_skb(skb);
67247 return NET_RX_DROP;
67248 }
67249 diff -urNp linux-2.6.32.41/net/ipv4/inet_diag.c linux-2.6.32.41/net/ipv4/inet_diag.c
67250 --- linux-2.6.32.41/net/ipv4/inet_diag.c 2011-04-17 17:00:52.000000000 -0400
67251 +++ linux-2.6.32.41/net/ipv4/inet_diag.c 2011-04-17 17:04:18.000000000 -0400
67252 @@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct soc
67253 r->idiag_retrans = 0;
67254
67255 r->id.idiag_if = sk->sk_bound_dev_if;
67256 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67257 + r->id.idiag_cookie[0] = 0;
67258 + r->id.idiag_cookie[1] = 0;
67259 +#else
67260 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
67261 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
67262 +#endif
67263
67264 r->id.idiag_sport = inet->sport;
67265 r->id.idiag_dport = inet->dport;
67266 @@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct in
67267 r->idiag_family = tw->tw_family;
67268 r->idiag_retrans = 0;
67269 r->id.idiag_if = tw->tw_bound_dev_if;
67270 +
67271 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67272 + r->id.idiag_cookie[0] = 0;
67273 + r->id.idiag_cookie[1] = 0;
67274 +#else
67275 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
67276 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
67277 +#endif
67278 +
67279 r->id.idiag_sport = tw->tw_sport;
67280 r->id.idiag_dport = tw->tw_dport;
67281 r->id.idiag_src[0] = tw->tw_rcv_saddr;
67282 @@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk
67283 if (sk == NULL)
67284 goto unlock;
67285
67286 +#ifndef CONFIG_GRKERNSEC_HIDESYM
67287 err = -ESTALE;
67288 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
67289 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
67290 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
67291 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
67292 goto out;
67293 +#endif
67294
67295 err = -ENOMEM;
67296 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
67297 @@ -581,8 +595,14 @@ static int inet_diag_fill_req(struct sk_
67298 r->idiag_retrans = req->retrans;
67299
67300 r->id.idiag_if = sk->sk_bound_dev_if;
67301 +
67302 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67303 + r->id.idiag_cookie[0] = 0;
67304 + r->id.idiag_cookie[1] = 0;
67305 +#else
67306 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
67307 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
67308 +#endif
67309
67310 tmo = req->expires - jiffies;
67311 if (tmo < 0)
67312 diff -urNp linux-2.6.32.41/net/ipv4/inet_hashtables.c linux-2.6.32.41/net/ipv4/inet_hashtables.c
67313 --- linux-2.6.32.41/net/ipv4/inet_hashtables.c 2011-03-27 14:31:47.000000000 -0400
67314 +++ linux-2.6.32.41/net/ipv4/inet_hashtables.c 2011-04-17 15:56:46.000000000 -0400
67315 @@ -18,11 +18,14 @@
67316 #include <linux/sched.h>
67317 #include <linux/slab.h>
67318 #include <linux/wait.h>
67319 +#include <linux/security.h>
67320
67321 #include <net/inet_connection_sock.h>
67322 #include <net/inet_hashtables.h>
67323 #include <net/ip.h>
67324
67325 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
67326 +
67327 /*
67328 * Allocate and initialize a new local port bind bucket.
67329 * The bindhash mutex for snum's hash chain must be held here.
67330 @@ -490,6 +493,8 @@ ok:
67331 }
67332 spin_unlock(&head->lock);
67333
67334 + gr_update_task_in_ip_table(current, inet_sk(sk));
67335 +
67336 if (tw) {
67337 inet_twsk_deschedule(tw, death_row);
67338 inet_twsk_put(tw);
67339 diff -urNp linux-2.6.32.41/net/ipv4/inetpeer.c linux-2.6.32.41/net/ipv4/inetpeer.c
67340 --- linux-2.6.32.41/net/ipv4/inetpeer.c 2011-03-27 14:31:47.000000000 -0400
67341 +++ linux-2.6.32.41/net/ipv4/inetpeer.c 2011-05-16 21:46:57.000000000 -0400
67342 @@ -366,6 +366,8 @@ struct inet_peer *inet_getpeer(__be32 da
67343 struct inet_peer *p, *n;
67344 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
67345
67346 + pax_track_stack();
67347 +
67348 /* Look up for the address quickly. */
67349 read_lock_bh(&peer_pool_lock);
67350 p = lookup(daddr, NULL);
67351 @@ -389,7 +391,7 @@ struct inet_peer *inet_getpeer(__be32 da
67352 return NULL;
67353 n->v4daddr = daddr;
67354 atomic_set(&n->refcnt, 1);
67355 - atomic_set(&n->rid, 0);
67356 + atomic_set_unchecked(&n->rid, 0);
67357 n->ip_id_count = secure_ip_id(daddr);
67358 n->tcp_ts_stamp = 0;
67359
67360 diff -urNp linux-2.6.32.41/net/ipv4/ip_fragment.c linux-2.6.32.41/net/ipv4/ip_fragment.c
67361 --- linux-2.6.32.41/net/ipv4/ip_fragment.c 2011-03-27 14:31:47.000000000 -0400
67362 +++ linux-2.6.32.41/net/ipv4/ip_fragment.c 2011-04-17 15:56:46.000000000 -0400
67363 @@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct
67364 return 0;
67365
67366 start = qp->rid;
67367 - end = atomic_inc_return(&peer->rid);
67368 + end = atomic_inc_return_unchecked(&peer->rid);
67369 qp->rid = end;
67370
67371 rc = qp->q.fragments && (end - start) > max;
67372 diff -urNp linux-2.6.32.41/net/ipv4/ip_sockglue.c linux-2.6.32.41/net/ipv4/ip_sockglue.c
67373 --- linux-2.6.32.41/net/ipv4/ip_sockglue.c 2011-03-27 14:31:47.000000000 -0400
67374 +++ linux-2.6.32.41/net/ipv4/ip_sockglue.c 2011-05-16 21:46:57.000000000 -0400
67375 @@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock
67376 int val;
67377 int len;
67378
67379 + pax_track_stack();
67380 +
67381 if (level != SOL_IP)
67382 return -EOPNOTSUPP;
67383
67384 diff -urNp linux-2.6.32.41/net/ipv4/netfilter/arp_tables.c linux-2.6.32.41/net/ipv4/netfilter/arp_tables.c
67385 --- linux-2.6.32.41/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:00:52.000000000 -0400
67386 +++ linux-2.6.32.41/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:04:18.000000000 -0400
67387 @@ -934,6 +934,7 @@ static int get_info(struct net *net, voi
67388 private = &tmp;
67389 }
67390 #endif
67391 + memset(&info, 0, sizeof(info));
67392 info.valid_hooks = t->valid_hooks;
67393 memcpy(info.hook_entry, private->hook_entry,
67394 sizeof(info.hook_entry));
67395 diff -urNp linux-2.6.32.41/net/ipv4/netfilter/ip_tables.c linux-2.6.32.41/net/ipv4/netfilter/ip_tables.c
67396 --- linux-2.6.32.41/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:00:52.000000000 -0400
67397 +++ linux-2.6.32.41/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:04:18.000000000 -0400
67398 @@ -1141,6 +1141,7 @@ static int get_info(struct net *net, voi
67399 private = &tmp;
67400 }
67401 #endif
67402 + memset(&info, 0, sizeof(info));
67403 info.valid_hooks = t->valid_hooks;
67404 memcpy(info.hook_entry, private->hook_entry,
67405 sizeof(info.hook_entry));
67406 diff -urNp linux-2.6.32.41/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-2.6.32.41/net/ipv4/netfilter/nf_nat_snmp_basic.c
67407 --- linux-2.6.32.41/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-03-27 14:31:47.000000000 -0400
67408 +++ linux-2.6.32.41/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-04-17 15:56:46.000000000 -0400
67409 @@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(
67410
67411 *len = 0;
67412
67413 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
67414 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
67415 if (*octets == NULL) {
67416 if (net_ratelimit())
67417 printk("OOM in bsalg (%d)\n", __LINE__);
67418 diff -urNp linux-2.6.32.41/net/ipv4/raw.c linux-2.6.32.41/net/ipv4/raw.c
67419 --- linux-2.6.32.41/net/ipv4/raw.c 2011-03-27 14:31:47.000000000 -0400
67420 +++ linux-2.6.32.41/net/ipv4/raw.c 2011-05-04 17:59:08.000000000 -0400
67421 @@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk,
67422 /* Charge it to the socket. */
67423
67424 if (sock_queue_rcv_skb(sk, skb) < 0) {
67425 - atomic_inc(&sk->sk_drops);
67426 + atomic_inc_unchecked(&sk->sk_drops);
67427 kfree_skb(skb);
67428 return NET_RX_DROP;
67429 }
67430 @@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk,
67431 int raw_rcv(struct sock *sk, struct sk_buff *skb)
67432 {
67433 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
67434 - atomic_inc(&sk->sk_drops);
67435 + atomic_inc_unchecked(&sk->sk_drops);
67436 kfree_skb(skb);
67437 return NET_RX_DROP;
67438 }
67439 @@ -724,15 +724,22 @@ static int raw_init(struct sock *sk)
67440
67441 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
67442 {
67443 + struct icmp_filter filter;
67444 +
67445 + if (optlen < 0)
67446 + return -EINVAL;
67447 if (optlen > sizeof(struct icmp_filter))
67448 optlen = sizeof(struct icmp_filter);
67449 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
67450 + if (copy_from_user(&filter, optval, optlen))
67451 return -EFAULT;
67452 + memcpy(&raw_sk(sk)->filter, &filter, optlen);
67453 +
67454 return 0;
67455 }
67456
67457 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
67458 {
67459 + struct icmp_filter filter;
67460 int len, ret = -EFAULT;
67461
67462 if (get_user(len, optlen))
67463 @@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock
67464 if (len > sizeof(struct icmp_filter))
67465 len = sizeof(struct icmp_filter);
67466 ret = -EFAULT;
67467 + memcpy(&filter, &raw_sk(sk)->filter, len);
67468 if (put_user(len, optlen) ||
67469 - copy_to_user(optval, &raw_sk(sk)->filter, len))
67470 + copy_to_user(optval, &filter, len))
67471 goto out;
67472 ret = 0;
67473 out: return ret;
67474 @@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq
67475 sk_wmem_alloc_get(sp),
67476 sk_rmem_alloc_get(sp),
67477 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
67478 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
67479 + atomic_read(&sp->sk_refcnt),
67480 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67481 + NULL,
67482 +#else
67483 + sp,
67484 +#endif
67485 + atomic_read_unchecked(&sp->sk_drops));
67486 }
67487
67488 static int raw_seq_show(struct seq_file *seq, void *v)
67489 diff -urNp linux-2.6.32.41/net/ipv4/route.c linux-2.6.32.41/net/ipv4/route.c
67490 --- linux-2.6.32.41/net/ipv4/route.c 2011-03-27 14:31:47.000000000 -0400
67491 +++ linux-2.6.32.41/net/ipv4/route.c 2011-05-04 17:56:28.000000000 -0400
67492 @@ -268,7 +268,7 @@ static inline unsigned int rt_hash(__be3
67493
67494 static inline int rt_genid(struct net *net)
67495 {
67496 - return atomic_read(&net->ipv4.rt_genid);
67497 + return atomic_read_unchecked(&net->ipv4.rt_genid);
67498 }
67499
67500 #ifdef CONFIG_PROC_FS
67501 @@ -888,7 +888,7 @@ static void rt_cache_invalidate(struct n
67502 unsigned char shuffle;
67503
67504 get_random_bytes(&shuffle, sizeof(shuffle));
67505 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
67506 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
67507 }
67508
67509 /*
67510 @@ -3356,7 +3356,7 @@ static __net_initdata struct pernet_oper
67511
67512 static __net_init int rt_secret_timer_init(struct net *net)
67513 {
67514 - atomic_set(&net->ipv4.rt_genid,
67515 + atomic_set_unchecked(&net->ipv4.rt_genid,
67516 (int) ((num_physpages ^ (num_physpages>>8)) ^
67517 (jiffies ^ (jiffies >> 7))));
67518
67519 diff -urNp linux-2.6.32.41/net/ipv4/tcp.c linux-2.6.32.41/net/ipv4/tcp.c
67520 --- linux-2.6.32.41/net/ipv4/tcp.c 2011-03-27 14:31:47.000000000 -0400
67521 +++ linux-2.6.32.41/net/ipv4/tcp.c 2011-05-16 21:46:57.000000000 -0400
67522 @@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock
67523 int val;
67524 int err = 0;
67525
67526 + pax_track_stack();
67527 +
67528 /* This is a string value all the others are int's */
67529 if (optname == TCP_CONGESTION) {
67530 char name[TCP_CA_NAME_MAX];
67531 @@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock
67532 struct tcp_sock *tp = tcp_sk(sk);
67533 int val, len;
67534
67535 + pax_track_stack();
67536 +
67537 if (get_user(len, optlen))
67538 return -EFAULT;
67539
67540 diff -urNp linux-2.6.32.41/net/ipv4/tcp_ipv4.c linux-2.6.32.41/net/ipv4/tcp_ipv4.c
67541 --- linux-2.6.32.41/net/ipv4/tcp_ipv4.c 2011-03-27 14:31:47.000000000 -0400
67542 +++ linux-2.6.32.41/net/ipv4/tcp_ipv4.c 2011-04-17 15:56:46.000000000 -0400
67543 @@ -84,6 +84,9 @@
67544 int sysctl_tcp_tw_reuse __read_mostly;
67545 int sysctl_tcp_low_latency __read_mostly;
67546
67547 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67548 +extern int grsec_enable_blackhole;
67549 +#endif
67550
67551 #ifdef CONFIG_TCP_MD5SIG
67552 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
67553 @@ -1542,6 +1545,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
67554 return 0;
67555
67556 reset:
67557 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67558 + if (!grsec_enable_blackhole)
67559 +#endif
67560 tcp_v4_send_reset(rsk, skb);
67561 discard:
67562 kfree_skb(skb);
67563 @@ -1603,12 +1609,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
67564 TCP_SKB_CB(skb)->sacked = 0;
67565
67566 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
67567 - if (!sk)
67568 + if (!sk) {
67569 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67570 + ret = 1;
67571 +#endif
67572 goto no_tcp_socket;
67573 + }
67574
67575 process:
67576 - if (sk->sk_state == TCP_TIME_WAIT)
67577 + if (sk->sk_state == TCP_TIME_WAIT) {
67578 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67579 + ret = 2;
67580 +#endif
67581 goto do_time_wait;
67582 + }
67583
67584 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
67585 goto discard_and_relse;
67586 @@ -1650,6 +1664,10 @@ no_tcp_socket:
67587 bad_packet:
67588 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
67589 } else {
67590 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67591 + if (!grsec_enable_blackhole || (ret == 1 &&
67592 + (skb->dev->flags & IFF_LOOPBACK)))
67593 +#endif
67594 tcp_v4_send_reset(NULL, skb);
67595 }
67596
67597 @@ -2237,7 +2255,11 @@ static void get_openreq4(struct sock *sk
67598 0, /* non standard timer */
67599 0, /* open_requests have no inode */
67600 atomic_read(&sk->sk_refcnt),
67601 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67602 + NULL,
67603 +#else
67604 req,
67605 +#endif
67606 len);
67607 }
67608
67609 @@ -2279,7 +2301,12 @@ static void get_tcp4_sock(struct sock *s
67610 sock_i_uid(sk),
67611 icsk->icsk_probes_out,
67612 sock_i_ino(sk),
67613 - atomic_read(&sk->sk_refcnt), sk,
67614 + atomic_read(&sk->sk_refcnt),
67615 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67616 + NULL,
67617 +#else
67618 + sk,
67619 +#endif
67620 jiffies_to_clock_t(icsk->icsk_rto),
67621 jiffies_to_clock_t(icsk->icsk_ack.ato),
67622 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
67623 @@ -2307,7 +2334,13 @@ static void get_timewait4_sock(struct in
67624 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
67625 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
67626 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
67627 - atomic_read(&tw->tw_refcnt), tw, len);
67628 + atomic_read(&tw->tw_refcnt),
67629 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67630 + NULL,
67631 +#else
67632 + tw,
67633 +#endif
67634 + len);
67635 }
67636
67637 #define TMPSZ 150
67638 diff -urNp linux-2.6.32.41/net/ipv4/tcp_minisocks.c linux-2.6.32.41/net/ipv4/tcp_minisocks.c
67639 --- linux-2.6.32.41/net/ipv4/tcp_minisocks.c 2011-03-27 14:31:47.000000000 -0400
67640 +++ linux-2.6.32.41/net/ipv4/tcp_minisocks.c 2011-04-17 15:56:46.000000000 -0400
67641 @@ -26,6 +26,10 @@
67642 #include <net/inet_common.h>
67643 #include <net/xfrm.h>
67644
67645 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67646 +extern int grsec_enable_blackhole;
67647 +#endif
67648 +
67649 #ifdef CONFIG_SYSCTL
67650 #define SYNC_INIT 0 /* let the user enable it */
67651 #else
67652 @@ -672,6 +676,10 @@ listen_overflow:
67653
67654 embryonic_reset:
67655 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
67656 +
67657 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67658 + if (!grsec_enable_blackhole)
67659 +#endif
67660 if (!(flg & TCP_FLAG_RST))
67661 req->rsk_ops->send_reset(sk, skb);
67662
67663 diff -urNp linux-2.6.32.41/net/ipv4/tcp_output.c linux-2.6.32.41/net/ipv4/tcp_output.c
67664 --- linux-2.6.32.41/net/ipv4/tcp_output.c 2011-03-27 14:31:47.000000000 -0400
67665 +++ linux-2.6.32.41/net/ipv4/tcp_output.c 2011-05-16 21:46:57.000000000 -0400
67666 @@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct s
67667 __u8 *md5_hash_location;
67668 int mss;
67669
67670 + pax_track_stack();
67671 +
67672 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
67673 if (skb == NULL)
67674 return NULL;
67675 diff -urNp linux-2.6.32.41/net/ipv4/tcp_probe.c linux-2.6.32.41/net/ipv4/tcp_probe.c
67676 --- linux-2.6.32.41/net/ipv4/tcp_probe.c 2011-03-27 14:31:47.000000000 -0400
67677 +++ linux-2.6.32.41/net/ipv4/tcp_probe.c 2011-04-17 15:56:46.000000000 -0400
67678 @@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file
67679 if (cnt + width >= len)
67680 break;
67681
67682 - if (copy_to_user(buf + cnt, tbuf, width))
67683 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
67684 return -EFAULT;
67685 cnt += width;
67686 }
67687 diff -urNp linux-2.6.32.41/net/ipv4/tcp_timer.c linux-2.6.32.41/net/ipv4/tcp_timer.c
67688 --- linux-2.6.32.41/net/ipv4/tcp_timer.c 2011-03-27 14:31:47.000000000 -0400
67689 +++ linux-2.6.32.41/net/ipv4/tcp_timer.c 2011-04-17 15:56:46.000000000 -0400
67690 @@ -21,6 +21,10 @@
67691 #include <linux/module.h>
67692 #include <net/tcp.h>
67693
67694 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67695 +extern int grsec_lastack_retries;
67696 +#endif
67697 +
67698 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
67699 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
67700 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
67701 @@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock
67702 }
67703 }
67704
67705 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67706 + if ((sk->sk_state == TCP_LAST_ACK) &&
67707 + (grsec_lastack_retries > 0) &&
67708 + (grsec_lastack_retries < retry_until))
67709 + retry_until = grsec_lastack_retries;
67710 +#endif
67711 +
67712 if (retransmits_timed_out(sk, retry_until)) {
67713 /* Has it gone just too far? */
67714 tcp_write_err(sk);
67715 diff -urNp linux-2.6.32.41/net/ipv4/udp.c linux-2.6.32.41/net/ipv4/udp.c
67716 --- linux-2.6.32.41/net/ipv4/udp.c 2011-03-27 14:31:47.000000000 -0400
67717 +++ linux-2.6.32.41/net/ipv4/udp.c 2011-05-04 17:57:28.000000000 -0400
67718 @@ -86,6 +86,7 @@
67719 #include <linux/types.h>
67720 #include <linux/fcntl.h>
67721 #include <linux/module.h>
67722 +#include <linux/security.h>
67723 #include <linux/socket.h>
67724 #include <linux/sockios.h>
67725 #include <linux/igmp.h>
67726 @@ -106,6 +107,10 @@
67727 #include <net/xfrm.h>
67728 #include "udp_impl.h"
67729
67730 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67731 +extern int grsec_enable_blackhole;
67732 +#endif
67733 +
67734 struct udp_table udp_table;
67735 EXPORT_SYMBOL(udp_table);
67736
67737 @@ -371,6 +376,9 @@ found:
67738 return s;
67739 }
67740
67741 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
67742 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
67743 +
67744 /*
67745 * This routine is called by the ICMP module when it gets some
67746 * sort of error condition. If err < 0 then the socket should
67747 @@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
67748 dport = usin->sin_port;
67749 if (dport == 0)
67750 return -EINVAL;
67751 +
67752 + err = gr_search_udp_sendmsg(sk, usin);
67753 + if (err)
67754 + return err;
67755 } else {
67756 if (sk->sk_state != TCP_ESTABLISHED)
67757 return -EDESTADDRREQ;
67758 +
67759 + err = gr_search_udp_sendmsg(sk, NULL);
67760 + if (err)
67761 + return err;
67762 +
67763 daddr = inet->daddr;
67764 dport = inet->dport;
67765 /* Open fast path for connected socket.
67766 @@ -945,6 +962,10 @@ try_again:
67767 if (!skb)
67768 goto out;
67769
67770 + err = gr_search_udp_recvmsg(sk, skb);
67771 + if (err)
67772 + goto out_free;
67773 +
67774 ulen = skb->len - sizeof(struct udphdr);
67775 copied = len;
67776 if (copied > ulen)
67777 @@ -1065,7 +1086,7 @@ static int __udp_queue_rcv_skb(struct so
67778 if (rc == -ENOMEM) {
67779 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
67780 is_udplite);
67781 - atomic_inc(&sk->sk_drops);
67782 + atomic_inc_unchecked(&sk->sk_drops);
67783 }
67784 goto drop;
67785 }
67786 @@ -1335,6 +1356,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
67787 goto csum_error;
67788
67789 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
67790 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67791 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
67792 +#endif
67793 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
67794
67795 /*
67796 @@ -1755,8 +1779,13 @@ static void udp4_format_sock(struct sock
67797 sk_wmem_alloc_get(sp),
67798 sk_rmem_alloc_get(sp),
67799 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
67800 - atomic_read(&sp->sk_refcnt), sp,
67801 - atomic_read(&sp->sk_drops), len);
67802 + atomic_read(&sp->sk_refcnt),
67803 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67804 + NULL,
67805 +#else
67806 + sp,
67807 +#endif
67808 + atomic_read_unchecked(&sp->sk_drops), len);
67809 }
67810
67811 int udp4_seq_show(struct seq_file *seq, void *v)
67812 diff -urNp linux-2.6.32.41/net/ipv6/inet6_connection_sock.c linux-2.6.32.41/net/ipv6/inet6_connection_sock.c
67813 --- linux-2.6.32.41/net/ipv6/inet6_connection_sock.c 2011-03-27 14:31:47.000000000 -0400
67814 +++ linux-2.6.32.41/net/ipv6/inet6_connection_sock.c 2011-05-04 17:56:28.000000000 -0400
67815 @@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *
67816 #ifdef CONFIG_XFRM
67817 {
67818 struct rt6_info *rt = (struct rt6_info *)dst;
67819 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
67820 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
67821 }
67822 #endif
67823 }
67824 @@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(
67825 #ifdef CONFIG_XFRM
67826 if (dst) {
67827 struct rt6_info *rt = (struct rt6_info *)dst;
67828 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
67829 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
67830 sk->sk_dst_cache = NULL;
67831 dst_release(dst);
67832 dst = NULL;
67833 diff -urNp linux-2.6.32.41/net/ipv6/inet6_hashtables.c linux-2.6.32.41/net/ipv6/inet6_hashtables.c
67834 --- linux-2.6.32.41/net/ipv6/inet6_hashtables.c 2011-03-27 14:31:47.000000000 -0400
67835 +++ linux-2.6.32.41/net/ipv6/inet6_hashtables.c 2011-05-04 17:56:28.000000000 -0400
67836 @@ -118,7 +118,7 @@ out:
67837 }
67838 EXPORT_SYMBOL(__inet6_lookup_established);
67839
67840 -static int inline compute_score(struct sock *sk, struct net *net,
67841 +static inline int compute_score(struct sock *sk, struct net *net,
67842 const unsigned short hnum,
67843 const struct in6_addr *daddr,
67844 const int dif)
67845 diff -urNp linux-2.6.32.41/net/ipv6/ipv6_sockglue.c linux-2.6.32.41/net/ipv6/ipv6_sockglue.c
67846 --- linux-2.6.32.41/net/ipv6/ipv6_sockglue.c 2011-03-27 14:31:47.000000000 -0400
67847 +++ linux-2.6.32.41/net/ipv6/ipv6_sockglue.c 2011-05-16 21:46:57.000000000 -0400
67848 @@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct soc
67849 int val, valbool;
67850 int retv = -ENOPROTOOPT;
67851
67852 + pax_track_stack();
67853 +
67854 if (optval == NULL)
67855 val=0;
67856 else {
67857 @@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct soc
67858 int len;
67859 int val;
67860
67861 + pax_track_stack();
67862 +
67863 if (ip6_mroute_opt(optname))
67864 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
67865
67866 diff -urNp linux-2.6.32.41/net/ipv6/netfilter/ip6_tables.c linux-2.6.32.41/net/ipv6/netfilter/ip6_tables.c
67867 --- linux-2.6.32.41/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:00:52.000000000 -0400
67868 +++ linux-2.6.32.41/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:04:18.000000000 -0400
67869 @@ -1173,6 +1173,7 @@ static int get_info(struct net *net, voi
67870 private = &tmp;
67871 }
67872 #endif
67873 + memset(&info, 0, sizeof(info));
67874 info.valid_hooks = t->valid_hooks;
67875 memcpy(info.hook_entry, private->hook_entry,
67876 sizeof(info.hook_entry));
67877 diff -urNp linux-2.6.32.41/net/ipv6/raw.c linux-2.6.32.41/net/ipv6/raw.c
67878 --- linux-2.6.32.41/net/ipv6/raw.c 2011-03-27 14:31:47.000000000 -0400
67879 +++ linux-2.6.32.41/net/ipv6/raw.c 2011-05-16 21:46:57.000000000 -0400
67880 @@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct s
67881 {
67882 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
67883 skb_checksum_complete(skb)) {
67884 - atomic_inc(&sk->sk_drops);
67885 + atomic_inc_unchecked(&sk->sk_drops);
67886 kfree_skb(skb);
67887 return NET_RX_DROP;
67888 }
67889
67890 /* Charge it to the socket. */
67891 if (sock_queue_rcv_skb(sk,skb)<0) {
67892 - atomic_inc(&sk->sk_drops);
67893 + atomic_inc_unchecked(&sk->sk_drops);
67894 kfree_skb(skb);
67895 return NET_RX_DROP;
67896 }
67897 @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
67898 struct raw6_sock *rp = raw6_sk(sk);
67899
67900 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
67901 - atomic_inc(&sk->sk_drops);
67902 + atomic_inc_unchecked(&sk->sk_drops);
67903 kfree_skb(skb);
67904 return NET_RX_DROP;
67905 }
67906 @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
67907
67908 if (inet->hdrincl) {
67909 if (skb_checksum_complete(skb)) {
67910 - atomic_inc(&sk->sk_drops);
67911 + atomic_inc_unchecked(&sk->sk_drops);
67912 kfree_skb(skb);
67913 return NET_RX_DROP;
67914 }
67915 @@ -518,7 +518,7 @@ csum_copy_err:
67916 as some normal condition.
67917 */
67918 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
67919 - atomic_inc(&sk->sk_drops);
67920 + atomic_inc_unchecked(&sk->sk_drops);
67921 goto out;
67922 }
67923
67924 @@ -600,7 +600,7 @@ out:
67925 return err;
67926 }
67927
67928 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
67929 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
67930 struct flowi *fl, struct rt6_info *rt,
67931 unsigned int flags)
67932 {
67933 @@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *i
67934 u16 proto;
67935 int err;
67936
67937 + pax_track_stack();
67938 +
67939 /* Rough check on arithmetic overflow,
67940 better check is made in ip6_append_data().
67941 */
67942 @@ -916,12 +918,17 @@ do_confirm:
67943 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
67944 char __user *optval, int optlen)
67945 {
67946 + struct icmp6_filter filter;
67947 +
67948 switch (optname) {
67949 case ICMPV6_FILTER:
67950 + if (optlen < 0)
67951 + return -EINVAL;
67952 if (optlen > sizeof(struct icmp6_filter))
67953 optlen = sizeof(struct icmp6_filter);
67954 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
67955 + if (copy_from_user(&filter, optval, optlen))
67956 return -EFAULT;
67957 + memcpy(&raw6_sk(sk)->filter, &filter, optlen);
67958 return 0;
67959 default:
67960 return -ENOPROTOOPT;
67961 @@ -933,6 +940,7 @@ static int rawv6_seticmpfilter(struct so
67962 static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
67963 char __user *optval, int __user *optlen)
67964 {
67965 + struct icmp6_filter filter;
67966 int len;
67967
67968 switch (optname) {
67969 @@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct so
67970 len = sizeof(struct icmp6_filter);
67971 if (put_user(len, optlen))
67972 return -EFAULT;
67973 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
67974 + memcpy(&filter, &raw6_sk(sk)->filter, len);
67975 + if (copy_to_user(optval, &filter, len))
67976 return -EFAULT;
67977 return 0;
67978 default:
67979 @@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct se
67980 0, 0L, 0,
67981 sock_i_uid(sp), 0,
67982 sock_i_ino(sp),
67983 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
67984 + atomic_read(&sp->sk_refcnt),
67985 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67986 + NULL,
67987 +#else
67988 + sp,
67989 +#endif
67990 + atomic_read_unchecked(&sp->sk_drops));
67991 }
67992
67993 static int raw6_seq_show(struct seq_file *seq, void *v)
67994 diff -urNp linux-2.6.32.41/net/ipv6/tcp_ipv6.c linux-2.6.32.41/net/ipv6/tcp_ipv6.c
67995 --- linux-2.6.32.41/net/ipv6/tcp_ipv6.c 2011-03-27 14:31:47.000000000 -0400
67996 +++ linux-2.6.32.41/net/ipv6/tcp_ipv6.c 2011-04-17 15:56:46.000000000 -0400
67997 @@ -88,6 +88,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
67998 }
67999 #endif
68000
68001 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68002 +extern int grsec_enable_blackhole;
68003 +#endif
68004 +
68005 static void tcp_v6_hash(struct sock *sk)
68006 {
68007 if (sk->sk_state != TCP_CLOSE) {
68008 @@ -1578,6 +1582,9 @@ static int tcp_v6_do_rcv(struct sock *sk
68009 return 0;
68010
68011 reset:
68012 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68013 + if (!grsec_enable_blackhole)
68014 +#endif
68015 tcp_v6_send_reset(sk, skb);
68016 discard:
68017 if (opt_skb)
68018 @@ -1655,12 +1662,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
68019 TCP_SKB_CB(skb)->sacked = 0;
68020
68021 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
68022 - if (!sk)
68023 + if (!sk) {
68024 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68025 + ret = 1;
68026 +#endif
68027 goto no_tcp_socket;
68028 + }
68029
68030 process:
68031 - if (sk->sk_state == TCP_TIME_WAIT)
68032 + if (sk->sk_state == TCP_TIME_WAIT) {
68033 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68034 + ret = 2;
68035 +#endif
68036 goto do_time_wait;
68037 + }
68038
68039 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
68040 goto discard_and_relse;
68041 @@ -1700,6 +1715,10 @@ no_tcp_socket:
68042 bad_packet:
68043 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
68044 } else {
68045 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68046 + if (!grsec_enable_blackhole || (ret == 1 &&
68047 + (skb->dev->flags & IFF_LOOPBACK)))
68048 +#endif
68049 tcp_v6_send_reset(NULL, skb);
68050 }
68051
68052 @@ -1915,7 +1934,13 @@ static void get_openreq6(struct seq_file
68053 uid,
68054 0, /* non standard timer */
68055 0, /* open_requests have no inode */
68056 - 0, req);
68057 + 0,
68058 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68059 + NULL
68060 +#else
68061 + req
68062 +#endif
68063 + );
68064 }
68065
68066 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
68067 @@ -1965,7 +1990,12 @@ static void get_tcp6_sock(struct seq_fil
68068 sock_i_uid(sp),
68069 icsk->icsk_probes_out,
68070 sock_i_ino(sp),
68071 - atomic_read(&sp->sk_refcnt), sp,
68072 + atomic_read(&sp->sk_refcnt),
68073 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68074 + NULL,
68075 +#else
68076 + sp,
68077 +#endif
68078 jiffies_to_clock_t(icsk->icsk_rto),
68079 jiffies_to_clock_t(icsk->icsk_ack.ato),
68080 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
68081 @@ -2000,7 +2030,13 @@ static void get_timewait6_sock(struct se
68082 dest->s6_addr32[2], dest->s6_addr32[3], destp,
68083 tw->tw_substate, 0, 0,
68084 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
68085 - atomic_read(&tw->tw_refcnt), tw);
68086 + atomic_read(&tw->tw_refcnt),
68087 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68088 + NULL
68089 +#else
68090 + tw
68091 +#endif
68092 + );
68093 }
68094
68095 static int tcp6_seq_show(struct seq_file *seq, void *v)
68096 diff -urNp linux-2.6.32.41/net/ipv6/udp.c linux-2.6.32.41/net/ipv6/udp.c
68097 --- linux-2.6.32.41/net/ipv6/udp.c 2011-03-27 14:31:47.000000000 -0400
68098 +++ linux-2.6.32.41/net/ipv6/udp.c 2011-05-04 17:58:16.000000000 -0400
68099 @@ -49,6 +49,10 @@
68100 #include <linux/seq_file.h>
68101 #include "udp_impl.h"
68102
68103 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68104 +extern int grsec_enable_blackhole;
68105 +#endif
68106 +
68107 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
68108 {
68109 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
68110 @@ -388,7 +392,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
68111 if (rc == -ENOMEM) {
68112 UDP6_INC_STATS_BH(sock_net(sk),
68113 UDP_MIB_RCVBUFERRORS, is_udplite);
68114 - atomic_inc(&sk->sk_drops);
68115 + atomic_inc_unchecked(&sk->sk_drops);
68116 }
68117 goto drop;
68118 }
68119 @@ -587,6 +591,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
68120 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
68121 proto == IPPROTO_UDPLITE);
68122
68123 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68124 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
68125 +#endif
68126 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
68127
68128 kfree_skb(skb);
68129 @@ -1206,8 +1213,13 @@ static void udp6_sock_seq_show(struct se
68130 0, 0L, 0,
68131 sock_i_uid(sp), 0,
68132 sock_i_ino(sp),
68133 - atomic_read(&sp->sk_refcnt), sp,
68134 - atomic_read(&sp->sk_drops));
68135 + atomic_read(&sp->sk_refcnt),
68136 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68137 + NULL,
68138 +#else
68139 + sp,
68140 +#endif
68141 + atomic_read_unchecked(&sp->sk_drops));
68142 }
68143
68144 int udp6_seq_show(struct seq_file *seq, void *v)
68145 diff -urNp linux-2.6.32.41/net/irda/ircomm/ircomm_tty.c linux-2.6.32.41/net/irda/ircomm/ircomm_tty.c
68146 --- linux-2.6.32.41/net/irda/ircomm/ircomm_tty.c 2011-03-27 14:31:47.000000000 -0400
68147 +++ linux-2.6.32.41/net/irda/ircomm/ircomm_tty.c 2011-04-17 15:56:46.000000000 -0400
68148 @@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(st
68149 add_wait_queue(&self->open_wait, &wait);
68150
68151 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
68152 - __FILE__,__LINE__, tty->driver->name, self->open_count );
68153 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
68154
68155 /* As far as I can see, we protect open_count - Jean II */
68156 spin_lock_irqsave(&self->spinlock, flags);
68157 if (!tty_hung_up_p(filp)) {
68158 extra_count = 1;
68159 - self->open_count--;
68160 + local_dec(&self->open_count);
68161 }
68162 spin_unlock_irqrestore(&self->spinlock, flags);
68163 - self->blocked_open++;
68164 + local_inc(&self->blocked_open);
68165
68166 while (1) {
68167 if (tty->termios->c_cflag & CBAUD) {
68168 @@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(st
68169 }
68170
68171 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
68172 - __FILE__,__LINE__, tty->driver->name, self->open_count );
68173 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
68174
68175 schedule();
68176 }
68177 @@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(st
68178 if (extra_count) {
68179 /* ++ is not atomic, so this should be protected - Jean II */
68180 spin_lock_irqsave(&self->spinlock, flags);
68181 - self->open_count++;
68182 + local_inc(&self->open_count);
68183 spin_unlock_irqrestore(&self->spinlock, flags);
68184 }
68185 - self->blocked_open--;
68186 + local_dec(&self->blocked_open);
68187
68188 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
68189 - __FILE__,__LINE__, tty->driver->name, self->open_count);
68190 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
68191
68192 if (!retval)
68193 self->flags |= ASYNC_NORMAL_ACTIVE;
68194 @@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_st
68195 }
68196 /* ++ is not atomic, so this should be protected - Jean II */
68197 spin_lock_irqsave(&self->spinlock, flags);
68198 - self->open_count++;
68199 + local_inc(&self->open_count);
68200
68201 tty->driver_data = self;
68202 self->tty = tty;
68203 spin_unlock_irqrestore(&self->spinlock, flags);
68204
68205 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
68206 - self->line, self->open_count);
68207 + self->line, local_read(&self->open_count));
68208
68209 /* Not really used by us, but lets do it anyway */
68210 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
68211 @@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_
68212 return;
68213 }
68214
68215 - if ((tty->count == 1) && (self->open_count != 1)) {
68216 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
68217 /*
68218 * Uh, oh. tty->count is 1, which means that the tty
68219 * structure will be freed. state->count should always
68220 @@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_
68221 */
68222 IRDA_DEBUG(0, "%s(), bad serial port count; "
68223 "tty->count is 1, state->count is %d\n", __func__ ,
68224 - self->open_count);
68225 - self->open_count = 1;
68226 + local_read(&self->open_count));
68227 + local_set(&self->open_count, 1);
68228 }
68229
68230 - if (--self->open_count < 0) {
68231 + if (local_dec_return(&self->open_count) < 0) {
68232 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
68233 - __func__, self->line, self->open_count);
68234 - self->open_count = 0;
68235 + __func__, self->line, local_read(&self->open_count));
68236 + local_set(&self->open_count, 0);
68237 }
68238 - if (self->open_count) {
68239 + if (local_read(&self->open_count)) {
68240 spin_unlock_irqrestore(&self->spinlock, flags);
68241
68242 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
68243 @@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_
68244 tty->closing = 0;
68245 self->tty = NULL;
68246
68247 - if (self->blocked_open) {
68248 + if (local_read(&self->blocked_open)) {
68249 if (self->close_delay)
68250 schedule_timeout_interruptible(self->close_delay);
68251 wake_up_interruptible(&self->open_wait);
68252 @@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty
68253 spin_lock_irqsave(&self->spinlock, flags);
68254 self->flags &= ~ASYNC_NORMAL_ACTIVE;
68255 self->tty = NULL;
68256 - self->open_count = 0;
68257 + local_set(&self->open_count, 0);
68258 spin_unlock_irqrestore(&self->spinlock, flags);
68259
68260 wake_up_interruptible(&self->open_wait);
68261 @@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct
68262 seq_putc(m, '\n');
68263
68264 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
68265 - seq_printf(m, "Open count: %d\n", self->open_count);
68266 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
68267 seq_printf(m, "Max data size: %d\n", self->max_data_size);
68268 seq_printf(m, "Max header size: %d\n", self->max_header_size);
68269
68270 diff -urNp linux-2.6.32.41/net/iucv/af_iucv.c linux-2.6.32.41/net/iucv/af_iucv.c
68271 --- linux-2.6.32.41/net/iucv/af_iucv.c 2011-03-27 14:31:47.000000000 -0400
68272 +++ linux-2.6.32.41/net/iucv/af_iucv.c 2011-05-04 17:56:28.000000000 -0400
68273 @@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct soc
68274
68275 write_lock_bh(&iucv_sk_list.lock);
68276
68277 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
68278 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
68279 while (__iucv_get_sock_by_name(name)) {
68280 sprintf(name, "%08x",
68281 - atomic_inc_return(&iucv_sk_list.autobind_name));
68282 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
68283 }
68284
68285 write_unlock_bh(&iucv_sk_list.lock);
68286 diff -urNp linux-2.6.32.41/net/key/af_key.c linux-2.6.32.41/net/key/af_key.c
68287 --- linux-2.6.32.41/net/key/af_key.c 2011-03-27 14:31:47.000000000 -0400
68288 +++ linux-2.6.32.41/net/key/af_key.c 2011-05-16 21:46:57.000000000 -0400
68289 @@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk
68290 struct xfrm_migrate m[XFRM_MAX_DEPTH];
68291 struct xfrm_kmaddress k;
68292
68293 + pax_track_stack();
68294 +
68295 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
68296 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
68297 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
68298 @@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_fil
68299 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
68300 else
68301 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
68302 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68303 + NULL,
68304 +#else
68305 s,
68306 +#endif
68307 atomic_read(&s->sk_refcnt),
68308 sk_rmem_alloc_get(s),
68309 sk_wmem_alloc_get(s),
68310 diff -urNp linux-2.6.32.41/net/mac80211/cfg.c linux-2.6.32.41/net/mac80211/cfg.c
68311 --- linux-2.6.32.41/net/mac80211/cfg.c 2011-03-27 14:31:47.000000000 -0400
68312 +++ linux-2.6.32.41/net/mac80211/cfg.c 2011-04-17 15:56:46.000000000 -0400
68313 @@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(st
68314 return err;
68315 }
68316
68317 -struct cfg80211_ops mac80211_config_ops = {
68318 +const struct cfg80211_ops mac80211_config_ops = {
68319 .add_virtual_intf = ieee80211_add_iface,
68320 .del_virtual_intf = ieee80211_del_iface,
68321 .change_virtual_intf = ieee80211_change_iface,
68322 diff -urNp linux-2.6.32.41/net/mac80211/cfg.h linux-2.6.32.41/net/mac80211/cfg.h
68323 --- linux-2.6.32.41/net/mac80211/cfg.h 2011-03-27 14:31:47.000000000 -0400
68324 +++ linux-2.6.32.41/net/mac80211/cfg.h 2011-04-17 15:56:46.000000000 -0400
68325 @@ -4,6 +4,6 @@
68326 #ifndef __CFG_H
68327 #define __CFG_H
68328
68329 -extern struct cfg80211_ops mac80211_config_ops;
68330 +extern const struct cfg80211_ops mac80211_config_ops;
68331
68332 #endif /* __CFG_H */
68333 diff -urNp linux-2.6.32.41/net/mac80211/debugfs_key.c linux-2.6.32.41/net/mac80211/debugfs_key.c
68334 --- linux-2.6.32.41/net/mac80211/debugfs_key.c 2011-03-27 14:31:47.000000000 -0400
68335 +++ linux-2.6.32.41/net/mac80211/debugfs_key.c 2011-04-17 15:56:46.000000000 -0400
68336 @@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file
68337 size_t count, loff_t *ppos)
68338 {
68339 struct ieee80211_key *key = file->private_data;
68340 - int i, res, bufsize = 2 * key->conf.keylen + 2;
68341 + int i, bufsize = 2 * key->conf.keylen + 2;
68342 char *buf = kmalloc(bufsize, GFP_KERNEL);
68343 char *p = buf;
68344 + ssize_t res;
68345 +
68346 + if (buf == NULL)
68347 + return -ENOMEM;
68348
68349 for (i = 0; i < key->conf.keylen; i++)
68350 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
68351 diff -urNp linux-2.6.32.41/net/mac80211/debugfs_sta.c linux-2.6.32.41/net/mac80211/debugfs_sta.c
68352 --- linux-2.6.32.41/net/mac80211/debugfs_sta.c 2011-03-27 14:31:47.000000000 -0400
68353 +++ linux-2.6.32.41/net/mac80211/debugfs_sta.c 2011-05-16 21:46:57.000000000 -0400
68354 @@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struc
68355 int i;
68356 struct sta_info *sta = file->private_data;
68357
68358 + pax_track_stack();
68359 +
68360 spin_lock_bh(&sta->lock);
68361 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
68362 sta->ampdu_mlme.dialog_token_allocator + 1);
68363 diff -urNp linux-2.6.32.41/net/mac80211/ieee80211_i.h linux-2.6.32.41/net/mac80211/ieee80211_i.h
68364 --- linux-2.6.32.41/net/mac80211/ieee80211_i.h 2011-03-27 14:31:47.000000000 -0400
68365 +++ linux-2.6.32.41/net/mac80211/ieee80211_i.h 2011-04-17 15:56:46.000000000 -0400
68366 @@ -25,6 +25,7 @@
68367 #include <linux/etherdevice.h>
68368 #include <net/cfg80211.h>
68369 #include <net/mac80211.h>
68370 +#include <asm/local.h>
68371 #include "key.h"
68372 #include "sta_info.h"
68373
68374 @@ -635,7 +636,7 @@ struct ieee80211_local {
68375 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
68376 spinlock_t queue_stop_reason_lock;
68377
68378 - int open_count;
68379 + local_t open_count;
68380 int monitors, cooked_mntrs;
68381 /* number of interfaces with corresponding FIF_ flags */
68382 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
68383 diff -urNp linux-2.6.32.41/net/mac80211/iface.c linux-2.6.32.41/net/mac80211/iface.c
68384 --- linux-2.6.32.41/net/mac80211/iface.c 2011-03-27 14:31:47.000000000 -0400
68385 +++ linux-2.6.32.41/net/mac80211/iface.c 2011-04-17 15:56:46.000000000 -0400
68386 @@ -166,7 +166,7 @@ static int ieee80211_open(struct net_dev
68387 break;
68388 }
68389
68390 - if (local->open_count == 0) {
68391 + if (local_read(&local->open_count) == 0) {
68392 res = drv_start(local);
68393 if (res)
68394 goto err_del_bss;
68395 @@ -196,7 +196,7 @@ static int ieee80211_open(struct net_dev
68396 * Validate the MAC address for this device.
68397 */
68398 if (!is_valid_ether_addr(dev->dev_addr)) {
68399 - if (!local->open_count)
68400 + if (!local_read(&local->open_count))
68401 drv_stop(local);
68402 return -EADDRNOTAVAIL;
68403 }
68404 @@ -292,7 +292,7 @@ static int ieee80211_open(struct net_dev
68405
68406 hw_reconf_flags |= __ieee80211_recalc_idle(local);
68407
68408 - local->open_count++;
68409 + local_inc(&local->open_count);
68410 if (hw_reconf_flags) {
68411 ieee80211_hw_config(local, hw_reconf_flags);
68412 /*
68413 @@ -320,7 +320,7 @@ static int ieee80211_open(struct net_dev
68414 err_del_interface:
68415 drv_remove_interface(local, &conf);
68416 err_stop:
68417 - if (!local->open_count)
68418 + if (!local_read(&local->open_count))
68419 drv_stop(local);
68420 err_del_bss:
68421 sdata->bss = NULL;
68422 @@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_dev
68423 WARN_ON(!list_empty(&sdata->u.ap.vlans));
68424 }
68425
68426 - local->open_count--;
68427 + local_dec(&local->open_count);
68428
68429 switch (sdata->vif.type) {
68430 case NL80211_IFTYPE_AP_VLAN:
68431 @@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_dev
68432
68433 ieee80211_recalc_ps(local, -1);
68434
68435 - if (local->open_count == 0) {
68436 + if (local_read(&local->open_count) == 0) {
68437 ieee80211_clear_tx_pending(local);
68438 ieee80211_stop_device(local);
68439
68440 diff -urNp linux-2.6.32.41/net/mac80211/main.c linux-2.6.32.41/net/mac80211/main.c
68441 --- linux-2.6.32.41/net/mac80211/main.c 2011-05-10 22:12:02.000000000 -0400
68442 +++ linux-2.6.32.41/net/mac80211/main.c 2011-05-10 22:12:34.000000000 -0400
68443 @@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211
68444 local->hw.conf.power_level = power;
68445 }
68446
68447 - if (changed && local->open_count) {
68448 + if (changed && local_read(&local->open_count)) {
68449 ret = drv_config(local, changed);
68450 /*
68451 * Goal:
68452 diff -urNp linux-2.6.32.41/net/mac80211/mlme.c linux-2.6.32.41/net/mac80211/mlme.c
68453 --- linux-2.6.32.41/net/mac80211/mlme.c 2011-03-27 14:31:47.000000000 -0400
68454 +++ linux-2.6.32.41/net/mac80211/mlme.c 2011-05-16 21:46:57.000000000 -0400
68455 @@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee
68456 bool have_higher_than_11mbit = false, newsta = false;
68457 u16 ap_ht_cap_flags;
68458
68459 + pax_track_stack();
68460 +
68461 /*
68462 * AssocResp and ReassocResp have identical structure, so process both
68463 * of them in this function.
68464 diff -urNp linux-2.6.32.41/net/mac80211/pm.c linux-2.6.32.41/net/mac80211/pm.c
68465 --- linux-2.6.32.41/net/mac80211/pm.c 2011-03-27 14:31:47.000000000 -0400
68466 +++ linux-2.6.32.41/net/mac80211/pm.c 2011-04-17 15:56:46.000000000 -0400
68467 @@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211
68468 }
68469
68470 /* stop hardware - this must stop RX */
68471 - if (local->open_count)
68472 + if (local_read(&local->open_count))
68473 ieee80211_stop_device(local);
68474
68475 local->suspended = true;
68476 diff -urNp linux-2.6.32.41/net/mac80211/rate.c linux-2.6.32.41/net/mac80211/rate.c
68477 --- linux-2.6.32.41/net/mac80211/rate.c 2011-03-27 14:31:47.000000000 -0400
68478 +++ linux-2.6.32.41/net/mac80211/rate.c 2011-04-17 15:56:46.000000000 -0400
68479 @@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct
68480 struct rate_control_ref *ref, *old;
68481
68482 ASSERT_RTNL();
68483 - if (local->open_count)
68484 + if (local_read(&local->open_count))
68485 return -EBUSY;
68486
68487 ref = rate_control_alloc(name, local);
68488 diff -urNp linux-2.6.32.41/net/mac80211/tx.c linux-2.6.32.41/net/mac80211/tx.c
68489 --- linux-2.6.32.41/net/mac80211/tx.c 2011-03-27 14:31:47.000000000 -0400
68490 +++ linux-2.6.32.41/net/mac80211/tx.c 2011-04-17 15:56:46.000000000 -0400
68491 @@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct
68492 return cpu_to_le16(dur);
68493 }
68494
68495 -static int inline is_ieee80211_device(struct ieee80211_local *local,
68496 +static inline int is_ieee80211_device(struct ieee80211_local *local,
68497 struct net_device *dev)
68498 {
68499 return local == wdev_priv(dev->ieee80211_ptr);
68500 diff -urNp linux-2.6.32.41/net/mac80211/util.c linux-2.6.32.41/net/mac80211/util.c
68501 --- linux-2.6.32.41/net/mac80211/util.c 2011-03-27 14:31:47.000000000 -0400
68502 +++ linux-2.6.32.41/net/mac80211/util.c 2011-04-17 15:56:46.000000000 -0400
68503 @@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_
68504 local->resuming = true;
68505
68506 /* restart hardware */
68507 - if (local->open_count) {
68508 + if (local_read(&local->open_count)) {
68509 /*
68510 * Upon resume hardware can sometimes be goofy due to
68511 * various platform / driver / bus issues, so restarting
68512 diff -urNp linux-2.6.32.41/net/netfilter/ipvs/ip_vs_app.c linux-2.6.32.41/net/netfilter/ipvs/ip_vs_app.c
68513 --- linux-2.6.32.41/net/netfilter/ipvs/ip_vs_app.c 2011-03-27 14:31:47.000000000 -0400
68514 +++ linux-2.6.32.41/net/netfilter/ipvs/ip_vs_app.c 2011-05-17 19:26:34.000000000 -0400
68515 @@ -564,7 +564,7 @@ static const struct file_operations ip_v
68516 .open = ip_vs_app_open,
68517 .read = seq_read,
68518 .llseek = seq_lseek,
68519 - .release = seq_release,
68520 + .release = seq_release_net,
68521 };
68522 #endif
68523
68524 diff -urNp linux-2.6.32.41/net/netfilter/ipvs/ip_vs_conn.c linux-2.6.32.41/net/netfilter/ipvs/ip_vs_conn.c
68525 --- linux-2.6.32.41/net/netfilter/ipvs/ip_vs_conn.c 2011-03-27 14:31:47.000000000 -0400
68526 +++ linux-2.6.32.41/net/netfilter/ipvs/ip_vs_conn.c 2011-05-17 19:26:34.000000000 -0400
68527 @@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
68528 /* if the connection is not template and is created
68529 * by sync, preserve the activity flag.
68530 */
68531 - cp->flags |= atomic_read(&dest->conn_flags) &
68532 + cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
68533 (~IP_VS_CONN_F_INACTIVE);
68534 else
68535 - cp->flags |= atomic_read(&dest->conn_flags);
68536 + cp->flags |= atomic_read_unchecked(&dest->conn_flags);
68537 cp->dest = dest;
68538
68539 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
68540 @@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const
68541 atomic_set(&cp->refcnt, 1);
68542
68543 atomic_set(&cp->n_control, 0);
68544 - atomic_set(&cp->in_pkts, 0);
68545 + atomic_set_unchecked(&cp->in_pkts, 0);
68546
68547 atomic_inc(&ip_vs_conn_count);
68548 if (flags & IP_VS_CONN_F_NO_CPORT)
68549 @@ -871,7 +871,7 @@ static const struct file_operations ip_v
68550 .open = ip_vs_conn_open,
68551 .read = seq_read,
68552 .llseek = seq_lseek,
68553 - .release = seq_release,
68554 + .release = seq_release_net,
68555 };
68556
68557 static const char *ip_vs_origin_name(unsigned flags)
68558 @@ -934,7 +934,7 @@ static const struct file_operations ip_v
68559 .open = ip_vs_conn_sync_open,
68560 .read = seq_read,
68561 .llseek = seq_lseek,
68562 - .release = seq_release,
68563 + .release = seq_release_net,
68564 };
68565
68566 #endif
68567 @@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip
68568
68569 /* Don't drop the entry if its number of incoming packets is not
68570 located in [0, 8] */
68571 - i = atomic_read(&cp->in_pkts);
68572 + i = atomic_read_unchecked(&cp->in_pkts);
68573 if (i > 8 || i < 0) return 0;
68574
68575 if (!todrop_rate[i]) return 0;
68576 diff -urNp linux-2.6.32.41/net/netfilter/ipvs/ip_vs_core.c linux-2.6.32.41/net/netfilter/ipvs/ip_vs_core.c
68577 --- linux-2.6.32.41/net/netfilter/ipvs/ip_vs_core.c 2011-03-27 14:31:47.000000000 -0400
68578 +++ linux-2.6.32.41/net/netfilter/ipvs/ip_vs_core.c 2011-05-04 17:56:28.000000000 -0400
68579 @@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *sv
68580 ret = cp->packet_xmit(skb, cp, pp);
68581 /* do not touch skb anymore */
68582
68583 - atomic_inc(&cp->in_pkts);
68584 + atomic_inc_unchecked(&cp->in_pkts);
68585 ip_vs_conn_put(cp);
68586 return ret;
68587 }
68588 @@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk
68589 * Sync connection if it is about to close to
68590 * encorage the standby servers to update the connections timeout
68591 */
68592 - pkts = atomic_add_return(1, &cp->in_pkts);
68593 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
68594 if (af == AF_INET &&
68595 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
68596 (((cp->protocol != IPPROTO_TCP ||
68597 diff -urNp linux-2.6.32.41/net/netfilter/ipvs/ip_vs_ctl.c linux-2.6.32.41/net/netfilter/ipvs/ip_vs_ctl.c
68598 --- linux-2.6.32.41/net/netfilter/ipvs/ip_vs_ctl.c 2011-03-27 14:31:47.000000000 -0400
68599 +++ linux-2.6.32.41/net/netfilter/ipvs/ip_vs_ctl.c 2011-05-17 19:26:34.000000000 -0400
68600 @@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service
68601 ip_vs_rs_hash(dest);
68602 write_unlock_bh(&__ip_vs_rs_lock);
68603 }
68604 - atomic_set(&dest->conn_flags, conn_flags);
68605 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
68606
68607 /* bind the service */
68608 if (!dest->svc) {
68609 @@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct se
68610 " %-7s %-6d %-10d %-10d\n",
68611 &dest->addr.in6,
68612 ntohs(dest->port),
68613 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
68614 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
68615 atomic_read(&dest->weight),
68616 atomic_read(&dest->activeconns),
68617 atomic_read(&dest->inactconns));
68618 @@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct se
68619 "%-7s %-6d %-10d %-10d\n",
68620 ntohl(dest->addr.ip),
68621 ntohs(dest->port),
68622 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
68623 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
68624 atomic_read(&dest->weight),
68625 atomic_read(&dest->activeconns),
68626 atomic_read(&dest->inactconns));
68627 @@ -1927,7 +1927,7 @@ static const struct file_operations ip_v
68628 .open = ip_vs_info_open,
68629 .read = seq_read,
68630 .llseek = seq_lseek,
68631 - .release = seq_release_private,
68632 + .release = seq_release_net,
68633 };
68634
68635 #endif
68636 @@ -1976,7 +1976,7 @@ static const struct file_operations ip_v
68637 .open = ip_vs_stats_seq_open,
68638 .read = seq_read,
68639 .llseek = seq_lseek,
68640 - .release = single_release,
68641 + .release = single_release_net,
68642 };
68643
68644 #endif
68645 @@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip
68646
68647 entry.addr = dest->addr.ip;
68648 entry.port = dest->port;
68649 - entry.conn_flags = atomic_read(&dest->conn_flags);
68650 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
68651 entry.weight = atomic_read(&dest->weight);
68652 entry.u_threshold = dest->u_threshold;
68653 entry.l_threshold = dest->l_threshold;
68654 @@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cm
68655 unsigned char arg[128];
68656 int ret = 0;
68657
68658 + pax_track_stack();
68659 +
68660 if (!capable(CAP_NET_ADMIN))
68661 return -EPERM;
68662
68663 @@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct s
68664 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
68665
68666 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
68667 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
68668 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
68669 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
68670 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
68671 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
68672 diff -urNp linux-2.6.32.41/net/netfilter/ipvs/ip_vs_sync.c linux-2.6.32.41/net/netfilter/ipvs/ip_vs_sync.c
68673 --- linux-2.6.32.41/net/netfilter/ipvs/ip_vs_sync.c 2011-03-27 14:31:47.000000000 -0400
68674 +++ linux-2.6.32.41/net/netfilter/ipvs/ip_vs_sync.c 2011-05-04 17:56:28.000000000 -0400
68675 @@ -438,7 +438,7 @@ static void ip_vs_process_message(const
68676
68677 if (opt)
68678 memcpy(&cp->in_seq, opt, sizeof(*opt));
68679 - atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
68680 + atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
68681 cp->state = state;
68682 cp->old_state = cp->state;
68683 /*
68684 diff -urNp linux-2.6.32.41/net/netfilter/ipvs/ip_vs_xmit.c linux-2.6.32.41/net/netfilter/ipvs/ip_vs_xmit.c
68685 --- linux-2.6.32.41/net/netfilter/ipvs/ip_vs_xmit.c 2011-03-27 14:31:47.000000000 -0400
68686 +++ linux-2.6.32.41/net/netfilter/ipvs/ip_vs_xmit.c 2011-05-04 17:56:28.000000000 -0400
68687 @@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
68688 else
68689 rc = NF_ACCEPT;
68690 /* do not touch skb anymore */
68691 - atomic_inc(&cp->in_pkts);
68692 + atomic_inc_unchecked(&cp->in_pkts);
68693 goto out;
68694 }
68695
68696 @@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
68697 else
68698 rc = NF_ACCEPT;
68699 /* do not touch skb anymore */
68700 - atomic_inc(&cp->in_pkts);
68701 + atomic_inc_unchecked(&cp->in_pkts);
68702 goto out;
68703 }
68704
68705 diff -urNp linux-2.6.32.41/net/netfilter/Kconfig linux-2.6.32.41/net/netfilter/Kconfig
68706 --- linux-2.6.32.41/net/netfilter/Kconfig 2011-03-27 14:31:47.000000000 -0400
68707 +++ linux-2.6.32.41/net/netfilter/Kconfig 2011-04-17 15:56:46.000000000 -0400
68708 @@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
68709
68710 To compile it as a module, choose M here. If unsure, say N.
68711
68712 +config NETFILTER_XT_MATCH_GRADM
68713 + tristate '"gradm" match support'
68714 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
68715 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
68716 + ---help---
68717 + The gradm match allows matching on whether grsecurity RBAC is enabled.
68718 + It is useful when iptables rules are applied early on bootup to
68719 + prevent connections to the machine (except from a trusted host)
68720 + while the RBAC system is disabled.
68721 +
68722 config NETFILTER_XT_MATCH_HASHLIMIT
68723 tristate '"hashlimit" match support'
68724 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
68725 diff -urNp linux-2.6.32.41/net/netfilter/Makefile linux-2.6.32.41/net/netfilter/Makefile
68726 --- linux-2.6.32.41/net/netfilter/Makefile 2011-03-27 14:31:47.000000000 -0400
68727 +++ linux-2.6.32.41/net/netfilter/Makefile 2011-04-17 15:56:46.000000000 -0400
68728 @@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRAC
68729 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
68730 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
68731 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
68732 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
68733 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
68734 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
68735 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
68736 diff -urNp linux-2.6.32.41/net/netfilter/nf_conntrack_netlink.c linux-2.6.32.41/net/netfilter/nf_conntrack_netlink.c
68737 --- linux-2.6.32.41/net/netfilter/nf_conntrack_netlink.c 2011-03-27 14:31:47.000000000 -0400
68738 +++ linux-2.6.32.41/net/netfilter/nf_conntrack_netlink.c 2011-04-17 15:56:46.000000000 -0400
68739 @@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlatt
68740 static int
68741 ctnetlink_parse_tuple(const struct nlattr * const cda[],
68742 struct nf_conntrack_tuple *tuple,
68743 - enum ctattr_tuple type, u_int8_t l3num)
68744 + enum ctattr_type type, u_int8_t l3num)
68745 {
68746 struct nlattr *tb[CTA_TUPLE_MAX+1];
68747 int err;
68748 diff -urNp linux-2.6.32.41/net/netfilter/nfnetlink_log.c linux-2.6.32.41/net/netfilter/nfnetlink_log.c
68749 --- linux-2.6.32.41/net/netfilter/nfnetlink_log.c 2011-03-27 14:31:47.000000000 -0400
68750 +++ linux-2.6.32.41/net/netfilter/nfnetlink_log.c 2011-05-04 17:56:28.000000000 -0400
68751 @@ -68,7 +68,7 @@ struct nfulnl_instance {
68752 };
68753
68754 static DEFINE_RWLOCK(instances_lock);
68755 -static atomic_t global_seq;
68756 +static atomic_unchecked_t global_seq;
68757
68758 #define INSTANCE_BUCKETS 16
68759 static struct hlist_head instance_table[INSTANCE_BUCKETS];
68760 @@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_ins
68761 /* global sequence number */
68762 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
68763 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
68764 - htonl(atomic_inc_return(&global_seq)));
68765 + htonl(atomic_inc_return_unchecked(&global_seq)));
68766
68767 if (data_len) {
68768 struct nlattr *nla;
68769 diff -urNp linux-2.6.32.41/net/netfilter/xt_gradm.c linux-2.6.32.41/net/netfilter/xt_gradm.c
68770 --- linux-2.6.32.41/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
68771 +++ linux-2.6.32.41/net/netfilter/xt_gradm.c 2011-04-17 15:56:46.000000000 -0400
68772 @@ -0,0 +1,51 @@
68773 +/*
68774 + * gradm match for netfilter
68775 + * Copyright © Zbigniew Krzystolik, 2010
68776 + *
68777 + * This program is free software; you can redistribute it and/or modify
68778 + * it under the terms of the GNU General Public License; either version
68779 + * 2 or 3 as published by the Free Software Foundation.
68780 + */
68781 +#include <linux/module.h>
68782 +#include <linux/moduleparam.h>
68783 +#include <linux/skbuff.h>
68784 +#include <linux/netfilter/x_tables.h>
68785 +#include <linux/grsecurity.h>
68786 +#include <linux/netfilter/xt_gradm.h>
68787 +
68788 +static bool
68789 +gradm_mt(const struct sk_buff *skb, const struct xt_match_param *par)
68790 +{
68791 + const struct xt_gradm_mtinfo *info = par->matchinfo;
68792 + bool retval = false;
68793 + if (gr_acl_is_enabled())
68794 + retval = true;
68795 + return retval ^ info->invflags;
68796 +}
68797 +
68798 +static struct xt_match gradm_mt_reg __read_mostly = {
68799 + .name = "gradm",
68800 + .revision = 0,
68801 + .family = NFPROTO_UNSPEC,
68802 + .match = gradm_mt,
68803 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
68804 + .me = THIS_MODULE,
68805 +};
68806 +
68807 +static int __init gradm_mt_init(void)
68808 +{
68809 + return xt_register_match(&gradm_mt_reg);
68810 +}
68811 +
68812 +static void __exit gradm_mt_exit(void)
68813 +{
68814 + xt_unregister_match(&gradm_mt_reg);
68815 +}
68816 +
68817 +module_init(gradm_mt_init);
68818 +module_exit(gradm_mt_exit);
68819 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
68820 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
68821 +MODULE_LICENSE("GPL");
68822 +MODULE_ALIAS("ipt_gradm");
68823 +MODULE_ALIAS("ip6t_gradm");
68824 diff -urNp linux-2.6.32.41/net/netlink/af_netlink.c linux-2.6.32.41/net/netlink/af_netlink.c
68825 --- linux-2.6.32.41/net/netlink/af_netlink.c 2011-03-27 14:31:47.000000000 -0400
68826 +++ linux-2.6.32.41/net/netlink/af_netlink.c 2011-05-04 17:56:28.000000000 -0400
68827 @@ -733,7 +733,7 @@ static void netlink_overrun(struct sock
68828 sk->sk_error_report(sk);
68829 }
68830 }
68831 - atomic_inc(&sk->sk_drops);
68832 + atomic_inc_unchecked(&sk->sk_drops);
68833 }
68834
68835 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
68836 @@ -1964,15 +1964,23 @@ static int netlink_seq_show(struct seq_f
68837 struct netlink_sock *nlk = nlk_sk(s);
68838
68839 seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d\n",
68840 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68841 + NULL,
68842 +#else
68843 s,
68844 +#endif
68845 s->sk_protocol,
68846 nlk->pid,
68847 nlk->groups ? (u32)nlk->groups[0] : 0,
68848 sk_rmem_alloc_get(s),
68849 sk_wmem_alloc_get(s),
68850 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68851 + NULL,
68852 +#else
68853 nlk->cb,
68854 +#endif
68855 atomic_read(&s->sk_refcnt),
68856 - atomic_read(&s->sk_drops)
68857 + atomic_read_unchecked(&s->sk_drops)
68858 );
68859
68860 }
68861 diff -urNp linux-2.6.32.41/net/netrom/af_netrom.c linux-2.6.32.41/net/netrom/af_netrom.c
68862 --- linux-2.6.32.41/net/netrom/af_netrom.c 2011-03-27 14:31:47.000000000 -0400
68863 +++ linux-2.6.32.41/net/netrom/af_netrom.c 2011-04-17 15:56:46.000000000 -0400
68864 @@ -838,6 +838,7 @@ static int nr_getname(struct socket *soc
68865 struct sock *sk = sock->sk;
68866 struct nr_sock *nr = nr_sk(sk);
68867
68868 + memset(sax, 0, sizeof(*sax));
68869 lock_sock(sk);
68870 if (peer != 0) {
68871 if (sk->sk_state != TCP_ESTABLISHED) {
68872 @@ -852,7 +853,6 @@ static int nr_getname(struct socket *soc
68873 *uaddr_len = sizeof(struct full_sockaddr_ax25);
68874 } else {
68875 sax->fsa_ax25.sax25_family = AF_NETROM;
68876 - sax->fsa_ax25.sax25_ndigis = 0;
68877 sax->fsa_ax25.sax25_call = nr->source_addr;
68878 *uaddr_len = sizeof(struct sockaddr_ax25);
68879 }
68880 diff -urNp linux-2.6.32.41/net/packet/af_packet.c linux-2.6.32.41/net/packet/af_packet.c
68881 --- linux-2.6.32.41/net/packet/af_packet.c 2011-04-17 17:00:52.000000000 -0400
68882 +++ linux-2.6.32.41/net/packet/af_packet.c 2011-04-17 15:56:46.000000000 -0400
68883 @@ -2427,7 +2427,11 @@ static int packet_seq_show(struct seq_fi
68884
68885 seq_printf(seq,
68886 "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
68887 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68888 + NULL,
68889 +#else
68890 s,
68891 +#endif
68892 atomic_read(&s->sk_refcnt),
68893 s->sk_type,
68894 ntohs(po->num),
68895 diff -urNp linux-2.6.32.41/net/phonet/af_phonet.c linux-2.6.32.41/net/phonet/af_phonet.c
68896 --- linux-2.6.32.41/net/phonet/af_phonet.c 2011-03-27 14:31:47.000000000 -0400
68897 +++ linux-2.6.32.41/net/phonet/af_phonet.c 2011-04-17 15:56:46.000000000 -0400
68898 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_pr
68899 {
68900 struct phonet_protocol *pp;
68901
68902 - if (protocol >= PHONET_NPROTO)
68903 + if (protocol < 0 || protocol >= PHONET_NPROTO)
68904 return NULL;
68905
68906 spin_lock(&proto_tab_lock);
68907 @@ -402,7 +402,7 @@ int __init_or_module phonet_proto_regist
68908 {
68909 int err = 0;
68910
68911 - if (protocol >= PHONET_NPROTO)
68912 + if (protocol < 0 || protocol >= PHONET_NPROTO)
68913 return -EINVAL;
68914
68915 err = proto_register(pp->prot, 1);
68916 diff -urNp linux-2.6.32.41/net/phonet/datagram.c linux-2.6.32.41/net/phonet/datagram.c
68917 --- linux-2.6.32.41/net/phonet/datagram.c 2011-03-27 14:31:47.000000000 -0400
68918 +++ linux-2.6.32.41/net/phonet/datagram.c 2011-05-04 17:56:28.000000000 -0400
68919 @@ -162,7 +162,7 @@ static int pn_backlog_rcv(struct sock *s
68920 if (err < 0) {
68921 kfree_skb(skb);
68922 if (err == -ENOMEM)
68923 - atomic_inc(&sk->sk_drops);
68924 + atomic_inc_unchecked(&sk->sk_drops);
68925 }
68926 return err ? NET_RX_DROP : NET_RX_SUCCESS;
68927 }
68928 diff -urNp linux-2.6.32.41/net/phonet/pep.c linux-2.6.32.41/net/phonet/pep.c
68929 --- linux-2.6.32.41/net/phonet/pep.c 2011-03-27 14:31:47.000000000 -0400
68930 +++ linux-2.6.32.41/net/phonet/pep.c 2011-05-04 17:56:28.000000000 -0400
68931 @@ -348,7 +348,7 @@ static int pipe_do_rcv(struct sock *sk,
68932
68933 case PNS_PEP_CTRL_REQ:
68934 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
68935 - atomic_inc(&sk->sk_drops);
68936 + atomic_inc_unchecked(&sk->sk_drops);
68937 break;
68938 }
68939 __skb_pull(skb, 4);
68940 @@ -362,12 +362,12 @@ static int pipe_do_rcv(struct sock *sk,
68941 if (!err)
68942 return 0;
68943 if (err == -ENOMEM)
68944 - atomic_inc(&sk->sk_drops);
68945 + atomic_inc_unchecked(&sk->sk_drops);
68946 break;
68947 }
68948
68949 if (pn->rx_credits == 0) {
68950 - atomic_inc(&sk->sk_drops);
68951 + atomic_inc_unchecked(&sk->sk_drops);
68952 err = -ENOBUFS;
68953 break;
68954 }
68955 diff -urNp linux-2.6.32.41/net/phonet/socket.c linux-2.6.32.41/net/phonet/socket.c
68956 --- linux-2.6.32.41/net/phonet/socket.c 2011-03-27 14:31:47.000000000 -0400
68957 +++ linux-2.6.32.41/net/phonet/socket.c 2011-05-04 17:57:07.000000000 -0400
68958 @@ -482,8 +482,13 @@ static int pn_sock_seq_show(struct seq_f
68959 sk->sk_state,
68960 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
68961 sock_i_uid(sk), sock_i_ino(sk),
68962 - atomic_read(&sk->sk_refcnt), sk,
68963 - atomic_read(&sk->sk_drops), &len);
68964 + atomic_read(&sk->sk_refcnt),
68965 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68966 + NULL,
68967 +#else
68968 + sk,
68969 +#endif
68970 + atomic_read_unchecked(&sk->sk_drops), &len);
68971 }
68972 seq_printf(seq, "%*s\n", 127 - len, "");
68973 return 0;
68974 diff -urNp linux-2.6.32.41/net/rds/cong.c linux-2.6.32.41/net/rds/cong.c
68975 --- linux-2.6.32.41/net/rds/cong.c 2011-03-27 14:31:47.000000000 -0400
68976 +++ linux-2.6.32.41/net/rds/cong.c 2011-05-04 17:56:28.000000000 -0400
68977 @@ -77,7 +77,7 @@
68978 * finds that the saved generation number is smaller than the global generation
68979 * number, it wakes up the process.
68980 */
68981 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
68982 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
68983
68984 /*
68985 * Congestion monitoring
68986 @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_con
68987 rdsdebug("waking map %p for %pI4\n",
68988 map, &map->m_addr);
68989 rds_stats_inc(s_cong_update_received);
68990 - atomic_inc(&rds_cong_generation);
68991 + atomic_inc_unchecked(&rds_cong_generation);
68992 if (waitqueue_active(&map->m_waitq))
68993 wake_up(&map->m_waitq);
68994 if (waitqueue_active(&rds_poll_waitq))
68995 @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
68996
68997 int rds_cong_updated_since(unsigned long *recent)
68998 {
68999 - unsigned long gen = atomic_read(&rds_cong_generation);
69000 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
69001
69002 if (likely(*recent == gen))
69003 return 0;
69004 diff -urNp linux-2.6.32.41/net/rds/iw_rdma.c linux-2.6.32.41/net/rds/iw_rdma.c
69005 --- linux-2.6.32.41/net/rds/iw_rdma.c 2011-03-27 14:31:47.000000000 -0400
69006 +++ linux-2.6.32.41/net/rds/iw_rdma.c 2011-05-16 21:46:57.000000000 -0400
69007 @@ -181,6 +181,8 @@ int rds_iw_update_cm_id(struct rds_iw_de
69008 struct rdma_cm_id *pcm_id;
69009 int rc;
69010
69011 + pax_track_stack();
69012 +
69013 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
69014 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
69015
69016 diff -urNp linux-2.6.32.41/net/rds/Kconfig linux-2.6.32.41/net/rds/Kconfig
69017 --- linux-2.6.32.41/net/rds/Kconfig 2011-03-27 14:31:47.000000000 -0400
69018 +++ linux-2.6.32.41/net/rds/Kconfig 2011-04-17 15:56:46.000000000 -0400
69019 @@ -1,7 +1,7 @@
69020
69021 config RDS
69022 tristate "The RDS Protocol (EXPERIMENTAL)"
69023 - depends on INET && EXPERIMENTAL
69024 + depends on INET && EXPERIMENTAL && BROKEN
69025 ---help---
69026 The RDS (Reliable Datagram Sockets) protocol provides reliable,
69027 sequenced delivery of datagrams over Infiniband, iWARP,
69028 diff -urNp linux-2.6.32.41/net/rxrpc/af_rxrpc.c linux-2.6.32.41/net/rxrpc/af_rxrpc.c
69029 --- linux-2.6.32.41/net/rxrpc/af_rxrpc.c 2011-03-27 14:31:47.000000000 -0400
69030 +++ linux-2.6.32.41/net/rxrpc/af_rxrpc.c 2011-05-04 17:56:28.000000000 -0400
69031 @@ -38,7 +38,7 @@ static const struct proto_ops rxrpc_rpc_
69032 __be32 rxrpc_epoch;
69033
69034 /* current debugging ID */
69035 -atomic_t rxrpc_debug_id;
69036 +atomic_unchecked_t rxrpc_debug_id;
69037
69038 /* count of skbs currently in use */
69039 atomic_t rxrpc_n_skbs;
69040 diff -urNp linux-2.6.32.41/net/rxrpc/ar-ack.c linux-2.6.32.41/net/rxrpc/ar-ack.c
69041 --- linux-2.6.32.41/net/rxrpc/ar-ack.c 2011-03-27 14:31:47.000000000 -0400
69042 +++ linux-2.6.32.41/net/rxrpc/ar-ack.c 2011-05-16 21:46:57.000000000 -0400
69043 @@ -174,7 +174,7 @@ static void rxrpc_resend(struct rxrpc_ca
69044
69045 _enter("{%d,%d,%d,%d},",
69046 call->acks_hard, call->acks_unacked,
69047 - atomic_read(&call->sequence),
69048 + atomic_read_unchecked(&call->sequence),
69049 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
69050
69051 stop = 0;
69052 @@ -198,7 +198,7 @@ static void rxrpc_resend(struct rxrpc_ca
69053
69054 /* each Tx packet has a new serial number */
69055 sp->hdr.serial =
69056 - htonl(atomic_inc_return(&call->conn->serial));
69057 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
69058
69059 hdr = (struct rxrpc_header *) txb->head;
69060 hdr->serial = sp->hdr.serial;
69061 @@ -401,7 +401,7 @@ static void rxrpc_rotate_tx_window(struc
69062 */
69063 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
69064 {
69065 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
69066 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
69067 }
69068
69069 /*
69070 @@ -627,7 +627,7 @@ process_further:
69071
69072 latest = ntohl(sp->hdr.serial);
69073 hard = ntohl(ack.firstPacket);
69074 - tx = atomic_read(&call->sequence);
69075 + tx = atomic_read_unchecked(&call->sequence);
69076
69077 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
69078 latest,
69079 @@ -840,6 +840,8 @@ void rxrpc_process_call(struct work_stru
69080 u32 abort_code = RX_PROTOCOL_ERROR;
69081 u8 *acks = NULL;
69082
69083 + pax_track_stack();
69084 +
69085 //printk("\n--------------------\n");
69086 _enter("{%d,%s,%lx} [%lu]",
69087 call->debug_id, rxrpc_call_states[call->state], call->events,
69088 @@ -1159,7 +1161,7 @@ void rxrpc_process_call(struct work_stru
69089 goto maybe_reschedule;
69090
69091 send_ACK_with_skew:
69092 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
69093 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
69094 ntohl(ack.serial));
69095 send_ACK:
69096 mtu = call->conn->trans->peer->if_mtu;
69097 @@ -1171,7 +1173,7 @@ send_ACK:
69098 ackinfo.rxMTU = htonl(5692);
69099 ackinfo.jumbo_max = htonl(4);
69100
69101 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
69102 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
69103 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
69104 ntohl(hdr.serial),
69105 ntohs(ack.maxSkew),
69106 @@ -1189,7 +1191,7 @@ send_ACK:
69107 send_message:
69108 _debug("send message");
69109
69110 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
69111 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
69112 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
69113 send_message_2:
69114
69115 diff -urNp linux-2.6.32.41/net/rxrpc/ar-call.c linux-2.6.32.41/net/rxrpc/ar-call.c
69116 --- linux-2.6.32.41/net/rxrpc/ar-call.c 2011-03-27 14:31:47.000000000 -0400
69117 +++ linux-2.6.32.41/net/rxrpc/ar-call.c 2011-05-04 17:56:28.000000000 -0400
69118 @@ -82,7 +82,7 @@ static struct rxrpc_call *rxrpc_alloc_ca
69119 spin_lock_init(&call->lock);
69120 rwlock_init(&call->state_lock);
69121 atomic_set(&call->usage, 1);
69122 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
69123 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
69124 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
69125
69126 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
69127 diff -urNp linux-2.6.32.41/net/rxrpc/ar-connection.c linux-2.6.32.41/net/rxrpc/ar-connection.c
69128 --- linux-2.6.32.41/net/rxrpc/ar-connection.c 2011-03-27 14:31:47.000000000 -0400
69129 +++ linux-2.6.32.41/net/rxrpc/ar-connection.c 2011-05-04 17:56:28.000000000 -0400
69130 @@ -205,7 +205,7 @@ static struct rxrpc_connection *rxrpc_al
69131 rwlock_init(&conn->lock);
69132 spin_lock_init(&conn->state_lock);
69133 atomic_set(&conn->usage, 1);
69134 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
69135 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
69136 conn->avail_calls = RXRPC_MAXCALLS;
69137 conn->size_align = 4;
69138 conn->header_size = sizeof(struct rxrpc_header);
69139 diff -urNp linux-2.6.32.41/net/rxrpc/ar-connevent.c linux-2.6.32.41/net/rxrpc/ar-connevent.c
69140 --- linux-2.6.32.41/net/rxrpc/ar-connevent.c 2011-03-27 14:31:47.000000000 -0400
69141 +++ linux-2.6.32.41/net/rxrpc/ar-connevent.c 2011-05-04 17:56:28.000000000 -0400
69142 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct
69143
69144 len = iov[0].iov_len + iov[1].iov_len;
69145
69146 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
69147 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
69148 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
69149
69150 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
69151 diff -urNp linux-2.6.32.41/net/rxrpc/ar-input.c linux-2.6.32.41/net/rxrpc/ar-input.c
69152 --- linux-2.6.32.41/net/rxrpc/ar-input.c 2011-03-27 14:31:47.000000000 -0400
69153 +++ linux-2.6.32.41/net/rxrpc/ar-input.c 2011-05-04 17:56:28.000000000 -0400
69154 @@ -339,9 +339,9 @@ void rxrpc_fast_process_packet(struct rx
69155 /* track the latest serial number on this connection for ACK packet
69156 * information */
69157 serial = ntohl(sp->hdr.serial);
69158 - hi_serial = atomic_read(&call->conn->hi_serial);
69159 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
69160 while (serial > hi_serial)
69161 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
69162 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
69163 serial);
69164
69165 /* request ACK generation for any ACK or DATA packet that requests
69166 diff -urNp linux-2.6.32.41/net/rxrpc/ar-internal.h linux-2.6.32.41/net/rxrpc/ar-internal.h
69167 --- linux-2.6.32.41/net/rxrpc/ar-internal.h 2011-03-27 14:31:47.000000000 -0400
69168 +++ linux-2.6.32.41/net/rxrpc/ar-internal.h 2011-05-04 17:56:28.000000000 -0400
69169 @@ -272,8 +272,8 @@ struct rxrpc_connection {
69170 int error; /* error code for local abort */
69171 int debug_id; /* debug ID for printks */
69172 unsigned call_counter; /* call ID counter */
69173 - atomic_t serial; /* packet serial number counter */
69174 - atomic_t hi_serial; /* highest serial number received */
69175 + atomic_unchecked_t serial; /* packet serial number counter */
69176 + atomic_unchecked_t hi_serial; /* highest serial number received */
69177 u8 avail_calls; /* number of calls available */
69178 u8 size_align; /* data size alignment (for security) */
69179 u8 header_size; /* rxrpc + security header size */
69180 @@ -346,7 +346,7 @@ struct rxrpc_call {
69181 spinlock_t lock;
69182 rwlock_t state_lock; /* lock for state transition */
69183 atomic_t usage;
69184 - atomic_t sequence; /* Tx data packet sequence counter */
69185 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
69186 u32 abort_code; /* local/remote abort code */
69187 enum { /* current state of call */
69188 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
69189 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(stru
69190 */
69191 extern atomic_t rxrpc_n_skbs;
69192 extern __be32 rxrpc_epoch;
69193 -extern atomic_t rxrpc_debug_id;
69194 +extern atomic_unchecked_t rxrpc_debug_id;
69195 extern struct workqueue_struct *rxrpc_workqueue;
69196
69197 /*
69198 diff -urNp linux-2.6.32.41/net/rxrpc/ar-key.c linux-2.6.32.41/net/rxrpc/ar-key.c
69199 --- linux-2.6.32.41/net/rxrpc/ar-key.c 2011-03-27 14:31:47.000000000 -0400
69200 +++ linux-2.6.32.41/net/rxrpc/ar-key.c 2011-04-17 15:56:46.000000000 -0400
69201 @@ -88,11 +88,11 @@ static int rxrpc_instantiate_xdr_rxkad(s
69202 return ret;
69203
69204 plen -= sizeof(*token);
69205 - token = kmalloc(sizeof(*token), GFP_KERNEL);
69206 + token = kzalloc(sizeof(*token), GFP_KERNEL);
69207 if (!token)
69208 return -ENOMEM;
69209
69210 - token->kad = kmalloc(plen, GFP_KERNEL);
69211 + token->kad = kzalloc(plen, GFP_KERNEL);
69212 if (!token->kad) {
69213 kfree(token);
69214 return -ENOMEM;
69215 @@ -730,10 +730,10 @@ static int rxrpc_instantiate(struct key
69216 goto error;
69217
69218 ret = -ENOMEM;
69219 - token = kmalloc(sizeof(*token), GFP_KERNEL);
69220 + token = kzalloc(sizeof(*token), GFP_KERNEL);
69221 if (!token)
69222 goto error;
69223 - token->kad = kmalloc(plen, GFP_KERNEL);
69224 + token->kad = kzalloc(plen, GFP_KERNEL);
69225 if (!token->kad)
69226 goto error_free;
69227
69228 diff -urNp linux-2.6.32.41/net/rxrpc/ar-local.c linux-2.6.32.41/net/rxrpc/ar-local.c
69229 --- linux-2.6.32.41/net/rxrpc/ar-local.c 2011-03-27 14:31:47.000000000 -0400
69230 +++ linux-2.6.32.41/net/rxrpc/ar-local.c 2011-05-04 17:56:28.000000000 -0400
69231 @@ -44,7 +44,7 @@ struct rxrpc_local *rxrpc_alloc_local(st
69232 spin_lock_init(&local->lock);
69233 rwlock_init(&local->services_lock);
69234 atomic_set(&local->usage, 1);
69235 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
69236 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
69237 memcpy(&local->srx, srx, sizeof(*srx));
69238 }
69239
69240 diff -urNp linux-2.6.32.41/net/rxrpc/ar-output.c linux-2.6.32.41/net/rxrpc/ar-output.c
69241 --- linux-2.6.32.41/net/rxrpc/ar-output.c 2011-03-27 14:31:47.000000000 -0400
69242 +++ linux-2.6.32.41/net/rxrpc/ar-output.c 2011-05-04 17:56:28.000000000 -0400
69243 @@ -680,9 +680,9 @@ static int rxrpc_send_data(struct kiocb
69244 sp->hdr.cid = call->cid;
69245 sp->hdr.callNumber = call->call_id;
69246 sp->hdr.seq =
69247 - htonl(atomic_inc_return(&call->sequence));
69248 + htonl(atomic_inc_return_unchecked(&call->sequence));
69249 sp->hdr.serial =
69250 - htonl(atomic_inc_return(&conn->serial));
69251 + htonl(atomic_inc_return_unchecked(&conn->serial));
69252 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
69253 sp->hdr.userStatus = 0;
69254 sp->hdr.securityIndex = conn->security_ix;
69255 diff -urNp linux-2.6.32.41/net/rxrpc/ar-peer.c linux-2.6.32.41/net/rxrpc/ar-peer.c
69256 --- linux-2.6.32.41/net/rxrpc/ar-peer.c 2011-03-27 14:31:47.000000000 -0400
69257 +++ linux-2.6.32.41/net/rxrpc/ar-peer.c 2011-05-04 17:56:28.000000000 -0400
69258 @@ -86,7 +86,7 @@ static struct rxrpc_peer *rxrpc_alloc_pe
69259 INIT_LIST_HEAD(&peer->error_targets);
69260 spin_lock_init(&peer->lock);
69261 atomic_set(&peer->usage, 1);
69262 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
69263 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
69264 memcpy(&peer->srx, srx, sizeof(*srx));
69265
69266 rxrpc_assess_MTU_size(peer);
69267 diff -urNp linux-2.6.32.41/net/rxrpc/ar-proc.c linux-2.6.32.41/net/rxrpc/ar-proc.c
69268 --- linux-2.6.32.41/net/rxrpc/ar-proc.c 2011-03-27 14:31:47.000000000 -0400
69269 +++ linux-2.6.32.41/net/rxrpc/ar-proc.c 2011-05-04 17:56:28.000000000 -0400
69270 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(str
69271 atomic_read(&conn->usage),
69272 rxrpc_conn_states[conn->state],
69273 key_serial(conn->key),
69274 - atomic_read(&conn->serial),
69275 - atomic_read(&conn->hi_serial));
69276 + atomic_read_unchecked(&conn->serial),
69277 + atomic_read_unchecked(&conn->hi_serial));
69278
69279 return 0;
69280 }
69281 diff -urNp linux-2.6.32.41/net/rxrpc/ar-transport.c linux-2.6.32.41/net/rxrpc/ar-transport.c
69282 --- linux-2.6.32.41/net/rxrpc/ar-transport.c 2011-03-27 14:31:47.000000000 -0400
69283 +++ linux-2.6.32.41/net/rxrpc/ar-transport.c 2011-05-04 17:56:28.000000000 -0400
69284 @@ -46,7 +46,7 @@ static struct rxrpc_transport *rxrpc_all
69285 spin_lock_init(&trans->client_lock);
69286 rwlock_init(&trans->conn_lock);
69287 atomic_set(&trans->usage, 1);
69288 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
69289 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
69290
69291 if (peer->srx.transport.family == AF_INET) {
69292 switch (peer->srx.transport_type) {
69293 diff -urNp linux-2.6.32.41/net/rxrpc/rxkad.c linux-2.6.32.41/net/rxrpc/rxkad.c
69294 --- linux-2.6.32.41/net/rxrpc/rxkad.c 2011-03-27 14:31:47.000000000 -0400
69295 +++ linux-2.6.32.41/net/rxrpc/rxkad.c 2011-05-16 21:46:57.000000000 -0400
69296 @@ -210,6 +210,8 @@ static int rxkad_secure_packet_encrypt(c
69297 u16 check;
69298 int nsg;
69299
69300 + pax_track_stack();
69301 +
69302 sp = rxrpc_skb(skb);
69303
69304 _enter("");
69305 @@ -337,6 +339,8 @@ static int rxkad_verify_packet_auth(cons
69306 u16 check;
69307 int nsg;
69308
69309 + pax_track_stack();
69310 +
69311 _enter("");
69312
69313 sp = rxrpc_skb(skb);
69314 @@ -609,7 +613,7 @@ static int rxkad_issue_challenge(struct
69315
69316 len = iov[0].iov_len + iov[1].iov_len;
69317
69318 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
69319 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
69320 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
69321
69322 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
69323 @@ -659,7 +663,7 @@ static int rxkad_send_response(struct rx
69324
69325 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
69326
69327 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
69328 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
69329 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
69330
69331 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
69332 diff -urNp linux-2.6.32.41/net/sctp/proc.c linux-2.6.32.41/net/sctp/proc.c
69333 --- linux-2.6.32.41/net/sctp/proc.c 2011-03-27 14:31:47.000000000 -0400
69334 +++ linux-2.6.32.41/net/sctp/proc.c 2011-04-17 15:56:46.000000000 -0400
69335 @@ -213,7 +213,12 @@ static int sctp_eps_seq_show(struct seq_
69336 sctp_for_each_hentry(epb, node, &head->chain) {
69337 ep = sctp_ep(epb);
69338 sk = epb->sk;
69339 - seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
69340 + seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ",
69341 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69342 + NULL, NULL,
69343 +#else
69344 + ep, sk,
69345 +#endif
69346 sctp_sk(sk)->type, sk->sk_state, hash,
69347 epb->bind_addr.port,
69348 sock_i_uid(sk), sock_i_ino(sk));
69349 @@ -320,7 +325,12 @@ static int sctp_assocs_seq_show(struct s
69350 seq_printf(seq,
69351 "%8p %8p %-3d %-3d %-2d %-4d "
69352 "%4d %8d %8d %7d %5lu %-5d %5d ",
69353 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
69354 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69355 + NULL, NULL,
69356 +#else
69357 + assoc, sk,
69358 +#endif
69359 + sctp_sk(sk)->type, sk->sk_state,
69360 assoc->state, hash,
69361 assoc->assoc_id,
69362 assoc->sndbuf_used,
69363 diff -urNp linux-2.6.32.41/net/sctp/socket.c linux-2.6.32.41/net/sctp/socket.c
69364 --- linux-2.6.32.41/net/sctp/socket.c 2011-03-27 14:31:47.000000000 -0400
69365 +++ linux-2.6.32.41/net/sctp/socket.c 2011-04-23 12:56:11.000000000 -0400
69366 @@ -5802,7 +5802,6 @@ pp_found:
69367 */
69368 int reuse = sk->sk_reuse;
69369 struct sock *sk2;
69370 - struct hlist_node *node;
69371
69372 SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n");
69373 if (pp->fastreuse && sk->sk_reuse &&
69374 diff -urNp linux-2.6.32.41/net/socket.c linux-2.6.32.41/net/socket.c
69375 --- linux-2.6.32.41/net/socket.c 2011-03-27 14:31:47.000000000 -0400
69376 +++ linux-2.6.32.41/net/socket.c 2011-05-16 21:46:57.000000000 -0400
69377 @@ -87,6 +87,7 @@
69378 #include <linux/wireless.h>
69379 #include <linux/nsproxy.h>
69380 #include <linux/magic.h>
69381 +#include <linux/in.h>
69382
69383 #include <asm/uaccess.h>
69384 #include <asm/unistd.h>
69385 @@ -97,6 +98,21 @@
69386 #include <net/sock.h>
69387 #include <linux/netfilter.h>
69388
69389 +extern void gr_attach_curr_ip(const struct sock *sk);
69390 +extern int gr_handle_sock_all(const int family, const int type,
69391 + const int protocol);
69392 +extern int gr_handle_sock_server(const struct sockaddr *sck);
69393 +extern int gr_handle_sock_server_other(const struct sock *sck);
69394 +extern int gr_handle_sock_client(const struct sockaddr *sck);
69395 +extern int gr_search_connect(struct socket * sock,
69396 + struct sockaddr_in * addr);
69397 +extern int gr_search_bind(struct socket * sock,
69398 + struct sockaddr_in * addr);
69399 +extern int gr_search_listen(struct socket * sock);
69400 +extern int gr_search_accept(struct socket * sock);
69401 +extern int gr_search_socket(const int domain, const int type,
69402 + const int protocol);
69403 +
69404 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
69405 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
69406 unsigned long nr_segs, loff_t pos);
69407 @@ -298,7 +314,7 @@ static int sockfs_get_sb(struct file_sys
69408 mnt);
69409 }
69410
69411 -static struct vfsmount *sock_mnt __read_mostly;
69412 +struct vfsmount *sock_mnt __read_mostly;
69413
69414 static struct file_system_type sock_fs_type = {
69415 .name = "sockfs",
69416 @@ -1154,6 +1170,8 @@ static int __sock_create(struct net *net
69417 return -EAFNOSUPPORT;
69418 if (type < 0 || type >= SOCK_MAX)
69419 return -EINVAL;
69420 + if (protocol < 0)
69421 + return -EINVAL;
69422
69423 /* Compatibility.
69424
69425 @@ -1283,6 +1301,16 @@ SYSCALL_DEFINE3(socket, int, family, int
69426 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
69427 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
69428
69429 + if(!gr_search_socket(family, type, protocol)) {
69430 + retval = -EACCES;
69431 + goto out;
69432 + }
69433 +
69434 + if (gr_handle_sock_all(family, type, protocol)) {
69435 + retval = -EACCES;
69436 + goto out;
69437 + }
69438 +
69439 retval = sock_create(family, type, protocol, &sock);
69440 if (retval < 0)
69441 goto out;
69442 @@ -1415,6 +1443,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
69443 if (sock) {
69444 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
69445 if (err >= 0) {
69446 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
69447 + err = -EACCES;
69448 + goto error;
69449 + }
69450 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
69451 + if (err)
69452 + goto error;
69453 +
69454 err = security_socket_bind(sock,
69455 (struct sockaddr *)&address,
69456 addrlen);
69457 @@ -1423,6 +1459,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
69458 (struct sockaddr *)
69459 &address, addrlen);
69460 }
69461 +error:
69462 fput_light(sock->file, fput_needed);
69463 }
69464 return err;
69465 @@ -1446,10 +1483,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, ba
69466 if ((unsigned)backlog > somaxconn)
69467 backlog = somaxconn;
69468
69469 + if (gr_handle_sock_server_other(sock->sk)) {
69470 + err = -EPERM;
69471 + goto error;
69472 + }
69473 +
69474 + err = gr_search_listen(sock);
69475 + if (err)
69476 + goto error;
69477 +
69478 err = security_socket_listen(sock, backlog);
69479 if (!err)
69480 err = sock->ops->listen(sock, backlog);
69481
69482 +error:
69483 fput_light(sock->file, fput_needed);
69484 }
69485 return err;
69486 @@ -1492,6 +1539,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
69487 newsock->type = sock->type;
69488 newsock->ops = sock->ops;
69489
69490 + if (gr_handle_sock_server_other(sock->sk)) {
69491 + err = -EPERM;
69492 + sock_release(newsock);
69493 + goto out_put;
69494 + }
69495 +
69496 + err = gr_search_accept(sock);
69497 + if (err) {
69498 + sock_release(newsock);
69499 + goto out_put;
69500 + }
69501 +
69502 /*
69503 * We don't need try_module_get here, as the listening socket (sock)
69504 * has the protocol module (sock->ops->owner) held.
69505 @@ -1534,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
69506 fd_install(newfd, newfile);
69507 err = newfd;
69508
69509 + gr_attach_curr_ip(newsock->sk);
69510 +
69511 out_put:
69512 fput_light(sock->file, fput_needed);
69513 out:
69514 @@ -1571,6 +1632,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct
69515 int, addrlen)
69516 {
69517 struct socket *sock;
69518 + struct sockaddr *sck;
69519 struct sockaddr_storage address;
69520 int err, fput_needed;
69521
69522 @@ -1581,6 +1643,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct
69523 if (err < 0)
69524 goto out_put;
69525
69526 + sck = (struct sockaddr *)&address;
69527 +
69528 + if (gr_handle_sock_client(sck)) {
69529 + err = -EACCES;
69530 + goto out_put;
69531 + }
69532 +
69533 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
69534 + if (err)
69535 + goto out_put;
69536 +
69537 err =
69538 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
69539 if (err)
69540 @@ -1882,6 +1955,8 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct
69541 int err, ctl_len, iov_size, total_len;
69542 int fput_needed;
69543
69544 + pax_track_stack();
69545 +
69546 err = -EFAULT;
69547 if (MSG_CMSG_COMPAT & flags) {
69548 if (get_compat_msghdr(&msg_sys, msg_compat))
69549 diff -urNp linux-2.6.32.41/net/sunrpc/sched.c linux-2.6.32.41/net/sunrpc/sched.c
69550 --- linux-2.6.32.41/net/sunrpc/sched.c 2011-03-27 14:31:47.000000000 -0400
69551 +++ linux-2.6.32.41/net/sunrpc/sched.c 2011-04-17 15:56:46.000000000 -0400
69552 @@ -234,10 +234,10 @@ static int rpc_wait_bit_killable(void *w
69553 #ifdef RPC_DEBUG
69554 static void rpc_task_set_debuginfo(struct rpc_task *task)
69555 {
69556 - static atomic_t rpc_pid;
69557 + static atomic_unchecked_t rpc_pid;
69558
69559 task->tk_magic = RPC_TASK_MAGIC_ID;
69560 - task->tk_pid = atomic_inc_return(&rpc_pid);
69561 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
69562 }
69563 #else
69564 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
69565 diff -urNp linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma.c linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma.c
69566 --- linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma.c 2011-03-27 14:31:47.000000000 -0400
69567 +++ linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma.c 2011-05-04 17:56:20.000000000 -0400
69568 @@ -59,15 +59,15 @@ unsigned int svcrdma_max_req_size = RPCR
69569 static unsigned int min_max_inline = 4096;
69570 static unsigned int max_max_inline = 65536;
69571
69572 -atomic_t rdma_stat_recv;
69573 -atomic_t rdma_stat_read;
69574 -atomic_t rdma_stat_write;
69575 -atomic_t rdma_stat_sq_starve;
69576 -atomic_t rdma_stat_rq_starve;
69577 -atomic_t rdma_stat_rq_poll;
69578 -atomic_t rdma_stat_rq_prod;
69579 -atomic_t rdma_stat_sq_poll;
69580 -atomic_t rdma_stat_sq_prod;
69581 +atomic_unchecked_t rdma_stat_recv;
69582 +atomic_unchecked_t rdma_stat_read;
69583 +atomic_unchecked_t rdma_stat_write;
69584 +atomic_unchecked_t rdma_stat_sq_starve;
69585 +atomic_unchecked_t rdma_stat_rq_starve;
69586 +atomic_unchecked_t rdma_stat_rq_poll;
69587 +atomic_unchecked_t rdma_stat_rq_prod;
69588 +atomic_unchecked_t rdma_stat_sq_poll;
69589 +atomic_unchecked_t rdma_stat_sq_prod;
69590
69591 /* Temporary NFS request map and context caches */
69592 struct kmem_cache *svc_rdma_map_cachep;
69593 @@ -105,7 +105,7 @@ static int read_reset_stat(ctl_table *ta
69594 len -= *ppos;
69595 if (len > *lenp)
69596 len = *lenp;
69597 - if (len && copy_to_user(buffer, str_buf, len))
69598 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
69599 return -EFAULT;
69600 *lenp = len;
69601 *ppos += len;
69602 @@ -149,63 +149,63 @@ static ctl_table svcrdma_parm_table[] =
69603 {
69604 .procname = "rdma_stat_read",
69605 .data = &rdma_stat_read,
69606 - .maxlen = sizeof(atomic_t),
69607 + .maxlen = sizeof(atomic_unchecked_t),
69608 .mode = 0644,
69609 .proc_handler = &read_reset_stat,
69610 },
69611 {
69612 .procname = "rdma_stat_recv",
69613 .data = &rdma_stat_recv,
69614 - .maxlen = sizeof(atomic_t),
69615 + .maxlen = sizeof(atomic_unchecked_t),
69616 .mode = 0644,
69617 .proc_handler = &read_reset_stat,
69618 },
69619 {
69620 .procname = "rdma_stat_write",
69621 .data = &rdma_stat_write,
69622 - .maxlen = sizeof(atomic_t),
69623 + .maxlen = sizeof(atomic_unchecked_t),
69624 .mode = 0644,
69625 .proc_handler = &read_reset_stat,
69626 },
69627 {
69628 .procname = "rdma_stat_sq_starve",
69629 .data = &rdma_stat_sq_starve,
69630 - .maxlen = sizeof(atomic_t),
69631 + .maxlen = sizeof(atomic_unchecked_t),
69632 .mode = 0644,
69633 .proc_handler = &read_reset_stat,
69634 },
69635 {
69636 .procname = "rdma_stat_rq_starve",
69637 .data = &rdma_stat_rq_starve,
69638 - .maxlen = sizeof(atomic_t),
69639 + .maxlen = sizeof(atomic_unchecked_t),
69640 .mode = 0644,
69641 .proc_handler = &read_reset_stat,
69642 },
69643 {
69644 .procname = "rdma_stat_rq_poll",
69645 .data = &rdma_stat_rq_poll,
69646 - .maxlen = sizeof(atomic_t),
69647 + .maxlen = sizeof(atomic_unchecked_t),
69648 .mode = 0644,
69649 .proc_handler = &read_reset_stat,
69650 },
69651 {
69652 .procname = "rdma_stat_rq_prod",
69653 .data = &rdma_stat_rq_prod,
69654 - .maxlen = sizeof(atomic_t),
69655 + .maxlen = sizeof(atomic_unchecked_t),
69656 .mode = 0644,
69657 .proc_handler = &read_reset_stat,
69658 },
69659 {
69660 .procname = "rdma_stat_sq_poll",
69661 .data = &rdma_stat_sq_poll,
69662 - .maxlen = sizeof(atomic_t),
69663 + .maxlen = sizeof(atomic_unchecked_t),
69664 .mode = 0644,
69665 .proc_handler = &read_reset_stat,
69666 },
69667 {
69668 .procname = "rdma_stat_sq_prod",
69669 .data = &rdma_stat_sq_prod,
69670 - .maxlen = sizeof(atomic_t),
69671 + .maxlen = sizeof(atomic_unchecked_t),
69672 .mode = 0644,
69673 .proc_handler = &read_reset_stat,
69674 },
69675 diff -urNp linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
69676 --- linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-03-27 14:31:47.000000000 -0400
69677 +++ linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-05-04 17:56:28.000000000 -0400
69678 @@ -495,7 +495,7 @@ next_sge:
69679 svc_rdma_put_context(ctxt, 0);
69680 goto out;
69681 }
69682 - atomic_inc(&rdma_stat_read);
69683 + atomic_inc_unchecked(&rdma_stat_read);
69684
69685 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
69686 chl_map->ch[ch_no].count -= read_wr.num_sge;
69687 @@ -606,7 +606,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
69688 dto_q);
69689 list_del_init(&ctxt->dto_q);
69690 } else {
69691 - atomic_inc(&rdma_stat_rq_starve);
69692 + atomic_inc_unchecked(&rdma_stat_rq_starve);
69693 clear_bit(XPT_DATA, &xprt->xpt_flags);
69694 ctxt = NULL;
69695 }
69696 @@ -626,7 +626,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
69697 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
69698 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
69699 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
69700 - atomic_inc(&rdma_stat_recv);
69701 + atomic_inc_unchecked(&rdma_stat_recv);
69702
69703 /* Build up the XDR from the receive buffers. */
69704 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
69705 diff -urNp linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma_sendto.c linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma_sendto.c
69706 --- linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-03-27 14:31:47.000000000 -0400
69707 +++ linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-05-04 17:56:28.000000000 -0400
69708 @@ -328,7 +328,7 @@ static int send_write(struct svcxprt_rdm
69709 write_wr.wr.rdma.remote_addr = to;
69710
69711 /* Post It */
69712 - atomic_inc(&rdma_stat_write);
69713 + atomic_inc_unchecked(&rdma_stat_write);
69714 if (svc_rdma_send(xprt, &write_wr))
69715 goto err;
69716 return 0;
69717 diff -urNp linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma_transport.c linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma_transport.c
69718 --- linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-03-27 14:31:47.000000000 -0400
69719 +++ linux-2.6.32.41/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-05-04 17:56:28.000000000 -0400
69720 @@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rd
69721 return;
69722
69723 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
69724 - atomic_inc(&rdma_stat_rq_poll);
69725 + atomic_inc_unchecked(&rdma_stat_rq_poll);
69726
69727 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
69728 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
69729 @@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rd
69730 }
69731
69732 if (ctxt)
69733 - atomic_inc(&rdma_stat_rq_prod);
69734 + atomic_inc_unchecked(&rdma_stat_rq_prod);
69735
69736 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
69737 /*
69738 @@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rd
69739 return;
69740
69741 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
69742 - atomic_inc(&rdma_stat_sq_poll);
69743 + atomic_inc_unchecked(&rdma_stat_sq_poll);
69744 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
69745 if (wc.status != IB_WC_SUCCESS)
69746 /* Close the transport */
69747 @@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rd
69748 }
69749
69750 if (ctxt)
69751 - atomic_inc(&rdma_stat_sq_prod);
69752 + atomic_inc_unchecked(&rdma_stat_sq_prod);
69753 }
69754
69755 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
69756 @@ -1260,7 +1260,7 @@ int svc_rdma_send(struct svcxprt_rdma *x
69757 spin_lock_bh(&xprt->sc_lock);
69758 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
69759 spin_unlock_bh(&xprt->sc_lock);
69760 - atomic_inc(&rdma_stat_sq_starve);
69761 + atomic_inc_unchecked(&rdma_stat_sq_starve);
69762
69763 /* See if we can opportunistically reap SQ WR to make room */
69764 sq_cq_reap(xprt);
69765 diff -urNp linux-2.6.32.41/net/sysctl_net.c linux-2.6.32.41/net/sysctl_net.c
69766 --- linux-2.6.32.41/net/sysctl_net.c 2011-03-27 14:31:47.000000000 -0400
69767 +++ linux-2.6.32.41/net/sysctl_net.c 2011-04-17 15:56:46.000000000 -0400
69768 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ct
69769 struct ctl_table *table)
69770 {
69771 /* Allow network administrator to have same access as root. */
69772 - if (capable(CAP_NET_ADMIN)) {
69773 + if (capable_nolog(CAP_NET_ADMIN)) {
69774 int mode = (table->mode >> 6) & 7;
69775 return (mode << 6) | (mode << 3) | mode;
69776 }
69777 diff -urNp linux-2.6.32.41/net/unix/af_unix.c linux-2.6.32.41/net/unix/af_unix.c
69778 --- linux-2.6.32.41/net/unix/af_unix.c 2011-05-10 22:12:02.000000000 -0400
69779 +++ linux-2.6.32.41/net/unix/af_unix.c 2011-05-10 22:12:34.000000000 -0400
69780 @@ -745,6 +745,12 @@ static struct sock *unix_find_other(stru
69781 err = -ECONNREFUSED;
69782 if (!S_ISSOCK(inode->i_mode))
69783 goto put_fail;
69784 +
69785 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
69786 + err = -EACCES;
69787 + goto put_fail;
69788 + }
69789 +
69790 u = unix_find_socket_byinode(net, inode);
69791 if (!u)
69792 goto put_fail;
69793 @@ -765,6 +771,13 @@ static struct sock *unix_find_other(stru
69794 if (u) {
69795 struct dentry *dentry;
69796 dentry = unix_sk(u)->dentry;
69797 +
69798 + if (!gr_handle_chroot_unix(u->sk_peercred.pid)) {
69799 + err = -EPERM;
69800 + sock_put(u);
69801 + goto fail;
69802 + }
69803 +
69804 if (dentry)
69805 touch_atime(unix_sk(u)->mnt, dentry);
69806 } else
69807 @@ -850,11 +863,18 @@ static int unix_bind(struct socket *sock
69808 err = security_path_mknod(&nd.path, dentry, mode, 0);
69809 if (err)
69810 goto out_mknod_drop_write;
69811 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
69812 + err = -EACCES;
69813 + goto out_mknod_drop_write;
69814 + }
69815 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
69816 out_mknod_drop_write:
69817 mnt_drop_write(nd.path.mnt);
69818 if (err)
69819 goto out_mknod_dput;
69820 +
69821 + gr_handle_create(dentry, nd.path.mnt);
69822 +
69823 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
69824 dput(nd.path.dentry);
69825 nd.path.dentry = dentry;
69826 @@ -872,6 +892,10 @@ out_mknod_drop_write:
69827 goto out_unlock;
69828 }
69829
69830 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
69831 + sk->sk_peercred.pid = current->pid;
69832 +#endif
69833 +
69834 list = &unix_socket_table[addr->hash];
69835 } else {
69836 list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
69837 @@ -2211,7 +2235,11 @@ static int unix_seq_show(struct seq_file
69838 unix_state_lock(s);
69839
69840 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
69841 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69842 + NULL,
69843 +#else
69844 s,
69845 +#endif
69846 atomic_read(&s->sk_refcnt),
69847 0,
69848 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
69849 diff -urNp linux-2.6.32.41/net/wireless/wext.c linux-2.6.32.41/net/wireless/wext.c
69850 --- linux-2.6.32.41/net/wireless/wext.c 2011-03-27 14:31:47.000000000 -0400
69851 +++ linux-2.6.32.41/net/wireless/wext.c 2011-04-17 15:56:46.000000000 -0400
69852 @@ -816,8 +816,7 @@ static int ioctl_standard_iw_point(struc
69853 */
69854
69855 /* Support for very large requests */
69856 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
69857 - (user_length > descr->max_tokens)) {
69858 + if (user_length > descr->max_tokens) {
69859 /* Allow userspace to GET more than max so
69860 * we can support any size GET requests.
69861 * There is still a limit : -ENOMEM.
69862 @@ -854,22 +853,6 @@ static int ioctl_standard_iw_point(struc
69863 }
69864 }
69865
69866 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
69867 - /*
69868 - * If this is a GET, but not NOMAX, it means that the extra
69869 - * data is not bounded by userspace, but by max_tokens. Thus
69870 - * set the length to max_tokens. This matches the extra data
69871 - * allocation.
69872 - * The driver should fill it with the number of tokens it
69873 - * provided, and it may check iwp->length rather than having
69874 - * knowledge of max_tokens. If the driver doesn't change the
69875 - * iwp->length, this ioctl just copies back max_token tokens
69876 - * filled with zeroes. Hopefully the driver isn't claiming
69877 - * them to be valid data.
69878 - */
69879 - iwp->length = descr->max_tokens;
69880 - }
69881 -
69882 err = handler(dev, info, (union iwreq_data *) iwp, extra);
69883
69884 iwp->length += essid_compat;
69885 diff -urNp linux-2.6.32.41/net/xfrm/xfrm_policy.c linux-2.6.32.41/net/xfrm/xfrm_policy.c
69886 --- linux-2.6.32.41/net/xfrm/xfrm_policy.c 2011-03-27 14:31:47.000000000 -0400
69887 +++ linux-2.6.32.41/net/xfrm/xfrm_policy.c 2011-05-04 17:56:20.000000000 -0400
69888 @@ -586,7 +586,7 @@ int xfrm_policy_insert(int dir, struct x
69889 hlist_add_head(&policy->bydst, chain);
69890 xfrm_pol_hold(policy);
69891 net->xfrm.policy_count[dir]++;
69892 - atomic_inc(&flow_cache_genid);
69893 + atomic_inc_unchecked(&flow_cache_genid);
69894 if (delpol)
69895 __xfrm_policy_unlink(delpol, dir);
69896 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
69897 @@ -669,7 +669,7 @@ struct xfrm_policy *xfrm_policy_bysel_ct
69898 write_unlock_bh(&xfrm_policy_lock);
69899
69900 if (ret && delete) {
69901 - atomic_inc(&flow_cache_genid);
69902 + atomic_inc_unchecked(&flow_cache_genid);
69903 xfrm_policy_kill(ret);
69904 }
69905 return ret;
69906 @@ -710,7 +710,7 @@ struct xfrm_policy *xfrm_policy_byid(str
69907 write_unlock_bh(&xfrm_policy_lock);
69908
69909 if (ret && delete) {
69910 - atomic_inc(&flow_cache_genid);
69911 + atomic_inc_unchecked(&flow_cache_genid);
69912 xfrm_policy_kill(ret);
69913 }
69914 return ret;
69915 @@ -824,7 +824,7 @@ int xfrm_policy_flush(struct net *net, u
69916 }
69917
69918 }
69919 - atomic_inc(&flow_cache_genid);
69920 + atomic_inc_unchecked(&flow_cache_genid);
69921 out:
69922 write_unlock_bh(&xfrm_policy_lock);
69923 return err;
69924 @@ -1088,7 +1088,7 @@ int xfrm_policy_delete(struct xfrm_polic
69925 write_unlock_bh(&xfrm_policy_lock);
69926 if (pol) {
69927 if (dir < XFRM_POLICY_MAX)
69928 - atomic_inc(&flow_cache_genid);
69929 + atomic_inc_unchecked(&flow_cache_genid);
69930 xfrm_policy_kill(pol);
69931 return 0;
69932 }
69933 @@ -1477,7 +1477,7 @@ free_dst:
69934 goto out;
69935 }
69936
69937 -static int inline
69938 +static inline int
69939 xfrm_dst_alloc_copy(void **target, void *src, int size)
69940 {
69941 if (!*target) {
69942 @@ -1489,7 +1489,7 @@ xfrm_dst_alloc_copy(void **target, void
69943 return 0;
69944 }
69945
69946 -static int inline
69947 +static inline int
69948 xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
69949 {
69950 #ifdef CONFIG_XFRM_SUB_POLICY
69951 @@ -1501,7 +1501,7 @@ xfrm_dst_update_parent(struct dst_entry
69952 #endif
69953 }
69954
69955 -static int inline
69956 +static inline int
69957 xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
69958 {
69959 #ifdef CONFIG_XFRM_SUB_POLICY
69960 @@ -1537,7 +1537,7 @@ int __xfrm_lookup(struct net *net, struc
69961 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
69962
69963 restart:
69964 - genid = atomic_read(&flow_cache_genid);
69965 + genid = atomic_read_unchecked(&flow_cache_genid);
69966 policy = NULL;
69967 for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
69968 pols[pi] = NULL;
69969 @@ -1680,7 +1680,7 @@ restart:
69970 goto error;
69971 }
69972 if (nx == -EAGAIN ||
69973 - genid != atomic_read(&flow_cache_genid)) {
69974 + genid != atomic_read_unchecked(&flow_cache_genid)) {
69975 xfrm_pols_put(pols, npols);
69976 goto restart;
69977 }
69978 diff -urNp linux-2.6.32.41/net/xfrm/xfrm_user.c linux-2.6.32.41/net/xfrm/xfrm_user.c
69979 --- linux-2.6.32.41/net/xfrm/xfrm_user.c 2011-03-27 14:31:47.000000000 -0400
69980 +++ linux-2.6.32.41/net/xfrm/xfrm_user.c 2011-05-16 21:46:57.000000000 -0400
69981 @@ -1169,6 +1169,8 @@ static int copy_to_user_tmpl(struct xfrm
69982 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
69983 int i;
69984
69985 + pax_track_stack();
69986 +
69987 if (xp->xfrm_nr == 0)
69988 return 0;
69989
69990 @@ -1784,6 +1786,8 @@ static int xfrm_do_migrate(struct sk_buf
69991 int err;
69992 int n = 0;
69993
69994 + pax_track_stack();
69995 +
69996 if (attrs[XFRMA_MIGRATE] == NULL)
69997 return -EINVAL;
69998
69999 diff -urNp linux-2.6.32.41/samples/kobject/kset-example.c linux-2.6.32.41/samples/kobject/kset-example.c
70000 --- linux-2.6.32.41/samples/kobject/kset-example.c 2011-03-27 14:31:47.000000000 -0400
70001 +++ linux-2.6.32.41/samples/kobject/kset-example.c 2011-04-17 15:56:46.000000000 -0400
70002 @@ -87,7 +87,7 @@ static ssize_t foo_attr_store(struct kob
70003 }
70004
70005 /* Our custom sysfs_ops that we will associate with our ktype later on */
70006 -static struct sysfs_ops foo_sysfs_ops = {
70007 +static const struct sysfs_ops foo_sysfs_ops = {
70008 .show = foo_attr_show,
70009 .store = foo_attr_store,
70010 };
70011 diff -urNp linux-2.6.32.41/scripts/basic/fixdep.c linux-2.6.32.41/scripts/basic/fixdep.c
70012 --- linux-2.6.32.41/scripts/basic/fixdep.c 2011-03-27 14:31:47.000000000 -0400
70013 +++ linux-2.6.32.41/scripts/basic/fixdep.c 2011-04-17 15:56:46.000000000 -0400
70014 @@ -222,9 +222,9 @@ static void use_config(char *m, int slen
70015
70016 static void parse_config_file(char *map, size_t len)
70017 {
70018 - int *end = (int *) (map + len);
70019 + unsigned int *end = (unsigned int *) (map + len);
70020 /* start at +1, so that p can never be < map */
70021 - int *m = (int *) map + 1;
70022 + unsigned int *m = (unsigned int *) map + 1;
70023 char *p, *q;
70024
70025 for (; m < end; m++) {
70026 @@ -371,7 +371,7 @@ static void print_deps(void)
70027 static void traps(void)
70028 {
70029 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
70030 - int *p = (int *)test;
70031 + unsigned int *p = (unsigned int *)test;
70032
70033 if (*p != INT_CONF) {
70034 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
70035 diff -urNp linux-2.6.32.41/scripts/Makefile.build linux-2.6.32.41/scripts/Makefile.build
70036 --- linux-2.6.32.41/scripts/Makefile.build 2011-03-27 14:31:47.000000000 -0400
70037 +++ linux-2.6.32.41/scripts/Makefile.build 2011-06-04 20:46:51.000000000 -0400
70038 @@ -59,7 +59,7 @@ endif
70039 endif
70040
70041 # Do not include host rules unless needed
70042 -ifneq ($(hostprogs-y)$(hostprogs-m),)
70043 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
70044 include scripts/Makefile.host
70045 endif
70046
70047 diff -urNp linux-2.6.32.41/scripts/Makefile.clean linux-2.6.32.41/scripts/Makefile.clean
70048 --- linux-2.6.32.41/scripts/Makefile.clean 2011-03-27 14:31:47.000000000 -0400
70049 +++ linux-2.6.32.41/scripts/Makefile.clean 2011-06-04 20:47:19.000000000 -0400
70050 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subd
70051 __clean-files := $(extra-y) $(always) \
70052 $(targets) $(clean-files) \
70053 $(host-progs) \
70054 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
70055 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
70056 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
70057
70058 # as clean-files is given relative to the current directory, this adds
70059 # a $(obj) prefix, except for absolute paths
70060 diff -urNp linux-2.6.32.41/scripts/Makefile.host linux-2.6.32.41/scripts/Makefile.host
70061 --- linux-2.6.32.41/scripts/Makefile.host 2011-03-27 14:31:47.000000000 -0400
70062 +++ linux-2.6.32.41/scripts/Makefile.host 2011-06-04 20:48:22.000000000 -0400
70063 @@ -31,6 +31,7 @@
70064 # Note: Shared libraries consisting of C++ files are not supported
70065
70066 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
70067 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
70068
70069 # C code
70070 # Executables compiled from a single .c file
70071 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(hos
70072 # Shared libaries (only .c supported)
70073 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
70074 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
70075 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
70076 # Remove .so files from "xxx-objs"
70077 host-cobjs := $(filter-out %.so,$(host-cobjs))
70078
70079 diff -urNp linux-2.6.32.41/scripts/mod/file2alias.c linux-2.6.32.41/scripts/mod/file2alias.c
70080 --- linux-2.6.32.41/scripts/mod/file2alias.c 2011-03-27 14:31:47.000000000 -0400
70081 +++ linux-2.6.32.41/scripts/mod/file2alias.c 2011-04-17 15:56:46.000000000 -0400
70082 @@ -72,7 +72,7 @@ static void device_id_check(const char *
70083 unsigned long size, unsigned long id_size,
70084 void *symval)
70085 {
70086 - int i;
70087 + unsigned int i;
70088
70089 if (size % id_size || size < id_size) {
70090 if (cross_build != 0)
70091 @@ -102,7 +102,7 @@ static void device_id_check(const char *
70092 /* USB is special because the bcdDevice can be matched against a numeric range */
70093 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
70094 static void do_usb_entry(struct usb_device_id *id,
70095 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
70096 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
70097 unsigned char range_lo, unsigned char range_hi,
70098 struct module *mod)
70099 {
70100 @@ -368,7 +368,7 @@ static void do_pnp_device_entry(void *sy
70101 for (i = 0; i < count; i++) {
70102 const char *id = (char *)devs[i].id;
70103 char acpi_id[sizeof(devs[0].id)];
70104 - int j;
70105 + unsigned int j;
70106
70107 buf_printf(&mod->dev_table_buf,
70108 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
70109 @@ -398,7 +398,7 @@ static void do_pnp_card_entries(void *sy
70110
70111 for (j = 0; j < PNP_MAX_DEVICES; j++) {
70112 const char *id = (char *)card->devs[j].id;
70113 - int i2, j2;
70114 + unsigned int i2, j2;
70115 int dup = 0;
70116
70117 if (!id[0])
70118 @@ -424,7 +424,7 @@ static void do_pnp_card_entries(void *sy
70119 /* add an individual alias for every device entry */
70120 if (!dup) {
70121 char acpi_id[sizeof(card->devs[0].id)];
70122 - int k;
70123 + unsigned int k;
70124
70125 buf_printf(&mod->dev_table_buf,
70126 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
70127 @@ -699,7 +699,7 @@ static void dmi_ascii_filter(char *d, co
70128 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
70129 char *alias)
70130 {
70131 - int i, j;
70132 + unsigned int i, j;
70133
70134 sprintf(alias, "dmi*");
70135
70136 diff -urNp linux-2.6.32.41/scripts/mod/modpost.c linux-2.6.32.41/scripts/mod/modpost.c
70137 --- linux-2.6.32.41/scripts/mod/modpost.c 2011-03-27 14:31:47.000000000 -0400
70138 +++ linux-2.6.32.41/scripts/mod/modpost.c 2011-04-17 15:56:46.000000000 -0400
70139 @@ -835,6 +835,7 @@ enum mismatch {
70140 INIT_TO_EXIT,
70141 EXIT_TO_INIT,
70142 EXPORT_TO_INIT_EXIT,
70143 + DATA_TO_TEXT
70144 };
70145
70146 struct sectioncheck {
70147 @@ -920,6 +921,12 @@ const struct sectioncheck sectioncheck[]
70148 .fromsec = { "__ksymtab*", NULL },
70149 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
70150 .mismatch = EXPORT_TO_INIT_EXIT
70151 +},
70152 +/* Do not reference code from writable data */
70153 +{
70154 + .fromsec = { DATA_SECTIONS, NULL },
70155 + .tosec = { TEXT_SECTIONS, NULL },
70156 + .mismatch = DATA_TO_TEXT
70157 }
70158 };
70159
70160 @@ -1024,10 +1031,10 @@ static Elf_Sym *find_elf_symbol(struct e
70161 continue;
70162 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
70163 continue;
70164 - if (sym->st_value == addr)
70165 - return sym;
70166 /* Find a symbol nearby - addr are maybe negative */
70167 d = sym->st_value - addr;
70168 + if (d == 0)
70169 + return sym;
70170 if (d < 0)
70171 d = addr - sym->st_value;
70172 if (d < distance) {
70173 @@ -1268,6 +1275,14 @@ static void report_sec_mismatch(const ch
70174 "Fix this by removing the %sannotation of %s "
70175 "or drop the export.\n",
70176 tosym, sec2annotation(tosec), sec2annotation(tosec), tosym);
70177 + case DATA_TO_TEXT:
70178 +/*
70179 + fprintf(stderr,
70180 + "The variable %s references\n"
70181 + "the %s %s%s%s\n",
70182 + fromsym, to, sec2annotation(tosec), tosym, to_p);
70183 +*/
70184 + break;
70185 case NO_MISMATCH:
70186 /* To get warnings on missing members */
70187 break;
70188 @@ -1651,7 +1666,7 @@ void __attribute__((format(printf, 2, 3)
70189 va_end(ap);
70190 }
70191
70192 -void buf_write(struct buffer *buf, const char *s, int len)
70193 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
70194 {
70195 if (buf->size - buf->pos < len) {
70196 buf->size += len + SZ;
70197 @@ -1863,7 +1878,7 @@ static void write_if_changed(struct buff
70198 if (fstat(fileno(file), &st) < 0)
70199 goto close_write;
70200
70201 - if (st.st_size != b->pos)
70202 + if (st.st_size != (off_t)b->pos)
70203 goto close_write;
70204
70205 tmp = NOFAIL(malloc(b->pos));
70206 diff -urNp linux-2.6.32.41/scripts/mod/modpost.h linux-2.6.32.41/scripts/mod/modpost.h
70207 --- linux-2.6.32.41/scripts/mod/modpost.h 2011-03-27 14:31:47.000000000 -0400
70208 +++ linux-2.6.32.41/scripts/mod/modpost.h 2011-04-17 15:56:46.000000000 -0400
70209 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *e
70210
70211 struct buffer {
70212 char *p;
70213 - int pos;
70214 - int size;
70215 + unsigned int pos;
70216 + unsigned int size;
70217 };
70218
70219 void __attribute__((format(printf, 2, 3)))
70220 buf_printf(struct buffer *buf, const char *fmt, ...);
70221
70222 void
70223 -buf_write(struct buffer *buf, const char *s, int len);
70224 +buf_write(struct buffer *buf, const char *s, unsigned int len);
70225
70226 struct module {
70227 struct module *next;
70228 diff -urNp linux-2.6.32.41/scripts/mod/sumversion.c linux-2.6.32.41/scripts/mod/sumversion.c
70229 --- linux-2.6.32.41/scripts/mod/sumversion.c 2011-03-27 14:31:47.000000000 -0400
70230 +++ linux-2.6.32.41/scripts/mod/sumversion.c 2011-04-17 15:56:46.000000000 -0400
70231 @@ -455,7 +455,7 @@ static void write_version(const char *fi
70232 goto out;
70233 }
70234
70235 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
70236 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
70237 warn("writing sum in %s failed: %s\n",
70238 filename, strerror(errno));
70239 goto out;
70240 diff -urNp linux-2.6.32.41/scripts/pnmtologo.c linux-2.6.32.41/scripts/pnmtologo.c
70241 --- linux-2.6.32.41/scripts/pnmtologo.c 2011-03-27 14:31:47.000000000 -0400
70242 +++ linux-2.6.32.41/scripts/pnmtologo.c 2011-04-17 15:56:46.000000000 -0400
70243 @@ -237,14 +237,14 @@ static void write_header(void)
70244 fprintf(out, " * Linux logo %s\n", logoname);
70245 fputs(" */\n\n", out);
70246 fputs("#include <linux/linux_logo.h>\n\n", out);
70247 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
70248 + fprintf(out, "static unsigned char %s_data[] = {\n",
70249 logoname);
70250 }
70251
70252 static void write_footer(void)
70253 {
70254 fputs("\n};\n\n", out);
70255 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
70256 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
70257 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
70258 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
70259 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
70260 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
70261 fputs("\n};\n\n", out);
70262
70263 /* write logo clut */
70264 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
70265 + fprintf(out, "static unsigned char %s_clut[] = {\n",
70266 logoname);
70267 write_hex_cnt = 0;
70268 for (i = 0; i < logo_clutsize; i++) {
70269 diff -urNp linux-2.6.32.41/scripts/tags.sh linux-2.6.32.41/scripts/tags.sh
70270 --- linux-2.6.32.41/scripts/tags.sh 2011-03-27 14:31:47.000000000 -0400
70271 +++ linux-2.6.32.41/scripts/tags.sh 2011-06-07 18:06:04.000000000 -0400
70272 @@ -93,6 +93,11 @@ docscope()
70273 cscope -b -f cscope.out
70274 }
70275
70276 +dogtags()
70277 +{
70278 + all_sources | gtags -f -
70279 +}
70280 +
70281 exuberant()
70282 {
70283 all_sources | xargs $1 -a \
70284 @@ -164,6 +169,10 @@ case "$1" in
70285 docscope
70286 ;;
70287
70288 + "gtags")
70289 + dogtags
70290 + ;;
70291 +
70292 "tags")
70293 rm -f tags
70294 xtags ctags
70295 diff -urNp linux-2.6.32.41/security/capability.c linux-2.6.32.41/security/capability.c
70296 --- linux-2.6.32.41/security/capability.c 2011-03-27 14:31:47.000000000 -0400
70297 +++ linux-2.6.32.41/security/capability.c 2011-04-17 15:56:46.000000000 -0400
70298 @@ -890,7 +890,7 @@ static void cap_audit_rule_free(void *ls
70299 }
70300 #endif /* CONFIG_AUDIT */
70301
70302 -struct security_operations default_security_ops = {
70303 +struct security_operations default_security_ops __read_only = {
70304 .name = "default",
70305 };
70306
70307 diff -urNp linux-2.6.32.41/security/commoncap.c linux-2.6.32.41/security/commoncap.c
70308 --- linux-2.6.32.41/security/commoncap.c 2011-03-27 14:31:47.000000000 -0400
70309 +++ linux-2.6.32.41/security/commoncap.c 2011-04-17 15:56:46.000000000 -0400
70310 @@ -27,7 +27,7 @@
70311 #include <linux/sched.h>
70312 #include <linux/prctl.h>
70313 #include <linux/securebits.h>
70314 -
70315 +#include <net/sock.h>
70316 /*
70317 * If a non-root user executes a setuid-root binary in
70318 * !secure(SECURE_NOROOT) mode, then we raise capabilities.
70319 @@ -50,9 +50,11 @@ static void warn_setuid_and_fcaps_mixed(
70320 }
70321 }
70322
70323 +extern kernel_cap_t gr_cap_rtnetlink(struct sock *sk);
70324 +
70325 int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
70326 {
70327 - NETLINK_CB(skb).eff_cap = current_cap();
70328 + NETLINK_CB(skb).eff_cap = gr_cap_rtnetlink(sk);
70329 return 0;
70330 }
70331
70332 @@ -582,6 +584,9 @@ int cap_bprm_secureexec(struct linux_bin
70333 {
70334 const struct cred *cred = current_cred();
70335
70336 + if (gr_acl_enable_at_secure())
70337 + return 1;
70338 +
70339 if (cred->uid != 0) {
70340 if (bprm->cap_effective)
70341 return 1;
70342 diff -urNp linux-2.6.32.41/security/integrity/ima/ima_api.c linux-2.6.32.41/security/integrity/ima/ima_api.c
70343 --- linux-2.6.32.41/security/integrity/ima/ima_api.c 2011-03-27 14:31:47.000000000 -0400
70344 +++ linux-2.6.32.41/security/integrity/ima/ima_api.c 2011-04-17 15:56:46.000000000 -0400
70345 @@ -74,7 +74,7 @@ void ima_add_violation(struct inode *ino
70346 int result;
70347
70348 /* can overflow, only indicator */
70349 - atomic_long_inc(&ima_htable.violations);
70350 + atomic_long_inc_unchecked(&ima_htable.violations);
70351
70352 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
70353 if (!entry) {
70354 diff -urNp linux-2.6.32.41/security/integrity/ima/ima_fs.c linux-2.6.32.41/security/integrity/ima/ima_fs.c
70355 --- linux-2.6.32.41/security/integrity/ima/ima_fs.c 2011-03-27 14:31:47.000000000 -0400
70356 +++ linux-2.6.32.41/security/integrity/ima/ima_fs.c 2011-04-17 15:56:46.000000000 -0400
70357 @@ -27,12 +27,12 @@
70358 static int valid_policy = 1;
70359 #define TMPBUFLEN 12
70360 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
70361 - loff_t *ppos, atomic_long_t *val)
70362 + loff_t *ppos, atomic_long_unchecked_t *val)
70363 {
70364 char tmpbuf[TMPBUFLEN];
70365 ssize_t len;
70366
70367 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
70368 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
70369 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
70370 }
70371
70372 diff -urNp linux-2.6.32.41/security/integrity/ima/ima.h linux-2.6.32.41/security/integrity/ima/ima.h
70373 --- linux-2.6.32.41/security/integrity/ima/ima.h 2011-03-27 14:31:47.000000000 -0400
70374 +++ linux-2.6.32.41/security/integrity/ima/ima.h 2011-04-17 15:56:46.000000000 -0400
70375 @@ -84,8 +84,8 @@ void ima_add_violation(struct inode *ino
70376 extern spinlock_t ima_queue_lock;
70377
70378 struct ima_h_table {
70379 - atomic_long_t len; /* number of stored measurements in the list */
70380 - atomic_long_t violations;
70381 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
70382 + atomic_long_unchecked_t violations;
70383 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
70384 };
70385 extern struct ima_h_table ima_htable;
70386 diff -urNp linux-2.6.32.41/security/integrity/ima/ima_queue.c linux-2.6.32.41/security/integrity/ima/ima_queue.c
70387 --- linux-2.6.32.41/security/integrity/ima/ima_queue.c 2011-03-27 14:31:47.000000000 -0400
70388 +++ linux-2.6.32.41/security/integrity/ima/ima_queue.c 2011-04-17 15:56:46.000000000 -0400
70389 @@ -78,7 +78,7 @@ static int ima_add_digest_entry(struct i
70390 INIT_LIST_HEAD(&qe->later);
70391 list_add_tail_rcu(&qe->later, &ima_measurements);
70392
70393 - atomic_long_inc(&ima_htable.len);
70394 + atomic_long_inc_unchecked(&ima_htable.len);
70395 key = ima_hash_key(entry->digest);
70396 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
70397 return 0;
70398 diff -urNp linux-2.6.32.41/security/Kconfig linux-2.6.32.41/security/Kconfig
70399 --- linux-2.6.32.41/security/Kconfig 2011-03-27 14:31:47.000000000 -0400
70400 +++ linux-2.6.32.41/security/Kconfig 2011-06-04 20:45:36.000000000 -0400
70401 @@ -4,6 +4,555 @@
70402
70403 menu "Security options"
70404
70405 +source grsecurity/Kconfig
70406 +
70407 +menu "PaX"
70408 +
70409 + config ARCH_TRACK_EXEC_LIMIT
70410 + bool
70411 +
70412 + config PAX_PER_CPU_PGD
70413 + bool
70414 +
70415 + config TASK_SIZE_MAX_SHIFT
70416 + int
70417 + depends on X86_64
70418 + default 47 if !PAX_PER_CPU_PGD
70419 + default 42 if PAX_PER_CPU_PGD
70420 +
70421 + config PAX_ENABLE_PAE
70422 + bool
70423 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
70424 +
70425 +config PAX
70426 + bool "Enable various PaX features"
70427 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
70428 + help
70429 + This allows you to enable various PaX features. PaX adds
70430 + intrusion prevention mechanisms to the kernel that reduce
70431 + the risks posed by exploitable memory corruption bugs.
70432 +
70433 +menu "PaX Control"
70434 + depends on PAX
70435 +
70436 +config PAX_SOFTMODE
70437 + bool 'Support soft mode'
70438 + select PAX_PT_PAX_FLAGS
70439 + help
70440 + Enabling this option will allow you to run PaX in soft mode, that
70441 + is, PaX features will not be enforced by default, only on executables
70442 + marked explicitly. You must also enable PT_PAX_FLAGS support as it
70443 + is the only way to mark executables for soft mode use.
70444 +
70445 + Soft mode can be activated by using the "pax_softmode=1" kernel command
70446 + line option on boot. Furthermore you can control various PaX features
70447 + at runtime via the entries in /proc/sys/kernel/pax.
70448 +
70449 +config PAX_EI_PAX
70450 + bool 'Use legacy ELF header marking'
70451 + help
70452 + Enabling this option will allow you to control PaX features on
70453 + a per executable basis via the 'chpax' utility available at
70454 + http://pax.grsecurity.net/. The control flags will be read from
70455 + an otherwise reserved part of the ELF header. This marking has
70456 + numerous drawbacks (no support for soft-mode, toolchain does not
70457 + know about the non-standard use of the ELF header) therefore it
70458 + has been deprecated in favour of PT_PAX_FLAGS support.
70459 +
70460 + Note that if you enable PT_PAX_FLAGS marking support as well,
70461 + the PT_PAX_FLAG marks will override the legacy EI_PAX marks.
70462 +
70463 +config PAX_PT_PAX_FLAGS
70464 + bool 'Use ELF program header marking'
70465 + help
70466 + Enabling this option will allow you to control PaX features on
70467 + a per executable basis via the 'paxctl' utility available at
70468 + http://pax.grsecurity.net/. The control flags will be read from
70469 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
70470 + has the benefits of supporting both soft mode and being fully
70471 + integrated into the toolchain (the binutils patch is available
70472 + from http://pax.grsecurity.net).
70473 +
70474 + If your toolchain does not support PT_PAX_FLAGS markings,
70475 + you can create one in most cases with 'paxctl -C'.
70476 +
70477 + Note that if you enable the legacy EI_PAX marking support as well,
70478 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
70479 +
70480 +choice
70481 + prompt 'MAC system integration'
70482 + default PAX_HAVE_ACL_FLAGS
70483 + help
70484 + Mandatory Access Control systems have the option of controlling
70485 + PaX flags on a per executable basis, choose the method supported
70486 + by your particular system.
70487 +
70488 + - "none": if your MAC system does not interact with PaX,
70489 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
70490 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
70491 +
70492 + NOTE: this option is for developers/integrators only.
70493 +
70494 + config PAX_NO_ACL_FLAGS
70495 + bool 'none'
70496 +
70497 + config PAX_HAVE_ACL_FLAGS
70498 + bool 'direct'
70499 +
70500 + config PAX_HOOK_ACL_FLAGS
70501 + bool 'hook'
70502 +endchoice
70503 +
70504 +endmenu
70505 +
70506 +menu "Non-executable pages"
70507 + depends on PAX
70508 +
70509 +config PAX_NOEXEC
70510 + bool "Enforce non-executable pages"
70511 + depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86)
70512 + help
70513 + By design some architectures do not allow for protecting memory
70514 + pages against execution or even if they do, Linux does not make
70515 + use of this feature. In practice this means that if a page is
70516 + readable (such as the stack or heap) it is also executable.
70517 +
70518 + There is a well known exploit technique that makes use of this
70519 + fact and a common programming mistake where an attacker can
70520 + introduce code of his choice somewhere in the attacked program's
70521 + memory (typically the stack or the heap) and then execute it.
70522 +
70523 + If the attacked program was running with different (typically
70524 + higher) privileges than that of the attacker, then he can elevate
70525 + his own privilege level (e.g. get a root shell, write to files for
70526 + which he does not have write access to, etc).
70527 +
70528 + Enabling this option will let you choose from various features
70529 + that prevent the injection and execution of 'foreign' code in
70530 + a program.
70531 +
70532 + This will also break programs that rely on the old behaviour and
70533 + expect that dynamically allocated memory via the malloc() family
70534 + of functions is executable (which it is not). Notable examples
70535 + are the XFree86 4.x server, the java runtime and wine.
70536 +
70537 +config PAX_PAGEEXEC
70538 + bool "Paging based non-executable pages"
70539 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
70540 + select S390_SWITCH_AMODE if S390
70541 + select S390_EXEC_PROTECT if S390
70542 + select ARCH_TRACK_EXEC_LIMIT if X86_32
70543 + help
70544 + This implementation is based on the paging feature of the CPU.
70545 + On i386 without hardware non-executable bit support there is a
70546 + variable but usually low performance impact, however on Intel's
70547 + P4 core based CPUs it is very high so you should not enable this
70548 + for kernels meant to be used on such CPUs.
70549 +
70550 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
70551 + with hardware non-executable bit support there is no performance
70552 + impact, on ppc the impact is negligible.
70553 +
70554 + Note that several architectures require various emulations due to
70555 + badly designed userland ABIs, this will cause a performance impact
70556 + but will disappear as soon as userland is fixed. For example, ppc
70557 + userland MUST have been built with secure-plt by a recent toolchain.
70558 +
70559 +config PAX_SEGMEXEC
70560 + bool "Segmentation based non-executable pages"
70561 + depends on PAX_NOEXEC && X86_32
70562 + help
70563 + This implementation is based on the segmentation feature of the
70564 + CPU and has a very small performance impact, however applications
70565 + will be limited to a 1.5 GB address space instead of the normal
70566 + 3 GB.
70567 +
70568 +config PAX_EMUTRAMP
70569 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
70570 + default y if PARISC
70571 + help
70572 + There are some programs and libraries that for one reason or
70573 + another attempt to execute special small code snippets from
70574 + non-executable memory pages. Most notable examples are the
70575 + signal handler return code generated by the kernel itself and
70576 + the GCC trampolines.
70577 +
70578 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
70579 + such programs will no longer work under your kernel.
70580 +
70581 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
70582 + utilities to enable trampoline emulation for the affected programs
70583 + yet still have the protection provided by the non-executable pages.
70584 +
70585 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
70586 + your system will not even boot.
70587 +
70588 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
70589 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
70590 + for the affected files.
70591 +
70592 + NOTE: enabling this feature *may* open up a loophole in the
70593 + protection provided by non-executable pages that an attacker
70594 + could abuse. Therefore the best solution is to not have any
70595 + files on your system that would require this option. This can
70596 + be achieved by not using libc5 (which relies on the kernel
70597 + signal handler return code) and not using or rewriting programs
70598 + that make use of the nested function implementation of GCC.
70599 + Skilled users can just fix GCC itself so that it implements
70600 + nested function calls in a way that does not interfere with PaX.
70601 +
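
   For reference, here is a minimal example (hypothetical, not from the
   patch) of the GCC nested-function trampolines mentioned above: taking the
   address of the nested function makes GCC emit a small code stub on the
   stack, which is exactly the kind of code EMUTRAMP has to emulate when the
   stack is non-executable.

	/* trampoline_demo.c - GNU C nested function, illustration only */
	#include <stdio.h>

	static int apply(int (*fn)(int), int x)
	{
		return fn(x);
	}

	int main(void)
	{
		int bias = 3;
		int add_bias(int v)	/* nested function, a GNU C extension */
		{
			return v + bias;
		}
		/* passing add_bias forces GCC to build a trampoline on the stack */
		printf("%d\n", apply(add_bias, 4));
		return 0;
	}
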
70602 +config PAX_EMUSIGRT
70603 + bool "Automatically emulate sigreturn trampolines"
70604 + depends on PAX_EMUTRAMP && PARISC
70605 + default y
70606 + help
70607 + Enabling this option will have the kernel automatically detect
70608 + and emulate signal return trampolines executing on the stack
70609 + that would otherwise lead to task termination.
70610 +
70611 + This solution is intended as a temporary one for users with
70612 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
70613 + Modula-3 runtime, etc) or executables linked to such, basically
70614 + everything that does not specify its own SA_RESTORER function in
70615 + normal executable memory like glibc 2.1+ does.
70616 +
70617 + On parisc you MUST enable this option, otherwise your system will
70618 + not even boot.
70619 +
70620 + NOTE: this feature cannot be disabled on a per executable basis
70621 + and since it *does* open up a loophole in the protection provided
70622 + by non-executable pages, the best solution is to not have any
70623 + files on your system that would require this option.
70624 +
70625 +config PAX_MPROTECT
70626 + bool "Restrict mprotect()"
70627 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
70628 + help
70629 + Enabling this option will prevent programs from
70630 + - changing the executable status of memory pages that were
70631 + not originally created as executable,
70632 + - making read-only executable pages writable again,
70633 + - creating executable pages from anonymous memory,
70634 + - making read-only-after-relocations (RELRO) data pages writable again.
70635 +
70636 + You should say Y here to complete the protection provided by
70637 + the enforcement of non-executable pages.
70638 +
70639 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
70640 + this feature on a per file basis.
70641 +
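
   As an illustration (a hypothetical userland sketch, not part of the
   patch), the program below performs exactly the kind of mprotect() call
   that this option rejects: turning an ordinary read-write anonymous
   mapping into an executable one. For binaries with the restriction
   enabled, the call is expected to fail with EPERM or EACCES.

	/* mprotect_demo.c - illustration only; behaviour depends on per-file PaX flags */
	#include <stdio.h>
	#include <string.h>
	#include <errno.h>
	#include <sys/mman.h>

	int main(void)
	{
		void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (p == MAP_FAILED) {
			perror("mmap");
			return 1;
		}
		if (mprotect(p, 4096, PROT_READ | PROT_EXEC) != 0)
			printf("mprotect denied: %s\n", strerror(errno));	/* expected under PAX_MPROTECT */
		else
			puts("mapping is now executable");
		munmap(p, 4096);
		return 0;
	}
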
70642 +config PAX_MPROTECT_COMPAT
70643 + bool "Use legacy/compat protection demoting (read help)"
70644 + depends on PAX_MPROTECT
70645 + default n
70646 + help
70647 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
70648 + by sending the proper error code to the application. For some broken
70649 + userland, this can cause problems with Python or other applications. The
70650 + current implementation however allows for applications like clamav to
70651 + detect if JIT compilation/execution is allowed and to fall back gracefully
70652 +	  to an interpreter-based mode if it is not. While we encourage everyone
70653 + to use the current implementation as-is and push upstream to fix broken
70654 + userland (note that the RWX logging option can assist with this), in some
70655 + environments this may not be possible. Having to disable MPROTECT
70656 + completely on certain binaries reduces the security benefit of PaX,
70657 + so this option is provided for those environments to revert to the old
70658 + behavior.
70659 +
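
   The clamav-style probe mentioned above can be sketched as follows
   (hypothetical code, not from the patch): the program asks for a writable
   and executable mapping once at startup and falls back to its interpreter
   when the request is refused.

	/* jit_probe.c - hedged sketch of a graceful JIT fallback */
	#include <stdio.h>
	#include <sys/mman.h>

	static int jit_allowed(void)
	{
		void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (p == MAP_FAILED)
			return 0;	/* RWX refused: stay on the interpreter */
		munmap(p, 4096);
		return 1;
	}

	int main(void)
	{
		puts(jit_allowed() ? "using JIT" : "falling back to interpreter");
		return 0;
	}
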
70660 +config PAX_ELFRELOCS
70661 + bool "Allow ELF text relocations (read help)"
70662 + depends on PAX_MPROTECT
70663 + default n
70664 + help
70665 + Non-executable pages and mprotect() restrictions are effective
70666 + in preventing the introduction of new executable code into an
70667 +	  attacked task's address space. There remain only two avenues
70668 + for this kind of attack: if the attacker can execute already
70669 + existing code in the attacked task then he can either have it
70670 + create and mmap() a file containing his code or have it mmap()
70671 + an already existing ELF library that does not have position
70672 + independent code in it and use mprotect() on it to make it
70673 + writable and copy his code there. While protecting against
70674 + the former approach is beyond PaX, the latter can be prevented
70675 + by having only PIC ELF libraries on one's system (which do not
70676 + need to relocate their code). If you are sure this is your case,
70677 + as is the case with all modern Linux distributions, then leave
70678 + this option disabled. You should say 'n' here.
70679 +
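
   Whether a given library would need this option can be checked by looking
   for text relocations in its dynamic section. The sketch below
   (hypothetical, 64-bit ELF only) scans PT_DYNAMIC for DT_TEXTREL or the
   DF_TEXTREL flag, which is the same information readelf -d reports as
   TEXTREL.

	/* textrel_check.c - hedged sketch, handles 64-bit native-endian ELF only */
	#include <elf.h>
	#include <stdio.h>

	int main(int argc, char **argv)
	{
		FILE *f;
		Elf64_Ehdr eh;
		Elf64_Phdr ph;
		Elf64_Dyn dyn;
		int i, textrel = 0;

		if (argc != 2 || !(f = fopen(argv[1], "rb")))
			return 1;
		if (fread(&eh, sizeof eh, 1, f) != 1)
			return 1;
		for (i = 0; i < eh.e_phnum; i++) {
			fseek(f, eh.e_phoff + (long)i * eh.e_phentsize, SEEK_SET);
			if (fread(&ph, sizeof ph, 1, f) != 1)
				return 1;
			if (ph.p_type != PT_DYNAMIC)
				continue;
			fseek(f, ph.p_offset, SEEK_SET);
			while (fread(&dyn, sizeof dyn, 1, f) == 1 && dyn.d_tag != DT_NULL) {
				if (dyn.d_tag == DT_TEXTREL ||
				    (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL)))
					textrel = 1;
			}
		}
		fclose(f);
		printf("%s: %s\n", argv[1], textrel ? "TEXTREL present" : "no text relocations");
		return textrel;
	}
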
70680 +config PAX_ETEXECRELOCS
70681 + bool "Allow ELF ET_EXEC text relocations"
70682 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
70683 + select PAX_ELFRELOCS
70684 + default y
70685 + help
70686 + On some architectures there are incorrectly created applications
70687 + that require text relocations and would not work without enabling
70688 + this option. If you are an alpha, ia64 or parisc user, you should
70689 + enable this option and disable it once you have made sure that
70690 + none of your applications need it.
70691 +
70692 +config PAX_EMUPLT
70693 + bool "Automatically emulate ELF PLT"
70694 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
70695 + default y
70696 + help
70697 + Enabling this option will have the kernel automatically detect
70698 + and emulate the Procedure Linkage Table entries in ELF files.
70699 +	  On some architectures such entries are in writable memory, which
70700 +	  then becomes non-executable, leading to task termination. Therefore
70701 + it is mandatory that you enable this option on alpha, parisc,
70702 + sparc and sparc64, otherwise your system would not even boot.
70703 +
70704 + NOTE: this feature *does* open up a loophole in the protection
70705 + provided by the non-executable pages, therefore the proper
70706 + solution is to modify the toolchain to produce a PLT that does
70707 + not need to be writable.
70708 +
70709 +config PAX_DLRESOLVE
70710 + bool 'Emulate old glibc resolver stub'
70711 + depends on PAX_EMUPLT && SPARC
70712 + default n
70713 + help
70714 + This option is needed if userland has an old glibc (before 2.4)
70715 + that puts a 'save' instruction into the runtime generated resolver
70716 + stub that needs special emulation.
70717 +
70718 +config PAX_KERNEXEC
70719 + bool "Enforce non-executable kernel pages"
70720 + depends on PAX_NOEXEC && (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
70721 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
70722 + help
70723 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
70724 + that is, enabling this option will make it harder to inject
70725 + and execute 'foreign' code in kernel memory itself.
70726 +
70727 + Note that on x86_64 kernels there is a known regression when
70728 + this feature and KVM/VMX are both enabled in the host kernel.
70729 +
70730 +config PAX_KERNEXEC_MODULE_TEXT
70731 + int "Minimum amount of memory reserved for module code"
70732 + default "4"
70733 + depends on PAX_KERNEXEC && X86_32 && MODULES
70734 + help
70735 + Due to implementation details the kernel must reserve a fixed
70736 + amount of memory for module code at compile time that cannot be
70737 + changed at runtime. Here you can specify the minimum amount
70738 + in MB that will be reserved. Due to the same implementation
70739 + details this size will always be rounded up to the next 2/4 MB
70740 + boundary (depends on PAE) so the actually available memory for
70741 + module code will usually be more than this minimum.
70742 +
70743 + The default 4 MB should be enough for most users but if you have
70744 + an excessive number of modules (e.g., most distribution configs
70745 + compile many drivers as modules) or use huge modules such as
70746 + nvidia's kernel driver, you will need to adjust this amount.
70747 + A good rule of thumb is to look at your currently loaded kernel
70748 + modules and add up their sizes.
70749 +
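
   The rule of thumb above can be applied with a short helper (hypothetical,
   not part of the patch) that sums the size column of /proc/modules:

	/* modsize.c - hedged helper: adds up the sizes reported in /proc/modules */
	#include <stdio.h>

	int main(void)
	{
		char name[64];
		unsigned long size, total = 0;
		FILE *f = fopen("/proc/modules", "r");

		if (!f) {
			perror("/proc/modules");
			return 1;
		}
		/* each line starts with "<name> <size> ..." */
		while (fscanf(f, "%63s %lu %*[^\n]", name, &size) == 2)
			total += size;
		fclose(f);
		printf("loaded module code: ~%lu MB\n", (total + (1UL << 20) - 1) >> 20);
		return 0;
	}
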
70750 +endmenu
70751 +
70752 +menu "Address Space Layout Randomization"
70753 + depends on PAX
70754 +
70755 +config PAX_ASLR
70756 + bool "Address Space Layout Randomization"
70757 + depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
70758 + help
70759 + Many if not most exploit techniques rely on the knowledge of
70760 + certain addresses in the attacked program. The following options
70761 + will allow the kernel to apply a certain amount of randomization
70762 + to specific parts of the program thereby forcing an attacker to
70763 + guess them in most cases. Any failed guess will most likely crash
70764 +	  the attacked program, which allows the kernel to detect such attempts
70765 +	  and react to them. PaX itself provides no reaction mechanisms,
70766 + instead it is strongly encouraged that you make use of Nergal's
70767 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
70768 + (http://www.grsecurity.net/) built-in crash detection features or
70769 + develop one yourself.
70770 +
70771 + By saying Y here you can choose to randomize the following areas:
70772 + - top of the task's kernel stack
70773 + - top of the task's userland stack
70774 + - base address for mmap() requests that do not specify one
70775 + (this includes all libraries)
70776 + - base address of the main executable
70777 +
70778 + It is strongly recommended to say Y here as address space layout
70779 +	  randomization has a negligible impact on performance yet provides
70780 +	  very effective protection.
70781 +
70782 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
70783 + this feature on a per file basis.
70784 +
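
   The randomization described above can be observed with a small program
   (hypothetical, not from the patch) that prints one address from each
   affected region; run it several times and compare the output with and
   without ASLR enabled.

	/* aslr_observe.c - prints one address per region so runs can be compared */
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/mman.h>

	int main(void)
	{
		int stack_var;
		void *heap = malloc(16);
		void *map = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		printf("text  %p\n", (void *)main);
		printf("heap  %p\n", heap);
		printf("mmap  %p\n", map);
		printf("stack %p\n", (void *)&stack_var);

		free(heap);
		if (map != MAP_FAILED)
			munmap(map, 4096);
		return 0;
	}
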
70785 +config PAX_RANDKSTACK
70786 + bool "Randomize kernel stack base"
70787 + depends on PAX_ASLR && X86_TSC && X86
70788 + help
70789 + By saying Y here the kernel will randomize every task's kernel
70790 + stack on every system call. This will not only force an attacker
70791 + to guess it but also prevent him from making use of possible
70792 + leaked information about it.
70793 +
70794 + Since the kernel stack is a rather scarce resource, randomization
70795 + may cause unexpected stack overflows, therefore you should very
70796 + carefully test your system. Note that once enabled in the kernel
70797 + configuration, this feature cannot be disabled on a per file basis.
70798 +
70799 +config PAX_RANDUSTACK
70800 + bool "Randomize user stack base"
70801 + depends on PAX_ASLR
70802 + help
70803 + By saying Y here the kernel will randomize every task's userland
70804 +	  stack. The randomization is done in two steps, where the second
70805 +	  one may apply a large shift to the top of the stack and
70806 + cause problems for programs that want to use lots of memory (more
70807 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
70808 + For this reason the second step can be controlled by 'chpax' or
70809 + 'paxctl' on a per file basis.
70810 +
70811 +config PAX_RANDMMAP
70812 + bool "Randomize mmap() base"
70813 + depends on PAX_ASLR
70814 + help
70815 + By saying Y here the kernel will use a randomized base address for
70816 + mmap() requests that do not specify one themselves. As a result
70817 + all dynamically loaded libraries will appear at random addresses
70818 + and therefore be harder to exploit by a technique where an attacker
70819 + attempts to execute library code for his purposes (e.g. spawn a
70820 + shell from an exploited program that is running at an elevated
70821 + privilege level).
70822 +
70823 + Furthermore, if a program is relinked as a dynamic ELF file, its
70824 + base address will be randomized as well, completing the full
70825 + randomization of the address space layout. Attacking such programs
70826 +	  becomes a guessing game. You can find an example of doing this at
70827 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
70828 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
70829 +
70830 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
70831 + feature on a per file basis.
70832 +
70833 +endmenu
70834 +
70835 +menu "Miscellaneous hardening features"
70836 +
70837 +config PAX_MEMORY_SANITIZE
70838 + bool "Sanitize all freed memory"
70839 + help
70840 + By saying Y here the kernel will erase memory pages as soon as they
70841 + are freed. This in turn reduces the lifetime of data stored in the
70842 + pages, making it less likely that sensitive information such as
70843 + passwords, cryptographic secrets, etc stay in memory for too long.
70844 +
70845 +	  This is especially useful for programs whose runtime is short;
70846 +	  long-lived processes and the kernel itself benefit from this as long
70847 +	  as they operate on whole memory pages and ensure timely freeing of
70848 +	  pages that may hold sensitive information.
70849 +
70850 +	  The tradeoff is a performance impact: on a single CPU system kernel
70851 +	  compilation sees a 3% slowdown; other systems and workloads may vary,
70852 + and you are advised to test this feature on your expected workload
70853 + before deploying it.
70854 +
70855 + Note that this feature does not protect data stored in live pages,
70856 + e.g., process memory swapped to disk may stay there for a long time.
70857 +
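
   The idea is the kernel-side analogue of wiping sensitive buffers before
   handing them back to the allocator. A hedged userland sketch of the same
   principle (illustration only, not the kernel implementation):

	/* wipe_free.c - userland analogue of sanitizing memory on free */
	#include <stdlib.h>
	#include <string.h>

	static void wipe_and_free(void *p, size_t len)
	{
		if (!p)
			return;
		/* volatile pointer keeps the compiler from optimizing the wipe away */
		volatile unsigned char *q = p;

		while (len--)
			*q++ = 0;
		free(p);
	}

	int main(void)
	{
		char *secret = malloc(32);

		if (!secret)
			return 1;
		strcpy(secret, "hunter2");
		wipe_and_free(secret, 32);
		return 0;
	}
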
70858 +config PAX_MEMORY_STACKLEAK
70859 + bool "Sanitize kernel stack"
70860 + depends on X86
70861 + help
70862 + By saying Y here the kernel will erase the kernel stack before it
70863 + returns from a system call. This in turn reduces the information
70864 + that a kernel stack leak bug can reveal.
70865 +
70866 + Note that such a bug can still leak information that was put on
70867 + the stack by the current system call (the one eventually triggering
70868 + the bug) but traces of earlier system calls on the kernel stack
70869 + cannot leak anymore.
70870 +
70871 +	  The tradeoff is a performance impact: on a single CPU system kernel
70872 +	  compilation sees a 1% slowdown; other systems and workloads may vary,
70873 + and you are advised to test this feature on your expected workload
70874 + before deploying it.
70875 +
70876 + Note: full support for this feature requires gcc with plugin support
70877 + so make sure your compiler is at least gcc 4.5.0 (cross compilation
70878 + is not supported). Using older gcc versions means that functions
70879 + with large enough stack frames may leave uninitialized memory behind
70880 + that may be exposed to a later syscall leaking the stack.
70881 +
70882 +config PAX_MEMORY_UDEREF
70883 + bool "Prevent invalid userland pointer dereference"
70884 + depends on X86 && !UML_X86 && !XEN
70885 + select PAX_PER_CPU_PGD if X86_64
70886 + help
70887 + By saying Y here the kernel will be prevented from dereferencing
70888 + userland pointers in contexts where the kernel expects only kernel
70889 + pointers. This is both a useful runtime debugging feature and a
70890 + security measure that prevents exploiting a class of kernel bugs.
70891 +
70892 + The tradeoff is that some virtualization solutions may experience
70893 + a huge slowdown and therefore you should not enable this feature
70894 + for kernels meant to run in such environments. Whether a given VM
70895 + solution is affected or not is best determined by simply trying it
70896 +	  out; the performance impact will be obvious right on boot as this
70897 + mechanism engages from very early on. A good rule of thumb is that
70898 + VMs running on CPUs without hardware virtualization support (i.e.,
70899 + the majority of IA-32 CPUs) will likely experience the slowdown.
70900 +
70901 +config PAX_REFCOUNT
70902 + bool "Prevent various kernel object reference counter overflows"
70903 + depends on GRKERNSEC && (X86 || SPARC64)
70904 + help
70905 + By saying Y here the kernel will detect and prevent overflowing
70906 + various (but not all) kinds of object reference counters. Such
70907 +	  overflows can normally occur only due to bugs and are often, if
70908 + not always, exploitable.
70909 +
70910 + The tradeoff is that data structures protected by an overflowed
70911 + refcount will never be freed and therefore will leak memory. Note
70912 + that this leak also happens even without this protection but in
70913 + that case the overflow can eventually trigger the freeing of the
70914 + data structure while it is still being used elsewhere, resulting
70915 + in the exploitable situation that this feature prevents.
70916 +
70917 + Since this has a negligible performance impact, you should enable
70918 + this feature.
70919 +
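
   Conceptually the protection turns a wrapping increment into a saturating
   one. A hedged sketch of the idea in plain C (the real implementation
   works on atomic_t with architecture-specific overflow checks):

	/* refcount_sketch.c - illustrates saturation instead of wrap-around */
	#include <limits.h>
	#include <stdio.h>

	static int refcount_inc_checked(int *count)
	{
		if (*count == INT_MAX)	/* would wrap to a negative value: refuse */
			return 0;
		(*count)++;
		return 1;
	}

	int main(void)
	{
		int refs = INT_MAX - 1;

		printf("%d\n", refcount_inc_checked(&refs));	/* 1: normal increment */
		printf("%d\n", refcount_inc_checked(&refs));	/* 0: saturated, object leaks instead of being freed early */
		return 0;
	}
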
70920 +config PAX_USERCOPY
70921 + bool "Harden heap object copies between kernel and userland"
70922 + depends on X86 || PPC || SPARC
70923 + depends on GRKERNSEC && (SLAB || SLUB)
70924 + help
70925 + By saying Y here the kernel will enforce the size of heap objects
70926 + when they are copied in either direction between the kernel and
70927 + userland, even if only a part of the heap object is copied.
70928 +
70929 + Specifically, this checking prevents information leaking from the
70930 + kernel heap during kernel to userland copies (if the kernel heap
70931 + object is otherwise fully initialized) and prevents kernel heap
70932 + overflows during userland to kernel copies.
70933 +
70934 + Note that the current implementation provides the strictest bounds
70935 + checks for the SLUB allocator.
70936 +
70937 +	  Enabling this option also enables per-slab-cache protection against
70938 +	  data in a given cache being copied into or out of userland via the
70939 +	  usual accessors. Though the whitelist of regions will be reduced over
70940 + time, it notably protects important data structures like task structs.
70941 +
70942 +
70943 + If frame pointers are enabled on x86, this option will also
70944 + restrict copies into and out of the kernel stack to local variables
70945 + within a single frame.
70946 +
70947 + Since this has a negligible performance impact, you should enable
70948 + this feature.
70949 +
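
   The check can be pictured as a simple range test: the copy must start
   inside the heap object the allocator knows about and must not run past
   its end. A hedged sketch (conceptual, not the kernel implementation):

	/* usercopy_sketch.c - conceptual bounds check */
	#include <stdio.h>
	#include <stddef.h>

	static int copy_within_object(const char *obj, size_t obj_size,
				      const char *ptr, size_t len)
	{
		if (ptr < obj || ptr > obj + obj_size)
			return 0;	/* copy starts outside the object */
		if (len > obj_size - (size_t)(ptr - obj))
			return 0;	/* copy runs past the end of the object */
		return 1;
	}

	int main(void)
	{
		char obj[64];

		printf("%d\n", copy_within_object(obj, sizeof obj, obj + 8, 16));	/* 1: fits */
		printf("%d\n", copy_within_object(obj, sizeof obj, obj + 60, 16));	/* 0: overflows */
		return 0;
	}
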
70950 +endmenu
70951 +
70952 +endmenu
70953 +
70954 config KEYS
70955 bool "Enable access key retention support"
70956 help
70957 @@ -146,7 +695,7 @@ config INTEL_TXT
70958 config LSM_MMAP_MIN_ADDR
70959 int "Low address space for LSM to protect from user allocation"
70960 depends on SECURITY && SECURITY_SELINUX
70961 - default 65536
70962 + default 32768
70963 help
70964 This is the portion of low virtual memory which should be protected
70965 from userspace allocation. Keeping a user from writing to low pages
70966 diff -urNp linux-2.6.32.41/security/keys/keyring.c linux-2.6.32.41/security/keys/keyring.c
70967 --- linux-2.6.32.41/security/keys/keyring.c 2011-03-27 14:31:47.000000000 -0400
70968 +++ linux-2.6.32.41/security/keys/keyring.c 2011-04-18 22:03:00.000000000 -0400
70969 @@ -214,15 +214,15 @@ static long keyring_read(const struct ke
70970 ret = -EFAULT;
70971
70972 for (loop = 0; loop < klist->nkeys; loop++) {
70973 + key_serial_t serial;
70974 key = klist->keys[loop];
70975 + serial = key->serial;
70976
70977 tmp = sizeof(key_serial_t);
70978 if (tmp > buflen)
70979 tmp = buflen;
70980
70981 - if (copy_to_user(buffer,
70982 - &key->serial,
70983 - tmp) != 0)
70984 + if (copy_to_user(buffer, &serial, tmp))
70985 goto error;
70986
70987 buflen -= tmp;
70988 diff -urNp linux-2.6.32.41/security/min_addr.c linux-2.6.32.41/security/min_addr.c
70989 --- linux-2.6.32.41/security/min_addr.c 2011-03-27 14:31:47.000000000 -0400
70990 +++ linux-2.6.32.41/security/min_addr.c 2011-04-17 15:56:46.000000000 -0400
70991 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG
70992 */
70993 static void update_mmap_min_addr(void)
70994 {
70995 +#ifndef SPARC
70996 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
70997 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
70998 mmap_min_addr = dac_mmap_min_addr;
70999 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
71000 #else
71001 mmap_min_addr = dac_mmap_min_addr;
71002 #endif
71003 +#endif
71004 }
71005
71006 /*
71007 diff -urNp linux-2.6.32.41/security/root_plug.c linux-2.6.32.41/security/root_plug.c
71008 --- linux-2.6.32.41/security/root_plug.c 2011-03-27 14:31:47.000000000 -0400
71009 +++ linux-2.6.32.41/security/root_plug.c 2011-04-17 15:56:46.000000000 -0400
71010 @@ -70,7 +70,7 @@ static int rootplug_bprm_check_security
71011 return 0;
71012 }
71013
71014 -static struct security_operations rootplug_security_ops = {
71015 +static struct security_operations rootplug_security_ops __read_only = {
71016 .bprm_check_security = rootplug_bprm_check_security,
71017 };
71018
71019 diff -urNp linux-2.6.32.41/security/security.c linux-2.6.32.41/security/security.c
71020 --- linux-2.6.32.41/security/security.c 2011-03-27 14:31:47.000000000 -0400
71021 +++ linux-2.6.32.41/security/security.c 2011-04-17 15:56:46.000000000 -0400
71022 @@ -24,7 +24,7 @@ static __initdata char chosen_lsm[SECURI
71023 extern struct security_operations default_security_ops;
71024 extern void security_fixup_ops(struct security_operations *ops);
71025
71026 -struct security_operations *security_ops; /* Initialized to NULL */
71027 +struct security_operations *security_ops __read_only; /* Initialized to NULL */
71028
71029 static inline int verify(struct security_operations *ops)
71030 {
71031 @@ -106,7 +106,7 @@ int __init security_module_enable(struct
71032 * If there is already a security module registered with the kernel,
71033 * an error will be returned. Otherwise %0 is returned on success.
71034 */
71035 -int register_security(struct security_operations *ops)
71036 +int __init register_security(struct security_operations *ops)
71037 {
71038 if (verify(ops)) {
71039 printk(KERN_DEBUG "%s could not verify "
71040 diff -urNp linux-2.6.32.41/security/selinux/hooks.c linux-2.6.32.41/security/selinux/hooks.c
71041 --- linux-2.6.32.41/security/selinux/hooks.c 2011-03-27 14:31:47.000000000 -0400
71042 +++ linux-2.6.32.41/security/selinux/hooks.c 2011-04-17 15:56:46.000000000 -0400
71043 @@ -131,7 +131,7 @@ int selinux_enabled = 1;
71044 * Minimal support for a secondary security module,
71045 * just to allow the use of the capability module.
71046 */
71047 -static struct security_operations *secondary_ops;
71048 +static struct security_operations *secondary_ops __read_only;
71049
71050 /* Lists of inode and superblock security structures initialized
71051 before the policy was loaded. */
71052 @@ -5457,7 +5457,7 @@ static int selinux_key_getsecurity(struc
71053
71054 #endif
71055
71056 -static struct security_operations selinux_ops = {
71057 +static struct security_operations selinux_ops __read_only = {
71058 .name = "selinux",
71059
71060 .ptrace_access_check = selinux_ptrace_access_check,
71061 @@ -5841,7 +5841,9 @@ int selinux_disable(void)
71062 avc_disable();
71063
71064 /* Reset security_ops to the secondary module, dummy or capability. */
71065 + pax_open_kernel();
71066 security_ops = secondary_ops;
71067 + pax_close_kernel();
71068
71069 /* Unregister netfilter hooks. */
71070 selinux_nf_ip_exit();
71071 diff -urNp linux-2.6.32.41/security/selinux/include/xfrm.h linux-2.6.32.41/security/selinux/include/xfrm.h
71072 --- linux-2.6.32.41/security/selinux/include/xfrm.h 2011-03-27 14:31:47.000000000 -0400
71073 +++ linux-2.6.32.41/security/selinux/include/xfrm.h 2011-05-18 20:09:37.000000000 -0400
71074 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct s
71075
71076 static inline void selinux_xfrm_notify_policyload(void)
71077 {
71078 - atomic_inc(&flow_cache_genid);
71079 + atomic_inc_unchecked(&flow_cache_genid);
71080 }
71081 #else
71082 static inline int selinux_xfrm_enabled(void)
71083 diff -urNp linux-2.6.32.41/security/selinux/ss/services.c linux-2.6.32.41/security/selinux/ss/services.c
71084 --- linux-2.6.32.41/security/selinux/ss/services.c 2011-03-27 14:31:47.000000000 -0400
71085 +++ linux-2.6.32.41/security/selinux/ss/services.c 2011-05-16 21:46:57.000000000 -0400
71086 @@ -1715,6 +1715,8 @@ int security_load_policy(void *data, siz
71087 int rc = 0;
71088 struct policy_file file = { data, len }, *fp = &file;
71089
71090 + pax_track_stack();
71091 +
71092 if (!ss_initialized) {
71093 avtab_cache_init();
71094 if (policydb_read(&policydb, fp)) {
71095 diff -urNp linux-2.6.32.41/security/smack/smack_lsm.c linux-2.6.32.41/security/smack/smack_lsm.c
71096 --- linux-2.6.32.41/security/smack/smack_lsm.c 2011-03-27 14:31:47.000000000 -0400
71097 +++ linux-2.6.32.41/security/smack/smack_lsm.c 2011-04-17 15:56:46.000000000 -0400
71098 @@ -3073,7 +3073,7 @@ static int smack_inode_getsecctx(struct
71099 return 0;
71100 }
71101
71102 -struct security_operations smack_ops = {
71103 +struct security_operations smack_ops __read_only = {
71104 .name = "smack",
71105
71106 .ptrace_access_check = smack_ptrace_access_check,
71107 diff -urNp linux-2.6.32.41/security/tomoyo/tomoyo.c linux-2.6.32.41/security/tomoyo/tomoyo.c
71108 --- linux-2.6.32.41/security/tomoyo/tomoyo.c 2011-03-27 14:31:47.000000000 -0400
71109 +++ linux-2.6.32.41/security/tomoyo/tomoyo.c 2011-04-17 15:56:46.000000000 -0400
71110 @@ -275,7 +275,7 @@ static int tomoyo_dentry_open(struct fil
71111 * tomoyo_security_ops is a "struct security_operations" which is used for
71112 * registering TOMOYO.
71113 */
71114 -static struct security_operations tomoyo_security_ops = {
71115 +static struct security_operations tomoyo_security_ops __read_only = {
71116 .name = "tomoyo",
71117 .cred_alloc_blank = tomoyo_cred_alloc_blank,
71118 .cred_prepare = tomoyo_cred_prepare,
71119 diff -urNp linux-2.6.32.41/sound/aoa/codecs/onyx.c linux-2.6.32.41/sound/aoa/codecs/onyx.c
71120 --- linux-2.6.32.41/sound/aoa/codecs/onyx.c 2011-03-27 14:31:47.000000000 -0400
71121 +++ linux-2.6.32.41/sound/aoa/codecs/onyx.c 2011-04-17 15:56:46.000000000 -0400
71122 @@ -53,7 +53,7 @@ struct onyx {
71123 spdif_locked:1,
71124 analog_locked:1,
71125 original_mute:2;
71126 - int open_count;
71127 + local_t open_count;
71128 struct codec_info *codec_info;
71129
71130 /* mutex serializes concurrent access to the device
71131 @@ -752,7 +752,7 @@ static int onyx_open(struct codec_info_i
71132 struct onyx *onyx = cii->codec_data;
71133
71134 mutex_lock(&onyx->mutex);
71135 - onyx->open_count++;
71136 + local_inc(&onyx->open_count);
71137 mutex_unlock(&onyx->mutex);
71138
71139 return 0;
71140 @@ -764,8 +764,7 @@ static int onyx_close(struct codec_info_
71141 struct onyx *onyx = cii->codec_data;
71142
71143 mutex_lock(&onyx->mutex);
71144 - onyx->open_count--;
71145 - if (!onyx->open_count)
71146 + if (local_dec_and_test(&onyx->open_count))
71147 onyx->spdif_locked = onyx->analog_locked = 0;
71148 mutex_unlock(&onyx->mutex);
71149
71150 diff -urNp linux-2.6.32.41/sound/aoa/codecs/onyx.h linux-2.6.32.41/sound/aoa/codecs/onyx.h
71151 --- linux-2.6.32.41/sound/aoa/codecs/onyx.h 2011-03-27 14:31:47.000000000 -0400
71152 +++ linux-2.6.32.41/sound/aoa/codecs/onyx.h 2011-04-17 15:56:46.000000000 -0400
71153 @@ -11,6 +11,7 @@
71154 #include <linux/i2c.h>
71155 #include <asm/pmac_low_i2c.h>
71156 #include <asm/prom.h>
71157 +#include <asm/local.h>
71158
71159 /* PCM3052 register definitions */
71160
71161 diff -urNp linux-2.6.32.41/sound/drivers/mts64.c linux-2.6.32.41/sound/drivers/mts64.c
71162 --- linux-2.6.32.41/sound/drivers/mts64.c 2011-03-27 14:31:47.000000000 -0400
71163 +++ linux-2.6.32.41/sound/drivers/mts64.c 2011-04-17 15:56:46.000000000 -0400
71164 @@ -27,6 +27,7 @@
71165 #include <sound/initval.h>
71166 #include <sound/rawmidi.h>
71167 #include <sound/control.h>
71168 +#include <asm/local.h>
71169
71170 #define CARD_NAME "Miditerminal 4140"
71171 #define DRIVER_NAME "MTS64"
71172 @@ -65,7 +66,7 @@ struct mts64 {
71173 struct pardevice *pardev;
71174 int pardev_claimed;
71175
71176 - int open_count;
71177 + local_t open_count;
71178 int current_midi_output_port;
71179 int current_midi_input_port;
71180 u8 mode[MTS64_NUM_INPUT_PORTS];
71181 @@ -695,7 +696,7 @@ static int snd_mts64_rawmidi_open(struct
71182 {
71183 struct mts64 *mts = substream->rmidi->private_data;
71184
71185 - if (mts->open_count == 0) {
71186 + if (local_read(&mts->open_count) == 0) {
71187 /* We don't need a spinlock here, because this is just called
71188 if the device has not been opened before.
71189 So there aren't any IRQs from the device */
71190 @@ -703,7 +704,7 @@ static int snd_mts64_rawmidi_open(struct
71191
71192 msleep(50);
71193 }
71194 - ++(mts->open_count);
71195 + local_inc(&mts->open_count);
71196
71197 return 0;
71198 }
71199 @@ -713,8 +714,7 @@ static int snd_mts64_rawmidi_close(struc
71200 struct mts64 *mts = substream->rmidi->private_data;
71201 unsigned long flags;
71202
71203 - --(mts->open_count);
71204 - if (mts->open_count == 0) {
71205 + if (local_dec_return(&mts->open_count) == 0) {
71206 /* We need the spinlock_irqsave here because we can still
71207 have IRQs at this point */
71208 spin_lock_irqsave(&mts->lock, flags);
71209 @@ -723,8 +723,8 @@ static int snd_mts64_rawmidi_close(struc
71210
71211 msleep(500);
71212
71213 - } else if (mts->open_count < 0)
71214 - mts->open_count = 0;
71215 + } else if (local_read(&mts->open_count) < 0)
71216 + local_set(&mts->open_count, 0);
71217
71218 return 0;
71219 }
71220 diff -urNp linux-2.6.32.41/sound/drivers/portman2x4.c linux-2.6.32.41/sound/drivers/portman2x4.c
71221 --- linux-2.6.32.41/sound/drivers/portman2x4.c 2011-03-27 14:31:47.000000000 -0400
71222 +++ linux-2.6.32.41/sound/drivers/portman2x4.c 2011-04-17 15:56:46.000000000 -0400
71223 @@ -46,6 +46,7 @@
71224 #include <sound/initval.h>
71225 #include <sound/rawmidi.h>
71226 #include <sound/control.h>
71227 +#include <asm/local.h>
71228
71229 #define CARD_NAME "Portman 2x4"
71230 #define DRIVER_NAME "portman"
71231 @@ -83,7 +84,7 @@ struct portman {
71232 struct pardevice *pardev;
71233 int pardev_claimed;
71234
71235 - int open_count;
71236 + local_t open_count;
71237 int mode[PORTMAN_NUM_INPUT_PORTS];
71238 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
71239 };
71240 diff -urNp linux-2.6.32.41/sound/oss/sb_audio.c linux-2.6.32.41/sound/oss/sb_audio.c
71241 --- linux-2.6.32.41/sound/oss/sb_audio.c 2011-03-27 14:31:47.000000000 -0400
71242 +++ linux-2.6.32.41/sound/oss/sb_audio.c 2011-04-17 15:56:46.000000000 -0400
71243 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
71244 buf16 = (signed short *)(localbuf + localoffs);
71245 while (c)
71246 {
71247 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
71248 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
71249 if (copy_from_user(lbuf8,
71250 userbuf+useroffs + p,
71251 locallen))
71252 diff -urNp linux-2.6.32.41/sound/oss/swarm_cs4297a.c linux-2.6.32.41/sound/oss/swarm_cs4297a.c
71253 --- linux-2.6.32.41/sound/oss/swarm_cs4297a.c 2011-03-27 14:31:47.000000000 -0400
71254 +++ linux-2.6.32.41/sound/oss/swarm_cs4297a.c 2011-04-17 15:56:46.000000000 -0400
71255 @@ -2577,7 +2577,6 @@ static int __init cs4297a_init(void)
71256 {
71257 struct cs4297a_state *s;
71258 u32 pwr, id;
71259 - mm_segment_t fs;
71260 int rval;
71261 #ifndef CONFIG_BCM_CS4297A_CSWARM
71262 u64 cfg;
71263 @@ -2667,22 +2666,23 @@ static int __init cs4297a_init(void)
71264 if (!rval) {
71265 char *sb1250_duart_present;
71266
71267 +#if 0
71268 + mm_segment_t fs;
71269 fs = get_fs();
71270 set_fs(KERNEL_DS);
71271 -#if 0
71272 val = SOUND_MASK_LINE;
71273 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
71274 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
71275 val = initvol[i].vol;
71276 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
71277 }
71278 + set_fs(fs);
71279 // cs4297a_write_ac97(s, 0x18, 0x0808);
71280 #else
71281 // cs4297a_write_ac97(s, 0x5e, 0x180);
71282 cs4297a_write_ac97(s, 0x02, 0x0808);
71283 cs4297a_write_ac97(s, 0x18, 0x0808);
71284 #endif
71285 - set_fs(fs);
71286
71287 list_add(&s->list, &cs4297a_devs);
71288
71289 diff -urNp linux-2.6.32.41/sound/pci/ac97/ac97_codec.c linux-2.6.32.41/sound/pci/ac97/ac97_codec.c
71290 --- linux-2.6.32.41/sound/pci/ac97/ac97_codec.c 2011-03-27 14:31:47.000000000 -0400
71291 +++ linux-2.6.32.41/sound/pci/ac97/ac97_codec.c 2011-04-17 15:56:46.000000000 -0400
71292 @@ -1952,7 +1952,7 @@ static int snd_ac97_dev_disconnect(struc
71293 }
71294
71295 /* build_ops to do nothing */
71296 -static struct snd_ac97_build_ops null_build_ops;
71297 +static const struct snd_ac97_build_ops null_build_ops;
71298
71299 #ifdef CONFIG_SND_AC97_POWER_SAVE
71300 static void do_update_power(struct work_struct *work)
71301 diff -urNp linux-2.6.32.41/sound/pci/ac97/ac97_patch.c linux-2.6.32.41/sound/pci/ac97/ac97_patch.c
71302 --- linux-2.6.32.41/sound/pci/ac97/ac97_patch.c 2011-03-27 14:31:47.000000000 -0400
71303 +++ linux-2.6.32.41/sound/pci/ac97/ac97_patch.c 2011-04-23 12:56:12.000000000 -0400
71304 @@ -371,7 +371,7 @@ static int patch_yamaha_ymf743_build_spd
71305 return 0;
71306 }
71307
71308 -static struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
71309 +static const struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
71310 .build_spdif = patch_yamaha_ymf743_build_spdif,
71311 .build_3d = patch_yamaha_ymf7x3_3d,
71312 };
71313 @@ -455,7 +455,7 @@ static int patch_yamaha_ymf753_post_spdi
71314 return 0;
71315 }
71316
71317 -static struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
71318 +static const struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
71319 .build_3d = patch_yamaha_ymf7x3_3d,
71320 .build_post_spdif = patch_yamaha_ymf753_post_spdif
71321 };
71322 @@ -502,7 +502,7 @@ static int patch_wolfson_wm9703_specific
71323 return 0;
71324 }
71325
71326 -static struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
71327 +static const struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
71328 .build_specific = patch_wolfson_wm9703_specific,
71329 };
71330
71331 @@ -533,7 +533,7 @@ static int patch_wolfson_wm9704_specific
71332 return 0;
71333 }
71334
71335 -static struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
71336 +static const struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
71337 .build_specific = patch_wolfson_wm9704_specific,
71338 };
71339
71340 @@ -555,7 +555,7 @@ static int patch_wolfson_wm9705_specific
71341 return 0;
71342 }
71343
71344 -static struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
71345 +static const struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
71346 .build_specific = patch_wolfson_wm9705_specific,
71347 };
71348
71349 @@ -692,7 +692,7 @@ static int patch_wolfson_wm9711_specific
71350 return 0;
71351 }
71352
71353 -static struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
71354 +static const struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
71355 .build_specific = patch_wolfson_wm9711_specific,
71356 };
71357
71358 @@ -886,7 +886,7 @@ static void patch_wolfson_wm9713_resume
71359 }
71360 #endif
71361
71362 -static struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
71363 +static const struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
71364 .build_specific = patch_wolfson_wm9713_specific,
71365 .build_3d = patch_wolfson_wm9713_3d,
71366 #ifdef CONFIG_PM
71367 @@ -991,7 +991,7 @@ static int patch_sigmatel_stac97xx_speci
71368 return 0;
71369 }
71370
71371 -static struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
71372 +static const struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
71373 .build_3d = patch_sigmatel_stac9700_3d,
71374 .build_specific = patch_sigmatel_stac97xx_specific
71375 };
71376 @@ -1038,7 +1038,7 @@ static int patch_sigmatel_stac9708_speci
71377 return patch_sigmatel_stac97xx_specific(ac97);
71378 }
71379
71380 -static struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
71381 +static const struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
71382 .build_3d = patch_sigmatel_stac9708_3d,
71383 .build_specific = patch_sigmatel_stac9708_specific
71384 };
71385 @@ -1267,7 +1267,7 @@ static int patch_sigmatel_stac9758_speci
71386 return 0;
71387 }
71388
71389 -static struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
71390 +static const struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
71391 .build_3d = patch_sigmatel_stac9700_3d,
71392 .build_specific = patch_sigmatel_stac9758_specific
71393 };
71394 @@ -1342,7 +1342,7 @@ static int patch_cirrus_build_spdif(stru
71395 return 0;
71396 }
71397
71398 -static struct snd_ac97_build_ops patch_cirrus_ops = {
71399 +static const struct snd_ac97_build_ops patch_cirrus_ops = {
71400 .build_spdif = patch_cirrus_build_spdif
71401 };
71402
71403 @@ -1399,7 +1399,7 @@ static int patch_conexant_build_spdif(st
71404 return 0;
71405 }
71406
71407 -static struct snd_ac97_build_ops patch_conexant_ops = {
71408 +static const struct snd_ac97_build_ops patch_conexant_ops = {
71409 .build_spdif = patch_conexant_build_spdif
71410 };
71411
71412 @@ -1575,7 +1575,7 @@ static void patch_ad1881_chained(struct
71413 }
71414 }
71415
71416 -static struct snd_ac97_build_ops patch_ad1881_build_ops = {
71417 +static const struct snd_ac97_build_ops patch_ad1881_build_ops = {
71418 #ifdef CONFIG_PM
71419 .resume = ad18xx_resume
71420 #endif
71421 @@ -1662,7 +1662,7 @@ static int patch_ad1885_specific(struct
71422 return 0;
71423 }
71424
71425 -static struct snd_ac97_build_ops patch_ad1885_build_ops = {
71426 +static const struct snd_ac97_build_ops patch_ad1885_build_ops = {
71427 .build_specific = &patch_ad1885_specific,
71428 #ifdef CONFIG_PM
71429 .resume = ad18xx_resume
71430 @@ -1689,7 +1689,7 @@ static int patch_ad1886_specific(struct
71431 return 0;
71432 }
71433
71434 -static struct snd_ac97_build_ops patch_ad1886_build_ops = {
71435 +static const struct snd_ac97_build_ops patch_ad1886_build_ops = {
71436 .build_specific = &patch_ad1886_specific,
71437 #ifdef CONFIG_PM
71438 .resume = ad18xx_resume
71439 @@ -1896,7 +1896,7 @@ static int patch_ad1981a_specific(struct
71440 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
71441 }
71442
71443 -static struct snd_ac97_build_ops patch_ad1981a_build_ops = {
71444 +static const struct snd_ac97_build_ops patch_ad1981a_build_ops = {
71445 .build_post_spdif = patch_ad198x_post_spdif,
71446 .build_specific = patch_ad1981a_specific,
71447 #ifdef CONFIG_PM
71448 @@ -1951,7 +1951,7 @@ static int patch_ad1981b_specific(struct
71449 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
71450 }
71451
71452 -static struct snd_ac97_build_ops patch_ad1981b_build_ops = {
71453 +static const struct snd_ac97_build_ops patch_ad1981b_build_ops = {
71454 .build_post_spdif = patch_ad198x_post_spdif,
71455 .build_specific = patch_ad1981b_specific,
71456 #ifdef CONFIG_PM
71457 @@ -2090,7 +2090,7 @@ static int patch_ad1888_specific(struct
71458 return patch_build_controls(ac97, snd_ac97_ad1888_controls, ARRAY_SIZE(snd_ac97_ad1888_controls));
71459 }
71460
71461 -static struct snd_ac97_build_ops patch_ad1888_build_ops = {
71462 +static const struct snd_ac97_build_ops patch_ad1888_build_ops = {
71463 .build_post_spdif = patch_ad198x_post_spdif,
71464 .build_specific = patch_ad1888_specific,
71465 #ifdef CONFIG_PM
71466 @@ -2139,7 +2139,7 @@ static int patch_ad1980_specific(struct
71467 return patch_build_controls(ac97, &snd_ac97_ad198x_2cmic, 1);
71468 }
71469
71470 -static struct snd_ac97_build_ops patch_ad1980_build_ops = {
71471 +static const struct snd_ac97_build_ops patch_ad1980_build_ops = {
71472 .build_post_spdif = patch_ad198x_post_spdif,
71473 .build_specific = patch_ad1980_specific,
71474 #ifdef CONFIG_PM
71475 @@ -2254,7 +2254,7 @@ static int patch_ad1985_specific(struct
71476 ARRAY_SIZE(snd_ac97_ad1985_controls));
71477 }
71478
71479 -static struct snd_ac97_build_ops patch_ad1985_build_ops = {
71480 +static const struct snd_ac97_build_ops patch_ad1985_build_ops = {
71481 .build_post_spdif = patch_ad198x_post_spdif,
71482 .build_specific = patch_ad1985_specific,
71483 #ifdef CONFIG_PM
71484 @@ -2546,7 +2546,7 @@ static int patch_ad1986_specific(struct
71485 ARRAY_SIZE(snd_ac97_ad1985_controls));
71486 }
71487
71488 -static struct snd_ac97_build_ops patch_ad1986_build_ops = {
71489 +static const struct snd_ac97_build_ops patch_ad1986_build_ops = {
71490 .build_post_spdif = patch_ad198x_post_spdif,
71491 .build_specific = patch_ad1986_specific,
71492 #ifdef CONFIG_PM
71493 @@ -2651,7 +2651,7 @@ static int patch_alc650_specific(struct
71494 return 0;
71495 }
71496
71497 -static struct snd_ac97_build_ops patch_alc650_ops = {
71498 +static const struct snd_ac97_build_ops patch_alc650_ops = {
71499 .build_specific = patch_alc650_specific,
71500 .update_jacks = alc650_update_jacks
71501 };
71502 @@ -2803,7 +2803,7 @@ static int patch_alc655_specific(struct
71503 return 0;
71504 }
71505
71506 -static struct snd_ac97_build_ops patch_alc655_ops = {
71507 +static const struct snd_ac97_build_ops patch_alc655_ops = {
71508 .build_specific = patch_alc655_specific,
71509 .update_jacks = alc655_update_jacks
71510 };
71511 @@ -2915,7 +2915,7 @@ static int patch_alc850_specific(struct
71512 return 0;
71513 }
71514
71515 -static struct snd_ac97_build_ops patch_alc850_ops = {
71516 +static const struct snd_ac97_build_ops patch_alc850_ops = {
71517 .build_specific = patch_alc850_specific,
71518 .update_jacks = alc850_update_jacks
71519 };
71520 @@ -2977,7 +2977,7 @@ static int patch_cm9738_specific(struct
71521 return patch_build_controls(ac97, snd_ac97_cm9738_controls, ARRAY_SIZE(snd_ac97_cm9738_controls));
71522 }
71523
71524 -static struct snd_ac97_build_ops patch_cm9738_ops = {
71525 +static const struct snd_ac97_build_ops patch_cm9738_ops = {
71526 .build_specific = patch_cm9738_specific,
71527 .update_jacks = cm9738_update_jacks
71528 };
71529 @@ -3068,7 +3068,7 @@ static int patch_cm9739_post_spdif(struc
71530 return patch_build_controls(ac97, snd_ac97_cm9739_controls_spdif, ARRAY_SIZE(snd_ac97_cm9739_controls_spdif));
71531 }
71532
71533 -static struct snd_ac97_build_ops patch_cm9739_ops = {
71534 +static const struct snd_ac97_build_ops patch_cm9739_ops = {
71535 .build_specific = patch_cm9739_specific,
71536 .build_post_spdif = patch_cm9739_post_spdif,
71537 .update_jacks = cm9739_update_jacks
71538 @@ -3242,7 +3242,7 @@ static int patch_cm9761_specific(struct
71539 return patch_build_controls(ac97, snd_ac97_cm9761_controls, ARRAY_SIZE(snd_ac97_cm9761_controls));
71540 }
71541
71542 -static struct snd_ac97_build_ops patch_cm9761_ops = {
71543 +static const struct snd_ac97_build_ops patch_cm9761_ops = {
71544 .build_specific = patch_cm9761_specific,
71545 .build_post_spdif = patch_cm9761_post_spdif,
71546 .update_jacks = cm9761_update_jacks
71547 @@ -3338,7 +3338,7 @@ static int patch_cm9780_specific(struct
71548 return patch_build_controls(ac97, cm9780_controls, ARRAY_SIZE(cm9780_controls));
71549 }
71550
71551 -static struct snd_ac97_build_ops patch_cm9780_ops = {
71552 +static const struct snd_ac97_build_ops patch_cm9780_ops = {
71553 .build_specific = patch_cm9780_specific,
71554 .build_post_spdif = patch_cm9761_post_spdif /* identical with CM9761 */
71555 };
71556 @@ -3458,7 +3458,7 @@ static int patch_vt1616_specific(struct
71557 return 0;
71558 }
71559
71560 -static struct snd_ac97_build_ops patch_vt1616_ops = {
71561 +static const struct snd_ac97_build_ops patch_vt1616_ops = {
71562 .build_specific = patch_vt1616_specific
71563 };
71564
71565 @@ -3812,7 +3812,7 @@ static int patch_it2646_specific(struct
71566 return 0;
71567 }
71568
71569 -static struct snd_ac97_build_ops patch_it2646_ops = {
71570 +static const struct snd_ac97_build_ops patch_it2646_ops = {
71571 .build_specific = patch_it2646_specific,
71572 .update_jacks = it2646_update_jacks
71573 };
71574 @@ -3846,7 +3846,7 @@ static int patch_si3036_specific(struct
71575 return 0;
71576 }
71577
71578 -static struct snd_ac97_build_ops patch_si3036_ops = {
71579 +static const struct snd_ac97_build_ops patch_si3036_ops = {
71580 .build_specific = patch_si3036_specific,
71581 };
71582
71583 @@ -3913,7 +3913,7 @@ static int patch_ucb1400_specific(struct
71584 return 0;
71585 }
71586
71587 -static struct snd_ac97_build_ops patch_ucb1400_ops = {
71588 +static const struct snd_ac97_build_ops patch_ucb1400_ops = {
71589 .build_specific = patch_ucb1400_specific,
71590 };
71591
71592 diff -urNp linux-2.6.32.41/sound/pci/hda/patch_intelhdmi.c linux-2.6.32.41/sound/pci/hda/patch_intelhdmi.c
71593 --- linux-2.6.32.41/sound/pci/hda/patch_intelhdmi.c 2011-03-27 14:31:47.000000000 -0400
71594 +++ linux-2.6.32.41/sound/pci/hda/patch_intelhdmi.c 2011-04-17 15:56:46.000000000 -0400
71595 @@ -511,10 +511,10 @@ static void hdmi_non_intrinsic_event(str
71596 cp_ready);
71597
71598 /* TODO */
71599 - if (cp_state)
71600 - ;
71601 - if (cp_ready)
71602 - ;
71603 + if (cp_state) {
71604 + }
71605 + if (cp_ready) {
71606 + }
71607 }
71608
71609
71610 diff -urNp linux-2.6.32.41/sound/pci/intel8x0m.c linux-2.6.32.41/sound/pci/intel8x0m.c
71611 --- linux-2.6.32.41/sound/pci/intel8x0m.c 2011-03-27 14:31:47.000000000 -0400
71612 +++ linux-2.6.32.41/sound/pci/intel8x0m.c 2011-04-23 12:56:12.000000000 -0400
71613 @@ -1264,7 +1264,7 @@ static struct shortname_table {
71614 { 0x5455, "ALi M5455" },
71615 { 0x746d, "AMD AMD8111" },
71616 #endif
71617 - { 0 },
71618 + { 0, },
71619 };
71620
71621 static int __devinit snd_intel8x0m_probe(struct pci_dev *pci,
71622 diff -urNp linux-2.6.32.41/sound/pci/ymfpci/ymfpci_main.c linux-2.6.32.41/sound/pci/ymfpci/ymfpci_main.c
71623 --- linux-2.6.32.41/sound/pci/ymfpci/ymfpci_main.c 2011-03-27 14:31:47.000000000 -0400
71624 +++ linux-2.6.32.41/sound/pci/ymfpci/ymfpci_main.c 2011-05-04 17:56:28.000000000 -0400
71625 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct sn
71626 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
71627 break;
71628 }
71629 - if (atomic_read(&chip->interrupt_sleep_count)) {
71630 - atomic_set(&chip->interrupt_sleep_count, 0);
71631 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
71632 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
71633 wake_up(&chip->interrupt_sleep);
71634 }
71635 __end:
71636 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct s
71637 continue;
71638 init_waitqueue_entry(&wait, current);
71639 add_wait_queue(&chip->interrupt_sleep, &wait);
71640 - atomic_inc(&chip->interrupt_sleep_count);
71641 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
71642 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
71643 remove_wait_queue(&chip->interrupt_sleep, &wait);
71644 }
71645 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(
71646 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
71647 spin_unlock(&chip->reg_lock);
71648
71649 - if (atomic_read(&chip->interrupt_sleep_count)) {
71650 - atomic_set(&chip->interrupt_sleep_count, 0);
71651 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
71652 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
71653 wake_up(&chip->interrupt_sleep);
71654 }
71655 }
71656 @@ -2369,7 +2369,7 @@ int __devinit snd_ymfpci_create(struct s
71657 spin_lock_init(&chip->reg_lock);
71658 spin_lock_init(&chip->voice_lock);
71659 init_waitqueue_head(&chip->interrupt_sleep);
71660 - atomic_set(&chip->interrupt_sleep_count, 0);
71661 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
71662 chip->card = card;
71663 chip->pci = pci;
71664 chip->irq = -1;
71665 diff -urNp linux-2.6.32.41/tools/gcc/Makefile linux-2.6.32.41/tools/gcc/Makefile
71666 --- linux-2.6.32.41/tools/gcc/Makefile 1969-12-31 19:00:00.000000000 -0500
71667 +++ linux-2.6.32.41/tools/gcc/Makefile 2011-06-04 20:52:13.000000000 -0400
71668 @@ -0,0 +1,11 @@
71669 +#CC := gcc
71670 +#PLUGIN_SOURCE_FILES := pax_plugin.c
71671 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
71672 +GCCPLUGINS_DIR := $(shell $(HOSTCC) -print-file-name=plugin)
71673 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W
71674 +
71675 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include
71676 +
71677 +hostlibs-y := pax_plugin.so
71678 +always := $(hostlibs-y)
71679 +pax_plugin-objs := pax_plugin.o
71680 diff -urNp linux-2.6.32.41/tools/gcc/pax_plugin.c linux-2.6.32.41/tools/gcc/pax_plugin.c
71681 --- linux-2.6.32.41/tools/gcc/pax_plugin.c 1969-12-31 19:00:00.000000000 -0500
71682 +++ linux-2.6.32.41/tools/gcc/pax_plugin.c 2011-06-04 20:52:13.000000000 -0400
71683 @@ -0,0 +1,242 @@
71684 +/*
71685 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
71686 + * Licensed under the GPL v2
71687 + *
71688 + * Note: the choice of the license means that the compilation process is
71689 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
71690 + * but for the kernel it doesn't matter since it doesn't link against
71691 + * any of the gcc libraries
71692 + *
71693 + * gcc plugin to help implement various PaX features
71694 + *
71695 + * - track lowest stack pointer
71696 + *
71697 + * TODO:
71698 + * - initialize all local variables
71699 + *
71700 + * BUGS:
71701 + */
71702 +#include "gcc-plugin.h"
71703 +#include "plugin-version.h"
71704 +#include "config.h"
71705 +#include "system.h"
71706 +#include "coretypes.h"
71707 +#include "tm.h"
71708 +#include "toplev.h"
71709 +#include "basic-block.h"
71710 +#include "gimple.h"
71711 +//#include "expr.h" where are you...
71712 +#include "diagnostic.h"
71713 +#include "rtl.h"
71714 +#include "emit-rtl.h"
71715 +#include "function.h"
71716 +#include "tree.h"
71717 +#include "tree-pass.h"
71718 +#include "intl.h"
71719 +
71720 +int plugin_is_GPL_compatible;
71721 +
71722 +static int track_frame_size = -1;
71723 +static const char track_function[] = "pax_track_stack";
71724 +static bool init_locals;
71725 +
71726 +static struct plugin_info pax_plugin_info = {
71727 + .version = "201106030000",
71728 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
71729 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
71730 +};
71731 +
71732 +static bool gate_pax_track_stack(void);
71733 +static unsigned int execute_pax_tree_instrument(void);
71734 +static unsigned int execute_pax_final(void);
71735 +
71736 +static struct gimple_opt_pass pax_tree_instrument_pass = {
71737 + .pass = {
71738 + .type = GIMPLE_PASS,
71739 + .name = "pax_tree_instrument",
71740 + .gate = gate_pax_track_stack,
71741 + .execute = execute_pax_tree_instrument,
71742 + .sub = NULL,
71743 + .next = NULL,
71744 + .static_pass_number = 0,
71745 + .tv_id = TV_NONE,
71746 + .properties_required = PROP_gimple_leh | PROP_cfg,
71747 + .properties_provided = 0,
71748 + .properties_destroyed = 0,
71749 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
71750 + .todo_flags_finish = TODO_verify_stmts // | TODO_dump_func
71751 + }
71752 +};
71753 +
71754 +static struct rtl_opt_pass pax_final_rtl_opt_pass = {
71755 + .pass = {
71756 + .type = RTL_PASS,
71757 + .name = "pax_final",
71758 + .gate = gate_pax_track_stack,
71759 + .execute = execute_pax_final,
71760 + .sub = NULL,
71761 + .next = NULL,
71762 + .static_pass_number = 0,
71763 + .tv_id = TV_NONE,
71764 + .properties_required = 0,
71765 + .properties_provided = 0,
71766 + .properties_destroyed = 0,
71767 + .todo_flags_start = 0,
71768 + .todo_flags_finish = 0
71769 + }
71770 +};
71771 +
71772 +static bool gate_pax_track_stack(void)
71773 +{
71774 + return track_frame_size >= 0;
71775 +}
71776 +
71777 +static void pax_add_instrumentation(gimple_stmt_iterator *gsi, bool before)
71778 +{
71779 + gimple call;
71780 + tree decl, type;
71781 +
71782 + // insert call to void pax_track_stack(void)
71783 + type = build_function_type_list(void_type_node, NULL_TREE);
71784 + decl = build_fn_decl(track_function, type);
71785 + DECL_ASSEMBLER_NAME(decl); // for LTO
71786 + call = gimple_build_call(decl, 0);
71787 + if (before)
71788 + gsi_insert_before(gsi, call, GSI_CONTINUE_LINKING);
71789 + else
71790 + gsi_insert_after(gsi, call, GSI_CONTINUE_LINKING);
71791 +}
71792 +
71793 +static unsigned int execute_pax_tree_instrument(void)
71794 +{
71795 + basic_block bb;
71796 + gimple_stmt_iterator gsi;
71797 +
71798 + // 1. loop through BBs and GIMPLE statements
71799 + FOR_EACH_BB(bb) {
71800 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
71801 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
71802 + tree decl;
71803 + gimple stmt = gsi_stmt(gsi);
71804 +
71805 + if (!is_gimple_call(stmt))
71806 + continue;
71807 + decl = gimple_call_fndecl(stmt);
71808 + if (!decl)
71809 + continue;
71810 + if (TREE_CODE(decl) != FUNCTION_DECL)
71811 + continue;
71812 + if (!DECL_BUILT_IN(decl))
71813 + continue;
71814 + if (DECL_BUILT_IN_CLASS(decl) != BUILT_IN_NORMAL)
71815 + continue;
71816 + if (DECL_FUNCTION_CODE(decl) != BUILT_IN_ALLOCA)
71817 + continue;
71818 +
71819 + // 2. insert track call after each __builtin_alloca call
71820 + pax_add_instrumentation(&gsi, false);
71821 +// print_node(stderr, "pax", decl, 4);
71822 + }
71823 + }
71824 +
71825 + // 3. insert track call at the beginning
71826 + bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
71827 + gsi = gsi_start_bb(bb);
71828 + pax_add_instrumentation(&gsi, true);
71829 +
71830 + return 0;
71831 +}
71832 +
71833 +static unsigned int execute_pax_final(void)
71834 +{
71835 + rtx insn;
71836 +
71837 + if (cfun->calls_alloca)
71838 + return 0;
71839 +
71840 + // 1. find pax_track_stack calls
71841 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
71842 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
71843 + rtx body;
71844 +
71845 + if (!CALL_P(insn))
71846 + continue;
71847 + body = PATTERN(insn);
71848 + if (GET_CODE(body) != CALL)
71849 + continue;
71850 + body = XEXP(body, 0);
71851 + if (GET_CODE(body) != MEM)
71852 + continue;
71853 + body = XEXP(body, 0);
71854 + if (GET_CODE(body) != SYMBOL_REF)
71855 + continue;
71856 + if (strcmp(XSTR(body, 0), track_function))
71857 + continue;
71858 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
71859 + // 2. delete call if function frame is not big enough
71860 + if (get_frame_size() >= track_frame_size)
71861 + continue;
71862 + delete_insn_and_edges(insn);
71863 + }
71864 +
71865 +// print_simple_rtl(stderr, get_insns());
71866 +// print_rtl(stderr, get_insns());
71867 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
71868 +
71869 + return 0;
71870 +}
71871 +
71872 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
71873 +{
71874 + const char * const plugin_name = plugin_info->base_name;
71875 + const int argc = plugin_info->argc;
71876 + const struct plugin_argument * const argv = plugin_info->argv;
71877 + int i;
71878 + struct register_pass_info pax_tree_instrument_pass_info = {
71879 + .pass = &pax_tree_instrument_pass.pass,
71880 +// .reference_pass_name = "tree_profile",
71881 + .reference_pass_name = "optimized",
71882 + .ref_pass_instance_number = 0,
71883 + .pos_op = PASS_POS_INSERT_AFTER
71884 + };
71885 + struct register_pass_info pax_final_pass_info = {
71886 + .pass = &pax_final_rtl_opt_pass.pass,
71887 + .reference_pass_name = "final",
71888 + .ref_pass_instance_number = 0,
71889 + .pos_op = PASS_POS_INSERT_BEFORE
71890 + };
71891 +
71892 + if (!plugin_default_version_check(version, &gcc_version)) {
71893 + error(G_("incompatible gcc/plugin versions"));
71894 + return 1;
71895 + }
71896 +
71897 + register_callback(plugin_name, PLUGIN_INFO, NULL, &pax_plugin_info);
71898 +
71899 + for (i = 0; i < argc; ++i) {
71900 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
71901 + if (!argv[i].value) {
71902 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
71903 + continue;
71904 + }
71905 + track_frame_size = atoi(argv[i].value);
71906 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
71907 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
71908 + continue;
71909 + }
71910 + if (!strcmp(argv[i].key, "initialize-locals")) {
71911 + if (argv[i].value) {
71912 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
71913 + continue;
71914 + }
71915 + init_locals = true;
71916 + continue;
71917 + }
71918 +		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
71919 + }
71920 +
71921 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pax_tree_instrument_pass_info);
71922 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pax_final_pass_info);
71923 +
71924 + return 0;
71925 +}
71926 Binary files linux-2.6.32.41/tools/gcc/pax_plugin.so and linux-2.6.32.41/tools/gcc/pax_plugin.so differ
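For orientation, a rough source-level sketch of the effect of the two plugin passes above (illustrative only, not taken from the patch; the real insertion happens on GIMPLE, and execute_pax_final() later deletes the entry call again whenever get_frame_size() stays below the track-lowest-sp threshold):

/* Illustrative sketch: instrumented() shows roughly what a function looks like
 * after the tree pass has run. pax_track_stack() is stubbed out here so the
 * example stands alone; in the kernel it records the lowest stack pointer. */
static void pax_track_stack(void)
{
}

static void instrumented(unsigned long n)
{
	char *buf;

	pax_track_stack();		/* call inserted at the start of the function (step 3 above) */
	buf = __builtin_alloca(n);
	pax_track_stack();		/* call inserted after every __builtin_alloca (step 2 above) */
	buf[0] = 0;
}

Judging by the '-fplugin-arg-%s-%s' error messages in plugin_init(), the plugin would presumably be loaded with something along the lines of gcc -fplugin=tools/gcc/pax_plugin.so -fplugin-arg-pax_plugin-track-lowest-sp=<bytes> (plus -fplugin-arg-pax_plugin-initialize-locals, which takes no value); the exact spelling assumes the shared object keeps its pax_plugin base name.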
71927 diff -urNp linux-2.6.32.41/usr/gen_init_cpio.c linux-2.6.32.41/usr/gen_init_cpio.c
71928 --- linux-2.6.32.41/usr/gen_init_cpio.c 2011-03-27 14:31:47.000000000 -0400
71929 +++ linux-2.6.32.41/usr/gen_init_cpio.c 2011-04-17 15:56:46.000000000 -0400
71930 @@ -299,7 +299,7 @@ static int cpio_mkfile(const char *name,
71931 int retval;
71932 int rc = -1;
71933 int namesize;
71934 - int i;
71935 + unsigned int i;
71936
71937 mode |= S_IFREG;
71938
71939 @@ -383,9 +383,10 @@ static char *cpio_replace_env(char *new_
71940 *env_var = *expanded = '\0';
71941 strncat(env_var, start + 2, end - start - 2);
71942 strncat(expanded, new_location, start - new_location);
71943 - strncat(expanded, getenv(env_var), PATH_MAX);
71944 - strncat(expanded, end + 1, PATH_MAX);
71945 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
71946 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
71947 strncpy(new_location, expanded, PATH_MAX);
71948 + new_location[PATH_MAX] = 0;
71949 } else
71950 break;
71951 }
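The gen_init_cpio.c hunk above corrects two libc pitfalls: strncat()'s length argument is the space remaining in the destination, not its total size, and strncpy() does not NUL-terminate on truncation. A minimal self-contained sketch of the corrected idioms (illustrative only, not from the patch; PATH_MAX_LOCAL is a stand-in for the real PATH_MAX):

#include <stdio.h>
#include <string.h>

#define PATH_MAX_LOCAL 4096	/* stand-in for PATH_MAX, keeps the sketch self-contained */

int main(void)
{
	char expanded[PATH_MAX_LOCAL + 1] = "prefix/";
	char new_location[PATH_MAX_LOCAL + 1];

	/* bounded append: pass the remaining space, as the patched code does */
	strncat(expanded, "rest/of/path", PATH_MAX_LOCAL - strlen(expanded));

	/* strncpy may leave the destination unterminated, so terminate it by hand,
	 * mirroring the new_location[PATH_MAX] = 0 added by the hunk */
	strncpy(new_location, expanded, PATH_MAX_LOCAL);
	new_location[PATH_MAX_LOCAL] = '\0';

	puts(new_location);
	return 0;
}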
71952 diff -urNp linux-2.6.32.41/virt/kvm/kvm_main.c linux-2.6.32.41/virt/kvm/kvm_main.c
71953 --- linux-2.6.32.41/virt/kvm/kvm_main.c 2011-03-27 14:31:47.000000000 -0400
71954 +++ linux-2.6.32.41/virt/kvm/kvm_main.c 2011-04-23 21:41:37.000000000 -0400
71955 @@ -1748,6 +1748,7 @@ static int kvm_vcpu_release(struct inode
71956 return 0;
71957 }
71958
71959 +/* cannot be const */
71960 static struct file_operations kvm_vcpu_fops = {
71961 .release = kvm_vcpu_release,
71962 .unlocked_ioctl = kvm_vcpu_ioctl,
71963 @@ -2344,6 +2345,7 @@ static int kvm_vm_mmap(struct file *file
71964 return 0;
71965 }
71966
71967 +/* cannot be const */
71968 static struct file_operations kvm_vm_fops = {
71969 .release = kvm_vm_release,
71970 .unlocked_ioctl = kvm_vm_ioctl,
71971 @@ -2431,6 +2433,7 @@ out:
71972 return r;
71973 }
71974
71975 +/* cannot be const */
71976 static struct file_operations kvm_chardev_ops = {
71977 .unlocked_ioctl = kvm_dev_ioctl,
71978 .compat_ioctl = kvm_dev_ioctl,
71979 @@ -2494,7 +2497,7 @@ asmlinkage void kvm_handle_fault_on_rebo
71980 if (kvm_rebooting)
71981 /* spin while reset goes on */
71982 while (true)
71983 - ;
71984 + cpu_relax();
71985 /* Fault while not rebooting. We want the trace. */
71986 BUG();
71987 }
71988 @@ -2714,7 +2717,7 @@ static void kvm_sched_out(struct preempt
71989 kvm_arch_vcpu_put(vcpu);
71990 }
71991
71992 -int kvm_init(void *opaque, unsigned int vcpu_size,
71993 +int kvm_init(const void *opaque, unsigned int vcpu_size,
71994 struct module *module)
71995 {
71996 int r;
71997 @@ -2767,7 +2770,7 @@ int kvm_init(void *opaque, unsigned int
71998 /* A kmem cache lets us meet the alignment requirements of fx_save. */
71999 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
72000 __alignof__(struct kvm_vcpu),
72001 - 0, NULL);
72002 + SLAB_USERCOPY, NULL);
72003 if (!kvm_vcpu_cache) {
72004 r = -ENOMEM;
72005 goto out_free_5;