1diff -urNp linux-2.6.32.41/arch/alpha/include/asm/elf.h linux-2.6.32.41/arch/alpha/include/asm/elf.h
2--- linux-2.6.32.41/arch/alpha/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3+++ linux-2.6.32.41/arch/alpha/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
4@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8+#ifdef CONFIG_PAX_ASLR
9+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10+
11+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13+#endif
14+
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18diff -urNp linux-2.6.32.41/arch/alpha/include/asm/pgtable.h linux-2.6.32.41/arch/alpha/include/asm/pgtable.h
19--- linux-2.6.32.41/arch/alpha/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
20+++ linux-2.6.32.41/arch/alpha/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
21@@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25+
26+#ifdef CONFIG_PAX_PAGEEXEC
27+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30+#else
31+# define PAGE_SHARED_NOEXEC PAGE_SHARED
32+# define PAGE_COPY_NOEXEC PAGE_COPY
33+# define PAGE_READONLY_NOEXEC PAGE_READONLY
34+#endif
35+
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39diff -urNp linux-2.6.32.41/arch/alpha/kernel/module.c linux-2.6.32.41/arch/alpha/kernel/module.c
40--- linux-2.6.32.41/arch/alpha/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
41+++ linux-2.6.32.41/arch/alpha/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
42@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46- gp = (u64)me->module_core + me->core_size - 0x8000;
47+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51diff -urNp linux-2.6.32.41/arch/alpha/kernel/osf_sys.c linux-2.6.32.41/arch/alpha/kernel/osf_sys.c
52--- linux-2.6.32.41/arch/alpha/kernel/osf_sys.c 2011-03-27 14:31:47.000000000 -0400
53+++ linux-2.6.32.41/arch/alpha/kernel/osf_sys.c 2011-04-17 15:56:45.000000000 -0400
54@@ -1169,7 +1169,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58- if (!vma || addr + len <= vma->vm_start)
59+ if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63@@ -1205,6 +1205,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67+#ifdef CONFIG_PAX_RANDMMAP
68+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69+#endif
70+
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74@@ -1212,8 +1216,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79- len, limit);
80+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81+
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
85diff -urNp linux-2.6.32.41/arch/alpha/mm/fault.c linux-2.6.32.41/arch/alpha/mm/fault.c
86--- linux-2.6.32.41/arch/alpha/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
87+++ linux-2.6.32.41/arch/alpha/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
88@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92+#ifdef CONFIG_PAX_PAGEEXEC
93+/*
94+ * PaX: decide what to do with offenders (regs->pc = fault address)
95+ *
96+ * returns 1 when task should be killed
97+ * 2 when patched PLT trampoline was detected
98+ * 3 when unpatched PLT trampoline was detected
99+ */
100+static int pax_handle_fetch_fault(struct pt_regs *regs)
101+{
102+
103+#ifdef CONFIG_PAX_EMUPLT
104+ int err;
105+
106+ do { /* PaX: patched PLT emulation #1 */
107+ unsigned int ldah, ldq, jmp;
108+
109+ err = get_user(ldah, (unsigned int *)regs->pc);
110+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112+
113+ if (err)
114+ break;
115+
116+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118+ jmp == 0x6BFB0000U)
119+ {
120+ unsigned long r27, addr;
121+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123+
124+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125+ err = get_user(r27, (unsigned long *)addr);
126+ if (err)
127+ break;
128+
129+ regs->r27 = r27;
130+ regs->pc = r27;
131+ return 2;
132+ }
133+ } while (0);
134+
135+ do { /* PaX: patched PLT emulation #2 */
136+ unsigned int ldah, lda, br;
137+
138+ err = get_user(ldah, (unsigned int *)regs->pc);
139+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
140+ err |= get_user(br, (unsigned int *)(regs->pc+8));
141+
142+ if (err)
143+ break;
144+
145+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
147+ (br & 0xFFE00000U) == 0xC3E00000U)
148+ {
149+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152+
153+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155+ return 2;
156+ }
157+ } while (0);
158+
159+ do { /* PaX: unpatched PLT emulation */
160+ unsigned int br;
161+
162+ err = get_user(br, (unsigned int *)regs->pc);
163+
164+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165+ unsigned int br2, ldq, nop, jmp;
166+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167+
168+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169+ err = get_user(br2, (unsigned int *)addr);
170+ err |= get_user(ldq, (unsigned int *)(addr+4));
171+ err |= get_user(nop, (unsigned int *)(addr+8));
172+ err |= get_user(jmp, (unsigned int *)(addr+12));
173+ err |= get_user(resolver, (unsigned long *)(addr+16));
174+
175+ if (err)
176+ break;
177+
178+ if (br2 == 0xC3600000U &&
179+ ldq == 0xA77B000CU &&
180+ nop == 0x47FF041FU &&
181+ jmp == 0x6B7B0000U)
182+ {
183+ regs->r28 = regs->pc+4;
184+ regs->r27 = addr+16;
185+ regs->pc = resolver;
186+ return 3;
187+ }
188+ }
189+ } while (0);
190+#endif
191+
192+ return 1;
193+}
194+
195+void pax_report_insns(void *pc, void *sp)
196+{
197+ unsigned long i;
198+
199+ printk(KERN_ERR "PAX: bytes at PC: ");
200+ for (i = 0; i < 5; i++) {
201+ unsigned int c;
202+ if (get_user(c, (unsigned int *)pc+i))
203+ printk(KERN_CONT "???????? ");
204+ else
205+ printk(KERN_CONT "%08x ", c);
206+ }
207+ printk("\n");
208+}
209+#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217- if (!(vma->vm_flags & VM_EXEC))
218+ if (!(vma->vm_flags & VM_EXEC)) {
219+
220+#ifdef CONFIG_PAX_PAGEEXEC
221+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222+ goto bad_area;
223+
224+ up_read(&mm->mmap_sem);
225+ switch (pax_handle_fetch_fault(regs)) {
226+
227+#ifdef CONFIG_PAX_EMUPLT
228+ case 2:
229+ case 3:
230+ return;
231+#endif
232+
233+ }
234+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235+ do_group_exit(SIGKILL);
236+#else
237 goto bad_area;
238+#endif
239+
240+ }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
244diff -urNp linux-2.6.32.41/arch/arm/include/asm/elf.h linux-2.6.32.41/arch/arm/include/asm/elf.h
245--- linux-2.6.32.41/arch/arm/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
246+++ linux-2.6.32.41/arch/arm/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
247@@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253+
254+#ifdef CONFIG_PAX_ASLR
255+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256+
257+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259+#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263diff -urNp linux-2.6.32.41/arch/arm/include/asm/kmap_types.h linux-2.6.32.41/arch/arm/include/asm/kmap_types.h
264--- linux-2.6.32.41/arch/arm/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
265+++ linux-2.6.32.41/arch/arm/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
266@@ -19,6 +19,7 @@ enum km_type {
267 KM_SOFTIRQ0,
268 KM_SOFTIRQ1,
269 KM_L2_CACHE,
270+ KM_CLEARPAGE,
271 KM_TYPE_NR
272 };
273
274diff -urNp linux-2.6.32.41/arch/arm/include/asm/uaccess.h linux-2.6.32.41/arch/arm/include/asm/uaccess.h
275--- linux-2.6.32.41/arch/arm/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
276+++ linux-2.6.32.41/arch/arm/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
277@@ -403,6 +403,9 @@ extern unsigned long __must_check __strn
278
279 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
280 {
281+ if ((long)n < 0)
282+ return n;
283+
284 if (access_ok(VERIFY_READ, from, n))
285 n = __copy_from_user(to, from, n);
286 else /* security hole - plug it */
287@@ -412,6 +415,9 @@ static inline unsigned long __must_check
288
289 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
290 {
291+ if ((long)n < 0)
292+ return n;
293+
294 if (access_ok(VERIFY_WRITE, to, n))
295 n = __copy_to_user(to, from, n);
296 return n;
297diff -urNp linux-2.6.32.41/arch/arm/kernel/kgdb.c linux-2.6.32.41/arch/arm/kernel/kgdb.c
298--- linux-2.6.32.41/arch/arm/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
299+++ linux-2.6.32.41/arch/arm/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
300@@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
301 * and we handle the normal undef case within the do_undefinstr
302 * handler.
303 */
304-struct kgdb_arch arch_kgdb_ops = {
305+const struct kgdb_arch arch_kgdb_ops = {
306 #ifndef __ARMEB__
307 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
308 #else /* ! __ARMEB__ */
309diff -urNp linux-2.6.32.41/arch/arm/mach-at91/pm.c linux-2.6.32.41/arch/arm/mach-at91/pm.c
310--- linux-2.6.32.41/arch/arm/mach-at91/pm.c 2011-03-27 14:31:47.000000000 -0400
311+++ linux-2.6.32.41/arch/arm/mach-at91/pm.c 2011-04-17 15:56:45.000000000 -0400
312@@ -348,7 +348,7 @@ static void at91_pm_end(void)
313 }
314
315
316-static struct platform_suspend_ops at91_pm_ops ={
317+static const struct platform_suspend_ops at91_pm_ops ={
318 .valid = at91_pm_valid_state,
319 .begin = at91_pm_begin,
320 .enter = at91_pm_enter,
321diff -urNp linux-2.6.32.41/arch/arm/mach-omap1/pm.c linux-2.6.32.41/arch/arm/mach-omap1/pm.c
322--- linux-2.6.32.41/arch/arm/mach-omap1/pm.c 2011-03-27 14:31:47.000000000 -0400
323+++ linux-2.6.32.41/arch/arm/mach-omap1/pm.c 2011-04-17 15:56:45.000000000 -0400
324@@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq
325
326
327
328-static struct platform_suspend_ops omap_pm_ops ={
329+static const struct platform_suspend_ops omap_pm_ops ={
330 .prepare = omap_pm_prepare,
331 .enter = omap_pm_enter,
332 .finish = omap_pm_finish,
333diff -urNp linux-2.6.32.41/arch/arm/mach-omap2/pm24xx.c linux-2.6.32.41/arch/arm/mach-omap2/pm24xx.c
334--- linux-2.6.32.41/arch/arm/mach-omap2/pm24xx.c 2011-03-27 14:31:47.000000000 -0400
335+++ linux-2.6.32.41/arch/arm/mach-omap2/pm24xx.c 2011-04-17 15:56:45.000000000 -0400
336@@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
337 enable_hlt();
338 }
339
340-static struct platform_suspend_ops omap_pm_ops = {
341+static const struct platform_suspend_ops omap_pm_ops = {
342 .prepare = omap2_pm_prepare,
343 .enter = omap2_pm_enter,
344 .finish = omap2_pm_finish,
345diff -urNp linux-2.6.32.41/arch/arm/mach-omap2/pm34xx.c linux-2.6.32.41/arch/arm/mach-omap2/pm34xx.c
346--- linux-2.6.32.41/arch/arm/mach-omap2/pm34xx.c 2011-03-27 14:31:47.000000000 -0400
347+++ linux-2.6.32.41/arch/arm/mach-omap2/pm34xx.c 2011-04-17 15:56:45.000000000 -0400
348@@ -401,7 +401,7 @@ static void omap3_pm_end(void)
349 return;
350 }
351
352-static struct platform_suspend_ops omap_pm_ops = {
353+static const struct platform_suspend_ops omap_pm_ops = {
354 .begin = omap3_pm_begin,
355 .end = omap3_pm_end,
356 .prepare = omap3_pm_prepare,
357diff -urNp linux-2.6.32.41/arch/arm/mach-pnx4008/pm.c linux-2.6.32.41/arch/arm/mach-pnx4008/pm.c
358--- linux-2.6.32.41/arch/arm/mach-pnx4008/pm.c 2011-03-27 14:31:47.000000000 -0400
359+++ linux-2.6.32.41/arch/arm/mach-pnx4008/pm.c 2011-04-17 15:56:45.000000000 -0400
360@@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_stat
361 (state == PM_SUSPEND_MEM);
362 }
363
364-static struct platform_suspend_ops pnx4008_pm_ops = {
365+static const struct platform_suspend_ops pnx4008_pm_ops = {
366 .enter = pnx4008_pm_enter,
367 .valid = pnx4008_pm_valid,
368 };
369diff -urNp linux-2.6.32.41/arch/arm/mach-pxa/pm.c linux-2.6.32.41/arch/arm/mach-pxa/pm.c
370--- linux-2.6.32.41/arch/arm/mach-pxa/pm.c 2011-03-27 14:31:47.000000000 -0400
371+++ linux-2.6.32.41/arch/arm/mach-pxa/pm.c 2011-04-17 15:56:45.000000000 -0400
372@@ -95,7 +95,7 @@ void pxa_pm_finish(void)
373 pxa_cpu_pm_fns->finish();
374 }
375
376-static struct platform_suspend_ops pxa_pm_ops = {
377+static const struct platform_suspend_ops pxa_pm_ops = {
378 .valid = pxa_pm_valid,
379 .enter = pxa_pm_enter,
380 .prepare = pxa_pm_prepare,
381diff -urNp linux-2.6.32.41/arch/arm/mach-pxa/sharpsl_pm.c linux-2.6.32.41/arch/arm/mach-pxa/sharpsl_pm.c
382--- linux-2.6.32.41/arch/arm/mach-pxa/sharpsl_pm.c 2011-03-27 14:31:47.000000000 -0400
383+++ linux-2.6.32.41/arch/arm/mach-pxa/sharpsl_pm.c 2011-04-17 15:56:45.000000000 -0400
384@@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status
385 }
386
387 #ifdef CONFIG_PM
388-static struct platform_suspend_ops sharpsl_pm_ops = {
389+static const struct platform_suspend_ops sharpsl_pm_ops = {
390 .prepare = pxa_pm_prepare,
391 .finish = pxa_pm_finish,
392 .enter = corgi_pxa_pm_enter,
393diff -urNp linux-2.6.32.41/arch/arm/mach-sa1100/pm.c linux-2.6.32.41/arch/arm/mach-sa1100/pm.c
394--- linux-2.6.32.41/arch/arm/mach-sa1100/pm.c 2011-03-27 14:31:47.000000000 -0400
395+++ linux-2.6.32.41/arch/arm/mach-sa1100/pm.c 2011-04-17 15:56:45.000000000 -0400
396@@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
397 return virt_to_phys(sp);
398 }
399
400-static struct platform_suspend_ops sa11x0_pm_ops = {
401+static const struct platform_suspend_ops sa11x0_pm_ops = {
402 .enter = sa11x0_pm_enter,
403 .valid = suspend_valid_only_mem,
404 };
405diff -urNp linux-2.6.32.41/arch/arm/mm/fault.c linux-2.6.32.41/arch/arm/mm/fault.c
406--- linux-2.6.32.41/arch/arm/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
407+++ linux-2.6.32.41/arch/arm/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
408@@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk,
409 }
410 #endif
411
412+#ifdef CONFIG_PAX_PAGEEXEC
413+ if (fsr & FSR_LNX_PF) {
414+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
415+ do_group_exit(SIGKILL);
416+ }
417+#endif
418+
419 tsk->thread.address = addr;
420 tsk->thread.error_code = fsr;
421 tsk->thread.trap_no = 14;
422@@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsign
423 }
424 #endif /* CONFIG_MMU */
425
426+#ifdef CONFIG_PAX_PAGEEXEC
427+void pax_report_insns(void *pc, void *sp)
428+{
429+ long i;
430+
431+ printk(KERN_ERR "PAX: bytes at PC: ");
432+ for (i = 0; i < 20; i++) {
433+ unsigned char c;
434+ if (get_user(c, (__force unsigned char __user *)pc+i))
435+ printk(KERN_CONT "?? ");
436+ else
437+ printk(KERN_CONT "%02x ", c);
438+ }
439+ printk("\n");
440+
441+ printk(KERN_ERR "PAX: bytes at SP-4: ");
442+ for (i = -1; i < 20; i++) {
443+ unsigned long c;
444+ if (get_user(c, (__force unsigned long __user *)sp+i))
445+ printk(KERN_CONT "???????? ");
446+ else
447+ printk(KERN_CONT "%08lx ", c);
448+ }
449+ printk("\n");
450+}
451+#endif
452+
453 /*
454 * First Level Translation Fault Handler
455 *
456diff -urNp linux-2.6.32.41/arch/arm/mm/mmap.c linux-2.6.32.41/arch/arm/mm/mmap.c
457--- linux-2.6.32.41/arch/arm/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
458+++ linux-2.6.32.41/arch/arm/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
459@@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp
460 if (len > TASK_SIZE)
461 return -ENOMEM;
462
463+#ifdef CONFIG_PAX_RANDMMAP
464+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
465+#endif
466+
467 if (addr) {
468 if (do_align)
469 addr = COLOUR_ALIGN(addr, pgoff);
470@@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp
471 addr = PAGE_ALIGN(addr);
472
473 vma = find_vma(mm, addr);
474- if (TASK_SIZE - len >= addr &&
475- (!vma || addr + len <= vma->vm_start))
476+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
477 return addr;
478 }
479 if (len > mm->cached_hole_size) {
480- start_addr = addr = mm->free_area_cache;
481+ start_addr = addr = mm->free_area_cache;
482 } else {
483- start_addr = addr = TASK_UNMAPPED_BASE;
484- mm->cached_hole_size = 0;
485+ start_addr = addr = mm->mmap_base;
486+ mm->cached_hole_size = 0;
487 }
488
489 full_search:
490@@ -94,14 +97,14 @@ full_search:
491 * Start a new search - just in case we missed
492 * some holes.
493 */
494- if (start_addr != TASK_UNMAPPED_BASE) {
495- start_addr = addr = TASK_UNMAPPED_BASE;
496+ if (start_addr != mm->mmap_base) {
497+ start_addr = addr = mm->mmap_base;
498 mm->cached_hole_size = 0;
499 goto full_search;
500 }
501 return -ENOMEM;
502 }
503- if (!vma || addr + len <= vma->vm_start) {
504+ if (check_heap_stack_gap(vma, addr, len)) {
505 /*
506 * Remember the place where we stopped the search:
507 */
508diff -urNp linux-2.6.32.41/arch/arm/plat-s3c/pm.c linux-2.6.32.41/arch/arm/plat-s3c/pm.c
509--- linux-2.6.32.41/arch/arm/plat-s3c/pm.c 2011-03-27 14:31:47.000000000 -0400
510+++ linux-2.6.32.41/arch/arm/plat-s3c/pm.c 2011-04-17 15:56:45.000000000 -0400
511@@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
512 s3c_pm_check_cleanup();
513 }
514
515-static struct platform_suspend_ops s3c_pm_ops = {
516+static const struct platform_suspend_ops s3c_pm_ops = {
517 .enter = s3c_pm_enter,
518 .prepare = s3c_pm_prepare,
519 .finish = s3c_pm_finish,
520diff -urNp linux-2.6.32.41/arch/avr32/include/asm/elf.h linux-2.6.32.41/arch/avr32/include/asm/elf.h
521--- linux-2.6.32.41/arch/avr32/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
522+++ linux-2.6.32.41/arch/avr32/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
523@@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpreg
524 the loader. We need to make sure that it is out of the way of the program
525 that it will "exec", and that there is sufficient room for the brk. */
526
527-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
528+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
529
530+#ifdef CONFIG_PAX_ASLR
531+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
532+
533+#define PAX_DELTA_MMAP_LEN 15
534+#define PAX_DELTA_STACK_LEN 15
535+#endif
536
537 /* This yields a mask that user programs can use to figure out what
538 instruction set this CPU supports. This could be done in user space,
539diff -urNp linux-2.6.32.41/arch/avr32/include/asm/kmap_types.h linux-2.6.32.41/arch/avr32/include/asm/kmap_types.h
540--- linux-2.6.32.41/arch/avr32/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
541+++ linux-2.6.32.41/arch/avr32/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
542@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
543 D(11) KM_IRQ1,
544 D(12) KM_SOFTIRQ0,
545 D(13) KM_SOFTIRQ1,
546-D(14) KM_TYPE_NR
547+D(14) KM_CLEARPAGE,
548+D(15) KM_TYPE_NR
549 };
550
551 #undef D
552diff -urNp linux-2.6.32.41/arch/avr32/mach-at32ap/pm.c linux-2.6.32.41/arch/avr32/mach-at32ap/pm.c
553--- linux-2.6.32.41/arch/avr32/mach-at32ap/pm.c 2011-03-27 14:31:47.000000000 -0400
554+++ linux-2.6.32.41/arch/avr32/mach-at32ap/pm.c 2011-04-17 15:56:45.000000000 -0400
555@@ -176,7 +176,7 @@ out:
556 return 0;
557 }
558
559-static struct platform_suspend_ops avr32_pm_ops = {
560+static const struct platform_suspend_ops avr32_pm_ops = {
561 .valid = avr32_pm_valid_state,
562 .enter = avr32_pm_enter,
563 };
564diff -urNp linux-2.6.32.41/arch/avr32/mm/fault.c linux-2.6.32.41/arch/avr32/mm/fault.c
565--- linux-2.6.32.41/arch/avr32/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
566+++ linux-2.6.32.41/arch/avr32/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
567@@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
568
569 int exception_trace = 1;
570
571+#ifdef CONFIG_PAX_PAGEEXEC
572+void pax_report_insns(void *pc, void *sp)
573+{
574+ unsigned long i;
575+
576+ printk(KERN_ERR "PAX: bytes at PC: ");
577+ for (i = 0; i < 20; i++) {
578+ unsigned char c;
579+ if (get_user(c, (unsigned char *)pc+i))
580+ printk(KERN_CONT "???????? ");
581+ else
582+ printk(KERN_CONT "%02x ", c);
583+ }
584+ printk("\n");
585+}
586+#endif
587+
588 /*
589 * This routine handles page faults. It determines the address and the
590 * problem, and then passes it off to one of the appropriate routines.
591@@ -157,6 +174,16 @@ bad_area:
592 up_read(&mm->mmap_sem);
593
594 if (user_mode(regs)) {
595+
596+#ifdef CONFIG_PAX_PAGEEXEC
597+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
598+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
599+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
600+ do_group_exit(SIGKILL);
601+ }
602+ }
603+#endif
604+
605 if (exception_trace && printk_ratelimit())
606 printk("%s%s[%d]: segfault at %08lx pc %08lx "
607 "sp %08lx ecr %lu\n",
608diff -urNp linux-2.6.32.41/arch/blackfin/kernel/kgdb.c linux-2.6.32.41/arch/blackfin/kernel/kgdb.c
609--- linux-2.6.32.41/arch/blackfin/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
610+++ linux-2.6.32.41/arch/blackfin/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
611@@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vecto
612 return -1; /* this means that we do not want to exit from the handler */
613 }
614
615-struct kgdb_arch arch_kgdb_ops = {
616+const struct kgdb_arch arch_kgdb_ops = {
617 .gdb_bpt_instr = {0xa1},
618 #ifdef CONFIG_SMP
619 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
620diff -urNp linux-2.6.32.41/arch/blackfin/mach-common/pm.c linux-2.6.32.41/arch/blackfin/mach-common/pm.c
621--- linux-2.6.32.41/arch/blackfin/mach-common/pm.c 2011-03-27 14:31:47.000000000 -0400
622+++ linux-2.6.32.41/arch/blackfin/mach-common/pm.c 2011-04-17 15:56:45.000000000 -0400
623@@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t
624 return 0;
625 }
626
627-struct platform_suspend_ops bfin_pm_ops = {
628+const struct platform_suspend_ops bfin_pm_ops = {
629 .enter = bfin_pm_enter,
630 .valid = bfin_pm_valid,
631 };
632diff -urNp linux-2.6.32.41/arch/frv/include/asm/kmap_types.h linux-2.6.32.41/arch/frv/include/asm/kmap_types.h
633--- linux-2.6.32.41/arch/frv/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
634+++ linux-2.6.32.41/arch/frv/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
635@@ -23,6 +23,7 @@ enum km_type {
636 KM_IRQ1,
637 KM_SOFTIRQ0,
638 KM_SOFTIRQ1,
639+ KM_CLEARPAGE,
640 KM_TYPE_NR
641 };
642
643diff -urNp linux-2.6.32.41/arch/frv/mm/elf-fdpic.c linux-2.6.32.41/arch/frv/mm/elf-fdpic.c
644--- linux-2.6.32.41/arch/frv/mm/elf-fdpic.c 2011-03-27 14:31:47.000000000 -0400
645+++ linux-2.6.32.41/arch/frv/mm/elf-fdpic.c 2011-04-17 15:56:45.000000000 -0400
646@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
647 if (addr) {
648 addr = PAGE_ALIGN(addr);
649 vma = find_vma(current->mm, addr);
650- if (TASK_SIZE - len >= addr &&
651- (!vma || addr + len <= vma->vm_start))
652+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
653 goto success;
654 }
655
656@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
657 for (; vma; vma = vma->vm_next) {
658 if (addr > limit)
659 break;
660- if (addr + len <= vma->vm_start)
661+ if (check_heap_stack_gap(vma, addr, len))
662 goto success;
663 addr = vma->vm_end;
664 }
665@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
666 for (; vma; vma = vma->vm_next) {
667 if (addr > limit)
668 break;
669- if (addr + len <= vma->vm_start)
670+ if (check_heap_stack_gap(vma, addr, len))
671 goto success;
672 addr = vma->vm_end;
673 }
674diff -urNp linux-2.6.32.41/arch/ia64/hp/common/hwsw_iommu.c linux-2.6.32.41/arch/ia64/hp/common/hwsw_iommu.c
675--- linux-2.6.32.41/arch/ia64/hp/common/hwsw_iommu.c 2011-03-27 14:31:47.000000000 -0400
676+++ linux-2.6.32.41/arch/ia64/hp/common/hwsw_iommu.c 2011-04-17 15:56:45.000000000 -0400
677@@ -17,7 +17,7 @@
678 #include <linux/swiotlb.h>
679 #include <asm/machvec.h>
680
681-extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
682+extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
683
684 /* swiotlb declarations & definitions: */
685 extern int swiotlb_late_init_with_default_size (size_t size);
686@@ -33,7 +33,7 @@ static inline int use_swiotlb(struct dev
687 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
688 }
689
690-struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
691+const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
692 {
693 if (use_swiotlb(dev))
694 return &swiotlb_dma_ops;
695diff -urNp linux-2.6.32.41/arch/ia64/hp/common/sba_iommu.c linux-2.6.32.41/arch/ia64/hp/common/sba_iommu.c
696--- linux-2.6.32.41/arch/ia64/hp/common/sba_iommu.c 2011-03-27 14:31:47.000000000 -0400
697+++ linux-2.6.32.41/arch/ia64/hp/common/sba_iommu.c 2011-04-17 15:56:45.000000000 -0400
698@@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_d
699 },
700 };
701
702-extern struct dma_map_ops swiotlb_dma_ops;
703+extern const struct dma_map_ops swiotlb_dma_ops;
704
705 static int __init
706 sba_init(void)
707@@ -2211,7 +2211,7 @@ sba_page_override(char *str)
708
709 __setup("sbapagesize=",sba_page_override);
710
711-struct dma_map_ops sba_dma_ops = {
712+const struct dma_map_ops sba_dma_ops = {
713 .alloc_coherent = sba_alloc_coherent,
714 .free_coherent = sba_free_coherent,
715 .map_page = sba_map_page,
716diff -urNp linux-2.6.32.41/arch/ia64/ia32/binfmt_elf32.c linux-2.6.32.41/arch/ia64/ia32/binfmt_elf32.c
717--- linux-2.6.32.41/arch/ia64/ia32/binfmt_elf32.c 2011-03-27 14:31:47.000000000 -0400
718+++ linux-2.6.32.41/arch/ia64/ia32/binfmt_elf32.c 2011-04-17 15:56:45.000000000 -0400
719@@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_
720
721 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
722
723+#ifdef CONFIG_PAX_ASLR
724+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
725+
726+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
727+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
728+#endif
729+
730 /* Ugly but avoids duplication */
731 #include "../../../fs/binfmt_elf.c"
732
733diff -urNp linux-2.6.32.41/arch/ia64/ia32/ia32priv.h linux-2.6.32.41/arch/ia64/ia32/ia32priv.h
734--- linux-2.6.32.41/arch/ia64/ia32/ia32priv.h 2011-03-27 14:31:47.000000000 -0400
735+++ linux-2.6.32.41/arch/ia64/ia32/ia32priv.h 2011-04-17 15:56:45.000000000 -0400
736@@ -296,7 +296,14 @@ typedef struct compat_siginfo {
737 #define ELF_DATA ELFDATA2LSB
738 #define ELF_ARCH EM_386
739
740-#define IA32_STACK_TOP IA32_PAGE_OFFSET
741+#ifdef CONFIG_PAX_RANDUSTACK
742+#define __IA32_DELTA_STACK (current->mm->delta_stack)
743+#else
744+#define __IA32_DELTA_STACK 0UL
745+#endif
746+
747+#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
748+
749 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
750 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
751
752diff -urNp linux-2.6.32.41/arch/ia64/include/asm/dma-mapping.h linux-2.6.32.41/arch/ia64/include/asm/dma-mapping.h
753--- linux-2.6.32.41/arch/ia64/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
754+++ linux-2.6.32.41/arch/ia64/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
755@@ -12,7 +12,7 @@
756
757 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
758
759-extern struct dma_map_ops *dma_ops;
760+extern const struct dma_map_ops *dma_ops;
761 extern struct ia64_machine_vector ia64_mv;
762 extern void set_iommu_machvec(void);
763
764@@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct d
765 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
766 dma_addr_t *daddr, gfp_t gfp)
767 {
768- struct dma_map_ops *ops = platform_dma_get_ops(dev);
769+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
770 void *caddr;
771
772 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
773@@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(s
774 static inline void dma_free_coherent(struct device *dev, size_t size,
775 void *caddr, dma_addr_t daddr)
776 {
777- struct dma_map_ops *ops = platform_dma_get_ops(dev);
778+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
779 debug_dma_free_coherent(dev, size, caddr, daddr);
780 ops->free_coherent(dev, size, caddr, daddr);
781 }
782@@ -49,13 +49,13 @@ static inline void dma_free_coherent(str
783
784 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
785 {
786- struct dma_map_ops *ops = platform_dma_get_ops(dev);
787+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
788 return ops->mapping_error(dev, daddr);
789 }
790
791 static inline int dma_supported(struct device *dev, u64 mask)
792 {
793- struct dma_map_ops *ops = platform_dma_get_ops(dev);
794+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
795 return ops->dma_supported(dev, mask);
796 }
797
798diff -urNp linux-2.6.32.41/arch/ia64/include/asm/elf.h linux-2.6.32.41/arch/ia64/include/asm/elf.h
799--- linux-2.6.32.41/arch/ia64/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
800+++ linux-2.6.32.41/arch/ia64/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
801@@ -43,6 +43,13 @@
802 */
803 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
804
805+#ifdef CONFIG_PAX_ASLR
806+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
807+
808+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
809+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
810+#endif
811+
812 #define PT_IA_64_UNWIND 0x70000001
813
814 /* IA-64 relocations: */
815diff -urNp linux-2.6.32.41/arch/ia64/include/asm/machvec.h linux-2.6.32.41/arch/ia64/include/asm/machvec.h
816--- linux-2.6.32.41/arch/ia64/include/asm/machvec.h 2011-03-27 14:31:47.000000000 -0400
817+++ linux-2.6.32.41/arch/ia64/include/asm/machvec.h 2011-04-17 15:56:45.000000000 -0400
818@@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event
819 /* DMA-mapping interface: */
820 typedef void ia64_mv_dma_init (void);
821 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
822-typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
823+typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
824
825 /*
826 * WARNING: The legacy I/O space is _architected_. Platforms are
827@@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(co
828 # endif /* CONFIG_IA64_GENERIC */
829
830 extern void swiotlb_dma_init(void);
831-extern struct dma_map_ops *dma_get_ops(struct device *);
832+extern const struct dma_map_ops *dma_get_ops(struct device *);
833
834 /*
835 * Define default versions so we can extend machvec for new platforms without having
836diff -urNp linux-2.6.32.41/arch/ia64/include/asm/pgtable.h linux-2.6.32.41/arch/ia64/include/asm/pgtable.h
837--- linux-2.6.32.41/arch/ia64/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
838+++ linux-2.6.32.41/arch/ia64/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
839@@ -12,7 +12,7 @@
840 * David Mosberger-Tang <davidm@hpl.hp.com>
841 */
842
843-
844+#include <linux/const.h>
845 #include <asm/mman.h>
846 #include <asm/page.h>
847 #include <asm/processor.h>
848@@ -143,6 +143,17 @@
849 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
850 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
851 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
852+
853+#ifdef CONFIG_PAX_PAGEEXEC
854+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
855+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
856+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
857+#else
858+# define PAGE_SHARED_NOEXEC PAGE_SHARED
859+# define PAGE_READONLY_NOEXEC PAGE_READONLY
860+# define PAGE_COPY_NOEXEC PAGE_COPY
861+#endif
862+
863 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
864 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
865 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
866diff -urNp linux-2.6.32.41/arch/ia64/include/asm/spinlock.h linux-2.6.32.41/arch/ia64/include/asm/spinlock.h
867--- linux-2.6.32.41/arch/ia64/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
868+++ linux-2.6.32.41/arch/ia64/include/asm/spinlock.h 2011-04-17 15:56:45.000000000 -0400
869@@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
870 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
871
872 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
873- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
874+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
875 }
876
877 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
878diff -urNp linux-2.6.32.41/arch/ia64/include/asm/uaccess.h linux-2.6.32.41/arch/ia64/include/asm/uaccess.h
879--- linux-2.6.32.41/arch/ia64/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
880+++ linux-2.6.32.41/arch/ia64/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
881@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
882 const void *__cu_from = (from); \
883 long __cu_len = (n); \
884 \
885- if (__access_ok(__cu_to, __cu_len, get_fs())) \
886+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
887 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
888 __cu_len; \
889 })
890@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
891 long __cu_len = (n); \
892 \
893 __chk_user_ptr(__cu_from); \
894- if (__access_ok(__cu_from, __cu_len, get_fs())) \
895+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
896 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
897 __cu_len; \
898 })
899diff -urNp linux-2.6.32.41/arch/ia64/kernel/dma-mapping.c linux-2.6.32.41/arch/ia64/kernel/dma-mapping.c
900--- linux-2.6.32.41/arch/ia64/kernel/dma-mapping.c 2011-03-27 14:31:47.000000000 -0400
901+++ linux-2.6.32.41/arch/ia64/kernel/dma-mapping.c 2011-04-17 15:56:45.000000000 -0400
902@@ -3,7 +3,7 @@
903 /* Set this to 1 if there is a HW IOMMU in the system */
904 int iommu_detected __read_mostly;
905
906-struct dma_map_ops *dma_ops;
907+const struct dma_map_ops *dma_ops;
908 EXPORT_SYMBOL(dma_ops);
909
910 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
911@@ -16,7 +16,7 @@ static int __init dma_init(void)
912 }
913 fs_initcall(dma_init);
914
915-struct dma_map_ops *dma_get_ops(struct device *dev)
916+const struct dma_map_ops *dma_get_ops(struct device *dev)
917 {
918 return dma_ops;
919 }
920diff -urNp linux-2.6.32.41/arch/ia64/kernel/module.c linux-2.6.32.41/arch/ia64/kernel/module.c
921--- linux-2.6.32.41/arch/ia64/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
922+++ linux-2.6.32.41/arch/ia64/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
923@@ -315,8 +315,7 @@ module_alloc (unsigned long size)
924 void
925 module_free (struct module *mod, void *module_region)
926 {
927- if (mod && mod->arch.init_unw_table &&
928- module_region == mod->module_init) {
929+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
930 unw_remove_unwind_table(mod->arch.init_unw_table);
931 mod->arch.init_unw_table = NULL;
932 }
933@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
934 }
935
936 static inline int
937+in_init_rx (const struct module *mod, uint64_t addr)
938+{
939+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
940+}
941+
942+static inline int
943+in_init_rw (const struct module *mod, uint64_t addr)
944+{
945+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
946+}
947+
948+static inline int
949 in_init (const struct module *mod, uint64_t addr)
950 {
951- return addr - (uint64_t) mod->module_init < mod->init_size;
952+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
953+}
954+
955+static inline int
956+in_core_rx (const struct module *mod, uint64_t addr)
957+{
958+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
959+}
960+
961+static inline int
962+in_core_rw (const struct module *mod, uint64_t addr)
963+{
964+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
965 }
966
967 static inline int
968 in_core (const struct module *mod, uint64_t addr)
969 {
970- return addr - (uint64_t) mod->module_core < mod->core_size;
971+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
972 }
973
974 static inline int
975@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
976 break;
977
978 case RV_BDREL:
979- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
980+ if (in_init_rx(mod, val))
981+ val -= (uint64_t) mod->module_init_rx;
982+ else if (in_init_rw(mod, val))
983+ val -= (uint64_t) mod->module_init_rw;
984+ else if (in_core_rx(mod, val))
985+ val -= (uint64_t) mod->module_core_rx;
986+ else if (in_core_rw(mod, val))
987+ val -= (uint64_t) mod->module_core_rw;
988 break;
989
990 case RV_LTV:
991@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
992 * addresses have been selected...
993 */
994 uint64_t gp;
995- if (mod->core_size > MAX_LTOFF)
996+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
997 /*
998 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
999 * at the end of the module.
1000 */
1001- gp = mod->core_size - MAX_LTOFF / 2;
1002+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1003 else
1004- gp = mod->core_size / 2;
1005- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1006+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1007+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1008 mod->arch.gp = gp;
1009 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1010 }
1011diff -urNp linux-2.6.32.41/arch/ia64/kernel/pci-dma.c linux-2.6.32.41/arch/ia64/kernel/pci-dma.c
1012--- linux-2.6.32.41/arch/ia64/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
1013+++ linux-2.6.32.41/arch/ia64/kernel/pci-dma.c 2011-04-17 15:56:45.000000000 -0400
1014@@ -43,7 +43,7 @@ struct device fallback_dev = {
1015 .dma_mask = &fallback_dev.coherent_dma_mask,
1016 };
1017
1018-extern struct dma_map_ops intel_dma_ops;
1019+extern const struct dma_map_ops intel_dma_ops;
1020
1021 static int __init pci_iommu_init(void)
1022 {
1023@@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *d
1024 }
1025 EXPORT_SYMBOL(iommu_dma_supported);
1026
1027+extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
1028+extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
1029+extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1030+extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1031+extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1032+extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1033+extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
1034+
1035+static const struct dma_map_ops intel_iommu_dma_ops = {
1036+ /* from drivers/pci/intel-iommu.c:intel_dma_ops */
1037+ .alloc_coherent = intel_alloc_coherent,
1038+ .free_coherent = intel_free_coherent,
1039+ .map_sg = intel_map_sg,
1040+ .unmap_sg = intel_unmap_sg,
1041+ .map_page = intel_map_page,
1042+ .unmap_page = intel_unmap_page,
1043+ .mapping_error = intel_mapping_error,
1044+
1045+ .sync_single_for_cpu = machvec_dma_sync_single,
1046+ .sync_sg_for_cpu = machvec_dma_sync_sg,
1047+ .sync_single_for_device = machvec_dma_sync_single,
1048+ .sync_sg_for_device = machvec_dma_sync_sg,
1049+ .dma_supported = iommu_dma_supported,
1050+};
1051+
1052 void __init pci_iommu_alloc(void)
1053 {
1054- dma_ops = &intel_dma_ops;
1055-
1056- dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
1057- dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
1058- dma_ops->sync_single_for_device = machvec_dma_sync_single;
1059- dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
1060- dma_ops->dma_supported = iommu_dma_supported;
1061+ dma_ops = &intel_iommu_dma_ops;
1062
1063 /*
1064 * The order of these functions is important for
1065diff -urNp linux-2.6.32.41/arch/ia64/kernel/pci-swiotlb.c linux-2.6.32.41/arch/ia64/kernel/pci-swiotlb.c
1066--- linux-2.6.32.41/arch/ia64/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
1067+++ linux-2.6.32.41/arch/ia64/kernel/pci-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
1068@@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent
1069 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1070 }
1071
1072-struct dma_map_ops swiotlb_dma_ops = {
1073+const struct dma_map_ops swiotlb_dma_ops = {
1074 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1075 .free_coherent = swiotlb_free_coherent,
1076 .map_page = swiotlb_map_page,
1077diff -urNp linux-2.6.32.41/arch/ia64/kernel/sys_ia64.c linux-2.6.32.41/arch/ia64/kernel/sys_ia64.c
1078--- linux-2.6.32.41/arch/ia64/kernel/sys_ia64.c 2011-03-27 14:31:47.000000000 -0400
1079+++ linux-2.6.32.41/arch/ia64/kernel/sys_ia64.c 2011-04-17 15:56:45.000000000 -0400
1080@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
1081 if (REGION_NUMBER(addr) == RGN_HPAGE)
1082 addr = 0;
1083 #endif
1084+
1085+#ifdef CONFIG_PAX_RANDMMAP
1086+ if (mm->pax_flags & MF_PAX_RANDMMAP)
1087+ addr = mm->free_area_cache;
1088+ else
1089+#endif
1090+
1091 if (!addr)
1092 addr = mm->free_area_cache;
1093
1094@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
1095 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1096 /* At this point: (!vma || addr < vma->vm_end). */
1097 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1098- if (start_addr != TASK_UNMAPPED_BASE) {
1099+ if (start_addr != mm->mmap_base) {
1100 /* Start a new search --- just in case we missed some holes. */
1101- addr = TASK_UNMAPPED_BASE;
1102+ addr = mm->mmap_base;
1103 goto full_search;
1104 }
1105 return -ENOMEM;
1106 }
1107- if (!vma || addr + len <= vma->vm_start) {
1108+ if (check_heap_stack_gap(vma, addr, len)) {
1109 /* Remember the address where we stopped this search: */
1110 mm->free_area_cache = addr + len;
1111 return addr;
1112diff -urNp linux-2.6.32.41/arch/ia64/kernel/topology.c linux-2.6.32.41/arch/ia64/kernel/topology.c
1113--- linux-2.6.32.41/arch/ia64/kernel/topology.c 2011-03-27 14:31:47.000000000 -0400
1114+++ linux-2.6.32.41/arch/ia64/kernel/topology.c 2011-04-17 15:56:45.000000000 -0400
1115@@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject
1116 return ret;
1117 }
1118
1119-static struct sysfs_ops cache_sysfs_ops = {
1120+static const struct sysfs_ops cache_sysfs_ops = {
1121 .show = cache_show
1122 };
1123
1124diff -urNp linux-2.6.32.41/arch/ia64/kernel/vmlinux.lds.S linux-2.6.32.41/arch/ia64/kernel/vmlinux.lds.S
1125--- linux-2.6.32.41/arch/ia64/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
1126+++ linux-2.6.32.41/arch/ia64/kernel/vmlinux.lds.S 2011-04-17 15:56:45.000000000 -0400
1127@@ -190,7 +190,7 @@ SECTIONS
1128 /* Per-cpu data: */
1129 . = ALIGN(PERCPU_PAGE_SIZE);
1130 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1131- __phys_per_cpu_start = __per_cpu_load;
1132+ __phys_per_cpu_start = per_cpu_load;
1133 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1134 * into percpu page size
1135 */
1136diff -urNp linux-2.6.32.41/arch/ia64/mm/fault.c linux-2.6.32.41/arch/ia64/mm/fault.c
1137--- linux-2.6.32.41/arch/ia64/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1138+++ linux-2.6.32.41/arch/ia64/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1139@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned
1140 return pte_present(pte);
1141 }
1142
1143+#ifdef CONFIG_PAX_PAGEEXEC
1144+void pax_report_insns(void *pc, void *sp)
1145+{
1146+ unsigned long i;
1147+
1148+ printk(KERN_ERR "PAX: bytes at PC: ");
1149+ for (i = 0; i < 8; i++) {
1150+ unsigned int c;
1151+ if (get_user(c, (unsigned int *)pc+i))
1152+ printk(KERN_CONT "???????? ");
1153+ else
1154+ printk(KERN_CONT "%08x ", c);
1155+ }
1156+ printk("\n");
1157+}
1158+#endif
1159+
1160 void __kprobes
1161 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1162 {
1163@@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres
1164 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1165 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1166
1167- if ((vma->vm_flags & mask) != mask)
1168+ if ((vma->vm_flags & mask) != mask) {
1169+
1170+#ifdef CONFIG_PAX_PAGEEXEC
1171+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1172+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1173+ goto bad_area;
1174+
1175+ up_read(&mm->mmap_sem);
1176+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1177+ do_group_exit(SIGKILL);
1178+ }
1179+#endif
1180+
1181 goto bad_area;
1182
1183+ }
1184+
1185 survive:
1186 /*
1187 * If for any reason at all we couldn't handle the fault, make
1188diff -urNp linux-2.6.32.41/arch/ia64/mm/hugetlbpage.c linux-2.6.32.41/arch/ia64/mm/hugetlbpage.c
1189--- linux-2.6.32.41/arch/ia64/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
1190+++ linux-2.6.32.41/arch/ia64/mm/hugetlbpage.c 2011-04-17 15:56:45.000000000 -0400
1191@@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(
1192 /* At this point: (!vmm || addr < vmm->vm_end). */
1193 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1194 return -ENOMEM;
1195- if (!vmm || (addr + len) <= vmm->vm_start)
1196+ if (check_heap_stack_gap(vmm, addr, len))
1197 return addr;
1198 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1199 }
1200diff -urNp linux-2.6.32.41/arch/ia64/mm/init.c linux-2.6.32.41/arch/ia64/mm/init.c
1201--- linux-2.6.32.41/arch/ia64/mm/init.c 2011-03-27 14:31:47.000000000 -0400
1202+++ linux-2.6.32.41/arch/ia64/mm/init.c 2011-04-17 15:56:45.000000000 -0400
1203@@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1204 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1205 vma->vm_end = vma->vm_start + PAGE_SIZE;
1206 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1207+
1208+#ifdef CONFIG_PAX_PAGEEXEC
1209+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1210+ vma->vm_flags &= ~VM_EXEC;
1211+
1212+#ifdef CONFIG_PAX_MPROTECT
1213+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1214+ vma->vm_flags &= ~VM_MAYEXEC;
1215+#endif
1216+
1217+ }
1218+#endif
1219+
1220 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1221 down_write(&current->mm->mmap_sem);
1222 if (insert_vm_struct(current->mm, vma)) {
1223diff -urNp linux-2.6.32.41/arch/ia64/sn/pci/pci_dma.c linux-2.6.32.41/arch/ia64/sn/pci/pci_dma.c
1224--- linux-2.6.32.41/arch/ia64/sn/pci/pci_dma.c 2011-03-27 14:31:47.000000000 -0400
1225+++ linux-2.6.32.41/arch/ia64/sn/pci/pci_dma.c 2011-04-17 15:56:45.000000000 -0400
1226@@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *
1227 return ret;
1228 }
1229
1230-static struct dma_map_ops sn_dma_ops = {
1231+static const struct dma_map_ops sn_dma_ops = {
1232 .alloc_coherent = sn_dma_alloc_coherent,
1233 .free_coherent = sn_dma_free_coherent,
1234 .map_page = sn_dma_map_page,
1235diff -urNp linux-2.6.32.41/arch/m32r/lib/usercopy.c linux-2.6.32.41/arch/m32r/lib/usercopy.c
1236--- linux-2.6.32.41/arch/m32r/lib/usercopy.c 2011-03-27 14:31:47.000000000 -0400
1237+++ linux-2.6.32.41/arch/m32r/lib/usercopy.c 2011-04-17 15:56:45.000000000 -0400
1238@@ -14,6 +14,9 @@
1239 unsigned long
1240 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1241 {
1242+ if ((long)n < 0)
1243+ return n;
1244+
1245 prefetch(from);
1246 if (access_ok(VERIFY_WRITE, to, n))
1247 __copy_user(to,from,n);
1248@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1249 unsigned long
1250 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1251 {
1252+ if ((long)n < 0)
1253+ return n;
1254+
1255 prefetchw(to);
1256 if (access_ok(VERIFY_READ, from, n))
1257 __copy_user_zeroing(to,from,n);
1258diff -urNp linux-2.6.32.41/arch/mips/alchemy/devboards/pm.c linux-2.6.32.41/arch/mips/alchemy/devboards/pm.c
1259--- linux-2.6.32.41/arch/mips/alchemy/devboards/pm.c 2011-03-27 14:31:47.000000000 -0400
1260+++ linux-2.6.32.41/arch/mips/alchemy/devboards/pm.c 2011-04-17 15:56:45.000000000 -0400
1261@@ -78,7 +78,7 @@ static void db1x_pm_end(void)
1262
1263 }
1264
1265-static struct platform_suspend_ops db1x_pm_ops = {
1266+static const struct platform_suspend_ops db1x_pm_ops = {
1267 .valid = suspend_valid_only_mem,
1268 .begin = db1x_pm_begin,
1269 .enter = db1x_pm_enter,
1270diff -urNp linux-2.6.32.41/arch/mips/include/asm/elf.h linux-2.6.32.41/arch/mips/include/asm/elf.h
1271--- linux-2.6.32.41/arch/mips/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1272+++ linux-2.6.32.41/arch/mips/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1273@@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_str
1274 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1275 #endif
1276
1277+#ifdef CONFIG_PAX_ASLR
1278+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1279+
1280+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1281+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1282+#endif
1283+
1284 #endif /* _ASM_ELF_H */
1285diff -urNp linux-2.6.32.41/arch/mips/include/asm/page.h linux-2.6.32.41/arch/mips/include/asm/page.h
1286--- linux-2.6.32.41/arch/mips/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
1287+++ linux-2.6.32.41/arch/mips/include/asm/page.h 2011-04-17 15:56:45.000000000 -0400
1288@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1289 #ifdef CONFIG_CPU_MIPS32
1290 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1291 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1292- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1293+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1294 #else
1295 typedef struct { unsigned long long pte; } pte_t;
1296 #define pte_val(x) ((x).pte)
1297diff -urNp linux-2.6.32.41/arch/mips/include/asm/system.h linux-2.6.32.41/arch/mips/include/asm/system.h
1298--- linux-2.6.32.41/arch/mips/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
1299+++ linux-2.6.32.41/arch/mips/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
1300@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1301 */
1302 #define __ARCH_WANT_UNLOCKED_CTXSW
1303
1304-extern unsigned long arch_align_stack(unsigned long sp);
1305+#define arch_align_stack(x) ((x) & ~0xfUL)
1306
1307 #endif /* _ASM_SYSTEM_H */
1308diff -urNp linux-2.6.32.41/arch/mips/kernel/binfmt_elfn32.c linux-2.6.32.41/arch/mips/kernel/binfmt_elfn32.c
1309--- linux-2.6.32.41/arch/mips/kernel/binfmt_elfn32.c 2011-03-27 14:31:47.000000000 -0400
1310+++ linux-2.6.32.41/arch/mips/kernel/binfmt_elfn32.c 2011-04-17 15:56:45.000000000 -0400
1311@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1312 #undef ELF_ET_DYN_BASE
1313 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1314
1315+#ifdef CONFIG_PAX_ASLR
1316+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1317+
1318+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1319+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1320+#endif
1321+
1322 #include <asm/processor.h>
1323 #include <linux/module.h>
1324 #include <linux/elfcore.h>
1325diff -urNp linux-2.6.32.41/arch/mips/kernel/binfmt_elfo32.c linux-2.6.32.41/arch/mips/kernel/binfmt_elfo32.c
1326--- linux-2.6.32.41/arch/mips/kernel/binfmt_elfo32.c 2011-03-27 14:31:47.000000000 -0400
1327+++ linux-2.6.32.41/arch/mips/kernel/binfmt_elfo32.c 2011-04-17 15:56:45.000000000 -0400
1328@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1329 #undef ELF_ET_DYN_BASE
1330 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1331
1332+#ifdef CONFIG_PAX_ASLR
1333+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1334+
1335+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1336+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1337+#endif
1338+
1339 #include <asm/processor.h>
1340
1341 /*
1342diff -urNp linux-2.6.32.41/arch/mips/kernel/kgdb.c linux-2.6.32.41/arch/mips/kernel/kgdb.c
1343--- linux-2.6.32.41/arch/mips/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
1344+++ linux-2.6.32.41/arch/mips/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
1345@@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vecto
1346 return -1;
1347 }
1348
1349+/* cannot be const */
1350 struct kgdb_arch arch_kgdb_ops;
1351
1352 /*
1353diff -urNp linux-2.6.32.41/arch/mips/kernel/process.c linux-2.6.32.41/arch/mips/kernel/process.c
1354--- linux-2.6.32.41/arch/mips/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
1355+++ linux-2.6.32.41/arch/mips/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
1356@@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_stru
1357 out:
1358 return pc;
1359 }
1360-
1361-/*
1362- * Don't forget that the stack pointer must be aligned on a 8 bytes
1363- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1364- */
1365-unsigned long arch_align_stack(unsigned long sp)
1366-{
1367- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1368- sp -= get_random_int() & ~PAGE_MASK;
1369-
1370- return sp & ALMASK;
1371-}
1372diff -urNp linux-2.6.32.41/arch/mips/kernel/syscall.c linux-2.6.32.41/arch/mips/kernel/syscall.c
1373--- linux-2.6.32.41/arch/mips/kernel/syscall.c 2011-03-27 14:31:47.000000000 -0400
1374+++ linux-2.6.32.41/arch/mips/kernel/syscall.c 2011-04-17 15:56:45.000000000 -0400
1375@@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(str
1376 do_color_align = 0;
1377 if (filp || (flags & MAP_SHARED))
1378 do_color_align = 1;
1379+
1380+#ifdef CONFIG_PAX_RANDMMAP
1381+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1382+#endif
1383+
1384 if (addr) {
1385 if (do_color_align)
1386 addr = COLOUR_ALIGN(addr, pgoff);
1387 else
1388 addr = PAGE_ALIGN(addr);
1389 vmm = find_vma(current->mm, addr);
1390- if (task_size - len >= addr &&
1391- (!vmm || addr + len <= vmm->vm_start))
1392+ if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
1393 return addr;
1394 }
1395- addr = TASK_UNMAPPED_BASE;
1396+ addr = current->mm->mmap_base;
1397 if (do_color_align)
1398 addr = COLOUR_ALIGN(addr, pgoff);
1399 else
1400@@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(str
1401 /* At this point: (!vmm || addr < vmm->vm_end). */
1402 if (task_size - len < addr)
1403 return -ENOMEM;
1404- if (!vmm || addr + len <= vmm->vm_start)
1405+ if (check_heap_stack_gap(vmm, addr, len))
1406 return addr;
1407 addr = vmm->vm_end;
1408 if (do_color_align)
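
A note on the replacement above: the open-coded test "!vmm || addr + len <= vmm->vm_start" becomes check_heap_stack_gap(vmm, addr, len). The helper's real definition lives elsewhere in this patch; the sketch below is only a standalone userspace illustration of the idea, namely that a candidate range must stay clear of the next mapping and, for a downward-growing stack, keep an extra guard gap below it. struct vma_stub, GUARD_GAP and gap_ok are invented names for this sketch.

/* Standalone sketch of a heap/stack gap check; not the kernel code. */
#include <stdio.h>
#include <stdbool.h>

struct vma_stub {                 /* stand-in for struct vm_area_struct */
	unsigned long vm_start;
	unsigned long vm_end;
	bool grows_down;          /* stand-in for VM_GROWSDOWN */
};

#define GUARD_GAP (64UL * 1024)   /* illustrative guard size only */

static bool gap_ok(const struct vma_stub *next, unsigned long addr, unsigned long len)
{
	if (!next)                /* no mapping above: the range fits */
		return true;
	return addr + len <= next->vm_start - (next->grows_down ? GUARD_GAP : 0);
}

int main(void)
{
	struct vma_stub stack = { 0x7f0000000000UL, 0x7f0000100000UL, true };
	printf("%d\n", gap_ok(&stack, 0x7effffff0000UL, 0x10000));  /* 0: inside the guard gap */
	printf("%d\n", gap_ok(&stack, 0x7effffe00000UL, 0x10000));  /* 1: clear of the stack */
	return 0;
}
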
1409diff -urNp linux-2.6.32.41/arch/mips/mm/fault.c linux-2.6.32.41/arch/mips/mm/fault.c
1410--- linux-2.6.32.41/arch/mips/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1411+++ linux-2.6.32.41/arch/mips/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1412@@ -26,6 +26,23 @@
1413 #include <asm/ptrace.h>
1414 #include <asm/highmem.h> /* For VMALLOC_END */
1415
1416+#ifdef CONFIG_PAX_PAGEEXEC
1417+void pax_report_insns(void *pc, void *sp)
1418+{
1419+ unsigned long i;
1420+
1421+ printk(KERN_ERR "PAX: bytes at PC: ");
1422+ for (i = 0; i < 5; i++) {
1423+ unsigned int c;
1424+ if (get_user(c, (unsigned int *)pc+i))
1425+ printk(KERN_CONT "???????? ");
1426+ else
1427+ printk(KERN_CONT "%08x ", c);
1428+ }
1429+ printk("\n");
1430+}
1431+#endif
1432+
1433 /*
1434 * This routine handles page faults. It determines the address,
1435 * and the problem, and then passes it off to one of the appropriate
1436diff -urNp linux-2.6.32.41/arch/parisc/include/asm/elf.h linux-2.6.32.41/arch/parisc/include/asm/elf.h
1437--- linux-2.6.32.41/arch/parisc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1438+++ linux-2.6.32.41/arch/parisc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1439@@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration..
1440
1441 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1442
1443+#ifdef CONFIG_PAX_ASLR
1444+#define PAX_ELF_ET_DYN_BASE 0x10000UL
1445+
1446+#define PAX_DELTA_MMAP_LEN 16
1447+#define PAX_DELTA_STACK_LEN 16
1448+#endif
1449+
1450 /* This yields a mask that user programs can use to figure out what
1451 instruction set this CPU supports. This could be done in user space,
1452 but it's not easy, and we've already done it here. */
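
The PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN values introduced above are bit counts, not byte offsets. The standalone sketch below shows roughly how a delta of N bits becomes a page-aligned random offset added to a base such as PAX_ELF_ET_DYN_BASE; random_delta is an invented helper and rand() only stands in for the kernel's entropy source.

/* Illustrative only; the real randomization happens in the kernel's
 * ELF loader and mmap layout code. */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12

static unsigned long random_delta(unsigned int delta_bits)
{
	unsigned long mask = (1UL << delta_bits) - 1;
	return ((unsigned long)rand() & mask) << PAGE_SHIFT;
}

int main(void)
{
	unsigned long base = 0x10000UL;           /* PAX_ELF_ET_DYN_BASE above */
	unsigned long delta = random_delta(16);   /* PAX_DELTA_MMAP_LEN == 16 above */
	printf("randomized base: %#lx\n", base + delta);
	return 0;
}
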
1453diff -urNp linux-2.6.32.41/arch/parisc/include/asm/pgtable.h linux-2.6.32.41/arch/parisc/include/asm/pgtable.h
1454--- linux-2.6.32.41/arch/parisc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1455+++ linux-2.6.32.41/arch/parisc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1456@@ -207,6 +207,17 @@
1457 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1458 #define PAGE_COPY PAGE_EXECREAD
1459 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1460+
1461+#ifdef CONFIG_PAX_PAGEEXEC
1462+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1463+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1464+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1465+#else
1466+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1467+# define PAGE_COPY_NOEXEC PAGE_COPY
1468+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1469+#endif
1470+
1471 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1472 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
1473 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
1474diff -urNp linux-2.6.32.41/arch/parisc/kernel/module.c linux-2.6.32.41/arch/parisc/kernel/module.c
1475--- linux-2.6.32.41/arch/parisc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
1476+++ linux-2.6.32.41/arch/parisc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
1477@@ -95,16 +95,38 @@
1478
1479 /* three functions to determine where in the module core
1480 * or init pieces the location is */
1481+static inline int in_init_rx(struct module *me, void *loc)
1482+{
1483+ return (loc >= me->module_init_rx &&
1484+ loc < (me->module_init_rx + me->init_size_rx));
1485+}
1486+
1487+static inline int in_init_rw(struct module *me, void *loc)
1488+{
1489+ return (loc >= me->module_init_rw &&
1490+ loc < (me->module_init_rw + me->init_size_rw));
1491+}
1492+
1493 static inline int in_init(struct module *me, void *loc)
1494 {
1495- return (loc >= me->module_init &&
1496- loc <= (me->module_init + me->init_size));
1497+ return in_init_rx(me, loc) || in_init_rw(me, loc);
1498+}
1499+
1500+static inline int in_core_rx(struct module *me, void *loc)
1501+{
1502+ return (loc >= me->module_core_rx &&
1503+ loc < (me->module_core_rx + me->core_size_rx));
1504+}
1505+
1506+static inline int in_core_rw(struct module *me, void *loc)
1507+{
1508+ return (loc >= me->module_core_rw &&
1509+ loc < (me->module_core_rw + me->core_size_rw));
1510 }
1511
1512 static inline int in_core(struct module *me, void *loc)
1513 {
1514- return (loc >= me->module_core &&
1515- loc <= (me->module_core + me->core_size));
1516+ return in_core_rx(me, loc) || in_core_rw(me, loc);
1517 }
1518
1519 static inline int in_local(struct module *me, void *loc)
1520@@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_
1521 }
1522
1523 /* align things a bit */
1524- me->core_size = ALIGN(me->core_size, 16);
1525- me->arch.got_offset = me->core_size;
1526- me->core_size += gots * sizeof(struct got_entry);
1527-
1528- me->core_size = ALIGN(me->core_size, 16);
1529- me->arch.fdesc_offset = me->core_size;
1530- me->core_size += fdescs * sizeof(Elf_Fdesc);
1531+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1532+ me->arch.got_offset = me->core_size_rw;
1533+ me->core_size_rw += gots * sizeof(struct got_entry);
1534+
1535+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1536+ me->arch.fdesc_offset = me->core_size_rw;
1537+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1538
1539 me->arch.got_max = gots;
1540 me->arch.fdesc_max = fdescs;
1541@@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module
1542
1543 BUG_ON(value == 0);
1544
1545- got = me->module_core + me->arch.got_offset;
1546+ got = me->module_core_rw + me->arch.got_offset;
1547 for (i = 0; got[i].addr; i++)
1548 if (got[i].addr == value)
1549 goto out;
1550@@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module
1551 #ifdef CONFIG_64BIT
1552 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1553 {
1554- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1555+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1556
1557 if (!value) {
1558 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1559@@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module
1560
1561 /* Create new one */
1562 fdesc->addr = value;
1563- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1564+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1565 return (Elf_Addr)fdesc;
1566 }
1567 #endif /* CONFIG_64BIT */
1568@@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
1569
1570 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1571 end = table + sechdrs[me->arch.unwind_section].sh_size;
1572- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1573+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1574
1575 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1576 me->arch.unwind_section, table, end, gp);
1577diff -urNp linux-2.6.32.41/arch/parisc/kernel/sys_parisc.c linux-2.6.32.41/arch/parisc/kernel/sys_parisc.c
1578--- linux-2.6.32.41/arch/parisc/kernel/sys_parisc.c 2011-03-27 14:31:47.000000000 -0400
1579+++ linux-2.6.32.41/arch/parisc/kernel/sys_parisc.c 2011-04-17 15:56:45.000000000 -0400
1580@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1581 /* At this point: (!vma || addr < vma->vm_end). */
1582 if (TASK_SIZE - len < addr)
1583 return -ENOMEM;
1584- if (!vma || addr + len <= vma->vm_start)
1585+ if (check_heap_stack_gap(vma, addr, len))
1586 return addr;
1587 addr = vma->vm_end;
1588 }
1589@@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1590 /* At this point: (!vma || addr < vma->vm_end). */
1591 if (TASK_SIZE - len < addr)
1592 return -ENOMEM;
1593- if (!vma || addr + len <= vma->vm_start)
1594+ if (check_heap_stack_gap(vma, addr, len))
1595 return addr;
1596 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1597 if (addr < vma->vm_end) /* handle wraparound */
1598@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1599 if (flags & MAP_FIXED)
1600 return addr;
1601 if (!addr)
1602- addr = TASK_UNMAPPED_BASE;
1603+ addr = current->mm->mmap_base;
1604
1605 if (filp) {
1606 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1607diff -urNp linux-2.6.32.41/arch/parisc/kernel/traps.c linux-2.6.32.41/arch/parisc/kernel/traps.c
1608--- linux-2.6.32.41/arch/parisc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
1609+++ linux-2.6.32.41/arch/parisc/kernel/traps.c 2011-04-17 15:56:45.000000000 -0400
1610@@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1611
1612 down_read(&current->mm->mmap_sem);
1613 vma = find_vma(current->mm,regs->iaoq[0]);
1614- if (vma && (regs->iaoq[0] >= vma->vm_start)
1615- && (vma->vm_flags & VM_EXEC)) {
1616-
1617+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1618 fault_address = regs->iaoq[0];
1619 fault_space = regs->iasq[0];
1620
1621diff -urNp linux-2.6.32.41/arch/parisc/mm/fault.c linux-2.6.32.41/arch/parisc/mm/fault.c
1622--- linux-2.6.32.41/arch/parisc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1623+++ linux-2.6.32.41/arch/parisc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1624@@ -15,6 +15,7 @@
1625 #include <linux/sched.h>
1626 #include <linux/interrupt.h>
1627 #include <linux/module.h>
1628+#include <linux/unistd.h>
1629
1630 #include <asm/uaccess.h>
1631 #include <asm/traps.h>
1632@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1633 static unsigned long
1634 parisc_acctyp(unsigned long code, unsigned int inst)
1635 {
1636- if (code == 6 || code == 16)
1637+ if (code == 6 || code == 7 || code == 16)
1638 return VM_EXEC;
1639
1640 switch (inst & 0xf0000000) {
1641@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1642 }
1643 #endif
1644
1645+#ifdef CONFIG_PAX_PAGEEXEC
1646+/*
1647+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1648+ *
1649+ * returns 1 when task should be killed
1650+ * 2 when rt_sigreturn trampoline was detected
1651+ * 3 when unpatched PLT trampoline was detected
1652+ */
1653+static int pax_handle_fetch_fault(struct pt_regs *regs)
1654+{
1655+
1656+#ifdef CONFIG_PAX_EMUPLT
1657+ int err;
1658+
1659+ do { /* PaX: unpatched PLT emulation */
1660+ unsigned int bl, depwi;
1661+
1662+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1663+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1664+
1665+ if (err)
1666+ break;
1667+
1668+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1669+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1670+
1671+ err = get_user(ldw, (unsigned int *)addr);
1672+ err |= get_user(bv, (unsigned int *)(addr+4));
1673+ err |= get_user(ldw2, (unsigned int *)(addr+8));
1674+
1675+ if (err)
1676+ break;
1677+
1678+ if (ldw == 0x0E801096U &&
1679+ bv == 0xEAC0C000U &&
1680+ ldw2 == 0x0E881095U)
1681+ {
1682+ unsigned int resolver, map;
1683+
1684+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1685+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1686+ if (err)
1687+ break;
1688+
1689+ regs->gr[20] = instruction_pointer(regs)+8;
1690+ regs->gr[21] = map;
1691+ regs->gr[22] = resolver;
1692+ regs->iaoq[0] = resolver | 3UL;
1693+ regs->iaoq[1] = regs->iaoq[0] + 4;
1694+ return 3;
1695+ }
1696+ }
1697+ } while (0);
1698+#endif
1699+
1700+#ifdef CONFIG_PAX_EMUTRAMP
1701+
1702+#ifndef CONFIG_PAX_EMUSIGRT
1703+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1704+ return 1;
1705+#endif
1706+
1707+ do { /* PaX: rt_sigreturn emulation */
1708+ unsigned int ldi1, ldi2, bel, nop;
1709+
1710+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1711+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1712+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1713+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1714+
1715+ if (err)
1716+ break;
1717+
1718+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1719+ ldi2 == 0x3414015AU &&
1720+ bel == 0xE4008200U &&
1721+ nop == 0x08000240U)
1722+ {
1723+ regs->gr[25] = (ldi1 & 2) >> 1;
1724+ regs->gr[20] = __NR_rt_sigreturn;
1725+ regs->gr[31] = regs->iaoq[1] + 16;
1726+ regs->sr[0] = regs->iasq[1];
1727+ regs->iaoq[0] = 0x100UL;
1728+ regs->iaoq[1] = regs->iaoq[0] + 4;
1729+ regs->iasq[0] = regs->sr[2];
1730+ regs->iasq[1] = regs->sr[2];
1731+ return 2;
1732+ }
1733+ } while (0);
1734+#endif
1735+
1736+ return 1;
1737+}
1738+
1739+void pax_report_insns(void *pc, void *sp)
1740+{
1741+ unsigned long i;
1742+
1743+ printk(KERN_ERR "PAX: bytes at PC: ");
1744+ for (i = 0; i < 5; i++) {
1745+ unsigned int c;
1746+ if (get_user(c, (unsigned int *)pc+i))
1747+ printk(KERN_CONT "???????? ");
1748+ else
1749+ printk(KERN_CONT "%08x ", c);
1750+ }
1751+ printk("\n");
1752+}
1753+#endif
1754+
1755 int fixup_exception(struct pt_regs *regs)
1756 {
1757 const struct exception_table_entry *fix;
1758@@ -192,8 +303,33 @@ good_area:
1759
1760 acc_type = parisc_acctyp(code,regs->iir);
1761
1762- if ((vma->vm_flags & acc_type) != acc_type)
1763+ if ((vma->vm_flags & acc_type) != acc_type) {
1764+
1765+#ifdef CONFIG_PAX_PAGEEXEC
1766+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1767+ (address & ~3UL) == instruction_pointer(regs))
1768+ {
1769+ up_read(&mm->mmap_sem);
1770+ switch (pax_handle_fetch_fault(regs)) {
1771+
1772+#ifdef CONFIG_PAX_EMUPLT
1773+ case 3:
1774+ return;
1775+#endif
1776+
1777+#ifdef CONFIG_PAX_EMUTRAMP
1778+ case 2:
1779+ return;
1780+#endif
1781+
1782+ }
1783+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1784+ do_group_exit(SIGKILL);
1785+ }
1786+#endif
1787+
1788 goto bad_area;
1789+ }
1790
1791 /*
1792 * If for any reason at all we couldn't handle the fault, make
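
The EMUPLT and EMUTRAMP handlers above fetch instruction words at the faulting PC with get_user() and compare them against fixed opcode patterns before deciding to emulate. The standalone sketch below only mirrors that shape; matches_trampoline and the opcode values are invented for illustration and are not the real parisc encodings.

#include <stdio.h>
#include <stdint.h>

static int matches_trampoline(const uint32_t *pc)
{
	/* hypothetical three-word signature */
	return pc[0] == 0xDEAD0001u && pc[1] == 0xDEAD0002u && pc[2] == 0xDEAD0003u;
}

int main(void)
{
	uint32_t fake_code[3] = { 0xDEAD0001u, 0xDEAD0002u, 0xDEAD0003u };
	printf("%s\n", matches_trampoline(fake_code) ? "trampoline" : "no match");
	return 0;
}
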
1793diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/device.h linux-2.6.32.41/arch/powerpc/include/asm/device.h
1794--- linux-2.6.32.41/arch/powerpc/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
1795+++ linux-2.6.32.41/arch/powerpc/include/asm/device.h 2011-04-17 15:56:45.000000000 -0400
1796@@ -14,7 +14,7 @@ struct dev_archdata {
1797 struct device_node *of_node;
1798
1799 /* DMA operations on that device */
1800- struct dma_map_ops *dma_ops;
1801+ const struct dma_map_ops *dma_ops;
1802
1803 /*
1804 * When an iommu is in use, dma_data is used as a ptr to the base of the
1805diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/dma-mapping.h linux-2.6.32.41/arch/powerpc/include/asm/dma-mapping.h
1806--- linux-2.6.32.41/arch/powerpc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
1807+++ linux-2.6.32.41/arch/powerpc/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
1808@@ -69,9 +69,9 @@ static inline unsigned long device_to_ma
1809 #ifdef CONFIG_PPC64
1810 extern struct dma_map_ops dma_iommu_ops;
1811 #endif
1812-extern struct dma_map_ops dma_direct_ops;
1813+extern const struct dma_map_ops dma_direct_ops;
1814
1815-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
1816+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
1817 {
1818 /* We don't handle the NULL dev case for ISA for now. We could
1819 * do it via an out of line call but it is not needed for now. The
1820@@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dm
1821 return dev->archdata.dma_ops;
1822 }
1823
1824-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
1825+static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
1826 {
1827 dev->archdata.dma_ops = ops;
1828 }
1829@@ -118,7 +118,7 @@ static inline void set_dma_offset(struct
1830
1831 static inline int dma_supported(struct device *dev, u64 mask)
1832 {
1833- struct dma_map_ops *dma_ops = get_dma_ops(dev);
1834+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
1835
1836 if (unlikely(dma_ops == NULL))
1837 return 0;
1838@@ -132,7 +132,7 @@ static inline int dma_supported(struct d
1839
1840 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
1841 {
1842- struct dma_map_ops *dma_ops = get_dma_ops(dev);
1843+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
1844
1845 if (unlikely(dma_ops == NULL))
1846 return -EIO;
1847@@ -147,7 +147,7 @@ static inline int dma_set_mask(struct de
1848 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1849 dma_addr_t *dma_handle, gfp_t flag)
1850 {
1851- struct dma_map_ops *dma_ops = get_dma_ops(dev);
1852+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
1853 void *cpu_addr;
1854
1855 BUG_ON(!dma_ops);
1856@@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(s
1857 static inline void dma_free_coherent(struct device *dev, size_t size,
1858 void *cpu_addr, dma_addr_t dma_handle)
1859 {
1860- struct dma_map_ops *dma_ops = get_dma_ops(dev);
1861+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
1862
1863 BUG_ON(!dma_ops);
1864
1865@@ -173,7 +173,7 @@ static inline void dma_free_coherent(str
1866
1867 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
1868 {
1869- struct dma_map_ops *dma_ops = get_dma_ops(dev);
1870+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
1871
1872 if (dma_ops->mapping_error)
1873 return dma_ops->mapping_error(dev, dma_addr);
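
The dma_map_ops hunks above add const to the ops pointers so the tables themselves can be declared const. As a minimal standalone illustration (struct ops, do_thing_impl and my_ops are invented names), a const-qualified table of function pointers can be placed by the compiler in a read-only section and cannot be quietly overwritten at run time.

#include <stdio.h>

struct ops {
	int (*do_thing)(int);
};

static int do_thing_impl(int x) { return x * 2; }

static const struct ops my_ops = {   /* typically ends up in .rodata */
	.do_thing = do_thing_impl,
};

int main(void)
{
	/* my_ops.do_thing = NULL; would fail to compile:
	 * assignment of member of read-only object */
	printf("%d\n", my_ops.do_thing(21));
	return 0;
}
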
1874diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/elf.h linux-2.6.32.41/arch/powerpc/include/asm/elf.h
1875--- linux-2.6.32.41/arch/powerpc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1876+++ linux-2.6.32.41/arch/powerpc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1877@@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
1878 the loader. We need to make sure that it is out of the way of the program
1879 that it will "exec", and that there is sufficient room for the brk. */
1880
1881-extern unsigned long randomize_et_dyn(unsigned long base);
1882-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
1883+#define ELF_ET_DYN_BASE (0x20000000)
1884+
1885+#ifdef CONFIG_PAX_ASLR
1886+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
1887+
1888+#ifdef __powerpc64__
1889+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
1890+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
1891+#else
1892+#define PAX_DELTA_MMAP_LEN 15
1893+#define PAX_DELTA_STACK_LEN 15
1894+#endif
1895+#endif
1896
1897 /*
1898 * Our registers are always unsigned longs, whether we're a 32 bit
1899@@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(s
1900 (0x7ff >> (PAGE_SHIFT - 12)) : \
1901 (0x3ffff >> (PAGE_SHIFT - 12)))
1902
1903-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1904-#define arch_randomize_brk arch_randomize_brk
1905-
1906 #endif /* __KERNEL__ */
1907
1908 /*
1909diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/iommu.h linux-2.6.32.41/arch/powerpc/include/asm/iommu.h
1910--- linux-2.6.32.41/arch/powerpc/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
1911+++ linux-2.6.32.41/arch/powerpc/include/asm/iommu.h 2011-04-17 15:56:45.000000000 -0400
1912@@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(voi
1913 extern void iommu_init_early_dart(void);
1914 extern void iommu_init_early_pasemi(void);
1915
1916+/* dma-iommu.c */
1917+extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
1918+
1919 #ifdef CONFIG_PCI
1920 extern void pci_iommu_init(void);
1921 extern void pci_direct_iommu_init(void);
1922diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/kmap_types.h linux-2.6.32.41/arch/powerpc/include/asm/kmap_types.h
1923--- linux-2.6.32.41/arch/powerpc/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
1924+++ linux-2.6.32.41/arch/powerpc/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
1925@@ -26,6 +26,7 @@ enum km_type {
1926 KM_SOFTIRQ1,
1927 KM_PPC_SYNC_PAGE,
1928 KM_PPC_SYNC_ICACHE,
1929+ KM_CLEARPAGE,
1930 KM_TYPE_NR
1931 };
1932
1933diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/page_64.h linux-2.6.32.41/arch/powerpc/include/asm/page_64.h
1934--- linux-2.6.32.41/arch/powerpc/include/asm/page_64.h 2011-03-27 14:31:47.000000000 -0400
1935+++ linux-2.6.32.41/arch/powerpc/include/asm/page_64.h 2011-04-17 15:56:45.000000000 -0400
1936@@ -180,15 +180,18 @@ do { \
1937 * stack by default, so in the absense of a PT_GNU_STACK program header
1938 * we turn execute permission off.
1939 */
1940-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1941- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1942+#define VM_STACK_DEFAULT_FLAGS32 \
1943+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1944+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1945
1946 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1947 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1948
1949+#ifndef CONFIG_PAX_PAGEEXEC
1950 #define VM_STACK_DEFAULT_FLAGS \
1951 (test_thread_flag(TIF_32BIT) ? \
1952 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
1953+#endif
1954
1955 #include <asm-generic/getorder.h>
1956
1957diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/page.h linux-2.6.32.41/arch/powerpc/include/asm/page.h
1958--- linux-2.6.32.41/arch/powerpc/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
1959+++ linux-2.6.32.41/arch/powerpc/include/asm/page.h 2011-04-17 15:56:45.000000000 -0400
1960@@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
1961 * and needs to be executable. This means the whole heap ends
1962 * up being executable.
1963 */
1964-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1965- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1966+#define VM_DATA_DEFAULT_FLAGS32 \
1967+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1968+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1969
1970 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1971 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1972@@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
1973 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
1974 #endif
1975
1976+#define ktla_ktva(addr) (addr)
1977+#define ktva_ktla(addr) (addr)
1978+
1979 #ifndef __ASSEMBLY__
1980
1981 #undef STRICT_MM_TYPECHECKS
1982diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/pci.h linux-2.6.32.41/arch/powerpc/include/asm/pci.h
1983--- linux-2.6.32.41/arch/powerpc/include/asm/pci.h 2011-03-27 14:31:47.000000000 -0400
1984+++ linux-2.6.32.41/arch/powerpc/include/asm/pci.h 2011-04-17 15:56:45.000000000 -0400
1985@@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq
1986 }
1987
1988 #ifdef CONFIG_PCI
1989-extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
1990-extern struct dma_map_ops *get_pci_dma_ops(void);
1991+extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
1992+extern const struct dma_map_ops *get_pci_dma_ops(void);
1993 #else /* CONFIG_PCI */
1994 #define set_pci_dma_ops(d)
1995 #define get_pci_dma_ops() NULL
1996diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/pgtable.h linux-2.6.32.41/arch/powerpc/include/asm/pgtable.h
1997--- linux-2.6.32.41/arch/powerpc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1998+++ linux-2.6.32.41/arch/powerpc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1999@@ -2,6 +2,7 @@
2000 #define _ASM_POWERPC_PGTABLE_H
2001 #ifdef __KERNEL__
2002
2003+#include <linux/const.h>
2004 #ifndef __ASSEMBLY__
2005 #include <asm/processor.h> /* For TASK_SIZE */
2006 #include <asm/mmu.h>
2007diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/pte-hash32.h linux-2.6.32.41/arch/powerpc/include/asm/pte-hash32.h
2008--- linux-2.6.32.41/arch/powerpc/include/asm/pte-hash32.h 2011-03-27 14:31:47.000000000 -0400
2009+++ linux-2.6.32.41/arch/powerpc/include/asm/pte-hash32.h 2011-04-17 15:56:45.000000000 -0400
2010@@ -21,6 +21,7 @@
2011 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2012 #define _PAGE_USER 0x004 /* usermode access allowed */
2013 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2014+#define _PAGE_EXEC _PAGE_GUARDED
2015 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2016 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2017 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2018diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/reg.h linux-2.6.32.41/arch/powerpc/include/asm/reg.h
2019--- linux-2.6.32.41/arch/powerpc/include/asm/reg.h 2011-03-27 14:31:47.000000000 -0400
2020+++ linux-2.6.32.41/arch/powerpc/include/asm/reg.h 2011-04-17 15:56:45.000000000 -0400
2021@@ -191,6 +191,7 @@
2022 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2023 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2024 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2025+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2026 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2027 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2028 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2029diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/swiotlb.h linux-2.6.32.41/arch/powerpc/include/asm/swiotlb.h
2030--- linux-2.6.32.41/arch/powerpc/include/asm/swiotlb.h 2011-03-27 14:31:47.000000000 -0400
2031+++ linux-2.6.32.41/arch/powerpc/include/asm/swiotlb.h 2011-04-17 15:56:45.000000000 -0400
2032@@ -13,7 +13,7 @@
2033
2034 #include <linux/swiotlb.h>
2035
2036-extern struct dma_map_ops swiotlb_dma_ops;
2037+extern const struct dma_map_ops swiotlb_dma_ops;
2038
2039 static inline void dma_mark_clean(void *addr, size_t size) {}
2040
2041diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/system.h linux-2.6.32.41/arch/powerpc/include/asm/system.h
2042--- linux-2.6.32.41/arch/powerpc/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
2043+++ linux-2.6.32.41/arch/powerpc/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
2044@@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
2045 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2046 #endif
2047
2048-extern unsigned long arch_align_stack(unsigned long sp);
2049+#define arch_align_stack(x) ((x) & ~0xfUL)
2050
2051 /* Used in very early kernel initialization. */
2052 extern unsigned long reloc_offset(void);
2053diff -urNp linux-2.6.32.41/arch/powerpc/include/asm/uaccess.h linux-2.6.32.41/arch/powerpc/include/asm/uaccess.h
2054--- linux-2.6.32.41/arch/powerpc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
2055+++ linux-2.6.32.41/arch/powerpc/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
2056@@ -13,6 +13,8 @@
2057 #define VERIFY_READ 0
2058 #define VERIFY_WRITE 1
2059
2060+extern void check_object_size(const void *ptr, unsigned long n, bool to);
2061+
2062 /*
2063 * The fs value determines whether argument validity checking should be
2064 * performed or not. If get_fs() == USER_DS, checking is performed, with
2065@@ -327,52 +329,6 @@ do { \
2066 extern unsigned long __copy_tofrom_user(void __user *to,
2067 const void __user *from, unsigned long size);
2068
2069-#ifndef __powerpc64__
2070-
2071-static inline unsigned long copy_from_user(void *to,
2072- const void __user *from, unsigned long n)
2073-{
2074- unsigned long over;
2075-
2076- if (access_ok(VERIFY_READ, from, n))
2077- return __copy_tofrom_user((__force void __user *)to, from, n);
2078- if ((unsigned long)from < TASK_SIZE) {
2079- over = (unsigned long)from + n - TASK_SIZE;
2080- return __copy_tofrom_user((__force void __user *)to, from,
2081- n - over) + over;
2082- }
2083- return n;
2084-}
2085-
2086-static inline unsigned long copy_to_user(void __user *to,
2087- const void *from, unsigned long n)
2088-{
2089- unsigned long over;
2090-
2091- if (access_ok(VERIFY_WRITE, to, n))
2092- return __copy_tofrom_user(to, (__force void __user *)from, n);
2093- if ((unsigned long)to < TASK_SIZE) {
2094- over = (unsigned long)to + n - TASK_SIZE;
2095- return __copy_tofrom_user(to, (__force void __user *)from,
2096- n - over) + over;
2097- }
2098- return n;
2099-}
2100-
2101-#else /* __powerpc64__ */
2102-
2103-#define __copy_in_user(to, from, size) \
2104- __copy_tofrom_user((to), (from), (size))
2105-
2106-extern unsigned long copy_from_user(void *to, const void __user *from,
2107- unsigned long n);
2108-extern unsigned long copy_to_user(void __user *to, const void *from,
2109- unsigned long n);
2110-extern unsigned long copy_in_user(void __user *to, const void __user *from,
2111- unsigned long n);
2112-
2113-#endif /* __powerpc64__ */
2114-
2115 static inline unsigned long __copy_from_user_inatomic(void *to,
2116 const void __user *from, unsigned long n)
2117 {
2118@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
2119 if (ret == 0)
2120 return 0;
2121 }
2122+
2123+ if (!__builtin_constant_p(n))
2124+ check_object_size(to, n, false);
2125+
2126 return __copy_tofrom_user((__force void __user *)to, from, n);
2127 }
2128
2129@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
2130 if (ret == 0)
2131 return 0;
2132 }
2133+
2134+ if (!__builtin_constant_p(n))
2135+ check_object_size(from, n, true);
2136+
2137 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2138 }
2139
2140@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
2141 return __copy_to_user_inatomic(to, from, size);
2142 }
2143
2144+#ifndef __powerpc64__
2145+
2146+static inline unsigned long __must_check copy_from_user(void *to,
2147+ const void __user *from, unsigned long n)
2148+{
2149+ unsigned long over;
2150+
2151+ if ((long)n < 0)
2152+ return n;
2153+
2154+ if (access_ok(VERIFY_READ, from, n)) {
2155+ if (!__builtin_constant_p(n))
2156+ check_object_size(to, n, false);
2157+ return __copy_tofrom_user((__force void __user *)to, from, n);
2158+ }
2159+ if ((unsigned long)from < TASK_SIZE) {
2160+ over = (unsigned long)from + n - TASK_SIZE;
2161+ if (!__builtin_constant_p(n - over))
2162+ check_object_size(to, n - over, false);
2163+ return __copy_tofrom_user((__force void __user *)to, from,
2164+ n - over) + over;
2165+ }
2166+ return n;
2167+}
2168+
2169+static inline unsigned long __must_check copy_to_user(void __user *to,
2170+ const void *from, unsigned long n)
2171+{
2172+ unsigned long over;
2173+
2174+ if ((long)n < 0)
2175+ return n;
2176+
2177+ if (access_ok(VERIFY_WRITE, to, n)) {
2178+ if (!__builtin_constant_p(n))
2179+ check_object_size(from, n, true);
2180+ return __copy_tofrom_user(to, (__force void __user *)from, n);
2181+ }
2182+ if ((unsigned long)to < TASK_SIZE) {
2183+ over = (unsigned long)to + n - TASK_SIZE;
2184+ if (!__builtin_constant_p(n))
2185+ check_object_size(from, n - over, true);
2186+ return __copy_tofrom_user(to, (__force void __user *)from,
2187+ n - over) + over;
2188+ }
2189+ return n;
2190+}
2191+
2192+#else /* __powerpc64__ */
2193+
2194+#define __copy_in_user(to, from, size) \
2195+ __copy_tofrom_user((to), (from), (size))
2196+
2197+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2198+{
2199+ if ((long)n < 0 || n > INT_MAX)
2200+ return n;
2201+
2202+ if (!__builtin_constant_p(n))
2203+ check_object_size(to, n, false);
2204+
2205+ if (likely(access_ok(VERIFY_READ, from, n)))
2206+ n = __copy_from_user(to, from, n);
2207+ else
2208+ memset(to, 0, n);
2209+ return n;
2210+}
2211+
2212+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2213+{
2214+ if ((long)n < 0 || n > INT_MAX)
2215+ return n;
2216+
2217+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
2218+ if (!__builtin_constant_p(n))
2219+ check_object_size(from, n, true);
2220+ n = __copy_to_user(to, from, n);
2221+ }
2222+ return n;
2223+}
2224+
2225+extern unsigned long copy_in_user(void __user *to, const void __user *from,
2226+ unsigned long n);
2227+
2228+#endif /* __powerpc64__ */
2229+
2230 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2231
2232 static inline unsigned long clear_user(void __user *addr, unsigned long size)
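
The reworked copy_from_user/copy_to_user above reject lengths whose sign bit is set, i.e. (long)n < 0, before doing any access_ok or copy work. The standalone snippet below illustrates the pitfall that guard addresses; the values are made up.

#include <stdio.h>

int main(void)
{
	long computed = 16 - 32;            /* a length calculation gone negative */
	unsigned long n = (unsigned long)computed;

	printf("n = %lu\n", n);             /* 18446744073709551600 on LP64 */
	if ((long)n < 0)
		printf("rejected: negative length\n");
	return 0;
}
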
2233diff -urNp linux-2.6.32.41/arch/powerpc/kernel/cacheinfo.c linux-2.6.32.41/arch/powerpc/kernel/cacheinfo.c
2234--- linux-2.6.32.41/arch/powerpc/kernel/cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
2235+++ linux-2.6.32.41/arch/powerpc/kernel/cacheinfo.c 2011-04-17 15:56:45.000000000 -0400
2236@@ -642,7 +642,7 @@ static struct kobj_attribute *cache_inde
2237 &cache_assoc_attr,
2238 };
2239
2240-static struct sysfs_ops cache_index_ops = {
2241+static const struct sysfs_ops cache_index_ops = {
2242 .show = cache_index_show,
2243 };
2244
2245diff -urNp linux-2.6.32.41/arch/powerpc/kernel/dma.c linux-2.6.32.41/arch/powerpc/kernel/dma.c
2246--- linux-2.6.32.41/arch/powerpc/kernel/dma.c 2011-03-27 14:31:47.000000000 -0400
2247+++ linux-2.6.32.41/arch/powerpc/kernel/dma.c 2011-04-17 15:56:45.000000000 -0400
2248@@ -134,7 +134,7 @@ static inline void dma_direct_sync_singl
2249 }
2250 #endif
2251
2252-struct dma_map_ops dma_direct_ops = {
2253+const struct dma_map_ops dma_direct_ops = {
2254 .alloc_coherent = dma_direct_alloc_coherent,
2255 .free_coherent = dma_direct_free_coherent,
2256 .map_sg = dma_direct_map_sg,
2257diff -urNp linux-2.6.32.41/arch/powerpc/kernel/dma-iommu.c linux-2.6.32.41/arch/powerpc/kernel/dma-iommu.c
2258--- linux-2.6.32.41/arch/powerpc/kernel/dma-iommu.c 2011-03-27 14:31:47.000000000 -0400
2259+++ linux-2.6.32.41/arch/powerpc/kernel/dma-iommu.c 2011-04-17 15:56:45.000000000 -0400
2260@@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct de
2261 }
2262
2263 /* We support DMA to/from any memory page via the iommu */
2264-static int dma_iommu_dma_supported(struct device *dev, u64 mask)
2265+int dma_iommu_dma_supported(struct device *dev, u64 mask)
2266 {
2267 struct iommu_table *tbl = get_iommu_table_base(dev);
2268
2269diff -urNp linux-2.6.32.41/arch/powerpc/kernel/dma-swiotlb.c linux-2.6.32.41/arch/powerpc/kernel/dma-swiotlb.c
2270--- linux-2.6.32.41/arch/powerpc/kernel/dma-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
2271+++ linux-2.6.32.41/arch/powerpc/kernel/dma-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
2272@@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
2273 * map_page, and unmap_page on highmem, use normal dma_ops
2274 * for everything else.
2275 */
2276-struct dma_map_ops swiotlb_dma_ops = {
2277+const struct dma_map_ops swiotlb_dma_ops = {
2278 .alloc_coherent = dma_direct_alloc_coherent,
2279 .free_coherent = dma_direct_free_coherent,
2280 .map_sg = swiotlb_map_sg_attrs,
2281diff -urNp linux-2.6.32.41/arch/powerpc/kernel/exceptions-64e.S linux-2.6.32.41/arch/powerpc/kernel/exceptions-64e.S
2282--- linux-2.6.32.41/arch/powerpc/kernel/exceptions-64e.S 2011-03-27 14:31:47.000000000 -0400
2283+++ linux-2.6.32.41/arch/powerpc/kernel/exceptions-64e.S 2011-04-17 15:56:45.000000000 -0400
2284@@ -455,6 +455,7 @@ storage_fault_common:
2285 std r14,_DAR(r1)
2286 std r15,_DSISR(r1)
2287 addi r3,r1,STACK_FRAME_OVERHEAD
2288+ bl .save_nvgprs
2289 mr r4,r14
2290 mr r5,r15
2291 ld r14,PACA_EXGEN+EX_R14(r13)
2292@@ -464,8 +465,7 @@ storage_fault_common:
2293 cmpdi r3,0
2294 bne- 1f
2295 b .ret_from_except_lite
2296-1: bl .save_nvgprs
2297- mr r5,r3
2298+1: mr r5,r3
2299 addi r3,r1,STACK_FRAME_OVERHEAD
2300 ld r4,_DAR(r1)
2301 bl .bad_page_fault
2302diff -urNp linux-2.6.32.41/arch/powerpc/kernel/exceptions-64s.S linux-2.6.32.41/arch/powerpc/kernel/exceptions-64s.S
2303--- linux-2.6.32.41/arch/powerpc/kernel/exceptions-64s.S 2011-03-27 14:31:47.000000000 -0400
2304+++ linux-2.6.32.41/arch/powerpc/kernel/exceptions-64s.S 2011-04-17 15:56:45.000000000 -0400
2305@@ -818,10 +818,10 @@ handle_page_fault:
2306 11: ld r4,_DAR(r1)
2307 ld r5,_DSISR(r1)
2308 addi r3,r1,STACK_FRAME_OVERHEAD
2309+ bl .save_nvgprs
2310 bl .do_page_fault
2311 cmpdi r3,0
2312 beq+ 13f
2313- bl .save_nvgprs
2314 mr r5,r3
2315 addi r3,r1,STACK_FRAME_OVERHEAD
2316 lwz r4,_DAR(r1)
2317diff -urNp linux-2.6.32.41/arch/powerpc/kernel/ibmebus.c linux-2.6.32.41/arch/powerpc/kernel/ibmebus.c
2318--- linux-2.6.32.41/arch/powerpc/kernel/ibmebus.c 2011-03-27 14:31:47.000000000 -0400
2319+++ linux-2.6.32.41/arch/powerpc/kernel/ibmebus.c 2011-04-17 15:56:45.000000000 -0400
2320@@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct
2321 return 1;
2322 }
2323
2324-static struct dma_map_ops ibmebus_dma_ops = {
2325+static const struct dma_map_ops ibmebus_dma_ops = {
2326 .alloc_coherent = ibmebus_alloc_coherent,
2327 .free_coherent = ibmebus_free_coherent,
2328 .map_sg = ibmebus_map_sg,
2329diff -urNp linux-2.6.32.41/arch/powerpc/kernel/kgdb.c linux-2.6.32.41/arch/powerpc/kernel/kgdb.c
2330--- linux-2.6.32.41/arch/powerpc/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
2331+++ linux-2.6.32.41/arch/powerpc/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
2332@@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct
2333 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
2334 return 0;
2335
2336- if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2337+ if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2338 regs->nip += 4;
2339
2340 return 1;
2341@@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vecto
2342 /*
2343 * Global data
2344 */
2345-struct kgdb_arch arch_kgdb_ops = {
2346+const struct kgdb_arch arch_kgdb_ops = {
2347 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
2348 };
2349
2350diff -urNp linux-2.6.32.41/arch/powerpc/kernel/module_32.c linux-2.6.32.41/arch/powerpc/kernel/module_32.c
2351--- linux-2.6.32.41/arch/powerpc/kernel/module_32.c 2011-03-27 14:31:47.000000000 -0400
2352+++ linux-2.6.32.41/arch/powerpc/kernel/module_32.c 2011-04-17 15:56:45.000000000 -0400
2353@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2354 me->arch.core_plt_section = i;
2355 }
2356 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2357- printk("Module doesn't contain .plt or .init.plt sections.\n");
2358+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2359 return -ENOEXEC;
2360 }
2361
2362@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2363
2364 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2365 /* Init, or core PLT? */
2366- if (location >= mod->module_core
2367- && location < mod->module_core + mod->core_size)
2368+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2369+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2370 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2371- else
2372+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2373+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2374 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2375+ else {
2376+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2377+ return ~0UL;
2378+ }
2379
2380 /* Find this entry, or if that fails, the next avail. entry */
2381 while (entry->jump[0]) {
2382diff -urNp linux-2.6.32.41/arch/powerpc/kernel/module.c linux-2.6.32.41/arch/powerpc/kernel/module.c
2383--- linux-2.6.32.41/arch/powerpc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
2384+++ linux-2.6.32.41/arch/powerpc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
2385@@ -31,11 +31,24 @@
2386
2387 LIST_HEAD(module_bug_list);
2388
2389+#ifdef CONFIG_PAX_KERNEXEC
2390 void *module_alloc(unsigned long size)
2391 {
2392 if (size == 0)
2393 return NULL;
2394
2395+ return vmalloc(size);
2396+}
2397+
2398+void *module_alloc_exec(unsigned long size)
2399+#else
2400+void *module_alloc(unsigned long size)
2401+#endif
2402+
2403+{
2404+ if (size == 0)
2405+ return NULL;
2406+
2407 return vmalloc_exec(size);
2408 }
2409
2410@@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2411 vfree(module_region);
2412 }
2413
2414+#ifdef CONFIG_PAX_KERNEXEC
2415+void module_free_exec(struct module *mod, void *module_region)
2416+{
2417+ module_free(mod, module_region);
2418+}
2419+#endif
2420+
2421 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2422 const Elf_Shdr *sechdrs,
2423 const char *name)
2424diff -urNp linux-2.6.32.41/arch/powerpc/kernel/pci-common.c linux-2.6.32.41/arch/powerpc/kernel/pci-common.c
2425--- linux-2.6.32.41/arch/powerpc/kernel/pci-common.c 2011-03-27 14:31:47.000000000 -0400
2426+++ linux-2.6.32.41/arch/powerpc/kernel/pci-common.c 2011-04-17 15:56:45.000000000 -0400
2427@@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
2428 unsigned int ppc_pci_flags = 0;
2429
2430
2431-static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2432+static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2433
2434-void set_pci_dma_ops(struct dma_map_ops *dma_ops)
2435+void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
2436 {
2437 pci_dma_ops = dma_ops;
2438 }
2439
2440-struct dma_map_ops *get_pci_dma_ops(void)
2441+const struct dma_map_ops *get_pci_dma_ops(void)
2442 {
2443 return pci_dma_ops;
2444 }
2445diff -urNp linux-2.6.32.41/arch/powerpc/kernel/process.c linux-2.6.32.41/arch/powerpc/kernel/process.c
2446--- linux-2.6.32.41/arch/powerpc/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
2447+++ linux-2.6.32.41/arch/powerpc/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
2448@@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
2449 * Lookup NIP late so we have the best change of getting the
2450 * above info out without failing
2451 */
2452- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2453- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2454+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2455+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2456 #endif
2457 show_stack(current, (unsigned long *) regs->gpr[1]);
2458 if (!user_mode(regs))
2459@@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk,
2460 newsp = stack[0];
2461 ip = stack[STACK_FRAME_LR_SAVE];
2462 if (!firstframe || ip != lr) {
2463- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2464+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2465 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2466 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2467- printk(" (%pS)",
2468+ printk(" (%pA)",
2469 (void *)current->ret_stack[curr_frame].ret);
2470 curr_frame--;
2471 }
2472@@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk,
2473 struct pt_regs *regs = (struct pt_regs *)
2474 (sp + STACK_FRAME_OVERHEAD);
2475 lr = regs->link;
2476- printk("--- Exception: %lx at %pS\n LR = %pS\n",
2477+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
2478 regs->trap, (void *)regs->nip, (void *)lr);
2479 firstframe = 1;
2480 }
2481@@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
2482 }
2483
2484 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2485-
2486-unsigned long arch_align_stack(unsigned long sp)
2487-{
2488- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2489- sp -= get_random_int() & ~PAGE_MASK;
2490- return sp & ~0xf;
2491-}
2492-
2493-static inline unsigned long brk_rnd(void)
2494-{
2495- unsigned long rnd = 0;
2496-
2497- /* 8MB for 32bit, 1GB for 64bit */
2498- if (is_32bit_task())
2499- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2500- else
2501- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2502-
2503- return rnd << PAGE_SHIFT;
2504-}
2505-
2506-unsigned long arch_randomize_brk(struct mm_struct *mm)
2507-{
2508- unsigned long base = mm->brk;
2509- unsigned long ret;
2510-
2511-#ifdef CONFIG_PPC_STD_MMU_64
2512- /*
2513- * If we are using 1TB segments and we are allowed to randomise
2514- * the heap, we can put it above 1TB so it is backed by a 1TB
2515- * segment. Otherwise the heap will be in the bottom 1TB
2516- * which always uses 256MB segments and this may result in a
2517- * performance penalty.
2518- */
2519- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2520- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2521-#endif
2522-
2523- ret = PAGE_ALIGN(base + brk_rnd());
2524-
2525- if (ret < mm->brk)
2526- return mm->brk;
2527-
2528- return ret;
2529-}
2530-
2531-unsigned long randomize_et_dyn(unsigned long base)
2532-{
2533- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2534-
2535- if (ret < base)
2536- return base;
2537-
2538- return ret;
2539-}
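
With the randomizing arch_align_stack() removed here, the asm/system.h hunk earlier in this patch supplies a plain 16-byte alignment mask, (x) & ~0xfUL. Below is a trivial standalone check of what that mask does; the sample stack pointer value is arbitrary.

#include <stdio.h>

#define align_stack(sp) ((sp) & ~0xfUL)

int main(void)
{
	unsigned long sp = 0x7ffff7ffe123UL;
	printf("%#lx -> %#lx\n", sp, align_stack(sp));   /* ...e123 -> ...e120 */
	return 0;
}
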
2540diff -urNp linux-2.6.32.41/arch/powerpc/kernel/signal_32.c linux-2.6.32.41/arch/powerpc/kernel/signal_32.c
2541--- linux-2.6.32.41/arch/powerpc/kernel/signal_32.c 2011-03-27 14:31:47.000000000 -0400
2542+++ linux-2.6.32.41/arch/powerpc/kernel/signal_32.c 2011-04-17 15:56:45.000000000 -0400
2543@@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig
2544 /* Save user registers on the stack */
2545 frame = &rt_sf->uc.uc_mcontext;
2546 addr = frame;
2547- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2548+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2549 if (save_user_regs(regs, frame, 0, 1))
2550 goto badframe;
2551 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2552diff -urNp linux-2.6.32.41/arch/powerpc/kernel/signal_64.c linux-2.6.32.41/arch/powerpc/kernel/signal_64.c
2553--- linux-2.6.32.41/arch/powerpc/kernel/signal_64.c 2011-03-27 14:31:47.000000000 -0400
2554+++ linux-2.6.32.41/arch/powerpc/kernel/signal_64.c 2011-04-17 15:56:45.000000000 -0400
2555@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct
2556 current->thread.fpscr.val = 0;
2557
2558 /* Set up to return from userspace. */
2559- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2560+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2561 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2562 } else {
2563 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2564diff -urNp linux-2.6.32.41/arch/powerpc/kernel/sys_ppc32.c linux-2.6.32.41/arch/powerpc/kernel/sys_ppc32.c
2565--- linux-2.6.32.41/arch/powerpc/kernel/sys_ppc32.c 2011-03-27 14:31:47.000000000 -0400
2566+++ linux-2.6.32.41/arch/powerpc/kernel/sys_ppc32.c 2011-04-17 15:56:45.000000000 -0400
2567@@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct
2568 if (oldlenp) {
2569 if (!error) {
2570 if (get_user(oldlen, oldlenp) ||
2571- put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
2572+ put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
2573+ copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
2574 error = -EFAULT;
2575 }
2576- copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
2577 }
2578 return error;
2579 }
2580diff -urNp linux-2.6.32.41/arch/powerpc/kernel/vdso.c linux-2.6.32.41/arch/powerpc/kernel/vdso.c
2581--- linux-2.6.32.41/arch/powerpc/kernel/vdso.c 2011-03-27 14:31:47.000000000 -0400
2582+++ linux-2.6.32.41/arch/powerpc/kernel/vdso.c 2011-04-17 15:56:45.000000000 -0400
2583@@ -36,6 +36,7 @@
2584 #include <asm/firmware.h>
2585 #include <asm/vdso.h>
2586 #include <asm/vdso_datapage.h>
2587+#include <asm/mman.h>
2588
2589 #include "setup.h"
2590
2591@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2592 vdso_base = VDSO32_MBASE;
2593 #endif
2594
2595- current->mm->context.vdso_base = 0;
2596+ current->mm->context.vdso_base = ~0UL;
2597
2598 /* vDSO has a problem and was disabled, just don't "enable" it for the
2599 * process
2600@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2601 vdso_base = get_unmapped_area(NULL, vdso_base,
2602 (vdso_pages << PAGE_SHIFT) +
2603 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2604- 0, 0);
2605+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
2606 if (IS_ERR_VALUE(vdso_base)) {
2607 rc = vdso_base;
2608 goto fail_mmapsem;
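
The vdso.c and signal hunks above switch the "no vDSO" sentinel from 0 to ~0UL: vdso_base is set to ~0UL when the vDSO is absent and the signal code tests against that value, since ~0UL can never be a valid page-aligned mapping base. Below is a minimal standalone sketch of the sentinel idea; NO_VDSO and vdso_state are invented names.

#include <stdio.h>

#define NO_VDSO (~0UL)

static const char *vdso_state(unsigned long vdso_base)
{
	return vdso_base == NO_VDSO ? "not mapped" : "mapped";
}

int main(void)
{
	printf("%s\n", vdso_state(NO_VDSO));      /* not mapped */
	printf("%s\n", vdso_state(0x100000UL));   /* mapped */
	printf("%s\n", vdso_state(0));            /* 0 stays representable as a base */
	return 0;
}
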
2609diff -urNp linux-2.6.32.41/arch/powerpc/kernel/vio.c linux-2.6.32.41/arch/powerpc/kernel/vio.c
2610--- linux-2.6.32.41/arch/powerpc/kernel/vio.c 2011-03-27 14:31:47.000000000 -0400
2611+++ linux-2.6.32.41/arch/powerpc/kernel/vio.c 2011-04-17 15:56:45.000000000 -0400
2612@@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struc
2613 vio_cmo_dealloc(viodev, alloc_size);
2614 }
2615
2616-struct dma_map_ops vio_dma_mapping_ops = {
2617+static const struct dma_map_ops vio_dma_mapping_ops = {
2618 .alloc_coherent = vio_dma_iommu_alloc_coherent,
2619 .free_coherent = vio_dma_iommu_free_coherent,
2620 .map_sg = vio_dma_iommu_map_sg,
2621 .unmap_sg = vio_dma_iommu_unmap_sg,
2622+ .dma_supported = dma_iommu_dma_supported,
2623 .map_page = vio_dma_iommu_map_page,
2624 .unmap_page = vio_dma_iommu_unmap_page,
2625
2626@@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vi
2627
2628 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
2629 {
2630- vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
2631 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
2632 }
2633
2634diff -urNp linux-2.6.32.41/arch/powerpc/lib/usercopy_64.c linux-2.6.32.41/arch/powerpc/lib/usercopy_64.c
2635--- linux-2.6.32.41/arch/powerpc/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
2636+++ linux-2.6.32.41/arch/powerpc/lib/usercopy_64.c 2011-04-17 15:56:45.000000000 -0400
2637@@ -9,22 +9,6 @@
2638 #include <linux/module.h>
2639 #include <asm/uaccess.h>
2640
2641-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2642-{
2643- if (likely(access_ok(VERIFY_READ, from, n)))
2644- n = __copy_from_user(to, from, n);
2645- else
2646- memset(to, 0, n);
2647- return n;
2648-}
2649-
2650-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2651-{
2652- if (likely(access_ok(VERIFY_WRITE, to, n)))
2653- n = __copy_to_user(to, from, n);
2654- return n;
2655-}
2656-
2657 unsigned long copy_in_user(void __user *to, const void __user *from,
2658 unsigned long n)
2659 {
2660@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2661 return n;
2662 }
2663
2664-EXPORT_SYMBOL(copy_from_user);
2665-EXPORT_SYMBOL(copy_to_user);
2666 EXPORT_SYMBOL(copy_in_user);
2667
2668diff -urNp linux-2.6.32.41/arch/powerpc/mm/fault.c linux-2.6.32.41/arch/powerpc/mm/fault.c
2669--- linux-2.6.32.41/arch/powerpc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
2670+++ linux-2.6.32.41/arch/powerpc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
2671@@ -30,6 +30,10 @@
2672 #include <linux/kprobes.h>
2673 #include <linux/kdebug.h>
2674 #include <linux/perf_event.h>
2675+#include <linux/slab.h>
2676+#include <linux/pagemap.h>
2677+#include <linux/compiler.h>
2678+#include <linux/unistd.h>
2679
2680 #include <asm/firmware.h>
2681 #include <asm/page.h>
2682@@ -40,6 +44,7 @@
2683 #include <asm/uaccess.h>
2684 #include <asm/tlbflush.h>
2685 #include <asm/siginfo.h>
2686+#include <asm/ptrace.h>
2687
2688
2689 #ifdef CONFIG_KPROBES
2690@@ -64,6 +69,33 @@ static inline int notify_page_fault(stru
2691 }
2692 #endif
2693
2694+#ifdef CONFIG_PAX_PAGEEXEC
2695+/*
2696+ * PaX: decide what to do with offenders (regs->nip = fault address)
2697+ *
2698+ * returns 1 when task should be killed
2699+ */
2700+static int pax_handle_fetch_fault(struct pt_regs *regs)
2701+{
2702+ return 1;
2703+}
2704+
2705+void pax_report_insns(void *pc, void *sp)
2706+{
2707+ unsigned long i;
2708+
2709+ printk(KERN_ERR "PAX: bytes at PC: ");
2710+ for (i = 0; i < 5; i++) {
2711+ unsigned int c;
2712+ if (get_user(c, (unsigned int __user *)pc+i))
2713+ printk(KERN_CONT "???????? ");
2714+ else
2715+ printk(KERN_CONT "%08x ", c);
2716+ }
2717+ printk("\n");
2718+}
2719+#endif
2720+
2721 /*
2722 * Check whether the instruction at regs->nip is a store using
2723 * an update addressing form which will update r1.
2724@@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_re
2725 * indicate errors in DSISR but can validly be set in SRR1.
2726 */
2727 if (trap == 0x400)
2728- error_code &= 0x48200000;
2729+ error_code &= 0x58200000;
2730 else
2731 is_write = error_code & DSISR_ISSTORE;
2732 #else
2733@@ -250,7 +282,7 @@ good_area:
2734 * "undefined". Of those that can be set, this is the only
2735 * one which seems bad.
2736 */
2737- if (error_code & 0x10000000)
2738+ if (error_code & DSISR_GUARDED)
2739 /* Guarded storage error. */
2740 goto bad_area;
2741 #endif /* CONFIG_8xx */
2742@@ -265,7 +297,7 @@ good_area:
2743 * processors use the same I/D cache coherency mechanism
2744 * as embedded.
2745 */
2746- if (error_code & DSISR_PROTFAULT)
2747+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2748 goto bad_area;
2749 #endif /* CONFIG_PPC_STD_MMU */
2750
2751@@ -335,6 +367,23 @@ bad_area:
2752 bad_area_nosemaphore:
2753 /* User mode accesses cause a SIGSEGV */
2754 if (user_mode(regs)) {
2755+
2756+#ifdef CONFIG_PAX_PAGEEXEC
2757+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2758+#ifdef CONFIG_PPC_STD_MMU
2759+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2760+#else
2761+ if (is_exec && regs->nip == address) {
2762+#endif
2763+ switch (pax_handle_fetch_fault(regs)) {
2764+ }
2765+
2766+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2767+ do_group_exit(SIGKILL);
2768+ }
2769+ }
2770+#endif
2771+
2772 _exception(SIGSEGV, regs, code, address);
2773 return 0;
2774 }
2775diff -urNp linux-2.6.32.41/arch/powerpc/mm/mmap_64.c linux-2.6.32.41/arch/powerpc/mm/mmap_64.c
2776--- linux-2.6.32.41/arch/powerpc/mm/mmap_64.c 2011-03-27 14:31:47.000000000 -0400
2777+++ linux-2.6.32.41/arch/powerpc/mm/mmap_64.c 2011-04-17 15:56:45.000000000 -0400
2778@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2779 */
2780 if (mmap_is_legacy()) {
2781 mm->mmap_base = TASK_UNMAPPED_BASE;
2782+
2783+#ifdef CONFIG_PAX_RANDMMAP
2784+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2785+ mm->mmap_base += mm->delta_mmap;
2786+#endif
2787+
2788 mm->get_unmapped_area = arch_get_unmapped_area;
2789 mm->unmap_area = arch_unmap_area;
2790 } else {
2791 mm->mmap_base = mmap_base();
2792+
2793+#ifdef CONFIG_PAX_RANDMMAP
2794+ if (mm->pax_flags & MF_PAX_RANDMMAP)
2795+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2796+#endif
2797+
2798 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2799 mm->unmap_area = arch_unmap_area_topdown;
2800 }
2801diff -urNp linux-2.6.32.41/arch/powerpc/mm/slice.c linux-2.6.32.41/arch/powerpc/mm/slice.c
2802--- linux-2.6.32.41/arch/powerpc/mm/slice.c 2011-03-27 14:31:47.000000000 -0400
2803+++ linux-2.6.32.41/arch/powerpc/mm/slice.c 2011-04-17 15:56:45.000000000 -0400
2804@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
2805 if ((mm->task_size - len) < addr)
2806 return 0;
2807 vma = find_vma(mm, addr);
2808- return (!vma || (addr + len) <= vma->vm_start);
2809+ return check_heap_stack_gap(vma, addr, len);
2810 }
2811
2812 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2813@@ -256,7 +256,7 @@ full_search:
2814 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2815 continue;
2816 }
2817- if (!vma || addr + len <= vma->vm_start) {
2818+ if (check_heap_stack_gap(vma, addr, len)) {
2819 /*
2820 * Remember the place where we stopped the search:
2821 */
2822@@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
2823 }
2824 }
2825
2826- addr = mm->mmap_base;
2827- while (addr > len) {
2828+ if (mm->mmap_base < len)
2829+ addr = -ENOMEM;
2830+ else
2831+ addr = mm->mmap_base - len;
2832+
2833+ while (!IS_ERR_VALUE(addr)) {
2834 /* Go down by chunk size */
2835- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2836+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
2837
2838 /* Check for hit with different page size */
2839 mask = slice_range_to_mask(addr, len);
2840@@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
2841 * return with success:
2842 */
2843 vma = find_vma(mm, addr);
2844- if (!vma || (addr + len) <= vma->vm_start) {
2845+ if (check_heap_stack_gap(vma, addr, len)) {
2846 /* remember the address as a hint for next time */
2847 if (use_cache)
2848 mm->free_area_cache = addr;
2849@@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
2850 mm->cached_hole_size = vma->vm_start - addr;
2851
2852 /* try just below the current vma->vm_start */
2853- addr = vma->vm_start;
2854+ addr = skip_heap_stack_gap(vma, len);
2855 }
2856
2857 /*
2858@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
2859 if (fixed && addr > (mm->task_size - len))
2860 return -EINVAL;
2861
2862+#ifdef CONFIG_PAX_RANDMMAP
2863+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2864+ addr = 0;
2865+#endif
2866+
2867 /* If hint, make sure it matches our alignment restrictions */
2868 if (!fixed && addr) {
2869 addr = _ALIGN_UP(addr, 1ul << pshift);
2870diff -urNp linux-2.6.32.41/arch/powerpc/platforms/52xx/lite5200_pm.c linux-2.6.32.41/arch/powerpc/platforms/52xx/lite5200_pm.c
2871--- linux-2.6.32.41/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-03-27 14:31:47.000000000 -0400
2872+++ linux-2.6.32.41/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-04-17 15:56:45.000000000 -0400
2873@@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
2874 lite5200_pm_target_state = PM_SUSPEND_ON;
2875 }
2876
2877-static struct platform_suspend_ops lite5200_pm_ops = {
2878+static const struct platform_suspend_ops lite5200_pm_ops = {
2879 .valid = lite5200_pm_valid,
2880 .begin = lite5200_pm_begin,
2881 .prepare = lite5200_pm_prepare,
2882diff -urNp linux-2.6.32.41/arch/powerpc/platforms/52xx/mpc52xx_pm.c linux-2.6.32.41/arch/powerpc/platforms/52xx/mpc52xx_pm.c
2883--- linux-2.6.32.41/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-03-27 14:31:47.000000000 -0400
2884+++ linux-2.6.32.41/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-04-17 15:56:45.000000000 -0400
2885@@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
2886 iounmap(mbar);
2887 }
2888
2889-static struct platform_suspend_ops mpc52xx_pm_ops = {
2890+static const struct platform_suspend_ops mpc52xx_pm_ops = {
2891 .valid = mpc52xx_pm_valid,
2892 .prepare = mpc52xx_pm_prepare,
2893 .enter = mpc52xx_pm_enter,
2894diff -urNp linux-2.6.32.41/arch/powerpc/platforms/83xx/suspend.c linux-2.6.32.41/arch/powerpc/platforms/83xx/suspend.c
2895--- linux-2.6.32.41/arch/powerpc/platforms/83xx/suspend.c 2011-03-27 14:31:47.000000000 -0400
2896+++ linux-2.6.32.41/arch/powerpc/platforms/83xx/suspend.c 2011-04-17 15:56:45.000000000 -0400
2897@@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
2898 return ret;
2899 }
2900
2901-static struct platform_suspend_ops mpc83xx_suspend_ops = {
2902+static const struct platform_suspend_ops mpc83xx_suspend_ops = {
2903 .valid = mpc83xx_suspend_valid,
2904 .begin = mpc83xx_suspend_begin,
2905 .enter = mpc83xx_suspend_enter,
2906diff -urNp linux-2.6.32.41/arch/powerpc/platforms/cell/iommu.c linux-2.6.32.41/arch/powerpc/platforms/cell/iommu.c
2907--- linux-2.6.32.41/arch/powerpc/platforms/cell/iommu.c 2011-03-27 14:31:47.000000000 -0400
2908+++ linux-2.6.32.41/arch/powerpc/platforms/cell/iommu.c 2011-04-17 15:56:45.000000000 -0400
2909@@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struc
2910
2911 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
2912
2913-struct dma_map_ops dma_iommu_fixed_ops = {
2914+const struct dma_map_ops dma_iommu_fixed_ops = {
2915 .alloc_coherent = dma_fixed_alloc_coherent,
2916 .free_coherent = dma_fixed_free_coherent,
2917 .map_sg = dma_fixed_map_sg,
2918diff -urNp linux-2.6.32.41/arch/powerpc/platforms/ps3/system-bus.c linux-2.6.32.41/arch/powerpc/platforms/ps3/system-bus.c
2919--- linux-2.6.32.41/arch/powerpc/platforms/ps3/system-bus.c 2011-03-27 14:31:47.000000000 -0400
2920+++ linux-2.6.32.41/arch/powerpc/platforms/ps3/system-bus.c 2011-04-17 15:56:45.000000000 -0400
2921@@ -694,7 +694,7 @@ static int ps3_dma_supported(struct devi
2922 return mask >= DMA_BIT_MASK(32);
2923 }
2924
2925-static struct dma_map_ops ps3_sb_dma_ops = {
2926+static const struct dma_map_ops ps3_sb_dma_ops = {
2927 .alloc_coherent = ps3_alloc_coherent,
2928 .free_coherent = ps3_free_coherent,
2929 .map_sg = ps3_sb_map_sg,
2930@@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops
2931 .unmap_page = ps3_unmap_page,
2932 };
2933
2934-static struct dma_map_ops ps3_ioc0_dma_ops = {
2935+static const struct dma_map_ops ps3_ioc0_dma_ops = {
2936 .alloc_coherent = ps3_alloc_coherent,
2937 .free_coherent = ps3_free_coherent,
2938 .map_sg = ps3_ioc0_map_sg,
2939diff -urNp linux-2.6.32.41/arch/powerpc/platforms/pseries/Kconfig linux-2.6.32.41/arch/powerpc/platforms/pseries/Kconfig
2940--- linux-2.6.32.41/arch/powerpc/platforms/pseries/Kconfig 2011-03-27 14:31:47.000000000 -0400
2941+++ linux-2.6.32.41/arch/powerpc/platforms/pseries/Kconfig 2011-04-17 15:56:45.000000000 -0400
2942@@ -2,6 +2,8 @@ config PPC_PSERIES
2943 depends on PPC64 && PPC_BOOK3S
2944 bool "IBM pSeries & new (POWER5-based) iSeries"
2945 select MPIC
2946+ select PCI_MSI
2947+ select XICS
2948 select PPC_I8259
2949 select PPC_RTAS
2950 select RTAS_ERROR_LOGGING
2951diff -urNp linux-2.6.32.41/arch/s390/include/asm/elf.h linux-2.6.32.41/arch/s390/include/asm/elf.h
2952--- linux-2.6.32.41/arch/s390/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
2953+++ linux-2.6.32.41/arch/s390/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
2954@@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
2955 that it will "exec", and that there is sufficient room for the brk. */
2956 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2957
2958+#ifdef CONFIG_PAX_ASLR
2959+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2960+
2961+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2962+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2963+#endif
2964+
2965 /* This yields a mask that user programs can use to figure out what
2966 instruction set this CPU supports. */
2967
2968diff -urNp linux-2.6.32.41/arch/s390/include/asm/setup.h linux-2.6.32.41/arch/s390/include/asm/setup.h
2969--- linux-2.6.32.41/arch/s390/include/asm/setup.h 2011-03-27 14:31:47.000000000 -0400
2970+++ linux-2.6.32.41/arch/s390/include/asm/setup.h 2011-04-17 15:56:45.000000000 -0400
2971@@ -50,13 +50,13 @@ extern unsigned long memory_end;
2972 void detect_memory_layout(struct mem_chunk chunk[]);
2973
2974 #ifdef CONFIG_S390_SWITCH_AMODE
2975-extern unsigned int switch_amode;
2976+#define switch_amode (1)
2977 #else
2978 #define switch_amode (0)
2979 #endif
2980
2981 #ifdef CONFIG_S390_EXEC_PROTECT
2982-extern unsigned int s390_noexec;
2983+#define s390_noexec (1)
2984 #else
2985 #define s390_noexec (0)
2986 #endif
2987diff -urNp linux-2.6.32.41/arch/s390/include/asm/uaccess.h linux-2.6.32.41/arch/s390/include/asm/uaccess.h
2988--- linux-2.6.32.41/arch/s390/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
2989+++ linux-2.6.32.41/arch/s390/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
2990@@ -232,6 +232,10 @@ static inline unsigned long __must_check
2991 copy_to_user(void __user *to, const void *from, unsigned long n)
2992 {
2993 might_fault();
2994+
2995+ if ((long)n < 0)
2996+ return n;
2997+
2998 if (access_ok(VERIFY_WRITE, to, n))
2999 n = __copy_to_user(to, from, n);
3000 return n;
3001@@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void
3002 static inline unsigned long __must_check
3003 __copy_from_user(void *to, const void __user *from, unsigned long n)
3004 {
3005+ if ((long)n < 0)
3006+ return n;
3007+
3008 if (__builtin_constant_p(n) && (n <= 256))
3009 return uaccess.copy_from_user_small(n, from, to);
3010 else
3011@@ -283,6 +290,10 @@ static inline unsigned long __must_check
3012 copy_from_user(void *to, const void __user *from, unsigned long n)
3013 {
3014 might_fault();
3015+
3016+ if ((long)n < 0)
3017+ return n;
3018+
3019 if (access_ok(VERIFY_READ, from, n))
3020 n = __copy_from_user(to, from, n);
3021 else
3022diff -urNp linux-2.6.32.41/arch/s390/Kconfig linux-2.6.32.41/arch/s390/Kconfig
3023--- linux-2.6.32.41/arch/s390/Kconfig 2011-03-27 14:31:47.000000000 -0400
3024+++ linux-2.6.32.41/arch/s390/Kconfig 2011-04-17 15:56:45.000000000 -0400
3025@@ -194,28 +194,26 @@ config AUDIT_ARCH
3026
3027 config S390_SWITCH_AMODE
3028 bool "Switch kernel/user addressing modes"
3029+ default y
3030 help
3031 This option allows to switch the addressing modes of kernel and user
3032- space. The kernel parameter switch_amode=on will enable this feature,
3033- default is disabled. Enabling this (via kernel parameter) on machines
3034- earlier than IBM System z9-109 EC/BC will reduce system performance.
3035+ space. Enabling this on machines earlier than IBM System z9-109 EC/BC
3036+ will reduce system performance.
3037
3038 Note that this option will also be selected by selecting the execute
3039- protection option below. Enabling the execute protection via the
3040- noexec kernel parameter will also switch the addressing modes,
3041- independent of the switch_amode kernel parameter.
3042+ protection option below. Enabling the execute protection will also
3043+ switch the addressing modes, independent of this option.
3044
3045
3046 config S390_EXEC_PROTECT
3047 bool "Data execute protection"
3048+ default y
3049 select S390_SWITCH_AMODE
3050 help
3051 This option allows to enable a buffer overflow protection for user
3052 space programs and it also selects the addressing mode option above.
3053- The kernel parameter noexec=on will enable this feature and also
3054- switch the addressing modes, default is disabled. Enabling this (via
3055- kernel parameter) on machines earlier than IBM System z9-109 EC/BC
3056- will reduce system performance.
3057+ Enabling this on machines earlier than IBM System z9-109 EC/BC will
3058+ reduce system performance.
3059
3060 comment "Code generation options"
3061
3062diff -urNp linux-2.6.32.41/arch/s390/kernel/module.c linux-2.6.32.41/arch/s390/kernel/module.c
3063--- linux-2.6.32.41/arch/s390/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
3064+++ linux-2.6.32.41/arch/s390/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
3065@@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
3066
3067 /* Increase core size by size of got & plt and set start
3068 offsets for got and plt. */
3069- me->core_size = ALIGN(me->core_size, 4);
3070- me->arch.got_offset = me->core_size;
3071- me->core_size += me->arch.got_size;
3072- me->arch.plt_offset = me->core_size;
3073- me->core_size += me->arch.plt_size;
3074+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
3075+ me->arch.got_offset = me->core_size_rw;
3076+ me->core_size_rw += me->arch.got_size;
3077+ me->arch.plt_offset = me->core_size_rx;
3078+ me->core_size_rx += me->arch.plt_size;
3079 return 0;
3080 }
3081
3082@@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3083 if (info->got_initialized == 0) {
3084 Elf_Addr *gotent;
3085
3086- gotent = me->module_core + me->arch.got_offset +
3087+ gotent = me->module_core_rw + me->arch.got_offset +
3088 info->got_offset;
3089 *gotent = val;
3090 info->got_initialized = 1;
3091@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3092 else if (r_type == R_390_GOTENT ||
3093 r_type == R_390_GOTPLTENT)
3094 *(unsigned int *) loc =
3095- (val + (Elf_Addr) me->module_core - loc) >> 1;
3096+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3097 else if (r_type == R_390_GOT64 ||
3098 r_type == R_390_GOTPLT64)
3099 *(unsigned long *) loc = val;
3100@@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3101 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3102 if (info->plt_initialized == 0) {
3103 unsigned int *ip;
3104- ip = me->module_core + me->arch.plt_offset +
3105+ ip = me->module_core_rx + me->arch.plt_offset +
3106 info->plt_offset;
3107 #ifndef CONFIG_64BIT
3108 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3109@@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3110 val - loc + 0xffffUL < 0x1ffffeUL) ||
3111 (r_type == R_390_PLT32DBL &&
3112 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3113- val = (Elf_Addr) me->module_core +
3114+ val = (Elf_Addr) me->module_core_rx +
3115 me->arch.plt_offset +
3116 info->plt_offset;
3117 val += rela->r_addend - loc;
3118@@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3119 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3120 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3121 val = val + rela->r_addend -
3122- ((Elf_Addr) me->module_core + me->arch.got_offset);
3123+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3124 if (r_type == R_390_GOTOFF16)
3125 *(unsigned short *) loc = val;
3126 else if (r_type == R_390_GOTOFF32)
3127@@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3128 break;
3129 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3130 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3131- val = (Elf_Addr) me->module_core + me->arch.got_offset +
3132+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3133 rela->r_addend - loc;
3134 if (r_type == R_390_GOTPC)
3135 *(unsigned int *) loc = val;
3136diff -urNp linux-2.6.32.41/arch/s390/kernel/setup.c linux-2.6.32.41/arch/s390/kernel/setup.c
3137--- linux-2.6.32.41/arch/s390/kernel/setup.c 2011-03-27 14:31:47.000000000 -0400
3138+++ linux-2.6.32.41/arch/s390/kernel/setup.c 2011-04-17 15:56:45.000000000 -0400
3139@@ -306,9 +306,6 @@ static int __init early_parse_mem(char *
3140 early_param("mem", early_parse_mem);
3141
3142 #ifdef CONFIG_S390_SWITCH_AMODE
3143-unsigned int switch_amode = 0;
3144-EXPORT_SYMBOL_GPL(switch_amode);
3145-
3146 static int set_amode_and_uaccess(unsigned long user_amode,
3147 unsigned long user32_amode)
3148 {
3149@@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigne
3150 return 0;
3151 }
3152 }
3153-
3154-/*
3155- * Switch kernel/user addressing modes?
3156- */
3157-static int __init early_parse_switch_amode(char *p)
3158-{
3159- switch_amode = 1;
3160- return 0;
3161-}
3162-early_param("switch_amode", early_parse_switch_amode);
3163-
3164 #else /* CONFIG_S390_SWITCH_AMODE */
3165 static inline int set_amode_and_uaccess(unsigned long user_amode,
3166 unsigned long user32_amode)
3167@@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(
3168 }
3169 #endif /* CONFIG_S390_SWITCH_AMODE */
3170
3171-#ifdef CONFIG_S390_EXEC_PROTECT
3172-unsigned int s390_noexec = 0;
3173-EXPORT_SYMBOL_GPL(s390_noexec);
3174-
3175-/*
3176- * Enable execute protection?
3177- */
3178-static int __init early_parse_noexec(char *p)
3179-{
3180- if (!strncmp(p, "off", 3))
3181- return 0;
3182- switch_amode = 1;
3183- s390_noexec = 1;
3184- return 0;
3185-}
3186-early_param("noexec", early_parse_noexec);
3187-#endif /* CONFIG_S390_EXEC_PROTECT */
3188-
3189 static void setup_addressing_mode(void)
3190 {
3191 if (s390_noexec) {
3192diff -urNp linux-2.6.32.41/arch/s390/mm/mmap.c linux-2.6.32.41/arch/s390/mm/mmap.c
3193--- linux-2.6.32.41/arch/s390/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3194+++ linux-2.6.32.41/arch/s390/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
3195@@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_str
3196 */
3197 if (mmap_is_legacy()) {
3198 mm->mmap_base = TASK_UNMAPPED_BASE;
3199+
3200+#ifdef CONFIG_PAX_RANDMMAP
3201+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3202+ mm->mmap_base += mm->delta_mmap;
3203+#endif
3204+
3205 mm->get_unmapped_area = arch_get_unmapped_area;
3206 mm->unmap_area = arch_unmap_area;
3207 } else {
3208 mm->mmap_base = mmap_base();
3209+
3210+#ifdef CONFIG_PAX_RANDMMAP
3211+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3212+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3213+#endif
3214+
3215 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3216 mm->unmap_area = arch_unmap_area_topdown;
3217 }
3218@@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_str
3219 */
3220 if (mmap_is_legacy()) {
3221 mm->mmap_base = TASK_UNMAPPED_BASE;
3222+
3223+#ifdef CONFIG_PAX_RANDMMAP
3224+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3225+ mm->mmap_base += mm->delta_mmap;
3226+#endif
3227+
3228 mm->get_unmapped_area = s390_get_unmapped_area;
3229 mm->unmap_area = arch_unmap_area;
3230 } else {
3231 mm->mmap_base = mmap_base();
3232+
3233+#ifdef CONFIG_PAX_RANDMMAP
3234+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3235+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3236+#endif
3237+
3238 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3239 mm->unmap_area = arch_unmap_area_topdown;
3240 }
3241diff -urNp linux-2.6.32.41/arch/score/include/asm/system.h linux-2.6.32.41/arch/score/include/asm/system.h
3242--- linux-2.6.32.41/arch/score/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
3243+++ linux-2.6.32.41/arch/score/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
3244@@ -17,7 +17,7 @@ do { \
3245 #define finish_arch_switch(prev) do {} while (0)
3246
3247 typedef void (*vi_handler_t)(void);
3248-extern unsigned long arch_align_stack(unsigned long sp);
3249+#define arch_align_stack(x) (x)
3250
3251 #define mb() barrier()
3252 #define rmb() barrier()
3253diff -urNp linux-2.6.32.41/arch/score/kernel/process.c linux-2.6.32.41/arch/score/kernel/process.c
3254--- linux-2.6.32.41/arch/score/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
3255+++ linux-2.6.32.41/arch/score/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
3256@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
3257
3258 return task_pt_regs(task)->cp0_epc;
3259 }
3260-
3261-unsigned long arch_align_stack(unsigned long sp)
3262-{
3263- return sp;
3264-}
3265diff -urNp linux-2.6.32.41/arch/sh/boards/mach-hp6xx/pm.c linux-2.6.32.41/arch/sh/boards/mach-hp6xx/pm.c
3266--- linux-2.6.32.41/arch/sh/boards/mach-hp6xx/pm.c 2011-03-27 14:31:47.000000000 -0400
3267+++ linux-2.6.32.41/arch/sh/boards/mach-hp6xx/pm.c 2011-04-17 15:56:45.000000000 -0400
3268@@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_
3269 return 0;
3270 }
3271
3272-static struct platform_suspend_ops hp6x0_pm_ops = {
3273+static const struct platform_suspend_ops hp6x0_pm_ops = {
3274 .enter = hp6x0_pm_enter,
3275 .valid = suspend_valid_only_mem,
3276 };
3277diff -urNp linux-2.6.32.41/arch/sh/kernel/cpu/sh4/sq.c linux-2.6.32.41/arch/sh/kernel/cpu/sh4/sq.c
3278--- linux-2.6.32.41/arch/sh/kernel/cpu/sh4/sq.c 2011-03-27 14:31:47.000000000 -0400
3279+++ linux-2.6.32.41/arch/sh/kernel/cpu/sh4/sq.c 2011-04-17 15:56:46.000000000 -0400
3280@@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[
3281 NULL,
3282 };
3283
3284-static struct sysfs_ops sq_sysfs_ops = {
3285+static const struct sysfs_ops sq_sysfs_ops = {
3286 .show = sq_sysfs_show,
3287 .store = sq_sysfs_store,
3288 };
3289diff -urNp linux-2.6.32.41/arch/sh/kernel/cpu/shmobile/pm.c linux-2.6.32.41/arch/sh/kernel/cpu/shmobile/pm.c
3290--- linux-2.6.32.41/arch/sh/kernel/cpu/shmobile/pm.c 2011-03-27 14:31:47.000000000 -0400
3291+++ linux-2.6.32.41/arch/sh/kernel/cpu/shmobile/pm.c 2011-04-17 15:56:46.000000000 -0400
3292@@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t s
3293 return 0;
3294 }
3295
3296-static struct platform_suspend_ops sh_pm_ops = {
3297+static const struct platform_suspend_ops sh_pm_ops = {
3298 .enter = sh_pm_enter,
3299 .valid = suspend_valid_only_mem,
3300 };
3301diff -urNp linux-2.6.32.41/arch/sh/kernel/kgdb.c linux-2.6.32.41/arch/sh/kernel/kgdb.c
3302--- linux-2.6.32.41/arch/sh/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
3303+++ linux-2.6.32.41/arch/sh/kernel/kgdb.c 2011-04-17 15:56:46.000000000 -0400
3304@@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
3305 {
3306 }
3307
3308-struct kgdb_arch arch_kgdb_ops = {
3309+const struct kgdb_arch arch_kgdb_ops = {
3310 /* Breakpoint instruction: trapa #0x3c */
3311 #ifdef CONFIG_CPU_LITTLE_ENDIAN
3312 .gdb_bpt_instr = { 0x3c, 0xc3 },
3313diff -urNp linux-2.6.32.41/arch/sh/mm/mmap.c linux-2.6.32.41/arch/sh/mm/mmap.c
3314--- linux-2.6.32.41/arch/sh/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3315+++ linux-2.6.32.41/arch/sh/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
3316@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
3317 addr = PAGE_ALIGN(addr);
3318
3319 vma = find_vma(mm, addr);
3320- if (TASK_SIZE - len >= addr &&
3321- (!vma || addr + len <= vma->vm_start))
3322+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3323 return addr;
3324 }
3325
3326@@ -106,7 +105,7 @@ full_search:
3327 }
3328 return -ENOMEM;
3329 }
3330- if (likely(!vma || addr + len <= vma->vm_start)) {
3331+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3332 /*
3333 * Remember the place where we stopped the search:
3334 */
3335@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
3336 addr = PAGE_ALIGN(addr);
3337
3338 vma = find_vma(mm, addr);
3339- if (TASK_SIZE - len >= addr &&
3340- (!vma || addr + len <= vma->vm_start))
3341+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3342 return addr;
3343 }
3344
3345@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
3346 /* make sure it can fit in the remaining address space */
3347 if (likely(addr > len)) {
3348 vma = find_vma(mm, addr-len);
3349- if (!vma || addr <= vma->vm_start) {
3350+ if (check_heap_stack_gap(vma, addr - len, len)) {
3351 /* remember the address as a hint for next time */
3352 return (mm->free_area_cache = addr-len);
3353 }
3354@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
3355 if (unlikely(mm->mmap_base < len))
3356 goto bottomup;
3357
3358- addr = mm->mmap_base-len;
3359- if (do_colour_align)
3360- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3361+ addr = mm->mmap_base - len;
3362
3363 do {
3364+ if (do_colour_align)
3365+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3366 /*
3367 * Lookup failure means no vma is above this address,
3368 * else if new region fits below vma->vm_start,
3369 * return with success:
3370 */
3371 vma = find_vma(mm, addr);
3372- if (likely(!vma || addr+len <= vma->vm_start)) {
3373+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3374 /* remember the address as a hint for next time */
3375 return (mm->free_area_cache = addr);
3376 }
3377@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
3378 mm->cached_hole_size = vma->vm_start - addr;
3379
3380 /* try just below the current vma->vm_start */
3381- addr = vma->vm_start-len;
3382- if (do_colour_align)
3383- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3384- } while (likely(len < vma->vm_start));
3385+ addr = skip_heap_stack_gap(vma, len);
3386+ } while (!IS_ERR_VALUE(addr));
3387
3388 bottomup:
3389 /*
3390diff -urNp linux-2.6.32.41/arch/sparc/include/asm/atomic_64.h linux-2.6.32.41/arch/sparc/include/asm/atomic_64.h
3391--- linux-2.6.32.41/arch/sparc/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
3392+++ linux-2.6.32.41/arch/sparc/include/asm/atomic_64.h 2011-05-04 17:56:20.000000000 -0400
3393@@ -14,18 +14,40 @@
3394 #define ATOMIC64_INIT(i) { (i) }
3395
3396 #define atomic_read(v) ((v)->counter)
3397+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3398+{
3399+ return v->counter;
3400+}
3401 #define atomic64_read(v) ((v)->counter)
3402+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3403+{
3404+ return v->counter;
3405+}
3406
3407 #define atomic_set(v, i) (((v)->counter) = i)
3408+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3409+{
3410+ v->counter = i;
3411+}
3412 #define atomic64_set(v, i) (((v)->counter) = i)
3413+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3414+{
3415+ v->counter = i;
3416+}
3417
3418 extern void atomic_add(int, atomic_t *);
3419+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3420 extern void atomic64_add(long, atomic64_t *);
3421+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3422 extern void atomic_sub(int, atomic_t *);
3423+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3424 extern void atomic64_sub(long, atomic64_t *);
3425+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3426
3427 extern int atomic_add_ret(int, atomic_t *);
3428+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3429 extern long atomic64_add_ret(long, atomic64_t *);
3430+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3431 extern int atomic_sub_ret(int, atomic_t *);
3432 extern long atomic64_sub_ret(long, atomic64_t *);
3433
3434@@ -33,7 +55,15 @@ extern long atomic64_sub_ret(long, atomi
3435 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3436
3437 #define atomic_inc_return(v) atomic_add_ret(1, v)
3438+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3439+{
3440+ return atomic_add_ret_unchecked(1, v);
3441+}
3442 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3443+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3444+{
3445+ return atomic64_add_ret_unchecked(1, v);
3446+}
3447
3448 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3449 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3450@@ -50,6 +80,7 @@ extern long atomic64_sub_ret(long, atomi
3451 * other cases.
3452 */
3453 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3454+#define atomic_inc_and_test_unchecked(v) (atomic_inc_return_unchecked(v) == 0)
3455 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3456
3457 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3458@@ -59,30 +90,59 @@ extern long atomic64_sub_ret(long, atomi
3459 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3460
3461 #define atomic_inc(v) atomic_add(1, v)
3462+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3463+{
3464+ atomic_add_unchecked(1, v);
3465+}
3466 #define atomic64_inc(v) atomic64_add(1, v)
3467+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3468+{
3469+ atomic64_add_unchecked(1, v);
3470+}
3471
3472 #define atomic_dec(v) atomic_sub(1, v)
3473+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3474+{
3475+ atomic_sub_unchecked(1, v);
3476+}
3477 #define atomic64_dec(v) atomic64_sub(1, v)
3478+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3479+{
3480+ atomic64_sub_unchecked(1, v);
3481+}
3482
3483 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3484 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3485
3486 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3487+#define atomic_cmpxchg_unchecked(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3488 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3489+#define atomic_xchg_unchecked(v, new) (xchg(&((v)->counter), new))
3490
3491 static inline int atomic_add_unless(atomic_t *v, int a, int u)
3492 {
3493- int c, old;
3494+ int c, old, new;
3495 c = atomic_read(v);
3496 for (;;) {
3497- if (unlikely(c == (u)))
3498+ if (unlikely(c == u))
3499 break;
3500- old = atomic_cmpxchg((v), c, c + (a));
3501+
3502+ asm volatile("addcc %2, %0, %0\n"
3503+
3504+#ifdef CONFIG_PAX_REFCOUNT
3505+ "tvs %%icc, 6\n"
3506+#endif
3507+
3508+ : "=r" (new)
3509+ : "0" (c), "ir" (a)
3510+ : "cc");
3511+
3512+ old = atomic_cmpxchg(v, c, new);
3513 if (likely(old == c))
3514 break;
3515 c = old;
3516 }
3517- return c != (u);
3518+ return c != u;
3519 }
3520
3521 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
3522@@ -93,17 +153,28 @@ static inline int atomic_add_unless(atom
3523
3524 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3525 {
3526- long c, old;
3527+ long c, old, new;
3528 c = atomic64_read(v);
3529 for (;;) {
3530- if (unlikely(c == (u)))
3531+ if (unlikely(c == u))
3532 break;
3533- old = atomic64_cmpxchg((v), c, c + (a));
3534+
3535+ asm volatile("addcc %2, %0, %0\n"
3536+
3537+#ifdef CONFIG_PAX_REFCOUNT
3538+ "tvs %%xcc, 6\n"
3539+#endif
3540+
3541+ : "=r" (new)
3542+ : "0" (c), "ir" (a)
3543+ : "cc");
3544+
3545+ old = atomic64_cmpxchg(v, c, new);
3546 if (likely(old == c))
3547 break;
3548 c = old;
3549 }
3550- return c != (u);
3551+ return c != u;
3552 }
3553
3554 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3555diff -urNp linux-2.6.32.41/arch/sparc/include/asm/cache.h linux-2.6.32.41/arch/sparc/include/asm/cache.h
3556--- linux-2.6.32.41/arch/sparc/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
3557+++ linux-2.6.32.41/arch/sparc/include/asm/cache.h 2011-05-17 19:26:34.000000000 -0400
3558@@ -8,7 +8,7 @@
3559 #define _SPARC_CACHE_H
3560
3561 #define L1_CACHE_SHIFT 5
3562-#define L1_CACHE_BYTES 32
3563+#define L1_CACHE_BYTES 32U
3564 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
3565
3566 #ifdef CONFIG_SPARC32
3567diff -urNp linux-2.6.32.41/arch/sparc/include/asm/dma-mapping.h linux-2.6.32.41/arch/sparc/include/asm/dma-mapping.h
3568--- linux-2.6.32.41/arch/sparc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
3569+++ linux-2.6.32.41/arch/sparc/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
3570@@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *d
3571 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
3572 #define dma_is_consistent(d, h) (1)
3573
3574-extern struct dma_map_ops *dma_ops, pci32_dma_ops;
3575+extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
3576 extern struct bus_type pci_bus_type;
3577
3578-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
3579+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
3580 {
3581 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
3582 if (dev->bus == &pci_bus_type)
3583@@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dm
3584 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
3585 dma_addr_t *dma_handle, gfp_t flag)
3586 {
3587- struct dma_map_ops *ops = get_dma_ops(dev);
3588+ const struct dma_map_ops *ops = get_dma_ops(dev);
3589 void *cpu_addr;
3590
3591 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
3592@@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(s
3593 static inline void dma_free_coherent(struct device *dev, size_t size,
3594 void *cpu_addr, dma_addr_t dma_handle)
3595 {
3596- struct dma_map_ops *ops = get_dma_ops(dev);
3597+ const struct dma_map_ops *ops = get_dma_ops(dev);
3598
3599 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
3600 ops->free_coherent(dev, size, cpu_addr, dma_handle);
3601diff -urNp linux-2.6.32.41/arch/sparc/include/asm/elf_32.h linux-2.6.32.41/arch/sparc/include/asm/elf_32.h
3602--- linux-2.6.32.41/arch/sparc/include/asm/elf_32.h 2011-03-27 14:31:47.000000000 -0400
3603+++ linux-2.6.32.41/arch/sparc/include/asm/elf_32.h 2011-04-17 15:56:46.000000000 -0400
3604@@ -116,6 +116,13 @@ typedef struct {
3605
3606 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3607
3608+#ifdef CONFIG_PAX_ASLR
3609+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3610+
3611+#define PAX_DELTA_MMAP_LEN 16
3612+#define PAX_DELTA_STACK_LEN 16
3613+#endif
3614+
3615 /* This yields a mask that user programs can use to figure out what
3616 instruction set this cpu supports. This can NOT be done in userspace
3617 on Sparc. */
3618diff -urNp linux-2.6.32.41/arch/sparc/include/asm/elf_64.h linux-2.6.32.41/arch/sparc/include/asm/elf_64.h
3619--- linux-2.6.32.41/arch/sparc/include/asm/elf_64.h 2011-03-27 14:31:47.000000000 -0400
3620+++ linux-2.6.32.41/arch/sparc/include/asm/elf_64.h 2011-04-17 15:56:46.000000000 -0400
3621@@ -163,6 +163,12 @@ typedef struct {
3622 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3623 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3624
3625+#ifdef CONFIG_PAX_ASLR
3626+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3627+
3628+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3629+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3630+#endif
3631
3632 /* This yields a mask that user programs can use to figure out what
3633 instruction set this cpu supports. */
3634diff -urNp linux-2.6.32.41/arch/sparc/include/asm/pgtable_32.h linux-2.6.32.41/arch/sparc/include/asm/pgtable_32.h
3635--- linux-2.6.32.41/arch/sparc/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
3636+++ linux-2.6.32.41/arch/sparc/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
3637@@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3638 BTFIXUPDEF_INT(page_none)
3639 BTFIXUPDEF_INT(page_copy)
3640 BTFIXUPDEF_INT(page_readonly)
3641+
3642+#ifdef CONFIG_PAX_PAGEEXEC
3643+BTFIXUPDEF_INT(page_shared_noexec)
3644+BTFIXUPDEF_INT(page_copy_noexec)
3645+BTFIXUPDEF_INT(page_readonly_noexec)
3646+#endif
3647+
3648 BTFIXUPDEF_INT(page_kernel)
3649
3650 #define PMD_SHIFT SUN4C_PMD_SHIFT
3651@@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
3652 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3653 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3654
3655+#ifdef CONFIG_PAX_PAGEEXEC
3656+extern pgprot_t PAGE_SHARED_NOEXEC;
3657+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3658+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3659+#else
3660+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3661+# define PAGE_COPY_NOEXEC PAGE_COPY
3662+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3663+#endif
3664+
3665 extern unsigned long page_kernel;
3666
3667 #ifdef MODULE
3668diff -urNp linux-2.6.32.41/arch/sparc/include/asm/pgtsrmmu.h linux-2.6.32.41/arch/sparc/include/asm/pgtsrmmu.h
3669--- linux-2.6.32.41/arch/sparc/include/asm/pgtsrmmu.h 2011-03-27 14:31:47.000000000 -0400
3670+++ linux-2.6.32.41/arch/sparc/include/asm/pgtsrmmu.h 2011-04-17 15:56:46.000000000 -0400
3671@@ -115,6 +115,13 @@
3672 SRMMU_EXEC | SRMMU_REF)
3673 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3674 SRMMU_EXEC | SRMMU_REF)
3675+
3676+#ifdef CONFIG_PAX_PAGEEXEC
3677+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3678+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3679+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3680+#endif
3681+
3682 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3683 SRMMU_DIRTY | SRMMU_REF)
3684
3685diff -urNp linux-2.6.32.41/arch/sparc/include/asm/spinlock_64.h linux-2.6.32.41/arch/sparc/include/asm/spinlock_64.h
3686--- linux-2.6.32.41/arch/sparc/include/asm/spinlock_64.h 2011-03-27 14:31:47.000000000 -0400
3687+++ linux-2.6.32.41/arch/sparc/include/asm/spinlock_64.h 2011-05-04 17:56:20.000000000 -0400
3688@@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags
3689
3690 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3691
3692-static void inline arch_read_lock(raw_rwlock_t *lock)
3693+static inline void arch_read_lock(raw_rwlock_t *lock)
3694 {
3695 unsigned long tmp1, tmp2;
3696
3697 __asm__ __volatile__ (
3698 "1: ldsw [%2], %0\n"
3699 " brlz,pn %0, 2f\n"
3700-"4: add %0, 1, %1\n"
3701+"4: addcc %0, 1, %1\n"
3702+
3703+#ifdef CONFIG_PAX_REFCOUNT
3704+" tvs %%icc, 6\n"
3705+#endif
3706+
3707 " cas [%2], %0, %1\n"
3708 " cmp %0, %1\n"
3709 " bne,pn %%icc, 1b\n"
3710@@ -112,7 +117,7 @@ static void inline arch_read_lock(raw_rw
3711 " .previous"
3712 : "=&r" (tmp1), "=&r" (tmp2)
3713 : "r" (lock)
3714- : "memory");
3715+ : "memory", "cc");
3716 }
3717
3718 static int inline arch_read_trylock(raw_rwlock_t *lock)
3719@@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_
3720 "1: ldsw [%2], %0\n"
3721 " brlz,a,pn %0, 2f\n"
3722 " mov 0, %0\n"
3723-" add %0, 1, %1\n"
3724+" addcc %0, 1, %1\n"
3725+
3726+#ifdef CONFIG_PAX_REFCOUNT
3727+" tvs %%icc, 6\n"
3728+#endif
3729+
3730 " cas [%2], %0, %1\n"
3731 " cmp %0, %1\n"
3732 " bne,pn %%icc, 1b\n"
3733@@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_
3734 return tmp1;
3735 }
3736
3737-static void inline arch_read_unlock(raw_rwlock_t *lock)
3738+static inline void arch_read_unlock(raw_rwlock_t *lock)
3739 {
3740 unsigned long tmp1, tmp2;
3741
3742 __asm__ __volatile__(
3743 "1: lduw [%2], %0\n"
3744-" sub %0, 1, %1\n"
3745+" subcc %0, 1, %1\n"
3746+
3747+#ifdef CONFIG_PAX_REFCOUNT
3748+" tvs %%icc, 6\n"
3749+#endif
3750+
3751 " cas [%2], %0, %1\n"
3752 " cmp %0, %1\n"
3753 " bne,pn %%xcc, 1b\n"
3754@@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_
3755 : "memory");
3756 }
3757
3758-static void inline arch_write_lock(raw_rwlock_t *lock)
3759+static inline void arch_write_lock(raw_rwlock_t *lock)
3760 {
3761 unsigned long mask, tmp1, tmp2;
3762
3763@@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_r
3764 : "memory");
3765 }
3766
3767-static void inline arch_write_unlock(raw_rwlock_t *lock)
3768+static inline void arch_write_unlock(raw_rwlock_t *lock)
3769 {
3770 __asm__ __volatile__(
3771 " stw %%g0, [%0]"
3772diff -urNp linux-2.6.32.41/arch/sparc/include/asm/uaccess_32.h linux-2.6.32.41/arch/sparc/include/asm/uaccess_32.h
3773--- linux-2.6.32.41/arch/sparc/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
3774+++ linux-2.6.32.41/arch/sparc/include/asm/uaccess_32.h 2011-04-17 15:56:46.000000000 -0400
3775@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
3776
3777 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3778 {
3779- if (n && __access_ok((unsigned long) to, n))
3780+ if ((long)n < 0)
3781+ return n;
3782+
3783+ if (n && __access_ok((unsigned long) to, n)) {
3784+ if (!__builtin_constant_p(n))
3785+ check_object_size(from, n, true);
3786 return __copy_user(to, (__force void __user *) from, n);
3787- else
3788+ } else
3789 return n;
3790 }
3791
3792 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3793 {
3794+ if ((long)n < 0)
3795+ return n;
3796+
3797+ if (!__builtin_constant_p(n))
3798+ check_object_size(from, n, true);
3799+
3800 return __copy_user(to, (__force void __user *) from, n);
3801 }
3802
3803 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3804 {
3805- if (n && __access_ok((unsigned long) from, n))
3806+ if ((long)n < 0)
3807+ return n;
3808+
3809+ if (n && __access_ok((unsigned long) from, n)) {
3810+ if (!__builtin_constant_p(n))
3811+ check_object_size(to, n, false);
3812 return __copy_user((__force void __user *) to, from, n);
3813- else
3814+ } else
3815 return n;
3816 }
3817
3818 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3819 {
3820+ if ((long)n < 0)
3821+ return n;
3822+
3823 return __copy_user((__force void __user *) to, from, n);
3824 }
3825
3826diff -urNp linux-2.6.32.41/arch/sparc/include/asm/uaccess_64.h linux-2.6.32.41/arch/sparc/include/asm/uaccess_64.h
3827--- linux-2.6.32.41/arch/sparc/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
3828+++ linux-2.6.32.41/arch/sparc/include/asm/uaccess_64.h 2011-04-17 15:56:46.000000000 -0400
3829@@ -9,6 +9,7 @@
3830 #include <linux/compiler.h>
3831 #include <linux/string.h>
3832 #include <linux/thread_info.h>
3833+#include <linux/kernel.h>
3834 #include <asm/asi.h>
3835 #include <asm/system.h>
3836 #include <asm/spitfire.h>
3837@@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixu
3838 static inline unsigned long __must_check
3839 copy_from_user(void *to, const void __user *from, unsigned long size)
3840 {
3841- unsigned long ret = ___copy_from_user(to, from, size);
3842+ unsigned long ret;
3843
3844+ if ((long)size < 0 || size > INT_MAX)
3845+ return size;
3846+
3847+ if (!__builtin_constant_p(size))
3848+ check_object_size(to, size, false);
3849+
3850+ ret = ___copy_from_user(to, from, size);
3851 if (unlikely(ret))
3852 ret = copy_from_user_fixup(to, from, size);
3853 return ret;
3854@@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(
3855 static inline unsigned long __must_check
3856 copy_to_user(void __user *to, const void *from, unsigned long size)
3857 {
3858- unsigned long ret = ___copy_to_user(to, from, size);
3859+ unsigned long ret;
3860+
3861+ if ((long)size < 0 || size > INT_MAX)
3862+ return size;
3863+
3864+ if (!__builtin_constant_p(size))
3865+ check_object_size(from, size, true);
3866
3867+ ret = ___copy_to_user(to, from, size);
3868 if (unlikely(ret))
3869 ret = copy_to_user_fixup(to, from, size);
3870 return ret;
3871diff -urNp linux-2.6.32.41/arch/sparc/include/asm/uaccess.h linux-2.6.32.41/arch/sparc/include/asm/uaccess.h
3872--- linux-2.6.32.41/arch/sparc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
3873+++ linux-2.6.32.41/arch/sparc/include/asm/uaccess.h 2011-04-17 15:56:46.000000000 -0400
3874@@ -1,5 +1,13 @@
3875 #ifndef ___ASM_SPARC_UACCESS_H
3876 #define ___ASM_SPARC_UACCESS_H
3877+
3878+#ifdef __KERNEL__
3879+#ifndef __ASSEMBLY__
3880+#include <linux/types.h>
3881+extern void check_object_size(const void *ptr, unsigned long n, bool to);
3882+#endif
3883+#endif
3884+
3885 #if defined(__sparc__) && defined(__arch64__)
3886 #include <asm/uaccess_64.h>
3887 #else
3888diff -urNp linux-2.6.32.41/arch/sparc/kernel/iommu.c linux-2.6.32.41/arch/sparc/kernel/iommu.c
3889--- linux-2.6.32.41/arch/sparc/kernel/iommu.c 2011-03-27 14:31:47.000000000 -0400
3890+++ linux-2.6.32.41/arch/sparc/kernel/iommu.c 2011-04-17 15:56:46.000000000 -0400
3891@@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struc
3892 spin_unlock_irqrestore(&iommu->lock, flags);
3893 }
3894
3895-static struct dma_map_ops sun4u_dma_ops = {
3896+static const struct dma_map_ops sun4u_dma_ops = {
3897 .alloc_coherent = dma_4u_alloc_coherent,
3898 .free_coherent = dma_4u_free_coherent,
3899 .map_page = dma_4u_map_page,
3900@@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops
3901 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
3902 };
3903
3904-struct dma_map_ops *dma_ops = &sun4u_dma_ops;
3905+const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
3906 EXPORT_SYMBOL(dma_ops);
3907
3908 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
3909diff -urNp linux-2.6.32.41/arch/sparc/kernel/ioport.c linux-2.6.32.41/arch/sparc/kernel/ioport.c
3910--- linux-2.6.32.41/arch/sparc/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
3911+++ linux-2.6.32.41/arch/sparc/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
3912@@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(stru
3913 BUG();
3914 }
3915
3916-struct dma_map_ops sbus_dma_ops = {
3917+const struct dma_map_ops sbus_dma_ops = {
3918 .alloc_coherent = sbus_alloc_coherent,
3919 .free_coherent = sbus_free_coherent,
3920 .map_page = sbus_map_page,
3921@@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
3922 .sync_sg_for_device = sbus_sync_sg_for_device,
3923 };
3924
3925-struct dma_map_ops *dma_ops = &sbus_dma_ops;
3926+const struct dma_map_ops *dma_ops = &sbus_dma_ops;
3927 EXPORT_SYMBOL(dma_ops);
3928
3929 static int __init sparc_register_ioport(void)
3930@@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(str
3931 }
3932 }
3933
3934-struct dma_map_ops pci32_dma_ops = {
3935+const struct dma_map_ops pci32_dma_ops = {
3936 .alloc_coherent = pci32_alloc_coherent,
3937 .free_coherent = pci32_free_coherent,
3938 .map_page = pci32_map_page,
3939diff -urNp linux-2.6.32.41/arch/sparc/kernel/kgdb_32.c linux-2.6.32.41/arch/sparc/kernel/kgdb_32.c
3940--- linux-2.6.32.41/arch/sparc/kernel/kgdb_32.c 2011-03-27 14:31:47.000000000 -0400
3941+++ linux-2.6.32.41/arch/sparc/kernel/kgdb_32.c 2011-04-17 15:56:46.000000000 -0400
3942@@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
3943 {
3944 }
3945
3946-struct kgdb_arch arch_kgdb_ops = {
3947+const struct kgdb_arch arch_kgdb_ops = {
3948 /* Breakpoint instruction: ta 0x7d */
3949 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
3950 };
3951diff -urNp linux-2.6.32.41/arch/sparc/kernel/kgdb_64.c linux-2.6.32.41/arch/sparc/kernel/kgdb_64.c
3952--- linux-2.6.32.41/arch/sparc/kernel/kgdb_64.c 2011-03-27 14:31:47.000000000 -0400
3953+++ linux-2.6.32.41/arch/sparc/kernel/kgdb_64.c 2011-04-17 15:56:46.000000000 -0400
3954@@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
3955 {
3956 }
3957
3958-struct kgdb_arch arch_kgdb_ops = {
3959+const struct kgdb_arch arch_kgdb_ops = {
3960 /* Breakpoint instruction: ta 0x72 */
3961 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
3962 };
3963diff -urNp linux-2.6.32.41/arch/sparc/kernel/Makefile linux-2.6.32.41/arch/sparc/kernel/Makefile
3964--- linux-2.6.32.41/arch/sparc/kernel/Makefile 2011-03-27 14:31:47.000000000 -0400
3965+++ linux-2.6.32.41/arch/sparc/kernel/Makefile 2011-04-17 15:56:46.000000000 -0400
3966@@ -3,7 +3,7 @@
3967 #
3968
3969 asflags-y := -ansi
3970-ccflags-y := -Werror
3971+#ccflags-y := -Werror
3972
3973 extra-y := head_$(BITS).o
3974 extra-y += init_task.o
3975diff -urNp linux-2.6.32.41/arch/sparc/kernel/pci_sun4v.c linux-2.6.32.41/arch/sparc/kernel/pci_sun4v.c
3976--- linux-2.6.32.41/arch/sparc/kernel/pci_sun4v.c 2011-03-27 14:31:47.000000000 -0400
3977+++ linux-2.6.32.41/arch/sparc/kernel/pci_sun4v.c 2011-04-17 15:56:46.000000000 -0400
3978@@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct devic
3979 spin_unlock_irqrestore(&iommu->lock, flags);
3980 }
3981
3982-static struct dma_map_ops sun4v_dma_ops = {
3983+static const struct dma_map_ops sun4v_dma_ops = {
3984 .alloc_coherent = dma_4v_alloc_coherent,
3985 .free_coherent = dma_4v_free_coherent,
3986 .map_page = dma_4v_map_page,
3987diff -urNp linux-2.6.32.41/arch/sparc/kernel/process_32.c linux-2.6.32.41/arch/sparc/kernel/process_32.c
3988--- linux-2.6.32.41/arch/sparc/kernel/process_32.c 2011-03-27 14:31:47.000000000 -0400
3989+++ linux-2.6.32.41/arch/sparc/kernel/process_32.c 2011-04-17 15:56:46.000000000 -0400
3990@@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
3991 rw->ins[4], rw->ins[5],
3992 rw->ins[6],
3993 rw->ins[7]);
3994- printk("%pS\n", (void *) rw->ins[7]);
3995+ printk("%pA\n", (void *) rw->ins[7]);
3996 rw = (struct reg_window32 *) rw->ins[6];
3997 }
3998 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3999@@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
4000
4001 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
4002 r->psr, r->pc, r->npc, r->y, print_tainted());
4003- printk("PC: <%pS>\n", (void *) r->pc);
4004+ printk("PC: <%pA>\n", (void *) r->pc);
4005 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4006 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
4007 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
4008 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4009 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
4010 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
4011- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
4012+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
4013
4014 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4015 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
4016@@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk,
4017 rw = (struct reg_window32 *) fp;
4018 pc = rw->ins[7];
4019 printk("[%08lx : ", pc);
4020- printk("%pS ] ", (void *) pc);
4021+ printk("%pA ] ", (void *) pc);
4022 fp = rw->ins[6];
4023 } while (++count < 16);
4024 printk("\n");
4025diff -urNp linux-2.6.32.41/arch/sparc/kernel/process_64.c linux-2.6.32.41/arch/sparc/kernel/process_64.c
4026--- linux-2.6.32.41/arch/sparc/kernel/process_64.c 2011-03-27 14:31:47.000000000 -0400
4027+++ linux-2.6.32.41/arch/sparc/kernel/process_64.c 2011-04-17 15:56:46.000000000 -0400
4028@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
4029 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
4030 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
4031 if (regs->tstate & TSTATE_PRIV)
4032- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
4033+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
4034 }
4035
4036 void show_regs(struct pt_regs *regs)
4037 {
4038 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
4039 regs->tpc, regs->tnpc, regs->y, print_tainted());
4040- printk("TPC: <%pS>\n", (void *) regs->tpc);
4041+ printk("TPC: <%pA>\n", (void *) regs->tpc);
4042 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
4043 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
4044 regs->u_regs[3]);
4045@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
4046 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
4047 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
4048 regs->u_regs[15]);
4049- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
4050+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
4051 show_regwindow(regs);
4052 }
4053
4054@@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void
4055 ((tp && tp->task) ? tp->task->pid : -1));
4056
4057 if (gp->tstate & TSTATE_PRIV) {
4058- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
4059+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
4060 (void *) gp->tpc,
4061 (void *) gp->o7,
4062 (void *) gp->i7,
4063diff -urNp linux-2.6.32.41/arch/sparc/kernel/sys_sparc_32.c linux-2.6.32.41/arch/sparc/kernel/sys_sparc_32.c
4064--- linux-2.6.32.41/arch/sparc/kernel/sys_sparc_32.c 2011-03-27 14:31:47.000000000 -0400
4065+++ linux-2.6.32.41/arch/sparc/kernel/sys_sparc_32.c 2011-04-17 15:56:46.000000000 -0400
4066@@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(str
4067 if (ARCH_SUN4C && len > 0x20000000)
4068 return -ENOMEM;
4069 if (!addr)
4070- addr = TASK_UNMAPPED_BASE;
4071+ addr = current->mm->mmap_base;
4072
4073 if (flags & MAP_SHARED)
4074 addr = COLOUR_ALIGN(addr);
4075@@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
4076 }
4077 if (TASK_SIZE - PAGE_SIZE - len < addr)
4078 return -ENOMEM;
4079- if (!vmm || addr + len <= vmm->vm_start)
4080+ if (check_heap_stack_gap(vmm, addr, len))
4081 return addr;
4082 addr = vmm->vm_end;
4083 if (flags & MAP_SHARED)
4084diff -urNp linux-2.6.32.41/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.41/arch/sparc/kernel/sys_sparc_64.c
4085--- linux-2.6.32.41/arch/sparc/kernel/sys_sparc_64.c 2011-03-27 14:31:47.000000000 -0400
4086+++ linux-2.6.32.41/arch/sparc/kernel/sys_sparc_64.c 2011-04-17 15:56:46.000000000 -0400
4087@@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(str
4088 /* We do not accept a shared mapping if it would violate
4089 * cache aliasing constraints.
4090 */
4091- if ((flags & MAP_SHARED) &&
4092+ if ((filp || (flags & MAP_SHARED)) &&
4093 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4094 return -EINVAL;
4095 return addr;
4096@@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(str
4097 if (filp || (flags & MAP_SHARED))
4098 do_color_align = 1;
4099
4100+#ifdef CONFIG_PAX_RANDMMAP
4101+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4102+#endif
4103+
4104 if (addr) {
4105 if (do_color_align)
4106 addr = COLOUR_ALIGN(addr, pgoff);
4107@@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(str
4108 addr = PAGE_ALIGN(addr);
4109
4110 vma = find_vma(mm, addr);
4111- if (task_size - len >= addr &&
4112- (!vma || addr + len <= vma->vm_start))
4113+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4114 return addr;
4115 }
4116
4117 if (len > mm->cached_hole_size) {
4118- start_addr = addr = mm->free_area_cache;
4119+ start_addr = addr = mm->free_area_cache;
4120 } else {
4121- start_addr = addr = TASK_UNMAPPED_BASE;
4122+ start_addr = addr = mm->mmap_base;
4123 mm->cached_hole_size = 0;
4124 }
4125
4126@@ -175,14 +178,14 @@ full_search:
4127 vma = find_vma(mm, VA_EXCLUDE_END);
4128 }
4129 if (unlikely(task_size < addr)) {
4130- if (start_addr != TASK_UNMAPPED_BASE) {
4131- start_addr = addr = TASK_UNMAPPED_BASE;
4132+ if (start_addr != mm->mmap_base) {
4133+ start_addr = addr = mm->mmap_base;
4134 mm->cached_hole_size = 0;
4135 goto full_search;
4136 }
4137 return -ENOMEM;
4138 }
4139- if (likely(!vma || addr + len <= vma->vm_start)) {
4140+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4141 /*
4142 * Remember the place where we stopped the search:
4143 */
4144@@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct fi
4145 /* We do not accept a shared mapping if it would violate
4146 * cache aliasing constraints.
4147 */
4148- if ((flags & MAP_SHARED) &&
4149+ if ((filp || (flags & MAP_SHARED)) &&
4150 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4151 return -EINVAL;
4152 return addr;
4153@@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct fi
4154 addr = PAGE_ALIGN(addr);
4155
4156 vma = find_vma(mm, addr);
4157- if (task_size - len >= addr &&
4158- (!vma || addr + len <= vma->vm_start))
4159+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4160 return addr;
4161 }
4162
4163@@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct fi
4164 /* make sure it can fit in the remaining address space */
4165 if (likely(addr > len)) {
4166 vma = find_vma(mm, addr-len);
4167- if (!vma || addr <= vma->vm_start) {
4168+ if (check_heap_stack_gap(vma, addr - len, len)) {
4169 /* remember the address as a hint for next time */
4170 return (mm->free_area_cache = addr-len);
4171 }
4172@@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct fi
4173 if (unlikely(mm->mmap_base < len))
4174 goto bottomup;
4175
4176- addr = mm->mmap_base-len;
4177- if (do_color_align)
4178- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4179+ addr = mm->mmap_base - len;
4180
4181 do {
4182+ if (do_color_align)
4183+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4184 /*
4185 * Lookup failure means no vma is above this address,
4186 * else if new region fits below vma->vm_start,
4187 * return with success:
4188 */
4189 vma = find_vma(mm, addr);
4190- if (likely(!vma || addr+len <= vma->vm_start)) {
4191+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4192 /* remember the address as a hint for next time */
4193 return (mm->free_area_cache = addr);
4194 }
4195@@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct fi
4196 mm->cached_hole_size = vma->vm_start - addr;
4197
4198 /* try just below the current vma->vm_start */
4199- addr = vma->vm_start-len;
4200- if (do_color_align)
4201- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4202- } while (likely(len < vma->vm_start));
4203+ addr = skip_heap_stack_gap(vma, len);
4204+ } while (!IS_ERR_VALUE(addr));
4205
4206 bottomup:
4207 /*
4208@@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_str
4209 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
4210 sysctl_legacy_va_layout) {
4211 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4212+
4213+#ifdef CONFIG_PAX_RANDMMAP
4214+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4215+ mm->mmap_base += mm->delta_mmap;
4216+#endif
4217+
4218 mm->get_unmapped_area = arch_get_unmapped_area;
4219 mm->unmap_area = arch_unmap_area;
4220 } else {
4221@@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_str
4222 gap = (task_size / 6 * 5);
4223
4224 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4225+
4226+#ifdef CONFIG_PAX_RANDMMAP
4227+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4228+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4229+#endif
4230+
4231 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4232 mm->unmap_area = arch_unmap_area_topdown;
4233 }
4234diff -urNp linux-2.6.32.41/arch/sparc/kernel/traps_32.c linux-2.6.32.41/arch/sparc/kernel/traps_32.c
4235--- linux-2.6.32.41/arch/sparc/kernel/traps_32.c 2011-03-27 14:31:47.000000000 -0400
4236+++ linux-2.6.32.41/arch/sparc/kernel/traps_32.c 2011-04-17 15:56:46.000000000 -0400
4237@@ -76,7 +76,7 @@ void die_if_kernel(char *str, struct pt_
4238 count++ < 30 &&
4239 (((unsigned long) rw) >= PAGE_OFFSET) &&
4240 !(((unsigned long) rw) & 0x7)) {
4241- printk("Caller[%08lx]: %pS\n", rw->ins[7],
4242+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
4243 (void *) rw->ins[7]);
4244 rw = (struct reg_window32 *)rw->ins[6];
4245 }
4246diff -urNp linux-2.6.32.41/arch/sparc/kernel/traps_64.c linux-2.6.32.41/arch/sparc/kernel/traps_64.c
4247--- linux-2.6.32.41/arch/sparc/kernel/traps_64.c 2011-03-27 14:31:47.000000000 -0400
4248+++ linux-2.6.32.41/arch/sparc/kernel/traps_64.c 2011-04-17 15:56:46.000000000 -0400
4249@@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_
4250 i + 1,
4251 p->trapstack[i].tstate, p->trapstack[i].tpc,
4252 p->trapstack[i].tnpc, p->trapstack[i].tt);
4253- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4254+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4255 }
4256 }
4257
4258@@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long
4259
4260 lvl -= 0x100;
4261 if (regs->tstate & TSTATE_PRIV) {
4262+
4263+#ifdef CONFIG_PAX_REFCOUNT
4264+ if (lvl == 6)
4265+ pax_report_refcount_overflow(regs);
4266+#endif
4267+
4268 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4269 die_if_kernel(buffer, regs);
4270 }
4271@@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long
4272 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4273 {
4274 char buffer[32];
4275-
4276+
4277 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4278 0, lvl, SIGTRAP) == NOTIFY_STOP)
4279 return;
4280
4281+#ifdef CONFIG_PAX_REFCOUNT
4282+ if (lvl == 6)
4283+ pax_report_refcount_overflow(regs);
4284+#endif
4285+
4286 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4287
4288 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4289@@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt
4290 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4291 printk("%s" "ERROR(%d): ",
4292 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4293- printk("TPC<%pS>\n", (void *) regs->tpc);
4294+ printk("TPC<%pA>\n", (void *) regs->tpc);
4295 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4296 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4297 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4298@@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type,
4299 smp_processor_id(),
4300 (type & 0x1) ? 'I' : 'D',
4301 regs->tpc);
4302- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4303+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4304 panic("Irrecoverable Cheetah+ parity error.");
4305 }
4306
4307@@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type,
4308 smp_processor_id(),
4309 (type & 0x1) ? 'I' : 'D',
4310 regs->tpc);
4311- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4312+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4313 }
4314
4315 struct sun4v_error_entry {
4316@@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_r
4317
4318 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4319 regs->tpc, tl);
4320- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4321+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4322 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4323- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4324+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4325 (void *) regs->u_regs[UREG_I7]);
4326 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4327 "pte[%lx] error[%lx]\n",
4328@@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_r
4329
4330 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4331 regs->tpc, tl);
4332- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4333+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4334 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4335- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4336+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4337 (void *) regs->u_regs[UREG_I7]);
4338 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4339 "pte[%lx] error[%lx]\n",
4340@@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk,
4341 fp = (unsigned long)sf->fp + STACK_BIAS;
4342 }
4343
4344- printk(" [%016lx] %pS\n", pc, (void *) pc);
4345+ printk(" [%016lx] %pA\n", pc, (void *) pc);
4346 } while (++count < 16);
4347 }
4348
4349@@ -2260,7 +2271,7 @@ void die_if_kernel(char *str, struct pt_
4350 while (rw &&
4351 count++ < 30&&
4352 is_kernel_stack(current, rw)) {
4353- printk("Caller[%016lx]: %pS\n", rw->ins[7],
4354+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
4355 (void *) rw->ins[7]);
4356
4357 rw = kernel_stack_up(rw);
4358diff -urNp linux-2.6.32.41/arch/sparc/kernel/unaligned_64.c linux-2.6.32.41/arch/sparc/kernel/unaligned_64.c
4359--- linux-2.6.32.41/arch/sparc/kernel/unaligned_64.c 2011-03-27 14:31:47.000000000 -0400
4360+++ linux-2.6.32.41/arch/sparc/kernel/unaligned_64.c 2011-04-17 15:56:46.000000000 -0400
4361@@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs
4362 if (count < 5) {
4363 last_time = jiffies;
4364 count++;
4365- printk("Kernel unaligned access at TPC[%lx] %pS\n",
4366+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
4367 regs->tpc, (void *) regs->tpc);
4368 }
4369 }
4370diff -urNp linux-2.6.32.41/arch/sparc/lib/atomic_64.S linux-2.6.32.41/arch/sparc/lib/atomic_64.S
4371--- linux-2.6.32.41/arch/sparc/lib/atomic_64.S 2011-03-27 14:31:47.000000000 -0400
4372+++ linux-2.6.32.41/arch/sparc/lib/atomic_64.S 2011-04-17 15:56:46.000000000 -0400
4373@@ -18,7 +18,12 @@
4374 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4375 BACKOFF_SETUP(%o2)
4376 1: lduw [%o1], %g1
4377- add %g1, %o0, %g7
4378+ addcc %g1, %o0, %g7
4379+
4380+#ifdef CONFIG_PAX_REFCOUNT
4381+ tvs %icc, 6
4382+#endif
4383+
4384 cas [%o1], %g1, %g7
4385 cmp %g1, %g7
4386 bne,pn %icc, 2f
4387@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
4388 2: BACKOFF_SPIN(%o2, %o3, 1b)
4389 .size atomic_add, .-atomic_add
4390
4391+ .globl atomic_add_unchecked
4392+ .type atomic_add_unchecked,#function
4393+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4394+ BACKOFF_SETUP(%o2)
4395+1: lduw [%o1], %g1
4396+ add %g1, %o0, %g7
4397+ cas [%o1], %g1, %g7
4398+ cmp %g1, %g7
4399+ bne,pn %icc, 2f
4400+ nop
4401+ retl
4402+ nop
4403+2: BACKOFF_SPIN(%o2, %o3, 1b)
4404+ .size atomic_add_unchecked, .-atomic_add_unchecked
4405+
4406 .globl atomic_sub
4407 .type atomic_sub,#function
4408 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4409 BACKOFF_SETUP(%o2)
4410 1: lduw [%o1], %g1
4411- sub %g1, %o0, %g7
4412+ subcc %g1, %o0, %g7
4413+
4414+#ifdef CONFIG_PAX_REFCOUNT
4415+ tvs %icc, 6
4416+#endif
4417+
4418 cas [%o1], %g1, %g7
4419 cmp %g1, %g7
4420 bne,pn %icc, 2f
4421@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
4422 2: BACKOFF_SPIN(%o2, %o3, 1b)
4423 .size atomic_sub, .-atomic_sub
4424
4425+ .globl atomic_sub_unchecked
4426+ .type atomic_sub_unchecked,#function
4427+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4428+ BACKOFF_SETUP(%o2)
4429+1: lduw [%o1], %g1
4430+ sub %g1, %o0, %g7
4431+ cas [%o1], %g1, %g7
4432+ cmp %g1, %g7
4433+ bne,pn %icc, 2f
4434+ nop
4435+ retl
4436+ nop
4437+2: BACKOFF_SPIN(%o2, %o3, 1b)
4438+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
4439+
4440 .globl atomic_add_ret
4441 .type atomic_add_ret,#function
4442 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4443 BACKOFF_SETUP(%o2)
4444 1: lduw [%o1], %g1
4445- add %g1, %o0, %g7
4446+ addcc %g1, %o0, %g7
4447+
4448+#ifdef CONFIG_PAX_REFCOUNT
4449+ tvs %icc, 6
4450+#endif
4451+
4452 cas [%o1], %g1, %g7
4453 cmp %g1, %g7
4454 bne,pn %icc, 2f
4455@@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1
4456 2: BACKOFF_SPIN(%o2, %o3, 1b)
4457 .size atomic_add_ret, .-atomic_add_ret
4458
4459+ .globl atomic_add_ret_unchecked
4460+ .type atomic_add_ret_unchecked,#function
4461+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4462+ BACKOFF_SETUP(%o2)
4463+1: lduw [%o1], %g1
4464+ addcc %g1, %o0, %g7
4465+ cas [%o1], %g1, %g7
4466+ cmp %g1, %g7
4467+ bne,pn %icc, 2f
4468+ add %g7, %o0, %g7
4469+ sra %g7, 0, %o0
4470+ retl
4471+ nop
4472+2: BACKOFF_SPIN(%o2, %o3, 1b)
4473+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4474+
4475 .globl atomic_sub_ret
4476 .type atomic_sub_ret,#function
4477 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4478 BACKOFF_SETUP(%o2)
4479 1: lduw [%o1], %g1
4480- sub %g1, %o0, %g7
4481+ subcc %g1, %o0, %g7
4482+
4483+#ifdef CONFIG_PAX_REFCOUNT
4484+ tvs %icc, 6
4485+#endif
4486+
4487 cas [%o1], %g1, %g7
4488 cmp %g1, %g7
4489 bne,pn %icc, 2f
4490@@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
4491 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4492 BACKOFF_SETUP(%o2)
4493 1: ldx [%o1], %g1
4494- add %g1, %o0, %g7
4495+ addcc %g1, %o0, %g7
4496+
4497+#ifdef CONFIG_PAX_REFCOUNT
4498+ tvs %xcc, 6
4499+#endif
4500+
4501 casx [%o1], %g1, %g7
4502 cmp %g1, %g7
4503 bne,pn %xcc, 2f
4504@@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 =
4505 2: BACKOFF_SPIN(%o2, %o3, 1b)
4506 .size atomic64_add, .-atomic64_add
4507
4508+ .globl atomic64_add_unchecked
4509+ .type atomic64_add_unchecked,#function
4510+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4511+ BACKOFF_SETUP(%o2)
4512+1: ldx [%o1], %g1
4513+ addcc %g1, %o0, %g7
4514+ casx [%o1], %g1, %g7
4515+ cmp %g1, %g7
4516+ bne,pn %xcc, 2f
4517+ nop
4518+ retl
4519+ nop
4520+2: BACKOFF_SPIN(%o2, %o3, 1b)
4521+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
4522+
4523 .globl atomic64_sub
4524 .type atomic64_sub,#function
4525 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4526 BACKOFF_SETUP(%o2)
4527 1: ldx [%o1], %g1
4528- sub %g1, %o0, %g7
4529+ subcc %g1, %o0, %g7
4530+
4531+#ifdef CONFIG_PAX_REFCOUNT
4532+ tvs %xcc, 6
4533+#endif
4534+
4535 casx [%o1], %g1, %g7
4536 cmp %g1, %g7
4537 bne,pn %xcc, 2f
4538@@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
4539 2: BACKOFF_SPIN(%o2, %o3, 1b)
4540 .size atomic64_sub, .-atomic64_sub
4541
4542+ .globl atomic64_sub_unchecked
4543+ .type atomic64_sub_unchecked,#function
4544+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4545+ BACKOFF_SETUP(%o2)
4546+1: ldx [%o1], %g1
4547+ subcc %g1, %o0, %g7
4548+ casx [%o1], %g1, %g7
4549+ cmp %g1, %g7
4550+ bne,pn %xcc, 2f
4551+ nop
4552+ retl
4553+ nop
4554+2: BACKOFF_SPIN(%o2, %o3, 1b)
4555+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4556+
4557 .globl atomic64_add_ret
4558 .type atomic64_add_ret,#function
4559 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4560 BACKOFF_SETUP(%o2)
4561 1: ldx [%o1], %g1
4562- add %g1, %o0, %g7
4563+ addcc %g1, %o0, %g7
4564+
4565+#ifdef CONFIG_PAX_REFCOUNT
4566+ tvs %xcc, 6
4567+#endif
4568+
4569 casx [%o1], %g1, %g7
4570 cmp %g1, %g7
4571 bne,pn %xcc, 2f
4572@@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4573 2: BACKOFF_SPIN(%o2, %o3, 1b)
4574 .size atomic64_add_ret, .-atomic64_add_ret
4575
4576+ .globl atomic64_add_ret_unchecked
4577+ .type atomic64_add_ret_unchecked,#function
4578+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4579+ BACKOFF_SETUP(%o2)
4580+1: ldx [%o1], %g1
4581+ addcc %g1, %o0, %g7
4582+ casx [%o1], %g1, %g7
4583+ cmp %g1, %g7
4584+ bne,pn %xcc, 2f
4585+ add %g7, %o0, %g7
4586+ mov %g7, %o0
4587+ retl
4588+ nop
4589+2: BACKOFF_SPIN(%o2, %o3, 1b)
4590+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4591+
4592 .globl atomic64_sub_ret
4593 .type atomic64_sub_ret,#function
4594 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4595 BACKOFF_SETUP(%o2)
4596 1: ldx [%o1], %g1
4597- sub %g1, %o0, %g7
4598+ subcc %g1, %o0, %g7
4599+
4600+#ifdef CONFIG_PAX_REFCOUNT
4601+ tvs %xcc, 6
4602+#endif
4603+
4604 casx [%o1], %g1, %g7
4605 cmp %g1, %g7
4606 bne,pn %xcc, 2f
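
The pattern throughout this file is the same: the checked atomics switch to addcc/subcc so the condition codes reflect signed overflow, and tvs %icc, 6 (or %xcc for the 64-bit forms) raises software trap 6 on overflow, which the traps_64.c hunks earlier route to pax_report_refcount_overflow; the new _unchecked variants keep the old non-trapping add/sub for counters that are allowed to wrap. A rough C analogy of that split, using GCC/Clang's __builtin_sadd_overflow as a stand-in for the trap (nothing below is actually atomic; only the checked vs. unchecked distinction is the point):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* checked: refuses to let a signed reference count wrap */
static int atomic_add_checked(int *v, int inc)
{
	int sum;

	if (__builtin_sadd_overflow(*v, inc, &sum)) {
		fprintf(stderr, "refcount overflow detected\n");
		abort();	/* stand-in for the trap-6 / pax_report path */
	}
	return *v = sum;
}

/* unchecked: plain wrapping add, for counters allowed to overflow */
static int atomic_add_unchecked(int *v, int inc)
{
	return *v = (int)((unsigned int)*v + (unsigned int)inc);
}

int main(void)
{
	int refcount = INT_MAX - 1;

	atomic_add_unchecked(&refcount, 1);	/* reaches INT_MAX, no complaint */
	atomic_add_checked(&refcount, 1);	/* would wrap: trips the check */
	return 0;
}
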
4607diff -urNp linux-2.6.32.41/arch/sparc/lib/ksyms.c linux-2.6.32.41/arch/sparc/lib/ksyms.c
4608--- linux-2.6.32.41/arch/sparc/lib/ksyms.c 2011-03-27 14:31:47.000000000 -0400
4609+++ linux-2.6.32.41/arch/sparc/lib/ksyms.c 2011-04-17 15:56:46.000000000 -0400
4610@@ -144,12 +144,17 @@ EXPORT_SYMBOL(__downgrade_write);
4611
4612 /* Atomic counter implementation. */
4613 EXPORT_SYMBOL(atomic_add);
4614+EXPORT_SYMBOL(atomic_add_unchecked);
4615 EXPORT_SYMBOL(atomic_add_ret);
4616 EXPORT_SYMBOL(atomic_sub);
4617+EXPORT_SYMBOL(atomic_sub_unchecked);
4618 EXPORT_SYMBOL(atomic_sub_ret);
4619 EXPORT_SYMBOL(atomic64_add);
4620+EXPORT_SYMBOL(atomic64_add_unchecked);
4621 EXPORT_SYMBOL(atomic64_add_ret);
4622+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4623 EXPORT_SYMBOL(atomic64_sub);
4624+EXPORT_SYMBOL(atomic64_sub_unchecked);
4625 EXPORT_SYMBOL(atomic64_sub_ret);
4626
4627 /* Atomic bit operations. */
4628diff -urNp linux-2.6.32.41/arch/sparc/lib/Makefile linux-2.6.32.41/arch/sparc/lib/Makefile
4629--- linux-2.6.32.41/arch/sparc/lib/Makefile 2011-03-27 14:31:47.000000000 -0400
4630+++ linux-2.6.32.41/arch/sparc/lib/Makefile 2011-05-17 19:26:34.000000000 -0400
4631@@ -2,7 +2,7 @@
4632 #
4633
4634 asflags-y := -ansi -DST_DIV0=0x02
4635-ccflags-y := -Werror
4636+#ccflags-y := -Werror
4637
4638 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4639 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4640diff -urNp linux-2.6.32.41/arch/sparc/lib/rwsem_64.S linux-2.6.32.41/arch/sparc/lib/rwsem_64.S
4641--- linux-2.6.32.41/arch/sparc/lib/rwsem_64.S 2011-03-27 14:31:47.000000000 -0400
4642+++ linux-2.6.32.41/arch/sparc/lib/rwsem_64.S 2011-04-17 15:56:46.000000000 -0400
4643@@ -11,7 +11,12 @@
4644 .globl __down_read
4645 __down_read:
4646 1: lduw [%o0], %g1
4647- add %g1, 1, %g7
4648+ addcc %g1, 1, %g7
4649+
4650+#ifdef CONFIG_PAX_REFCOUNT
4651+ tvs %icc, 6
4652+#endif
4653+
4654 cas [%o0], %g1, %g7
4655 cmp %g1, %g7
4656 bne,pn %icc, 1b
4657@@ -33,7 +38,12 @@ __down_read:
4658 .globl __down_read_trylock
4659 __down_read_trylock:
4660 1: lduw [%o0], %g1
4661- add %g1, 1, %g7
4662+ addcc %g1, 1, %g7
4663+
4664+#ifdef CONFIG_PAX_REFCOUNT
4665+ tvs %icc, 6
4666+#endif
4667+
4668 cmp %g7, 0
4669 bl,pn %icc, 2f
4670 mov 0, %o1
4671@@ -51,7 +61,12 @@ __down_write:
4672 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
4673 1:
4674 lduw [%o0], %g3
4675- add %g3, %g1, %g7
4676+ addcc %g3, %g1, %g7
4677+
4678+#ifdef CONFIG_PAX_REFCOUNT
4679+ tvs %icc, 6
4680+#endif
4681+
4682 cas [%o0], %g3, %g7
4683 cmp %g3, %g7
4684 bne,pn %icc, 1b
4685@@ -77,7 +92,12 @@ __down_write_trylock:
4686 cmp %g3, 0
4687 bne,pn %icc, 2f
4688 mov 0, %o1
4689- add %g3, %g1, %g7
4690+ addcc %g3, %g1, %g7
4691+
4692+#ifdef CONFIG_PAX_REFCOUNT
4693+ tvs %icc, 6
4694+#endif
4695+
4696 cas [%o0], %g3, %g7
4697 cmp %g3, %g7
4698 bne,pn %icc, 1b
4699@@ -90,7 +110,12 @@ __down_write_trylock:
4700 __up_read:
4701 1:
4702 lduw [%o0], %g1
4703- sub %g1, 1, %g7
4704+ subcc %g1, 1, %g7
4705+
4706+#ifdef CONFIG_PAX_REFCOUNT
4707+ tvs %icc, 6
4708+#endif
4709+
4710 cas [%o0], %g1, %g7
4711 cmp %g1, %g7
4712 bne,pn %icc, 1b
4713@@ -118,7 +143,12 @@ __up_write:
4714 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
4715 1:
4716 lduw [%o0], %g3
4717- sub %g3, %g1, %g7
4718+ subcc %g3, %g1, %g7
4719+
4720+#ifdef CONFIG_PAX_REFCOUNT
4721+ tvs %icc, 6
4722+#endif
4723+
4724 cas [%o0], %g3, %g7
4725 cmp %g3, %g7
4726 bne,pn %icc, 1b
4727@@ -143,7 +173,12 @@ __downgrade_write:
4728 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
4729 1:
4730 lduw [%o0], %g3
4731- sub %g3, %g1, %g7
4732+ subcc %g3, %g1, %g7
4733+
4734+#ifdef CONFIG_PAX_REFCOUNT
4735+ tvs %icc, 6
4736+#endif
4737+
4738 cas [%o0], %g3, %g7
4739 cmp %g3, %g7
4740 bne,pn %icc, 1b
4741diff -urNp linux-2.6.32.41/arch/sparc/Makefile linux-2.6.32.41/arch/sparc/Makefile
4742--- linux-2.6.32.41/arch/sparc/Makefile 2011-03-27 14:31:47.000000000 -0400
4743+++ linux-2.6.32.41/arch/sparc/Makefile 2011-04-17 15:56:46.000000000 -0400
4744@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
4745 # Export what is needed by arch/sparc/boot/Makefile
4746 export VMLINUX_INIT VMLINUX_MAIN
4747 VMLINUX_INIT := $(head-y) $(init-y)
4748-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4749+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4750 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4751 VMLINUX_MAIN += $(drivers-y) $(net-y)
4752
4753diff -urNp linux-2.6.32.41/arch/sparc/mm/fault_32.c linux-2.6.32.41/arch/sparc/mm/fault_32.c
4754--- linux-2.6.32.41/arch/sparc/mm/fault_32.c 2011-03-27 14:31:47.000000000 -0400
4755+++ linux-2.6.32.41/arch/sparc/mm/fault_32.c 2011-04-17 15:56:46.000000000 -0400
4756@@ -21,6 +21,9 @@
4757 #include <linux/interrupt.h>
4758 #include <linux/module.h>
4759 #include <linux/kdebug.h>
4760+#include <linux/slab.h>
4761+#include <linux/pagemap.h>
4762+#include <linux/compiler.h>
4763
4764 #include <asm/system.h>
4765 #include <asm/page.h>
4766@@ -167,6 +170,267 @@ static unsigned long compute_si_addr(str
4767 return safe_compute_effective_address(regs, insn);
4768 }
4769
4770+#ifdef CONFIG_PAX_PAGEEXEC
4771+#ifdef CONFIG_PAX_DLRESOLVE
4772+static void pax_emuplt_close(struct vm_area_struct *vma)
4773+{
4774+ vma->vm_mm->call_dl_resolve = 0UL;
4775+}
4776+
4777+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4778+{
4779+ unsigned int *kaddr;
4780+
4781+ vmf->page = alloc_page(GFP_HIGHUSER);
4782+ if (!vmf->page)
4783+ return VM_FAULT_OOM;
4784+
4785+ kaddr = kmap(vmf->page);
4786+ memset(kaddr, 0, PAGE_SIZE);
4787+ kaddr[0] = 0x9DE3BFA8U; /* save */
4788+ flush_dcache_page(vmf->page);
4789+ kunmap(vmf->page);
4790+ return VM_FAULT_MAJOR;
4791+}
4792+
4793+static const struct vm_operations_struct pax_vm_ops = {
4794+ .close = pax_emuplt_close,
4795+ .fault = pax_emuplt_fault
4796+};
4797+
4798+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4799+{
4800+ int ret;
4801+
4802+ vma->vm_mm = current->mm;
4803+ vma->vm_start = addr;
4804+ vma->vm_end = addr + PAGE_SIZE;
4805+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4806+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4807+ vma->vm_ops = &pax_vm_ops;
4808+
4809+ ret = insert_vm_struct(current->mm, vma);
4810+ if (ret)
4811+ return ret;
4812+
4813+ ++current->mm->total_vm;
4814+ return 0;
4815+}
4816+#endif
4817+
4818+/*
4819+ * PaX: decide what to do with offenders (regs->pc = fault address)
4820+ *
4821+ * returns 1 when task should be killed
4822+ * 2 when patched PLT trampoline was detected
4823+ * 3 when unpatched PLT trampoline was detected
4824+ */
4825+static int pax_handle_fetch_fault(struct pt_regs *regs)
4826+{
4827+
4828+#ifdef CONFIG_PAX_EMUPLT
4829+ int err;
4830+
4831+ do { /* PaX: patched PLT emulation #1 */
4832+ unsigned int sethi1, sethi2, jmpl;
4833+
4834+ err = get_user(sethi1, (unsigned int *)regs->pc);
4835+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4836+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4837+
4838+ if (err)
4839+ break;
4840+
4841+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4842+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
4843+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
4844+ {
4845+ unsigned int addr;
4846+
4847+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4848+ addr = regs->u_regs[UREG_G1];
4849+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4850+ regs->pc = addr;
4851+ regs->npc = addr+4;
4852+ return 2;
4853+ }
4854+ } while (0);
4855+
4856+ { /* PaX: patched PLT emulation #2 */
4857+ unsigned int ba;
4858+
4859+ err = get_user(ba, (unsigned int *)regs->pc);
4860+
4861+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4862+ unsigned int addr;
4863+
4864+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4865+ regs->pc = addr;
4866+ regs->npc = addr+4;
4867+ return 2;
4868+ }
4869+ }
4870+
4871+ do { /* PaX: patched PLT emulation #3 */
4872+ unsigned int sethi, jmpl, nop;
4873+
4874+ err = get_user(sethi, (unsigned int *)regs->pc);
4875+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4876+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4877+
4878+ if (err)
4879+ break;
4880+
4881+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4882+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4883+ nop == 0x01000000U)
4884+ {
4885+ unsigned int addr;
4886+
4887+ addr = (sethi & 0x003FFFFFU) << 10;
4888+ regs->u_regs[UREG_G1] = addr;
4889+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4890+ regs->pc = addr;
4891+ regs->npc = addr+4;
4892+ return 2;
4893+ }
4894+ } while (0);
4895+
4896+ do { /* PaX: unpatched PLT emulation step 1 */
4897+ unsigned int sethi, ba, nop;
4898+
4899+ err = get_user(sethi, (unsigned int *)regs->pc);
4900+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
4901+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
4902+
4903+ if (err)
4904+ break;
4905+
4906+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
4907+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4908+ nop == 0x01000000U)
4909+ {
4910+ unsigned int addr, save, call;
4911+
4912+ if ((ba & 0xFFC00000U) == 0x30800000U)
4913+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4914+ else
4915+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4916+
4917+ err = get_user(save, (unsigned int *)addr);
4918+ err |= get_user(call, (unsigned int *)(addr+4));
4919+ err |= get_user(nop, (unsigned int *)(addr+8));
4920+ if (err)
4921+ break;
4922+
4923+#ifdef CONFIG_PAX_DLRESOLVE
4924+ if (save == 0x9DE3BFA8U &&
4925+ (call & 0xC0000000U) == 0x40000000U &&
4926+ nop == 0x01000000U)
4927+ {
4928+ struct vm_area_struct *vma;
4929+ unsigned long call_dl_resolve;
4930+
4931+ down_read(&current->mm->mmap_sem);
4932+ call_dl_resolve = current->mm->call_dl_resolve;
4933+ up_read(&current->mm->mmap_sem);
4934+ if (likely(call_dl_resolve))
4935+ goto emulate;
4936+
4937+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4938+
4939+ down_write(&current->mm->mmap_sem);
4940+ if (current->mm->call_dl_resolve) {
4941+ call_dl_resolve = current->mm->call_dl_resolve;
4942+ up_write(&current->mm->mmap_sem);
4943+ if (vma)
4944+ kmem_cache_free(vm_area_cachep, vma);
4945+ goto emulate;
4946+ }
4947+
4948+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4949+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4950+ up_write(&current->mm->mmap_sem);
4951+ if (vma)
4952+ kmem_cache_free(vm_area_cachep, vma);
4953+ return 1;
4954+ }
4955+
4956+ if (pax_insert_vma(vma, call_dl_resolve)) {
4957+ up_write(&current->mm->mmap_sem);
4958+ kmem_cache_free(vm_area_cachep, vma);
4959+ return 1;
4960+ }
4961+
4962+ current->mm->call_dl_resolve = call_dl_resolve;
4963+ up_write(&current->mm->mmap_sem);
4964+
4965+emulate:
4966+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4967+ regs->pc = call_dl_resolve;
4968+ regs->npc = addr+4;
4969+ return 3;
4970+ }
4971+#endif
4972+
4973+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4974+ if ((save & 0xFFC00000U) == 0x05000000U &&
4975+ (call & 0xFFFFE000U) == 0x85C0A000U &&
4976+ nop == 0x01000000U)
4977+ {
4978+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4979+ regs->u_regs[UREG_G2] = addr + 4;
4980+ addr = (save & 0x003FFFFFU) << 10;
4981+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4982+ regs->pc = addr;
4983+ regs->npc = addr+4;
4984+ return 3;
4985+ }
4986+ }
4987+ } while (0);
4988+
4989+ do { /* PaX: unpatched PLT emulation step 2 */
4990+ unsigned int save, call, nop;
4991+
4992+ err = get_user(save, (unsigned int *)(regs->pc-4));
4993+ err |= get_user(call, (unsigned int *)regs->pc);
4994+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
4995+ if (err)
4996+ break;
4997+
4998+ if (save == 0x9DE3BFA8U &&
4999+ (call & 0xC0000000U) == 0x40000000U &&
5000+ nop == 0x01000000U)
5001+ {
5002+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
5003+
5004+ regs->u_regs[UREG_RETPC] = regs->pc;
5005+ regs->pc = dl_resolve;
5006+ regs->npc = dl_resolve+4;
5007+ return 3;
5008+ }
5009+ } while (0);
5010+#endif
5011+
5012+ return 1;
5013+}
5014+
5015+void pax_report_insns(void *pc, void *sp)
5016+{
5017+ unsigned long i;
5018+
5019+ printk(KERN_ERR "PAX: bytes at PC: ");
5020+ for (i = 0; i < 8; i++) {
5021+ unsigned int c;
5022+ if (get_user(c, (unsigned int *)pc+i))
5023+ printk(KERN_CONT "???????? ");
5024+ else
5025+ printk(KERN_CONT "%08x ", c);
5026+ }
5027+ printk("\n");
5028+}
5029+#endif
5030+
5031 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
5032 unsigned long address)
5033 {
5034@@ -231,6 +495,24 @@ good_area:
5035 if(!(vma->vm_flags & VM_WRITE))
5036 goto bad_area;
5037 } else {
5038+
5039+#ifdef CONFIG_PAX_PAGEEXEC
5040+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
5041+ up_read(&mm->mmap_sem);
5042+ switch (pax_handle_fetch_fault(regs)) {
5043+
5044+#ifdef CONFIG_PAX_EMUPLT
5045+ case 2:
5046+ case 3:
5047+ return;
5048+#endif
5049+
5050+ }
5051+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
5052+ do_group_exit(SIGKILL);
5053+ }
5054+#endif
5055+
5056 /* Allow reads even for write-only mappings */
5057 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
5058 goto bad_area;
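
The emulation above matches raw SPARC instruction words and re-derives the target address by hand: a sethi matching (insn & 0xFFC00000U) == 0x03000000U carries bits 31..10 of its constant in the low 22 bits, and the 13-bit jmpl immediate is sign-extended with the ((x | 0xFFFFE000) ^ 0x1000) + 0x1000 idiom. A small self-contained decode of both fields, with made-up instruction words:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* upper 22 bits of the constant loaded by "sethi %hi(X), %g1" */
static uint32_t sethi_imm22(uint32_t insn)
{
	assert((insn & 0xFFC00000U) == 0x03000000U);
	return (insn & 0x003FFFFFU) << 10;
}

/* sign-extended 13-bit immediate of a jmpl, same idiom as the handler above */
static int32_t jmpl_simm13(uint32_t insn)
{
	return (int32_t)(((insn | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
}

int main(void)
{
	uint32_t sethi = 0x03000000U | 0x00048D15U; /* sethi %hi(0x12345400), %g1 */
	uint32_t jmpl  = 0x81C06000U | 0x00001FF8U; /* jmpl %g1 - 8, %g0 (simm13 = -8) */

	printf("sethi constant: %#x\n", sethi_imm22(sethi)); /* 0x12345400 */
	printf("jmpl  simm13:   %d\n", jmpl_simm13(jmpl));   /* -8 */
	return 0;
}

The same idiom, with wider masks, handles the 19- and 22-bit branch displacements in the 64-bit handler that follows.
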
5059diff -urNp linux-2.6.32.41/arch/sparc/mm/fault_64.c linux-2.6.32.41/arch/sparc/mm/fault_64.c
5060--- linux-2.6.32.41/arch/sparc/mm/fault_64.c 2011-03-27 14:31:47.000000000 -0400
5061+++ linux-2.6.32.41/arch/sparc/mm/fault_64.c 2011-04-17 15:56:46.000000000 -0400
5062@@ -20,6 +20,9 @@
5063 #include <linux/kprobes.h>
5064 #include <linux/kdebug.h>
5065 #include <linux/percpu.h>
5066+#include <linux/slab.h>
5067+#include <linux/pagemap.h>
5068+#include <linux/compiler.h>
5069
5070 #include <asm/page.h>
5071 #include <asm/pgtable.h>
5072@@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs
5073 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
5074 regs->tpc);
5075 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
5076- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
5077+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
5078 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
5079 dump_stack();
5080 unhandled_fault(regs->tpc, current, regs);
5081@@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_a
5082 show_regs(regs);
5083 }
5084
5085+#ifdef CONFIG_PAX_PAGEEXEC
5086+#ifdef CONFIG_PAX_DLRESOLVE
5087+static void pax_emuplt_close(struct vm_area_struct *vma)
5088+{
5089+ vma->vm_mm->call_dl_resolve = 0UL;
5090+}
5091+
5092+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5093+{
5094+ unsigned int *kaddr;
5095+
5096+ vmf->page = alloc_page(GFP_HIGHUSER);
5097+ if (!vmf->page)
5098+ return VM_FAULT_OOM;
5099+
5100+ kaddr = kmap(vmf->page);
5101+ memset(kaddr, 0, PAGE_SIZE);
5102+ kaddr[0] = 0x9DE3BFA8U; /* save */
5103+ flush_dcache_page(vmf->page);
5104+ kunmap(vmf->page);
5105+ return VM_FAULT_MAJOR;
5106+}
5107+
5108+static const struct vm_operations_struct pax_vm_ops = {
5109+ .close = pax_emuplt_close,
5110+ .fault = pax_emuplt_fault
5111+};
5112+
5113+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5114+{
5115+ int ret;
5116+
5117+ vma->vm_mm = current->mm;
5118+ vma->vm_start = addr;
5119+ vma->vm_end = addr + PAGE_SIZE;
5120+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5121+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5122+ vma->vm_ops = &pax_vm_ops;
5123+
5124+ ret = insert_vm_struct(current->mm, vma);
5125+ if (ret)
5126+ return ret;
5127+
5128+ ++current->mm->total_vm;
5129+ return 0;
5130+}
5131+#endif
5132+
5133+/*
5134+ * PaX: decide what to do with offenders (regs->tpc = fault address)
5135+ *
5136+ * returns 1 when task should be killed
5137+ * 2 when patched PLT trampoline was detected
5138+ * 3 when unpatched PLT trampoline was detected
5139+ */
5140+static int pax_handle_fetch_fault(struct pt_regs *regs)
5141+{
5142+
5143+#ifdef CONFIG_PAX_EMUPLT
5144+ int err;
5145+
5146+ do { /* PaX: patched PLT emulation #1 */
5147+ unsigned int sethi1, sethi2, jmpl;
5148+
5149+ err = get_user(sethi1, (unsigned int *)regs->tpc);
5150+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
5151+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
5152+
5153+ if (err)
5154+ break;
5155+
5156+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5157+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
5158+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
5159+ {
5160+ unsigned long addr;
5161+
5162+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5163+ addr = regs->u_regs[UREG_G1];
5164+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5165+
5166+ if (test_thread_flag(TIF_32BIT))
5167+ addr &= 0xFFFFFFFFUL;
5168+
5169+ regs->tpc = addr;
5170+ regs->tnpc = addr+4;
5171+ return 2;
5172+ }
5173+ } while (0);
5174+
5175+ { /* PaX: patched PLT emulation #2 */
5176+ unsigned int ba;
5177+
5178+ err = get_user(ba, (unsigned int *)regs->tpc);
5179+
5180+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5181+ unsigned long addr;
5182+
5183+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5184+
5185+ if (test_thread_flag(TIF_32BIT))
5186+ addr &= 0xFFFFFFFFUL;
5187+
5188+ regs->tpc = addr;
5189+ regs->tnpc = addr+4;
5190+ return 2;
5191+ }
5192+ }
5193+
5194+ do { /* PaX: patched PLT emulation #3 */
5195+ unsigned int sethi, jmpl, nop;
5196+
5197+ err = get_user(sethi, (unsigned int *)regs->tpc);
5198+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5199+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5200+
5201+ if (err)
5202+ break;
5203+
5204+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5205+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5206+ nop == 0x01000000U)
5207+ {
5208+ unsigned long addr;
5209+
5210+ addr = (sethi & 0x003FFFFFU) << 10;
5211+ regs->u_regs[UREG_G1] = addr;
5212+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5213+
5214+ if (test_thread_flag(TIF_32BIT))
5215+ addr &= 0xFFFFFFFFUL;
5216+
5217+ regs->tpc = addr;
5218+ regs->tnpc = addr+4;
5219+ return 2;
5220+ }
5221+ } while (0);
5222+
5223+ do { /* PaX: patched PLT emulation #4 */
5224+ unsigned int sethi, mov1, call, mov2;
5225+
5226+ err = get_user(sethi, (unsigned int *)regs->tpc);
5227+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5228+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
5229+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5230+
5231+ if (err)
5232+ break;
5233+
5234+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5235+ mov1 == 0x8210000FU &&
5236+ (call & 0xC0000000U) == 0x40000000U &&
5237+ mov2 == 0x9E100001U)
5238+ {
5239+ unsigned long addr;
5240+
5241+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5242+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5243+
5244+ if (test_thread_flag(TIF_32BIT))
5245+ addr &= 0xFFFFFFFFUL;
5246+
5247+ regs->tpc = addr;
5248+ regs->tnpc = addr+4;
5249+ return 2;
5250+ }
5251+ } while (0);
5252+
5253+ do { /* PaX: patched PLT emulation #5 */
5254+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5255+
5256+ err = get_user(sethi, (unsigned int *)regs->tpc);
5257+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5258+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5259+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5260+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5261+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5262+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5263+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5264+
5265+ if (err)
5266+ break;
5267+
5268+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5269+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
5270+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5271+ (or1 & 0xFFFFE000U) == 0x82106000U &&
5272+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
5273+ sllx == 0x83287020U &&
5274+ jmpl == 0x81C04005U &&
5275+ nop == 0x01000000U)
5276+ {
5277+ unsigned long addr;
5278+
5279+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5280+ regs->u_regs[UREG_G1] <<= 32;
5281+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5282+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5283+ regs->tpc = addr;
5284+ regs->tnpc = addr+4;
5285+ return 2;
5286+ }
5287+ } while (0);
5288+
5289+ do { /* PaX: patched PLT emulation #6 */
5290+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5291+
5292+ err = get_user(sethi, (unsigned int *)regs->tpc);
5293+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5294+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5295+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5296+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
5297+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5298+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5299+
5300+ if (err)
5301+ break;
5302+
5303+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5304+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
5305+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5306+ sllx == 0x83287020U &&
5307+ (or & 0xFFFFE000U) == 0x8A116000U &&
5308+ jmpl == 0x81C04005U &&
5309+ nop == 0x01000000U)
5310+ {
5311+ unsigned long addr;
5312+
5313+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5314+ regs->u_regs[UREG_G1] <<= 32;
5315+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5316+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5317+ regs->tpc = addr;
5318+ regs->tnpc = addr+4;
5319+ return 2;
5320+ }
5321+ } while (0);
5322+
5323+ do { /* PaX: unpatched PLT emulation step 1 */
5324+ unsigned int sethi, ba, nop;
5325+
5326+ err = get_user(sethi, (unsigned int *)regs->tpc);
5327+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5328+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5329+
5330+ if (err)
5331+ break;
5332+
5333+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5334+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5335+ nop == 0x01000000U)
5336+ {
5337+ unsigned long addr;
5338+ unsigned int save, call;
5339+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5340+
5341+ if ((ba & 0xFFC00000U) == 0x30800000U)
5342+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5343+ else
5344+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5345+
5346+ if (test_thread_flag(TIF_32BIT))
5347+ addr &= 0xFFFFFFFFUL;
5348+
5349+ err = get_user(save, (unsigned int *)addr);
5350+ err |= get_user(call, (unsigned int *)(addr+4));
5351+ err |= get_user(nop, (unsigned int *)(addr+8));
5352+ if (err)
5353+ break;
5354+
5355+#ifdef CONFIG_PAX_DLRESOLVE
5356+ if (save == 0x9DE3BFA8U &&
5357+ (call & 0xC0000000U) == 0x40000000U &&
5358+ nop == 0x01000000U)
5359+ {
5360+ struct vm_area_struct *vma;
5361+ unsigned long call_dl_resolve;
5362+
5363+ down_read(&current->mm->mmap_sem);
5364+ call_dl_resolve = current->mm->call_dl_resolve;
5365+ up_read(&current->mm->mmap_sem);
5366+ if (likely(call_dl_resolve))
5367+ goto emulate;
5368+
5369+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5370+
5371+ down_write(&current->mm->mmap_sem);
5372+ if (current->mm->call_dl_resolve) {
5373+ call_dl_resolve = current->mm->call_dl_resolve;
5374+ up_write(&current->mm->mmap_sem);
5375+ if (vma)
5376+ kmem_cache_free(vm_area_cachep, vma);
5377+ goto emulate;
5378+ }
5379+
5380+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5381+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5382+ up_write(&current->mm->mmap_sem);
5383+ if (vma)
5384+ kmem_cache_free(vm_area_cachep, vma);
5385+ return 1;
5386+ }
5387+
5388+ if (pax_insert_vma(vma, call_dl_resolve)) {
5389+ up_write(&current->mm->mmap_sem);
5390+ kmem_cache_free(vm_area_cachep, vma);
5391+ return 1;
5392+ }
5393+
5394+ current->mm->call_dl_resolve = call_dl_resolve;
5395+ up_write(&current->mm->mmap_sem);
5396+
5397+emulate:
5398+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5399+ regs->tpc = call_dl_resolve;
5400+ regs->tnpc = addr+4;
5401+ return 3;
5402+ }
5403+#endif
5404+
5405+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5406+ if ((save & 0xFFC00000U) == 0x05000000U &&
5407+ (call & 0xFFFFE000U) == 0x85C0A000U &&
5408+ nop == 0x01000000U)
5409+ {
5410+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5411+ regs->u_regs[UREG_G2] = addr + 4;
5412+ addr = (save & 0x003FFFFFU) << 10;
5413+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5414+
5415+ if (test_thread_flag(TIF_32BIT))
5416+ addr &= 0xFFFFFFFFUL;
5417+
5418+ regs->tpc = addr;
5419+ regs->tnpc = addr+4;
5420+ return 3;
5421+ }
5422+
5423+ /* PaX: 64-bit PLT stub */
5424+ err = get_user(sethi1, (unsigned int *)addr);
5425+ err |= get_user(sethi2, (unsigned int *)(addr+4));
5426+ err |= get_user(or1, (unsigned int *)(addr+8));
5427+ err |= get_user(or2, (unsigned int *)(addr+12));
5428+ err |= get_user(sllx, (unsigned int *)(addr+16));
5429+ err |= get_user(add, (unsigned int *)(addr+20));
5430+ err |= get_user(jmpl, (unsigned int *)(addr+24));
5431+ err |= get_user(nop, (unsigned int *)(addr+28));
5432+ if (err)
5433+ break;
5434+
5435+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5436+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5437+ (or1 & 0xFFFFE000U) == 0x88112000U &&
5438+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
5439+ sllx == 0x89293020U &&
5440+ add == 0x8A010005U &&
5441+ jmpl == 0x89C14000U &&
5442+ nop == 0x01000000U)
5443+ {
5444+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5445+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5446+ regs->u_regs[UREG_G4] <<= 32;
5447+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5448+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5449+ regs->u_regs[UREG_G4] = addr + 24;
5450+ addr = regs->u_regs[UREG_G5];
5451+ regs->tpc = addr;
5452+ regs->tnpc = addr+4;
5453+ return 3;
5454+ }
5455+ }
5456+ } while (0);
5457+
5458+#ifdef CONFIG_PAX_DLRESOLVE
5459+ do { /* PaX: unpatched PLT emulation step 2 */
5460+ unsigned int save, call, nop;
5461+
5462+ err = get_user(save, (unsigned int *)(regs->tpc-4));
5463+ err |= get_user(call, (unsigned int *)regs->tpc);
5464+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5465+ if (err)
5466+ break;
5467+
5468+ if (save == 0x9DE3BFA8U &&
5469+ (call & 0xC0000000U) == 0x40000000U &&
5470+ nop == 0x01000000U)
5471+ {
5472+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5473+
5474+ if (test_thread_flag(TIF_32BIT))
5475+ dl_resolve &= 0xFFFFFFFFUL;
5476+
5477+ regs->u_regs[UREG_RETPC] = regs->tpc;
5478+ regs->tpc = dl_resolve;
5479+ regs->tnpc = dl_resolve+4;
5480+ return 3;
5481+ }
5482+ } while (0);
5483+#endif
5484+
5485+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5486+ unsigned int sethi, ba, nop;
5487+
5488+ err = get_user(sethi, (unsigned int *)regs->tpc);
5489+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5490+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5491+
5492+ if (err)
5493+ break;
5494+
5495+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5496+ (ba & 0xFFF00000U) == 0x30600000U &&
5497+ nop == 0x01000000U)
5498+ {
5499+ unsigned long addr;
5500+
5501+ addr = (sethi & 0x003FFFFFU) << 10;
5502+ regs->u_regs[UREG_G1] = addr;
5503+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5504+
5505+ if (test_thread_flag(TIF_32BIT))
5506+ addr &= 0xFFFFFFFFUL;
5507+
5508+ regs->tpc = addr;
5509+ regs->tnpc = addr+4;
5510+ return 2;
5511+ }
5512+ } while (0);
5513+
5514+#endif
5515+
5516+ return 1;
5517+}
5518+
5519+void pax_report_insns(void *pc, void *sp)
5520+{
5521+ unsigned long i;
5522+
5523+ printk(KERN_ERR "PAX: bytes at PC: ");
5524+ for (i = 0; i < 8; i++) {
5525+ unsigned int c;
5526+ if (get_user(c, (unsigned int *)pc+i))
5527+ printk(KERN_CONT "???????? ");
5528+ else
5529+ printk(KERN_CONT "%08x ", c);
5530+ }
5531+ printk("\n");
5532+}
5533+#endif
5534+
5535 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5536 {
5537 struct mm_struct *mm = current->mm;
5538@@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fau
5539 if (!vma)
5540 goto bad_area;
5541
5542+#ifdef CONFIG_PAX_PAGEEXEC
5543+ /* PaX: detect ITLB misses on non-exec pages */
5544+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5545+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5546+ {
5547+ if (address != regs->tpc)
5548+ goto good_area;
5549+
5550+ up_read(&mm->mmap_sem);
5551+ switch (pax_handle_fetch_fault(regs)) {
5552+
5553+#ifdef CONFIG_PAX_EMUPLT
5554+ case 2:
5555+ case 3:
5556+ return;
5557+#endif
5558+
5559+ }
5560+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
5561+ do_group_exit(SIGKILL);
5562+ }
5563+#endif
5564+
5565 /* Pure DTLB misses do not tell us whether the fault causing
5566 * load/store/atomic was a write or not, it only says that there
5567 * was no match. So in such a case we (carefully) read the
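
The 64-bit handler accepts two encodings of the annulled branch in the unpatched-PLT sequence: the v8-style ba,a with a 22-bit displacement ((ba & 0xFFC00000) == 0x30800000) and the v9 form with a 19-bit displacement ((ba & 0xFFF80000) == 0x30680000). Both displacements count instruction words, hence the << 2, and are taken relative to the branch itself, which sits one word after the sethi (the tpc + 4 in the code). A sketch of the two sign-extensions, with made-up encodings; it assumes a 64-bit unsigned long, like the sparc64 code it mirrors:

#include <stdint.h>
#include <stdio.h>

/* byte offset encoded in a v8 "ba,a" (22-bit word displacement) */
static unsigned long ba_disp22_bytes(uint32_t insn)
{
	return ((((unsigned long)insn | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL)
		+ 0x00200000UL) << 2;
}

/* byte offset encoded in the v9 form (19-bit word displacement) */
static unsigned long ba_disp19_bytes(uint32_t insn)
{
	return ((((unsigned long)insn | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL)
		+ 0x00040000UL) << 2;
}

int main(void)
{
	uint32_t ba22 = 0x30800000U | 0x3FFFF0U;  /* disp22 = -16 words */
	uint32_t ba19 = 0x30680000U | 0x7FFF0U;   /* disp19 = -16 words */
	unsigned long branch_pc = 0x100004UL;     /* i.e. tpc + 4 */

	/* unsigned wrap-around yields branch_pc - 64 in both cases */
	printf("ba (disp22) target: %#lx\n", branch_pc + ba_disp22_bytes(ba22));
	printf("ba (disp19) target: %#lx\n", branch_pc + ba_disp19_bytes(ba19));
	return 0;
}
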
5568diff -urNp linux-2.6.32.41/arch/sparc/mm/hugetlbpage.c linux-2.6.32.41/arch/sparc/mm/hugetlbpage.c
5569--- linux-2.6.32.41/arch/sparc/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
5570+++ linux-2.6.32.41/arch/sparc/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
5571@@ -69,7 +69,7 @@ full_search:
5572 }
5573 return -ENOMEM;
5574 }
5575- if (likely(!vma || addr + len <= vma->vm_start)) {
5576+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5577 /*
5578 * Remember the place where we stopped the search:
5579 */
5580@@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct
5581 /* make sure it can fit in the remaining address space */
5582 if (likely(addr > len)) {
5583 vma = find_vma(mm, addr-len);
5584- if (!vma || addr <= vma->vm_start) {
5585+ if (check_heap_stack_gap(vma, addr - len, len)) {
5586 /* remember the address as a hint for next time */
5587 return (mm->free_area_cache = addr-len);
5588 }
5589@@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct
5590 if (unlikely(mm->mmap_base < len))
5591 goto bottomup;
5592
5593- addr = (mm->mmap_base-len) & HPAGE_MASK;
5594+ addr = mm->mmap_base - len;
5595
5596 do {
5597+ addr &= HPAGE_MASK;
5598 /*
5599 * Lookup failure means no vma is above this address,
5600 * else if new region fits below vma->vm_start,
5601 * return with success:
5602 */
5603 vma = find_vma(mm, addr);
5604- if (likely(!vma || addr+len <= vma->vm_start)) {
5605+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5606 /* remember the address as a hint for next time */
5607 return (mm->free_area_cache = addr);
5608 }
5609@@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct
5610 mm->cached_hole_size = vma->vm_start - addr;
5611
5612 /* try just below the current vma->vm_start */
5613- addr = (vma->vm_start-len) & HPAGE_MASK;
5614- } while (likely(len < vma->vm_start));
5615+ addr = skip_heap_stack_gap(vma, len);
5616+ } while (!IS_ERR_VALUE(addr));
5617
5618 bottomup:
5619 /*
5620@@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *f
5621 if (addr) {
5622 addr = ALIGN(addr, HPAGE_SIZE);
5623 vma = find_vma(mm, addr);
5624- if (task_size - len >= addr &&
5625- (!vma || addr + len <= vma->vm_start))
5626+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5627 return addr;
5628 }
5629 if (mm->get_unmapped_area == arch_get_unmapped_area)
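
All three hunks replace the open-coded "!vma || addr + len <= vma->vm_start" test with check_heap_stack_gap(), whose point is to also keep a cushion between a new mapping and a stack (VM_GROWSDOWN) vma; skip_heap_stack_gap() is its companion for stepping the top-down search past such a vma. The helper itself is defined elsewhere in the patch; the sketch below only approximates its intent, and the gap size and the exact VM_GROWSDOWN handling are assumptions:

#include <stdbool.h>
#include <stdio.h>

#define VM_GROWSDOWN 0x00000100UL

struct vma {
	unsigned long vm_start;
	unsigned long vm_end;
	unsigned long vm_flags;
};

/* illustrative gap; the real value would come from a sysctl */
static unsigned long heap_stack_gap = 64UL << 10;

static bool check_heap_stack_gap(const struct vma *vma,
				 unsigned long addr, unsigned long len)
{
	if (!vma)				/* nothing above the candidate range */
		return true;
	if (addr + len > vma->vm_start)		/* would overlap the next vma */
		return false;
	if (vma->vm_flags & VM_GROWSDOWN)	/* keep a cushion below stacks */
		return vma->vm_start - (addr + len) >= heap_stack_gap;
	return true;
}

int main(void)
{
	struct vma stack = { 0x7f0000000000UL, 0x7f0000100000UL, VM_GROWSDOWN };

	printf("right below the stack: %s\n",
	       check_heap_stack_gap(&stack, stack.vm_start - 0x1000, 0x1000) ? "ok" : "rejected");
	printf("64K below the stack:   %s\n",
	       check_heap_stack_gap(&stack, stack.vm_start - 0x20000, 0x10000) ? "ok" : "rejected");
	return 0;
}
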
5630diff -urNp linux-2.6.32.41/arch/sparc/mm/init_32.c linux-2.6.32.41/arch/sparc/mm/init_32.c
5631--- linux-2.6.32.41/arch/sparc/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
5632+++ linux-2.6.32.41/arch/sparc/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
5633@@ -317,6 +317,9 @@ extern void device_scan(void);
5634 pgprot_t PAGE_SHARED __read_mostly;
5635 EXPORT_SYMBOL(PAGE_SHARED);
5636
5637+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5638+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5639+
5640 void __init paging_init(void)
5641 {
5642 switch(sparc_cpu_model) {
5643@@ -345,17 +348,17 @@ void __init paging_init(void)
5644
5645 /* Initialize the protection map with non-constant, MMU dependent values. */
5646 protection_map[0] = PAGE_NONE;
5647- protection_map[1] = PAGE_READONLY;
5648- protection_map[2] = PAGE_COPY;
5649- protection_map[3] = PAGE_COPY;
5650+ protection_map[1] = PAGE_READONLY_NOEXEC;
5651+ protection_map[2] = PAGE_COPY_NOEXEC;
5652+ protection_map[3] = PAGE_COPY_NOEXEC;
5653 protection_map[4] = PAGE_READONLY;
5654 protection_map[5] = PAGE_READONLY;
5655 protection_map[6] = PAGE_COPY;
5656 protection_map[7] = PAGE_COPY;
5657 protection_map[8] = PAGE_NONE;
5658- protection_map[9] = PAGE_READONLY;
5659- protection_map[10] = PAGE_SHARED;
5660- protection_map[11] = PAGE_SHARED;
5661+ protection_map[9] = PAGE_READONLY_NOEXEC;
5662+ protection_map[10] = PAGE_SHARED_NOEXEC;
5663+ protection_map[11] = PAGE_SHARED_NOEXEC;
5664 protection_map[12] = PAGE_READONLY;
5665 protection_map[13] = PAGE_READONLY;
5666 protection_map[14] = PAGE_SHARED;
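
protection_map[] is indexed by the low four vm_flags bits (VM_READ = 0x1, VM_WRITE = 0x2, VM_EXEC = 0x4, VM_SHARED = 0x8), so the entries this hunk redirects to the *_NOEXEC protections, 1, 2, 3 and 9, 10, 11, are exactly the read/write combinations that never asked for PROT_EXEC; mappings that did request exec (indices 4-7 and 12-15) are untouched. A tiny table generator that makes the indexing visible:

#include <stdio.h>

#define VM_READ   0x1u
#define VM_WRITE  0x2u
#define VM_EXEC   0x4u
#define VM_SHARED 0x8u

int main(void)
{
	unsigned int idx;

	for (idx = 0; idx < 16; idx++)
		printf("protection_map[%2u] = %s%s%s %s%s\n", idx,
		       idx & VM_READ   ? "r" : "-",
		       idx & VM_WRITE  ? "w" : "-",
		       idx & VM_EXEC   ? "x" : "-",
		       idx & VM_SHARED ? "shared " : "private",
		       !(idx & VM_EXEC) && (idx & (VM_READ | VM_WRITE))
				? "  <- becomes *_NOEXEC under PAX_PAGEEXEC" : "");
	return 0;
}
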
5667diff -urNp linux-2.6.32.41/arch/sparc/mm/Makefile linux-2.6.32.41/arch/sparc/mm/Makefile
5668--- linux-2.6.32.41/arch/sparc/mm/Makefile 2011-03-27 14:31:47.000000000 -0400
5669+++ linux-2.6.32.41/arch/sparc/mm/Makefile 2011-04-17 15:56:46.000000000 -0400
5670@@ -2,7 +2,7 @@
5671 #
5672
5673 asflags-y := -ansi
5674-ccflags-y := -Werror
5675+#ccflags-y := -Werror
5676
5677 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5678 obj-y += fault_$(BITS).o
5679diff -urNp linux-2.6.32.41/arch/sparc/mm/srmmu.c linux-2.6.32.41/arch/sparc/mm/srmmu.c
5680--- linux-2.6.32.41/arch/sparc/mm/srmmu.c 2011-03-27 14:31:47.000000000 -0400
5681+++ linux-2.6.32.41/arch/sparc/mm/srmmu.c 2011-04-17 15:56:46.000000000 -0400
5682@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5683 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5684 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5685 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5686+
5687+#ifdef CONFIG_PAX_PAGEEXEC
5688+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5689+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5690+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5691+#endif
5692+
5693 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5694 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5695
5696diff -urNp linux-2.6.32.41/arch/um/include/asm/kmap_types.h linux-2.6.32.41/arch/um/include/asm/kmap_types.h
5697--- linux-2.6.32.41/arch/um/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
5698+++ linux-2.6.32.41/arch/um/include/asm/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
5699@@ -23,6 +23,7 @@ enum km_type {
5700 KM_IRQ1,
5701 KM_SOFTIRQ0,
5702 KM_SOFTIRQ1,
5703+ KM_CLEARPAGE,
5704 KM_TYPE_NR
5705 };
5706
5707diff -urNp linux-2.6.32.41/arch/um/include/asm/page.h linux-2.6.32.41/arch/um/include/asm/page.h
5708--- linux-2.6.32.41/arch/um/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
5709+++ linux-2.6.32.41/arch/um/include/asm/page.h 2011-04-17 15:56:46.000000000 -0400
5710@@ -14,6 +14,9 @@
5711 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5712 #define PAGE_MASK (~(PAGE_SIZE-1))
5713
5714+#define ktla_ktva(addr) (addr)
5715+#define ktva_ktla(addr) (addr)
5716+
5717 #ifndef __ASSEMBLY__
5718
5719 struct page;
5720diff -urNp linux-2.6.32.41/arch/um/kernel/process.c linux-2.6.32.41/arch/um/kernel/process.c
5721--- linux-2.6.32.41/arch/um/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
5722+++ linux-2.6.32.41/arch/um/kernel/process.c 2011-04-17 15:56:46.000000000 -0400
5723@@ -393,22 +393,6 @@ int singlestepping(void * t)
5724 return 2;
5725 }
5726
5727-/*
5728- * Only x86 and x86_64 have an arch_align_stack().
5729- * All other arches have "#define arch_align_stack(x) (x)"
5730- * in their asm/system.h
5731- * As this is included in UML from asm-um/system-generic.h,
5732- * we can use it to behave as the subarch does.
5733- */
5734-#ifndef arch_align_stack
5735-unsigned long arch_align_stack(unsigned long sp)
5736-{
5737- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5738- sp -= get_random_int() % 8192;
5739- return sp & ~0xf;
5740-}
5741-#endif
5742-
5743 unsigned long get_wchan(struct task_struct *p)
5744 {
5745 unsigned long stack_page, sp, ip;
5746diff -urNp linux-2.6.32.41/arch/um/sys-i386/syscalls.c linux-2.6.32.41/arch/um/sys-i386/syscalls.c
5747--- linux-2.6.32.41/arch/um/sys-i386/syscalls.c 2011-03-27 14:31:47.000000000 -0400
5748+++ linux-2.6.32.41/arch/um/sys-i386/syscalls.c 2011-04-17 15:56:46.000000000 -0400
5749@@ -11,6 +11,21 @@
5750 #include "asm/uaccess.h"
5751 #include "asm/unistd.h"
5752
5753+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5754+{
5755+ unsigned long pax_task_size = TASK_SIZE;
5756+
5757+#ifdef CONFIG_PAX_SEGMEXEC
5758+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5759+ pax_task_size = SEGMEXEC_TASK_SIZE;
5760+#endif
5761+
5762+ if (len > pax_task_size || addr > pax_task_size - len)
5763+ return -EINVAL;
5764+
5765+ return 0;
5766+}
5767+
5768 /*
5769 * Perform the select(nd, in, out, ex, tv) and mmap() system
5770 * calls. Linux/i386 didn't use to be able to handle more than
5771diff -urNp linux-2.6.32.41/arch/x86/boot/bitops.h linux-2.6.32.41/arch/x86/boot/bitops.h
5772--- linux-2.6.32.41/arch/x86/boot/bitops.h 2011-03-27 14:31:47.000000000 -0400
5773+++ linux-2.6.32.41/arch/x86/boot/bitops.h 2011-04-17 15:56:46.000000000 -0400
5774@@ -26,7 +26,7 @@ static inline int variable_test_bit(int
5775 u8 v;
5776 const u32 *p = (const u32 *)addr;
5777
5778- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5779+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5780 return v;
5781 }
5782
5783@@ -37,7 +37,7 @@ static inline int variable_test_bit(int
5784
5785 static inline void set_bit(int nr, void *addr)
5786 {
5787- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5788+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5789 }
5790
5791 #endif /* BOOT_BITOPS_H */
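
Here and in the following boot files the patch only adds "volatile" to inline asm that has output operands: without it GCC treats the asm as a pure function of its inputs, so it may merge two identical statements or drop one whose result looks unused, which is unsafe for instructions with side effects. A minimal demonstration of the difference (rdtsc is just a convenient stand-in, not taken from the patch):

#include <stdint.h>
#include <stdio.h>

static inline uint64_t rdtsc_plain(void)
{
	uint32_t lo, hi;
	asm("rdtsc" : "=a" (lo), "=d" (hi));		/* may be merged or dropped */
	return ((uint64_t)hi << 32) | lo;
}

static inline uint64_t rdtsc_volatile(void)
{
	uint32_t lo, hi;
	asm volatile("rdtsc" : "=a" (lo), "=d" (hi));	/* always re-executed */
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	/* with optimization the two plain reads may be folded into one value;
	   the two volatile reads are guaranteed to be separate executions */
	printf("plain:    %llu %llu\n",
	       (unsigned long long)rdtsc_plain(), (unsigned long long)rdtsc_plain());
	printf("volatile: %llu %llu\n",
	       (unsigned long long)rdtsc_volatile(), (unsigned long long)rdtsc_volatile());
	return 0;
}
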
5792diff -urNp linux-2.6.32.41/arch/x86/boot/boot.h linux-2.6.32.41/arch/x86/boot/boot.h
5793--- linux-2.6.32.41/arch/x86/boot/boot.h 2011-03-27 14:31:47.000000000 -0400
5794+++ linux-2.6.32.41/arch/x86/boot/boot.h 2011-04-17 15:56:46.000000000 -0400
5795@@ -82,7 +82,7 @@ static inline void io_delay(void)
5796 static inline u16 ds(void)
5797 {
5798 u16 seg;
5799- asm("movw %%ds,%0" : "=rm" (seg));
5800+ asm volatile("movw %%ds,%0" : "=rm" (seg));
5801 return seg;
5802 }
5803
5804@@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t
5805 static inline int memcmp(const void *s1, const void *s2, size_t len)
5806 {
5807 u8 diff;
5808- asm("repe; cmpsb; setnz %0"
5809+ asm volatile("repe; cmpsb; setnz %0"
5810 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5811 return diff;
5812 }
5813diff -urNp linux-2.6.32.41/arch/x86/boot/compressed/head_32.S linux-2.6.32.41/arch/x86/boot/compressed/head_32.S
5814--- linux-2.6.32.41/arch/x86/boot/compressed/head_32.S 2011-03-27 14:31:47.000000000 -0400
5815+++ linux-2.6.32.41/arch/x86/boot/compressed/head_32.S 2011-04-17 15:56:46.000000000 -0400
5816@@ -76,7 +76,7 @@ ENTRY(startup_32)
5817 notl %eax
5818 andl %eax, %ebx
5819 #else
5820- movl $LOAD_PHYSICAL_ADDR, %ebx
5821+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5822 #endif
5823
5824 /* Target address to relocate to for decompression */
5825@@ -149,7 +149,7 @@ relocated:
5826 * and where it was actually loaded.
5827 */
5828 movl %ebp, %ebx
5829- subl $LOAD_PHYSICAL_ADDR, %ebx
5830+ subl $____LOAD_PHYSICAL_ADDR, %ebx
5831 jz 2f /* Nothing to be done if loaded at compiled addr. */
5832 /*
5833 * Process relocations.
5834@@ -157,8 +157,7 @@ relocated:
5835
5836 1: subl $4, %edi
5837 movl (%edi), %ecx
5838- testl %ecx, %ecx
5839- jz 2f
5840+ jecxz 2f
5841 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5842 jmp 1b
5843 2:
5844diff -urNp linux-2.6.32.41/arch/x86/boot/compressed/head_64.S linux-2.6.32.41/arch/x86/boot/compressed/head_64.S
5845--- linux-2.6.32.41/arch/x86/boot/compressed/head_64.S 2011-03-27 14:31:47.000000000 -0400
5846+++ linux-2.6.32.41/arch/x86/boot/compressed/head_64.S 2011-04-17 15:56:46.000000000 -0400
5847@@ -91,7 +91,7 @@ ENTRY(startup_32)
5848 notl %eax
5849 andl %eax, %ebx
5850 #else
5851- movl $LOAD_PHYSICAL_ADDR, %ebx
5852+ movl $____LOAD_PHYSICAL_ADDR, %ebx
5853 #endif
5854
5855 /* Target address to relocate to for decompression */
5856@@ -234,7 +234,7 @@ ENTRY(startup_64)
5857 notq %rax
5858 andq %rax, %rbp
5859 #else
5860- movq $LOAD_PHYSICAL_ADDR, %rbp
5861+ movq $____LOAD_PHYSICAL_ADDR, %rbp
5862 #endif
5863
5864 /* Target address to relocate to for decompression */
5865diff -urNp linux-2.6.32.41/arch/x86/boot/compressed/misc.c linux-2.6.32.41/arch/x86/boot/compressed/misc.c
5866--- linux-2.6.32.41/arch/x86/boot/compressed/misc.c 2011-03-27 14:31:47.000000000 -0400
5867+++ linux-2.6.32.41/arch/x86/boot/compressed/misc.c 2011-04-17 15:56:46.000000000 -0400
5868@@ -288,7 +288,7 @@ static void parse_elf(void *output)
5869 case PT_LOAD:
5870 #ifdef CONFIG_RELOCATABLE
5871 dest = output;
5872- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5873+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5874 #else
5875 dest = (void *)(phdr->p_paddr);
5876 #endif
5877@@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *
5878 error("Destination address too large");
5879 #endif
5880 #ifndef CONFIG_RELOCATABLE
5881- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5882+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5883 error("Wrong destination address");
5884 #endif
5885
5886diff -urNp linux-2.6.32.41/arch/x86/boot/compressed/mkpiggy.c linux-2.6.32.41/arch/x86/boot/compressed/mkpiggy.c
5887--- linux-2.6.32.41/arch/x86/boot/compressed/mkpiggy.c 2011-03-27 14:31:47.000000000 -0400
5888+++ linux-2.6.32.41/arch/x86/boot/compressed/mkpiggy.c 2011-04-17 15:56:46.000000000 -0400
5889@@ -74,7 +74,7 @@ int main(int argc, char *argv[])
5890
5891 offs = (olen > ilen) ? olen - ilen : 0;
5892 offs += olen >> 12; /* Add 8 bytes for each 32K block */
5893- offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
5894+ offs += 64*1024; /* Add 64K bytes slack */
5895 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
5896
5897 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
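
mkpiggy computes how far above the decompression buffer the compressed image must sit; the hunk widens the fixed safety slack from "32K + 18 bytes" to a flat 64K. The calculation, reproduced as a standalone program with made-up sizes:

#include <stdio.h>

static unsigned long piggy_offset(unsigned long ilen, unsigned long olen)
{
	unsigned long offs;

	offs  = (olen > ilen) ? olen - ilen : 0;	/* worst-case in-place expansion */
	offs += olen >> 12;				/* 8 bytes for each 32K block */
	offs += 64 * 1024;				/* flat 64K slack (was 32K + 18) */
	return (offs + 4095) & ~4095UL;			/* round up to a 4K boundary */
}

int main(void)
{
	/* example sizes: 1.8 MB compressed, 4.0 MB uncompressed */
	printf("offset = %#lx\n", piggy_offset(1800000UL, 4000000UL));
	return 0;
}
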
5898diff -urNp linux-2.6.32.41/arch/x86/boot/compressed/relocs.c linux-2.6.32.41/arch/x86/boot/compressed/relocs.c
5899--- linux-2.6.32.41/arch/x86/boot/compressed/relocs.c 2011-03-27 14:31:47.000000000 -0400
5900+++ linux-2.6.32.41/arch/x86/boot/compressed/relocs.c 2011-04-17 15:56:46.000000000 -0400
5901@@ -10,8 +10,11 @@
5902 #define USE_BSD
5903 #include <endian.h>
5904
5905+#include "../../../../include/linux/autoconf.h"
5906+
5907 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5908 static Elf32_Ehdr ehdr;
5909+static Elf32_Phdr *phdr;
5910 static unsigned long reloc_count, reloc_idx;
5911 static unsigned long *relocs;
5912
5913@@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
5914
5915 static int is_safe_abs_reloc(const char* sym_name)
5916 {
5917- int i;
5918+ unsigned int i;
5919
5920 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
5921 if (!strcmp(sym_name, safe_abs_relocs[i]))
5922@@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
5923 }
5924 }
5925
5926+static void read_phdrs(FILE *fp)
5927+{
5928+ unsigned int i;
5929+
5930+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5931+ if (!phdr) {
5932+ die("Unable to allocate %d program headers\n",
5933+ ehdr.e_phnum);
5934+ }
5935+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5936+ die("Seek to %d failed: %s\n",
5937+ ehdr.e_phoff, strerror(errno));
5938+ }
5939+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5940+ die("Cannot read ELF program headers: %s\n",
5941+ strerror(errno));
5942+ }
5943+ for(i = 0; i < ehdr.e_phnum; i++) {
5944+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5945+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5946+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5947+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5948+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5949+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5950+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5951+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5952+ }
5953+
5954+}
5955+
5956 static void read_shdrs(FILE *fp)
5957 {
5958- int i;
5959+ unsigned int i;
5960 Elf32_Shdr shdr;
5961
5962 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5963@@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
5964
5965 static void read_strtabs(FILE *fp)
5966 {
5967- int i;
5968+ unsigned int i;
5969 for (i = 0; i < ehdr.e_shnum; i++) {
5970 struct section *sec = &secs[i];
5971 if (sec->shdr.sh_type != SHT_STRTAB) {
5972@@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
5973
5974 static void read_symtabs(FILE *fp)
5975 {
5976- int i,j;
5977+ unsigned int i,j;
5978 for (i = 0; i < ehdr.e_shnum; i++) {
5979 struct section *sec = &secs[i];
5980 if (sec->shdr.sh_type != SHT_SYMTAB) {
5981@@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
5982
5983 static void read_relocs(FILE *fp)
5984 {
5985- int i,j;
5986+ unsigned int i,j;
5987+ uint32_t base;
5988+
5989 for (i = 0; i < ehdr.e_shnum; i++) {
5990 struct section *sec = &secs[i];
5991 if (sec->shdr.sh_type != SHT_REL) {
5992@@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
5993 die("Cannot read symbol table: %s\n",
5994 strerror(errno));
5995 }
5996+ base = 0;
5997+ for (j = 0; j < ehdr.e_phnum; j++) {
5998+ if (phdr[j].p_type != PT_LOAD )
5999+ continue;
6000+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
6001+ continue;
6002+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
6003+ break;
6004+ }
6005 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
6006 Elf32_Rel *rel = &sec->reltab[j];
6007- rel->r_offset = elf32_to_cpu(rel->r_offset);
6008+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
6009 rel->r_info = elf32_to_cpu(rel->r_info);
6010 }
6011 }
6012@@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
6013
6014 static void print_absolute_symbols(void)
6015 {
6016- int i;
6017+ unsigned int i;
6018 printf("Absolute symbols\n");
6019 printf(" Num: Value Size Type Bind Visibility Name\n");
6020 for (i = 0; i < ehdr.e_shnum; i++) {
6021 struct section *sec = &secs[i];
6022 char *sym_strtab;
6023 Elf32_Sym *sh_symtab;
6024- int j;
6025+ unsigned int j;
6026
6027 if (sec->shdr.sh_type != SHT_SYMTAB) {
6028 continue;
6029@@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
6030
6031 static void print_absolute_relocs(void)
6032 {
6033- int i, printed = 0;
6034+ unsigned int i, printed = 0;
6035
6036 for (i = 0; i < ehdr.e_shnum; i++) {
6037 struct section *sec = &secs[i];
6038 struct section *sec_applies, *sec_symtab;
6039 char *sym_strtab;
6040 Elf32_Sym *sh_symtab;
6041- int j;
6042+ unsigned int j;
6043 if (sec->shdr.sh_type != SHT_REL) {
6044 continue;
6045 }
6046@@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
6047
6048 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6049 {
6050- int i;
6051+ unsigned int i;
6052 /* Walk through the relocations */
6053 for (i = 0; i < ehdr.e_shnum; i++) {
6054 char *sym_strtab;
6055 Elf32_Sym *sh_symtab;
6056 struct section *sec_applies, *sec_symtab;
6057- int j;
6058+ unsigned int j;
6059 struct section *sec = &secs[i];
6060
6061 if (sec->shdr.sh_type != SHT_REL) {
6062@@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(El
6063 if (sym->st_shndx == SHN_ABS) {
6064 continue;
6065 }
6066+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6067+ if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6068+ continue;
6069+
6070+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6071+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6072+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6073+ continue;
6074+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6075+ continue;
6076+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6077+ continue;
6078+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6079+ continue;
6080+#endif
6081 if (r_type == R_386_NONE || r_type == R_386_PC32) {
6082 /*
6083 * NONE can be ignored and and PC relative
6084@@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, co
6085
6086 static void emit_relocs(int as_text)
6087 {
6088- int i;
6089+ unsigned int i;
6090 /* Count how many relocations I have and allocate space for them. */
6091 reloc_count = 0;
6092 walk_relocs(count_reloc);
6093@@ -634,6 +693,7 @@ int main(int argc, char **argv)
6094 fname, strerror(errno));
6095 }
6096 read_ehdr(fp);
6097+ read_phdrs(fp);
6098 read_shdrs(fp);
6099 read_strtabs(fp);
6100 read_symtabs(fp);
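The relocs-tool hunks above add read_phdrs() and then, in read_relocs(), bias every relocation offset by the virtual-to-physical delta of the PT_LOAD segment that contains the relocated section, so the emitted table holds final kernel virtual addresses rather than link-time offsets. A minimal stand-alone sketch of that base calculation, using the same Elf32 types from <elf.h>; the page-offset constant below is an assumption standing in for CONFIG_PAGE_OFFSET:

#include <elf.h>
#include <stdint.h>

#define PAGE_OFFSET 0xC0000000u	/* assumption: stands in for CONFIG_PAGE_OFFSET */

/* Mirror of the base calculation added to read_relocs(): find the PT_LOAD
 * segment whose file extent covers the section being relocated and return
 * the bias that turns its link-time offset into a kernel virtual address. */
static uint32_t reloc_base(const Elf32_Phdr *phdr, unsigned int phnum,
			   uint32_t sh_offset)
{
	unsigned int j;

	for (j = 0; j < phnum; j++) {
		if (phdr[j].p_type != PT_LOAD)
			continue;
		if (sh_offset < phdr[j].p_offset ||
		    sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
			continue;
		return PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
	}
	return 0;	/* section not covered by any PT_LOAD segment */
}

Each r_offset is then stored as elf32_to_cpu(rel->r_offset) plus this base, which is why main() gains the read_phdrs() call before the section headers are parsed.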
6101diff -urNp linux-2.6.32.41/arch/x86/boot/cpucheck.c linux-2.6.32.41/arch/x86/boot/cpucheck.c
6102--- linux-2.6.32.41/arch/x86/boot/cpucheck.c 2011-03-27 14:31:47.000000000 -0400
6103+++ linux-2.6.32.41/arch/x86/boot/cpucheck.c 2011-04-17 15:56:46.000000000 -0400
6104@@ -74,7 +74,7 @@ static int has_fpu(void)
6105 u16 fcw = -1, fsw = -1;
6106 u32 cr0;
6107
6108- asm("movl %%cr0,%0" : "=r" (cr0));
6109+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
6110 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6111 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6112 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6113@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6114 {
6115 u32 f0, f1;
6116
6117- asm("pushfl ; "
6118+ asm volatile("pushfl ; "
6119 "pushfl ; "
6120 "popl %0 ; "
6121 "movl %0,%1 ; "
6122@@ -115,7 +115,7 @@ static void get_flags(void)
6123 set_bit(X86_FEATURE_FPU, cpu.flags);
6124
6125 if (has_eflag(X86_EFLAGS_ID)) {
6126- asm("cpuid"
6127+ asm volatile("cpuid"
6128 : "=a" (max_intel_level),
6129 "=b" (cpu_vendor[0]),
6130 "=d" (cpu_vendor[1]),
6131@@ -124,7 +124,7 @@ static void get_flags(void)
6132
6133 if (max_intel_level >= 0x00000001 &&
6134 max_intel_level <= 0x0000ffff) {
6135- asm("cpuid"
6136+ asm volatile("cpuid"
6137 : "=a" (tfms),
6138 "=c" (cpu.flags[4]),
6139 "=d" (cpu.flags[0])
6140@@ -136,7 +136,7 @@ static void get_flags(void)
6141 cpu.model += ((tfms >> 16) & 0xf) << 4;
6142 }
6143
6144- asm("cpuid"
6145+ asm volatile("cpuid"
6146 : "=a" (max_amd_level)
6147 : "a" (0x80000000)
6148 : "ebx", "ecx", "edx");
6149@@ -144,7 +144,7 @@ static void get_flags(void)
6150 if (max_amd_level >= 0x80000001 &&
6151 max_amd_level <= 0x8000ffff) {
6152 u32 eax = 0x80000001;
6153- asm("cpuid"
6154+ asm volatile("cpuid"
6155 : "+a" (eax),
6156 "=c" (cpu.flags[6]),
6157 "=d" (cpu.flags[1])
6158@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6159 u32 ecx = MSR_K7_HWCR;
6160 u32 eax, edx;
6161
6162- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6163+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6164 eax &= ~(1 << 15);
6165- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6166+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6167
6168 get_flags(); /* Make sure it really did something */
6169 err = check_flags();
6170@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6171 u32 ecx = MSR_VIA_FCR;
6172 u32 eax, edx;
6173
6174- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6175+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6176 eax |= (1<<1)|(1<<7);
6177- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6178+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6179
6180 set_bit(X86_FEATURE_CX8, cpu.flags);
6181 err = check_flags();
6182@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
6183 u32 eax, edx;
6184 u32 level = 1;
6185
6186- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6187- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6188- asm("cpuid"
6189+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6190+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6191+ asm volatile("cpuid"
6192 : "+a" (level), "=d" (cpu.flags[0])
6193 : : "ecx", "ebx");
6194- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6195+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6196
6197 err = check_flags();
6198 }
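The cpucheck.c hunks add volatile to every inline asm that touches CR0, EFLAGS, CPUID or MSRs. Without volatile, GCC treats an asm statement as a pure function of its inputs: it may delete it when the output goes unused, hoist it out of a loop, or merge two identical copies, none of which is safe for instructions with side effects. A small illustrative sketch of the difference (not part of the patch):

/* Illustrative only.  The first form may be CSE'd or dropped by the
 * compiler; the second is always emitted, in program order. */
static inline unsigned int read_cr0_unsafe(void)
{
	unsigned int cr0;

	asm("movl %%cr0,%0" : "=r" (cr0));	/* may be merged with an earlier read */
	return cr0;
}

static inline unsigned int read_cr0_safe(void)
{
	unsigned int cr0;

	asm volatile("movl %%cr0,%0" : "=r" (cr0));	/* always executed */
	return cr0;
}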
6199diff -urNp linux-2.6.32.41/arch/x86/boot/header.S linux-2.6.32.41/arch/x86/boot/header.S
6200--- linux-2.6.32.41/arch/x86/boot/header.S 2011-03-27 14:31:47.000000000 -0400
6201+++ linux-2.6.32.41/arch/x86/boot/header.S 2011-04-17 15:56:46.000000000 -0400
6202@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
6203 # single linked list of
6204 # struct setup_data
6205
6206-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6207+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6208
6209 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6210 #define VO_INIT_SIZE (VO__end - VO__text)
6211diff -urNp linux-2.6.32.41/arch/x86/boot/memory.c linux-2.6.32.41/arch/x86/boot/memory.c
6212--- linux-2.6.32.41/arch/x86/boot/memory.c 2011-03-27 14:31:47.000000000 -0400
6213+++ linux-2.6.32.41/arch/x86/boot/memory.c 2011-04-17 15:56:46.000000000 -0400
6214@@ -19,7 +19,7 @@
6215
6216 static int detect_memory_e820(void)
6217 {
6218- int count = 0;
6219+ unsigned int count = 0;
6220 struct biosregs ireg, oreg;
6221 struct e820entry *desc = boot_params.e820_map;
6222 static struct e820entry buf; /* static so it is zeroed */
6223diff -urNp linux-2.6.32.41/arch/x86/boot/video.c linux-2.6.32.41/arch/x86/boot/video.c
6224--- linux-2.6.32.41/arch/x86/boot/video.c 2011-03-27 14:31:47.000000000 -0400
6225+++ linux-2.6.32.41/arch/x86/boot/video.c 2011-04-17 15:56:46.000000000 -0400
6226@@ -90,7 +90,7 @@ static void store_mode_params(void)
6227 static unsigned int get_entry(void)
6228 {
6229 char entry_buf[4];
6230- int i, len = 0;
6231+ unsigned int i, len = 0;
6232 int key;
6233 unsigned int v;
6234
6235diff -urNp linux-2.6.32.41/arch/x86/boot/video-vesa.c linux-2.6.32.41/arch/x86/boot/video-vesa.c
6236--- linux-2.6.32.41/arch/x86/boot/video-vesa.c 2011-03-27 14:31:47.000000000 -0400
6237+++ linux-2.6.32.41/arch/x86/boot/video-vesa.c 2011-04-17 15:56:46.000000000 -0400
6238@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6239
6240 boot_params.screen_info.vesapm_seg = oreg.es;
6241 boot_params.screen_info.vesapm_off = oreg.di;
6242+ boot_params.screen_info.vesapm_size = oreg.cx;
6243 }
6244
6245 /*
6246diff -urNp linux-2.6.32.41/arch/x86/ia32/ia32_aout.c linux-2.6.32.41/arch/x86/ia32/ia32_aout.c
6247--- linux-2.6.32.41/arch/x86/ia32/ia32_aout.c 2011-03-27 14:31:47.000000000 -0400
6248+++ linux-2.6.32.41/arch/x86/ia32/ia32_aout.c 2011-04-17 15:56:46.000000000 -0400
6249@@ -169,6 +169,8 @@ static int aout_core_dump(long signr, st
6250 unsigned long dump_start, dump_size;
6251 struct user32 dump;
6252
6253+ memset(&dump, 0, sizeof(dump));
6254+
6255 fs = get_fs();
6256 set_fs(KERNEL_DS);
6257 has_dumped = 1;
6258@@ -218,12 +220,6 @@ static int aout_core_dump(long signr, st
6259 dump_size = dump.u_ssize << PAGE_SHIFT;
6260 DUMP_WRITE(dump_start, dump_size);
6261 }
6262- /*
6263- * Finally dump the task struct. Not be used by gdb, but
6264- * could be useful
6265- */
6266- set_fs(KERNEL_DS);
6267- DUMP_WRITE(current, sizeof(*current));
6268 end_coredump:
6269 set_fs(fs);
6270 return has_dumped;
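The a.out compat coredump hunks zero the on-stack struct user32 before it is filled in and written out, and drop the write of the raw task_struct into the core file; together they keep uninitialized stack bytes and kernel-internal pointers out of a user-readable dump. The zero-before-copy idiom in general, sketched with a hypothetical structure (not from the patch):

#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

struct sample {			/* hypothetical struct with implicit padding */
	u8  flag;
	u64 value;
};

static int copy_sample_to_user(void __user *dst, u64 value)
{
	struct sample s;

	memset(&s, 0, sizeof(s));	/* clear the padding before exposing it */
	s.flag = 1;
	s.value = value;

	return copy_to_user(dst, &s, sizeof(s)) ? -EFAULT : 0;
}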
6271diff -urNp linux-2.6.32.41/arch/x86/ia32/ia32entry.S linux-2.6.32.41/arch/x86/ia32/ia32entry.S
6272--- linux-2.6.32.41/arch/x86/ia32/ia32entry.S 2011-03-27 14:31:47.000000000 -0400
6273+++ linux-2.6.32.41/arch/x86/ia32/ia32entry.S 2011-05-22 23:14:58.000000000 -0400
6274@@ -13,6 +13,7 @@
6275 #include <asm/thread_info.h>
6276 #include <asm/segment.h>
6277 #include <asm/irqflags.h>
6278+#include <asm/pgtable.h>
6279 #include <linux/linkage.h>
6280
6281 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6282@@ -93,6 +94,32 @@ ENTRY(native_irq_enable_sysexit)
6283 ENDPROC(native_irq_enable_sysexit)
6284 #endif
6285
6286+ .macro pax_enter_kernel_user
6287+#ifdef CONFIG_PAX_MEMORY_UDEREF
6288+ call pax_enter_kernel_user
6289+#endif
6290+ .endm
6291+
6292+ .macro pax_exit_kernel_user
6293+#ifdef CONFIG_PAX_MEMORY_UDEREF
6294+ call pax_exit_kernel_user
6295+#endif
6296+#ifdef CONFIG_PAX_RANDKSTACK
6297+ pushq %rax
6298+ call pax_randomize_kstack
6299+ popq %rax
6300+#endif
6301+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6302+ call pax_erase_kstack
6303+#endif
6304+ .endm
6305+
6306+ .macro pax_erase_kstack
6307+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6308+ call pax_erase_kstack
6309+#endif
6310+ .endm
6311+
6312 /*
6313 * 32bit SYSENTER instruction entry.
6314 *
6315@@ -119,7 +146,7 @@ ENTRY(ia32_sysenter_target)
6316 CFI_REGISTER rsp,rbp
6317 SWAPGS_UNSAFE_STACK
6318 movq PER_CPU_VAR(kernel_stack), %rsp
6319- addq $(KERNEL_STACK_OFFSET),%rsp
6320+ pax_enter_kernel_user
6321 /*
6322 * No need to follow this irqs on/off section: the syscall
6323 * disabled irqs, here we enable it straight after entry:
6324@@ -135,7 +162,8 @@ ENTRY(ia32_sysenter_target)
6325 pushfq
6326 CFI_ADJUST_CFA_OFFSET 8
6327 /*CFI_REL_OFFSET rflags,0*/
6328- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6329+ GET_THREAD_INFO(%r10)
6330+ movl TI_sysenter_return(%r10), %r10d
6331 CFI_REGISTER rip,r10
6332 pushq $__USER32_CS
6333 CFI_ADJUST_CFA_OFFSET 8
6334@@ -150,6 +178,12 @@ ENTRY(ia32_sysenter_target)
6335 SAVE_ARGS 0,0,1
6336 /* no need to do an access_ok check here because rbp has been
6337 32bit zero extended */
6338+
6339+#ifdef CONFIG_PAX_MEMORY_UDEREF
6340+ mov $PAX_USER_SHADOW_BASE,%r10
6341+ add %r10,%rbp
6342+#endif
6343+
6344 1: movl (%rbp),%ebp
6345 .section __ex_table,"a"
6346 .quad 1b,ia32_badarg
6347@@ -172,6 +206,7 @@ sysenter_dispatch:
6348 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6349 jnz sysexit_audit
6350 sysexit_from_sys_call:
6351+ pax_exit_kernel_user
6352 andl $~TS_COMPAT,TI_status(%r10)
6353 /* clear IF, that popfq doesn't enable interrupts early */
6354 andl $~0x200,EFLAGS-R11(%rsp)
6355@@ -200,6 +235,9 @@ sysexit_from_sys_call:
6356 movl %eax,%esi /* 2nd arg: syscall number */
6357 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
6358 call audit_syscall_entry
6359+
6360+ pax_erase_kstack
6361+
6362 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
6363 cmpq $(IA32_NR_syscalls-1),%rax
6364 ja ia32_badsys
6365@@ -252,6 +290,9 @@ sysenter_tracesys:
6366 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
6367 movq %rsp,%rdi /* &pt_regs -> arg1 */
6368 call syscall_trace_enter
6369+
6370+ pax_erase_kstack
6371+
6372 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6373 RESTORE_REST
6374 cmpq $(IA32_NR_syscalls-1),%rax
6375@@ -283,19 +324,24 @@ ENDPROC(ia32_sysenter_target)
6376 ENTRY(ia32_cstar_target)
6377 CFI_STARTPROC32 simple
6378 CFI_SIGNAL_FRAME
6379- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
6380+ CFI_DEF_CFA rsp,0
6381 CFI_REGISTER rip,rcx
6382 /*CFI_REGISTER rflags,r11*/
6383 SWAPGS_UNSAFE_STACK
6384 movl %esp,%r8d
6385 CFI_REGISTER rsp,r8
6386 movq PER_CPU_VAR(kernel_stack),%rsp
6387+
6388+#ifdef CONFIG_PAX_MEMORY_UDEREF
6389+ pax_enter_kernel_user
6390+#endif
6391+
6392 /*
6393 * No need to follow this irqs on/off section: the syscall
6394 * disabled irqs and here we enable it straight after entry:
6395 */
6396 ENABLE_INTERRUPTS(CLBR_NONE)
6397- SAVE_ARGS 8,1,1
6398+ SAVE_ARGS 8*6,1,1
6399 movl %eax,%eax /* zero extension */
6400 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
6401 movq %rcx,RIP-ARGOFFSET(%rsp)
6402@@ -311,6 +357,12 @@ ENTRY(ia32_cstar_target)
6403 /* no need to do an access_ok check here because r8 has been
6404 32bit zero extended */
6405 /* hardware stack frame is complete now */
6406+
6407+#ifdef CONFIG_PAX_MEMORY_UDEREF
6408+ mov $PAX_USER_SHADOW_BASE,%r10
6409+ add %r10,%r8
6410+#endif
6411+
6412 1: movl (%r8),%r9d
6413 .section __ex_table,"a"
6414 .quad 1b,ia32_badarg
6415@@ -333,6 +385,7 @@ cstar_dispatch:
6416 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6417 jnz sysretl_audit
6418 sysretl_from_sys_call:
6419+ pax_exit_kernel_user
6420 andl $~TS_COMPAT,TI_status(%r10)
6421 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
6422 movl RIP-ARGOFFSET(%rsp),%ecx
6423@@ -370,6 +423,9 @@ cstar_tracesys:
6424 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6425 movq %rsp,%rdi /* &pt_regs -> arg1 */
6426 call syscall_trace_enter
6427+
6428+ pax_erase_kstack
6429+
6430 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
6431 RESTORE_REST
6432 xchgl %ebp,%r9d
6433@@ -415,6 +471,7 @@ ENTRY(ia32_syscall)
6434 CFI_REL_OFFSET rip,RIP-RIP
6435 PARAVIRT_ADJUST_EXCEPTION_FRAME
6436 SWAPGS
6437+ pax_enter_kernel_user
6438 /*
6439 * No need to follow this irqs on/off section: the syscall
6440 * disabled irqs and here we enable it straight after entry:
6441@@ -448,6 +505,9 @@ ia32_tracesys:
6442 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6443 movq %rsp,%rdi /* &pt_regs -> arg1 */
6444 call syscall_trace_enter
6445+
6446+ pax_erase_kstack
6447+
6448 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6449 RESTORE_REST
6450 cmpq $(IA32_NR_syscalls-1),%rax
6451diff -urNp linux-2.6.32.41/arch/x86/ia32/ia32_signal.c linux-2.6.32.41/arch/x86/ia32/ia32_signal.c
6452--- linux-2.6.32.41/arch/x86/ia32/ia32_signal.c 2011-03-27 14:31:47.000000000 -0400
6453+++ linux-2.6.32.41/arch/x86/ia32/ia32_signal.c 2011-04-17 15:56:46.000000000 -0400
6454@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
6455 sp -= frame_size;
6456 /* Align the stack pointer according to the i386 ABI,
6457 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6458- sp = ((sp + 4) & -16ul) - 4;
6459+ sp = ((sp - 12) & -16ul) - 4;
6460 return (void __user *) sp;
6461 }
6462
6463@@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
6464 * These are actually not used anymore, but left because some
6465 * gdb versions depend on them as a marker.
6466 */
6467- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6468+ put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6469 } put_user_catch(err);
6470
6471 if (err)
6472@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
6473 0xb8,
6474 __NR_ia32_rt_sigreturn,
6475 0x80cd,
6476- 0,
6477+ 0
6478 };
6479
6480 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6481@@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
6482
6483 if (ka->sa.sa_flags & SA_RESTORER)
6484 restorer = ka->sa.sa_restorer;
6485+ else if (current->mm->context.vdso)
6486+ /* Return stub is in 32bit vsyscall page */
6487+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6488 else
6489- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6490- rt_sigreturn);
6491+ restorer = &frame->retcode;
6492 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6493
6494 /*
6495 * Not actually used anymore, but left because some gdb
6496 * versions need it.
6497 */
6498- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6499+ put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6500 } put_user_catch(err);
6501
6502 if (err)
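In get_sigframe(), ((sp + 4) & -16ul) - 4 becomes ((sp - 12) & -16ul) - 4. Since sp - 12 equals (sp + 4) - 16, the new expression always lands exactly 16 bytes below the old one while still satisfying the i386 ABI requirement that ((sp + 4) & 15) == 0 at function entry, so the signal frame gains 16 bytes of clearance under the pre-aligned stack pointer. A quick user-space check of both properties (a stand-alone test, not part of the patch):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long sp;

	for (sp = 0x100000; sp < 0x100040; sp++) {
		unsigned long sp_old = ((sp + 4) & -16ul) - 4;
		unsigned long sp_new = ((sp - 12) & -16ul) - 4;

		assert(((sp_old + 4) & 15) == 0);	/* old form is ABI-aligned */
		assert(((sp_new + 4) & 15) == 0);	/* so is the new form */
		assert(sp_new == sp_old - 16);		/* and it sits 16 bytes lower */
	}
	printf("both expressions keep ((sp + 4) & 15) == 0; the new one is 16 bytes lower\n");
	return 0;
}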
6503diff -urNp linux-2.6.32.41/arch/x86/include/asm/alternative.h linux-2.6.32.41/arch/x86/include/asm/alternative.h
6504--- linux-2.6.32.41/arch/x86/include/asm/alternative.h 2011-03-27 14:31:47.000000000 -0400
6505+++ linux-2.6.32.41/arch/x86/include/asm/alternative.h 2011-04-17 15:56:46.000000000 -0400
6506@@ -85,7 +85,7 @@ static inline void alternatives_smp_swit
6507 " .byte 662b-661b\n" /* sourcelen */ \
6508 " .byte 664f-663f\n" /* replacementlen */ \
6509 ".previous\n" \
6510- ".section .altinstr_replacement, \"ax\"\n" \
6511+ ".section .altinstr_replacement, \"a\"\n" \
6512 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
6513 ".previous"
6514
6515diff -urNp linux-2.6.32.41/arch/x86/include/asm/apm.h linux-2.6.32.41/arch/x86/include/asm/apm.h
6516--- linux-2.6.32.41/arch/x86/include/asm/apm.h 2011-03-27 14:31:47.000000000 -0400
6517+++ linux-2.6.32.41/arch/x86/include/asm/apm.h 2011-04-17 15:56:46.000000000 -0400
6518@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
6519 __asm__ __volatile__(APM_DO_ZERO_SEGS
6520 "pushl %%edi\n\t"
6521 "pushl %%ebp\n\t"
6522- "lcall *%%cs:apm_bios_entry\n\t"
6523+ "lcall *%%ss:apm_bios_entry\n\t"
6524 "setc %%al\n\t"
6525 "popl %%ebp\n\t"
6526 "popl %%edi\n\t"
6527@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
6528 __asm__ __volatile__(APM_DO_ZERO_SEGS
6529 "pushl %%edi\n\t"
6530 "pushl %%ebp\n\t"
6531- "lcall *%%cs:apm_bios_entry\n\t"
6532+ "lcall *%%ss:apm_bios_entry\n\t"
6533 "setc %%bl\n\t"
6534 "popl %%ebp\n\t"
6535 "popl %%edi\n\t"
6536diff -urNp linux-2.6.32.41/arch/x86/include/asm/atomic_32.h linux-2.6.32.41/arch/x86/include/asm/atomic_32.h
6537--- linux-2.6.32.41/arch/x86/include/asm/atomic_32.h 2011-03-27 14:31:47.000000000 -0400
6538+++ linux-2.6.32.41/arch/x86/include/asm/atomic_32.h 2011-05-04 17:56:20.000000000 -0400
6539@@ -25,6 +25,17 @@ static inline int atomic_read(const atom
6540 }
6541
6542 /**
6543+ * atomic_read_unchecked - read atomic variable
6544+ * @v: pointer of type atomic_unchecked_t
6545+ *
6546+ * Atomically reads the value of @v.
6547+ */
6548+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6549+{
6550+ return v->counter;
6551+}
6552+
6553+/**
6554 * atomic_set - set atomic variable
6555 * @v: pointer of type atomic_t
6556 * @i: required value
6557@@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *
6558 }
6559
6560 /**
6561+ * atomic_set_unchecked - set atomic variable
6562+ * @v: pointer of type atomic_unchecked_t
6563+ * @i: required value
6564+ *
6565+ * Atomically sets the value of @v to @i.
6566+ */
6567+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6568+{
6569+ v->counter = i;
6570+}
6571+
6572+/**
6573 * atomic_add - add integer to atomic variable
6574 * @i: integer value to add
6575 * @v: pointer of type atomic_t
6576@@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *
6577 */
6578 static inline void atomic_add(int i, atomic_t *v)
6579 {
6580- asm volatile(LOCK_PREFIX "addl %1,%0"
6581+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6582+
6583+#ifdef CONFIG_PAX_REFCOUNT
6584+ "jno 0f\n"
6585+ LOCK_PREFIX "subl %1,%0\n"
6586+ "int $4\n0:\n"
6587+ _ASM_EXTABLE(0b, 0b)
6588+#endif
6589+
6590+ : "+m" (v->counter)
6591+ : "ir" (i));
6592+}
6593+
6594+/**
6595+ * atomic_add_unchecked - add integer to atomic variable
6596+ * @i: integer value to add
6597+ * @v: pointer of type atomic_unchecked_t
6598+ *
6599+ * Atomically adds @i to @v.
6600+ */
6601+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6602+{
6603+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6604 : "+m" (v->counter)
6605 : "ir" (i));
6606 }
6607@@ -59,7 +104,29 @@ static inline void atomic_add(int i, ato
6608 */
6609 static inline void atomic_sub(int i, atomic_t *v)
6610 {
6611- asm volatile(LOCK_PREFIX "subl %1,%0"
6612+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6613+
6614+#ifdef CONFIG_PAX_REFCOUNT
6615+ "jno 0f\n"
6616+ LOCK_PREFIX "addl %1,%0\n"
6617+ "int $4\n0:\n"
6618+ _ASM_EXTABLE(0b, 0b)
6619+#endif
6620+
6621+ : "+m" (v->counter)
6622+ : "ir" (i));
6623+}
6624+
6625+/**
6626+ * atomic_sub_unchecked - subtract integer from atomic variable
6627+ * @i: integer value to subtract
6628+ * @v: pointer of type atomic_unchecked_t
6629+ *
6630+ * Atomically subtracts @i from @v.
6631+ */
6632+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6633+{
6634+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6635 : "+m" (v->counter)
6636 : "ir" (i));
6637 }
6638@@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(in
6639 {
6640 unsigned char c;
6641
6642- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6643+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
6644+
6645+#ifdef CONFIG_PAX_REFCOUNT
6646+ "jno 0f\n"
6647+ LOCK_PREFIX "addl %2,%0\n"
6648+ "int $4\n0:\n"
6649+ _ASM_EXTABLE(0b, 0b)
6650+#endif
6651+
6652+ "sete %1\n"
6653 : "+m" (v->counter), "=qm" (c)
6654 : "ir" (i) : "memory");
6655 return c;
6656@@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(in
6657 */
6658 static inline void atomic_inc(atomic_t *v)
6659 {
6660- asm volatile(LOCK_PREFIX "incl %0"
6661+ asm volatile(LOCK_PREFIX "incl %0\n"
6662+
6663+#ifdef CONFIG_PAX_REFCOUNT
6664+ "jno 0f\n"
6665+ LOCK_PREFIX "decl %0\n"
6666+ "int $4\n0:\n"
6667+ _ASM_EXTABLE(0b, 0b)
6668+#endif
6669+
6670+ : "+m" (v->counter));
6671+}
6672+
6673+/**
6674+ * atomic_inc_unchecked - increment atomic variable
6675+ * @v: pointer of type atomic_unchecked_t
6676+ *
6677+ * Atomically increments @v by 1.
6678+ */
6679+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6680+{
6681+ asm volatile(LOCK_PREFIX "incl %0\n"
6682 : "+m" (v->counter));
6683 }
6684
6685@@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *
6686 */
6687 static inline void atomic_dec(atomic_t *v)
6688 {
6689- asm volatile(LOCK_PREFIX "decl %0"
6690+ asm volatile(LOCK_PREFIX "decl %0\n"
6691+
6692+#ifdef CONFIG_PAX_REFCOUNT
6693+ "jno 0f\n"
6694+ LOCK_PREFIX "incl %0\n"
6695+ "int $4\n0:\n"
6696+ _ASM_EXTABLE(0b, 0b)
6697+#endif
6698+
6699+ : "+m" (v->counter));
6700+}
6701+
6702+/**
6703+ * atomic_dec_unchecked - decrement atomic variable
6704+ * @v: pointer of type atomic_unchecked_t
6705+ *
6706+ * Atomically decrements @v by 1.
6707+ */
6708+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6709+{
6710+ asm volatile(LOCK_PREFIX "decl %0\n"
6711 : "+m" (v->counter));
6712 }
6713
6714@@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(at
6715 {
6716 unsigned char c;
6717
6718- asm volatile(LOCK_PREFIX "decl %0; sete %1"
6719+ asm volatile(LOCK_PREFIX "decl %0\n"
6720+
6721+#ifdef CONFIG_PAX_REFCOUNT
6722+ "jno 0f\n"
6723+ LOCK_PREFIX "incl %0\n"
6724+ "int $4\n0:\n"
6725+ _ASM_EXTABLE(0b, 0b)
6726+#endif
6727+
6728+ "sete %1\n"
6729 : "+m" (v->counter), "=qm" (c)
6730 : : "memory");
6731 return c != 0;
6732@@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(at
6733 {
6734 unsigned char c;
6735
6736- asm volatile(LOCK_PREFIX "incl %0; sete %1"
6737+ asm volatile(LOCK_PREFIX "incl %0\n"
6738+
6739+#ifdef CONFIG_PAX_REFCOUNT
6740+ "jno 0f\n"
6741+ LOCK_PREFIX "decl %0\n"
6742+ "into\n0:\n"
6743+ _ASM_EXTABLE(0b, 0b)
6744+#endif
6745+
6746+ "sete %1\n"
6747+ : "+m" (v->counter), "=qm" (c)
6748+ : : "memory");
6749+ return c != 0;
6750+}
6751+
6752+/**
6753+ * atomic_inc_and_test_unchecked - increment and test
6754+ * @v: pointer of type atomic_unchecked_t
6755+ *
6756+ * Atomically increments @v by 1
6757+ * and returns true if the result is zero, or false for all
6758+ * other cases.
6759+ */
6760+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6761+{
6762+ unsigned char c;
6763+
6764+ asm volatile(LOCK_PREFIX "incl %0\n"
6765+ "sete %1\n"
6766 : "+m" (v->counter), "=qm" (c)
6767 : : "memory");
6768 return c != 0;
6769@@ -156,7 +309,16 @@ static inline int atomic_add_negative(in
6770 {
6771 unsigned char c;
6772
6773- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
6774+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
6775+
6776+#ifdef CONFIG_PAX_REFCOUNT
6777+ "jno 0f\n"
6778+ LOCK_PREFIX "subl %2,%0\n"
6779+ "int $4\n0:\n"
6780+ _ASM_EXTABLE(0b, 0b)
6781+#endif
6782+
6783+ "sets %1\n"
6784 : "+m" (v->counter), "=qm" (c)
6785 : "ir" (i) : "memory");
6786 return c;
6787@@ -179,6 +341,46 @@ static inline int atomic_add_return(int
6788 #endif
6789 /* Modern 486+ processor */
6790 __i = i;
6791+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
6792+
6793+#ifdef CONFIG_PAX_REFCOUNT
6794+ "jno 0f\n"
6795+ "movl %0, %1\n"
6796+ "int $4\n0:\n"
6797+ _ASM_EXTABLE(0b, 0b)
6798+#endif
6799+
6800+ : "+r" (i), "+m" (v->counter)
6801+ : : "memory");
6802+ return i + __i;
6803+
6804+#ifdef CONFIG_M386
6805+no_xadd: /* Legacy 386 processor */
6806+ local_irq_save(flags);
6807+ __i = atomic_read(v);
6808+ atomic_set(v, i + __i);
6809+ local_irq_restore(flags);
6810+ return i + __i;
6811+#endif
6812+}
6813+
6814+/**
6815+ * atomic_add_return_unchecked - add integer and return
6816+ * @v: pointer of type atomic_unchecked_t
6817+ * @i: integer value to add
6818+ *
6819+ * Atomically adds @i to @v and returns @i + @v
6820+ */
6821+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6822+{
6823+ int __i;
6824+#ifdef CONFIG_M386
6825+ unsigned long flags;
6826+ if (unlikely(boot_cpu_data.x86 <= 3))
6827+ goto no_xadd;
6828+#endif
6829+ /* Modern 486+ processor */
6830+ __i = i;
6831 asm volatile(LOCK_PREFIX "xaddl %0, %1"
6832 : "+r" (i), "+m" (v->counter)
6833 : : "memory");
6834@@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_
6835 return cmpxchg(&v->counter, old, new);
6836 }
6837
6838+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
6839+{
6840+ return cmpxchg(&v->counter, old, new);
6841+}
6842+
6843 static inline int atomic_xchg(atomic_t *v, int new)
6844 {
6845 return xchg(&v->counter, new);
6846 }
6847
6848+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6849+{
6850+ return xchg(&v->counter, new);
6851+}
6852+
6853 /**
6854 * atomic_add_unless - add unless the number is already a given value
6855 * @v: pointer of type atomic_t
6856@@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *
6857 */
6858 static inline int atomic_add_unless(atomic_t *v, int a, int u)
6859 {
6860- int c, old;
6861+ int c, old, new;
6862 c = atomic_read(v);
6863 for (;;) {
6864- if (unlikely(c == (u)))
6865+ if (unlikely(c == u))
6866 break;
6867- old = atomic_cmpxchg((v), c, c + (a));
6868+
6869+ asm volatile("addl %2,%0\n"
6870+
6871+#ifdef CONFIG_PAX_REFCOUNT
6872+ "jno 0f\n"
6873+ "subl %2,%0\n"
6874+ "int $4\n0:\n"
6875+ _ASM_EXTABLE(0b, 0b)
6876+#endif
6877+
6878+ : "=r" (new)
6879+ : "0" (c), "ir" (a));
6880+
6881+ old = atomic_cmpxchg(v, c, new);
6882 if (likely(old == c))
6883 break;
6884 c = old;
6885 }
6886- return c != (u);
6887+ return c != u;
6888 }
6889
6890 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
6891
6892 #define atomic_inc_return(v) (atomic_add_return(1, v))
6893+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6894+{
6895+ return atomic_add_return_unchecked(1, v);
6896+}
6897 #define atomic_dec_return(v) (atomic_sub_return(1, v))
6898
6899 /* These are x86-specific, used by some header files */
6900@@ -266,9 +495,18 @@ typedef struct {
6901 u64 __aligned(8) counter;
6902 } atomic64_t;
6903
6904+#ifdef CONFIG_PAX_REFCOUNT
6905+typedef struct {
6906+ u64 __aligned(8) counter;
6907+} atomic64_unchecked_t;
6908+#else
6909+typedef atomic64_t atomic64_unchecked_t;
6910+#endif
6911+
6912 #define ATOMIC64_INIT(val) { (val) }
6913
6914 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
6915+extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
6916
6917 /**
6918 * atomic64_xchg - xchg atomic64 variable
6919@@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *
6920 * the old value.
6921 */
6922 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
6923+extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
6924
6925 /**
6926 * atomic64_set - set atomic64 variable
6927@@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr
6928 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
6929
6930 /**
6931+ * atomic64_unchecked_set - set atomic64 variable
6932+ * @ptr: pointer to type atomic64_unchecked_t
6933+ * @new_val: value to assign
6934+ *
6935+ * Atomically sets the value of @ptr to @new_val.
6936+ */
6937+extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
6938+
6939+/**
6940 * atomic64_read - read atomic64 variable
6941 * @ptr: pointer to type atomic64_t
6942 *
6943@@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64
6944 return res;
6945 }
6946
6947-extern u64 atomic64_read(atomic64_t *ptr);
6948+/**
6949+ * atomic64_read_unchecked - read atomic64 variable
6950+ * @ptr: pointer to type atomic64_unchecked_t
6951+ *
6952+ * Atomically reads the value of @ptr and returns it.
6953+ */
6954+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
6955+{
6956+ u64 res;
6957+
6958+ /*
6959+ * Note, we inline this atomic64_unchecked_t primitive because
6960+ * it only clobbers EAX/EDX and leaves the others
6961+ * untouched. We also (somewhat subtly) rely on the
6962+ * fact that cmpxchg8b returns the current 64-bit value
6963+ * of the memory location we are touching:
6964+ */
6965+ asm volatile(
6966+ "mov %%ebx, %%eax\n\t"
6967+ "mov %%ecx, %%edx\n\t"
6968+ LOCK_PREFIX "cmpxchg8b %1\n"
6969+ : "=&A" (res)
6970+ : "m" (*ptr)
6971+ );
6972+
6973+ return res;
6974+}
6975
6976 /**
6977 * atomic64_add_return - add and return
6978@@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta
6979 * Other variants with different arithmetic operators:
6980 */
6981 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
6982+extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
6983 extern u64 atomic64_inc_return(atomic64_t *ptr);
6984+extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
6985 extern u64 atomic64_dec_return(atomic64_t *ptr);
6986+extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
6987
6988 /**
6989 * atomic64_add - add integer to atomic64 variable
6990@@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_
6991 extern void atomic64_add(u64 delta, atomic64_t *ptr);
6992
6993 /**
6994+ * atomic64_add_unchecked - add integer to atomic64 variable
6995+ * @delta: integer value to add
6996+ * @ptr: pointer to type atomic64_unchecked_t
6997+ *
6998+ * Atomically adds @delta to @ptr.
6999+ */
7000+extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7001+
7002+/**
7003 * atomic64_sub - subtract the atomic64 variable
7004 * @delta: integer value to subtract
7005 * @ptr: pointer to type atomic64_t
7006@@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atom
7007 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
7008
7009 /**
7010+ * atomic64_sub_unchecked - subtract the atomic64 variable
7011+ * @delta: integer value to subtract
7012+ * @ptr: pointer to type atomic64_unchecked_t
7013+ *
7014+ * Atomically subtracts @delta from @ptr.
7015+ */
7016+extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7017+
7018+/**
7019 * atomic64_sub_and_test - subtract value from variable and test result
7020 * @delta: integer value to subtract
7021 * @ptr: pointer to type atomic64_t
7022@@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 del
7023 extern void atomic64_inc(atomic64_t *ptr);
7024
7025 /**
7026+ * atomic64_inc_unchecked - increment atomic64 variable
7027+ * @ptr: pointer to type atomic64_unchecked_t
7028+ *
7029+ * Atomically increments @ptr by 1.
7030+ */
7031+extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
7032+
7033+/**
7034 * atomic64_dec - decrement atomic64 variable
7035 * @ptr: pointer to type atomic64_t
7036 *
7037@@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr
7038 extern void atomic64_dec(atomic64_t *ptr);
7039
7040 /**
7041+ * atomic64_dec_unchecked - decrement atomic64 variable
7042+ * @ptr: pointer to type atomic64_unchecked_t
7043+ *
7044+ * Atomically decrements @ptr by 1.
7045+ */
7046+extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
7047+
7048+/**
7049 * atomic64_dec_and_test - decrement and test
7050 * @ptr: pointer to type atomic64_t
7051 *
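The atomic_32.h hunks give every operation an *_unchecked counterpart and, under CONFIG_PAX_REFCOUNT, wrap the checked forms with an overflow trap. The intended split is that reference counts stay atomic_t and get overflow detection, while counters that legitimately wrap (statistics, sequence numbers) move to atomic_unchecked_t. A short usage sketch, assuming the atomic_unchecked_t type introduced by the wider patch and a hypothetical structure:

#include <asm/atomic.h>

struct conn {				/* hypothetical example structure */
	atomic_t refcount;		/* overflow-checked under PAX_REFCOUNT */
	atomic_unchecked_t rx_packets;	/* free-running counter, allowed to wrap */
};

static void conn_get(struct conn *c)
{
	atomic_inc(&c->refcount);		/* traps to the handler on overflow */
}

static void conn_count_rx(struct conn *c)
{
	atomic_inc_unchecked(&c->rx_packets);	/* plain locked incl, no check */
}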
7052diff -urNp linux-2.6.32.41/arch/x86/include/asm/atomic_64.h linux-2.6.32.41/arch/x86/include/asm/atomic_64.h
7053--- linux-2.6.32.41/arch/x86/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
7054+++ linux-2.6.32.41/arch/x86/include/asm/atomic_64.h 2011-05-04 18:35:31.000000000 -0400
7055@@ -24,6 +24,17 @@ static inline int atomic_read(const atom
7056 }
7057
7058 /**
7059+ * atomic_read_unchecked - read atomic variable
7060+ * @v: pointer of type atomic_unchecked_t
7061+ *
7062+ * Atomically reads the value of @v.
7063+ */
7064+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7065+{
7066+ return v->counter;
7067+}
7068+
7069+/**
7070 * atomic_set - set atomic variable
7071 * @v: pointer of type atomic_t
7072 * @i: required value
7073@@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *
7074 }
7075
7076 /**
7077+ * atomic_set_unchecked - set atomic variable
7078+ * @v: pointer of type atomic_unchecked_t
7079+ * @i: required value
7080+ *
7081+ * Atomically sets the value of @v to @i.
7082+ */
7083+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7084+{
7085+ v->counter = i;
7086+}
7087+
7088+/**
7089 * atomic_add - add integer to atomic variable
7090 * @i: integer value to add
7091 * @v: pointer of type atomic_t
7092@@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *
7093 */
7094 static inline void atomic_add(int i, atomic_t *v)
7095 {
7096- asm volatile(LOCK_PREFIX "addl %1,%0"
7097+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7098+
7099+#ifdef CONFIG_PAX_REFCOUNT
7100+ "jno 0f\n"
7101+ LOCK_PREFIX "subl %1,%0\n"
7102+ "int $4\n0:\n"
7103+ _ASM_EXTABLE(0b, 0b)
7104+#endif
7105+
7106+ : "=m" (v->counter)
7107+ : "ir" (i), "m" (v->counter));
7108+}
7109+
7110+/**
7111+ * atomic_add_unchecked - add integer to atomic variable
7112+ * @i: integer value to add
7113+ * @v: pointer of type atomic_unchecked_t
7114+ *
7115+ * Atomically adds @i to @v.
7116+ */
7117+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7118+{
7119+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7120 : "=m" (v->counter)
7121 : "ir" (i), "m" (v->counter));
7122 }
7123@@ -58,7 +103,29 @@ static inline void atomic_add(int i, ato
7124 */
7125 static inline void atomic_sub(int i, atomic_t *v)
7126 {
7127- asm volatile(LOCK_PREFIX "subl %1,%0"
7128+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7129+
7130+#ifdef CONFIG_PAX_REFCOUNT
7131+ "jno 0f\n"
7132+ LOCK_PREFIX "addl %1,%0\n"
7133+ "int $4\n0:\n"
7134+ _ASM_EXTABLE(0b, 0b)
7135+#endif
7136+
7137+ : "=m" (v->counter)
7138+ : "ir" (i), "m" (v->counter));
7139+}
7140+
7141+/**
7142+ * atomic_sub_unchecked - subtract the atomic variable
7143+ * @i: integer value to subtract
7144+ * @v: pointer of type atomic_unchecked_t
7145+ *
7146+ * Atomically subtracts @i from @v.
7147+ */
7148+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7149+{
7150+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7151 : "=m" (v->counter)
7152 : "ir" (i), "m" (v->counter));
7153 }
7154@@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(in
7155 {
7156 unsigned char c;
7157
7158- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7159+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
7160+
7161+#ifdef CONFIG_PAX_REFCOUNT
7162+ "jno 0f\n"
7163+ LOCK_PREFIX "addl %2,%0\n"
7164+ "int $4\n0:\n"
7165+ _ASM_EXTABLE(0b, 0b)
7166+#endif
7167+
7168+ "sete %1\n"
7169 : "=m" (v->counter), "=qm" (c)
7170 : "ir" (i), "m" (v->counter) : "memory");
7171 return c;
7172@@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(in
7173 */
7174 static inline void atomic_inc(atomic_t *v)
7175 {
7176- asm volatile(LOCK_PREFIX "incl %0"
7177+ asm volatile(LOCK_PREFIX "incl %0\n"
7178+
7179+#ifdef CONFIG_PAX_REFCOUNT
7180+ "jno 0f\n"
7181+ LOCK_PREFIX "decl %0\n"
7182+ "int $4\n0:\n"
7183+ _ASM_EXTABLE(0b, 0b)
7184+#endif
7185+
7186+ : "=m" (v->counter)
7187+ : "m" (v->counter));
7188+}
7189+
7190+/**
7191+ * atomic_inc_unchecked - increment atomic variable
7192+ * @v: pointer of type atomic_unchecked_t
7193+ *
7194+ * Atomically increments @v by 1.
7195+ */
7196+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7197+{
7198+ asm volatile(LOCK_PREFIX "incl %0\n"
7199 : "=m" (v->counter)
7200 : "m" (v->counter));
7201 }
7202@@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *
7203 */
7204 static inline void atomic_dec(atomic_t *v)
7205 {
7206- asm volatile(LOCK_PREFIX "decl %0"
7207+ asm volatile(LOCK_PREFIX "decl %0\n"
7208+
7209+#ifdef CONFIG_PAX_REFCOUNT
7210+ "jno 0f\n"
7211+ LOCK_PREFIX "incl %0\n"
7212+ "int $4\n0:\n"
7213+ _ASM_EXTABLE(0b, 0b)
7214+#endif
7215+
7216+ : "=m" (v->counter)
7217+ : "m" (v->counter));
7218+}
7219+
7220+/**
7221+ * atomic_dec_unchecked - decrement atomic variable
7222+ * @v: pointer of type atomic_unchecked_t
7223+ *
7224+ * Atomically decrements @v by 1.
7225+ */
7226+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7227+{
7228+ asm volatile(LOCK_PREFIX "decl %0\n"
7229 : "=m" (v->counter)
7230 : "m" (v->counter));
7231 }
7232@@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(at
7233 {
7234 unsigned char c;
7235
7236- asm volatile(LOCK_PREFIX "decl %0; sete %1"
7237+ asm volatile(LOCK_PREFIX "decl %0\n"
7238+
7239+#ifdef CONFIG_PAX_REFCOUNT
7240+ "jno 0f\n"
7241+ LOCK_PREFIX "incl %0\n"
7242+ "int $4\n0:\n"
7243+ _ASM_EXTABLE(0b, 0b)
7244+#endif
7245+
7246+ "sete %1\n"
7247 : "=m" (v->counter), "=qm" (c)
7248 : "m" (v->counter) : "memory");
7249 return c != 0;
7250@@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(at
7251 {
7252 unsigned char c;
7253
7254- asm volatile(LOCK_PREFIX "incl %0; sete %1"
7255+ asm volatile(LOCK_PREFIX "incl %0\n"
7256+
7257+#ifdef CONFIG_PAX_REFCOUNT
7258+ "jno 0f\n"
7259+ LOCK_PREFIX "decl %0\n"
7260+ "int $4\n0:\n"
7261+ _ASM_EXTABLE(0b, 0b)
7262+#endif
7263+
7264+ "sete %1\n"
7265+ : "=m" (v->counter), "=qm" (c)
7266+ : "m" (v->counter) : "memory");
7267+ return c != 0;
7268+}
7269+
7270+/**
7271+ * atomic_inc_and_test_unchecked - increment and test
7272+ * @v: pointer of type atomic_unchecked_t
7273+ *
7274+ * Atomically increments @v by 1
7275+ * and returns true if the result is zero, or false for all
7276+ * other cases.
7277+ */
7278+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7279+{
7280+ unsigned char c;
7281+
7282+ asm volatile(LOCK_PREFIX "incl %0\n"
7283+ "sete %1\n"
7284 : "=m" (v->counter), "=qm" (c)
7285 : "m" (v->counter) : "memory");
7286 return c != 0;
7287@@ -157,7 +312,16 @@ static inline int atomic_add_negative(in
7288 {
7289 unsigned char c;
7290
7291- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7292+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
7293+
7294+#ifdef CONFIG_PAX_REFCOUNT
7295+ "jno 0f\n"
7296+ LOCK_PREFIX "subl %2,%0\n"
7297+ "int $4\n0:\n"
7298+ _ASM_EXTABLE(0b, 0b)
7299+#endif
7300+
7301+ "sets %1\n"
7302 : "=m" (v->counter), "=qm" (c)
7303 : "ir" (i), "m" (v->counter) : "memory");
7304 return c;
7305@@ -173,7 +337,31 @@ static inline int atomic_add_negative(in
7306 static inline int atomic_add_return(int i, atomic_t *v)
7307 {
7308 int __i = i;
7309- asm volatile(LOCK_PREFIX "xaddl %0, %1"
7310+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7311+
7312+#ifdef CONFIG_PAX_REFCOUNT
7313+ "jno 0f\n"
7314+ "movl %0, %1\n"
7315+ "int $4\n0:\n"
7316+ _ASM_EXTABLE(0b, 0b)
7317+#endif
7318+
7319+ : "+r" (i), "+m" (v->counter)
7320+ : : "memory");
7321+ return i + __i;
7322+}
7323+
7324+/**
7325+ * atomic_add_return_unchecked - add and return
7326+ * @i: integer value to add
7327+ * @v: pointer of type atomic_unchecked_t
7328+ *
7329+ * Atomically adds @i to @v and returns @i + @v
7330+ */
7331+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7332+{
7333+ int __i = i;
7334+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7335 : "+r" (i), "+m" (v->counter)
7336 : : "memory");
7337 return i + __i;
7338@@ -185,6 +373,10 @@ static inline int atomic_sub_return(int
7339 }
7340
7341 #define atomic_inc_return(v) (atomic_add_return(1, v))
7342+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7343+{
7344+ return atomic_add_return_unchecked(1, v);
7345+}
7346 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7347
7348 /* The 64-bit atomic type */
7349@@ -204,6 +396,18 @@ static inline long atomic64_read(const a
7350 }
7351
7352 /**
7353+ * atomic64_read_unchecked - read atomic64 variable
7354+ * @v: pointer of type atomic64_unchecked_t
7355+ *
7356+ * Atomically reads the value of @v.
7357+ * Doesn't imply a read memory barrier.
7358+ */
7359+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7360+{
7361+ return v->counter;
7362+}
7363+
7364+/**
7365 * atomic64_set - set atomic64 variable
7366 * @v: pointer to type atomic64_t
7367 * @i: required value
7368@@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64
7369 }
7370
7371 /**
7372+ * atomic64_set_unchecked - set atomic64 variable
7373+ * @v: pointer to type atomic64_unchecked_t
7374+ * @i: required value
7375+ *
7376+ * Atomically sets the value of @v to @i.
7377+ */
7378+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7379+{
7380+ v->counter = i;
7381+}
7382+
7383+/**
7384 * atomic64_add - add integer to atomic64 variable
7385 * @i: integer value to add
7386 * @v: pointer to type atomic64_t
7387@@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64
7388 */
7389 static inline void atomic64_add(long i, atomic64_t *v)
7390 {
7391+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
7392+
7393+#ifdef CONFIG_PAX_REFCOUNT
7394+ "jno 0f\n"
7395+ LOCK_PREFIX "subq %1,%0\n"
7396+ "int $4\n0:\n"
7397+ _ASM_EXTABLE(0b, 0b)
7398+#endif
7399+
7400+ : "=m" (v->counter)
7401+ : "er" (i), "m" (v->counter));
7402+}
7403+
7404+/**
7405+ * atomic64_add_unchecked - add integer to atomic64 variable
7406+ * @i: integer value to add
7407+ * @v: pointer to type atomic64_unchecked_t
7408+ *
7409+ * Atomically adds @i to @v.
7410+ */
7411+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
7412+{
7413 asm volatile(LOCK_PREFIX "addq %1,%0"
7414 : "=m" (v->counter)
7415 : "er" (i), "m" (v->counter));
7416@@ -238,7 +476,15 @@ static inline void atomic64_add(long i,
7417 */
7418 static inline void atomic64_sub(long i, atomic64_t *v)
7419 {
7420- asm volatile(LOCK_PREFIX "subq %1,%0"
7421+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
7422+
7423+#ifdef CONFIG_PAX_REFCOUNT
7424+ "jno 0f\n"
7425+ LOCK_PREFIX "addq %1,%0\n"
7426+ "int $4\n0:\n"
7427+ _ASM_EXTABLE(0b, 0b)
7428+#endif
7429+
7430 : "=m" (v->counter)
7431 : "er" (i), "m" (v->counter));
7432 }
7433@@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(
7434 {
7435 unsigned char c;
7436
7437- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
7438+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
7439+
7440+#ifdef CONFIG_PAX_REFCOUNT
7441+ "jno 0f\n"
7442+ LOCK_PREFIX "addq %2,%0\n"
7443+ "int $4\n0:\n"
7444+ _ASM_EXTABLE(0b, 0b)
7445+#endif
7446+
7447+ "sete %1\n"
7448 : "=m" (v->counter), "=qm" (c)
7449 : "er" (i), "m" (v->counter) : "memory");
7450 return c;
7451@@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(
7452 */
7453 static inline void atomic64_inc(atomic64_t *v)
7454 {
7455+ asm volatile(LOCK_PREFIX "incq %0\n"
7456+
7457+#ifdef CONFIG_PAX_REFCOUNT
7458+ "jno 0f\n"
7459+ LOCK_PREFIX "decq %0\n"
7460+ "int $4\n0:\n"
7461+ _ASM_EXTABLE(0b, 0b)
7462+#endif
7463+
7464+ : "=m" (v->counter)
7465+ : "m" (v->counter));
7466+}
7467+
7468+/**
7469+ * atomic64_inc_unchecked - increment atomic64 variable
7470+ * @v: pointer to type atomic64_unchecked_t
7471+ *
7472+ * Atomically increments @v by 1.
7473+ */
7474+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7475+{
7476 asm volatile(LOCK_PREFIX "incq %0"
7477 : "=m" (v->counter)
7478 : "m" (v->counter));
7479@@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64
7480 */
7481 static inline void atomic64_dec(atomic64_t *v)
7482 {
7483- asm volatile(LOCK_PREFIX "decq %0"
7484+ asm volatile(LOCK_PREFIX "decq %0\n"
7485+
7486+#ifdef CONFIG_PAX_REFCOUNT
7487+ "jno 0f\n"
7488+ LOCK_PREFIX "incq %0\n"
7489+ "int $4\n0:\n"
7490+ _ASM_EXTABLE(0b, 0b)
7491+#endif
7492+
7493+ : "=m" (v->counter)
7494+ : "m" (v->counter));
7495+}
7496+
7497+/**
7498+ * atomic64_dec_unchecked - decrement atomic64 variable
7499+ * @v: pointer to type atomic64_t
7500+ *
7501+ * Atomically decrements @v by 1.
7502+ */
7503+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7504+{
7505+ asm volatile(LOCK_PREFIX "decq %0\n"
7506 : "=m" (v->counter)
7507 : "m" (v->counter));
7508 }
7509@@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(
7510 {
7511 unsigned char c;
7512
7513- asm volatile(LOCK_PREFIX "decq %0; sete %1"
7514+ asm volatile(LOCK_PREFIX "decq %0\n"
7515+
7516+#ifdef CONFIG_PAX_REFCOUNT
7517+ "jno 0f\n"
7518+ LOCK_PREFIX "incq %0\n"
7519+ "int $4\n0:\n"
7520+ _ASM_EXTABLE(0b, 0b)
7521+#endif
7522+
7523+ "sete %1\n"
7524 : "=m" (v->counter), "=qm" (c)
7525 : "m" (v->counter) : "memory");
7526 return c != 0;
7527@@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(
7528 {
7529 unsigned char c;
7530
7531- asm volatile(LOCK_PREFIX "incq %0; sete %1"
7532+ asm volatile(LOCK_PREFIX "incq %0\n"
7533+
7534+#ifdef CONFIG_PAX_REFCOUNT
7535+ "jno 0f\n"
7536+ LOCK_PREFIX "decq %0\n"
7537+ "int $4\n0:\n"
7538+ _ASM_EXTABLE(0b, 0b)
7539+#endif
7540+
7541+ "sete %1\n"
7542 : "=m" (v->counter), "=qm" (c)
7543 : "m" (v->counter) : "memory");
7544 return c != 0;
7545@@ -337,7 +652,16 @@ static inline int atomic64_add_negative(
7546 {
7547 unsigned char c;
7548
7549- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
7550+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
7551+
7552+#ifdef CONFIG_PAX_REFCOUNT
7553+ "jno 0f\n"
7554+ LOCK_PREFIX "subq %2,%0\n"
7555+ "int $4\n0:\n"
7556+ _ASM_EXTABLE(0b, 0b)
7557+#endif
7558+
7559+ "sets %1\n"
7560 : "=m" (v->counter), "=qm" (c)
7561 : "er" (i), "m" (v->counter) : "memory");
7562 return c;
7563@@ -353,7 +677,31 @@ static inline int atomic64_add_negative(
7564 static inline long atomic64_add_return(long i, atomic64_t *v)
7565 {
7566 long __i = i;
7567- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
7568+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
7569+
7570+#ifdef CONFIG_PAX_REFCOUNT
7571+ "jno 0f\n"
7572+ "movq %0, %1\n"
7573+ "int $4\n0:\n"
7574+ _ASM_EXTABLE(0b, 0b)
7575+#endif
7576+
7577+ : "+r" (i), "+m" (v->counter)
7578+ : : "memory");
7579+ return i + __i;
7580+}
7581+
7582+/**
7583+ * atomic64_add_return_unchecked - add and return
7584+ * @i: integer value to add
7585+ * @v: pointer to type atomic64_unchecked_t
7586+ *
7587+ * Atomically adds @i to @v and returns @i + @v
7588+ */
7589+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7590+{
7591+ long __i = i;
7592+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
7593 : "+r" (i), "+m" (v->counter)
7594 : : "memory");
7595 return i + __i;
7596@@ -365,6 +713,10 @@ static inline long atomic64_sub_return(l
7597 }
7598
7599 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
7600+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7601+{
7602+ return atomic64_add_return_unchecked(1, v);
7603+}
7604 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
7605
7606 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
7607@@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atom
7608 return cmpxchg(&v->counter, old, new);
7609 }
7610
7611+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
7612+{
7613+ return cmpxchg(&v->counter, old, new);
7614+}
7615+
7616 static inline long atomic64_xchg(atomic64_t *v, long new)
7617 {
7618 return xchg(&v->counter, new);
7619 }
7620
7621+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
7622+{
7623+ return xchg(&v->counter, new);
7624+}
7625+
7626 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
7627 {
7628 return cmpxchg(&v->counter, old, new);
7629 }
7630
7631+static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7632+{
7633+ return cmpxchg(&v->counter, old, new);
7634+}
7635+
7636 static inline long atomic_xchg(atomic_t *v, int new)
7637 {
7638 return xchg(&v->counter, new);
7639 }
7640
7641+static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7642+{
7643+ return xchg(&v->counter, new);
7644+}
7645+
7646 /**
7647 * atomic_add_unless - add unless the number is a given value
7648 * @v: pointer of type atomic_t
7649@@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t
7650 */
7651 static inline int atomic_add_unless(atomic_t *v, int a, int u)
7652 {
7653- int c, old;
7654+ int c, old, new;
7655 c = atomic_read(v);
7656 for (;;) {
7657- if (unlikely(c == (u)))
7658+ if (unlikely(c == u))
7659 break;
7660- old = atomic_cmpxchg((v), c, c + (a));
7661+
7662+ asm volatile("addl %2,%0\n"
7663+
7664+#ifdef CONFIG_PAX_REFCOUNT
7665+ "jno 0f\n"
7666+ "subl %2,%0\n"
7667+ "int $4\n0:\n"
7668+ _ASM_EXTABLE(0b, 0b)
7669+#endif
7670+
7671+ : "=r" (new)
7672+ : "0" (c), "ir" (a));
7673+
7674+ old = atomic_cmpxchg(v, c, new);
7675 if (likely(old == c))
7676 break;
7677 c = old;
7678 }
7679- return c != (u);
7680+ return c != u;
7681 }
7682
7683 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
7684@@ -424,17 +809,30 @@ static inline int atomic_add_unless(atom
7685 */
7686 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
7687 {
7688- long c, old;
7689+ long c, old, new;
7690 c = atomic64_read(v);
7691 for (;;) {
7692- if (unlikely(c == (u)))
7693+ if (unlikely(c == u))
7694 break;
7695- old = atomic64_cmpxchg((v), c, c + (a));
7696+
7697+ asm volatile("addq %2,%0\n"
7698+
7699+#ifdef CONFIG_PAX_REFCOUNT
7700+ "jno 0f\n"
7701+ "subq %2,%0\n"
7702+ "int $4\n0:\n"
7703+ _ASM_EXTABLE(0b, 0b)
7704+#endif
7705+
7706+ : "=r" (new)
7707+ : "0" (c), "er" (a));
7708+
7709+ old = atomic64_cmpxchg(v, c, new);
7710 if (likely(old == c))
7711 break;
7712 c = old;
7713 }
7714- return c != (u);
7715+ return c != u;
7716 }
7717
7718 /**
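Every checked operation in the atomic_32.h and atomic_64.h hunks follows the same shape: perform the locked arithmetic, branch over the fixup with jno when the overflow flag is clear, otherwise undo the operation, raise the overflow exception with int $4, and register the trapping instruction in the exception table so execution resumes at the following label. Written out once for a checked increment, a sketch of that pattern (not a new kernel API):

#include <asm/atomic.h>
#include <asm/alternative.h>	/* LOCK_PREFIX */
#include <asm/asm.h>		/* _ASM_EXTABLE */

static inline void checked_inc(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "incl %0\n"	/* do the locked increment */
		     "jno 0f\n"			/* OF clear: no overflow, skip fixup */
		     LOCK_PREFIX "decl %0\n"	/* overflow: undo the increment */
		     "int $4\n"			/* raise the overflow exception */
		     "0:\n"
		     _ASM_EXTABLE(0b, 0b)	/* resume at label 0 after the fixup */
		     : "+m" (v->counter));
}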
7719diff -urNp linux-2.6.32.41/arch/x86/include/asm/bitops.h linux-2.6.32.41/arch/x86/include/asm/bitops.h
7720--- linux-2.6.32.41/arch/x86/include/asm/bitops.h 2011-03-27 14:31:47.000000000 -0400
7721+++ linux-2.6.32.41/arch/x86/include/asm/bitops.h 2011-04-17 15:56:46.000000000 -0400
7722@@ -38,7 +38,7 @@
7723 * a mask operation on a byte.
7724 */
7725 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
7726-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
7727+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
7728 #define CONST_MASK(nr) (1 << ((nr) & 7))
7729
7730 /**
7731diff -urNp linux-2.6.32.41/arch/x86/include/asm/boot.h linux-2.6.32.41/arch/x86/include/asm/boot.h
7732--- linux-2.6.32.41/arch/x86/include/asm/boot.h 2011-03-27 14:31:47.000000000 -0400
7733+++ linux-2.6.32.41/arch/x86/include/asm/boot.h 2011-04-17 15:56:46.000000000 -0400
7734@@ -11,10 +11,15 @@
7735 #include <asm/pgtable_types.h>
7736
7737 /* Physical address where kernel should be loaded. */
7738-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
7739+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
7740 + (CONFIG_PHYSICAL_ALIGN - 1)) \
7741 & ~(CONFIG_PHYSICAL_ALIGN - 1))
7742
7743+#ifndef __ASSEMBLY__
7744+extern unsigned char __LOAD_PHYSICAL_ADDR[];
7745+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
7746+#endif
7747+
7748 /* Minimum kernel alignment, as a power of two */
7749 #ifdef CONFIG_X86_64
7750 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
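With the boot.h hunk, the C-visible LOAD_PHYSICAL_ADDR is no longer a compile-time literal: the raw value keeps the name ____LOAD_PHYSICAL_ADDR (still used from assembly, as in the header.S hunk earlier), while C code reads the address of a linker-provided symbol, the same __LOAD_PHYSICAL_ADDR that the relocs-tool hunk above exempts from relocation. The usual idiom for consuming such a symbol, with the linker-script side shown only as an assumption since its definition lives elsewhere in the patch:

/* Linker-script side (assumed, defined outside this excerpt):
 *	__LOAD_PHYSICAL_ADDR = ____LOAD_PHYSICAL_ADDR;
 * C side: only the symbol's address carries the value; its storage is
 * never accessed. */
extern unsigned char __LOAD_PHYSICAL_ADDR[];

static inline unsigned long kernel_load_addr(void)
{
	return (unsigned long)__LOAD_PHYSICAL_ADDR;
}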
7751diff -urNp linux-2.6.32.41/arch/x86/include/asm/cacheflush.h linux-2.6.32.41/arch/x86/include/asm/cacheflush.h
7752--- linux-2.6.32.41/arch/x86/include/asm/cacheflush.h 2011-03-27 14:31:47.000000000 -0400
7753+++ linux-2.6.32.41/arch/x86/include/asm/cacheflush.h 2011-04-17 15:56:46.000000000 -0400
7754@@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
7755 static inline unsigned long get_page_memtype(struct page *pg)
7756 {
7757 if (!PageUncached(pg) && !PageWC(pg))
7758- return -1;
7759+ return ~0UL;
7760 else if (!PageUncached(pg) && PageWC(pg))
7761 return _PAGE_CACHE_WC;
7762 else if (PageUncached(pg) && !PageWC(pg))
7763@@ -85,7 +85,7 @@ static inline void set_page_memtype(stru
7764 SetPageWC(pg);
7765 break;
7766 default:
7767- case -1:
7768+ case ~0UL:
7769 ClearPageUncached(pg);
7770 ClearPageWC(pg);
7771 break;
7772diff -urNp linux-2.6.32.41/arch/x86/include/asm/cache.h linux-2.6.32.41/arch/x86/include/asm/cache.h
7773--- linux-2.6.32.41/arch/x86/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
7774+++ linux-2.6.32.41/arch/x86/include/asm/cache.h 2011-05-04 17:56:20.000000000 -0400
7775@@ -5,9 +5,10 @@
7776
7777 /* L1 cache line size */
7778 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
7779-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7780+#define L1_CACHE_BYTES (_AC(1,U) << L1_CACHE_SHIFT)
7781
7782 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
7783+#define __read_only __attribute__((__section__(".data.read_only")))
7784
7785 #ifdef CONFIG_X86_VSMP
7786 /* vSMP Internode cacheline shift */
7787diff -urNp linux-2.6.32.41/arch/x86/include/asm/checksum_32.h linux-2.6.32.41/arch/x86/include/asm/checksum_32.h
7788--- linux-2.6.32.41/arch/x86/include/asm/checksum_32.h 2011-03-27 14:31:47.000000000 -0400
7789+++ linux-2.6.32.41/arch/x86/include/asm/checksum_32.h 2011-04-17 15:56:46.000000000 -0400
7790@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
7791 int len, __wsum sum,
7792 int *src_err_ptr, int *dst_err_ptr);
7793
7794+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
7795+ int len, __wsum sum,
7796+ int *src_err_ptr, int *dst_err_ptr);
7797+
7798+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
7799+ int len, __wsum sum,
7800+ int *src_err_ptr, int *dst_err_ptr);
7801+
7802 /*
7803 * Note: when you get a NULL pointer exception here this means someone
7804 * passed in an incorrect kernel address to one of these functions.
7805@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
7806 int *err_ptr)
7807 {
7808 might_sleep();
7809- return csum_partial_copy_generic((__force void *)src, dst,
7810+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
7811 len, sum, err_ptr, NULL);
7812 }
7813
7814@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
7815 {
7816 might_sleep();
7817 if (access_ok(VERIFY_WRITE, dst, len))
7818- return csum_partial_copy_generic(src, (__force void *)dst,
7819+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
7820 len, sum, NULL, err_ptr);
7821
7822 if (len)
7823diff -urNp linux-2.6.32.41/arch/x86/include/asm/desc_defs.h linux-2.6.32.41/arch/x86/include/asm/desc_defs.h
7824--- linux-2.6.32.41/arch/x86/include/asm/desc_defs.h 2011-03-27 14:31:47.000000000 -0400
7825+++ linux-2.6.32.41/arch/x86/include/asm/desc_defs.h 2011-04-17 15:56:46.000000000 -0400
7826@@ -31,6 +31,12 @@ struct desc_struct {
7827 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
7828 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
7829 };
7830+ struct {
7831+ u16 offset_low;
7832+ u16 seg;
7833+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
7834+ unsigned offset_high: 16;
7835+ } gate;
7836 };
7837 } __attribute__((packed));
7838
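The added anonymous gate member gives struct desc_struct a field-level view of a 32-bit gate descriptor (selector, type, DPL, present bit, split offset); the desc.h hunk just below rebuilds pack_gate() on top of it instead of hand-packing the a/b words. Reading a gate back through the same view, a sketch using only the fields added here:

#include <asm/desc_defs.h>

/* Recover the handler address and code segment of a 32-bit gate
 * descriptor via the new bitfield view. */
static inline unsigned long gate_target(const struct desc_struct *d)
{
	return d->gate.offset_low | ((unsigned long)d->gate.offset_high << 16);
}

static inline unsigned int gate_segment(const struct desc_struct *d)
{
	return d->gate.seg;
}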
7839diff -urNp linux-2.6.32.41/arch/x86/include/asm/desc.h linux-2.6.32.41/arch/x86/include/asm/desc.h
7840--- linux-2.6.32.41/arch/x86/include/asm/desc.h 2011-03-27 14:31:47.000000000 -0400
7841+++ linux-2.6.32.41/arch/x86/include/asm/desc.h 2011-04-23 12:56:10.000000000 -0400
7842@@ -4,6 +4,7 @@
7843 #include <asm/desc_defs.h>
7844 #include <asm/ldt.h>
7845 #include <asm/mmu.h>
7846+#include <asm/pgtable.h>
7847 #include <linux/smp.h>
7848
7849 static inline void fill_ldt(struct desc_struct *desc,
7850@@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_
7851 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
7852 desc->type = (info->read_exec_only ^ 1) << 1;
7853 desc->type |= info->contents << 2;
7854+ desc->type |= info->seg_not_present ^ 1;
7855 desc->s = 1;
7856 desc->dpl = 0x3;
7857 desc->p = info->seg_not_present ^ 1;
7858@@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_
7859 }
7860
7861 extern struct desc_ptr idt_descr;
7862-extern gate_desc idt_table[];
7863-
7864-struct gdt_page {
7865- struct desc_struct gdt[GDT_ENTRIES];
7866-} __attribute__((aligned(PAGE_SIZE)));
7867-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
7868+extern gate_desc idt_table[256];
7869
7870+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
7871 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
7872 {
7873- return per_cpu(gdt_page, cpu).gdt;
7874+ return cpu_gdt_table[cpu];
7875 }
7876
7877 #ifdef CONFIG_X86_64
7878@@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *
7879 unsigned long base, unsigned dpl, unsigned flags,
7880 unsigned short seg)
7881 {
7882- gate->a = (seg << 16) | (base & 0xffff);
7883- gate->b = (base & 0xffff0000) |
7884- (((0x80 | type | (dpl << 5)) & 0xff) << 8);
7885+ gate->gate.offset_low = base;
7886+ gate->gate.seg = seg;
7887+ gate->gate.reserved = 0;
7888+ gate->gate.type = type;
7889+ gate->gate.s = 0;
7890+ gate->gate.dpl = dpl;
7891+ gate->gate.p = 1;
7892+ gate->gate.offset_high = base >> 16;
7893 }
7894
7895 #endif
7896@@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(str
7897 static inline void native_write_idt_entry(gate_desc *idt, int entry,
7898 const gate_desc *gate)
7899 {
7900+ pax_open_kernel();
7901 memcpy(&idt[entry], gate, sizeof(*gate));
7902+ pax_close_kernel();
7903 }
7904
7905 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
7906 const void *desc)
7907 {
7908+ pax_open_kernel();
7909 memcpy(&ldt[entry], desc, 8);
7910+ pax_close_kernel();
7911 }
7912
7913 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
7914@@ -139,7 +146,10 @@ static inline void native_write_gdt_entr
7915 size = sizeof(struct desc_struct);
7916 break;
7917 }
7918+
7919+ pax_open_kernel();
7920 memcpy(&gdt[entry], desc, size);
7921+ pax_close_kernel();
7922 }
7923
7924 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
7925@@ -211,7 +221,9 @@ static inline void native_set_ldt(const
7926
7927 static inline void native_load_tr_desc(void)
7928 {
7929+ pax_open_kernel();
7930 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
7931+ pax_close_kernel();
7932 }
7933
7934 static inline void native_load_gdt(const struct desc_ptr *dtr)
7935@@ -246,8 +258,10 @@ static inline void native_load_tls(struc
7936 unsigned int i;
7937 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
7938
7939+ pax_open_kernel();
7940 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
7941 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
7942+ pax_close_kernel();
7943 }
7944
7945 #define _LDT_empty(info) \
7946@@ -309,7 +323,7 @@ static inline void set_desc_limit(struct
7947 desc->limit = (limit >> 16) & 0xf;
7948 }
7949
7950-static inline void _set_gate(int gate, unsigned type, void *addr,
7951+static inline void _set_gate(int gate, unsigned type, const void *addr,
7952 unsigned dpl, unsigned ist, unsigned seg)
7953 {
7954 gate_desc s;
7955@@ -327,7 +341,7 @@ static inline void _set_gate(int gate, u
7956 * Pentium F0 0F bugfix can have resulted in the mapped
7957 * IDT being write-protected.
7958 */
7959-static inline void set_intr_gate(unsigned int n, void *addr)
7960+static inline void set_intr_gate(unsigned int n, const void *addr)
7961 {
7962 BUG_ON((unsigned)n > 0xFF);
7963 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
7964@@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsig
7965 /*
7966 * This routine sets up an interrupt gate at directory privilege level 3.
7967 */
7968-static inline void set_system_intr_gate(unsigned int n, void *addr)
7969+static inline void set_system_intr_gate(unsigned int n, const void *addr)
7970 {
7971 BUG_ON((unsigned)n > 0xFF);
7972 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
7973 }
7974
7975-static inline void set_system_trap_gate(unsigned int n, void *addr)
7976+static inline void set_system_trap_gate(unsigned int n, const void *addr)
7977 {
7978 BUG_ON((unsigned)n > 0xFF);
7979 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
7980 }
7981
7982-static inline void set_trap_gate(unsigned int n, void *addr)
7983+static inline void set_trap_gate(unsigned int n, const void *addr)
7984 {
7985 BUG_ON((unsigned)n > 0xFF);
7986 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
7987@@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigne
7988 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
7989 {
7990 BUG_ON((unsigned)n > 0xFF);
7991- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
7992+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
7993 }
7994
7995-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
7996+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
7997 {
7998 BUG_ON((unsigned)n > 0xFF);
7999 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8000 }
8001
8002-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8003+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8004 {
8005 BUG_ON((unsigned)n > 0xFF);
8006 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8007 }
8008
8009+#ifdef CONFIG_X86_32
8010+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8011+{
8012+ struct desc_struct d;
8013+
8014+ if (likely(limit))
8015+ limit = (limit - 1UL) >> PAGE_SHIFT;
8016+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
8017+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8018+}
8019+#endif
8020+
8021 #endif /* _ASM_X86_DESC_H */
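The new set_user_cs() above builds a user code-segment descriptor whose limit is expressed in 4 KiB pages ((limit - 1) >> PAGE_SHIFT) and installs it at GDT_ENTRY_DEFAULT_USER_CS; the PAGEEXEC/SEGMEXEC code in the mmu_context.h hunk further down calls it on context switch with the per-mm user_cs_base/user_cs_limit. As a rough sketch (not part of the patch, and not the kernel's pack_descriptor() helper), the program below packs such a descriptor by the x86 GDT layout, using the 0xFB access byte (present, DPL 3, code, readable, accessed) and 0xC flags (4 KiB granularity, 32-bit) taken from the hunk; the 3 GiB limit is an example value.

/* Sketch, not part of the patch: x86 code-segment descriptor packing. */
#include <stdint.h>
#include <stdio.h>

static uint64_t pack_code_desc(uint32_t base, uint32_t limit_in_pages)
{
	uint64_t d = 0;
	d |= limit_in_pages & 0xffff;                     /* limit[15:0]            */
	d |= (uint64_t)(base & 0xffffff) << 16;           /* base[23:0]             */
	d |= (uint64_t)0xFB << 40;                        /* P | DPL3 | S | code,RA */
	d |= (uint64_t)(limit_in_pages & 0xf0000) << 32;  /* limit[19:16]           */
	d |= (uint64_t)0xC << 52;                         /* G (4K units) | D (32b) */
	d |= (uint64_t)(base & 0xff000000) << 32;         /* base[31:24]            */
	return d;
}

int main(void)
{
	/* e.g. a flat 3 GiB user code segment: limit = (3 GiB - 1) >> 12 */
	uint32_t pages = (0xC0000000u - 1) >> 12;         /* 0xBFFFF */
	printf("%016llx\n", (unsigned long long)pack_code_desc(0, pages));
	return 0;
}

Shrinking this limit is how the 32-bit PAGEEXEC/SEGMEXEC code keeps instruction fetches out of data regions on CPUs without a usable NX bit.
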
8022diff -urNp linux-2.6.32.41/arch/x86/include/asm/device.h linux-2.6.32.41/arch/x86/include/asm/device.h
8023--- linux-2.6.32.41/arch/x86/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
8024+++ linux-2.6.32.41/arch/x86/include/asm/device.h 2011-04-17 15:56:46.000000000 -0400
8025@@ -6,7 +6,7 @@ struct dev_archdata {
8026 void *acpi_handle;
8027 #endif
8028 #ifdef CONFIG_X86_64
8029-struct dma_map_ops *dma_ops;
8030+ const struct dma_map_ops *dma_ops;
8031 #endif
8032 #ifdef CONFIG_DMAR
8033 void *iommu; /* hook for IOMMU specific extension */
8034diff -urNp linux-2.6.32.41/arch/x86/include/asm/dma-mapping.h linux-2.6.32.41/arch/x86/include/asm/dma-mapping.h
8035--- linux-2.6.32.41/arch/x86/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
8036+++ linux-2.6.32.41/arch/x86/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
8037@@ -25,9 +25,9 @@ extern int iommu_merge;
8038 extern struct device x86_dma_fallback_dev;
8039 extern int panic_on_overflow;
8040
8041-extern struct dma_map_ops *dma_ops;
8042+extern const struct dma_map_ops *dma_ops;
8043
8044-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
8045+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
8046 {
8047 #ifdef CONFIG_X86_32
8048 return dma_ops;
8049@@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dm
8050 /* Make sure we keep the same behaviour */
8051 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
8052 {
8053- struct dma_map_ops *ops = get_dma_ops(dev);
8054+ const struct dma_map_ops *ops = get_dma_ops(dev);
8055 if (ops->mapping_error)
8056 return ops->mapping_error(dev, dma_addr);
8057
8058@@ -122,7 +122,7 @@ static inline void *
8059 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
8060 gfp_t gfp)
8061 {
8062- struct dma_map_ops *ops = get_dma_ops(dev);
8063+ const struct dma_map_ops *ops = get_dma_ops(dev);
8064 void *memory;
8065
8066 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
8067@@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, s
8068 static inline void dma_free_coherent(struct device *dev, size_t size,
8069 void *vaddr, dma_addr_t bus)
8070 {
8071- struct dma_map_ops *ops = get_dma_ops(dev);
8072+ const struct dma_map_ops *ops = get_dma_ops(dev);
8073
8074 WARN_ON(irqs_disabled()); /* for portability */
8075
8076diff -urNp linux-2.6.32.41/arch/x86/include/asm/e820.h linux-2.6.32.41/arch/x86/include/asm/e820.h
8077--- linux-2.6.32.41/arch/x86/include/asm/e820.h 2011-03-27 14:31:47.000000000 -0400
8078+++ linux-2.6.32.41/arch/x86/include/asm/e820.h 2011-04-17 15:56:46.000000000 -0400
8079@@ -133,7 +133,7 @@ extern char *default_machine_specific_me
8080 #define ISA_END_ADDRESS 0x100000
8081 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
8082
8083-#define BIOS_BEGIN 0x000a0000
8084+#define BIOS_BEGIN 0x000c0000
8085 #define BIOS_END 0x00100000
8086
8087 #ifdef __KERNEL__
8088diff -urNp linux-2.6.32.41/arch/x86/include/asm/elf.h linux-2.6.32.41/arch/x86/include/asm/elf.h
8089--- linux-2.6.32.41/arch/x86/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
8090+++ linux-2.6.32.41/arch/x86/include/asm/elf.h 2011-04-17 15:56:46.000000000 -0400
8091@@ -257,7 +257,25 @@ extern int force_personality32;
8092 the loader. We need to make sure that it is out of the way of the program
8093 that it will "exec", and that there is sufficient room for the brk. */
8094
8095+#ifdef CONFIG_PAX_SEGMEXEC
8096+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8097+#else
8098 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8099+#endif
8100+
8101+#ifdef CONFIG_PAX_ASLR
8102+#ifdef CONFIG_X86_32
8103+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8104+
8105+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8106+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8107+#else
8108+#define PAX_ELF_ET_DYN_BASE 0x400000UL
8109+
8110+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8111+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8112+#endif
8113+#endif
8114
8115 /* This yields a mask that user programs can use to figure out what
8116 instruction set this CPU supports. This could be done in user space,
8117@@ -311,8 +329,7 @@ do { \
8118 #define ARCH_DLINFO \
8119 do { \
8120 if (vdso_enabled) \
8121- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8122- (unsigned long)current->mm->context.vdso); \
8123+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);\
8124 } while (0)
8125
8126 #define AT_SYSINFO 32
8127@@ -323,7 +340,7 @@ do { \
8128
8129 #endif /* !CONFIG_X86_32 */
8130
8131-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8132+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8133
8134 #define VDSO_ENTRY \
8135 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8136@@ -337,7 +354,4 @@ extern int arch_setup_additional_pages(s
8137 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8138 #define compat_arch_setup_additional_pages syscall32_setup_pages
8139
8140-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8141-#define arch_randomize_brk arch_randomize_brk
8142-
8143 #endif /* _ASM_X86_ELF_H */
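Back-of-the-envelope numbers for the PAX_DELTA_* values above (sketch, not part of the patch): assuming PaX applies the random delta at page granularity, as the names suggest, each bit of delta widens the randomisation range by one page-sized step. The 32-bit figure assumes the usual 47-bit x86-64 user address space, i.e. TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3 = 47 - 12 - 3.

/* Sketch, not part of the patch: randomisation range per PAX_DELTA_*_LEN. */
#include <stdio.h>

int main(void)
{
	const unsigned page_shift = 12;
	const unsigned lens[] = { 15, 16, 32 };  /* i386+SEGMEXEC, i386/IA32, native amd64 */

	for (unsigned i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
		unsigned long long span = 1ULL << (lens[i] + page_shift);
		printf("%2u bits -> %llu MiB of mmap/stack base spread\n",
		       lens[i], span >> 20);
	}
	return 0;
}

That works out to 128 MiB, 256 MiB and 16 TiB of base spread respectively.
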
8144diff -urNp linux-2.6.32.41/arch/x86/include/asm/emergency-restart.h linux-2.6.32.41/arch/x86/include/asm/emergency-restart.h
8145--- linux-2.6.32.41/arch/x86/include/asm/emergency-restart.h 2011-03-27 14:31:47.000000000 -0400
8146+++ linux-2.6.32.41/arch/x86/include/asm/emergency-restart.h 2011-05-22 23:02:06.000000000 -0400
8147@@ -15,6 +15,6 @@ enum reboot_type {
8148
8149 extern enum reboot_type reboot_type;
8150
8151-extern void machine_emergency_restart(void);
8152+extern void machine_emergency_restart(void) __noreturn;
8153
8154 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8155diff -urNp linux-2.6.32.41/arch/x86/include/asm/futex.h linux-2.6.32.41/arch/x86/include/asm/futex.h
8156--- linux-2.6.32.41/arch/x86/include/asm/futex.h 2011-03-27 14:31:47.000000000 -0400
8157+++ linux-2.6.32.41/arch/x86/include/asm/futex.h 2011-04-17 15:56:46.000000000 -0400
8158@@ -12,16 +12,18 @@
8159 #include <asm/system.h>
8160
8161 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8162+ typecheck(u32 *, uaddr); \
8163 asm volatile("1:\t" insn "\n" \
8164 "2:\t.section .fixup,\"ax\"\n" \
8165 "3:\tmov\t%3, %1\n" \
8166 "\tjmp\t2b\n" \
8167 "\t.previous\n" \
8168 _ASM_EXTABLE(1b, 3b) \
8169- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8170+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
8171 : "i" (-EFAULT), "0" (oparg), "1" (0))
8172
8173 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8174+ typecheck(u32 *, uaddr); \
8175 asm volatile("1:\tmovl %2, %0\n" \
8176 "\tmovl\t%0, %3\n" \
8177 "\t" insn "\n" \
8178@@ -34,10 +36,10 @@
8179 _ASM_EXTABLE(1b, 4b) \
8180 _ASM_EXTABLE(2b, 4b) \
8181 : "=&a" (oldval), "=&r" (ret), \
8182- "+m" (*uaddr), "=&r" (tem) \
8183+ "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
8184 : "r" (oparg), "i" (-EFAULT), "1" (0))
8185
8186-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
8187+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8188 {
8189 int op = (encoded_op >> 28) & 7;
8190 int cmp = (encoded_op >> 24) & 15;
8191@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
8192
8193 switch (op) {
8194 case FUTEX_OP_SET:
8195- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8196+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8197 break;
8198 case FUTEX_OP_ADD:
8199- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8200+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8201 uaddr, oparg);
8202 break;
8203 case FUTEX_OP_OR:
8204@@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser
8205 return ret;
8206 }
8207
8208-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
8209+static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
8210 int newval)
8211 {
8212
8213@@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_i
8214 return -ENOSYS;
8215 #endif
8216
8217- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
8218+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8219 return -EFAULT;
8220
8221- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
8222+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
8223 "2:\t.section .fixup, \"ax\"\n"
8224 "3:\tmov %2, %0\n"
8225 "\tjmp 2b\n"
8226 "\t.previous\n"
8227 _ASM_EXTABLE(1b, 3b)
8228- : "=a" (oldval), "+m" (*uaddr)
8229+ : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
8230 : "i" (-EFAULT), "r" (newval), "0" (oldval)
8231 : "memory"
8232 );
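The futex changes above are about how the value is accessed (the __copyuser_seg override, the ____m() adjustment and the int-to-u32 tightening of uaddr), not about what encoded_op means. For reference, the sketch below (not part of the patch) shows the FUTEX_WAKE_OP encoding that futex_atomic_op_inuser() is unpacking, following the FUTEX_OP() macro from <linux/futex.h>; the decode mirrors the shifts visible at the top of the function.

/* Sketch, not part of the patch: encode/decode of the futex op word. */
#include <stdio.h>

#define FUTEX_OP(op, oparg, cmp, cmparg) \
	(((op & 0xf) << 28) | ((cmp & 0xf) << 24) | \
	 ((oparg & 0xfff) << 12) | (cmparg & 0xfff))

int main(void)
{
	/* e.g. "add 1 to *uaddr2, wake if the old value was != 0" */
	int encoded = FUTEX_OP(1 /* FUTEX_OP_ADD */, 1, 1 /* FUTEX_OP_CMP_NE */, 0);

	int op     = (encoded >> 28) & 7;   /* low 3 bits; bit 31 is the OPARG_SHIFT flag */
	int cmp    = (encoded >> 24) & 15;
	int oparg  = (encoded << 8) >> 20;  /* 12-bit field, sign-extended as the kernel does */
	int cmparg = (encoded << 20) >> 20;

	printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
	return 0;
}
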
8233diff -urNp linux-2.6.32.41/arch/x86/include/asm/hw_irq.h linux-2.6.32.41/arch/x86/include/asm/hw_irq.h
8234--- linux-2.6.32.41/arch/x86/include/asm/hw_irq.h 2011-03-27 14:31:47.000000000 -0400
8235+++ linux-2.6.32.41/arch/x86/include/asm/hw_irq.h 2011-05-04 17:56:28.000000000 -0400
8236@@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
8237 extern void enable_IO_APIC(void);
8238
8239 /* Statistics */
8240-extern atomic_t irq_err_count;
8241-extern atomic_t irq_mis_count;
8242+extern atomic_unchecked_t irq_err_count;
8243+extern atomic_unchecked_t irq_mis_count;
8244
8245 /* EISA */
8246 extern void eisa_set_level_irq(unsigned int irq);
8247diff -urNp linux-2.6.32.41/arch/x86/include/asm/i387.h linux-2.6.32.41/arch/x86/include/asm/i387.h
8248--- linux-2.6.32.41/arch/x86/include/asm/i387.h 2011-03-27 14:31:47.000000000 -0400
8249+++ linux-2.6.32.41/arch/x86/include/asm/i387.h 2011-04-17 15:56:46.000000000 -0400
8250@@ -60,6 +60,11 @@ static inline int fxrstor_checking(struc
8251 {
8252 int err;
8253
8254+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8255+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8256+ fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
8257+#endif
8258+
8259 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
8260 "2:\n"
8261 ".section .fixup,\"ax\"\n"
8262@@ -105,6 +110,11 @@ static inline int fxsave_user(struct i38
8263 {
8264 int err;
8265
8266+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8267+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8268+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8269+#endif
8270+
8271 asm volatile("1: rex64/fxsave (%[fx])\n\t"
8272 "2:\n"
8273 ".section .fixup,\"ax\"\n"
8274@@ -195,13 +205,8 @@ static inline int fxrstor_checking(struc
8275 }
8276
8277 /* We need a safe address that is cheap to find and that is already
8278- in L1 during context switch. The best choices are unfortunately
8279- different for UP and SMP */
8280-#ifdef CONFIG_SMP
8281-#define safe_address (__per_cpu_offset[0])
8282-#else
8283-#define safe_address (kstat_cpu(0).cpustat.user)
8284-#endif
8285+ in L1 during context switch. */
8286+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
8287
8288 /*
8289 * These must be called with preempt disabled
8290@@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void
8291 struct thread_info *me = current_thread_info();
8292 preempt_disable();
8293 if (me->status & TS_USEDFPU)
8294- __save_init_fpu(me->task);
8295+ __save_init_fpu(current);
8296 else
8297 clts();
8298 }
8299diff -urNp linux-2.6.32.41/arch/x86/include/asm/io_32.h linux-2.6.32.41/arch/x86/include/asm/io_32.h
8300--- linux-2.6.32.41/arch/x86/include/asm/io_32.h 2011-03-27 14:31:47.000000000 -0400
8301+++ linux-2.6.32.41/arch/x86/include/asm/io_32.h 2011-04-17 15:56:46.000000000 -0400
8302@@ -3,6 +3,7 @@
8303
8304 #include <linux/string.h>
8305 #include <linux/compiler.h>
8306+#include <asm/processor.h>
8307
8308 /*
8309 * This file contains the definitions for the x86 IO instructions
8310@@ -42,6 +43,17 @@
8311
8312 #ifdef __KERNEL__
8313
8314+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8315+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8316+{
8317+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8318+}
8319+
8320+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8321+{
8322+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8323+}
8324+
8325 #include <asm-generic/iomap.h>
8326
8327 #include <linux/vmalloc.h>
8328diff -urNp linux-2.6.32.41/arch/x86/include/asm/io_64.h linux-2.6.32.41/arch/x86/include/asm/io_64.h
8329--- linux-2.6.32.41/arch/x86/include/asm/io_64.h 2011-03-27 14:31:47.000000000 -0400
8330+++ linux-2.6.32.41/arch/x86/include/asm/io_64.h 2011-04-17 15:56:46.000000000 -0400
8331@@ -140,6 +140,17 @@ __OUTS(l)
8332
8333 #include <linux/vmalloc.h>
8334
8335+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8336+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8337+{
8338+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8339+}
8340+
8341+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8342+{
8343+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8344+}
8345+
8346 #include <asm-generic/iomap.h>
8347
8348 void __memcpy_fromio(void *, unsigned long, unsigned);
8349diff -urNp linux-2.6.32.41/arch/x86/include/asm/iommu.h linux-2.6.32.41/arch/x86/include/asm/iommu.h
8350--- linux-2.6.32.41/arch/x86/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
8351+++ linux-2.6.32.41/arch/x86/include/asm/iommu.h 2011-04-17 15:56:46.000000000 -0400
8352@@ -3,7 +3,7 @@
8353
8354 extern void pci_iommu_shutdown(void);
8355 extern void no_iommu_init(void);
8356-extern struct dma_map_ops nommu_dma_ops;
8357+extern const struct dma_map_ops nommu_dma_ops;
8358 extern int force_iommu, no_iommu;
8359 extern int iommu_detected;
8360 extern int iommu_pass_through;
8361diff -urNp linux-2.6.32.41/arch/x86/include/asm/irqflags.h linux-2.6.32.41/arch/x86/include/asm/irqflags.h
8362--- linux-2.6.32.41/arch/x86/include/asm/irqflags.h 2011-03-27 14:31:47.000000000 -0400
8363+++ linux-2.6.32.41/arch/x86/include/asm/irqflags.h 2011-04-17 15:56:46.000000000 -0400
8364@@ -142,6 +142,11 @@ static inline unsigned long __raw_local_
8365 sti; \
8366 sysexit
8367
8368+#define GET_CR0_INTO_RDI mov %cr0, %rdi
8369+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8370+#define GET_CR3_INTO_RDI mov %cr3, %rdi
8371+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
8372+
8373 #else
8374 #define INTERRUPT_RETURN iret
8375 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8376diff -urNp linux-2.6.32.41/arch/x86/include/asm/kprobes.h linux-2.6.32.41/arch/x86/include/asm/kprobes.h
8377--- linux-2.6.32.41/arch/x86/include/asm/kprobes.h 2011-03-27 14:31:47.000000000 -0400
8378+++ linux-2.6.32.41/arch/x86/include/asm/kprobes.h 2011-04-23 12:56:12.000000000 -0400
8379@@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
8380 #define BREAKPOINT_INSTRUCTION 0xcc
8381 #define RELATIVEJUMP_INSTRUCTION 0xe9
8382 #define MAX_INSN_SIZE 16
8383-#define MAX_STACK_SIZE 64
8384-#define MIN_STACK_SIZE(ADDR) \
8385- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8386- THREAD_SIZE - (unsigned long)(ADDR))) \
8387- ? (MAX_STACK_SIZE) \
8388- : (((unsigned long)current_thread_info()) + \
8389- THREAD_SIZE - (unsigned long)(ADDR)))
8390+#define MAX_STACK_SIZE 64UL
8391+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8392
8393 #define flush_insn_slot(p) do { } while (0)
8394
8395diff -urNp linux-2.6.32.41/arch/x86/include/asm/kvm_host.h linux-2.6.32.41/arch/x86/include/asm/kvm_host.h
8396--- linux-2.6.32.41/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:01.000000000 -0400
8397+++ linux-2.6.32.41/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:26.000000000 -0400
8398@@ -536,7 +536,7 @@ struct kvm_x86_ops {
8399 const struct trace_print_flags *exit_reasons_str;
8400 };
8401
8402-extern struct kvm_x86_ops *kvm_x86_ops;
8403+extern const struct kvm_x86_ops *kvm_x86_ops;
8404
8405 int kvm_mmu_module_init(void);
8406 void kvm_mmu_module_exit(void);
8407diff -urNp linux-2.6.32.41/arch/x86/include/asm/local.h linux-2.6.32.41/arch/x86/include/asm/local.h
8408--- linux-2.6.32.41/arch/x86/include/asm/local.h 2011-03-27 14:31:47.000000000 -0400
8409+++ linux-2.6.32.41/arch/x86/include/asm/local.h 2011-04-17 15:56:46.000000000 -0400
8410@@ -18,26 +18,58 @@ typedef struct {
8411
8412 static inline void local_inc(local_t *l)
8413 {
8414- asm volatile(_ASM_INC "%0"
8415+ asm volatile(_ASM_INC "%0\n"
8416+
8417+#ifdef CONFIG_PAX_REFCOUNT
8418+ "jno 0f\n"
8419+ _ASM_DEC "%0\n"
8420+ "int $4\n0:\n"
8421+ _ASM_EXTABLE(0b, 0b)
8422+#endif
8423+
8424 : "+m" (l->a.counter));
8425 }
8426
8427 static inline void local_dec(local_t *l)
8428 {
8429- asm volatile(_ASM_DEC "%0"
8430+ asm volatile(_ASM_DEC "%0\n"
8431+
8432+#ifdef CONFIG_PAX_REFCOUNT
8433+ "jno 0f\n"
8434+ _ASM_INC "%0\n"
8435+ "int $4\n0:\n"
8436+ _ASM_EXTABLE(0b, 0b)
8437+#endif
8438+
8439 : "+m" (l->a.counter));
8440 }
8441
8442 static inline void local_add(long i, local_t *l)
8443 {
8444- asm volatile(_ASM_ADD "%1,%0"
8445+ asm volatile(_ASM_ADD "%1,%0\n"
8446+
8447+#ifdef CONFIG_PAX_REFCOUNT
8448+ "jno 0f\n"
8449+ _ASM_SUB "%1,%0\n"
8450+ "int $4\n0:\n"
8451+ _ASM_EXTABLE(0b, 0b)
8452+#endif
8453+
8454 : "+m" (l->a.counter)
8455 : "ir" (i));
8456 }
8457
8458 static inline void local_sub(long i, local_t *l)
8459 {
8460- asm volatile(_ASM_SUB "%1,%0"
8461+ asm volatile(_ASM_SUB "%1,%0\n"
8462+
8463+#ifdef CONFIG_PAX_REFCOUNT
8464+ "jno 0f\n"
8465+ _ASM_ADD "%1,%0\n"
8466+ "int $4\n0:\n"
8467+ _ASM_EXTABLE(0b, 0b)
8468+#endif
8469+
8470 : "+m" (l->a.counter)
8471 : "ir" (i));
8472 }
8473@@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
8474 {
8475 unsigned char c;
8476
8477- asm volatile(_ASM_SUB "%2,%0; sete %1"
8478+ asm volatile(_ASM_SUB "%2,%0\n"
8479+
8480+#ifdef CONFIG_PAX_REFCOUNT
8481+ "jno 0f\n"
8482+ _ASM_ADD "%2,%0\n"
8483+ "int $4\n0:\n"
8484+ _ASM_EXTABLE(0b, 0b)
8485+#endif
8486+
8487+ "sete %1\n"
8488 : "+m" (l->a.counter), "=qm" (c)
8489 : "ir" (i) : "memory");
8490 return c;
8491@@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
8492 {
8493 unsigned char c;
8494
8495- asm volatile(_ASM_DEC "%0; sete %1"
8496+ asm volatile(_ASM_DEC "%0\n"
8497+
8498+#ifdef CONFIG_PAX_REFCOUNT
8499+ "jno 0f\n"
8500+ _ASM_INC "%0\n"
8501+ "int $4\n0:\n"
8502+ _ASM_EXTABLE(0b, 0b)
8503+#endif
8504+
8505+ "sete %1\n"
8506 : "+m" (l->a.counter), "=qm" (c)
8507 : : "memory");
8508 return c != 0;
8509@@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
8510 {
8511 unsigned char c;
8512
8513- asm volatile(_ASM_INC "%0; sete %1"
8514+ asm volatile(_ASM_INC "%0\n"
8515+
8516+#ifdef CONFIG_PAX_REFCOUNT
8517+ "jno 0f\n"
8518+ _ASM_DEC "%0\n"
8519+ "int $4\n0:\n"
8520+ _ASM_EXTABLE(0b, 0b)
8521+#endif
8522+
8523+ "sete %1\n"
8524 : "+m" (l->a.counter), "=qm" (c)
8525 : : "memory");
8526 return c != 0;
8527@@ -110,7 +169,16 @@ static inline int local_add_negative(lon
8528 {
8529 unsigned char c;
8530
8531- asm volatile(_ASM_ADD "%2,%0; sets %1"
8532+ asm volatile(_ASM_ADD "%2,%0\n"
8533+
8534+#ifdef CONFIG_PAX_REFCOUNT
8535+ "jno 0f\n"
8536+ _ASM_SUB "%2,%0\n"
8537+ "int $4\n0:\n"
8538+ _ASM_EXTABLE(0b, 0b)
8539+#endif
8540+
8541+ "sets %1\n"
8542 : "+m" (l->a.counter), "=qm" (c)
8543 : "ir" (i) : "memory");
8544 return c;
8545@@ -133,7 +201,15 @@ static inline long local_add_return(long
8546 #endif
8547 /* Modern 486+ processor */
8548 __i = i;
8549- asm volatile(_ASM_XADD "%0, %1;"
8550+ asm volatile(_ASM_XADD "%0, %1\n"
8551+
8552+#ifdef CONFIG_PAX_REFCOUNT
8553+ "jno 0f\n"
8554+ _ASM_MOV "%0,%1\n"
8555+ "int $4\n0:\n"
8556+ _ASM_EXTABLE(0b, 0b)
8557+#endif
8558+
8559 : "+r" (i), "+m" (l->a.counter)
8560 : : "memory");
8561 return i + __i;
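Every asm block in the local.h hunk above follows the same PAX_REFCOUNT pattern: perform the arithmetic, take the "jno" branch when the signed result did not overflow, otherwise undo the operation and execute "int $4" so the overflow trap handler can deal with the offending task; counters that are genuinely allowed to wrap are switched to atomic_unchecked_t instead, as in the hw_irq.h hunk earlier. The plain-C analogue below (not part of the patch) captures the semantics; __builtin_add_overflow is a GCC/Clang builtin standing in for the overflow-flag check, not what the kernel code uses.

/* Sketch, not part of the patch: detect, undo and report instead of wrapping. */
#include <limits.h>
#include <stdio.h>

struct local_counter { long counter; };

static int checked_local_add(long i, struct local_counter *l)
{
	long result;

	if (__builtin_add_overflow(l->counter, i, &result)) {
		/* asm version: the value was already clobbered, so it is
		 * rolled back (_ASM_SUB) before int $4 raises the trap */
		return -1;              /* overflow detected, counter unchanged */
	}
	l->counter = result;
	return 0;
}

int main(void)
{
	struct local_counter c = { LONG_MAX - 1 };

	printf("%d\n", checked_local_add(1, &c));   /* 0: ok                */
	printf("%d\n", checked_local_add(1, &c));   /* -1: refused to wrap  */
	printf("%ld\n", c.counter);                 /* still LONG_MAX       */
	return 0;
}
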
8562diff -urNp linux-2.6.32.41/arch/x86/include/asm/microcode.h linux-2.6.32.41/arch/x86/include/asm/microcode.h
8563--- linux-2.6.32.41/arch/x86/include/asm/microcode.h 2011-03-27 14:31:47.000000000 -0400
8564+++ linux-2.6.32.41/arch/x86/include/asm/microcode.h 2011-04-17 15:56:46.000000000 -0400
8565@@ -12,13 +12,13 @@ struct device;
8566 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
8567
8568 struct microcode_ops {
8569- enum ucode_state (*request_microcode_user) (int cpu,
8570+ enum ucode_state (* const request_microcode_user) (int cpu,
8571 const void __user *buf, size_t size);
8572
8573- enum ucode_state (*request_microcode_fw) (int cpu,
8574+ enum ucode_state (* const request_microcode_fw) (int cpu,
8575 struct device *device);
8576
8577- void (*microcode_fini_cpu) (int cpu);
8578+ void (* const microcode_fini_cpu) (int cpu);
8579
8580 /*
8581 * The generic 'microcode_core' part guarantees that
8582@@ -38,18 +38,18 @@ struct ucode_cpu_info {
8583 extern struct ucode_cpu_info ucode_cpu_info[];
8584
8585 #ifdef CONFIG_MICROCODE_INTEL
8586-extern struct microcode_ops * __init init_intel_microcode(void);
8587+extern const struct microcode_ops * __init init_intel_microcode(void);
8588 #else
8589-static inline struct microcode_ops * __init init_intel_microcode(void)
8590+static inline const struct microcode_ops * __init init_intel_microcode(void)
8591 {
8592 return NULL;
8593 }
8594 #endif /* CONFIG_MICROCODE_INTEL */
8595
8596 #ifdef CONFIG_MICROCODE_AMD
8597-extern struct microcode_ops * __init init_amd_microcode(void);
8598+extern const struct microcode_ops * __init init_amd_microcode(void);
8599 #else
8600-static inline struct microcode_ops * __init init_amd_microcode(void)
8601+static inline const struct microcode_ops * __init init_amd_microcode(void)
8602 {
8603 return NULL;
8604 }
8605diff -urNp linux-2.6.32.41/arch/x86/include/asm/mman.h linux-2.6.32.41/arch/x86/include/asm/mman.h
8606--- linux-2.6.32.41/arch/x86/include/asm/mman.h 2011-03-27 14:31:47.000000000 -0400
8607+++ linux-2.6.32.41/arch/x86/include/asm/mman.h 2011-04-17 15:56:46.000000000 -0400
8608@@ -5,4 +5,14 @@
8609
8610 #include <asm-generic/mman.h>
8611
8612+#ifdef __KERNEL__
8613+#ifndef __ASSEMBLY__
8614+#ifdef CONFIG_X86_32
8615+#define arch_mmap_check i386_mmap_check
8616+int i386_mmap_check(unsigned long addr, unsigned long len,
8617+ unsigned long flags);
8618+#endif
8619+#endif
8620+#endif
8621+
8622 #endif /* _ASM_X86_MMAN_H */
8623diff -urNp linux-2.6.32.41/arch/x86/include/asm/mmu_context.h linux-2.6.32.41/arch/x86/include/asm/mmu_context.h
8624--- linux-2.6.32.41/arch/x86/include/asm/mmu_context.h 2011-03-27 14:31:47.000000000 -0400
8625+++ linux-2.6.32.41/arch/x86/include/asm/mmu_context.h 2011-04-17 15:56:46.000000000 -0400
8626@@ -24,6 +24,21 @@ void destroy_context(struct mm_struct *m
8627
8628 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
8629 {
8630+
8631+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8632+ unsigned int i;
8633+ pgd_t *pgd;
8634+
8635+ pax_open_kernel();
8636+ pgd = get_cpu_pgd(smp_processor_id());
8637+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
8638+ if (paravirt_enabled())
8639+ set_pgd(pgd+i, native_make_pgd(0));
8640+ else
8641+ pgd[i] = native_make_pgd(0);
8642+ pax_close_kernel();
8643+#endif
8644+
8645 #ifdef CONFIG_SMP
8646 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
8647 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
8648@@ -34,16 +49,30 @@ static inline void switch_mm(struct mm_s
8649 struct task_struct *tsk)
8650 {
8651 unsigned cpu = smp_processor_id();
8652+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
8653+ int tlbstate = TLBSTATE_OK;
8654+#endif
8655
8656 if (likely(prev != next)) {
8657 #ifdef CONFIG_SMP
8658+#ifdef CONFIG_X86_32
8659+ tlbstate = percpu_read(cpu_tlbstate.state);
8660+#endif
8661 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
8662 percpu_write(cpu_tlbstate.active_mm, next);
8663 #endif
8664 cpumask_set_cpu(cpu, mm_cpumask(next));
8665
8666 /* Re-load page tables */
8667+#ifdef CONFIG_PAX_PER_CPU_PGD
8668+ pax_open_kernel();
8669+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
8670+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
8671+ pax_close_kernel();
8672+ load_cr3(get_cpu_pgd(cpu));
8673+#else
8674 load_cr3(next->pgd);
8675+#endif
8676
8677 /* stop flush ipis for the previous mm */
8678 cpumask_clear_cpu(cpu, mm_cpumask(prev));
8679@@ -53,9 +82,38 @@ static inline void switch_mm(struct mm_s
8680 */
8681 if (unlikely(prev->context.ldt != next->context.ldt))
8682 load_LDT_nolock(&next->context);
8683- }
8684+
8685+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8686+ if (!nx_enabled) {
8687+ smp_mb__before_clear_bit();
8688+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
8689+ smp_mb__after_clear_bit();
8690+ cpu_set(cpu, next->context.cpu_user_cs_mask);
8691+ }
8692+#endif
8693+
8694+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8695+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
8696+ prev->context.user_cs_limit != next->context.user_cs_limit))
8697+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8698 #ifdef CONFIG_SMP
8699+ else if (unlikely(tlbstate != TLBSTATE_OK))
8700+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8701+#endif
8702+#endif
8703+
8704+ }
8705 else {
8706+
8707+#ifdef CONFIG_PAX_PER_CPU_PGD
8708+ pax_open_kernel();
8709+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
8710+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
8711+ pax_close_kernel();
8712+ load_cr3(get_cpu_pgd(cpu));
8713+#endif
8714+
8715+#ifdef CONFIG_SMP
8716 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
8717 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
8718
8719@@ -64,11 +122,28 @@ static inline void switch_mm(struct mm_s
8720 * tlb flush IPI delivery. We must reload CR3
8721 * to make sure to use no freed page tables.
8722 */
8723+
8724+#ifndef CONFIG_PAX_PER_CPU_PGD
8725 load_cr3(next->pgd);
8726+#endif
8727+
8728 load_LDT_nolock(&next->context);
8729+
8730+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
8731+ if (!nx_enabled)
8732+ cpu_set(cpu, next->context.cpu_user_cs_mask);
8733+#endif
8734+
8735+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8736+#ifdef CONFIG_PAX_PAGEEXEC
8737+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
8738+#endif
8739+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8740+#endif
8741+
8742 }
8743- }
8744 #endif
8745+ }
8746 }
8747
8748 #define activate_mm(prev, next) \
8749diff -urNp linux-2.6.32.41/arch/x86/include/asm/mmu.h linux-2.6.32.41/arch/x86/include/asm/mmu.h
8750--- linux-2.6.32.41/arch/x86/include/asm/mmu.h 2011-03-27 14:31:47.000000000 -0400
8751+++ linux-2.6.32.41/arch/x86/include/asm/mmu.h 2011-04-17 15:56:46.000000000 -0400
8752@@ -9,10 +9,23 @@
8753 * we put the segment information here.
8754 */
8755 typedef struct {
8756- void *ldt;
8757+ struct desc_struct *ldt;
8758 int size;
8759 struct mutex lock;
8760- void *vdso;
8761+ unsigned long vdso;
8762+
8763+#ifdef CONFIG_X86_32
8764+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
8765+ unsigned long user_cs_base;
8766+ unsigned long user_cs_limit;
8767+
8768+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8769+ cpumask_t cpu_user_cs_mask;
8770+#endif
8771+
8772+#endif
8773+#endif
8774+
8775 } mm_context_t;
8776
8777 #ifdef CONFIG_SMP
8778diff -urNp linux-2.6.32.41/arch/x86/include/asm/module.h linux-2.6.32.41/arch/x86/include/asm/module.h
8779--- linux-2.6.32.41/arch/x86/include/asm/module.h 2011-03-27 14:31:47.000000000 -0400
8780+++ linux-2.6.32.41/arch/x86/include/asm/module.h 2011-04-23 13:18:57.000000000 -0400
8781@@ -5,6 +5,7 @@
8782
8783 #ifdef CONFIG_X86_64
8784 /* X86_64 does not define MODULE_PROC_FAMILY */
8785+#define MODULE_PROC_FAMILY ""
8786 #elif defined CONFIG_M386
8787 #define MODULE_PROC_FAMILY "386 "
8788 #elif defined CONFIG_M486
8789@@ -59,13 +60,36 @@
8790 #error unknown processor family
8791 #endif
8792
8793-#ifdef CONFIG_X86_32
8794-# ifdef CONFIG_4KSTACKS
8795-# define MODULE_STACKSIZE "4KSTACKS "
8796-# else
8797-# define MODULE_STACKSIZE ""
8798-# endif
8799-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
8800+#ifdef CONFIG_PAX_MEMORY_UDEREF
8801+#define MODULE_PAX_UDEREF "UDEREF "
8802+#else
8803+#define MODULE_PAX_UDEREF ""
8804+#endif
8805+
8806+#ifdef CONFIG_PAX_KERNEXEC
8807+#define MODULE_PAX_KERNEXEC "KERNEXEC "
8808+#else
8809+#define MODULE_PAX_KERNEXEC ""
8810+#endif
8811+
8812+#ifdef CONFIG_PAX_REFCOUNT
8813+#define MODULE_PAX_REFCOUNT "REFCOUNT "
8814+#else
8815+#define MODULE_PAX_REFCOUNT ""
8816 #endif
8817
8818+#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
8819+#define MODULE_STACKSIZE "4KSTACKS "
8820+#else
8821+#define MODULE_STACKSIZE ""
8822+#endif
8823+
8824+#ifdef CONFIG_GRKERNSEC
8825+#define MODULE_GRSEC "GRSECURITY "
8826+#else
8827+#define MODULE_GRSEC ""
8828+#endif
8829+
8830+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
8831+
8832 #endif /* _ASM_X86_MODULE_H */
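The module.h hunk above folds the enabled hardening options into MODULE_ARCH_VERMAGIC, so a module built without the matching options fails the vermagic check at load time. The sketch below (not part of the patch) shows what the macro expands to for a hypothetical x86_64 build with GRKERNSEC, PAX_KERNEXEC, PAX_MEMORY_UDEREF and PAX_REFCOUNT all enabled; string literals simply concatenate, exactly as in the #define at the end of the hunk.

/* Sketch, not part of the patch: the resulting vermagic fragment. */
#include <stdio.h>

#define MODULE_PROC_FAMILY  ""              /* x86_64 defines no family string */
#define MODULE_STACKSIZE    ""              /* no 4KSTACKS on 64-bit           */
#define MODULE_GRSEC        "GRSECURITY "
#define MODULE_PAX_KERNEXEC "KERNEXEC "
#define MODULE_PAX_UDEREF   "UDEREF "
#define MODULE_PAX_REFCOUNT "REFCOUNT "

#define MODULE_ARCH_VERMAGIC \
	MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC \
	MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT

int main(void)
{
	printf("\"%s\"\n", MODULE_ARCH_VERMAGIC);   /* "GRSECURITY KERNEXEC UDEREF REFCOUNT " */
	return 0;
}
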
8833diff -urNp linux-2.6.32.41/arch/x86/include/asm/page_64_types.h linux-2.6.32.41/arch/x86/include/asm/page_64_types.h
8834--- linux-2.6.32.41/arch/x86/include/asm/page_64_types.h 2011-03-27 14:31:47.000000000 -0400
8835+++ linux-2.6.32.41/arch/x86/include/asm/page_64_types.h 2011-04-17 15:56:46.000000000 -0400
8836@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
8837
8838 /* duplicated to the one in bootmem.h */
8839 extern unsigned long max_pfn;
8840-extern unsigned long phys_base;
8841+extern const unsigned long phys_base;
8842
8843 extern unsigned long __phys_addr(unsigned long);
8844 #define __phys_reloc_hide(x) (x)
8845diff -urNp linux-2.6.32.41/arch/x86/include/asm/paravirt.h linux-2.6.32.41/arch/x86/include/asm/paravirt.h
8846--- linux-2.6.32.41/arch/x86/include/asm/paravirt.h 2011-03-27 14:31:47.000000000 -0400
8847+++ linux-2.6.32.41/arch/x86/include/asm/paravirt.h 2011-04-17 15:56:46.000000000 -0400
8848@@ -729,6 +729,21 @@ static inline void __set_fixmap(unsigned
8849 pv_mmu_ops.set_fixmap(idx, phys, flags);
8850 }
8851
8852+#ifdef CONFIG_PAX_KERNEXEC
8853+static inline unsigned long pax_open_kernel(void)
8854+{
8855+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
8856+}
8857+
8858+static inline unsigned long pax_close_kernel(void)
8859+{
8860+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
8861+}
8862+#else
8863+static inline unsigned long pax_open_kernel(void) { return 0; }
8864+static inline unsigned long pax_close_kernel(void) { return 0; }
8865+#endif
8866+
8867 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
8868
8869 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
8870@@ -945,7 +960,7 @@ extern void default_banner(void);
8871
8872 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
8873 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
8874-#define PARA_INDIRECT(addr) *%cs:addr
8875+#define PARA_INDIRECT(addr) *%ss:addr
8876 #endif
8877
8878 #define INTERRUPT_RETURN \
8879@@ -1022,6 +1037,21 @@ extern void default_banner(void);
8880 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
8881 CLBR_NONE, \
8882 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
8883+
8884+#define GET_CR0_INTO_RDI \
8885+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
8886+ mov %rax,%rdi
8887+
8888+#define SET_RDI_INTO_CR0 \
8889+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
8890+
8891+#define GET_CR3_INTO_RDI \
8892+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
8893+ mov %rax,%rdi
8894+
8895+#define SET_RDI_INTO_CR3 \
8896+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
8897+
8898 #endif /* CONFIG_X86_32 */
8899
8900 #endif /* __ASSEMBLY__ */
8901diff -urNp linux-2.6.32.41/arch/x86/include/asm/paravirt_types.h linux-2.6.32.41/arch/x86/include/asm/paravirt_types.h
8902--- linux-2.6.32.41/arch/x86/include/asm/paravirt_types.h 2011-03-27 14:31:47.000000000 -0400
8903+++ linux-2.6.32.41/arch/x86/include/asm/paravirt_types.h 2011-04-17 15:56:46.000000000 -0400
8904@@ -316,6 +316,12 @@ struct pv_mmu_ops {
8905 an mfn. We can tell which is which from the index. */
8906 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
8907 phys_addr_t phys, pgprot_t flags);
8908+
8909+#ifdef CONFIG_PAX_KERNEXEC
8910+ unsigned long (*pax_open_kernel)(void);
8911+ unsigned long (*pax_close_kernel)(void);
8912+#endif
8913+
8914 };
8915
8916 struct raw_spinlock;
8917diff -urNp linux-2.6.32.41/arch/x86/include/asm/pci_x86.h linux-2.6.32.41/arch/x86/include/asm/pci_x86.h
8918--- linux-2.6.32.41/arch/x86/include/asm/pci_x86.h 2011-03-27 14:31:47.000000000 -0400
8919+++ linux-2.6.32.41/arch/x86/include/asm/pci_x86.h 2011-04-17 15:56:46.000000000 -0400
8920@@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct
8921 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
8922
8923 struct pci_raw_ops {
8924- int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
8925+ int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
8926 int reg, int len, u32 *val);
8927- int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
8928+ int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
8929 int reg, int len, u32 val);
8930 };
8931
8932-extern struct pci_raw_ops *raw_pci_ops;
8933-extern struct pci_raw_ops *raw_pci_ext_ops;
8934+extern const struct pci_raw_ops *raw_pci_ops;
8935+extern const struct pci_raw_ops *raw_pci_ext_ops;
8936
8937-extern struct pci_raw_ops pci_direct_conf1;
8938+extern const struct pci_raw_ops pci_direct_conf1;
8939 extern bool port_cf9_safe;
8940
8941 /* arch_initcall level */
8942diff -urNp linux-2.6.32.41/arch/x86/include/asm/pgalloc.h linux-2.6.32.41/arch/x86/include/asm/pgalloc.h
8943--- linux-2.6.32.41/arch/x86/include/asm/pgalloc.h 2011-03-27 14:31:47.000000000 -0400
8944+++ linux-2.6.32.41/arch/x86/include/asm/pgalloc.h 2011-04-17 15:56:46.000000000 -0400
8945@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
8946 pmd_t *pmd, pte_t *pte)
8947 {
8948 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
8949+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
8950+}
8951+
8952+static inline void pmd_populate_user(struct mm_struct *mm,
8953+ pmd_t *pmd, pte_t *pte)
8954+{
8955+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
8956 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
8957 }
8958
8959diff -urNp linux-2.6.32.41/arch/x86/include/asm/pgtable-2level.h linux-2.6.32.41/arch/x86/include/asm/pgtable-2level.h
8960--- linux-2.6.32.41/arch/x86/include/asm/pgtable-2level.h 2011-03-27 14:31:47.000000000 -0400
8961+++ linux-2.6.32.41/arch/x86/include/asm/pgtable-2level.h 2011-04-17 15:56:46.000000000 -0400
8962@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
8963
8964 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8965 {
8966+ pax_open_kernel();
8967 *pmdp = pmd;
8968+ pax_close_kernel();
8969 }
8970
8971 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
8972diff -urNp linux-2.6.32.41/arch/x86/include/asm/pgtable_32.h linux-2.6.32.41/arch/x86/include/asm/pgtable_32.h
8973--- linux-2.6.32.41/arch/x86/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
8974+++ linux-2.6.32.41/arch/x86/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
8975@@ -26,9 +26,6 @@
8976 struct mm_struct;
8977 struct vm_area_struct;
8978
8979-extern pgd_t swapper_pg_dir[1024];
8980-extern pgd_t trampoline_pg_dir[1024];
8981-
8982 static inline void pgtable_cache_init(void) { }
8983 static inline void check_pgt_cache(void) { }
8984 void paging_init(void);
8985@@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, u
8986 # include <asm/pgtable-2level.h>
8987 #endif
8988
8989+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
8990+extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
8991+#ifdef CONFIG_X86_PAE
8992+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
8993+#endif
8994+
8995 #if defined(CONFIG_HIGHPTE)
8996 #define __KM_PTE \
8997 (in_nmi() ? KM_NMI_PTE : \
8998@@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, u
8999 /* Clear a kernel PTE and flush it from the TLB */
9000 #define kpte_clear_flush(ptep, vaddr) \
9001 do { \
9002+ pax_open_kernel(); \
9003 pte_clear(&init_mm, (vaddr), (ptep)); \
9004+ pax_close_kernel(); \
9005 __flush_tlb_one((vaddr)); \
9006 } while (0)
9007
9008@@ -85,6 +90,9 @@ do { \
9009
9010 #endif /* !__ASSEMBLY__ */
9011
9012+#define HAVE_ARCH_UNMAPPED_AREA
9013+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9014+
9015 /*
9016 * kern_addr_valid() is (1) for FLATMEM and (0) for
9017 * SPARSEMEM and DISCONTIGMEM
9018diff -urNp linux-2.6.32.41/arch/x86/include/asm/pgtable_32_types.h linux-2.6.32.41/arch/x86/include/asm/pgtable_32_types.h
9019--- linux-2.6.32.41/arch/x86/include/asm/pgtable_32_types.h 2011-03-27 14:31:47.000000000 -0400
9020+++ linux-2.6.32.41/arch/x86/include/asm/pgtable_32_types.h 2011-04-17 15:56:46.000000000 -0400
9021@@ -8,7 +8,7 @@
9022 */
9023 #ifdef CONFIG_X86_PAE
9024 # include <asm/pgtable-3level_types.h>
9025-# define PMD_SIZE (1UL << PMD_SHIFT)
9026+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9027 # define PMD_MASK (~(PMD_SIZE - 1))
9028 #else
9029 # include <asm/pgtable-2level_types.h>
9030@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
9031 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9032 #endif
9033
9034+#ifdef CONFIG_PAX_KERNEXEC
9035+#ifndef __ASSEMBLY__
9036+extern unsigned char MODULES_EXEC_VADDR[];
9037+extern unsigned char MODULES_EXEC_END[];
9038+#endif
9039+#include <asm/boot.h>
9040+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9041+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9042+#else
9043+#define ktla_ktva(addr) (addr)
9044+#define ktva_ktla(addr) (addr)
9045+#endif
9046+
9047 #define MODULES_VADDR VMALLOC_START
9048 #define MODULES_END VMALLOC_END
9049 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
9050diff -urNp linux-2.6.32.41/arch/x86/include/asm/pgtable-3level.h linux-2.6.32.41/arch/x86/include/asm/pgtable-3level.h
9051--- linux-2.6.32.41/arch/x86/include/asm/pgtable-3level.h 2011-03-27 14:31:47.000000000 -0400
9052+++ linux-2.6.32.41/arch/x86/include/asm/pgtable-3level.h 2011-04-17 15:56:46.000000000 -0400
9053@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
9054
9055 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9056 {
9057+ pax_open_kernel();
9058 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9059+ pax_close_kernel();
9060 }
9061
9062 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9063 {
9064+ pax_open_kernel();
9065 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9066+ pax_close_kernel();
9067 }
9068
9069 /*
9070diff -urNp linux-2.6.32.41/arch/x86/include/asm/pgtable_64.h linux-2.6.32.41/arch/x86/include/asm/pgtable_64.h
9071--- linux-2.6.32.41/arch/x86/include/asm/pgtable_64.h 2011-03-27 14:31:47.000000000 -0400
9072+++ linux-2.6.32.41/arch/x86/include/asm/pgtable_64.h 2011-04-17 15:56:46.000000000 -0400
9073@@ -16,10 +16,13 @@
9074
9075 extern pud_t level3_kernel_pgt[512];
9076 extern pud_t level3_ident_pgt[512];
9077+extern pud_t level3_vmalloc_pgt[512];
9078+extern pud_t level3_vmemmap_pgt[512];
9079+extern pud_t level2_vmemmap_pgt[512];
9080 extern pmd_t level2_kernel_pgt[512];
9081 extern pmd_t level2_fixmap_pgt[512];
9082-extern pmd_t level2_ident_pgt[512];
9083-extern pgd_t init_level4_pgt[];
9084+extern pmd_t level2_ident_pgt[512*2];
9085+extern pgd_t init_level4_pgt[512];
9086
9087 #define swapper_pg_dir init_level4_pgt
9088
9089@@ -74,7 +77,9 @@ static inline pte_t native_ptep_get_and_
9090
9091 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9092 {
9093+ pax_open_kernel();
9094 *pmdp = pmd;
9095+ pax_close_kernel();
9096 }
9097
9098 static inline void native_pmd_clear(pmd_t *pmd)
9099@@ -94,7 +99,9 @@ static inline void native_pud_clear(pud_
9100
9101 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9102 {
9103+ pax_open_kernel();
9104 *pgdp = pgd;
9105+ pax_close_kernel();
9106 }
9107
9108 static inline void native_pgd_clear(pgd_t *pgd)
9109diff -urNp linux-2.6.32.41/arch/x86/include/asm/pgtable_64_types.h linux-2.6.32.41/arch/x86/include/asm/pgtable_64_types.h
9110--- linux-2.6.32.41/arch/x86/include/asm/pgtable_64_types.h 2011-03-27 14:31:47.000000000 -0400
9111+++ linux-2.6.32.41/arch/x86/include/asm/pgtable_64_types.h 2011-04-17 15:56:46.000000000 -0400
9112@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9113 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9114 #define MODULES_END _AC(0xffffffffff000000, UL)
9115 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9116+#define MODULES_EXEC_VADDR MODULES_VADDR
9117+#define MODULES_EXEC_END MODULES_END
9118+
9119+#define ktla_ktva(addr) (addr)
9120+#define ktva_ktla(addr) (addr)
9121
9122 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9123diff -urNp linux-2.6.32.41/arch/x86/include/asm/pgtable.h linux-2.6.32.41/arch/x86/include/asm/pgtable.h
9124--- linux-2.6.32.41/arch/x86/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
9125+++ linux-2.6.32.41/arch/x86/include/asm/pgtable.h 2011-04-17 15:56:46.000000000 -0400
9126@@ -74,12 +74,51 @@ extern struct list_head pgd_list;
9127
9128 #define arch_end_context_switch(prev) do {} while(0)
9129
9130+#define pax_open_kernel() native_pax_open_kernel()
9131+#define pax_close_kernel() native_pax_close_kernel()
9132 #endif /* CONFIG_PARAVIRT */
9133
9134+#define __HAVE_ARCH_PAX_OPEN_KERNEL
9135+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9136+
9137+#ifdef CONFIG_PAX_KERNEXEC
9138+static inline unsigned long native_pax_open_kernel(void)
9139+{
9140+ unsigned long cr0;
9141+
9142+ preempt_disable();
9143+ barrier();
9144+ cr0 = read_cr0() ^ X86_CR0_WP;
9145+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
9146+ write_cr0(cr0);
9147+ return cr0 ^ X86_CR0_WP;
9148+}
9149+
9150+static inline unsigned long native_pax_close_kernel(void)
9151+{
9152+ unsigned long cr0;
9153+
9154+ cr0 = read_cr0() ^ X86_CR0_WP;
9155+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9156+ write_cr0(cr0);
9157+ barrier();
9158+ preempt_enable_no_resched();
9159+ return cr0 ^ X86_CR0_WP;
9160+}
9161+#else
9162+static inline unsigned long native_pax_open_kernel(void) { return 0; }
9163+static inline unsigned long native_pax_close_kernel(void) { return 0; }
9164+#endif
9165+
9166 /*
9167 * The following only work if pte_present() is true.
9168 * Undefined behaviour if not..
9169 */
9170+static inline int pte_user(pte_t pte)
9171+{
9172+ return pte_val(pte) & _PAGE_USER;
9173+}
9174+
9175 static inline int pte_dirty(pte_t pte)
9176 {
9177 return pte_flags(pte) & _PAGE_DIRTY;
9178@@ -167,9 +206,29 @@ static inline pte_t pte_wrprotect(pte_t
9179 return pte_clear_flags(pte, _PAGE_RW);
9180 }
9181
9182+static inline pte_t pte_mkread(pte_t pte)
9183+{
9184+ return __pte(pte_val(pte) | _PAGE_USER);
9185+}
9186+
9187 static inline pte_t pte_mkexec(pte_t pte)
9188 {
9189- return pte_clear_flags(pte, _PAGE_NX);
9190+#ifdef CONFIG_X86_PAE
9191+ if (__supported_pte_mask & _PAGE_NX)
9192+ return pte_clear_flags(pte, _PAGE_NX);
9193+ else
9194+#endif
9195+ return pte_set_flags(pte, _PAGE_USER);
9196+}
9197+
9198+static inline pte_t pte_exprotect(pte_t pte)
9199+{
9200+#ifdef CONFIG_X86_PAE
9201+ if (__supported_pte_mask & _PAGE_NX)
9202+ return pte_set_flags(pte, _PAGE_NX);
9203+ else
9204+#endif
9205+ return pte_clear_flags(pte, _PAGE_USER);
9206 }
9207
9208 static inline pte_t pte_mkdirty(pte_t pte)
9209@@ -302,6 +361,15 @@ pte_t *populate_extra_pte(unsigned long
9210 #endif
9211
9212 #ifndef __ASSEMBLY__
9213+
9214+#ifdef CONFIG_PAX_PER_CPU_PGD
9215+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9216+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9217+{
9218+ return cpu_pgd[cpu];
9219+}
9220+#endif
9221+
9222 #include <linux/mm_types.h>
9223
9224 static inline int pte_none(pte_t pte)
9225@@ -472,7 +540,7 @@ static inline pud_t *pud_offset(pgd_t *p
9226
9227 static inline int pgd_bad(pgd_t pgd)
9228 {
9229- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9230+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9231 }
9232
9233 static inline int pgd_none(pgd_t pgd)
9234@@ -495,7 +563,12 @@ static inline int pgd_none(pgd_t pgd)
9235 * pgd_offset() returns a (pgd_t *)
9236 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9237 */
9238-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9239+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9240+
9241+#ifdef CONFIG_PAX_PER_CPU_PGD
9242+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9243+#endif
9244+
9245 /*
9246 * a shortcut which implies the use of the kernel's pgd, instead
9247 * of a process's
9248@@ -506,6 +579,20 @@ static inline int pgd_none(pgd_t pgd)
9249 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9250 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9251
9252+#ifdef CONFIG_X86_32
9253+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9254+#else
9255+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9256+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9257+
9258+#ifdef CONFIG_PAX_MEMORY_UDEREF
9259+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9260+#else
9261+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9262+#endif
9263+
9264+#endif
9265+
9266 #ifndef __ASSEMBLY__
9267
9268 extern int direct_gbpages;
9269@@ -611,11 +698,23 @@ static inline void ptep_set_wrprotect(st
9270 * dst and src can be on the same page, but the range must not overlap,
9271 * and must not cross a page boundary.
9272 */
9273-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9274+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
9275 {
9276- memcpy(dst, src, count * sizeof(pgd_t));
9277+ pax_open_kernel();
9278+ while (count--)
9279+ *dst++ = *src++;
9280+ pax_close_kernel();
9281 }
9282
9283+#ifdef CONFIG_PAX_PER_CPU_PGD
9284+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9285+#endif
9286+
9287+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9288+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9289+#else
9290+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9291+#endif
9292
9293 #include <asm-generic/pgtable.h>
9294 #endif /* __ASSEMBLY__ */
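Some arithmetic behind the pgtable.h hunk above (sketch, not part of the patch): on x86_64, PGDIR_SHIFT is 39, so one PGD entry maps 512 GiB and USER_PGD_PTRS = 1 << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT) is the number of PGD slots that cover userland; these are the slots the per-CPU PGD code in the mmu_context.h hunk earlier clones and clears on context switch, and under UDEREF a shadow copy is placed starting at PAX_USER_SHADOW_BASE = 1 << TASK_SIZE_MAX_SHIFT. The shift values below are examples (47 is the stock user-VA size; the smaller value is purely hypothetical), not values read out of the patch's Kconfig.

/* Sketch, not part of the patch: PGD slots reserved for userland vs. the shift. */
#include <stdio.h>

#define PGDIR_SHIFT  39
#define PTRS_PER_PGD 512

int main(void)
{
	const unsigned shifts[] = { 47, 42 };   /* stock user-VA size, smaller example */

	for (unsigned i = 0; i < sizeof(shifts) / sizeof(shifts[0]); i++) {
		unsigned shift = shifts[i];
		unsigned long user_pgd_ptrs = 1UL << (shift - PGDIR_SHIFT);

		printf("shift %u: %lu of %u PGD slots map userland, "
		       "shadow base would be %#lx\n",
		       shift, user_pgd_ptrs, PTRS_PER_PGD, 1UL << shift);
	}
	return 0;
}

With the stock 47-bit value that is 256 of the 512 slots, which matches the USER_PGD_PTRS .. 2*USER_PGD_PTRS loop bounds seen in enter_lazy_tlb() above.
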
9295diff -urNp linux-2.6.32.41/arch/x86/include/asm/pgtable_types.h linux-2.6.32.41/arch/x86/include/asm/pgtable_types.h
9296--- linux-2.6.32.41/arch/x86/include/asm/pgtable_types.h 2011-03-27 14:31:47.000000000 -0400
9297+++ linux-2.6.32.41/arch/x86/include/asm/pgtable_types.h 2011-04-17 15:56:46.000000000 -0400
9298@@ -16,12 +16,11 @@
9299 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9300 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9301 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9302-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9303+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9304 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9305 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9306 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9307-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9308-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
9309+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
9310 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9311
9312 /* If _PAGE_BIT_PRESENT is clear, we use these: */
9313@@ -39,7 +38,6 @@
9314 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9315 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9316 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9317-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9318 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9319 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9320 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
9321@@ -55,8 +53,10 @@
9322
9323 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9324 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9325-#else
9326+#elif defined(CONFIG_KMEMCHECK)
9327 #define _PAGE_NX (_AT(pteval_t, 0))
9328+#else
9329+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9330 #endif
9331
9332 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
9333@@ -93,6 +93,9 @@
9334 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9335 _PAGE_ACCESSED)
9336
9337+#define PAGE_READONLY_NOEXEC PAGE_READONLY
9338+#define PAGE_SHARED_NOEXEC PAGE_SHARED
9339+
9340 #define __PAGE_KERNEL_EXEC \
9341 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9342 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
9343@@ -103,8 +106,8 @@
9344 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9345 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9346 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9347-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
9348-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
9349+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
9350+#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
9351 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
9352 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
9353 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
9354@@ -163,8 +166,8 @@
9355 * bits are combined, this will alow user to access the high address mapped
9356 * VDSO in the presence of CONFIG_COMPAT_VDSO
9357 */
9358-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9359-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9360+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9361+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9362 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9363 #endif
9364
9365@@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t p
9366 {
9367 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9368 }
9369+#endif
9370
9371+#if PAGETABLE_LEVELS == 3
9372+#include <asm-generic/pgtable-nopud.h>
9373+#endif
9374+
9375+#if PAGETABLE_LEVELS == 2
9376+#include <asm-generic/pgtable-nopmd.h>
9377+#endif
9378+
9379+#ifndef __ASSEMBLY__
9380 #if PAGETABLE_LEVELS > 3
9381 typedef struct { pudval_t pud; } pud_t;
9382
9383@@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pu
9384 return pud.pud;
9385 }
9386 #else
9387-#include <asm-generic/pgtable-nopud.h>
9388-
9389 static inline pudval_t native_pud_val(pud_t pud)
9390 {
9391 return native_pgd_val(pud.pgd);
9392@@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pm
9393 return pmd.pmd;
9394 }
9395 #else
9396-#include <asm-generic/pgtable-nopmd.h>
9397-
9398 static inline pmdval_t native_pmd_val(pmd_t pmd)
9399 {
9400 return native_pgd_val(pmd.pud.pgd);
9401@@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
9402
9403 extern pteval_t __supported_pte_mask;
9404 extern void set_nx(void);
9405+
9406+#ifdef CONFIG_X86_32
9407+#ifdef CONFIG_X86_PAE
9408 extern int nx_enabled;
9409+#else
9410+#define nx_enabled (0)
9411+#endif
9412+#else
9413+#define nx_enabled (1)
9414+#endif
9415
9416 #define pgprot_writecombine pgprot_writecombine
9417 extern pgprot_t pgprot_writecombine(pgprot_t prot);
9418diff -urNp linux-2.6.32.41/arch/x86/include/asm/processor.h linux-2.6.32.41/arch/x86/include/asm/processor.h
9419--- linux-2.6.32.41/arch/x86/include/asm/processor.h 2011-04-22 19:16:29.000000000 -0400
9420+++ linux-2.6.32.41/arch/x86/include/asm/processor.h 2011-05-11 18:25:15.000000000 -0400
9421@@ -272,7 +272,7 @@ struct tss_struct {
9422
9423 } ____cacheline_aligned;
9424
9425-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
9426+extern struct tss_struct init_tss[NR_CPUS];
9427
9428 /*
9429 * Save the original ist values for checking stack pointers during debugging
9430@@ -888,11 +888,18 @@ static inline void spin_lock_prefetch(co
9431 */
9432 #define TASK_SIZE PAGE_OFFSET
9433 #define TASK_SIZE_MAX TASK_SIZE
9434+
9435+#ifdef CONFIG_PAX_SEGMEXEC
9436+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
9437+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
9438+#else
9439 #define STACK_TOP TASK_SIZE
9440-#define STACK_TOP_MAX STACK_TOP
9441+#endif
9442+
9443+#define STACK_TOP_MAX TASK_SIZE
9444
9445 #define INIT_THREAD { \
9446- .sp0 = sizeof(init_stack) + (long)&init_stack, \
9447+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9448 .vm86_info = NULL, \
9449 .sysenter_cs = __KERNEL_CS, \
9450 .io_bitmap_ptr = NULL, \
9451@@ -906,7 +913,7 @@ static inline void spin_lock_prefetch(co
9452 */
9453 #define INIT_TSS { \
9454 .x86_tss = { \
9455- .sp0 = sizeof(init_stack) + (long)&init_stack, \
9456+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9457 .ss0 = __KERNEL_DS, \
9458 .ss1 = __KERNEL_CS, \
9459 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
9460@@ -917,11 +924,7 @@ static inline void spin_lock_prefetch(co
9461 extern unsigned long thread_saved_pc(struct task_struct *tsk);
9462
9463 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
9464-#define KSTK_TOP(info) \
9465-({ \
9466- unsigned long *__ptr = (unsigned long *)(info); \
9467- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
9468-})
9469+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
9470
9471 /*
9472 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
9473@@ -936,7 +939,7 @@ extern unsigned long thread_saved_pc(str
9474 #define task_pt_regs(task) \
9475 ({ \
9476 struct pt_regs *__regs__; \
9477- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
9478+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
9479 __regs__ - 1; \
9480 })
9481
9482@@ -946,13 +949,13 @@ extern unsigned long thread_saved_pc(str
9483 /*
9484 * User space process size. 47bits minus one guard page.
9485 */
9486-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
9487+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
9488
9489 /* This decides where the kernel will search for a free chunk of vm
9490 * space during mmap's.
9491 */
9492 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
9493- 0xc0000000 : 0xFFFFe000)
9494+ 0xc0000000 : 0xFFFFf000)
9495
9496 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
9497 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
9498@@ -963,11 +966,11 @@ extern unsigned long thread_saved_pc(str
9499 #define STACK_TOP_MAX TASK_SIZE_MAX
9500
9501 #define INIT_THREAD { \
9502- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9503+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9504 }
9505
9506 #define INIT_TSS { \
9507- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9508+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9509 }
9510
9511 /*
9512@@ -989,6 +992,10 @@ extern void start_thread(struct pt_regs
9513 */
9514 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
9515
9516+#ifdef CONFIG_PAX_SEGMEXEC
9517+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
9518+#endif
9519+
9520 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
9521
9522 /* Get/set a process' ability to use the timestamp counter instruction */
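
Note on the SEGMEXEC hunk above: PaX SEGMEXEC halves the 32-bit user address space so the upper half can hold the executable mirror, which is why STACK_TOP and TASK_UNMAPPED_BASE get SEGMEXEC-specific variants. The short sketch below is illustrative only (not part of the patch) and assumes the default 3G/1G split, i.e. TASK_SIZE = 0xC0000000; the macros are local stand-ins.

#include <stdio.h>

#define TASK_SIZE           0xC0000000UL        /* assumed 3G/1G split */
#define SEGMEXEC_TASK_SIZE  (TASK_SIZE / 2)     /* 0x60000000: lower half holds data */

int main(void)
{
    printf("TASK_SIZE          = %#lx\n", TASK_SIZE);
    printf("SEGMEXEC_TASK_SIZE = %#lx\n", SEGMEXEC_TASK_SIZE);
    return 0;
}
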
9523diff -urNp linux-2.6.32.41/arch/x86/include/asm/ptrace.h linux-2.6.32.41/arch/x86/include/asm/ptrace.h
9524--- linux-2.6.32.41/arch/x86/include/asm/ptrace.h 2011-03-27 14:31:47.000000000 -0400
9525+++ linux-2.6.32.41/arch/x86/include/asm/ptrace.h 2011-04-17 15:56:46.000000000 -0400
9526@@ -151,28 +151,29 @@ static inline unsigned long regs_return_
9527 }
9528
9529 /*
9530- * user_mode_vm(regs) determines whether a register set came from user mode.
9531+ * user_mode(regs) determines whether a register set came from user mode.
9532 * This is true if V8086 mode was enabled OR if the register set was from
9533 * protected mode with RPL-3 CS value. This tricky test checks that with
9534 * one comparison. Many places in the kernel can bypass this full check
9535- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
9536+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
9537+ * be used.
9538 */
9539-static inline int user_mode(struct pt_regs *regs)
9540+static inline int user_mode_novm(struct pt_regs *regs)
9541 {
9542 #ifdef CONFIG_X86_32
9543 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
9544 #else
9545- return !!(regs->cs & 3);
9546+ return !!(regs->cs & SEGMENT_RPL_MASK);
9547 #endif
9548 }
9549
9550-static inline int user_mode_vm(struct pt_regs *regs)
9551+static inline int user_mode(struct pt_regs *regs)
9552 {
9553 #ifdef CONFIG_X86_32
9554 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
9555 USER_RPL;
9556 #else
9557- return user_mode(regs);
9558+ return user_mode_novm(regs);
9559 #endif
9560 }
9561
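
Note on the ptrace.h hunk above: the patch swaps the names so that user_mode() becomes the full check (RPL of the saved CS selector, or the VM86 flag on 32-bit) and user_mode_novm() is the cheaper RPL-only test. The sketch below is illustrative only, not kernel code: the selector and flag constants are redefined locally, and the sample values in main() are typical 32-bit examples.

#include <stdio.h>

#define SEGMENT_RPL_MASK 0x3UL          /* low two bits of a segment selector */
#define USER_RPL         0x3UL          /* ring 3 */
#define X86_VM_MASK      0x00020000UL   /* EFLAGS.VM (virtual-8086 mode) */

static int came_from_user_mode(unsigned long cs, unsigned long eflags)
{
    /* User mode if the saved CS has RPL 3 OR the CPU was in VM86 mode. */
    return ((cs & SEGMENT_RPL_MASK) | (eflags & X86_VM_MASK)) >= USER_RPL;
}

int main(void)
{
    printf("%d\n", came_from_user_mode(0x73, 0x202));     /* user CS, RPL 3 -> 1 */
    printf("%d\n", came_from_user_mode(0x60, 0x202));     /* kernel CS, RPL 0 -> 0 */
    printf("%d\n", came_from_user_mode(0x60, 0x20202));   /* VM86 flag set  -> 1 */
    return 0;
}
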
9562diff -urNp linux-2.6.32.41/arch/x86/include/asm/reboot.h linux-2.6.32.41/arch/x86/include/asm/reboot.h
9563--- linux-2.6.32.41/arch/x86/include/asm/reboot.h 2011-03-27 14:31:47.000000000 -0400
9564+++ linux-2.6.32.41/arch/x86/include/asm/reboot.h 2011-05-22 23:02:03.000000000 -0400
9565@@ -6,19 +6,19 @@
9566 struct pt_regs;
9567
9568 struct machine_ops {
9569- void (*restart)(char *cmd);
9570- void (*halt)(void);
9571- void (*power_off)(void);
9572+ void (* __noreturn restart)(char *cmd);
9573+ void (* __noreturn halt)(void);
9574+ void (* __noreturn power_off)(void);
9575 void (*shutdown)(void);
9576 void (*crash_shutdown)(struct pt_regs *);
9577- void (*emergency_restart)(void);
9578+ void (* __noreturn emergency_restart)(void);
9579 };
9580
9581 extern struct machine_ops machine_ops;
9582
9583 void native_machine_crash_shutdown(struct pt_regs *regs);
9584 void native_machine_shutdown(void);
9585-void machine_real_restart(const unsigned char *code, int length);
9586+void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
9587
9588 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
9589 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
9590diff -urNp linux-2.6.32.41/arch/x86/include/asm/rwsem.h linux-2.6.32.41/arch/x86/include/asm/rwsem.h
9591--- linux-2.6.32.41/arch/x86/include/asm/rwsem.h 2011-03-27 14:31:47.000000000 -0400
9592+++ linux-2.6.32.41/arch/x86/include/asm/rwsem.h 2011-04-17 15:56:46.000000000 -0400
9593@@ -118,6 +118,14 @@ static inline void __down_read(struct rw
9594 {
9595 asm volatile("# beginning down_read\n\t"
9596 LOCK_PREFIX _ASM_INC "(%1)\n\t"
9597+
9598+#ifdef CONFIG_PAX_REFCOUNT
9599+ "jno 0f\n"
9600+ LOCK_PREFIX _ASM_DEC "(%1)\n\t"
9601+ "int $4\n0:\n"
9602+ _ASM_EXTABLE(0b, 0b)
9603+#endif
9604+
9605 /* adds 0x00000001, returns the old value */
9606 " jns 1f\n"
9607 " call call_rwsem_down_read_failed\n"
9608@@ -139,6 +147,14 @@ static inline int __down_read_trylock(st
9609 "1:\n\t"
9610 " mov %1,%2\n\t"
9611 " add %3,%2\n\t"
9612+
9613+#ifdef CONFIG_PAX_REFCOUNT
9614+ "jno 0f\n"
9615+ "sub %3,%2\n"
9616+ "int $4\n0:\n"
9617+ _ASM_EXTABLE(0b, 0b)
9618+#endif
9619+
9620 " jle 2f\n\t"
9621 LOCK_PREFIX " cmpxchg %2,%0\n\t"
9622 " jnz 1b\n\t"
9623@@ -160,6 +176,14 @@ static inline void __down_write_nested(s
9624 tmp = RWSEM_ACTIVE_WRITE_BIAS;
9625 asm volatile("# beginning down_write\n\t"
9626 LOCK_PREFIX " xadd %1,(%2)\n\t"
9627+
9628+#ifdef CONFIG_PAX_REFCOUNT
9629+ "jno 0f\n"
9630+ "mov %1,(%2)\n"
9631+ "int $4\n0:\n"
9632+ _ASM_EXTABLE(0b, 0b)
9633+#endif
9634+
9635 /* subtract 0x0000ffff, returns the old value */
9636 " test %1,%1\n\t"
9637 /* was the count 0 before? */
9638@@ -198,6 +222,14 @@ static inline void __up_read(struct rw_s
9639 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
9640 asm volatile("# beginning __up_read\n\t"
9641 LOCK_PREFIX " xadd %1,(%2)\n\t"
9642+
9643+#ifdef CONFIG_PAX_REFCOUNT
9644+ "jno 0f\n"
9645+ "mov %1,(%2)\n"
9646+ "int $4\n0:\n"
9647+ _ASM_EXTABLE(0b, 0b)
9648+#endif
9649+
9650 /* subtracts 1, returns the old value */
9651 " jns 1f\n\t"
9652 " call call_rwsem_wake\n"
9653@@ -216,6 +248,14 @@ static inline void __up_write(struct rw_
9654 rwsem_count_t tmp;
9655 asm volatile("# beginning __up_write\n\t"
9656 LOCK_PREFIX " xadd %1,(%2)\n\t"
9657+
9658+#ifdef CONFIG_PAX_REFCOUNT
9659+ "jno 0f\n"
9660+ "mov %1,(%2)\n"
9661+ "int $4\n0:\n"
9662+ _ASM_EXTABLE(0b, 0b)
9663+#endif
9664+
9665 /* tries to transition
9666 0xffff0001 -> 0x00000000 */
9667 " jz 1f\n"
9668@@ -234,6 +274,14 @@ static inline void __downgrade_write(str
9669 {
9670 asm volatile("# beginning __downgrade_write\n\t"
9671 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
9672+
9673+#ifdef CONFIG_PAX_REFCOUNT
9674+ "jno 0f\n"
9675+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
9676+ "int $4\n0:\n"
9677+ _ASM_EXTABLE(0b, 0b)
9678+#endif
9679+
9680 /*
9681 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
9682 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
9683@@ -253,7 +301,15 @@ static inline void __downgrade_write(str
9684 static inline void rwsem_atomic_add(rwsem_count_t delta,
9685 struct rw_semaphore *sem)
9686 {
9687- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
9688+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
9689+
9690+#ifdef CONFIG_PAX_REFCOUNT
9691+ "jno 0f\n"
9692+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
9693+ "int $4\n0:\n"
9694+ _ASM_EXTABLE(0b, 0b)
9695+#endif
9696+
9697 : "+m" (sem->count)
9698 : "er" (delta));
9699 }
9700@@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic
9701 {
9702 rwsem_count_t tmp = delta;
9703
9704- asm volatile(LOCK_PREFIX "xadd %0,%1"
9705+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
9706+
9707+#ifdef CONFIG_PAX_REFCOUNT
9708+ "jno 0f\n"
9709+ "mov %0,%1\n"
9710+ "int $4\n0:\n"
9711+ _ASM_EXTABLE(0b, 0b)
9712+#endif
9713+
9714 : "+r" (tmp), "+m" (sem->count)
9715 : : "memory");
9716
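
Note on the CONFIG_PAX_REFCOUNT hunks above: each locked add/inc/xadd is followed by a `jno` that skips a recovery sequence when the signed result did not overflow; on overflow the sequence undoes the operation and executes `int $4` (the x86 overflow exception) so the handler can log and kill the offender. The sketch below is illustrative only and uses portable C rather than inline assembly: it detects the overflow before committing the new value instead of undoing it afterwards, relying on GCC/Clang's __builtin_add_overflow.

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

static void refcount_inc_checked(int *count)
{
    int newval;

    if (__builtin_add_overflow(*count, 1, &newval)) {
        /* Overflow: refuse to wrap and trap, where the patch raises int $4. */
        fprintf(stderr, "refcount overflow detected\n");
        abort();
    }
    *count = newval;
}

int main(void)
{
    int count = INT_MAX - 1;

    refcount_inc_checked(&count);               /* fine: INT_MAX - 1 -> INT_MAX */
    printf("count = %d\n", count);
    refcount_inc_checked(&count);               /* would wrap: caught, aborts */
    return 0;
}
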
9717diff -urNp linux-2.6.32.41/arch/x86/include/asm/segment.h linux-2.6.32.41/arch/x86/include/asm/segment.h
9718--- linux-2.6.32.41/arch/x86/include/asm/segment.h 2011-03-27 14:31:47.000000000 -0400
9719+++ linux-2.6.32.41/arch/x86/include/asm/segment.h 2011-04-17 15:56:46.000000000 -0400
9720@@ -62,8 +62,8 @@
9721 * 26 - ESPFIX small SS
9722 * 27 - per-cpu [ offset to per-cpu data area ]
9723 * 28 - stack_canary-20 [ for stack protector ]
9724- * 29 - unused
9725- * 30 - unused
9726+ * 29 - PCI BIOS CS
9727+ * 30 - PCI BIOS DS
9728 * 31 - TSS for double fault handler
9729 */
9730 #define GDT_ENTRY_TLS_MIN 6
9731@@ -77,6 +77,8 @@
9732
9733 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
9734
9735+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
9736+
9737 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
9738
9739 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
9740@@ -88,7 +90,7 @@
9741 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
9742 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
9743
9744-#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
9745+#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
9746 #ifdef CONFIG_SMP
9747 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
9748 #else
9749@@ -102,6 +104,12 @@
9750 #define __KERNEL_STACK_CANARY 0
9751 #endif
9752
9753+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
9754+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
9755+
9756+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
9757+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
9758+
9759 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
9760
9761 /*
9762@@ -139,7 +147,7 @@
9763 */
9764
9765 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
9766-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
9767+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
9768
9769
9770 #else
9771@@ -163,6 +171,8 @@
9772 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
9773 #define __USER32_DS __USER_DS
9774
9775+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
9776+
9777 #define GDT_ENTRY_TSS 8 /* needs two entries */
9778 #define GDT_ENTRY_LDT 10 /* needs two entries */
9779 #define GDT_ENTRY_TLS_MIN 12
9780@@ -183,6 +193,7 @@
9781 #endif
9782
9783 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
9784+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
9785 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
9786 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
9787 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
9788diff -urNp linux-2.6.32.41/arch/x86/include/asm/smp.h linux-2.6.32.41/arch/x86/include/asm/smp.h
9789--- linux-2.6.32.41/arch/x86/include/asm/smp.h 2011-03-27 14:31:47.000000000 -0400
9790+++ linux-2.6.32.41/arch/x86/include/asm/smp.h 2011-04-17 15:56:46.000000000 -0400
9791@@ -24,7 +24,7 @@ extern unsigned int num_processors;
9792 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
9793 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
9794 DECLARE_PER_CPU(u16, cpu_llc_id);
9795-DECLARE_PER_CPU(int, cpu_number);
9796+DECLARE_PER_CPU(unsigned int, cpu_number);
9797
9798 static inline struct cpumask *cpu_sibling_mask(int cpu)
9799 {
9800@@ -175,14 +175,8 @@ extern unsigned disabled_cpus __cpuinitd
9801 extern int safe_smp_processor_id(void);
9802
9803 #elif defined(CONFIG_X86_64_SMP)
9804-#define raw_smp_processor_id() (percpu_read(cpu_number))
9805-
9806-#define stack_smp_processor_id() \
9807-({ \
9808- struct thread_info *ti; \
9809- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
9810- ti->cpu; \
9811-})
9812+#define raw_smp_processor_id() (percpu_read(cpu_number))
9813+#define stack_smp_processor_id() raw_smp_processor_id()
9814 #define safe_smp_processor_id() smp_processor_id()
9815
9816 #endif
9817diff -urNp linux-2.6.32.41/arch/x86/include/asm/spinlock.h linux-2.6.32.41/arch/x86/include/asm/spinlock.h
9818--- linux-2.6.32.41/arch/x86/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
9819+++ linux-2.6.32.41/arch/x86/include/asm/spinlock.h 2011-04-17 15:56:46.000000000 -0400
9820@@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(r
9821 static inline void __raw_read_lock(raw_rwlock_t *rw)
9822 {
9823 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
9824+
9825+#ifdef CONFIG_PAX_REFCOUNT
9826+ "jno 0f\n"
9827+ LOCK_PREFIX " addl $1,(%0)\n"
9828+ "int $4\n0:\n"
9829+ _ASM_EXTABLE(0b, 0b)
9830+#endif
9831+
9832 "jns 1f\n"
9833 "call __read_lock_failed\n\t"
9834 "1:\n"
9835@@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_r
9836 static inline void __raw_write_lock(raw_rwlock_t *rw)
9837 {
9838 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
9839+
9840+#ifdef CONFIG_PAX_REFCOUNT
9841+ "jno 0f\n"
9842+ LOCK_PREFIX " addl %1,(%0)\n"
9843+ "int $4\n0:\n"
9844+ _ASM_EXTABLE(0b, 0b)
9845+#endif
9846+
9847 "jz 1f\n"
9848 "call __write_lock_failed\n\t"
9849 "1:\n"
9850@@ -286,12 +302,29 @@ static inline int __raw_write_trylock(ra
9851
9852 static inline void __raw_read_unlock(raw_rwlock_t *rw)
9853 {
9854- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
9855+ asm volatile(LOCK_PREFIX "incl %0\n"
9856+
9857+#ifdef CONFIG_PAX_REFCOUNT
9858+ "jno 0f\n"
9859+ LOCK_PREFIX "decl %0\n"
9860+ "int $4\n0:\n"
9861+ _ASM_EXTABLE(0b, 0b)
9862+#endif
9863+
9864+ :"+m" (rw->lock) : : "memory");
9865 }
9866
9867 static inline void __raw_write_unlock(raw_rwlock_t *rw)
9868 {
9869- asm volatile(LOCK_PREFIX "addl %1, %0"
9870+ asm volatile(LOCK_PREFIX "addl %1, %0\n"
9871+
9872+#ifdef CONFIG_PAX_REFCOUNT
9873+ "jno 0f\n"
9874+ LOCK_PREFIX "subl %1, %0\n"
9875+ "int $4\n0:\n"
9876+ _ASM_EXTABLE(0b, 0b)
9877+#endif
9878+
9879 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
9880 }
9881
9882diff -urNp linux-2.6.32.41/arch/x86/include/asm/stackprotector.h linux-2.6.32.41/arch/x86/include/asm/stackprotector.h
9883--- linux-2.6.32.41/arch/x86/include/asm/stackprotector.h 2011-03-27 14:31:47.000000000 -0400
9884+++ linux-2.6.32.41/arch/x86/include/asm/stackprotector.h 2011-04-17 15:56:46.000000000 -0400
9885@@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
9886
9887 static inline void load_stack_canary_segment(void)
9888 {
9889-#ifdef CONFIG_X86_32
9890+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
9891 asm volatile ("mov %0, %%gs" : : "r" (0));
9892 #endif
9893 }
9894diff -urNp linux-2.6.32.41/arch/x86/include/asm/system.h linux-2.6.32.41/arch/x86/include/asm/system.h
9895--- linux-2.6.32.41/arch/x86/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
9896+++ linux-2.6.32.41/arch/x86/include/asm/system.h 2011-05-22 23:02:03.000000000 -0400
9897@@ -132,7 +132,7 @@ do { \
9898 "thread_return:\n\t" \
9899 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
9900 __switch_canary \
9901- "movq %P[thread_info](%%rsi),%%r8\n\t" \
9902+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
9903 "movq %%rax,%%rdi\n\t" \
9904 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
9905 "jnz ret_from_fork\n\t" \
9906@@ -143,7 +143,7 @@ do { \
9907 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
9908 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
9909 [_tif_fork] "i" (_TIF_FORK), \
9910- [thread_info] "i" (offsetof(struct task_struct, stack)), \
9911+ [thread_info] "m" (per_cpu_var(current_tinfo)), \
9912 [current_task] "m" (per_cpu_var(current_task)) \
9913 __switch_canary_iparam \
9914 : "memory", "cc" __EXTRA_CLOBBER)
9915@@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
9916 {
9917 unsigned long __limit;
9918 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
9919- return __limit + 1;
9920+ return __limit;
9921 }
9922
9923 static inline void native_clts(void)
9924@@ -340,12 +340,12 @@ void enable_hlt(void);
9925
9926 void cpu_idle_wait(void);
9927
9928-extern unsigned long arch_align_stack(unsigned long sp);
9929+#define arch_align_stack(x) ((x) & ~0xfUL)
9930 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
9931
9932 void default_idle(void);
9933
9934-void stop_this_cpu(void *dummy);
9935+void stop_this_cpu(void *dummy) __noreturn;
9936
9937 /*
9938 * Force strict CPU ordering.
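
Note on the system.h hunk above: arch_align_stack() stops randomizing and becomes a plain align-down-to-16-bytes mask (stack randomization is handled by PaX ASLR elsewhere). A minimal illustration of what the mask does, not part of the patch:

#include <stdio.h>

#define ARCH_ALIGN_STACK(x) ((x) & ~0xfUL)      /* align down to 16 bytes */

int main(void)
{
    unsigned long sp = 0xbffff123UL;

    printf("%#lx -> %#lx\n", sp, ARCH_ALIGN_STACK(sp));   /* -> 0xbffff120 */
    return 0;
}
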
9939diff -urNp linux-2.6.32.41/arch/x86/include/asm/thread_info.h linux-2.6.32.41/arch/x86/include/asm/thread_info.h
9940--- linux-2.6.32.41/arch/x86/include/asm/thread_info.h 2011-03-27 14:31:47.000000000 -0400
9941+++ linux-2.6.32.41/arch/x86/include/asm/thread_info.h 2011-05-17 19:26:34.000000000 -0400
9942@@ -10,6 +10,7 @@
9943 #include <linux/compiler.h>
9944 #include <asm/page.h>
9945 #include <asm/types.h>
9946+#include <asm/percpu.h>
9947
9948 /*
9949 * low level task data that entry.S needs immediate access to
9950@@ -24,7 +25,6 @@ struct exec_domain;
9951 #include <asm/atomic.h>
9952
9953 struct thread_info {
9954- struct task_struct *task; /* main task structure */
9955 struct exec_domain *exec_domain; /* execution domain */
9956 __u32 flags; /* low level flags */
9957 __u32 status; /* thread synchronous flags */
9958@@ -34,18 +34,12 @@ struct thread_info {
9959 mm_segment_t addr_limit;
9960 struct restart_block restart_block;
9961 void __user *sysenter_return;
9962-#ifdef CONFIG_X86_32
9963- unsigned long previous_esp; /* ESP of the previous stack in
9964- case of nested (IRQ) stacks
9965- */
9966- __u8 supervisor_stack[0];
9967-#endif
9968+ unsigned long lowest_stack;
9969 int uaccess_err;
9970 };
9971
9972-#define INIT_THREAD_INFO(tsk) \
9973+#define INIT_THREAD_INFO \
9974 { \
9975- .task = &tsk, \
9976 .exec_domain = &default_exec_domain, \
9977 .flags = 0, \
9978 .cpu = 0, \
9979@@ -56,7 +50,7 @@ struct thread_info {
9980 }, \
9981 }
9982
9983-#define init_thread_info (init_thread_union.thread_info)
9984+#define init_thread_info (init_thread_union.stack)
9985 #define init_stack (init_thread_union.stack)
9986
9987 #else /* !__ASSEMBLY__ */
9988@@ -163,6 +157,23 @@ struct thread_info {
9989 #define alloc_thread_info(tsk) \
9990 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
9991
9992+#ifdef __ASSEMBLY__
9993+/* how to get the thread information struct from ASM */
9994+#define GET_THREAD_INFO(reg) \
9995+ mov PER_CPU_VAR(current_tinfo), reg
9996+
9997+/* use this one if reg already contains %esp */
9998+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
9999+#else
10000+/* how to get the thread information struct from C */
10001+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10002+
10003+static __always_inline struct thread_info *current_thread_info(void)
10004+{
10005+ return percpu_read_stable(current_tinfo);
10006+}
10007+#endif
10008+
10009 #ifdef CONFIG_X86_32
10010
10011 #define STACK_WARN (THREAD_SIZE/8)
10012@@ -173,35 +184,13 @@ struct thread_info {
10013 */
10014 #ifndef __ASSEMBLY__
10015
10016-
10017 /* how to get the current stack pointer from C */
10018 register unsigned long current_stack_pointer asm("esp") __used;
10019
10020-/* how to get the thread information struct from C */
10021-static inline struct thread_info *current_thread_info(void)
10022-{
10023- return (struct thread_info *)
10024- (current_stack_pointer & ~(THREAD_SIZE - 1));
10025-}
10026-
10027-#else /* !__ASSEMBLY__ */
10028-
10029-/* how to get the thread information struct from ASM */
10030-#define GET_THREAD_INFO(reg) \
10031- movl $-THREAD_SIZE, reg; \
10032- andl %esp, reg
10033-
10034-/* use this one if reg already contains %esp */
10035-#define GET_THREAD_INFO_WITH_ESP(reg) \
10036- andl $-THREAD_SIZE, reg
10037-
10038 #endif
10039
10040 #else /* X86_32 */
10041
10042-#include <asm/percpu.h>
10043-#define KERNEL_STACK_OFFSET (5*8)
10044-
10045 /*
10046 * macros/functions for gaining access to the thread information structure
10047 * preempt_count needs to be 1 initially, until the scheduler is functional.
10048@@ -209,21 +198,8 @@ static inline struct thread_info *curren
10049 #ifndef __ASSEMBLY__
10050 DECLARE_PER_CPU(unsigned long, kernel_stack);
10051
10052-static inline struct thread_info *current_thread_info(void)
10053-{
10054- struct thread_info *ti;
10055- ti = (void *)(percpu_read_stable(kernel_stack) +
10056- KERNEL_STACK_OFFSET - THREAD_SIZE);
10057- return ti;
10058-}
10059-
10060-#else /* !__ASSEMBLY__ */
10061-
10062-/* how to get the thread information struct from ASM */
10063-#define GET_THREAD_INFO(reg) \
10064- movq PER_CPU_VAR(kernel_stack),reg ; \
10065- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10066-
10067+/* how to get the current stack pointer from C */
10068+register unsigned long current_stack_pointer asm("rsp") __used;
10069 #endif
10070
10071 #endif /* !X86_32 */
10072@@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
10073 extern void free_thread_info(struct thread_info *ti);
10074 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10075 #define arch_task_cache_init arch_task_cache_init
10076+
10077+#define __HAVE_THREAD_FUNCTIONS
10078+#define task_thread_info(task) (&(task)->tinfo)
10079+#define task_stack_page(task) ((task)->stack)
10080+#define setup_thread_stack(p, org) do {} while (0)
10081+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10082+
10083+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10084+extern struct task_struct *alloc_task_struct(void);
10085+extern void free_task_struct(struct task_struct *);
10086+
10087 #endif
10088 #endif /* _ASM_X86_THREAD_INFO_H */
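
Note on the thread_info.h hunk above: current_thread_info() no longer derives the pointer by masking the stack pointer with THREAD_SIZE; it reads the per-CPU current_tinfo pointer instead, which also lets task_thread_info() live inside task_struct. The sketch below is illustrative only; it uses a GCC __thread TLS variable as a userspace stand-in for the kernel's per-CPU variable.

#include <stdio.h>

struct thread_info {
    unsigned int flags;
    int cpu;
};

/* __thread TLS variable standing in for
 * DECLARE_PER_CPU(struct thread_info *, current_tinfo). */
static __thread struct thread_info *current_tinfo;

static struct thread_info *current_thread_info(void)
{
    return current_tinfo;       /* one load; no dependence on stack layout */
}

int main(void)
{
    struct thread_info ti = { .flags = 0, .cpu = 0 };

    current_tinfo = &ti;
    printf("cpu = %d\n", current_thread_info()->cpu);
    return 0;
}
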
10089diff -urNp linux-2.6.32.41/arch/x86/include/asm/uaccess_32.h linux-2.6.32.41/arch/x86/include/asm/uaccess_32.h
10090--- linux-2.6.32.41/arch/x86/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
10091+++ linux-2.6.32.41/arch/x86/include/asm/uaccess_32.h 2011-05-16 21:46:57.000000000 -0400
10092@@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_u
10093 static __always_inline unsigned long __must_check
10094 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10095 {
10096+ pax_track_stack();
10097+
10098+ if ((long)n < 0)
10099+ return n;
10100+
10101 if (__builtin_constant_p(n)) {
10102 unsigned long ret;
10103
10104@@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to,
10105 return ret;
10106 }
10107 }
10108+ if (!__builtin_constant_p(n))
10109+ check_object_size(from, n, true);
10110 return __copy_to_user_ll(to, from, n);
10111 }
10112
10113@@ -83,12 +90,16 @@ static __always_inline unsigned long __m
10114 __copy_to_user(void __user *to, const void *from, unsigned long n)
10115 {
10116 might_fault();
10117+
10118 return __copy_to_user_inatomic(to, from, n);
10119 }
10120
10121 static __always_inline unsigned long
10122 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
10123 {
10124+ if ((long)n < 0)
10125+ return n;
10126+
10127 /* Avoid zeroing the tail if the copy fails..
10128 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
10129 * but as the zeroing behaviour is only significant when n is not
10130@@ -138,6 +149,12 @@ static __always_inline unsigned long
10131 __copy_from_user(void *to, const void __user *from, unsigned long n)
10132 {
10133 might_fault();
10134+
10135+ pax_track_stack();
10136+
10137+ if ((long)n < 0)
10138+ return n;
10139+
10140 if (__builtin_constant_p(n)) {
10141 unsigned long ret;
10142
10143@@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __
10144 return ret;
10145 }
10146 }
10147+ if (!__builtin_constant_p(n))
10148+ check_object_size(to, n, false);
10149 return __copy_from_user_ll(to, from, n);
10150 }
10151
10152@@ -160,6 +179,10 @@ static __always_inline unsigned long __c
10153 const void __user *from, unsigned long n)
10154 {
10155 might_fault();
10156+
10157+ if ((long)n < 0)
10158+ return n;
10159+
10160 if (__builtin_constant_p(n)) {
10161 unsigned long ret;
10162
10163@@ -182,14 +205,62 @@ static __always_inline unsigned long
10164 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
10165 unsigned long n)
10166 {
10167- return __copy_from_user_ll_nocache_nozero(to, from, n);
10168+ if ((long)n < 0)
10169+ return n;
10170+
10171+ return __copy_from_user_ll_nocache_nozero(to, from, n);
10172+}
10173+
10174+/**
10175+ * copy_to_user: - Copy a block of data into user space.
10176+ * @to: Destination address, in user space.
10177+ * @from: Source address, in kernel space.
10178+ * @n: Number of bytes to copy.
10179+ *
10180+ * Context: User context only. This function may sleep.
10181+ *
10182+ * Copy data from kernel space to user space.
10183+ *
10184+ * Returns number of bytes that could not be copied.
10185+ * On success, this will be zero.
10186+ */
10187+static __always_inline unsigned long __must_check
10188+copy_to_user(void __user *to, const void *from, unsigned long n)
10189+{
10190+ if (access_ok(VERIFY_WRITE, to, n))
10191+ n = __copy_to_user(to, from, n);
10192+ return n;
10193+}
10194+
10195+/**
10196+ * copy_from_user: - Copy a block of data from user space.
10197+ * @to: Destination address, in kernel space.
10198+ * @from: Source address, in user space.
10199+ * @n: Number of bytes to copy.
10200+ *
10201+ * Context: User context only. This function may sleep.
10202+ *
10203+ * Copy data from user space to kernel space.
10204+ *
10205+ * Returns number of bytes that could not be copied.
10206+ * On success, this will be zero.
10207+ *
10208+ * If some data could not be copied, this function will pad the copied
10209+ * data to the requested size using zero bytes.
10210+ */
10211+static __always_inline unsigned long __must_check
10212+copy_from_user(void *to, const void __user *from, unsigned long n)
10213+{
10214+ if (access_ok(VERIFY_READ, from, n))
10215+ n = __copy_from_user(to, from, n);
10216+ else if ((long)n > 0) {
10217+ if (!__builtin_constant_p(n))
10218+ check_object_size(to, n, false);
10219+ memset(to, 0, n);
10220+ }
10221+ return n;
10222 }
10223
10224-unsigned long __must_check copy_to_user(void __user *to,
10225- const void *from, unsigned long n);
10226-unsigned long __must_check copy_from_user(void *to,
10227- const void __user *from,
10228- unsigned long n);
10229 long __must_check strncpy_from_user(char *dst, const char __user *src,
10230 long count);
10231 long __must_check __strncpy_from_user(char *dst,
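
Note on the uaccess_32.h hunk above: copy_to_user()/copy_from_user() become inline wrappers that reject absurd (negative when signed) sizes, run access_ok(), delegate to the unchecked __copy_* helpers, and on a failed read check zero the destination so no stale kernel memory can leak to the caller. The sketch below mirrors that control flow in userspace; range_is_valid() and raw_copy() are hypothetical stand-ins for access_ok() and __copy_from_user(), not real kernel interfaces.

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for access_ok(): pretend the range is valid. */
static int range_is_valid(const void *uaddr, unsigned long n)
{
    (void)uaddr; (void)n;
    return 1;
}

/* Hypothetical stand-in for __copy_from_user(): returns bytes NOT copied. */
static unsigned long raw_copy(void *to, const void *from, unsigned long n)
{
    memcpy(to, from, n);
    return 0;
}

static unsigned long copy_from_user_sketch(void *to, const void *from,
                                           unsigned long n)
{
    if ((long)n < 0)                  /* absurd size: refuse outright */
        return n;
    if (range_is_valid(from, n))
        return raw_copy(to, from, n);
    memset(to, 0, n);                 /* failed check: never leak old data */
    return n;                         /* everything reported as uncopied */
}

int main(void)
{
    char src[] = "hello", dst[8] = { 0 };
    unsigned long left = copy_from_user_sketch(dst, src, sizeof(src));

    printf("left=%lu dst=%s\n", left, dst);
    return 0;
}
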
10232diff -urNp linux-2.6.32.41/arch/x86/include/asm/uaccess_64.h linux-2.6.32.41/arch/x86/include/asm/uaccess_64.h
10233--- linux-2.6.32.41/arch/x86/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
10234+++ linux-2.6.32.41/arch/x86/include/asm/uaccess_64.h 2011-05-16 21:46:57.000000000 -0400
10235@@ -9,6 +9,9 @@
10236 #include <linux/prefetch.h>
10237 #include <linux/lockdep.h>
10238 #include <asm/page.h>
10239+#include <asm/pgtable.h>
10240+
10241+#define set_fs(x) (current_thread_info()->addr_limit = (x))
10242
10243 /*
10244 * Copy To/From Userspace
10245@@ -19,113 +22,203 @@ __must_check unsigned long
10246 copy_user_generic(void *to, const void *from, unsigned len);
10247
10248 __must_check unsigned long
10249-copy_to_user(void __user *to, const void *from, unsigned len);
10250-__must_check unsigned long
10251-copy_from_user(void *to, const void __user *from, unsigned len);
10252-__must_check unsigned long
10253 copy_in_user(void __user *to, const void __user *from, unsigned len);
10254
10255 static __always_inline __must_check
10256-int __copy_from_user(void *dst, const void __user *src, unsigned size)
10257+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
10258 {
10259- int ret = 0;
10260+ unsigned ret = 0;
10261
10262 might_fault();
10263- if (!__builtin_constant_p(size))
10264- return copy_user_generic(dst, (__force void *)src, size);
10265+
10266+ if ((int)size < 0)
10267+ return size;
10268+
10269+#ifdef CONFIG_PAX_MEMORY_UDEREF
10270+ if (!__access_ok(VERIFY_READ, src, size))
10271+ return size;
10272+#endif
10273+
10274+ if (!__builtin_constant_p(size)) {
10275+ check_object_size(dst, size, false);
10276+
10277+#ifdef CONFIG_PAX_MEMORY_UDEREF
10278+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10279+ src += PAX_USER_SHADOW_BASE;
10280+#endif
10281+
10282+ return copy_user_generic(dst, (__force const void *)src, size);
10283+ }
10284 switch (size) {
10285- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
10286+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
10287 ret, "b", "b", "=q", 1);
10288 return ret;
10289- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
10290+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
10291 ret, "w", "w", "=r", 2);
10292 return ret;
10293- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
10294+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
10295 ret, "l", "k", "=r", 4);
10296 return ret;
10297- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
10298+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10299 ret, "q", "", "=r", 8);
10300 return ret;
10301 case 10:
10302- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10303+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10304 ret, "q", "", "=r", 10);
10305 if (unlikely(ret))
10306 return ret;
10307 __get_user_asm(*(u16 *)(8 + (char *)dst),
10308- (u16 __user *)(8 + (char __user *)src),
10309+ (const u16 __user *)(8 + (const char __user *)src),
10310 ret, "w", "w", "=r", 2);
10311 return ret;
10312 case 16:
10313- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10314+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10315 ret, "q", "", "=r", 16);
10316 if (unlikely(ret))
10317 return ret;
10318 __get_user_asm(*(u64 *)(8 + (char *)dst),
10319- (u64 __user *)(8 + (char __user *)src),
10320+ (const u64 __user *)(8 + (const char __user *)src),
10321 ret, "q", "", "=r", 8);
10322 return ret;
10323 default:
10324- return copy_user_generic(dst, (__force void *)src, size);
10325+
10326+#ifdef CONFIG_PAX_MEMORY_UDEREF
10327+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10328+ src += PAX_USER_SHADOW_BASE;
10329+#endif
10330+
10331+ return copy_user_generic(dst, (__force const void *)src, size);
10332 }
10333 }
10334
10335 static __always_inline __must_check
10336-int __copy_to_user(void __user *dst, const void *src, unsigned size)
10337+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
10338 {
10339- int ret = 0;
10340+ unsigned ret = 0;
10341
10342 might_fault();
10343- if (!__builtin_constant_p(size))
10344+
10345+ pax_track_stack();
10346+
10347+ if ((int)size < 0)
10348+ return size;
10349+
10350+#ifdef CONFIG_PAX_MEMORY_UDEREF
10351+ if (!__access_ok(VERIFY_WRITE, dst, size))
10352+ return size;
10353+#endif
10354+
10355+ if (!__builtin_constant_p(size)) {
10356+ check_object_size(src, size, true);
10357+
10358+#ifdef CONFIG_PAX_MEMORY_UDEREF
10359+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10360+ dst += PAX_USER_SHADOW_BASE;
10361+#endif
10362+
10363 return copy_user_generic((__force void *)dst, src, size);
10364+ }
10365 switch (size) {
10366- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
10367+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
10368 ret, "b", "b", "iq", 1);
10369 return ret;
10370- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
10371+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
10372 ret, "w", "w", "ir", 2);
10373 return ret;
10374- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
10375+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
10376 ret, "l", "k", "ir", 4);
10377 return ret;
10378- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
10379+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10380 ret, "q", "", "er", 8);
10381 return ret;
10382 case 10:
10383- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10384+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10385 ret, "q", "", "er", 10);
10386 if (unlikely(ret))
10387 return ret;
10388 asm("":::"memory");
10389- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
10390+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
10391 ret, "w", "w", "ir", 2);
10392 return ret;
10393 case 16:
10394- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10395+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10396 ret, "q", "", "er", 16);
10397 if (unlikely(ret))
10398 return ret;
10399 asm("":::"memory");
10400- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
10401+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
10402 ret, "q", "", "er", 8);
10403 return ret;
10404 default:
10405+
10406+#ifdef CONFIG_PAX_MEMORY_UDEREF
10407+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10408+ dst += PAX_USER_SHADOW_BASE;
10409+#endif
10410+
10411 return copy_user_generic((__force void *)dst, src, size);
10412 }
10413 }
10414
10415 static __always_inline __must_check
10416-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10417+unsigned long copy_to_user(void __user *to, const void *from, unsigned len)
10418+{
10419+ if (access_ok(VERIFY_WRITE, to, len))
10420+ len = __copy_to_user(to, from, len);
10421+ return len;
10422+}
10423+
10424+static __always_inline __must_check
10425+unsigned long copy_from_user(void *to, const void __user *from, unsigned len)
10426+{
10427+ if ((int)len < 0)
10428+ return len;
10429+
10430+ if (access_ok(VERIFY_READ, from, len))
10431+ len = __copy_from_user(to, from, len);
10432+ else if ((int)len > 0) {
10433+ if (!__builtin_constant_p(len))
10434+ check_object_size(to, len, false);
10435+ memset(to, 0, len);
10436+ }
10437+ return len;
10438+}
10439+
10440+static __always_inline __must_check
10441+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10442 {
10443- int ret = 0;
10444+ unsigned ret = 0;
10445
10446 might_fault();
10447- if (!__builtin_constant_p(size))
10448+
10449+ pax_track_stack();
10450+
10451+ if ((int)size < 0)
10452+ return size;
10453+
10454+#ifdef CONFIG_PAX_MEMORY_UDEREF
10455+ if (!__access_ok(VERIFY_READ, src, size))
10456+ return size;
10457+ if (!__access_ok(VERIFY_WRITE, dst, size))
10458+ return size;
10459+#endif
10460+
10461+ if (!__builtin_constant_p(size)) {
10462+
10463+#ifdef CONFIG_PAX_MEMORY_UDEREF
10464+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10465+ src += PAX_USER_SHADOW_BASE;
10466+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10467+ dst += PAX_USER_SHADOW_BASE;
10468+#endif
10469+
10470 return copy_user_generic((__force void *)dst,
10471- (__force void *)src, size);
10472+ (__force const void *)src, size);
10473+ }
10474 switch (size) {
10475 case 1: {
10476 u8 tmp;
10477- __get_user_asm(tmp, (u8 __user *)src,
10478+ __get_user_asm(tmp, (const u8 __user *)src,
10479 ret, "b", "b", "=q", 1);
10480 if (likely(!ret))
10481 __put_user_asm(tmp, (u8 __user *)dst,
10482@@ -134,7 +227,7 @@ int __copy_in_user(void __user *dst, con
10483 }
10484 case 2: {
10485 u16 tmp;
10486- __get_user_asm(tmp, (u16 __user *)src,
10487+ __get_user_asm(tmp, (const u16 __user *)src,
10488 ret, "w", "w", "=r", 2);
10489 if (likely(!ret))
10490 __put_user_asm(tmp, (u16 __user *)dst,
10491@@ -144,7 +237,7 @@ int __copy_in_user(void __user *dst, con
10492
10493 case 4: {
10494 u32 tmp;
10495- __get_user_asm(tmp, (u32 __user *)src,
10496+ __get_user_asm(tmp, (const u32 __user *)src,
10497 ret, "l", "k", "=r", 4);
10498 if (likely(!ret))
10499 __put_user_asm(tmp, (u32 __user *)dst,
10500@@ -153,7 +246,7 @@ int __copy_in_user(void __user *dst, con
10501 }
10502 case 8: {
10503 u64 tmp;
10504- __get_user_asm(tmp, (u64 __user *)src,
10505+ __get_user_asm(tmp, (const u64 __user *)src,
10506 ret, "q", "", "=r", 8);
10507 if (likely(!ret))
10508 __put_user_asm(tmp, (u64 __user *)dst,
10509@@ -161,8 +254,16 @@ int __copy_in_user(void __user *dst, con
10510 return ret;
10511 }
10512 default:
10513+
10514+#ifdef CONFIG_PAX_MEMORY_UDEREF
10515+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10516+ src += PAX_USER_SHADOW_BASE;
10517+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10518+ dst += PAX_USER_SHADOW_BASE;
10519+#endif
10520+
10521 return copy_user_generic((__force void *)dst,
10522- (__force void *)src, size);
10523+ (__force const void *)src, size);
10524 }
10525 }
10526
10527@@ -176,33 +277,75 @@ __must_check long strlen_user(const char
10528 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
10529 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
10530
10531-__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
10532- unsigned size);
10533+static __must_check __always_inline unsigned long
10534+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
10535+{
10536+ pax_track_stack();
10537+
10538+ if ((int)size < 0)
10539+ return size;
10540
10541-static __must_check __always_inline int
10542+#ifdef CONFIG_PAX_MEMORY_UDEREF
10543+ if (!__access_ok(VERIFY_READ, src, size))
10544+ return size;
10545+
10546+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10547+ src += PAX_USER_SHADOW_BASE;
10548+#endif
10549+
10550+ return copy_user_generic(dst, (__force const void *)src, size);
10551+}
10552+
10553+static __must_check __always_inline unsigned long
10554 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
10555 {
10556+ if ((int)size < 0)
10557+ return size;
10558+
10559+#ifdef CONFIG_PAX_MEMORY_UDEREF
10560+ if (!__access_ok(VERIFY_WRITE, dst, size))
10561+ return size;
10562+
10563+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10564+ dst += PAX_USER_SHADOW_BASE;
10565+#endif
10566+
10567 return copy_user_generic((__force void *)dst, src, size);
10568 }
10569
10570-extern long __copy_user_nocache(void *dst, const void __user *src,
10571+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
10572 unsigned size, int zerorest);
10573
10574-static inline int
10575-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
10576+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
10577 {
10578 might_sleep();
10579+
10580+ if ((int)size < 0)
10581+ return size;
10582+
10583+#ifdef CONFIG_PAX_MEMORY_UDEREF
10584+ if (!__access_ok(VERIFY_READ, src, size))
10585+ return size;
10586+#endif
10587+
10588 return __copy_user_nocache(dst, src, size, 1);
10589 }
10590
10591-static inline int
10592-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
10593+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
10594 unsigned size)
10595 {
10596+ if ((int)size < 0)
10597+ return size;
10598+
10599+#ifdef CONFIG_PAX_MEMORY_UDEREF
10600+ if (!__access_ok(VERIFY_READ, src, size))
10601+ return size;
10602+#endif
10603+
10604 return __copy_user_nocache(dst, src, size, 0);
10605 }
10606
10607-unsigned long
10608+extern unsigned long
10609 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
10610
10611 #endif /* _ASM_X86_UACCESS_64_H */
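
Note on the uaccess_64.h hunk above: under CONFIG_PAX_MEMORY_UDEREF every pointer that still looks like a raw user address (below PAX_USER_SHADOW_BASE) is rebased into a shadow alias of userland before copy_user_generic() touches it, so accidental kernel dereferences of user pointers fault instead of silently reading user memory. The sketch below shows only that normalization step and assumes a 64-bit build; SHADOW_BASE is a made-up placeholder, not the kernel's actual PAX_USER_SHADOW_BASE value.

#include <stdio.h>
#include <stdint.h>

#define SHADOW_BASE 0x100000000000ULL   /* placeholder offset, 64-bit example */

static const void *shadow_alias(const void *uptr)
{
    uintptr_t p = (uintptr_t)uptr;

    if (p < SHADOW_BASE)        /* raw user address: shift it into the alias */
        p += SHADOW_BASE;
    return (const void *)p;
}

int main(void)
{
    printf("%p\n", shadow_alias((const void *)0x7fff0000UL));
    return 0;
}
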
10612diff -urNp linux-2.6.32.41/arch/x86/include/asm/uaccess.h linux-2.6.32.41/arch/x86/include/asm/uaccess.h
10613--- linux-2.6.32.41/arch/x86/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
10614+++ linux-2.6.32.41/arch/x86/include/asm/uaccess.h 2011-04-17 15:56:46.000000000 -0400
10615@@ -8,12 +8,15 @@
10616 #include <linux/thread_info.h>
10617 #include <linux/prefetch.h>
10618 #include <linux/string.h>
10619+#include <linux/sched.h>
10620 #include <asm/asm.h>
10621 #include <asm/page.h>
10622
10623 #define VERIFY_READ 0
10624 #define VERIFY_WRITE 1
10625
10626+extern void check_object_size(const void *ptr, unsigned long n, bool to);
10627+
10628 /*
10629 * The fs value determines whether argument validity checking should be
10630 * performed or not. If get_fs() == USER_DS, checking is performed, with
10631@@ -29,7 +32,12 @@
10632
10633 #define get_ds() (KERNEL_DS)
10634 #define get_fs() (current_thread_info()->addr_limit)
10635+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10636+void __set_fs(mm_segment_t x);
10637+void set_fs(mm_segment_t x);
10638+#else
10639 #define set_fs(x) (current_thread_info()->addr_limit = (x))
10640+#endif
10641
10642 #define segment_eq(a, b) ((a).seg == (b).seg)
10643
10644@@ -77,7 +85,33 @@
10645 * checks that the pointer is in the user space range - after calling
10646 * this function, memory access functions may still return -EFAULT.
10647 */
10648-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10649+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10650+#define access_ok(type, addr, size) \
10651+({ \
10652+ long __size = size; \
10653+ unsigned long __addr = (unsigned long)addr; \
10654+ unsigned long __addr_ao = __addr & PAGE_MASK; \
10655+ unsigned long __end_ao = __addr + __size - 1; \
10656+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
10657+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
10658+ while(__addr_ao <= __end_ao) { \
10659+ char __c_ao; \
10660+ __addr_ao += PAGE_SIZE; \
10661+ if (__size > PAGE_SIZE) \
10662+ cond_resched(); \
10663+ if (__get_user(__c_ao, (char __user *)__addr)) \
10664+ break; \
10665+ if (type != VERIFY_WRITE) { \
10666+ __addr = __addr_ao; \
10667+ continue; \
10668+ } \
10669+ if (__put_user(__c_ao, (char __user *)__addr)) \
10670+ break; \
10671+ __addr = __addr_ao; \
10672+ } \
10673+ } \
10674+ __ret_ao; \
10675+})
10676
10677 /*
10678 * The exception table consists of pairs of addresses: the first is the
10679@@ -183,12 +217,20 @@ extern int __get_user_bad(void);
10680 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
10681 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
10682
10683-
10684+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10685+#define __copyuser_seg "gs;"
10686+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
10687+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
10688+#else
10689+#define __copyuser_seg
10690+#define __COPYUSER_SET_ES
10691+#define __COPYUSER_RESTORE_ES
10692+#endif
10693
10694 #ifdef CONFIG_X86_32
10695 #define __put_user_asm_u64(x, addr, err, errret) \
10696- asm volatile("1: movl %%eax,0(%2)\n" \
10697- "2: movl %%edx,4(%2)\n" \
10698+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
10699+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
10700 "3:\n" \
10701 ".section .fixup,\"ax\"\n" \
10702 "4: movl %3,%0\n" \
10703@@ -200,8 +242,8 @@ extern int __get_user_bad(void);
10704 : "A" (x), "r" (addr), "i" (errret), "0" (err))
10705
10706 #define __put_user_asm_ex_u64(x, addr) \
10707- asm volatile("1: movl %%eax,0(%1)\n" \
10708- "2: movl %%edx,4(%1)\n" \
10709+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
10710+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
10711 "3:\n" \
10712 _ASM_EXTABLE(1b, 2b - 1b) \
10713 _ASM_EXTABLE(2b, 3b - 2b) \
10714@@ -374,7 +416,7 @@ do { \
10715 } while (0)
10716
10717 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10718- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
10719+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
10720 "2:\n" \
10721 ".section .fixup,\"ax\"\n" \
10722 "3: mov %3,%0\n" \
10723@@ -382,7 +424,7 @@ do { \
10724 " jmp 2b\n" \
10725 ".previous\n" \
10726 _ASM_EXTABLE(1b, 3b) \
10727- : "=r" (err), ltype(x) \
10728+ : "=r" (err), ltype (x) \
10729 : "m" (__m(addr)), "i" (errret), "0" (err))
10730
10731 #define __get_user_size_ex(x, ptr, size) \
10732@@ -407,7 +449,7 @@ do { \
10733 } while (0)
10734
10735 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
10736- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
10737+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
10738 "2:\n" \
10739 _ASM_EXTABLE(1b, 2b - 1b) \
10740 : ltype(x) : "m" (__m(addr)))
10741@@ -424,13 +466,24 @@ do { \
10742 int __gu_err; \
10743 unsigned long __gu_val; \
10744 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
10745- (x) = (__force __typeof__(*(ptr)))__gu_val; \
10746+ (x) = (__typeof__(*(ptr)))__gu_val; \
10747 __gu_err; \
10748 })
10749
10750 /* FIXME: this hack is definitely wrong -AK */
10751 struct __large_struct { unsigned long buf[100]; };
10752-#define __m(x) (*(struct __large_struct __user *)(x))
10753+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10754+#define ____m(x) \
10755+({ \
10756+ unsigned long ____x = (unsigned long)(x); \
10757+ if (____x < PAX_USER_SHADOW_BASE) \
10758+ ____x += PAX_USER_SHADOW_BASE; \
10759+ (void __user *)____x; \
10760+})
10761+#else
10762+#define ____m(x) (x)
10763+#endif
10764+#define __m(x) (*(struct __large_struct __user *)____m(x))
10765
10766 /*
10767 * Tell gcc we read from memory instead of writing: this is because
10768@@ -438,7 +491,7 @@ struct __large_struct { unsigned long bu
10769 * aliasing issues.
10770 */
10771 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10772- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
10773+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
10774 "2:\n" \
10775 ".section .fixup,\"ax\"\n" \
10776 "3: mov %3,%0\n" \
10777@@ -446,10 +499,10 @@ struct __large_struct { unsigned long bu
10778 ".previous\n" \
10779 _ASM_EXTABLE(1b, 3b) \
10780 : "=r"(err) \
10781- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
10782+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
10783
10784 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
10785- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
10786+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
10787 "2:\n" \
10788 _ASM_EXTABLE(1b, 2b - 1b) \
10789 : : ltype(x), "m" (__m(addr)))
10790@@ -488,8 +541,12 @@ struct __large_struct { unsigned long bu
10791 * On error, the variable @x is set to zero.
10792 */
10793
10794+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10795+#define __get_user(x, ptr) get_user((x), (ptr))
10796+#else
10797 #define __get_user(x, ptr) \
10798 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
10799+#endif
10800
10801 /**
10802 * __put_user: - Write a simple value into user space, with less checking.
10803@@ -511,8 +568,12 @@ struct __large_struct { unsigned long bu
10804 * Returns zero on success, or -EFAULT on error.
10805 */
10806
10807+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10808+#define __put_user(x, ptr) put_user((x), (ptr))
10809+#else
10810 #define __put_user(x, ptr) \
10811 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
10812+#endif
10813
10814 #define __get_user_unaligned __get_user
10815 #define __put_user_unaligned __put_user
10816@@ -530,7 +591,7 @@ struct __large_struct { unsigned long bu
10817 #define get_user_ex(x, ptr) do { \
10818 unsigned long __gue_val; \
10819 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
10820- (x) = (__force __typeof__(*(ptr)))__gue_val; \
10821+ (x) = (__typeof__(*(ptr)))__gue_val; \
10822 } while (0)
10823
10824 #ifdef CONFIG_X86_WP_WORKS_OK
10825@@ -567,6 +628,7 @@ extern struct movsl_mask {
10826
10827 #define ARCH_HAS_NOCACHE_UACCESS 1
10828
10829+#define ARCH_HAS_SORT_EXTABLE
10830 #ifdef CONFIG_X86_32
10831 # include "uaccess_32.h"
10832 #else
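
Note on the uaccess.h hunk above: besides the usual range check, the rewritten access_ok() walks a multi-page range one page at a time and probes one byte per page (a read, plus a write-back for VERIFY_WRITE), so any fault is taken up front rather than halfway through a copy, and it cond_resched()s on large ranges. The sketch below is illustrative only; probe_page() is a hypothetical stand-in for the __get_user/__put_user probes and PAGE_SIZE is assumed to be 4 KiB.

#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Hypothetical stand-in for the __get_user/__put_user probe pair. */
static int probe_page(uintptr_t addr, int writable)
{
    volatile char c = *(volatile char *)addr;   /* read: may fault */
    if (writable)
        *(volatile char *)addr = c;             /* write the byte back unchanged */
    (void)c;
    return 0;
}

static int walk_and_probe(uintptr_t addr, unsigned long size, int writable)
{
    uintptr_t end  = addr + size - 1;
    uintptr_t page = addr & PAGE_MASK;

    if (((end ^ page) & PAGE_MASK) == 0)        /* fits in one page: nothing to do */
        return 0;

    /* Probe the first page at the caller's address, then each following page. */
    if (probe_page(addr, writable))
        return -1;
    for (page += PAGE_SIZE; page <= end; page += PAGE_SIZE)
        if (probe_page(page, writable))
            return -1;
    return 0;
}

int main(void)
{
    static char buf[2 * 4096];                  /* spans at least two pages */
    return walk_and_probe((uintptr_t)buf, sizeof(buf), 1);
}
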
10833diff -urNp linux-2.6.32.41/arch/x86/include/asm/vgtod.h linux-2.6.32.41/arch/x86/include/asm/vgtod.h
10834--- linux-2.6.32.41/arch/x86/include/asm/vgtod.h 2011-03-27 14:31:47.000000000 -0400
10835+++ linux-2.6.32.41/arch/x86/include/asm/vgtod.h 2011-04-17 15:56:46.000000000 -0400
10836@@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
10837 int sysctl_enabled;
10838 struct timezone sys_tz;
10839 struct { /* extract of a clocksource struct */
10840+ char name[8];
10841 cycle_t (*vread)(void);
10842 cycle_t cycle_last;
10843 cycle_t mask;
10844diff -urNp linux-2.6.32.41/arch/x86/include/asm/vmi.h linux-2.6.32.41/arch/x86/include/asm/vmi.h
10845--- linux-2.6.32.41/arch/x86/include/asm/vmi.h 2011-03-27 14:31:47.000000000 -0400
10846+++ linux-2.6.32.41/arch/x86/include/asm/vmi.h 2011-04-17 15:56:46.000000000 -0400
10847@@ -191,6 +191,7 @@ struct vrom_header {
10848 u8 reserved[96]; /* Reserved for headers */
10849 char vmi_init[8]; /* VMI_Init jump point */
10850 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
10851+ char rom_data[8048]; /* rest of the option ROM */
10852 } __attribute__((packed));
10853
10854 struct pnp_header {
10855diff -urNp linux-2.6.32.41/arch/x86/include/asm/vsyscall.h linux-2.6.32.41/arch/x86/include/asm/vsyscall.h
10856--- linux-2.6.32.41/arch/x86/include/asm/vsyscall.h 2011-03-27 14:31:47.000000000 -0400
10857+++ linux-2.6.32.41/arch/x86/include/asm/vsyscall.h 2011-04-17 15:56:46.000000000 -0400
10858@@ -15,9 +15,10 @@ enum vsyscall_num {
10859
10860 #ifdef __KERNEL__
10861 #include <linux/seqlock.h>
10862+#include <linux/getcpu.h>
10863+#include <linux/time.h>
10864
10865 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
10866-#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
10867
10868 /* Definitions for CONFIG_GENERIC_TIME definitions */
10869 #define __section_vsyscall_gtod_data __attribute__ \
10870@@ -31,7 +32,6 @@ enum vsyscall_num {
10871 #define VGETCPU_LSL 2
10872
10873 extern int __vgetcpu_mode;
10874-extern volatile unsigned long __jiffies;
10875
10876 /* kernel space (writeable) */
10877 extern int vgetcpu_mode;
10878@@ -39,6 +39,9 @@ extern struct timezone sys_tz;
10879
10880 extern void map_vsyscall(void);
10881
10882+extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
10883+extern time_t vtime(time_t *t);
10884+extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
10885 #endif /* __KERNEL__ */
10886
10887 #endif /* _ASM_X86_VSYSCALL_H */
10888diff -urNp linux-2.6.32.41/arch/x86/include/asm/xsave.h linux-2.6.32.41/arch/x86/include/asm/xsave.h
10889--- linux-2.6.32.41/arch/x86/include/asm/xsave.h 2011-03-27 14:31:47.000000000 -0400
10890+++ linux-2.6.32.41/arch/x86/include/asm/xsave.h 2011-04-17 15:56:46.000000000 -0400
10891@@ -56,6 +56,12 @@ static inline int xrstor_checking(struct
10892 static inline int xsave_user(struct xsave_struct __user *buf)
10893 {
10894 int err;
10895+
10896+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10897+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
10898+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
10899+#endif
10900+
10901 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
10902 "2:\n"
10903 ".section .fixup,\"ax\"\n"
10904@@ -82,6 +88,11 @@ static inline int xrestore_user(struct x
10905 u32 lmask = mask;
10906 u32 hmask = mask >> 32;
10907
10908+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10909+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
10910+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
10911+#endif
10912+
10913 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
10914 "2:\n"
10915 ".section .fixup,\"ax\"\n"
10916diff -urNp linux-2.6.32.41/arch/x86/Kconfig linux-2.6.32.41/arch/x86/Kconfig
10917--- linux-2.6.32.41/arch/x86/Kconfig 2011-03-27 14:31:47.000000000 -0400
10918+++ linux-2.6.32.41/arch/x86/Kconfig 2011-04-17 15:56:46.000000000 -0400
10919@@ -223,7 +223,7 @@ config X86_TRAMPOLINE
10920
10921 config X86_32_LAZY_GS
10922 def_bool y
10923- depends on X86_32 && !CC_STACKPROTECTOR
10924+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10925
10926 config KTIME_SCALAR
10927 def_bool X86_32
10928@@ -1008,7 +1008,7 @@ choice
10929
10930 config NOHIGHMEM
10931 bool "off"
10932- depends on !X86_NUMAQ
10933+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10934 ---help---
10935 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10936 However, the address space of 32-bit x86 processors is only 4
10937@@ -1045,7 +1045,7 @@ config NOHIGHMEM
10938
10939 config HIGHMEM4G
10940 bool "4GB"
10941- depends on !X86_NUMAQ
10942+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10943 ---help---
10944 Select this if you have a 32-bit processor and between 1 and 4
10945 gigabytes of physical RAM.
10946@@ -1099,7 +1099,7 @@ config PAGE_OFFSET
10947 hex
10948 default 0xB0000000 if VMSPLIT_3G_OPT
10949 default 0x80000000 if VMSPLIT_2G
10950- default 0x78000000 if VMSPLIT_2G_OPT
10951+ default 0x70000000 if VMSPLIT_2G_OPT
10952 default 0x40000000 if VMSPLIT_1G
10953 default 0xC0000000
10954 depends on X86_32
10955@@ -1430,7 +1430,7 @@ config ARCH_USES_PG_UNCACHED
10956
10957 config EFI
10958 bool "EFI runtime service support"
10959- depends on ACPI
10960+ depends on ACPI && !PAX_KERNEXEC
10961 ---help---
10962 This enables the kernel to use EFI runtime services that are
10963 available (such as the EFI variable services).
10964@@ -1460,6 +1460,7 @@ config SECCOMP
10965
10966 config CC_STACKPROTECTOR
10967 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
10968+ depends on X86_64 || !PAX_MEMORY_UDEREF
10969 ---help---
10970 This option turns on the -fstack-protector GCC feature. This
10971 feature puts, at the beginning of functions, a canary value on
10972@@ -1517,6 +1518,7 @@ config KEXEC_JUMP
10973 config PHYSICAL_START
10974 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
10975 default "0x1000000"
10976+ range 0x400000 0x40000000
10977 ---help---
10978 This gives the physical address where the kernel is loaded.
10979
10980@@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
10981 hex
10982 prompt "Alignment value to which kernel should be aligned" if X86_32
10983 default "0x1000000"
10984+ range 0x400000 0x1000000 if PAX_KERNEXEC
10985 range 0x2000 0x1000000
10986 ---help---
10987 This value puts the alignment restrictions on physical address
10988@@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
10989 Say N if you want to disable CPU hotplug.
10990
10991 config COMPAT_VDSO
10992- def_bool y
10993+ def_bool n
10994 prompt "Compat VDSO support"
10995 depends on X86_32 || IA32_EMULATION
10996+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
10997 ---help---
10998 Map the 32-bit VDSO to the predictable old-style address too.
10999 ---help---
11000diff -urNp linux-2.6.32.41/arch/x86/Kconfig.cpu linux-2.6.32.41/arch/x86/Kconfig.cpu
11001--- linux-2.6.32.41/arch/x86/Kconfig.cpu 2011-03-27 14:31:47.000000000 -0400
11002+++ linux-2.6.32.41/arch/x86/Kconfig.cpu 2011-04-17 15:56:46.000000000 -0400
11003@@ -340,7 +340,7 @@ config X86_PPRO_FENCE
11004
11005 config X86_F00F_BUG
11006 def_bool y
11007- depends on M586MMX || M586TSC || M586 || M486 || M386
11008+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
11009
11010 config X86_WP_WORKS_OK
11011 def_bool y
11012@@ -360,7 +360,7 @@ config X86_POPAD_OK
11013
11014 config X86_ALIGNMENT_16
11015 def_bool y
11016- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11017+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11018
11019 config X86_INTEL_USERCOPY
11020 def_bool y
11021@@ -406,7 +406,7 @@ config X86_CMPXCHG64
11022 # generates cmov.
11023 config X86_CMOV
11024 def_bool y
11025- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11026+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11027
11028 config X86_MINIMUM_CPU_FAMILY
11029 int
11030diff -urNp linux-2.6.32.41/arch/x86/Kconfig.debug linux-2.6.32.41/arch/x86/Kconfig.debug
11031--- linux-2.6.32.41/arch/x86/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
11032+++ linux-2.6.32.41/arch/x86/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
11033@@ -99,7 +99,7 @@ config X86_PTDUMP
11034 config DEBUG_RODATA
11035 bool "Write protect kernel read-only data structures"
11036 default y
11037- depends on DEBUG_KERNEL
11038+ depends on DEBUG_KERNEL && BROKEN
11039 ---help---
11040 Mark the kernel read-only data as write-protected in the pagetables,
11041 in order to catch accidental (and incorrect) writes to such const
11042diff -urNp linux-2.6.32.41/arch/x86/kernel/acpi/realmode/wakeup.S linux-2.6.32.41/arch/x86/kernel/acpi/realmode/wakeup.S
11043--- linux-2.6.32.41/arch/x86/kernel/acpi/realmode/wakeup.S 2011-03-27 14:31:47.000000000 -0400
11044+++ linux-2.6.32.41/arch/x86/kernel/acpi/realmode/wakeup.S 2011-04-17 15:56:46.000000000 -0400
11045@@ -104,7 +104,7 @@ _start:
11046 movl %eax, %ecx
11047 orl %edx, %ecx
11048 jz 1f
11049- movl $0xc0000080, %ecx
11050+ mov $MSR_EFER, %ecx
11051 wrmsr
11052 1:
11053
11054diff -urNp linux-2.6.32.41/arch/x86/kernel/acpi/sleep.c linux-2.6.32.41/arch/x86/kernel/acpi/sleep.c
11055--- linux-2.6.32.41/arch/x86/kernel/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
11056+++ linux-2.6.32.41/arch/x86/kernel/acpi/sleep.c 2011-04-17 15:56:46.000000000 -0400
11057@@ -11,11 +11,12 @@
11058 #include <linux/cpumask.h>
11059 #include <asm/segment.h>
11060 #include <asm/desc.h>
11061+#include <asm/e820.h>
11062
11063 #include "realmode/wakeup.h"
11064 #include "sleep.h"
11065
11066-unsigned long acpi_wakeup_address;
11067+unsigned long acpi_wakeup_address = 0x2000;
11068 unsigned long acpi_realmode_flags;
11069
11070 /* address in low memory of the wakeup routine. */
11071@@ -99,8 +100,12 @@ int acpi_save_state_mem(void)
11072 header->trampoline_segment = setup_trampoline() >> 4;
11073 #ifdef CONFIG_SMP
11074 stack_start.sp = temp_stack + sizeof(temp_stack);
11075+
11076+ pax_open_kernel();
11077 early_gdt_descr.address =
11078 (unsigned long)get_cpu_gdt_table(smp_processor_id());
11079+ pax_close_kernel();
11080+
11081 initial_gs = per_cpu_offset(smp_processor_id());
11082 #endif
11083 initial_code = (unsigned long)wakeup_long64;
11084@@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
11085 return;
11086 }
11087
11088- acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
11089-
11090- if (!acpi_realmode) {
11091- printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
11092- return;
11093- }
11094-
11095- acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
11096+ reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
11097+ acpi_realmode = (unsigned long)__va(acpi_wakeup_address);
11098 }
11099
11100
11101diff -urNp linux-2.6.32.41/arch/x86/kernel/acpi/wakeup_32.S linux-2.6.32.41/arch/x86/kernel/acpi/wakeup_32.S
11102--- linux-2.6.32.41/arch/x86/kernel/acpi/wakeup_32.S 2011-03-27 14:31:47.000000000 -0400
11103+++ linux-2.6.32.41/arch/x86/kernel/acpi/wakeup_32.S 2011-04-17 15:56:46.000000000 -0400
11104@@ -30,13 +30,11 @@ wakeup_pmode_return:
11105 # and restore the stack ... but you need gdt for this to work
11106 movl saved_context_esp, %esp
11107
11108- movl %cs:saved_magic, %eax
11109- cmpl $0x12345678, %eax
11110+ cmpl $0x12345678, saved_magic
11111 jne bogus_magic
11112
11113 # jump to place where we left off
11114- movl saved_eip, %eax
11115- jmp *%eax
11116+ jmp *(saved_eip)
11117
11118 bogus_magic:
11119 jmp bogus_magic
11120diff -urNp linux-2.6.32.41/arch/x86/kernel/alternative.c linux-2.6.32.41/arch/x86/kernel/alternative.c
11121--- linux-2.6.32.41/arch/x86/kernel/alternative.c 2011-03-27 14:31:47.000000000 -0400
11122+++ linux-2.6.32.41/arch/x86/kernel/alternative.c 2011-04-17 15:56:46.000000000 -0400
11123@@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(str
11124
11125 BUG_ON(p->len > MAX_PATCH_LEN);
11126 /* prep the buffer with the original instructions */
11127- memcpy(insnbuf, p->instr, p->len);
11128+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11129 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11130 (unsigned long)p->instr, p->len);
11131
11132@@ -475,7 +475,7 @@ void __init alternative_instructions(voi
11133 if (smp_alt_once)
11134 free_init_pages("SMP alternatives",
11135 (unsigned long)__smp_locks,
11136- (unsigned long)__smp_locks_end);
11137+ PAGE_ALIGN((unsigned long)__smp_locks_end));
11138
11139 restart_nmi();
11140 }
11141@@ -492,13 +492,17 @@ void __init alternative_instructions(voi
11142 * instructions. And on the local CPU you need to be protected again NMI or MCE
11143 * handlers seeing an inconsistent instruction while you patch.
11144 */
11145-static void *__init_or_module text_poke_early(void *addr, const void *opcode,
11146+static void *__kprobes text_poke_early(void *addr, const void *opcode,
11147 size_t len)
11148 {
11149 unsigned long flags;
11150 local_irq_save(flags);
11151- memcpy(addr, opcode, len);
11152+
11153+ pax_open_kernel();
11154+ memcpy(ktla_ktva(addr), opcode, len);
11155 sync_core();
11156+ pax_close_kernel();
11157+
11158 local_irq_restore(flags);
11159 /* Could also do a CLFLUSH here to speed up CPU recovery; but
11160 that causes hangs on some VIA CPUs. */
11161@@ -520,35 +524,21 @@ static void *__init_or_module text_poke_
11162 */
11163 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11164 {
11165- unsigned long flags;
11166- char *vaddr;
11167+ unsigned char *vaddr = ktla_ktva(addr);
11168 struct page *pages[2];
11169- int i;
11170+ size_t i;
11171
11172 if (!core_kernel_text((unsigned long)addr)) {
11173- pages[0] = vmalloc_to_page(addr);
11174- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
11175+ pages[0] = vmalloc_to_page(vaddr);
11176+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11177 } else {
11178- pages[0] = virt_to_page(addr);
11179+ pages[0] = virt_to_page(vaddr);
11180 WARN_ON(!PageReserved(pages[0]));
11181- pages[1] = virt_to_page(addr + PAGE_SIZE);
11182+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11183 }
11184 BUG_ON(!pages[0]);
11185- local_irq_save(flags);
11186- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11187- if (pages[1])
11188- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11189- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11190- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11191- clear_fixmap(FIX_TEXT_POKE0);
11192- if (pages[1])
11193- clear_fixmap(FIX_TEXT_POKE1);
11194- local_flush_tlb();
11195- sync_core();
11196- /* Could also do a CLFLUSH here to speed up CPU recovery; but
11197- that causes hangs on some VIA CPUs. */
11198+ text_poke_early(addr, opcode, len);
11199 for (i = 0; i < len; i++)
11200- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11201- local_irq_restore(flags);
11202+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
11203 return addr;
11204 }
11205diff -urNp linux-2.6.32.41/arch/x86/kernel/amd_iommu.c linux-2.6.32.41/arch/x86/kernel/amd_iommu.c
11206--- linux-2.6.32.41/arch/x86/kernel/amd_iommu.c 2011-03-27 14:31:47.000000000 -0400
11207+++ linux-2.6.32.41/arch/x86/kernel/amd_iommu.c 2011-04-17 15:56:46.000000000 -0400
11208@@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(
11209 }
11210 }
11211
11212-static struct dma_map_ops amd_iommu_dma_ops = {
11213+static const struct dma_map_ops amd_iommu_dma_ops = {
11214 .alloc_coherent = alloc_coherent,
11215 .free_coherent = free_coherent,
11216 .map_page = map_page,
11217diff -urNp linux-2.6.32.41/arch/x86/kernel/apic/apic.c linux-2.6.32.41/arch/x86/kernel/apic/apic.c
11218--- linux-2.6.32.41/arch/x86/kernel/apic/apic.c 2011-03-27 14:31:47.000000000 -0400
11219+++ linux-2.6.32.41/arch/x86/kernel/apic/apic.c 2011-05-16 21:46:57.000000000 -0400
11220@@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs
11221 apic_write(APIC_ESR, 0);
11222 v1 = apic_read(APIC_ESR);
11223 ack_APIC_irq();
11224- atomic_inc(&irq_err_count);
11225+ atomic_inc_unchecked(&irq_err_count);
11226
11227 /*
11228 * Here is what the APIC error bits mean:
11229@@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(vo
11230 u16 *bios_cpu_apicid;
11231 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
11232
11233+ pax_track_stack();
11234+
11235 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
11236 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
11237
11238diff -urNp linux-2.6.32.41/arch/x86/kernel/apic/io_apic.c linux-2.6.32.41/arch/x86/kernel/apic/io_apic.c
11239--- linux-2.6.32.41/arch/x86/kernel/apic/io_apic.c 2011-03-27 14:31:47.000000000 -0400
11240+++ linux-2.6.32.41/arch/x86/kernel/apic/io_apic.c 2011-05-04 17:56:20.000000000 -0400
11241@@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapi
11242 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
11243 GFP_ATOMIC);
11244 if (!ioapic_entries)
11245- return 0;
11246+ return NULL;
11247
11248 for (apic = 0; apic < nr_ioapics; apic++) {
11249 ioapic_entries[apic] =
11250@@ -733,7 +733,7 @@ nomem:
11251 kfree(ioapic_entries[apic]);
11252 kfree(ioapic_entries);
11253
11254- return 0;
11255+ return NULL;
11256 }
11257
11258 /*
11259@@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
11260 }
11261 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
11262
11263-void lock_vector_lock(void)
11264+void lock_vector_lock(void) __acquires(vector_lock)
11265 {
11266 /* Used to the online set of cpus does not change
11267 * during assign_irq_vector.
11268@@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
11269 spin_lock(&vector_lock);
11270 }
11271
11272-void unlock_vector_lock(void)
11273+void unlock_vector_lock(void) __releases(vector_lock)
11274 {
11275 spin_unlock(&vector_lock);
11276 }
11277@@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int i
11278 ack_APIC_irq();
11279 }
11280
11281-atomic_t irq_mis_count;
11282+atomic_unchecked_t irq_mis_count;
11283
11284 static void ack_apic_level(unsigned int irq)
11285 {
11286@@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int
11287
11288 /* Tail end of version 0x11 I/O APIC bug workaround */
11289 if (!(v & (1 << (i & 0x1f)))) {
11290- atomic_inc(&irq_mis_count);
11291+ atomic_inc_unchecked(&irq_mis_count);
11292 spin_lock(&ioapic_lock);
11293 __mask_and_edge_IO_APIC_irq(cfg);
11294 __unmask_and_level_IO_APIC_irq(cfg);
11295diff -urNp linux-2.6.32.41/arch/x86/kernel/apm_32.c linux-2.6.32.41/arch/x86/kernel/apm_32.c
11296--- linux-2.6.32.41/arch/x86/kernel/apm_32.c 2011-03-27 14:31:47.000000000 -0400
11297+++ linux-2.6.32.41/arch/x86/kernel/apm_32.c 2011-04-23 12:56:10.000000000 -0400
11298@@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
11299 * This is for buggy BIOS's that refer to (real mode) segment 0x40
11300 * even though they are called in protected mode.
11301 */
11302-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
11303+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
11304 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
11305
11306 static const char driver_version[] = "1.16ac"; /* no spaces */
11307@@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
11308 BUG_ON(cpu != 0);
11309 gdt = get_cpu_gdt_table(cpu);
11310 save_desc_40 = gdt[0x40 / 8];
11311+
11312+ pax_open_kernel();
11313 gdt[0x40 / 8] = bad_bios_desc;
11314+ pax_close_kernel();
11315
11316 apm_irq_save(flags);
11317 APM_DO_SAVE_SEGS;
11318@@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
11319 &call->esi);
11320 APM_DO_RESTORE_SEGS;
11321 apm_irq_restore(flags);
11322+
11323+ pax_open_kernel();
11324 gdt[0x40 / 8] = save_desc_40;
11325+ pax_close_kernel();
11326+
11327 put_cpu();
11328
11329 return call->eax & 0xff;
11330@@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void
11331 BUG_ON(cpu != 0);
11332 gdt = get_cpu_gdt_table(cpu);
11333 save_desc_40 = gdt[0x40 / 8];
11334+
11335+ pax_open_kernel();
11336 gdt[0x40 / 8] = bad_bios_desc;
11337+ pax_close_kernel();
11338
11339 apm_irq_save(flags);
11340 APM_DO_SAVE_SEGS;
11341@@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void
11342 &call->eax);
11343 APM_DO_RESTORE_SEGS;
11344 apm_irq_restore(flags);
11345+
11346+ pax_open_kernel();
11347 gdt[0x40 / 8] = save_desc_40;
11348+ pax_close_kernel();
11349+
11350 put_cpu();
11351 return error;
11352 }
11353@@ -975,7 +989,7 @@ recalc:
11354
11355 static void apm_power_off(void)
11356 {
11357- unsigned char po_bios_call[] = {
11358+ const unsigned char po_bios_call[] = {
11359 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
11360 0x8e, 0xd0, /* movw ax,ss */
11361 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
11362@@ -2357,12 +2371,15 @@ static int __init apm_init(void)
11363 * code to that CPU.
11364 */
11365 gdt = get_cpu_gdt_table(0);
11366+
11367+ pax_open_kernel();
11368 set_desc_base(&gdt[APM_CS >> 3],
11369 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
11370 set_desc_base(&gdt[APM_CS_16 >> 3],
11371 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
11372 set_desc_base(&gdt[APM_DS >> 3],
11373 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
11374+ pax_close_kernel();
11375
11376 proc_create("apm", 0, NULL, &apm_file_ops);
11377
11378diff -urNp linux-2.6.32.41/arch/x86/kernel/asm-offsets_32.c linux-2.6.32.41/arch/x86/kernel/asm-offsets_32.c
11379--- linux-2.6.32.41/arch/x86/kernel/asm-offsets_32.c 2011-03-27 14:31:47.000000000 -0400
11380+++ linux-2.6.32.41/arch/x86/kernel/asm-offsets_32.c 2011-05-16 21:46:57.000000000 -0400
11381@@ -51,7 +51,6 @@ void foo(void)
11382 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
11383 BLANK();
11384
11385- OFFSET(TI_task, thread_info, task);
11386 OFFSET(TI_exec_domain, thread_info, exec_domain);
11387 OFFSET(TI_flags, thread_info, flags);
11388 OFFSET(TI_status, thread_info, status);
11389@@ -60,6 +59,8 @@ void foo(void)
11390 OFFSET(TI_restart_block, thread_info, restart_block);
11391 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
11392 OFFSET(TI_cpu, thread_info, cpu);
11393+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
11394+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11395 BLANK();
11396
11397 OFFSET(GDS_size, desc_ptr, size);
11398@@ -99,6 +100,7 @@ void foo(void)
11399
11400 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11401 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
11402+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
11403 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
11404 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
11405 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
11406@@ -115,6 +117,11 @@ void foo(void)
11407 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
11408 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11409 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11410+
11411+#ifdef CONFIG_PAX_KERNEXEC
11412+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11413+#endif
11414+
11415 #endif
11416
11417 #ifdef CONFIG_XEN
11418diff -urNp linux-2.6.32.41/arch/x86/kernel/asm-offsets_64.c linux-2.6.32.41/arch/x86/kernel/asm-offsets_64.c
11419--- linux-2.6.32.41/arch/x86/kernel/asm-offsets_64.c 2011-03-27 14:31:47.000000000 -0400
11420+++ linux-2.6.32.41/arch/x86/kernel/asm-offsets_64.c 2011-05-16 21:46:57.000000000 -0400
11421@@ -44,6 +44,8 @@ int main(void)
11422 ENTRY(addr_limit);
11423 ENTRY(preempt_count);
11424 ENTRY(status);
11425+ ENTRY(lowest_stack);
11426+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11427 #ifdef CONFIG_IA32_EMULATION
11428 ENTRY(sysenter_return);
11429 #endif
11430@@ -63,6 +65,18 @@ int main(void)
11431 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11432 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
11433 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
11434+
11435+#ifdef CONFIG_PAX_KERNEXEC
11436+ OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11437+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11438+#endif
11439+
11440+#ifdef CONFIG_PAX_MEMORY_UDEREF
11441+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
11442+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
11443+ OFFSET(PV_MMU_set_pgd, pv_mmu_ops, set_pgd);
11444+#endif
11445+
11446 #endif
11447
11448
11449@@ -115,6 +129,7 @@ int main(void)
11450 ENTRY(cr8);
11451 BLANK();
11452 #undef ENTRY
11453+ DEFINE(TSS_size, sizeof(struct tss_struct));
11454 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
11455 BLANK();
11456 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
11457@@ -130,6 +145,7 @@ int main(void)
11458
11459 BLANK();
11460 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11461+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
11462 #ifdef CONFIG_XEN
11463 BLANK();
11464 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
11465diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/amd.c linux-2.6.32.41/arch/x86/kernel/cpu/amd.c
11466--- linux-2.6.32.41/arch/x86/kernel/cpu/amd.c 2011-05-23 16:56:59.000000000 -0400
11467+++ linux-2.6.32.41/arch/x86/kernel/cpu/amd.c 2011-05-23 16:57:13.000000000 -0400
11468@@ -596,7 +596,7 @@ static unsigned int __cpuinit amd_size_c
11469 unsigned int size)
11470 {
11471 /* AMD errata T13 (order #21922) */
11472- if ((c->x86 == 6)) {
11473+ if (c->x86 == 6) {
11474 /* Duron Rev A0 */
11475 if (c->x86_model == 3 && c->x86_mask == 0)
11476 size = 64;
11477diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/common.c linux-2.6.32.41/arch/x86/kernel/cpu/common.c
11478--- linux-2.6.32.41/arch/x86/kernel/cpu/common.c 2011-03-27 14:31:47.000000000 -0400
11479+++ linux-2.6.32.41/arch/x86/kernel/cpu/common.c 2011-05-11 18:25:15.000000000 -0400
11480@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
11481
11482 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
11483
11484-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
11485-#ifdef CONFIG_X86_64
11486- /*
11487- * We need valid kernel segments for data and code in long mode too
11488- * IRET will check the segment types kkeil 2000/10/28
11489- * Also sysret mandates a special GDT layout
11490- *
11491- * TLS descriptors are currently at a different place compared to i386.
11492- * Hopefully nobody expects them at a fixed place (Wine?)
11493- */
11494- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
11495- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
11496- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
11497- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
11498- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
11499- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
11500-#else
11501- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
11502- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11503- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
11504- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
11505- /*
11506- * Segments used for calling PnP BIOS have byte granularity.
11507- * They code segments and data segments have fixed 64k limits,
11508- * the transfer segment sizes are set at run time.
11509- */
11510- /* 32-bit code */
11511- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11512- /* 16-bit code */
11513- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11514- /* 16-bit data */
11515- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
11516- /* 16-bit data */
11517- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
11518- /* 16-bit data */
11519- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
11520- /*
11521- * The APM segments have byte granularity and their bases
11522- * are set at run time. All have 64k limits.
11523- */
11524- /* 32-bit code */
11525- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11526- /* 16-bit code */
11527- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11528- /* data */
11529- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
11530-
11531- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11532- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11533- GDT_STACK_CANARY_INIT
11534-#endif
11535-} };
11536-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
11537-
11538 static int __init x86_xsave_setup(char *s)
11539 {
11540 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
11541@@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
11542 {
11543 struct desc_ptr gdt_descr;
11544
11545- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
11546+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
11547 gdt_descr.size = GDT_SIZE - 1;
11548 load_gdt(&gdt_descr);
11549 /* Reload the per-cpu base */
11550@@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struc
11551 /* Filter out anything that depends on CPUID levels we don't have */
11552 filter_cpuid_features(c, true);
11553
11554+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
11555+ setup_clear_cpu_cap(X86_FEATURE_SEP);
11556+#endif
11557+
11558 /* If the model name is still unset, do table lookup. */
11559 if (!c->x86_model_id[0]) {
11560 const char *p;
11561@@ -980,6 +930,9 @@ static __init int setup_disablecpuid(cha
11562 }
11563 __setup("clearcpuid=", setup_disablecpuid);
11564
11565+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
11566+EXPORT_PER_CPU_SYMBOL(current_tinfo);
11567+
11568 #ifdef CONFIG_X86_64
11569 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
11570
11571@@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
11572 EXPORT_PER_CPU_SYMBOL(current_task);
11573
11574 DEFINE_PER_CPU(unsigned long, kernel_stack) =
11575- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
11576+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
11577 EXPORT_PER_CPU_SYMBOL(kernel_stack);
11578
11579 DEFINE_PER_CPU(char *, irq_stack_ptr) =
11580@@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(str
11581 {
11582 memset(regs, 0, sizeof(struct pt_regs));
11583 regs->fs = __KERNEL_PERCPU;
11584- regs->gs = __KERNEL_STACK_CANARY;
11585+ savesegment(gs, regs->gs);
11586
11587 return regs;
11588 }
11589@@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
11590 int i;
11591
11592 cpu = stack_smp_processor_id();
11593- t = &per_cpu(init_tss, cpu);
11594+ t = init_tss + cpu;
11595 orig_ist = &per_cpu(orig_ist, cpu);
11596
11597 #ifdef CONFIG_NUMA
11598@@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
11599 switch_to_new_gdt(cpu);
11600 loadsegment(fs, 0);
11601
11602- load_idt((const struct desc_ptr *)&idt_descr);
11603+ load_idt(&idt_descr);
11604
11605 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
11606 syscall_init();
11607@@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
11608 wrmsrl(MSR_KERNEL_GS_BASE, 0);
11609 barrier();
11610
11611- check_efer();
11612 if (cpu != 0)
11613 enable_x2apic();
11614
11615@@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
11616 {
11617 int cpu = smp_processor_id();
11618 struct task_struct *curr = current;
11619- struct tss_struct *t = &per_cpu(init_tss, cpu);
11620+ struct tss_struct *t = init_tss + cpu;
11621 struct thread_struct *thread = &curr->thread;
11622
11623 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
11624diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/intel.c linux-2.6.32.41/arch/x86/kernel/cpu/intel.c
11625--- linux-2.6.32.41/arch/x86/kernel/cpu/intel.c 2011-03-27 14:31:47.000000000 -0400
11626+++ linux-2.6.32.41/arch/x86/kernel/cpu/intel.c 2011-04-17 15:56:46.000000000 -0400
11627@@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug
11628 * Update the IDT descriptor and reload the IDT so that
11629 * it uses the read-only mapped virtual address.
11630 */
11631- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
11632+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
11633 load_idt(&idt_descr);
11634 }
11635 #endif
11636diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/intel_cacheinfo.c linux-2.6.32.41/arch/x86/kernel/cpu/intel_cacheinfo.c
11637--- linux-2.6.32.41/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
11638+++ linux-2.6.32.41/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-04-17 15:56:46.000000000 -0400
11639@@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kob
11640 return ret;
11641 }
11642
11643-static struct sysfs_ops sysfs_ops = {
11644+static const struct sysfs_ops sysfs_ops = {
11645 .show = show,
11646 .store = store,
11647 };
11648diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/Makefile linux-2.6.32.41/arch/x86/kernel/cpu/Makefile
11649--- linux-2.6.32.41/arch/x86/kernel/cpu/Makefile 2011-03-27 14:31:47.000000000 -0400
11650+++ linux-2.6.32.41/arch/x86/kernel/cpu/Makefile 2011-04-17 15:56:46.000000000 -0400
11651@@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
11652 CFLAGS_REMOVE_common.o = -pg
11653 endif
11654
11655-# Make sure load_percpu_segment has no stackprotector
11656-nostackp := $(call cc-option, -fno-stack-protector)
11657-CFLAGS_common.o := $(nostackp)
11658-
11659 obj-y := intel_cacheinfo.o addon_cpuid_features.o
11660 obj-y += proc.o capflags.o powerflags.o common.o
11661 obj-y += vmware.o hypervisor.o sched.o
11662diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/mcheck/mce_amd.c linux-2.6.32.41/arch/x86/kernel/cpu/mcheck/mce_amd.c
11663--- linux-2.6.32.41/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:56:59.000000000 -0400
11664+++ linux-2.6.32.41/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:57:13.000000000 -0400
11665@@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kob
11666 return ret;
11667 }
11668
11669-static struct sysfs_ops threshold_ops = {
11670+static const struct sysfs_ops threshold_ops = {
11671 .show = show,
11672 .store = store,
11673 };
11674diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/mcheck/mce.c linux-2.6.32.41/arch/x86/kernel/cpu/mcheck/mce.c
11675--- linux-2.6.32.41/arch/x86/kernel/cpu/mcheck/mce.c 2011-03-27 14:31:47.000000000 -0400
11676+++ linux-2.6.32.41/arch/x86/kernel/cpu/mcheck/mce.c 2011-05-04 17:56:20.000000000 -0400
11677@@ -43,6 +43,7 @@
11678 #include <asm/ipi.h>
11679 #include <asm/mce.h>
11680 #include <asm/msr.h>
11681+#include <asm/local.h>
11682
11683 #include "mce-internal.h"
11684
11685@@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
11686 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
11687 m->cs, m->ip);
11688
11689- if (m->cs == __KERNEL_CS)
11690+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
11691 print_symbol("{%s}", m->ip);
11692 pr_cont("\n");
11693 }
11694@@ -221,10 +222,10 @@ static void print_mce_tail(void)
11695
11696 #define PANIC_TIMEOUT 5 /* 5 seconds */
11697
11698-static atomic_t mce_paniced;
11699+static atomic_unchecked_t mce_paniced;
11700
11701 static int fake_panic;
11702-static atomic_t mce_fake_paniced;
11703+static atomic_unchecked_t mce_fake_paniced;
11704
11705 /* Panic in progress. Enable interrupts and wait for final IPI */
11706 static void wait_for_panic(void)
11707@@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct
11708 /*
11709 * Make sure only one CPU runs in machine check panic
11710 */
11711- if (atomic_inc_return(&mce_paniced) > 1)
11712+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
11713 wait_for_panic();
11714 barrier();
11715
11716@@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct
11717 console_verbose();
11718 } else {
11719 /* Don't log too much for fake panic */
11720- if (atomic_inc_return(&mce_fake_paniced) > 1)
11721+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
11722 return;
11723 }
11724 print_mce_head();
11725@@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
11726 * might have been modified by someone else.
11727 */
11728 rmb();
11729- if (atomic_read(&mce_paniced))
11730+ if (atomic_read_unchecked(&mce_paniced))
11731 wait_for_panic();
11732 if (!monarch_timeout)
11733 goto out;
11734@@ -1429,14 +1430,14 @@ void __cpuinit mcheck_init(struct cpuinf
11735 */
11736
11737 static DEFINE_SPINLOCK(mce_state_lock);
11738-static int open_count; /* #times opened */
11739+static local_t open_count; /* #times opened */
11740 static int open_exclu; /* already open exclusive? */
11741
11742 static int mce_open(struct inode *inode, struct file *file)
11743 {
11744 spin_lock(&mce_state_lock);
11745
11746- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
11747+ if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
11748 spin_unlock(&mce_state_lock);
11749
11750 return -EBUSY;
11751@@ -1444,7 +1445,7 @@ static int mce_open(struct inode *inode,
11752
11753 if (file->f_flags & O_EXCL)
11754 open_exclu = 1;
11755- open_count++;
11756+ local_inc(&open_count);
11757
11758 spin_unlock(&mce_state_lock);
11759
11760@@ -1455,7 +1456,7 @@ static int mce_release(struct inode *ino
11761 {
11762 spin_lock(&mce_state_lock);
11763
11764- open_count--;
11765+ local_dec(&open_count);
11766 open_exclu = 0;
11767
11768 spin_unlock(&mce_state_lock);
11769@@ -2082,7 +2083,7 @@ struct dentry *mce_get_debugfs_dir(void)
11770 static void mce_reset(void)
11771 {
11772 cpu_missing = 0;
11773- atomic_set(&mce_fake_paniced, 0);
11774+ atomic_set_unchecked(&mce_fake_paniced, 0);
11775 atomic_set(&mce_executing, 0);
11776 atomic_set(&mce_callin, 0);
11777 atomic_set(&global_nwo, 0);
11778diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/amd.c linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/amd.c
11779--- linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/amd.c 2011-03-27 14:31:47.000000000 -0400
11780+++ linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/amd.c 2011-04-17 15:56:46.000000000 -0400
11781@@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base
11782 return 0;
11783 }
11784
11785-static struct mtrr_ops amd_mtrr_ops = {
11786+static const struct mtrr_ops amd_mtrr_ops = {
11787 .vendor = X86_VENDOR_AMD,
11788 .set = amd_set_mtrr,
11789 .get = amd_get_mtrr,
11790diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/centaur.c linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/centaur.c
11791--- linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/centaur.c 2011-03-27 14:31:47.000000000 -0400
11792+++ linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/centaur.c 2011-04-17 15:56:46.000000000 -0400
11793@@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long
11794 return 0;
11795 }
11796
11797-static struct mtrr_ops centaur_mtrr_ops = {
11798+static const struct mtrr_ops centaur_mtrr_ops = {
11799 .vendor = X86_VENDOR_CENTAUR,
11800 .set = centaur_set_mcr,
11801 .get = centaur_get_mcr,
11802diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/cyrix.c linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/cyrix.c
11803--- linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-03-27 14:31:47.000000000 -0400
11804+++ linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-04-17 15:56:46.000000000 -0400
11805@@ -265,7 +265,7 @@ static void cyrix_set_all(void)
11806 post_set();
11807 }
11808
11809-static struct mtrr_ops cyrix_mtrr_ops = {
11810+static const struct mtrr_ops cyrix_mtrr_ops = {
11811 .vendor = X86_VENDOR_CYRIX,
11812 .set_all = cyrix_set_all,
11813 .set = cyrix_set_arr,
11814diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/generic.c linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/generic.c
11815--- linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/generic.c 2011-03-27 14:31:47.000000000 -0400
11816+++ linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/generic.c 2011-04-23 12:56:10.000000000 -0400
11817@@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
11818 /*
11819 * Generic structure...
11820 */
11821-struct mtrr_ops generic_mtrr_ops = {
11822+const struct mtrr_ops generic_mtrr_ops = {
11823 .use_intel_if = 1,
11824 .set_all = generic_set_all,
11825 .get = generic_get_mtrr,
11826diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/main.c linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/main.c
11827--- linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:00:52.000000000 -0400
11828+++ linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:03:05.000000000 -0400
11829@@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
11830 u64 size_or_mask, size_and_mask;
11831 static bool mtrr_aps_delayed_init;
11832
11833-static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
11834+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
11835
11836-struct mtrr_ops *mtrr_if;
11837+const struct mtrr_ops *mtrr_if;
11838
11839 static void set_mtrr(unsigned int reg, unsigned long base,
11840 unsigned long size, mtrr_type type);
11841
11842-void set_mtrr_ops(struct mtrr_ops *ops)
11843+void set_mtrr_ops(const struct mtrr_ops *ops)
11844 {
11845 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
11846 mtrr_ops[ops->vendor] = ops;
11847diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/mtrr.h linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/mtrr.h
11848--- linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-03-27 14:31:47.000000000 -0400
11849+++ linux-2.6.32.41/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-04-17 15:56:46.000000000 -0400
11850@@ -12,19 +12,19 @@
11851 extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
11852
11853 struct mtrr_ops {
11854- u32 vendor;
11855- u32 use_intel_if;
11856- void (*set)(unsigned int reg, unsigned long base,
11857+ const u32 vendor;
11858+ const u32 use_intel_if;
11859+ void (* const set)(unsigned int reg, unsigned long base,
11860 unsigned long size, mtrr_type type);
11861- void (*set_all)(void);
11862+ void (* const set_all)(void);
11863
11864- void (*get)(unsigned int reg, unsigned long *base,
11865+ void (* const get)(unsigned int reg, unsigned long *base,
11866 unsigned long *size, mtrr_type *type);
11867- int (*get_free_region)(unsigned long base, unsigned long size,
11868+ int (* const get_free_region)(unsigned long base, unsigned long size,
11869 int replace_reg);
11870- int (*validate_add_page)(unsigned long base, unsigned long size,
11871+ int (* const validate_add_page)(unsigned long base, unsigned long size,
11872 unsigned int type);
11873- int (*have_wrcomb)(void);
11874+ int (* const have_wrcomb)(void);
11875 };
11876
11877 extern int generic_get_free_region(unsigned long base, unsigned long size,
11878@@ -32,7 +32,7 @@ extern int generic_get_free_region(unsig
11879 extern int generic_validate_add_page(unsigned long base, unsigned long size,
11880 unsigned int type);
11881
11882-extern struct mtrr_ops generic_mtrr_ops;
11883+extern const struct mtrr_ops generic_mtrr_ops;
11884
11885 extern int positive_have_wrcomb(void);
11886
11887@@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int in
11888 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
11889 void get_mtrr_state(void);
11890
11891-extern void set_mtrr_ops(struct mtrr_ops *ops);
11892+extern void set_mtrr_ops(const struct mtrr_ops *ops);
11893
11894 extern u64 size_or_mask, size_and_mask;
11895-extern struct mtrr_ops *mtrr_if;
11896+extern const struct mtrr_ops *mtrr_if;
11897
11898 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
11899 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
11900diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/perfctr-watchdog.c linux-2.6.32.41/arch/x86/kernel/cpu/perfctr-watchdog.c
11901--- linux-2.6.32.41/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-03-27 14:31:47.000000000 -0400
11902+++ linux-2.6.32.41/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-04-17 15:56:46.000000000 -0400
11903@@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
11904
11905 /* Interface defining a CPU specific perfctr watchdog */
11906 struct wd_ops {
11907- int (*reserve)(void);
11908- void (*unreserve)(void);
11909- int (*setup)(unsigned nmi_hz);
11910- void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
11911- void (*stop)(void);
11912+ int (* const reserve)(void);
11913+ void (* const unreserve)(void);
11914+ int (* const setup)(unsigned nmi_hz);
11915+ void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
11916+ void (* const stop)(void);
11917 unsigned perfctr;
11918 unsigned evntsel;
11919 u64 checkbit;
11920@@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
11921 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
11922 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
11923
11924+/* cannot be const */
11925 static struct wd_ops intel_arch_wd_ops;
11926
11927 static int setup_intel_arch_watchdog(unsigned nmi_hz)
11928@@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(uns
11929 return 1;
11930 }
11931
11932+/* cannot be const */
11933 static struct wd_ops intel_arch_wd_ops __read_mostly = {
11934 .reserve = single_msr_reserve,
11935 .unreserve = single_msr_unreserve,
11936diff -urNp linux-2.6.32.41/arch/x86/kernel/cpu/perf_event.c linux-2.6.32.41/arch/x86/kernel/cpu/perf_event.c
11937--- linux-2.6.32.41/arch/x86/kernel/cpu/perf_event.c 2011-03-27 14:31:47.000000000 -0400
11938+++ linux-2.6.32.41/arch/x86/kernel/cpu/perf_event.c 2011-05-04 17:56:20.000000000 -0400
11939@@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event
11940 * count to the generic event atomically:
11941 */
11942 again:
11943- prev_raw_count = atomic64_read(&hwc->prev_count);
11944+ prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
11945 rdmsrl(hwc->event_base + idx, new_raw_count);
11946
11947- if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
11948+ if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
11949 new_raw_count) != prev_raw_count)
11950 goto again;
11951
11952@@ -741,7 +741,7 @@ again:
11953 delta = (new_raw_count << shift) - (prev_raw_count << shift);
11954 delta >>= shift;
11955
11956- atomic64_add(delta, &event->count);
11957+ atomic64_add_unchecked(delta, &event->count);
11958 atomic64_sub(delta, &hwc->period_left);
11959
11960 return new_raw_count;
11961@@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_ev
11962 * The hw event starts counting from this event offset,
11963 * mark it to be able to extra future deltas:
11964 */
11965- atomic64_set(&hwc->prev_count, (u64)-left);
11966+ atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
11967
11968 err = checking_wrmsrl(hwc->event_base + idx,
11969 (u64)(-left) & x86_pmu.event_mask);
11970@@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs
11971 break;
11972
11973 callchain_store(entry, frame.return_address);
11974- fp = frame.next_frame;
11975+ fp = (__force const void __user *)frame.next_frame;
11976 }
11977 }
11978
11979diff -urNp linux-2.6.32.41/arch/x86/kernel/crash.c linux-2.6.32.41/arch/x86/kernel/crash.c
11980--- linux-2.6.32.41/arch/x86/kernel/crash.c 2011-03-27 14:31:47.000000000 -0400
11981+++ linux-2.6.32.41/arch/x86/kernel/crash.c 2011-04-17 15:56:46.000000000 -0400
11982@@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu,
11983 regs = args->regs;
11984
11985 #ifdef CONFIG_X86_32
11986- if (!user_mode_vm(regs)) {
11987+ if (!user_mode(regs)) {
11988 crash_fixup_ss_esp(&fixed_regs, regs);
11989 regs = &fixed_regs;
11990 }
11991diff -urNp linux-2.6.32.41/arch/x86/kernel/doublefault_32.c linux-2.6.32.41/arch/x86/kernel/doublefault_32.c
11992--- linux-2.6.32.41/arch/x86/kernel/doublefault_32.c 2011-03-27 14:31:47.000000000 -0400
11993+++ linux-2.6.32.41/arch/x86/kernel/doublefault_32.c 2011-04-17 15:56:46.000000000 -0400
11994@@ -11,7 +11,7 @@
11995
11996 #define DOUBLEFAULT_STACKSIZE (1024)
11997 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
11998-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
11999+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12000
12001 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12002
12003@@ -21,7 +21,7 @@ static void doublefault_fn(void)
12004 unsigned long gdt, tss;
12005
12006 store_gdt(&gdt_desc);
12007- gdt = gdt_desc.address;
12008+ gdt = (unsigned long)gdt_desc.address;
12009
12010 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12011
12012@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
12013 /* 0x2 bit is always set */
12014 .flags = X86_EFLAGS_SF | 0x2,
12015 .sp = STACK_START,
12016- .es = __USER_DS,
12017+ .es = __KERNEL_DS,
12018 .cs = __KERNEL_CS,
12019 .ss = __KERNEL_DS,
12020- .ds = __USER_DS,
12021+ .ds = __KERNEL_DS,
12022 .fs = __KERNEL_PERCPU,
12023
12024 .__cr3 = __pa_nodebug(swapper_pg_dir),
12025diff -urNp linux-2.6.32.41/arch/x86/kernel/dumpstack_32.c linux-2.6.32.41/arch/x86/kernel/dumpstack_32.c
12026--- linux-2.6.32.41/arch/x86/kernel/dumpstack_32.c 2011-03-27 14:31:47.000000000 -0400
12027+++ linux-2.6.32.41/arch/x86/kernel/dumpstack_32.c 2011-04-17 15:56:46.000000000 -0400
12028@@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task
12029 #endif
12030
12031 for (;;) {
12032- struct thread_info *context;
12033+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12034+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12035
12036- context = (struct thread_info *)
12037- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12038- bp = print_context_stack(context, stack, bp, ops,
12039- data, NULL, &graph);
12040-
12041- stack = (unsigned long *)context->previous_esp;
12042- if (!stack)
12043+ if (stack_start == task_stack_page(task))
12044 break;
12045+ stack = *(unsigned long **)stack_start;
12046 if (ops->stack(data, "IRQ") < 0)
12047 break;
12048 touch_nmi_watchdog();
12049@@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs
12050 * When in-kernel, we also print out the stack and code at the
12051 * time of the fault..
12052 */
12053- if (!user_mode_vm(regs)) {
12054+ if (!user_mode(regs)) {
12055 unsigned int code_prologue = code_bytes * 43 / 64;
12056 unsigned int code_len = code_bytes;
12057 unsigned char c;
12058 u8 *ip;
12059+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12060
12061 printk(KERN_EMERG "Stack:\n");
12062 show_stack_log_lvl(NULL, regs, &regs->sp,
12063@@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs
12064
12065 printk(KERN_EMERG "Code: ");
12066
12067- ip = (u8 *)regs->ip - code_prologue;
12068+ ip = (u8 *)regs->ip - code_prologue + cs_base;
12069 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12070 /* try starting at IP */
12071- ip = (u8 *)regs->ip;
12072+ ip = (u8 *)regs->ip + cs_base;
12073 code_len = code_len - code_prologue + 1;
12074 }
12075 for (i = 0; i < code_len; i++, ip++) {
12076@@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs
12077 printk(" Bad EIP value.");
12078 break;
12079 }
12080- if (ip == (u8 *)regs->ip)
12081+ if (ip == (u8 *)regs->ip + cs_base)
12082 printk("<%02x> ", c);
12083 else
12084 printk("%02x ", c);
12085@@ -149,6 +146,7 @@ int is_valid_bugaddr(unsigned long ip)
12086 {
12087 unsigned short ud2;
12088
12089+ ip = ktla_ktva(ip);
12090 if (ip < PAGE_OFFSET)
12091 return 0;
12092 if (probe_kernel_address((unsigned short *)ip, ud2))
12093diff -urNp linux-2.6.32.41/arch/x86/kernel/dumpstack_64.c linux-2.6.32.41/arch/x86/kernel/dumpstack_64.c
12094--- linux-2.6.32.41/arch/x86/kernel/dumpstack_64.c 2011-03-27 14:31:47.000000000 -0400
12095+++ linux-2.6.32.41/arch/x86/kernel/dumpstack_64.c 2011-04-17 15:56:46.000000000 -0400
12096@@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task
12097 unsigned long *irq_stack_end =
12098 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12099 unsigned used = 0;
12100- struct thread_info *tinfo;
12101 int graph = 0;
12102+ void *stack_start;
12103
12104 if (!task)
12105 task = current;
12106@@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task
12107 * current stack address. If the stacks consist of nested
12108 * exceptions
12109 */
12110- tinfo = task_thread_info(task);
12111 for (;;) {
12112 char *id;
12113 unsigned long *estack_end;
12114+
12115 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12116 &used, &id);
12117
12118@@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task
12119 if (ops->stack(data, id) < 0)
12120 break;
12121
12122- bp = print_context_stack(tinfo, stack, bp, ops,
12123+ bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12124 data, estack_end, &graph);
12125 ops->stack(data, "<EOE>");
12126 /*
12127@@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task
12128 if (stack >= irq_stack && stack < irq_stack_end) {
12129 if (ops->stack(data, "IRQ") < 0)
12130 break;
12131- bp = print_context_stack(tinfo, stack, bp,
12132+ bp = print_context_stack(task, irq_stack, stack, bp,
12133 ops, data, irq_stack_end, &graph);
12134 /*
12135 * We link to the next stack (which would be
12136@@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task
12137 /*
12138 * This handles the process stack:
12139 */
12140- bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12141+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12142+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12143 put_cpu();
12144 }
12145 EXPORT_SYMBOL(dump_trace);
12146diff -urNp linux-2.6.32.41/arch/x86/kernel/dumpstack.c linux-2.6.32.41/arch/x86/kernel/dumpstack.c
12147--- linux-2.6.32.41/arch/x86/kernel/dumpstack.c 2011-03-27 14:31:47.000000000 -0400
12148+++ linux-2.6.32.41/arch/x86/kernel/dumpstack.c 2011-04-17 15:56:46.000000000 -0400
12149@@ -2,6 +2,9 @@
12150 * Copyright (C) 1991, 1992 Linus Torvalds
12151 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12152 */
12153+#ifdef CONFIG_GRKERNSEC_HIDESYM
12154+#define __INCLUDED_BY_HIDESYM 1
12155+#endif
12156 #include <linux/kallsyms.h>
12157 #include <linux/kprobes.h>
12158 #include <linux/uaccess.h>
12159@@ -28,7 +31,7 @@ static int die_counter;
12160
12161 void printk_address(unsigned long address, int reliable)
12162 {
12163- printk(" [<%p>] %s%pS\n", (void *) address,
12164+ printk(" [<%p>] %s%pA\n", (void *) address,
12165 reliable ? "" : "? ", (void *) address);
12166 }
12167
12168@@ -36,9 +39,8 @@ void printk_address(unsigned long addres
12169 static void
12170 print_ftrace_graph_addr(unsigned long addr, void *data,
12171 const struct stacktrace_ops *ops,
12172- struct thread_info *tinfo, int *graph)
12173+ struct task_struct *task, int *graph)
12174 {
12175- struct task_struct *task = tinfo->task;
12176 unsigned long ret_addr;
12177 int index = task->curr_ret_stack;
12178
12179@@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long ad
12180 static inline void
12181 print_ftrace_graph_addr(unsigned long addr, void *data,
12182 const struct stacktrace_ops *ops,
12183- struct thread_info *tinfo, int *graph)
12184+ struct task_struct *task, int *graph)
12185 { }
12186 #endif
12187
12188@@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long ad
12189 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12190 */
12191
12192-static inline int valid_stack_ptr(struct thread_info *tinfo,
12193- void *p, unsigned int size, void *end)
12194+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12195 {
12196- void *t = tinfo;
12197 if (end) {
12198 if (p < end && p >= (end-THREAD_SIZE))
12199 return 1;
12200@@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct
12201 }
12202
12203 unsigned long
12204-print_context_stack(struct thread_info *tinfo,
12205+print_context_stack(struct task_struct *task, void *stack_start,
12206 unsigned long *stack, unsigned long bp,
12207 const struct stacktrace_ops *ops, void *data,
12208 unsigned long *end, int *graph)
12209 {
12210 struct stack_frame *frame = (struct stack_frame *)bp;
12211
12212- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12213+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12214 unsigned long addr;
12215
12216 addr = *stack;
12217@@ -103,7 +103,7 @@ print_context_stack(struct thread_info *
12218 } else {
12219 ops->address(data, addr, 0);
12220 }
12221- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12222+ print_ftrace_graph_addr(addr, data, ops, task, graph);
12223 }
12224 stack++;
12225 }
12226@@ -180,7 +180,7 @@ void dump_stack(void)
12227 #endif
12228
12229 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
12230- current->pid, current->comm, print_tainted(),
12231+ task_pid_nr(current), current->comm, print_tainted(),
12232 init_utsname()->release,
12233 (int)strcspn(init_utsname()->version, " "),
12234 init_utsname()->version);
12235@@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
12236 return flags;
12237 }
12238
12239+extern void gr_handle_kernel_exploit(void);
12240+
12241 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12242 {
12243 if (regs && kexec_should_crash(current))
12244@@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long fl
12245 panic("Fatal exception in interrupt");
12246 if (panic_on_oops)
12247 panic("Fatal exception");
12248- do_exit(signr);
12249+
12250+ gr_handle_kernel_exploit();
12251+
12252+ do_group_exit(signr);
12253 }
12254
12255 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12256@@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs
12257 unsigned long flags = oops_begin();
12258 int sig = SIGSEGV;
12259
12260- if (!user_mode_vm(regs))
12261+ if (!user_mode(regs))
12262 report_bug(regs->ip, regs);
12263
12264 if (__die(str, regs, err))
12265diff -urNp linux-2.6.32.41/arch/x86/kernel/dumpstack.h linux-2.6.32.41/arch/x86/kernel/dumpstack.h
12266--- linux-2.6.32.41/arch/x86/kernel/dumpstack.h 2011-03-27 14:31:47.000000000 -0400
12267+++ linux-2.6.32.41/arch/x86/kernel/dumpstack.h 2011-04-23 13:25:26.000000000 -0400
12268@@ -15,7 +15,7 @@
12269 #endif
12270
12271 extern unsigned long
12272-print_context_stack(struct thread_info *tinfo,
12273+print_context_stack(struct task_struct *task, void *stack_start,
12274 unsigned long *stack, unsigned long bp,
12275 const struct stacktrace_ops *ops, void *data,
12276 unsigned long *end, int *graph);
12277diff -urNp linux-2.6.32.41/arch/x86/kernel/e820.c linux-2.6.32.41/arch/x86/kernel/e820.c
12278--- linux-2.6.32.41/arch/x86/kernel/e820.c 2011-03-27 14:31:47.000000000 -0400
12279+++ linux-2.6.32.41/arch/x86/kernel/e820.c 2011-04-17 15:56:46.000000000 -0400
12280@@ -733,7 +733,7 @@ struct early_res {
12281 };
12282 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
12283 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
12284- {}
12285+ { 0, 0, {0}, 0 }
12286 };
12287
12288 static int __init find_overlapped_early(u64 start, u64 end)
12289diff -urNp linux-2.6.32.41/arch/x86/kernel/early_printk.c linux-2.6.32.41/arch/x86/kernel/early_printk.c
12290--- linux-2.6.32.41/arch/x86/kernel/early_printk.c 2011-03-27 14:31:47.000000000 -0400
12291+++ linux-2.6.32.41/arch/x86/kernel/early_printk.c 2011-05-16 21:46:57.000000000 -0400
12292@@ -7,6 +7,7 @@
12293 #include <linux/pci_regs.h>
12294 #include <linux/pci_ids.h>
12295 #include <linux/errno.h>
12296+#include <linux/sched.h>
12297 #include <asm/io.h>
12298 #include <asm/processor.h>
12299 #include <asm/fcntl.h>
12300@@ -170,6 +171,8 @@ asmlinkage void early_printk(const char
12301 int n;
12302 va_list ap;
12303
12304+ pax_track_stack();
12305+
12306 va_start(ap, fmt);
12307 n = vscnprintf(buf, sizeof(buf), fmt, ap);
12308 early_console->write(early_console, buf, n);
12309diff -urNp linux-2.6.32.41/arch/x86/kernel/efi_32.c linux-2.6.32.41/arch/x86/kernel/efi_32.c
12310--- linux-2.6.32.41/arch/x86/kernel/efi_32.c 2011-03-27 14:31:47.000000000 -0400
12311+++ linux-2.6.32.41/arch/x86/kernel/efi_32.c 2011-04-17 15:56:46.000000000 -0400
12312@@ -38,70 +38,38 @@
12313 */
12314
12315 static unsigned long efi_rt_eflags;
12316-static pgd_t efi_bak_pg_dir_pointer[2];
12317+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
12318
12319-void efi_call_phys_prelog(void)
12320+void __init efi_call_phys_prelog(void)
12321 {
12322- unsigned long cr4;
12323- unsigned long temp;
12324 struct desc_ptr gdt_descr;
12325
12326 local_irq_save(efi_rt_eflags);
12327
12328- /*
12329- * If I don't have PAE, I should just duplicate two entries in page
12330- * directory. If I have PAE, I just need to duplicate one entry in
12331- * page directory.
12332- */
12333- cr4 = read_cr4_safe();
12334
12335- if (cr4 & X86_CR4_PAE) {
12336- efi_bak_pg_dir_pointer[0].pgd =
12337- swapper_pg_dir[pgd_index(0)].pgd;
12338- swapper_pg_dir[0].pgd =
12339- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
12340- } else {
12341- efi_bak_pg_dir_pointer[0].pgd =
12342- swapper_pg_dir[pgd_index(0)].pgd;
12343- efi_bak_pg_dir_pointer[1].pgd =
12344- swapper_pg_dir[pgd_index(0x400000)].pgd;
12345- swapper_pg_dir[pgd_index(0)].pgd =
12346- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
12347- temp = PAGE_OFFSET + 0x400000;
12348- swapper_pg_dir[pgd_index(0x400000)].pgd =
12349- swapper_pg_dir[pgd_index(temp)].pgd;
12350- }
12351+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
12352+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
12353+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
12354
12355 /*
12356 * After the lock is released, the original page table is restored.
12357 */
12358 __flush_tlb_all();
12359
12360- gdt_descr.address = __pa(get_cpu_gdt_table(0));
12361+ gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
12362 gdt_descr.size = GDT_SIZE - 1;
12363 load_gdt(&gdt_descr);
12364 }
12365
12366-void efi_call_phys_epilog(void)
12367+void __init efi_call_phys_epilog(void)
12368 {
12369- unsigned long cr4;
12370 struct desc_ptr gdt_descr;
12371
12372- gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
12373+ gdt_descr.address = get_cpu_gdt_table(0);
12374 gdt_descr.size = GDT_SIZE - 1;
12375 load_gdt(&gdt_descr);
12376
12377- cr4 = read_cr4_safe();
12378-
12379- if (cr4 & X86_CR4_PAE) {
12380- swapper_pg_dir[pgd_index(0)].pgd =
12381- efi_bak_pg_dir_pointer[0].pgd;
12382- } else {
12383- swapper_pg_dir[pgd_index(0)].pgd =
12384- efi_bak_pg_dir_pointer[0].pgd;
12385- swapper_pg_dir[pgd_index(0x400000)].pgd =
12386- efi_bak_pg_dir_pointer[1].pgd;
12387- }
12388+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
12389
12390 /*
12391 * After the lock is released, the original page table is restored.
12392diff -urNp linux-2.6.32.41/arch/x86/kernel/efi_stub_32.S linux-2.6.32.41/arch/x86/kernel/efi_stub_32.S
12393--- linux-2.6.32.41/arch/x86/kernel/efi_stub_32.S 2011-03-27 14:31:47.000000000 -0400
12394+++ linux-2.6.32.41/arch/x86/kernel/efi_stub_32.S 2011-04-17 15:56:46.000000000 -0400
12395@@ -6,6 +6,7 @@
12396 */
12397
12398 #include <linux/linkage.h>
12399+#include <linux/init.h>
12400 #include <asm/page_types.h>
12401
12402 /*
12403@@ -20,7 +21,7 @@
12404 * service functions will comply with gcc calling convention, too.
12405 */
12406
12407-.text
12408+__INIT
12409 ENTRY(efi_call_phys)
12410 /*
12411 * 0. The function can only be called in Linux kernel. So CS has been
12412@@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
12413 * The mapping of lower virtual memory has been created in prelog and
12414 * epilog.
12415 */
12416- movl $1f, %edx
12417- subl $__PAGE_OFFSET, %edx
12418- jmp *%edx
12419+ jmp 1f-__PAGE_OFFSET
12420 1:
12421
12422 /*
12423@@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
12424 * parameter 2, ..., param n. To make things easy, we save the return
12425 * address of efi_call_phys in a global variable.
12426 */
12427- popl %edx
12428- movl %edx, saved_return_addr
12429- /* get the function pointer into ECX*/
12430- popl %ecx
12431- movl %ecx, efi_rt_function_ptr
12432- movl $2f, %edx
12433- subl $__PAGE_OFFSET, %edx
12434- pushl %edx
12435+ popl (saved_return_addr)
12436+ popl (efi_rt_function_ptr)
12437
12438 /*
12439 * 3. Clear PG bit in %CR0.
12440@@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
12441 /*
12442 * 5. Call the physical function.
12443 */
12444- jmp *%ecx
12445+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
12446
12447-2:
12448 /*
12449 * 6. After EFI runtime service returns, control will return to
12450 * following instruction. We'd better readjust stack pointer first.
12451@@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
12452 movl %cr0, %edx
12453 orl $0x80000000, %edx
12454 movl %edx, %cr0
12455- jmp 1f
12456-1:
12457+
12458 /*
12459 * 8. Now restore the virtual mode from flat mode by
12460 * adding EIP with PAGE_OFFSET.
12461 */
12462- movl $1f, %edx
12463- jmp *%edx
12464+ jmp 1f+__PAGE_OFFSET
12465 1:
12466
12467 /*
12468 * 9. Balance the stack. And because EAX contain the return value,
12469 * we'd better not clobber it.
12470 */
12471- leal efi_rt_function_ptr, %edx
12472- movl (%edx), %ecx
12473- pushl %ecx
12474+ pushl (efi_rt_function_ptr)
12475
12476 /*
12477- * 10. Push the saved return address onto the stack and return.
12478+ * 10. Return to the saved return address.
12479 */
12480- leal saved_return_addr, %edx
12481- movl (%edx), %ecx
12482- pushl %ecx
12483- ret
12484+ jmpl *(saved_return_addr)
12485 ENDPROC(efi_call_phys)
12486 .previous
12487
12488-.data
12489+__INITDATA
12490 saved_return_addr:
12491 .long 0
12492 efi_rt_function_ptr:
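
Because this stub runs partly with paging disabled, every address it touches has to be shifted between the kernel's link-time (virtual) view and the physical view by the constant __PAGE_OFFSET, which is what the jmp 1f-__PAGE_OFFSET / jmp 1f+__PAGE_OFFSET pair above does at the two transition points. The arithmetic, written out in C (macro names here are illustrative; the kernel's own helpers for lowmem are __pa()/__va()):

    /* 32-bit x86 direct map: the kernel is linked at __PAGE_OFFSET
     * (0xC0000000 with the default 3G/1G split), so for lowmem:
     *     phys = virt - __PAGE_OFFSET
     *     virt = phys + __PAGE_OFFSET
     */
    #define SKETCH_PA(x) ((unsigned long)(x) - 0xC0000000UL)
    #define SKETCH_VA(x) ((void *)((unsigned long)(x) + 0xC0000000UL))
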
12493diff -urNp linux-2.6.32.41/arch/x86/kernel/entry_32.S linux-2.6.32.41/arch/x86/kernel/entry_32.S
12494--- linux-2.6.32.41/arch/x86/kernel/entry_32.S 2011-03-27 14:31:47.000000000 -0400
12495+++ linux-2.6.32.41/arch/x86/kernel/entry_32.S 2011-05-22 23:02:03.000000000 -0400
12496@@ -185,13 +185,146 @@
12497 /*CFI_REL_OFFSET gs, PT_GS*/
12498 .endm
12499 .macro SET_KERNEL_GS reg
12500+
12501+#ifdef CONFIG_CC_STACKPROTECTOR
12502 movl $(__KERNEL_STACK_CANARY), \reg
12503+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12504+ movl $(__USER_DS), \reg
12505+#else
12506+ xorl \reg, \reg
12507+#endif
12508+
12509 movl \reg, %gs
12510 .endm
12511
12512 #endif /* CONFIG_X86_32_LAZY_GS */
12513
12514-.macro SAVE_ALL
12515+.macro pax_enter_kernel
12516+#ifdef CONFIG_PAX_KERNEXEC
12517+ call pax_enter_kernel
12518+#endif
12519+.endm
12520+
12521+.macro pax_exit_kernel
12522+#ifdef CONFIG_PAX_KERNEXEC
12523+ call pax_exit_kernel
12524+#endif
12525+.endm
12526+
12527+#ifdef CONFIG_PAX_KERNEXEC
12528+ENTRY(pax_enter_kernel)
12529+#ifdef CONFIG_PARAVIRT
12530+ pushl %eax
12531+ pushl %ecx
12532+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
12533+ mov %eax, %esi
12534+#else
12535+ mov %cr0, %esi
12536+#endif
12537+ bts $16, %esi
12538+ jnc 1f
12539+ mov %cs, %esi
12540+ cmp $__KERNEL_CS, %esi
12541+ jz 3f
12542+ ljmp $__KERNEL_CS, $3f
12543+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
12544+2:
12545+#ifdef CONFIG_PARAVIRT
12546+ mov %esi, %eax
12547+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
12548+#else
12549+ mov %esi, %cr0
12550+#endif
12551+3:
12552+#ifdef CONFIG_PARAVIRT
12553+ popl %ecx
12554+ popl %eax
12555+#endif
12556+ ret
12557+ENDPROC(pax_enter_kernel)
12558+
12559+ENTRY(pax_exit_kernel)
12560+#ifdef CONFIG_PARAVIRT
12561+ pushl %eax
12562+ pushl %ecx
12563+#endif
12564+ mov %cs, %esi
12565+ cmp $__KERNEXEC_KERNEL_CS, %esi
12566+ jnz 2f
12567+#ifdef CONFIG_PARAVIRT
12568+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
12569+ mov %eax, %esi
12570+#else
12571+ mov %cr0, %esi
12572+#endif
12573+ btr $16, %esi
12574+ ljmp $__KERNEL_CS, $1f
12575+1:
12576+#ifdef CONFIG_PARAVIRT
12577+ mov %esi, %eax
12578+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
12579+#else
12580+ mov %esi, %cr0
12581+#endif
12582+2:
12583+#ifdef CONFIG_PARAVIRT
12584+ popl %ecx
12585+ popl %eax
12586+#endif
12587+ ret
12588+ENDPROC(pax_exit_kernel)
12589+#endif
12590+
12591+.macro pax_erase_kstack
12592+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12593+ call pax_erase_kstack
12594+#endif
12595+.endm
12596+
12597+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12598+/*
12599+ * ebp: thread_info
12600+ * ecx, edx: can be clobbered
12601+ */
12602+ENTRY(pax_erase_kstack)
12603+ pushl %edi
12604+ pushl %eax
12605+
12606+ mov TI_lowest_stack(%ebp), %edi
12607+ mov $-0xBEEF, %eax
12608+ std
12609+
12610+1: mov %edi, %ecx
12611+ and $THREAD_SIZE_asm - 1, %ecx
12612+ shr $2, %ecx
12613+ repne scasl
12614+ jecxz 2f
12615+
12616+ cmp $2*16, %ecx
12617+ jc 2f
12618+
12619+ mov $2*16, %ecx
12620+ repe scasl
12621+ jecxz 2f
12622+ jne 1b
12623+
12624+2: cld
12625+ mov %esp, %ecx
12626+ sub %edi, %ecx
12627+ shr $2, %ecx
12628+ rep stosl
12629+
12630+ mov TI_task_thread_sp0(%ebp), %edi
12631+ sub $128, %edi
12632+ mov %edi, TI_lowest_stack(%ebp)
12633+
12634+ popl %eax
12635+ popl %edi
12636+ ret
12637+ENDPROC(pax_erase_kstack)
12638+#endif
12639+
12640+.macro __SAVE_ALL _DS
12641 cld
12642 PUSH_GS
12643 pushl %fs
12644@@ -224,7 +357,7 @@
12645 pushl %ebx
12646 CFI_ADJUST_CFA_OFFSET 4
12647 CFI_REL_OFFSET ebx, 0
12648- movl $(__USER_DS), %edx
12649+ movl $\_DS, %edx
12650 movl %edx, %ds
12651 movl %edx, %es
12652 movl $(__KERNEL_PERCPU), %edx
12653@@ -232,6 +365,15 @@
12654 SET_KERNEL_GS %edx
12655 .endm
12656
12657+.macro SAVE_ALL
12658+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
12659+ __SAVE_ALL __KERNEL_DS
12660+ pax_enter_kernel
12661+#else
12662+ __SAVE_ALL __USER_DS
12663+#endif
12664+.endm
12665+
12666 .macro RESTORE_INT_REGS
12667 popl %ebx
12668 CFI_ADJUST_CFA_OFFSET -4
12669@@ -352,7 +494,15 @@ check_userspace:
12670 movb PT_CS(%esp), %al
12671 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
12672 cmpl $USER_RPL, %eax
12673+
12674+#ifdef CONFIG_PAX_KERNEXEC
12675+ jae resume_userspace
12676+
12677+ PAX_EXIT_KERNEL
12678+ jmp resume_kernel
12679+#else
12680 jb resume_kernel # not returning to v8086 or userspace
12681+#endif
12682
12683 ENTRY(resume_userspace)
12684 LOCKDEP_SYS_EXIT
12685@@ -364,7 +514,7 @@ ENTRY(resume_userspace)
12686 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
12687 # int/exception return?
12688 jne work_pending
12689- jmp restore_all
12690+ jmp restore_all_pax
12691 END(ret_from_exception)
12692
12693 #ifdef CONFIG_PREEMPT
12694@@ -414,25 +564,36 @@ sysenter_past_esp:
12695 /*CFI_REL_OFFSET cs, 0*/
12696 /*
12697 * Push current_thread_info()->sysenter_return to the stack.
12698- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
12699- * pushed above; +8 corresponds to copy_thread's esp0 setting.
12700 */
12701- pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
12702+ pushl $0
12703 CFI_ADJUST_CFA_OFFSET 4
12704 CFI_REL_OFFSET eip, 0
12705
12706 pushl %eax
12707 CFI_ADJUST_CFA_OFFSET 4
12708 SAVE_ALL
12709+ GET_THREAD_INFO(%ebp)
12710+ movl TI_sysenter_return(%ebp),%ebp
12711+ movl %ebp,PT_EIP(%esp)
12712 ENABLE_INTERRUPTS(CLBR_NONE)
12713
12714 /*
12715 * Load the potential sixth argument from user stack.
12716 * Careful about security.
12717 */
12718+ movl PT_OLDESP(%esp),%ebp
12719+
12720+#ifdef CONFIG_PAX_MEMORY_UDEREF
12721+ mov PT_OLDSS(%esp),%ds
12722+1: movl %ds:(%ebp),%ebp
12723+ push %ss
12724+ pop %ds
12725+#else
12726 cmpl $__PAGE_OFFSET-3,%ebp
12727 jae syscall_fault
12728 1: movl (%ebp),%ebp
12729+#endif
12730+
12731 movl %ebp,PT_EBP(%esp)
12732 .section __ex_table,"a"
12733 .align 4
12734@@ -455,12 +616,23 @@ sysenter_do_call:
12735 testl $_TIF_ALLWORK_MASK, %ecx
12736 jne sysexit_audit
12737 sysenter_exit:
12738+
12739+#ifdef CONFIG_PAX_RANDKSTACK
12740+ pushl_cfi %eax
12741+ call pax_randomize_kstack
12742+ popl_cfi %eax
12743+#endif
12744+
12745+ pax_erase_kstack
12746+
12747 /* if something modifies registers it must also disable sysexit */
12748 movl PT_EIP(%esp), %edx
12749 movl PT_OLDESP(%esp), %ecx
12750 xorl %ebp,%ebp
12751 TRACE_IRQS_ON
12752 1: mov PT_FS(%esp), %fs
12753+2: mov PT_DS(%esp), %ds
12754+3: mov PT_ES(%esp), %es
12755 PTGS_TO_GS
12756 ENABLE_INTERRUPTS_SYSEXIT
12757
12758@@ -477,6 +649,9 @@ sysenter_audit:
12759 movl %eax,%edx /* 2nd arg: syscall number */
12760 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
12761 call audit_syscall_entry
12762+
12763+ pax_erase_kstack
12764+
12765 pushl %ebx
12766 CFI_ADJUST_CFA_OFFSET 4
12767 movl PT_EAX(%esp),%eax /* reload syscall number */
12768@@ -504,11 +679,17 @@ sysexit_audit:
12769
12770 CFI_ENDPROC
12771 .pushsection .fixup,"ax"
12772-2: movl $0,PT_FS(%esp)
12773+4: movl $0,PT_FS(%esp)
12774+ jmp 1b
12775+5: movl $0,PT_DS(%esp)
12776+ jmp 1b
12777+6: movl $0,PT_ES(%esp)
12778 jmp 1b
12779 .section __ex_table,"a"
12780 .align 4
12781- .long 1b,2b
12782+ .long 1b,4b
12783+ .long 2b,5b
12784+ .long 3b,6b
12785 .popsection
12786 PTGS_TO_GS_EX
12787 ENDPROC(ia32_sysenter_target)
12788@@ -538,6 +719,14 @@ syscall_exit:
12789 testl $_TIF_ALLWORK_MASK, %ecx # current->work
12790 jne syscall_exit_work
12791
12792+restore_all_pax:
12793+
12794+#ifdef CONFIG_PAX_RANDKSTACK
12795+ call pax_randomize_kstack
12796+#endif
12797+
12798+ pax_erase_kstack
12799+
12800 restore_all:
12801 TRACE_IRQS_IRET
12802 restore_all_notrace:
12803@@ -602,7 +791,13 @@ ldt_ss:
12804 mov PT_OLDESP(%esp), %eax /* load userspace esp */
12805 mov %dx, %ax /* eax: new kernel esp */
12806 sub %eax, %edx /* offset (low word is 0) */
12807- PER_CPU(gdt_page, %ebx)
12808+#ifdef CONFIG_SMP
12809+ movl PER_CPU_VAR(cpu_number), %ebx
12810+ shll $PAGE_SHIFT_asm, %ebx
12811+ addl $cpu_gdt_table, %ebx
12812+#else
12813+ movl $cpu_gdt_table, %ebx
12814+#endif
12815 shr $16, %edx
12816 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
12817 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
12818@@ -636,31 +831,25 @@ work_resched:
12819 movl TI_flags(%ebp), %ecx
12820 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
12821 # than syscall tracing?
12822- jz restore_all
12823+ jz restore_all_pax
12824 testb $_TIF_NEED_RESCHED, %cl
12825 jnz work_resched
12826
12827 work_notifysig: # deal with pending signals and
12828 # notify-resume requests
12829+ movl %esp, %eax
12830 #ifdef CONFIG_VM86
12831 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
12832- movl %esp, %eax
12833- jne work_notifysig_v86 # returning to kernel-space or
12834+ jz 1f # returning to kernel-space or
12835 # vm86-space
12836- xorl %edx, %edx
12837- call do_notify_resume
12838- jmp resume_userspace_sig
12839
12840- ALIGN
12841-work_notifysig_v86:
12842 pushl %ecx # save ti_flags for do_notify_resume
12843 CFI_ADJUST_CFA_OFFSET 4
12844 call save_v86_state # %eax contains pt_regs pointer
12845 popl %ecx
12846 CFI_ADJUST_CFA_OFFSET -4
12847 movl %eax, %esp
12848-#else
12849- movl %esp, %eax
12850+1:
12851 #endif
12852 xorl %edx, %edx
12853 call do_notify_resume
12854@@ -673,6 +862,9 @@ syscall_trace_entry:
12855 movl $-ENOSYS,PT_EAX(%esp)
12856 movl %esp, %eax
12857 call syscall_trace_enter
12858+
12859+ pax_erase_kstack
12860+
12861 /* What it returned is what we'll actually use. */
12862 cmpl $(nr_syscalls), %eax
12863 jnae syscall_call
12864@@ -695,6 +887,10 @@ END(syscall_exit_work)
12865
12866 RING0_INT_FRAME # can't unwind into user space anyway
12867 syscall_fault:
12868+#ifdef CONFIG_PAX_MEMORY_UDEREF
12869+ push %ss
12870+ pop %ds
12871+#endif
12872 GET_THREAD_INFO(%ebp)
12873 movl $-EFAULT,PT_EAX(%esp)
12874 jmp resume_userspace
12875@@ -726,6 +922,33 @@ PTREGSCALL(rt_sigreturn)
12876 PTREGSCALL(vm86)
12877 PTREGSCALL(vm86old)
12878
12879+ ALIGN;
12880+ENTRY(kernel_execve)
12881+ push %ebp
12882+ sub $PT_OLDSS+4,%esp
12883+ push %edi
12884+ push %ecx
12885+ push %eax
12886+ lea 3*4(%esp),%edi
12887+ mov $PT_OLDSS/4+1,%ecx
12888+ xorl %eax,%eax
12889+ rep stosl
12890+ pop %eax
12891+ pop %ecx
12892+ pop %edi
12893+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
12894+ mov %eax,PT_EBX(%esp)
12895+ mov %edx,PT_ECX(%esp)
12896+ mov %ecx,PT_EDX(%esp)
12897+ mov %esp,%eax
12898+ call sys_execve
12899+ GET_THREAD_INFO(%ebp)
12900+ test %eax,%eax
12901+ jz syscall_exit
12902+ add $PT_OLDSS+4,%esp
12903+ pop %ebp
12904+ ret
12905+
12906 .macro FIXUP_ESPFIX_STACK
12907 /*
12908 * Switch back for ESPFIX stack to the normal zerobased stack
12909@@ -735,7 +958,13 @@ PTREGSCALL(vm86old)
12910 * normal stack and adjusts ESP with the matching offset.
12911 */
12912 /* fixup the stack */
12913- PER_CPU(gdt_page, %ebx)
12914+#ifdef CONFIG_SMP
12915+ movl PER_CPU_VAR(cpu_number), %ebx
12916+ shll $PAGE_SHIFT_asm, %ebx
12917+ addl $cpu_gdt_table, %ebx
12918+#else
12919+ movl $cpu_gdt_table, %ebx
12920+#endif
12921 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
12922 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
12923 shl $16, %eax
12924@@ -1198,7 +1427,6 @@ return_to_handler:
12925 ret
12926 #endif
12927
12928-.section .rodata,"a"
12929 #include "syscall_table_32.S"
12930
12931 syscall_table_size=(.-sys_call_table)
12932@@ -1255,9 +1483,12 @@ error_code:
12933 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
12934 REG_TO_PTGS %ecx
12935 SET_KERNEL_GS %ecx
12936- movl $(__USER_DS), %ecx
12937+ movl $(__KERNEL_DS), %ecx
12938 movl %ecx, %ds
12939 movl %ecx, %es
12940+
12941+ pax_enter_kernel
12942+
12943 TRACE_IRQS_OFF
12944 movl %esp,%eax # pt_regs pointer
12945 call *%edi
12946@@ -1351,6 +1582,9 @@ nmi_stack_correct:
12947 xorl %edx,%edx # zero error code
12948 movl %esp,%eax # pt_regs pointer
12949 call do_nmi
12950+
12951+ pax_exit_kernel
12952+
12953 jmp restore_all_notrace
12954 CFI_ENDPROC
12955
12956@@ -1391,6 +1625,9 @@ nmi_espfix_stack:
12957 FIXUP_ESPFIX_STACK # %eax == %esp
12958 xorl %edx,%edx # zero error code
12959 call do_nmi
12960+
12961+ pax_exit_kernel
12962+
12963 RESTORE_REGS
12964 lss 12+4(%esp), %esp # back to espfix stack
12965 CFI_ADJUST_CFA_OFFSET -24
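
Of the entry_32.S additions above, pax_erase_kstack implements PAX_MEMORY_STACKLEAK: on the way back to user space it refills the portion of the kernel stack that the syscall actually used with a poison word, so stale kernel data cannot later leak through uninitialized stack reads. A simplified C rendering of what the assembly does (struct and field names are assumptions for illustration; the real routine also scans for an existing run of poison to find the deepest dirty word before wiping):

    #define POISON_WORD ((unsigned long)-0xBEEF)  /* same constant as the asm */

    struct thread_info_sketch {
            unsigned long *lowest_stack;   /* deepest point the stack reached */
            unsigned long  thread_sp0;     /* top of the kernel stack */
    };

    static void erase_kstack_sketch(struct thread_info_sketch *ti,
                                    unsigned long *sp /* current stack pointer */)
    {
            unsigned long *p = ti->lowest_stack;

            while (p < sp)                 /* wipe the used region */
                    *p++ = POISON_WORD;

            /* reset the watermark to just below the top of the stack */
            ti->lowest_stack = (unsigned long *)(ti->thread_sp0 - 128);
    }
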
12966diff -urNp linux-2.6.32.41/arch/x86/kernel/entry_64.S linux-2.6.32.41/arch/x86/kernel/entry_64.S
12967--- linux-2.6.32.41/arch/x86/kernel/entry_64.S 2011-03-27 14:31:47.000000000 -0400
12968+++ linux-2.6.32.41/arch/x86/kernel/entry_64.S 2011-05-22 23:18:48.000000000 -0400
12969@@ -53,6 +53,7 @@
12970 #include <asm/paravirt.h>
12971 #include <asm/ftrace.h>
12972 #include <asm/percpu.h>
12973+#include <asm/pgtable.h>
12974
12975 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
12976 #include <linux/elf-em.h>
12977@@ -174,6 +175,259 @@ ENTRY(native_usergs_sysret64)
12978 ENDPROC(native_usergs_sysret64)
12979 #endif /* CONFIG_PARAVIRT */
12980
12981+ .macro ljmpq sel, off
12982+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
12983+ .byte 0x48; ljmp *1234f(%rip)
12984+ .pushsection .rodata
12985+ .align 16
12986+ 1234: .quad \off; .word \sel
12987+ .popsection
12988+#else
12989+ pushq $\sel
12990+ pushq $\off
12991+ lretq
12992+#endif
12993+ .endm
12994+
12995+ .macro pax_enter_kernel
12996+#ifdef CONFIG_PAX_KERNEXEC
12997+ call pax_enter_kernel
12998+#endif
12999+ .endm
13000+
13001+ .macro pax_exit_kernel
13002+#ifdef CONFIG_PAX_KERNEXEC
13003+ call pax_exit_kernel
13004+#endif
13005+ .endm
13006+
13007+#ifdef CONFIG_PAX_KERNEXEC
13008+ENTRY(pax_enter_kernel)
13009+ pushq %rdi
13010+
13011+#ifdef CONFIG_PARAVIRT
13012+ PV_SAVE_REGS(CLBR_RDI)
13013+#endif
13014+
13015+ GET_CR0_INTO_RDI
13016+ bts $16,%rdi
13017+ jnc 1f
13018+ mov %cs,%edi
13019+ cmp $__KERNEL_CS,%edi
13020+ jz 3f
13021+ ljmpq __KERNEL_CS,3f
13022+1: ljmpq __KERNEXEC_KERNEL_CS,2f
13023+2: SET_RDI_INTO_CR0
13024+3:
13025+
13026+#ifdef CONFIG_PARAVIRT
13027+ PV_RESTORE_REGS(CLBR_RDI)
13028+#endif
13029+
13030+ popq %rdi
13031+ retq
13032+ENDPROC(pax_enter_kernel)
13033+
13034+ENTRY(pax_exit_kernel)
13035+ pushq %rdi
13036+
13037+#ifdef CONFIG_PARAVIRT
13038+ PV_SAVE_REGS(CLBR_RDI)
13039+#endif
13040+
13041+ mov %cs,%rdi
13042+ cmp $__KERNEXEC_KERNEL_CS,%edi
13043+ jnz 2f
13044+ GET_CR0_INTO_RDI
13045+ btr $16,%rdi
13046+ ljmpq __KERNEL_CS,1f
13047+1: SET_RDI_INTO_CR0
13048+2:
13049+
13050+#ifdef CONFIG_PARAVIRT
13051+ PV_RESTORE_REGS(CLBR_RDI);
13052+#endif
13053+
13054+ popq %rdi
13055+ retq
13056+ENDPROC(pax_exit_kernel)
13057+#endif
13058+
13059+ .macro pax_enter_kernel_user
13060+#ifdef CONFIG_PAX_MEMORY_UDEREF
13061+ call pax_enter_kernel_user
13062+#endif
13063+ .endm
13064+
13065+ .macro pax_exit_kernel_user
13066+#ifdef CONFIG_PAX_MEMORY_UDEREF
13067+ call pax_exit_kernel_user
13068+#endif
13069+#ifdef CONFIG_PAX_RANDKSTACK
13070+ push %rax
13071+ call pax_randomize_kstack
13072+ pop %rax
13073+#endif
13074+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13075+ call pax_erase_kstack
13076+#endif
13077+ .endm
13078+
13079+#ifdef CONFIG_PAX_MEMORY_UDEREF
13080+ENTRY(pax_enter_kernel_user)
13081+ pushq %rdi
13082+ pushq %rbx
13083+
13084+#ifdef CONFIG_PARAVIRT
13085+ PV_SAVE_REGS(CLBR_RDI)
13086+#endif
13087+
13088+ GET_CR3_INTO_RDI
13089+ mov %rdi,%rbx
13090+ add $__START_KERNEL_map,%rbx
13091+ sub phys_base(%rip),%rbx
13092+
13093+#ifdef CONFIG_PARAVIRT
13094+ pushq %rdi
13095+ cmpl $0, pv_info+PARAVIRT_enabled
13096+ jz 1f
13097+ i = 0
13098+ .rept USER_PGD_PTRS
13099+ mov i*8(%rbx),%rsi
13100+ mov $0,%sil
13101+ lea i*8(%rbx),%rdi
13102+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
13103+ i = i + 1
13104+ .endr
13105+ jmp 2f
13106+1:
13107+#endif
13108+
13109+ i = 0
13110+ .rept USER_PGD_PTRS
13111+ movb $0,i*8(%rbx)
13112+ i = i + 1
13113+ .endr
13114+
13115+#ifdef CONFIG_PARAVIRT
13116+2: popq %rdi
13117+#endif
13118+ SET_RDI_INTO_CR3
13119+
13120+#ifdef CONFIG_PAX_KERNEXEC
13121+ GET_CR0_INTO_RDI
13122+ bts $16,%rdi
13123+ SET_RDI_INTO_CR0
13124+#endif
13125+
13126+#ifdef CONFIG_PARAVIRT
13127+ PV_RESTORE_REGS(CLBR_RDI)
13128+#endif
13129+
13130+ popq %rbx
13131+ popq %rdi
13132+ retq
13133+ENDPROC(pax_enter_kernel_user)
13134+
13135+ENTRY(pax_exit_kernel_user)
13136+ push %rdi
13137+
13138+#ifdef CONFIG_PARAVIRT
13139+ pushq %rbx
13140+ PV_SAVE_REGS(CLBR_RDI)
13141+#endif
13142+
13143+#ifdef CONFIG_PAX_KERNEXEC
13144+ GET_CR0_INTO_RDI
13145+ btr $16,%rdi
13146+ SET_RDI_INTO_CR0
13147+#endif
13148+
13149+ GET_CR3_INTO_RDI
13150+ add $__START_KERNEL_map,%rdi
13151+ sub phys_base(%rip),%rdi
13152+
13153+#ifdef CONFIG_PARAVIRT
13154+ cmpl $0, pv_info+PARAVIRT_enabled
13155+ jz 1f
13156+ mov %rdi,%rbx
13157+ i = 0
13158+ .rept USER_PGD_PTRS
13159+ mov i*8(%rbx),%rsi
13160+ mov $0x67,%sil
13161+ lea i*8(%rbx),%rdi
13162+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
13163+ i = i + 1
13164+ .endr
13165+ jmp 2f
13166+1:
13167+#endif
13168+
13169+ i = 0
13170+ .rept USER_PGD_PTRS
13171+ movb $0x67,i*8(%rdi)
13172+ i = i + 1
13173+ .endr
13174+
13175+#ifdef CONFIG_PARAVIRT
13176+2: PV_RESTORE_REGS(CLBR_RDI)
13177+ popq %rbx
13178+#endif
13179+
13180+ popq %rdi
13181+ retq
13182+ENDPROC(pax_exit_kernel_user)
13183+#endif
13184+
13185+ .macro pax_erase_kstack
13186+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13187+ call pax_erase_kstack
13188+#endif
13189+ .endm
13190+
13191+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13192+/*
13193+ * r10: thread_info
13194+ * rcx, rdx: can be clobbered
13195+ */
13196+ENTRY(pax_erase_kstack)
13197+ pushq %rdi
13198+ pushq %rax
13199+
13200+ GET_THREAD_INFO(%r10)
13201+ mov TI_lowest_stack(%r10), %rdi
13202+ mov $-0xBEEF, %rax
13203+ std
13204+
13205+1: mov %edi, %ecx
13206+ and $THREAD_SIZE_asm - 1, %ecx
13207+ shr $3, %ecx
13208+ repne scasq
13209+ jecxz 2f
13210+
13211+ cmp $2*8, %ecx
13212+ jc 2f
13213+
13214+ mov $2*8, %ecx
13215+ repe scasq
13216+ jecxz 2f
13217+ jne 1b
13218+
13219+2: cld
13220+ mov %esp, %ecx
13221+ sub %edi, %ecx
13222+ shr $3, %ecx
13223+ rep stosq
13224+
13225+ mov TI_task_thread_sp0(%r10), %rdi
13226+ sub $256, %rdi
13227+ mov %rdi, TI_lowest_stack(%r10)
13228+
13229+ popq %rax
13230+ popq %rdi
13231+ ret
13232+ENDPROC(pax_erase_kstack)
13233+#endif
13234
13235 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
13236 #ifdef CONFIG_TRACE_IRQFLAGS
13237@@ -317,7 +571,7 @@ ENTRY(save_args)
13238 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
13239 movq_cfi rbp, 8 /* push %rbp */
13240 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
13241- testl $3, CS(%rdi)
13242+ testb $3, CS(%rdi)
13243 je 1f
13244 SWAPGS
13245 /*
13246@@ -409,7 +663,7 @@ ENTRY(ret_from_fork)
13247
13248 RESTORE_REST
13249
13250- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13251+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13252 je int_ret_from_sys_call
13253
13254 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
13255@@ -455,7 +709,7 @@ END(ret_from_fork)
13256 ENTRY(system_call)
13257 CFI_STARTPROC simple
13258 CFI_SIGNAL_FRAME
13259- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
13260+ CFI_DEF_CFA rsp,0
13261 CFI_REGISTER rip,rcx
13262 /*CFI_REGISTER rflags,r11*/
13263 SWAPGS_UNSAFE_STACK
13264@@ -468,12 +722,13 @@ ENTRY(system_call_after_swapgs)
13265
13266 movq %rsp,PER_CPU_VAR(old_rsp)
13267 movq PER_CPU_VAR(kernel_stack),%rsp
13268+ pax_enter_kernel_user
13269 /*
13270 * No need to follow this irqs off/on section - it's straight
13271 * and short:
13272 */
13273 ENABLE_INTERRUPTS(CLBR_NONE)
13274- SAVE_ARGS 8,1
13275+ SAVE_ARGS 8*6,1
13276 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
13277 movq %rcx,RIP-ARGOFFSET(%rsp)
13278 CFI_REL_OFFSET rip,RIP-ARGOFFSET
13279@@ -502,6 +757,7 @@ sysret_check:
13280 andl %edi,%edx
13281 jnz sysret_careful
13282 CFI_REMEMBER_STATE
13283+ pax_exit_kernel_user
13284 /*
13285 * sysretq will re-enable interrupts:
13286 */
13287@@ -562,6 +818,9 @@ auditsys:
13288 movq %rax,%rsi /* 2nd arg: syscall number */
13289 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
13290 call audit_syscall_entry
13291+
13292+ pax_erase_kstack
13293+
13294 LOAD_ARGS 0 /* reload call-clobbered registers */
13295 jmp system_call_fastpath
13296
13297@@ -592,6 +851,9 @@ tracesys:
13298 FIXUP_TOP_OF_STACK %rdi
13299 movq %rsp,%rdi
13300 call syscall_trace_enter
13301+
13302+ pax_erase_kstack
13303+
13304 /*
13305 * Reload arg registers from stack in case ptrace changed them.
13306 * We don't reload %rax because syscall_trace_enter() returned
13307@@ -613,7 +875,7 @@ tracesys:
13308 GLOBAL(int_ret_from_sys_call)
13309 DISABLE_INTERRUPTS(CLBR_NONE)
13310 TRACE_IRQS_OFF
13311- testl $3,CS-ARGOFFSET(%rsp)
13312+ testb $3,CS-ARGOFFSET(%rsp)
13313 je retint_restore_args
13314 movl $_TIF_ALLWORK_MASK,%edi
13315 /* edi: mask to check */
13316@@ -800,6 +1062,16 @@ END(interrupt)
13317 CFI_ADJUST_CFA_OFFSET 10*8
13318 call save_args
13319 PARTIAL_FRAME 0
13320+#ifdef CONFIG_PAX_MEMORY_UDEREF
13321+ testb $3, CS(%rdi)
13322+ jnz 1f
13323+ pax_enter_kernel
13324+ jmp 2f
13325+1: pax_enter_kernel_user
13326+2:
13327+#else
13328+ pax_enter_kernel
13329+#endif
13330 call \func
13331 .endm
13332
13333@@ -822,7 +1094,7 @@ ret_from_intr:
13334 CFI_ADJUST_CFA_OFFSET -8
13335 exit_intr:
13336 GET_THREAD_INFO(%rcx)
13337- testl $3,CS-ARGOFFSET(%rsp)
13338+ testb $3,CS-ARGOFFSET(%rsp)
13339 je retint_kernel
13340
13341 /* Interrupt came from user space */
13342@@ -844,12 +1116,14 @@ retint_swapgs: /* return to user-space
13343 * The iretq could re-enable interrupts:
13344 */
13345 DISABLE_INTERRUPTS(CLBR_ANY)
13346+ pax_exit_kernel_user
13347 TRACE_IRQS_IRETQ
13348 SWAPGS
13349 jmp restore_args
13350
13351 retint_restore_args: /* return to kernel space */
13352 DISABLE_INTERRUPTS(CLBR_ANY)
13353+ pax_exit_kernel
13354 /*
13355 * The iretq could re-enable interrupts:
13356 */
13357@@ -1032,6 +1306,16 @@ ENTRY(\sym)
13358 CFI_ADJUST_CFA_OFFSET 15*8
13359 call error_entry
13360 DEFAULT_FRAME 0
13361+#ifdef CONFIG_PAX_MEMORY_UDEREF
13362+ testb $3, CS(%rsp)
13363+ jnz 1f
13364+ pax_enter_kernel
13365+ jmp 2f
13366+1: pax_enter_kernel_user
13367+2:
13368+#else
13369+ pax_enter_kernel
13370+#endif
13371 movq %rsp,%rdi /* pt_regs pointer */
13372 xorl %esi,%esi /* no error code */
13373 call \do_sym
13374@@ -1049,6 +1333,16 @@ ENTRY(\sym)
13375 subq $15*8, %rsp
13376 call save_paranoid
13377 TRACE_IRQS_OFF
13378+#ifdef CONFIG_PAX_MEMORY_UDEREF
13379+ testb $3, CS(%rsp)
13380+ jnz 1f
13381+ pax_enter_kernel
13382+ jmp 2f
13383+1: pax_enter_kernel_user
13384+2:
13385+#else
13386+ pax_enter_kernel
13387+#endif
13388 movq %rsp,%rdi /* pt_regs pointer */
13389 xorl %esi,%esi /* no error code */
13390 call \do_sym
13391@@ -1066,9 +1360,24 @@ ENTRY(\sym)
13392 subq $15*8, %rsp
13393 call save_paranoid
13394 TRACE_IRQS_OFF
13395+#ifdef CONFIG_PAX_MEMORY_UDEREF
13396+ testb $3, CS(%rsp)
13397+ jnz 1f
13398+ pax_enter_kernel
13399+ jmp 2f
13400+1: pax_enter_kernel_user
13401+2:
13402+#else
13403+ pax_enter_kernel
13404+#endif
13405 movq %rsp,%rdi /* pt_regs pointer */
13406 xorl %esi,%esi /* no error code */
13407- PER_CPU(init_tss, %rbp)
13408+#ifdef CONFIG_SMP
13409+ imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
13410+ lea init_tss(%rbp), %rbp
13411+#else
13412+ lea init_tss(%rip), %rbp
13413+#endif
13414 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
13415 call \do_sym
13416 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
13417@@ -1085,6 +1394,16 @@ ENTRY(\sym)
13418 CFI_ADJUST_CFA_OFFSET 15*8
13419 call error_entry
13420 DEFAULT_FRAME 0
13421+#ifdef CONFIG_PAX_MEMORY_UDEREF
13422+ testb $3, CS(%rsp)
13423+ jnz 1f
13424+ pax_enter_kernel
13425+ jmp 2f
13426+1: pax_enter_kernel_user
13427+2:
13428+#else
13429+ pax_enter_kernel
13430+#endif
13431 movq %rsp,%rdi /* pt_regs pointer */
13432 movq ORIG_RAX(%rsp),%rsi /* get error code */
13433 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
13434@@ -1104,6 +1423,16 @@ ENTRY(\sym)
13435 call save_paranoid
13436 DEFAULT_FRAME 0
13437 TRACE_IRQS_OFF
13438+#ifdef CONFIG_PAX_MEMORY_UDEREF
13439+ testb $3, CS(%rsp)
13440+ jnz 1f
13441+ pax_enter_kernel
13442+ jmp 2f
13443+1: pax_enter_kernel_user
13444+2:
13445+#else
13446+ pax_enter_kernel
13447+#endif
13448 movq %rsp,%rdi /* pt_regs pointer */
13449 movq ORIG_RAX(%rsp),%rsi /* get error code */
13450 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
13451@@ -1405,14 +1734,27 @@ ENTRY(paranoid_exit)
13452 TRACE_IRQS_OFF
13453 testl %ebx,%ebx /* swapgs needed? */
13454 jnz paranoid_restore
13455- testl $3,CS(%rsp)
13456+ testb $3,CS(%rsp)
13457 jnz paranoid_userspace
13458+#ifdef CONFIG_PAX_MEMORY_UDEREF
13459+ pax_exit_kernel
13460+ TRACE_IRQS_IRETQ 0
13461+ SWAPGS_UNSAFE_STACK
13462+ RESTORE_ALL 8
13463+ jmp irq_return
13464+#endif
13465 paranoid_swapgs:
13466+#ifdef CONFIG_PAX_MEMORY_UDEREF
13467+ pax_exit_kernel_user
13468+#else
13469+ pax_exit_kernel
13470+#endif
13471 TRACE_IRQS_IRETQ 0
13472 SWAPGS_UNSAFE_STACK
13473 RESTORE_ALL 8
13474 jmp irq_return
13475 paranoid_restore:
13476+ pax_exit_kernel
13477 TRACE_IRQS_IRETQ 0
13478 RESTORE_ALL 8
13479 jmp irq_return
13480@@ -1470,7 +1812,7 @@ ENTRY(error_entry)
13481 movq_cfi r14, R14+8
13482 movq_cfi r15, R15+8
13483 xorl %ebx,%ebx
13484- testl $3,CS+8(%rsp)
13485+ testb $3,CS+8(%rsp)
13486 je error_kernelspace
13487 error_swapgs:
13488 SWAPGS
13489@@ -1529,6 +1871,16 @@ ENTRY(nmi)
13490 CFI_ADJUST_CFA_OFFSET 15*8
13491 call save_paranoid
13492 DEFAULT_FRAME 0
13493+#ifdef CONFIG_PAX_MEMORY_UDEREF
13494+ testb $3, CS(%rsp)
13495+ jnz 1f
13496+ pax_enter_kernel
13497+ jmp 2f
13498+1: pax_enter_kernel_user
13499+2:
13500+#else
13501+ pax_enter_kernel
13502+#endif
13503 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
13504 movq %rsp,%rdi
13505 movq $-1,%rsi
13506@@ -1539,11 +1891,25 @@ ENTRY(nmi)
13507 DISABLE_INTERRUPTS(CLBR_NONE)
13508 testl %ebx,%ebx /* swapgs needed? */
13509 jnz nmi_restore
13510- testl $3,CS(%rsp)
13511+ testb $3,CS(%rsp)
13512 jnz nmi_userspace
13513+#ifdef CONFIG_PAX_MEMORY_UDEREF
13514+ pax_exit_kernel
13515+ SWAPGS_UNSAFE_STACK
13516+ RESTORE_ALL 8
13517+ jmp irq_return
13518+#endif
13519 nmi_swapgs:
13520+#ifdef CONFIG_PAX_MEMORY_UDEREF
13521+ pax_exit_kernel_user
13522+#else
13523+ pax_exit_kernel
13524+#endif
13525 SWAPGS_UNSAFE_STACK
13526+ RESTORE_ALL 8
13527+ jmp irq_return
13528 nmi_restore:
13529+ pax_exit_kernel
13530 RESTORE_ALL 8
13531 jmp irq_return
13532 nmi_userspace:
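
The 64-bit pax_enter_kernel_user/pax_exit_kernel_user pair above is the heart of PAX_MEMORY_UDEREF on amd64: on every entry from user space the user half of the top-level page table is blanked, and on exit the entries are re-armed (0x67 is the present|rw|user|accessed|dirty flag byte), so a stray kernel dereference of a user pointer faults instead of silently succeeding. A compact C sketch of the idea (names, the entry count and the non-empty guard are illustrative, not the patch's exact code):

    #define USER_PGD_PTRS_SKETCH 256      /* lower half of a 512-entry PGD */

    /* Hide the user mappings while running in the kernel... */
    static void uderef_enter_sketch(unsigned long *pgd)
    {
            int i;
            for (i = 0; i < USER_PGD_PTRS_SKETCH; i++)
                    pgd[i] &= ~0xffUL;    /* clear flag byte: not present */
    }

    /* ...and make them visible again on the way back to user space. */
    static void uderef_exit_sketch(unsigned long *pgd)
    {
            int i;
            for (i = 0; i < USER_PGD_PTRS_SKETCH; i++)
                    if (pgd[i])
                            pgd[i] |= 0x67;   /* present|rw|user|accessed|dirty */
    }

The price is a walk over the user PGD slots on each kernel entry and exit, which is why the paravirt case above has to go through pv_mmu_ops.set_pgd for each slot instead of touching the entries directly.
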
13533diff -urNp linux-2.6.32.41/arch/x86/kernel/ftrace.c linux-2.6.32.41/arch/x86/kernel/ftrace.c
13534--- linux-2.6.32.41/arch/x86/kernel/ftrace.c 2011-03-27 14:31:47.000000000 -0400
13535+++ linux-2.6.32.41/arch/x86/kernel/ftrace.c 2011-05-04 17:56:20.000000000 -0400
13536@@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the
13537 static void *mod_code_newcode; /* holds the text to write to the IP */
13538
13539 static unsigned nmi_wait_count;
13540-static atomic_t nmi_update_count = ATOMIC_INIT(0);
13541+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
13542
13543 int ftrace_arch_read_dyn_info(char *buf, int size)
13544 {
13545@@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf,
13546
13547 r = snprintf(buf, size, "%u %u",
13548 nmi_wait_count,
13549- atomic_read(&nmi_update_count));
13550+ atomic_read_unchecked(&nmi_update_count));
13551 return r;
13552 }
13553
13554@@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
13555 {
13556 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
13557 smp_rmb();
13558+ pax_open_kernel();
13559 ftrace_mod_code();
13560- atomic_inc(&nmi_update_count);
13561+ pax_close_kernel();
13562+ atomic_inc_unchecked(&nmi_update_count);
13563 }
13564 /* Must have previous changes seen before executions */
13565 smp_mb();
13566@@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, voi
13567
13568
13569
13570-static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
13571+static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
13572
13573 static unsigned char *ftrace_nop_replace(void)
13574 {
13575@@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, uns
13576 {
13577 unsigned char replaced[MCOUNT_INSN_SIZE];
13578
13579+ ip = ktla_ktva(ip);
13580+
13581 /*
13582 * Note: Due to modules and __init, code can
13583 * disappear and change, we need to protect against faulting
13584@@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_fun
13585 unsigned char old[MCOUNT_INSN_SIZE], *new;
13586 int ret;
13587
13588- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
13589+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
13590 new = ftrace_call_replace(ip, (unsigned long)func);
13591 ret = ftrace_modify_code(ip, old, new);
13592
13593@@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *da
13594 switch (faulted) {
13595 case 0:
13596 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
13597- memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
13598+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
13599 break;
13600 case 1:
13601 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
13602- memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
13603+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
13604 break;
13605 case 2:
13606 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
13607- memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
13608+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
13609 break;
13610 }
13611
13612@@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long
13613 {
13614 unsigned char code[MCOUNT_INSN_SIZE];
13615
13616+ ip = ktla_ktva(ip);
13617+
13618 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
13619 return -EFAULT;
13620
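
The ftrace.c hunk shows the KERNEXEC side of runtime code patching: nmi_update_count moves to atomic_unchecked_t (it is a plain statistic that may wrap), addresses being patched are first run through ktla_ktva(), which converts between the two views of kernel text that KERNEXEC maintains, and the modification itself is bracketed by pax_open_kernel()/pax_close_kernel() because the text is otherwise not writable. One plausible shape for such an open/close bracket on x86 is to clear CR0.WP around the write (sketch only; read_cr0()/write_cr0() and X86_CR0_WP are the kernel's own names, the helpers are ours, and the caller is assumed to run with interrupts disabled):

    static unsigned long open_kernel_sketch(void)
    {
            unsigned long cr0 = read_cr0();

            write_cr0(cr0 & ~X86_CR0_WP); /* allow writes to read-only pages */
            return cr0;
    }

    static void close_kernel_sketch(unsigned long cr0)
    {
            write_cr0(cr0);               /* restore write protection */
    }
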
13621diff -urNp linux-2.6.32.41/arch/x86/kernel/head32.c linux-2.6.32.41/arch/x86/kernel/head32.c
13622--- linux-2.6.32.41/arch/x86/kernel/head32.c 2011-03-27 14:31:47.000000000 -0400
13623+++ linux-2.6.32.41/arch/x86/kernel/head32.c 2011-04-17 15:56:46.000000000 -0400
13624@@ -16,6 +16,7 @@
13625 #include <asm/apic.h>
13626 #include <asm/io_apic.h>
13627 #include <asm/bios_ebda.h>
13628+#include <asm/boot.h>
13629
13630 static void __init i386_default_early_setup(void)
13631 {
13632@@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
13633 {
13634 reserve_trampoline_memory();
13635
13636- reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
13637+ reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
13638
13639 #ifdef CONFIG_BLK_DEV_INITRD
13640 /* Reserve INITRD */
13641diff -urNp linux-2.6.32.41/arch/x86/kernel/head_32.S linux-2.6.32.41/arch/x86/kernel/head_32.S
13642--- linux-2.6.32.41/arch/x86/kernel/head_32.S 2011-03-27 14:31:47.000000000 -0400
13643+++ linux-2.6.32.41/arch/x86/kernel/head_32.S 2011-04-17 15:56:46.000000000 -0400
13644@@ -19,10 +19,17 @@
13645 #include <asm/setup.h>
13646 #include <asm/processor-flags.h>
13647 #include <asm/percpu.h>
13648+#include <asm/msr-index.h>
13649
13650 /* Physical address */
13651 #define pa(X) ((X) - __PAGE_OFFSET)
13652
13653+#ifdef CONFIG_PAX_KERNEXEC
13654+#define ta(X) (X)
13655+#else
13656+#define ta(X) ((X) - __PAGE_OFFSET)
13657+#endif
13658+
13659 /*
13660 * References to members of the new_cpu_data structure.
13661 */
13662@@ -52,11 +59,7 @@
13663 * and small than max_low_pfn, otherwise will waste some page table entries
13664 */
13665
13666-#if PTRS_PER_PMD > 1
13667-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
13668-#else
13669-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
13670-#endif
13671+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
13672
13673 /* Enough space to fit pagetables for the low memory linear map */
13674 MAPPING_BEYOND_END = \
13675@@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
13676 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
13677
13678 /*
13679+ * Real beginning of normal "text" segment
13680+ */
13681+ENTRY(stext)
13682+ENTRY(_stext)
13683+
13684+/*
13685 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
13686 * %esi points to the real-mode code as a 32-bit pointer.
13687 * CS and DS must be 4 GB flat segments, but we don't depend on
13688@@ -80,6 +89,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
13689 * can.
13690 */
13691 __HEAD
13692+
13693+#ifdef CONFIG_PAX_KERNEXEC
13694+ jmp startup_32
13695+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
13696+.fill PAGE_SIZE-5,1,0xcc
13697+#endif
13698+
13699 ENTRY(startup_32)
13700 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
13701 us to not reload segments */
13702@@ -97,6 +113,57 @@ ENTRY(startup_32)
13703 movl %eax,%gs
13704 2:
13705
13706+#ifdef CONFIG_SMP
13707+ movl $pa(cpu_gdt_table),%edi
13708+ movl $__per_cpu_load,%eax
13709+ movw %ax,__KERNEL_PERCPU + 2(%edi)
13710+ rorl $16,%eax
13711+ movb %al,__KERNEL_PERCPU + 4(%edi)
13712+ movb %ah,__KERNEL_PERCPU + 7(%edi)
13713+ movl $__per_cpu_end - 1,%eax
13714+ subl $__per_cpu_start,%eax
13715+ movw %ax,__KERNEL_PERCPU + 0(%edi)
13716+#endif
13717+
13718+#ifdef CONFIG_PAX_MEMORY_UDEREF
13719+ movl $NR_CPUS,%ecx
13720+ movl $pa(cpu_gdt_table),%edi
13721+1:
13722+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
13723+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
13724+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
13725+ addl $PAGE_SIZE_asm,%edi
13726+ loop 1b
13727+#endif
13728+
13729+#ifdef CONFIG_PAX_KERNEXEC
13730+ movl $pa(boot_gdt),%edi
13731+ movl $__LOAD_PHYSICAL_ADDR,%eax
13732+ movw %ax,__BOOT_CS + 2(%edi)
13733+ rorl $16,%eax
13734+ movb %al,__BOOT_CS + 4(%edi)
13735+ movb %ah,__BOOT_CS + 7(%edi)
13736+ rorl $16,%eax
13737+
13738+ ljmp $(__BOOT_CS),$1f
13739+1:
13740+
13741+ movl $NR_CPUS,%ecx
13742+ movl $pa(cpu_gdt_table),%edi
13743+ addl $__PAGE_OFFSET,%eax
13744+1:
13745+ movw %ax,__KERNEL_CS + 2(%edi)
13746+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
13747+ rorl $16,%eax
13748+ movb %al,__KERNEL_CS + 4(%edi)
13749+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
13750+ movb %ah,__KERNEL_CS + 7(%edi)
13751+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
13752+ rorl $16,%eax
13753+ addl $PAGE_SIZE_asm,%edi
13754+ loop 1b
13755+#endif
13756+
13757 /*
13758 * Clear BSS first so that there are no surprises...
13759 */
13760@@ -140,9 +207,7 @@ ENTRY(startup_32)
13761 cmpl $num_subarch_entries, %eax
13762 jae bad_subarch
13763
13764- movl pa(subarch_entries)(,%eax,4), %eax
13765- subl $__PAGE_OFFSET, %eax
13766- jmp *%eax
13767+ jmp *pa(subarch_entries)(,%eax,4)
13768
13769 bad_subarch:
13770 WEAK(lguest_entry)
13771@@ -154,10 +219,10 @@ WEAK(xen_entry)
13772 __INITDATA
13773
13774 subarch_entries:
13775- .long default_entry /* normal x86/PC */
13776- .long lguest_entry /* lguest hypervisor */
13777- .long xen_entry /* Xen hypervisor */
13778- .long default_entry /* Moorestown MID */
13779+ .long ta(default_entry) /* normal x86/PC */
13780+ .long ta(lguest_entry) /* lguest hypervisor */
13781+ .long ta(xen_entry) /* Xen hypervisor */
13782+ .long ta(default_entry) /* Moorestown MID */
13783 num_subarch_entries = (. - subarch_entries) / 4
13784 .previous
13785 #endif /* CONFIG_PARAVIRT */
13786@@ -218,8 +283,11 @@ default_entry:
13787 movl %eax, pa(max_pfn_mapped)
13788
13789 /* Do early initialization of the fixmap area */
13790- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
13791- movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
13792+#ifdef CONFIG_COMPAT_VDSO
13793+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
13794+#else
13795+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
13796+#endif
13797 #else /* Not PAE */
13798
13799 page_pde_offset = (__PAGE_OFFSET >> 20);
13800@@ -249,8 +317,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
13801 movl %eax, pa(max_pfn_mapped)
13802
13803 /* Do early initialization of the fixmap area */
13804- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
13805- movl %eax,pa(swapper_pg_dir+0xffc)
13806+#ifdef CONFIG_COMPAT_VDSO
13807+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
13808+#else
13809+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
13810+#endif
13811 #endif
13812 jmp 3f
13813 /*
13814@@ -297,6 +368,7 @@ ENTRY(startup_32_smp)
13815 orl %edx,%eax
13816 movl %eax,%cr4
13817
13818+#ifdef CONFIG_X86_PAE
13819 btl $5, %eax # check if PAE is enabled
13820 jnc 6f
13821
13822@@ -312,13 +384,17 @@ ENTRY(startup_32_smp)
13823 jnc 6f
13824
13825 /* Setup EFER (Extended Feature Enable Register) */
13826- movl $0xc0000080, %ecx
13827+ movl $MSR_EFER, %ecx
13828 rdmsr
13829
13830 btsl $11, %eax
13831 /* Make changes effective */
13832 wrmsr
13833
13834+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
13835+ movl $1,pa(nx_enabled)
13836+#endif
13837+
13838 6:
13839
13840 /*
13841@@ -344,9 +420,7 @@ ENTRY(startup_32_smp)
13842
13843 #ifdef CONFIG_SMP
13844 cmpb $0, ready
13845- jz 1f /* Initial CPU cleans BSS */
13846- jmp checkCPUtype
13847-1:
13848+ jnz checkCPUtype /* Initial CPU cleans BSS */
13849 #endif /* CONFIG_SMP */
13850
13851 /*
13852@@ -424,7 +498,7 @@ is386: movl $2,%ecx # set MP
13853 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
13854 movl %eax,%ss # after changing gdt.
13855
13856- movl $(__USER_DS),%eax # DS/ES contains default USER segment
13857+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
13858 movl %eax,%ds
13859 movl %eax,%es
13860
13861@@ -438,15 +512,22 @@ is386: movl $2,%ecx # set MP
13862 */
13863 cmpb $0,ready
13864 jne 1f
13865- movl $per_cpu__gdt_page,%eax
13866+ movl $cpu_gdt_table,%eax
13867 movl $per_cpu__stack_canary,%ecx
13868+#ifdef CONFIG_SMP
13869+ addl $__per_cpu_load,%ecx
13870+#endif
13871 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
13872 shrl $16, %ecx
13873 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
13874 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
13875 1:
13876-#endif
13877 movl $(__KERNEL_STACK_CANARY),%eax
13878+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
13879+ movl $(__USER_DS),%eax
13880+#else
13881+ xorl %eax,%eax
13882+#endif
13883 movl %eax,%gs
13884
13885 xorl %eax,%eax # Clear LDT
13886@@ -457,10 +538,6 @@ is386: movl $2,%ecx # set MP
13887 #ifdef CONFIG_SMP
13888 movb ready, %cl
13889 movb $1, ready
13890- cmpb $0,%cl # the first CPU calls start_kernel
13891- je 1f
13892- movl (stack_start), %esp
13893-1:
13894 #endif /* CONFIG_SMP */
13895 jmp *(initial_code)
13896
13897@@ -546,22 +623,22 @@ early_page_fault:
13898 jmp early_fault
13899
13900 early_fault:
13901- cld
13902 #ifdef CONFIG_PRINTK
13903+ cmpl $1,%ss:early_recursion_flag
13904+ je hlt_loop
13905+ incl %ss:early_recursion_flag
13906+ cld
13907 pusha
13908 movl $(__KERNEL_DS),%eax
13909 movl %eax,%ds
13910 movl %eax,%es
13911- cmpl $2,early_recursion_flag
13912- je hlt_loop
13913- incl early_recursion_flag
13914 movl %cr2,%eax
13915 pushl %eax
13916 pushl %edx /* trapno */
13917 pushl $fault_msg
13918 call printk
13919+; call dump_stack
13920 #endif
13921- call dump_stack
13922 hlt_loop:
13923 hlt
13924 jmp hlt_loop
13925@@ -569,8 +646,11 @@ hlt_loop:
13926 /* This is the default interrupt "handler" :-) */
13927 ALIGN
13928 ignore_int:
13929- cld
13930 #ifdef CONFIG_PRINTK
13931+ cmpl $2,%ss:early_recursion_flag
13932+ je hlt_loop
13933+ incl %ss:early_recursion_flag
13934+ cld
13935 pushl %eax
13936 pushl %ecx
13937 pushl %edx
13938@@ -579,9 +659,6 @@ ignore_int:
13939 movl $(__KERNEL_DS),%eax
13940 movl %eax,%ds
13941 movl %eax,%es
13942- cmpl $2,early_recursion_flag
13943- je hlt_loop
13944- incl early_recursion_flag
13945 pushl 16(%esp)
13946 pushl 24(%esp)
13947 pushl 32(%esp)
13948@@ -610,31 +687,47 @@ ENTRY(initial_page_table)
13949 /*
13950 * BSS section
13951 */
13952-__PAGE_ALIGNED_BSS
13953- .align PAGE_SIZE_asm
13954 #ifdef CONFIG_X86_PAE
13955+.section .swapper_pg_pmd,"a",@progbits
13956 swapper_pg_pmd:
13957 .fill 1024*KPMDS,4,0
13958 #else
13959+.section .swapper_pg_dir,"a",@progbits
13960 ENTRY(swapper_pg_dir)
13961 .fill 1024,4,0
13962 #endif
13963+.section .swapper_pg_fixmap,"a",@progbits
13964 swapper_pg_fixmap:
13965 .fill 1024,4,0
13966 #ifdef CONFIG_X86_TRAMPOLINE
13967+.section .trampoline_pg_dir,"a",@progbits
13968 ENTRY(trampoline_pg_dir)
13969+#ifdef CONFIG_X86_PAE
13970+ .fill 4,8,0
13971+#else
13972 .fill 1024,4,0
13973 #endif
13974+#endif
13975+
13976+.section .empty_zero_page,"a",@progbits
13977 ENTRY(empty_zero_page)
13978 .fill 4096,1,0
13979
13980 /*
13981+ * The IDT has to be page-aligned to simplify the Pentium
13982+ * F0 0F bug workaround.. We have a special link segment
13983+ * for this.
13984+ */
13985+.section .idt,"a",@progbits
13986+ENTRY(idt_table)
13987+ .fill 256,8,0
13988+
13989+/*
13990 * This starts the data section.
13991 */
13992 #ifdef CONFIG_X86_PAE
13993-__PAGE_ALIGNED_DATA
13994- /* Page-aligned for the benefit of paravirt? */
13995- .align PAGE_SIZE_asm
13996+.section .swapper_pg_dir,"a",@progbits
13997+
13998 ENTRY(swapper_pg_dir)
13999 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
14000 # if KPMDS == 3
14001@@ -653,15 +746,24 @@ ENTRY(swapper_pg_dir)
14002 # error "Kernel PMDs should be 1, 2 or 3"
14003 # endif
14004 .align PAGE_SIZE_asm /* needs to be page-sized too */
14005+
14006+#ifdef CONFIG_PAX_PER_CPU_PGD
14007+ENTRY(cpu_pgd)
14008+ .rept NR_CPUS
14009+ .fill 4,8,0
14010+ .endr
14011+#endif
14012+
14013 #endif
14014
14015 .data
14016 ENTRY(stack_start)
14017- .long init_thread_union+THREAD_SIZE
14018+ .long init_thread_union+THREAD_SIZE-8
14019 .long __BOOT_DS
14020
14021 ready: .byte 0
14022
14023+.section .rodata,"a",@progbits
14024 early_recursion_flag:
14025 .long 0
14026
14027@@ -697,7 +799,7 @@ fault_msg:
14028 .word 0 # 32 bit align gdt_desc.address
14029 boot_gdt_descr:
14030 .word __BOOT_DS+7
14031- .long boot_gdt - __PAGE_OFFSET
14032+ .long pa(boot_gdt)
14033
14034 .word 0 # 32-bit align idt_desc.address
14035 idt_descr:
14036@@ -708,7 +810,7 @@ idt_descr:
14037 .word 0 # 32 bit align gdt_desc.address
14038 ENTRY(early_gdt_descr)
14039 .word GDT_ENTRIES*8-1
14040- .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
14041+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
14042
14043 /*
14044 * The boot_gdt must mirror the equivalent in setup.S and is
14045@@ -717,5 +819,65 @@ ENTRY(early_gdt_descr)
14046 .align L1_CACHE_BYTES
14047 ENTRY(boot_gdt)
14048 .fill GDT_ENTRY_BOOT_CS,8,0
14049- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
14050- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
14051+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
14052+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
14053+
14054+ .align PAGE_SIZE_asm
14055+ENTRY(cpu_gdt_table)
14056+ .rept NR_CPUS
14057+ .quad 0x0000000000000000 /* NULL descriptor */
14058+ .quad 0x0000000000000000 /* 0x0b reserved */
14059+ .quad 0x0000000000000000 /* 0x13 reserved */
14060+ .quad 0x0000000000000000 /* 0x1b reserved */
14061+
14062+#ifdef CONFIG_PAX_KERNEXEC
14063+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
14064+#else
14065+ .quad 0x0000000000000000 /* 0x20 unused */
14066+#endif
14067+
14068+ .quad 0x0000000000000000 /* 0x28 unused */
14069+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
14070+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
14071+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
14072+ .quad 0x0000000000000000 /* 0x4b reserved */
14073+ .quad 0x0000000000000000 /* 0x53 reserved */
14074+ .quad 0x0000000000000000 /* 0x5b reserved */
14075+
14076+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
14077+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
14078+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
14079+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
14080+
14081+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
14082+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
14083+
14084+ /*
14085+ * Segments used for calling PnP BIOS have byte granularity.
14086+ * The code segments and data segments have fixed 64k limits,
14087+ * the transfer segment sizes are set at run time.
14088+ */
14089+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
14090+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
14091+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
14092+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
14093+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
14094+
14095+ /*
14096+ * The APM segments have byte granularity and their bases
14097+ * are set at run time. All have 64k limits.
14098+ */
14099+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
14100+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
14101+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
14102+
14103+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
14104+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
14105+ .quad 0x0040910000000018 /* 0xe0 - STACK_CANARY */
14106+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
14107+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
14108+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
14109+
14110+ /* Be sure this is zeroed to avoid false validations in Xen */
14111+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
14112+ .endr
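
The per-CPU cpu_gdt_table added above is written as raw 8-byte descriptors such as 0x00cf9b000000ffff (kernel code) and 0x00cffb000000ffff (user code). The quad packs base, limit and attribute bits in the usual interleaved x86 layout, so a small standalone decoder makes the table easier to audit (plain C, needs only <stdio.h>/<stdint.h>):

    #include <stdint.h>
    #include <stdio.h>

    /* Split a raw x86 segment descriptor into base / limit / access / flags. */
    static void decode_desc(uint64_t d)
    {
            unsigned long limit  = (d & 0xffff) | ((d >> 32) & 0xf0000);
            unsigned long base   = ((d >> 16) & 0xffffff) | ((d >> 56) << 24);
            unsigned access = (d >> 40) & 0xff;   /* P, DPL, S, type */
            unsigned flags  = (d >> 52) & 0x0f;   /* G, D/B, L, AVL */

            printf("base=%#lx limit=%#lx access=%#x flags=%#x\n",
                   base, limit, access, flags);
    }

    int main(void)
    {
            decode_desc(0x00cf9b000000ffffULL);   /* 4GB kernel code, DPL 0 */
            decode_desc(0x00cffb000000ffffULL);   /* 4GB user code, DPL 3 */
            return 0;
    }

Note that the boot descriptors change from 0x00cf9a.../0x00cf92... to 0x00cf9b.../0x00cf93..., i.e. the accessed bit is pre-set, presumably so the CPU never needs to write it back once the GDT pages become read-only under KERNEXEC.
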
14113diff -urNp linux-2.6.32.41/arch/x86/kernel/head_64.S linux-2.6.32.41/arch/x86/kernel/head_64.S
14114--- linux-2.6.32.41/arch/x86/kernel/head_64.S 2011-03-27 14:31:47.000000000 -0400
14115+++ linux-2.6.32.41/arch/x86/kernel/head_64.S 2011-04-17 15:56:46.000000000 -0400
14116@@ -19,6 +19,7 @@
14117 #include <asm/cache.h>
14118 #include <asm/processor-flags.h>
14119 #include <asm/percpu.h>
14120+#include <asm/cpufeature.h>
14121
14122 #ifdef CONFIG_PARAVIRT
14123 #include <asm/asm-offsets.h>
14124@@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
14125 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
14126 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
14127 L3_START_KERNEL = pud_index(__START_KERNEL_map)
14128+L4_VMALLOC_START = pgd_index(VMALLOC_START)
14129+L3_VMALLOC_START = pud_index(VMALLOC_START)
14130+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
14131+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
14132
14133 .text
14134 __HEAD
14135@@ -85,35 +90,22 @@ startup_64:
14136 */
14137 addq %rbp, init_level4_pgt + 0(%rip)
14138 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
14139+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
14140+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
14141 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
14142
14143 addq %rbp, level3_ident_pgt + 0(%rip)
14144+#ifndef CONFIG_XEN
14145+ addq %rbp, level3_ident_pgt + 8(%rip)
14146+#endif
14147
14148- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
14149- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
14150+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
14151
14152- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14153+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
14154+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
14155
14156- /* Add an Identity mapping if I am above 1G */
14157- leaq _text(%rip), %rdi
14158- andq $PMD_PAGE_MASK, %rdi
14159-
14160- movq %rdi, %rax
14161- shrq $PUD_SHIFT, %rax
14162- andq $(PTRS_PER_PUD - 1), %rax
14163- jz ident_complete
14164-
14165- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
14166- leaq level3_ident_pgt(%rip), %rbx
14167- movq %rdx, 0(%rbx, %rax, 8)
14168-
14169- movq %rdi, %rax
14170- shrq $PMD_SHIFT, %rax
14171- andq $(PTRS_PER_PMD - 1), %rax
14172- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
14173- leaq level2_spare_pgt(%rip), %rbx
14174- movq %rdx, 0(%rbx, %rax, 8)
14175-ident_complete:
14176+ addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14177+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
14178
14179 /*
14180 * Fixup the kernel text+data virtual addresses. Note that
14181@@ -161,8 +153,8 @@ ENTRY(secondary_startup_64)
14182 * after the boot processor executes this code.
14183 */
14184
14185- /* Enable PAE mode and PGE */
14186- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
14187+ /* Enable PAE mode and PSE/PGE */
14188+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
14189 movq %rax, %cr4
14190
14191 /* Setup early boot stage 4 level pagetables. */
14192@@ -184,9 +176,13 @@ ENTRY(secondary_startup_64)
14193 movl $MSR_EFER, %ecx
14194 rdmsr
14195 btsl $_EFER_SCE, %eax /* Enable System Call */
14196- btl $20,%edi /* No Execute supported? */
14197+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
14198 jnc 1f
14199 btsl $_EFER_NX, %eax
14200+ leaq init_level4_pgt(%rip), %rdi
14201+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
14202+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
14203+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
14204 1: wrmsr /* Make changes effective */
14205
14206 /* Setup cr0 */
14207@@ -262,16 +258,16 @@ ENTRY(secondary_startup_64)
14208 .quad x86_64_start_kernel
14209 ENTRY(initial_gs)
14210 .quad INIT_PER_CPU_VAR(irq_stack_union)
14211- __FINITDATA
14212
14213 ENTRY(stack_start)
14214 .quad init_thread_union+THREAD_SIZE-8
14215 .word 0
14216+ __FINITDATA
14217
14218 bad_address:
14219 jmp bad_address
14220
14221- .section ".init.text","ax"
14222+ __INIT
14223 #ifdef CONFIG_EARLY_PRINTK
14224 .globl early_idt_handlers
14225 early_idt_handlers:
14226@@ -316,18 +312,23 @@ ENTRY(early_idt_handler)
14227 #endif /* EARLY_PRINTK */
14228 1: hlt
14229 jmp 1b
14230+ .previous
14231
14232 #ifdef CONFIG_EARLY_PRINTK
14233+ __INITDATA
14234 early_recursion_flag:
14235 .long 0
14236+ .previous
14237
14238+ .section .rodata,"a",@progbits
14239 early_idt_msg:
14240 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
14241 early_idt_ripmsg:
14242 .asciz "RIP %s\n"
14243-#endif /* CONFIG_EARLY_PRINTK */
14244 .previous
14245+#endif /* CONFIG_EARLY_PRINTK */
14246
14247+ .section .rodata,"a",@progbits
14248 #define NEXT_PAGE(name) \
14249 .balign PAGE_SIZE; \
14250 ENTRY(name)
14251@@ -350,13 +351,36 @@ NEXT_PAGE(init_level4_pgt)
14252 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14253 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
14254 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14255+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
14256+ .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
14257+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
14258+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
14259 .org init_level4_pgt + L4_START_KERNEL*8, 0
14260 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
14261 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
14262
14263+#ifdef CONFIG_PAX_PER_CPU_PGD
14264+NEXT_PAGE(cpu_pgd)
14265+ .rept NR_CPUS
14266+ .fill 512,8,0
14267+ .endr
14268+#endif
14269+
14270 NEXT_PAGE(level3_ident_pgt)
14271 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14272+#ifdef CONFIG_XEN
14273 .fill 511,8,0
14274+#else
14275+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
14276+ .fill 510,8,0
14277+#endif
14278+
14279+NEXT_PAGE(level3_vmalloc_pgt)
14280+ .fill 512,8,0
14281+
14282+NEXT_PAGE(level3_vmemmap_pgt)
14283+ .fill L3_VMEMMAP_START,8,0
14284+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
14285
14286 NEXT_PAGE(level3_kernel_pgt)
14287 .fill L3_START_KERNEL,8,0
14288@@ -364,20 +388,23 @@ NEXT_PAGE(level3_kernel_pgt)
14289 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
14290 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
14291
14292+NEXT_PAGE(level2_vmemmap_pgt)
14293+ .fill 512,8,0
14294+
14295 NEXT_PAGE(level2_fixmap_pgt)
14296- .fill 506,8,0
14297- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
14298- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
14299- .fill 5,8,0
14300+ .fill 507,8,0
14301+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
14302+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
14303+ .fill 4,8,0
14304
14305-NEXT_PAGE(level1_fixmap_pgt)
14306+NEXT_PAGE(level1_vsyscall_pgt)
14307 .fill 512,8,0
14308
14309-NEXT_PAGE(level2_ident_pgt)
14310- /* Since I easily can, map the first 1G.
14311+ /* Since I easily can, map the first 2G.
14312 * Don't set NX because code runs from these pages.
14313 */
14314- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
14315+NEXT_PAGE(level2_ident_pgt)
14316+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
14317
14318 NEXT_PAGE(level2_kernel_pgt)
14319 /*
14320@@ -390,33 +417,55 @@ NEXT_PAGE(level2_kernel_pgt)
14321 * If you want to increase this then increase MODULES_VADDR
14322 * too.)
14323 */
14324- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
14325- KERNEL_IMAGE_SIZE/PMD_SIZE)
14326-
14327-NEXT_PAGE(level2_spare_pgt)
14328- .fill 512, 8, 0
14329+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
14330
14331 #undef PMDS
14332 #undef NEXT_PAGE
14333
14334- .data
14335+ .align PAGE_SIZE
14336+ENTRY(cpu_gdt_table)
14337+ .rept NR_CPUS
14338+ .quad 0x0000000000000000 /* NULL descriptor */
14339+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
14340+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
14341+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
14342+ .quad 0x00cffb000000ffff /* __USER32_CS */
14343+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
14344+ .quad 0x00affb000000ffff /* __USER_CS */
14345+
14346+#ifdef CONFIG_PAX_KERNEXEC
14347+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
14348+#else
14349+ .quad 0x0 /* unused */
14350+#endif
14351+
14352+ .quad 0,0 /* TSS */
14353+ .quad 0,0 /* LDT */
14354+ .quad 0,0,0 /* three TLS descriptors */
14355+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
14356+ /* asm/segment.h:GDT_ENTRIES must match this */
14357+
14358+ /* zero the remaining page */
14359+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
14360+ .endr
14361+
14362 .align 16
14363 .globl early_gdt_descr
14364 early_gdt_descr:
14365 .word GDT_ENTRIES*8-1
14366 early_gdt_descr_base:
14367- .quad INIT_PER_CPU_VAR(gdt_page)
14368+ .quad cpu_gdt_table
14369
14370 ENTRY(phys_base)
14371 /* This must match the first entry in level2_kernel_pgt */
14372 .quad 0x0000000000000000
14373
14374 #include "../../x86/xen/xen-head.S"
14375-
14376- .section .bss, "aw", @nobits
14377+
14378+ .section .rodata,"a",@progbits
14379 .align L1_CACHE_BYTES
14380 ENTRY(idt_table)
14381- .skip IDT_ENTRIES * 16
14382+ .fill 512,8,0
14383
14384 __PAGE_ALIGNED_BSS
14385 .align PAGE_SIZE
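
Besides splitting out dedicated vmalloc/vmemmap top-level entries and a per-CPU cpu_gdt_table, the head_64.S hunk sets the no-execute bit directly on the direct-map, vmalloc and vmemmap PGD slots once the CPU advertises NX (the btsq $_PAGE_BIT_NX instructions). _PAGE_BIT_NX is bit 63 of a 64-bit paging entry, so the C equivalent is a one-liner (sketch; the constant name mirrors the kernel's, the helper is ours):

    #include <stdint.h>

    #define _PAGE_BIT_NX_SKETCH 63   /* NX lives in the top bit of a PTE/PGD */

    static inline void set_nx_sketch(uint64_t *entry)
    {
            *entry |= 1ULL << _PAGE_BIT_NX_SKETCH;
    }
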
14386diff -urNp linux-2.6.32.41/arch/x86/kernel/i386_ksyms_32.c linux-2.6.32.41/arch/x86/kernel/i386_ksyms_32.c
14387--- linux-2.6.32.41/arch/x86/kernel/i386_ksyms_32.c 2011-03-27 14:31:47.000000000 -0400
14388+++ linux-2.6.32.41/arch/x86/kernel/i386_ksyms_32.c 2011-04-17 15:56:46.000000000 -0400
14389@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
14390 EXPORT_SYMBOL(cmpxchg8b_emu);
14391 #endif
14392
14393+EXPORT_SYMBOL_GPL(cpu_gdt_table);
14394+
14395 /* Networking helper routines. */
14396 EXPORT_SYMBOL(csum_partial_copy_generic);
14397+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
14398+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
14399
14400 EXPORT_SYMBOL(__get_user_1);
14401 EXPORT_SYMBOL(__get_user_2);
14402@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
14403
14404 EXPORT_SYMBOL(csum_partial);
14405 EXPORT_SYMBOL(empty_zero_page);
14406+
14407+#ifdef CONFIG_PAX_KERNEXEC
14408+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
14409+#endif
14410diff -urNp linux-2.6.32.41/arch/x86/kernel/i8259.c linux-2.6.32.41/arch/x86/kernel/i8259.c
14411--- linux-2.6.32.41/arch/x86/kernel/i8259.c 2011-03-27 14:31:47.000000000 -0400
14412+++ linux-2.6.32.41/arch/x86/kernel/i8259.c 2011-05-04 17:56:28.000000000 -0400
14413@@ -208,7 +208,7 @@ spurious_8259A_irq:
14414 "spurious 8259A interrupt: IRQ%d.\n", irq);
14415 spurious_irq_mask |= irqmask;
14416 }
14417- atomic_inc(&irq_err_count);
14418+ atomic_inc_unchecked(&irq_err_count);
14419 /*
14420 * Theoretically we do not have to handle this IRQ,
14421 * but in Linux this does not cause problems and is
14422diff -urNp linux-2.6.32.41/arch/x86/kernel/init_task.c linux-2.6.32.41/arch/x86/kernel/init_task.c
14423--- linux-2.6.32.41/arch/x86/kernel/init_task.c 2011-03-27 14:31:47.000000000 -0400
14424+++ linux-2.6.32.41/arch/x86/kernel/init_task.c 2011-04-17 15:56:46.000000000 -0400
14425@@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
14426 * way process stacks are handled. This is done by having a special
14427 * "init_task" linker map entry..
14428 */
14429-union thread_union init_thread_union __init_task_data =
14430- { INIT_THREAD_INFO(init_task) };
14431+union thread_union init_thread_union __init_task_data;
14432
14433 /*
14434 * Initial task structure.
14435@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
14436 * section. Since TSS's are completely CPU-local, we want them
14437 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
14438 */
14439-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
14440-
14441+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
14442+EXPORT_SYMBOL(init_tss);
14443diff -urNp linux-2.6.32.41/arch/x86/kernel/ioport.c linux-2.6.32.41/arch/x86/kernel/ioport.c
14444--- linux-2.6.32.41/arch/x86/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
14445+++ linux-2.6.32.41/arch/x86/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
14446@@ -6,6 +6,7 @@
14447 #include <linux/sched.h>
14448 #include <linux/kernel.h>
14449 #include <linux/capability.h>
14450+#include <linux/security.h>
14451 #include <linux/errno.h>
14452 #include <linux/types.h>
14453 #include <linux/ioport.h>
14454@@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long
14455
14456 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
14457 return -EINVAL;
14458+#ifdef CONFIG_GRKERNSEC_IO
14459+ if (turn_on && grsec_disable_privio) {
14460+ gr_handle_ioperm();
14461+ return -EPERM;
14462+ }
14463+#endif
14464 if (turn_on && !capable(CAP_SYS_RAWIO))
14465 return -EPERM;
14466
14467@@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long
14468 * because the ->io_bitmap_max value must match the bitmap
14469 * contents:
14470 */
14471- tss = &per_cpu(init_tss, get_cpu());
14472+ tss = init_tss + get_cpu();
14473
14474 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
14475
14476@@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, s
14477 return -EINVAL;
14478 /* Trying to gain more privileges? */
14479 if (level > old) {
14480+#ifdef CONFIG_GRKERNSEC_IO
14481+ if (grsec_disable_privio) {
14482+ gr_handle_iopl();
14483+ return -EPERM;
14484+ }
14485+#endif
14486 if (!capable(CAP_SYS_RAWIO))
14487 return -EPERM;
14488 }
14489diff -urNp linux-2.6.32.41/arch/x86/kernel/irq_32.c linux-2.6.32.41/arch/x86/kernel/irq_32.c
14490--- linux-2.6.32.41/arch/x86/kernel/irq_32.c 2011-03-27 14:31:47.000000000 -0400
14491+++ linux-2.6.32.41/arch/x86/kernel/irq_32.c 2011-04-23 13:26:46.000000000 -0400
14492@@ -35,7 +35,7 @@ static int check_stack_overflow(void)
14493 __asm__ __volatile__("andl %%esp,%0" :
14494 "=r" (sp) : "0" (THREAD_SIZE - 1));
14495
14496- return sp < (sizeof(struct thread_info) + STACK_WARN);
14497+ return sp < STACK_WARN;
14498 }
14499
14500 static void print_stack_overflow(void)
14501@@ -54,9 +54,9 @@ static inline void print_stack_overflow(
14502 * per-CPU IRQ handling contexts (thread information and stack)
14503 */
14504 union irq_ctx {
14505- struct thread_info tinfo;
14506- u32 stack[THREAD_SIZE/sizeof(u32)];
14507-} __attribute__((aligned(PAGE_SIZE)));
14508+ unsigned long previous_esp;
14509+ u32 stack[THREAD_SIZE/sizeof(u32)];
14510+} __attribute__((aligned(THREAD_SIZE)));
14511
14512 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
14513 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
14514@@ -78,10 +78,9 @@ static void call_on_stack(void *func, vo
14515 static inline int
14516 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
14517 {
14518- union irq_ctx *curctx, *irqctx;
14519+ union irq_ctx *irqctx;
14520 u32 *isp, arg1, arg2;
14521
14522- curctx = (union irq_ctx *) current_thread_info();
14523 irqctx = __get_cpu_var(hardirq_ctx);
14524
14525 /*
14526@@ -90,21 +89,17 @@ execute_on_irq_stack(int overflow, struc
14527 * handler) we can't do that and just have to keep using the
14528 * current stack (which is the irq stack already after all)
14529 */
14530- if (unlikely(curctx == irqctx))
14531+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
14532 return 0;
14533
14534 /* build the stack frame on the IRQ stack */
14535- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
14536- irqctx->tinfo.task = curctx->tinfo.task;
14537- irqctx->tinfo.previous_esp = current_stack_pointer;
14538+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
14539+ irqctx->previous_esp = current_stack_pointer;
14540+ add_preempt_count(HARDIRQ_OFFSET);
14541
14542- /*
14543- * Copy the softirq bits in preempt_count so that the
14544- * softirq checks work in the hardirq context.
14545- */
14546- irqctx->tinfo.preempt_count =
14547- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
14548- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
14549+#ifdef CONFIG_PAX_MEMORY_UDEREF
14550+ __set_fs(MAKE_MM_SEG(0));
14551+#endif
14552
14553 if (unlikely(overflow))
14554 call_on_stack(print_stack_overflow, isp);
14555@@ -116,6 +111,12 @@ execute_on_irq_stack(int overflow, struc
14556 : "0" (irq), "1" (desc), "2" (isp),
14557 "D" (desc->handle_irq)
14558 : "memory", "cc", "ecx");
14559+
14560+#ifdef CONFIG_PAX_MEMORY_UDEREF
14561+ __set_fs(current_thread_info()->addr_limit);
14562+#endif
14563+
14564+ sub_preempt_count(HARDIRQ_OFFSET);
14565 return 1;
14566 }
14567
14568@@ -124,28 +125,11 @@ execute_on_irq_stack(int overflow, struc
14569 */
14570 void __cpuinit irq_ctx_init(int cpu)
14571 {
14572- union irq_ctx *irqctx;
14573-
14574 if (per_cpu(hardirq_ctx, cpu))
14575 return;
14576
14577- irqctx = &per_cpu(hardirq_stack, cpu);
14578- irqctx->tinfo.task = NULL;
14579- irqctx->tinfo.exec_domain = NULL;
14580- irqctx->tinfo.cpu = cpu;
14581- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
14582- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
14583-
14584- per_cpu(hardirq_ctx, cpu) = irqctx;
14585-
14586- irqctx = &per_cpu(softirq_stack, cpu);
14587- irqctx->tinfo.task = NULL;
14588- irqctx->tinfo.exec_domain = NULL;
14589- irqctx->tinfo.cpu = cpu;
14590- irqctx->tinfo.preempt_count = 0;
14591- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
14592-
14593- per_cpu(softirq_ctx, cpu) = irqctx;
14594+ per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
14595+ per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
14596
14597 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
14598 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
14599@@ -159,7 +143,6 @@ void irq_ctx_exit(int cpu)
14600 asmlinkage void do_softirq(void)
14601 {
14602 unsigned long flags;
14603- struct thread_info *curctx;
14604 union irq_ctx *irqctx;
14605 u32 *isp;
14606
14607@@ -169,15 +152,22 @@ asmlinkage void do_softirq(void)
14608 local_irq_save(flags);
14609
14610 if (local_softirq_pending()) {
14611- curctx = current_thread_info();
14612 irqctx = __get_cpu_var(softirq_ctx);
14613- irqctx->tinfo.task = curctx->task;
14614- irqctx->tinfo.previous_esp = current_stack_pointer;
14615+ irqctx->previous_esp = current_stack_pointer;
14616
14617 /* build the stack frame on the softirq stack */
14618- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
14619+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
14620+
14621+#ifdef CONFIG_PAX_MEMORY_UDEREF
14622+ __set_fs(MAKE_MM_SEG(0));
14623+#endif
14624
14625 call_on_stack(__do_softirq, isp);
14626+
14627+#ifdef CONFIG_PAX_MEMORY_UDEREF
14628+ __set_fs(current_thread_info()->addr_limit);
14629+#endif
14630+
14631 /*
14632 * Shouldnt happen, we returned above if in_interrupt():
14633 */
14634diff -urNp linux-2.6.32.41/arch/x86/kernel/irq.c linux-2.6.32.41/arch/x86/kernel/irq.c
14635--- linux-2.6.32.41/arch/x86/kernel/irq.c 2011-03-27 14:31:47.000000000 -0400
14636+++ linux-2.6.32.41/arch/x86/kernel/irq.c 2011-05-04 17:56:28.000000000 -0400
14637@@ -15,7 +15,7 @@
14638 #include <asm/mce.h>
14639 #include <asm/hw_irq.h>
14640
14641-atomic_t irq_err_count;
14642+atomic_unchecked_t irq_err_count;
14643
14644 /* Function pointer for generic interrupt vector handling */
14645 void (*generic_interrupt_extension)(void) = NULL;
14646@@ -114,9 +114,9 @@ static int show_other_interrupts(struct
14647 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
14648 seq_printf(p, " Machine check polls\n");
14649 #endif
14650- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
14651+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
14652 #if defined(CONFIG_X86_IO_APIC)
14653- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
14654+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
14655 #endif
14656 return 0;
14657 }
14658@@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
14659
14660 u64 arch_irq_stat(void)
14661 {
14662- u64 sum = atomic_read(&irq_err_count);
14663+ u64 sum = atomic_read_unchecked(&irq_err_count);
14664
14665 #ifdef CONFIG_X86_IO_APIC
14666- sum += atomic_read(&irq_mis_count);
14667+ sum += atomic_read_unchecked(&irq_mis_count);
14668 #endif
14669 return sum;
14670 }
14671diff -urNp linux-2.6.32.41/arch/x86/kernel/kgdb.c linux-2.6.32.41/arch/x86/kernel/kgdb.c
14672--- linux-2.6.32.41/arch/x86/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
14673+++ linux-2.6.32.41/arch/x86/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
14674@@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vec
14675
14676 /* clear the trace bit */
14677 linux_regs->flags &= ~X86_EFLAGS_TF;
14678- atomic_set(&kgdb_cpu_doing_single_step, -1);
14679+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
14680
14681 /* set the trace bit if we're stepping */
14682 if (remcomInBuffer[0] == 's') {
14683 linux_regs->flags |= X86_EFLAGS_TF;
14684 kgdb_single_step = 1;
14685- atomic_set(&kgdb_cpu_doing_single_step,
14686+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
14687 raw_smp_processor_id());
14688 }
14689
14690@@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args
14691 break;
14692
14693 case DIE_DEBUG:
14694- if (atomic_read(&kgdb_cpu_doing_single_step) ==
14695+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
14696 raw_smp_processor_id()) {
14697 if (user_mode(regs))
14698 return single_step_cont(regs, args);
14699@@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception
14700 return instruction_pointer(regs);
14701 }
14702
14703-struct kgdb_arch arch_kgdb_ops = {
14704+const struct kgdb_arch arch_kgdb_ops = {
14705 /* Breakpoint instruction: */
14706 .gdb_bpt_instr = { 0xcc },
14707 .flags = KGDB_HW_BREAKPOINT,
14708diff -urNp linux-2.6.32.41/arch/x86/kernel/kprobes.c linux-2.6.32.41/arch/x86/kernel/kprobes.c
14709--- linux-2.6.32.41/arch/x86/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
14710+++ linux-2.6.32.41/arch/x86/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
14711@@ -166,9 +166,13 @@ static void __kprobes set_jmp_op(void *f
14712 char op;
14713 s32 raddr;
14714 } __attribute__((packed)) * jop;
14715- jop = (struct __arch_jmp_op *)from;
14716+
14717+ jop = (struct __arch_jmp_op *)(ktla_ktva(from));
14718+
14719+ pax_open_kernel();
14720 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
14721 jop->op = RELATIVEJUMP_INSTRUCTION;
14722+ pax_close_kernel();
14723 }
14724
14725 /*
14726@@ -193,7 +197,7 @@ static int __kprobes can_boost(kprobe_op
14727 kprobe_opcode_t opcode;
14728 kprobe_opcode_t *orig_opcodes = opcodes;
14729
14730- if (search_exception_tables((unsigned long)opcodes))
14731+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
14732 return 0; /* Page fault may occur on this address. */
14733
14734 retry:
14735@@ -337,7 +341,9 @@ static void __kprobes fix_riprel(struct
14736 disp = (u8 *) p->addr + *((s32 *) insn) -
14737 (u8 *) p->ainsn.insn;
14738 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
14739+ pax_open_kernel();
14740 *(s32 *)insn = (s32) disp;
14741+ pax_close_kernel();
14742 }
14743 }
14744 #endif
14745@@ -345,16 +351,18 @@ static void __kprobes fix_riprel(struct
14746
14747 static void __kprobes arch_copy_kprobe(struct kprobe *p)
14748 {
14749- memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
14750+ pax_open_kernel();
14751+ memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
14752+ pax_close_kernel();
14753
14754 fix_riprel(p);
14755
14756- if (can_boost(p->addr))
14757+ if (can_boost(ktla_ktva(p->addr)))
14758 p->ainsn.boostable = 0;
14759 else
14760 p->ainsn.boostable = -1;
14761
14762- p->opcode = *p->addr;
14763+ p->opcode = *(ktla_ktva(p->addr));
14764 }
14765
14766 int __kprobes arch_prepare_kprobe(struct kprobe *p)
14767@@ -432,7 +440,7 @@ static void __kprobes prepare_singlestep
14768 if (p->opcode == BREAKPOINT_INSTRUCTION)
14769 regs->ip = (unsigned long)p->addr;
14770 else
14771- regs->ip = (unsigned long)p->ainsn.insn;
14772+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
14773 }
14774
14775 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
14776@@ -453,7 +461,7 @@ static void __kprobes setup_singlestep(s
14777 if (p->ainsn.boostable == 1 && !p->post_handler) {
14778 /* Boost up -- we can execute copied instructions directly */
14779 reset_current_kprobe();
14780- regs->ip = (unsigned long)p->ainsn.insn;
14781+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
14782 preempt_enable_no_resched();
14783 return;
14784 }
14785@@ -523,7 +531,7 @@ static int __kprobes kprobe_handler(stru
14786 struct kprobe_ctlblk *kcb;
14787
14788 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
14789- if (*addr != BREAKPOINT_INSTRUCTION) {
14790+ if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
14791 /*
14792 * The breakpoint instruction was removed right
14793 * after we hit it. Another cpu has removed
14794@@ -775,7 +783,7 @@ static void __kprobes resume_execution(s
14795 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
14796 {
14797 unsigned long *tos = stack_addr(regs);
14798- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
14799+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
14800 unsigned long orig_ip = (unsigned long)p->addr;
14801 kprobe_opcode_t *insn = p->ainsn.insn;
14802
14803@@ -958,7 +966,7 @@ int __kprobes kprobe_exceptions_notify(s
14804 struct die_args *args = data;
14805 int ret = NOTIFY_DONE;
14806
14807- if (args->regs && user_mode_vm(args->regs))
14808+ if (args->regs && user_mode(args->regs))
14809 return ret;
14810
14811 switch (val) {
14812diff -urNp linux-2.6.32.41/arch/x86/kernel/ldt.c linux-2.6.32.41/arch/x86/kernel/ldt.c
14813--- linux-2.6.32.41/arch/x86/kernel/ldt.c 2011-03-27 14:31:47.000000000 -0400
14814+++ linux-2.6.32.41/arch/x86/kernel/ldt.c 2011-04-17 15:56:46.000000000 -0400
14815@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, i
14816 if (reload) {
14817 #ifdef CONFIG_SMP
14818 preempt_disable();
14819- load_LDT(pc);
14820+ load_LDT_nolock(pc);
14821 if (!cpumask_equal(mm_cpumask(current->mm),
14822 cpumask_of(smp_processor_id())))
14823 smp_call_function(flush_ldt, current->mm, 1);
14824 preempt_enable();
14825 #else
14826- load_LDT(pc);
14827+ load_LDT_nolock(pc);
14828 #endif
14829 }
14830 if (oldsize) {
14831@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t
14832 return err;
14833
14834 for (i = 0; i < old->size; i++)
14835- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
14836+ write_ldt_entry(new->ldt, i, old->ldt + i);
14837 return 0;
14838 }
14839
14840@@ -115,6 +115,24 @@ int init_new_context(struct task_struct
14841 retval = copy_ldt(&mm->context, &old_mm->context);
14842 mutex_unlock(&old_mm->context.lock);
14843 }
14844+
14845+ if (tsk == current) {
14846+ mm->context.vdso = 0;
14847+
14848+#ifdef CONFIG_X86_32
14849+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
14850+ mm->context.user_cs_base = 0UL;
14851+ mm->context.user_cs_limit = ~0UL;
14852+
14853+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
14854+ cpus_clear(mm->context.cpu_user_cs_mask);
14855+#endif
14856+
14857+#endif
14858+#endif
14859+
14860+ }
14861+
14862 return retval;
14863 }
14864
14865@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, u
14866 }
14867 }
14868
14869+#ifdef CONFIG_PAX_SEGMEXEC
14870+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
14871+ error = -EINVAL;
14872+ goto out_unlock;
14873+ }
14874+#endif
14875+
14876 fill_ldt(&ldt, &ldt_info);
14877 if (oldmode)
14878 ldt.avl = 0;
14879diff -urNp linux-2.6.32.41/arch/x86/kernel/machine_kexec_32.c linux-2.6.32.41/arch/x86/kernel/machine_kexec_32.c
14880--- linux-2.6.32.41/arch/x86/kernel/machine_kexec_32.c 2011-03-27 14:31:47.000000000 -0400
14881+++ linux-2.6.32.41/arch/x86/kernel/machine_kexec_32.c 2011-04-17 15:56:46.000000000 -0400
14882@@ -26,7 +26,7 @@
14883 #include <asm/system.h>
14884 #include <asm/cacheflush.h>
14885
14886-static void set_idt(void *newidt, __u16 limit)
14887+static void set_idt(struct desc_struct *newidt, __u16 limit)
14888 {
14889 struct desc_ptr curidt;
14890
14891@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16
14892 }
14893
14894
14895-static void set_gdt(void *newgdt, __u16 limit)
14896+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
14897 {
14898 struct desc_ptr curgdt;
14899
14900@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
14901 }
14902
14903 control_page = page_address(image->control_code_page);
14904- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
14905+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
14906
14907 relocate_kernel_ptr = control_page;
14908 page_list[PA_CONTROL_PAGE] = __pa(control_page);
14909diff -urNp linux-2.6.32.41/arch/x86/kernel/microcode_amd.c linux-2.6.32.41/arch/x86/kernel/microcode_amd.c
14910--- linux-2.6.32.41/arch/x86/kernel/microcode_amd.c 2011-04-17 17:00:52.000000000 -0400
14911+++ linux-2.6.32.41/arch/x86/kernel/microcode_amd.c 2011-04-17 17:03:05.000000000 -0400
14912@@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int c
14913 uci->mc = NULL;
14914 }
14915
14916-static struct microcode_ops microcode_amd_ops = {
14917+static const struct microcode_ops microcode_amd_ops = {
14918 .request_microcode_user = request_microcode_user,
14919 .request_microcode_fw = request_microcode_fw,
14920 .collect_cpu_info = collect_cpu_info_amd,
14921@@ -372,7 +372,7 @@ static struct microcode_ops microcode_am
14922 .microcode_fini_cpu = microcode_fini_cpu_amd,
14923 };
14924
14925-struct microcode_ops * __init init_amd_microcode(void)
14926+const struct microcode_ops * __init init_amd_microcode(void)
14927 {
14928 return &microcode_amd_ops;
14929 }
14930diff -urNp linux-2.6.32.41/arch/x86/kernel/microcode_core.c linux-2.6.32.41/arch/x86/kernel/microcode_core.c
14931--- linux-2.6.32.41/arch/x86/kernel/microcode_core.c 2011-03-27 14:31:47.000000000 -0400
14932+++ linux-2.6.32.41/arch/x86/kernel/microcode_core.c 2011-04-17 15:56:46.000000000 -0400
14933@@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
14934
14935 #define MICROCODE_VERSION "2.00"
14936
14937-static struct microcode_ops *microcode_ops;
14938+static const struct microcode_ops *microcode_ops;
14939
14940 /*
14941 * Synchronization.
14942diff -urNp linux-2.6.32.41/arch/x86/kernel/microcode_intel.c linux-2.6.32.41/arch/x86/kernel/microcode_intel.c
14943--- linux-2.6.32.41/arch/x86/kernel/microcode_intel.c 2011-03-27 14:31:47.000000000 -0400
14944+++ linux-2.6.32.41/arch/x86/kernel/microcode_intel.c 2011-04-17 15:56:46.000000000 -0400
14945@@ -443,13 +443,13 @@ static enum ucode_state request_microcod
14946
14947 static int get_ucode_user(void *to, const void *from, size_t n)
14948 {
14949- return copy_from_user(to, from, n);
14950+ return copy_from_user(to, (__force const void __user *)from, n);
14951 }
14952
14953 static enum ucode_state
14954 request_microcode_user(int cpu, const void __user *buf, size_t size)
14955 {
14956- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
14957+ return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
14958 }
14959
14960 static void microcode_fini_cpu(int cpu)
14961@@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
14962 uci->mc = NULL;
14963 }
14964
14965-static struct microcode_ops microcode_intel_ops = {
14966+static const struct microcode_ops microcode_intel_ops = {
14967 .request_microcode_user = request_microcode_user,
14968 .request_microcode_fw = request_microcode_fw,
14969 .collect_cpu_info = collect_cpu_info,
14970@@ -468,7 +468,7 @@ static struct microcode_ops microcode_in
14971 .microcode_fini_cpu = microcode_fini_cpu,
14972 };
14973
14974-struct microcode_ops * __init init_intel_microcode(void)
14975+const struct microcode_ops * __init init_intel_microcode(void)
14976 {
14977 return &microcode_intel_ops;
14978 }
14979diff -urNp linux-2.6.32.41/arch/x86/kernel/module.c linux-2.6.32.41/arch/x86/kernel/module.c
14980--- linux-2.6.32.41/arch/x86/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
14981+++ linux-2.6.32.41/arch/x86/kernel/module.c 2011-04-17 15:56:46.000000000 -0400
14982@@ -34,7 +34,7 @@
14983 #define DEBUGP(fmt...)
14984 #endif
14985
14986-void *module_alloc(unsigned long size)
14987+static void *__module_alloc(unsigned long size, pgprot_t prot)
14988 {
14989 struct vm_struct *area;
14990
14991@@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
14992 if (!area)
14993 return NULL;
14994
14995- return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
14996- PAGE_KERNEL_EXEC);
14997+ return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
14998+}
14999+
15000+void *module_alloc(unsigned long size)
15001+{
15002+
15003+#ifdef CONFIG_PAX_KERNEXEC
15004+ return __module_alloc(size, PAGE_KERNEL);
15005+#else
15006+ return __module_alloc(size, PAGE_KERNEL_EXEC);
15007+#endif
15008+
15009 }
15010
15011 /* Free memory returned from module_alloc */
15012@@ -58,6 +68,40 @@ void module_free(struct module *mod, voi
15013 vfree(module_region);
15014 }
15015
15016+#ifdef CONFIG_PAX_KERNEXEC
15017+#ifdef CONFIG_X86_32
15018+void *module_alloc_exec(unsigned long size)
15019+{
15020+ struct vm_struct *area;
15021+
15022+ if (size == 0)
15023+ return NULL;
15024+
15025+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
15026+ return area ? area->addr : NULL;
15027+}
15028+EXPORT_SYMBOL(module_alloc_exec);
15029+
15030+void module_free_exec(struct module *mod, void *module_region)
15031+{
15032+ vunmap(module_region);
15033+}
15034+EXPORT_SYMBOL(module_free_exec);
15035+#else
15036+void module_free_exec(struct module *mod, void *module_region)
15037+{
15038+ module_free(mod, module_region);
15039+}
15040+EXPORT_SYMBOL(module_free_exec);
15041+
15042+void *module_alloc_exec(unsigned long size)
15043+{
15044+ return __module_alloc(size, PAGE_KERNEL_RX);
15045+}
15046+EXPORT_SYMBOL(module_alloc_exec);
15047+#endif
15048+#endif
15049+
15050 /* We don't need anything special. */
15051 int module_frob_arch_sections(Elf_Ehdr *hdr,
15052 Elf_Shdr *sechdrs,
15053@@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15054 unsigned int i;
15055 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
15056 Elf32_Sym *sym;
15057- uint32_t *location;
15058+ uint32_t *plocation, location;
15059
15060 DEBUGP("Applying relocate section %u to %u\n", relsec,
15061 sechdrs[relsec].sh_info);
15062 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
15063 /* This is where to make the change */
15064- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
15065- + rel[i].r_offset;
15066+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
15067+ location = (uint32_t)plocation;
15068+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
15069+ plocation = ktla_ktva((void *)plocation);
15070 /* This is the symbol it is referring to. Note that all
15071 undefined symbols have been resolved. */
15072 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
15073@@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15074 switch (ELF32_R_TYPE(rel[i].r_info)) {
15075 case R_386_32:
15076 /* We add the value into the location given */
15077- *location += sym->st_value;
15078+ pax_open_kernel();
15079+ *plocation += sym->st_value;
15080+ pax_close_kernel();
15081 break;
15082 case R_386_PC32:
15083 /* Add the value, subtract its postition */
15084- *location += sym->st_value - (uint32_t)location;
15085+ pax_open_kernel();
15086+ *plocation += sym->st_value - location;
15087+ pax_close_kernel();
15088 break;
15089 default:
15090 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
15091@@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
15092 case R_X86_64_NONE:
15093 break;
15094 case R_X86_64_64:
15095+ pax_open_kernel();
15096 *(u64 *)loc = val;
15097+ pax_close_kernel();
15098 break;
15099 case R_X86_64_32:
15100+ pax_open_kernel();
15101 *(u32 *)loc = val;
15102+ pax_close_kernel();
15103 if (val != *(u32 *)loc)
15104 goto overflow;
15105 break;
15106 case R_X86_64_32S:
15107+ pax_open_kernel();
15108 *(s32 *)loc = val;
15109+ pax_close_kernel();
15110 if ((s64)val != *(s32 *)loc)
15111 goto overflow;
15112 break;
15113 case R_X86_64_PC32:
15114 val -= (u64)loc;
15115+ pax_open_kernel();
15116 *(u32 *)loc = val;
15117+ pax_close_kernel();
15118+
15119 #if 0
15120 if ((s64)val != *(s32 *)loc)
15121 goto overflow;
15122diff -urNp linux-2.6.32.41/arch/x86/kernel/paravirt.c linux-2.6.32.41/arch/x86/kernel/paravirt.c
15123--- linux-2.6.32.41/arch/x86/kernel/paravirt.c 2011-03-27 14:31:47.000000000 -0400
15124+++ linux-2.6.32.41/arch/x86/kernel/paravirt.c 2011-05-16 21:46:57.000000000 -0400
15125@@ -122,7 +122,7 @@ unsigned paravirt_patch_jmp(void *insnbu
15126 * corresponding structure. */
15127 static void *get_call_destination(u8 type)
15128 {
15129- struct paravirt_patch_template tmpl = {
15130+ const struct paravirt_patch_template tmpl = {
15131 .pv_init_ops = pv_init_ops,
15132 .pv_time_ops = pv_time_ops,
15133 .pv_cpu_ops = pv_cpu_ops,
15134@@ -133,6 +133,9 @@ static void *get_call_destination(u8 typ
15135 .pv_lock_ops = pv_lock_ops,
15136 #endif
15137 };
15138+
15139+ pax_track_stack();
15140+
15141 return *((void **)&tmpl + type);
15142 }
15143
15144@@ -145,14 +148,14 @@ unsigned paravirt_patch_default(u8 type,
15145 if (opfunc == NULL)
15146 /* If there's no function, patch it with a ud2a (BUG) */
15147 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
15148- else if (opfunc == _paravirt_nop)
15149+ else if (opfunc == (void *)_paravirt_nop)
15150 /* If the operation is a nop, then nop the callsite */
15151 ret = paravirt_patch_nop();
15152
15153 /* identity functions just return their single argument */
15154- else if (opfunc == _paravirt_ident_32)
15155+ else if (opfunc == (void *)_paravirt_ident_32)
15156 ret = paravirt_patch_ident_32(insnbuf, len);
15157- else if (opfunc == _paravirt_ident_64)
15158+ else if (opfunc == (void *)_paravirt_ident_64)
15159 ret = paravirt_patch_ident_64(insnbuf, len);
15160
15161 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
15162@@ -178,7 +181,7 @@ unsigned paravirt_patch_insns(void *insn
15163 if (insn_len > len || start == NULL)
15164 insn_len = len;
15165 else
15166- memcpy(insnbuf, start, insn_len);
15167+ memcpy(insnbuf, ktla_ktva(start), insn_len);
15168
15169 return insn_len;
15170 }
15171@@ -294,22 +297,22 @@ void arch_flush_lazy_mmu_mode(void)
15172 preempt_enable();
15173 }
15174
15175-struct pv_info pv_info = {
15176+struct pv_info pv_info __read_only = {
15177 .name = "bare hardware",
15178 .paravirt_enabled = 0,
15179 .kernel_rpl = 0,
15180 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
15181 };
15182
15183-struct pv_init_ops pv_init_ops = {
15184+struct pv_init_ops pv_init_ops __read_only = {
15185 .patch = native_patch,
15186 };
15187
15188-struct pv_time_ops pv_time_ops = {
15189+struct pv_time_ops pv_time_ops __read_only = {
15190 .sched_clock = native_sched_clock,
15191 };
15192
15193-struct pv_irq_ops pv_irq_ops = {
15194+struct pv_irq_ops pv_irq_ops __read_only = {
15195 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
15196 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
15197 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
15198@@ -321,7 +324,7 @@ struct pv_irq_ops pv_irq_ops = {
15199 #endif
15200 };
15201
15202-struct pv_cpu_ops pv_cpu_ops = {
15203+struct pv_cpu_ops pv_cpu_ops __read_only = {
15204 .cpuid = native_cpuid,
15205 .get_debugreg = native_get_debugreg,
15206 .set_debugreg = native_set_debugreg,
15207@@ -382,7 +385,7 @@ struct pv_cpu_ops pv_cpu_ops = {
15208 .end_context_switch = paravirt_nop,
15209 };
15210
15211-struct pv_apic_ops pv_apic_ops = {
15212+struct pv_apic_ops pv_apic_ops __read_only = {
15213 #ifdef CONFIG_X86_LOCAL_APIC
15214 .startup_ipi_hook = paravirt_nop,
15215 #endif
15216@@ -396,7 +399,7 @@ struct pv_apic_ops pv_apic_ops = {
15217 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
15218 #endif
15219
15220-struct pv_mmu_ops pv_mmu_ops = {
15221+struct pv_mmu_ops pv_mmu_ops __read_only = {
15222
15223 .read_cr2 = native_read_cr2,
15224 .write_cr2 = native_write_cr2,
15225@@ -467,6 +470,12 @@ struct pv_mmu_ops pv_mmu_ops = {
15226 },
15227
15228 .set_fixmap = native_set_fixmap,
15229+
15230+#ifdef CONFIG_PAX_KERNEXEC
15231+ .pax_open_kernel = native_pax_open_kernel,
15232+ .pax_close_kernel = native_pax_close_kernel,
15233+#endif
15234+
15235 };
15236
15237 EXPORT_SYMBOL_GPL(pv_time_ops);
15238diff -urNp linux-2.6.32.41/arch/x86/kernel/paravirt-spinlocks.c linux-2.6.32.41/arch/x86/kernel/paravirt-spinlocks.c
15239--- linux-2.6.32.41/arch/x86/kernel/paravirt-spinlocks.c 2011-03-27 14:31:47.000000000 -0400
15240+++ linux-2.6.32.41/arch/x86/kernel/paravirt-spinlocks.c 2011-04-17 15:56:46.000000000 -0400
15241@@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *
15242 __raw_spin_lock(lock);
15243 }
15244
15245-struct pv_lock_ops pv_lock_ops = {
15246+struct pv_lock_ops pv_lock_ops __read_only = {
15247 #ifdef CONFIG_SMP
15248 .spin_is_locked = __ticket_spin_is_locked,
15249 .spin_is_contended = __ticket_spin_is_contended,
15250diff -urNp linux-2.6.32.41/arch/x86/kernel/pci-calgary_64.c linux-2.6.32.41/arch/x86/kernel/pci-calgary_64.c
15251--- linux-2.6.32.41/arch/x86/kernel/pci-calgary_64.c 2011-03-27 14:31:47.000000000 -0400
15252+++ linux-2.6.32.41/arch/x86/kernel/pci-calgary_64.c 2011-04-17 15:56:46.000000000 -0400
15253@@ -477,7 +477,7 @@ static void calgary_free_coherent(struct
15254 free_pages((unsigned long)vaddr, get_order(size));
15255 }
15256
15257-static struct dma_map_ops calgary_dma_ops = {
15258+static const struct dma_map_ops calgary_dma_ops = {
15259 .alloc_coherent = calgary_alloc_coherent,
15260 .free_coherent = calgary_free_coherent,
15261 .map_sg = calgary_map_sg,
15262diff -urNp linux-2.6.32.41/arch/x86/kernel/pci-dma.c linux-2.6.32.41/arch/x86/kernel/pci-dma.c
15263--- linux-2.6.32.41/arch/x86/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
15264+++ linux-2.6.32.41/arch/x86/kernel/pci-dma.c 2011-04-17 15:56:46.000000000 -0400
15265@@ -14,7 +14,7 @@
15266
15267 static int forbid_dac __read_mostly;
15268
15269-struct dma_map_ops *dma_ops;
15270+const struct dma_map_ops *dma_ops;
15271 EXPORT_SYMBOL(dma_ops);
15272
15273 static int iommu_sac_force __read_mostly;
15274@@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
15275
15276 int dma_supported(struct device *dev, u64 mask)
15277 {
15278- struct dma_map_ops *ops = get_dma_ops(dev);
15279+ const struct dma_map_ops *ops = get_dma_ops(dev);
15280
15281 #ifdef CONFIG_PCI
15282 if (mask > 0xffffffff && forbid_dac > 0) {
15283diff -urNp linux-2.6.32.41/arch/x86/kernel/pci-gart_64.c linux-2.6.32.41/arch/x86/kernel/pci-gart_64.c
15284--- linux-2.6.32.41/arch/x86/kernel/pci-gart_64.c 2011-03-27 14:31:47.000000000 -0400
15285+++ linux-2.6.32.41/arch/x86/kernel/pci-gart_64.c 2011-04-17 15:56:46.000000000 -0400
15286@@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct ag
15287 return -1;
15288 }
15289
15290-static struct dma_map_ops gart_dma_ops = {
15291+static const struct dma_map_ops gart_dma_ops = {
15292 .map_sg = gart_map_sg,
15293 .unmap_sg = gart_unmap_sg,
15294 .map_page = gart_map_page,
15295diff -urNp linux-2.6.32.41/arch/x86/kernel/pci-nommu.c linux-2.6.32.41/arch/x86/kernel/pci-nommu.c
15296--- linux-2.6.32.41/arch/x86/kernel/pci-nommu.c 2011-03-27 14:31:47.000000000 -0400
15297+++ linux-2.6.32.41/arch/x86/kernel/pci-nommu.c 2011-04-17 15:56:46.000000000 -0400
15298@@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(str
15299 flush_write_buffers();
15300 }
15301
15302-struct dma_map_ops nommu_dma_ops = {
15303+const struct dma_map_ops nommu_dma_ops = {
15304 .alloc_coherent = dma_generic_alloc_coherent,
15305 .free_coherent = nommu_free_coherent,
15306 .map_sg = nommu_map_sg,
15307diff -urNp linux-2.6.32.41/arch/x86/kernel/pci-swiotlb.c linux-2.6.32.41/arch/x86/kernel/pci-swiotlb.c
15308--- linux-2.6.32.41/arch/x86/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
15309+++ linux-2.6.32.41/arch/x86/kernel/pci-swiotlb.c 2011-04-17 15:56:46.000000000 -0400
15310@@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(
15311 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
15312 }
15313
15314-static struct dma_map_ops swiotlb_dma_ops = {
15315+static const struct dma_map_ops swiotlb_dma_ops = {
15316 .mapping_error = swiotlb_dma_mapping_error,
15317 .alloc_coherent = x86_swiotlb_alloc_coherent,
15318 .free_coherent = swiotlb_free_coherent,
15319diff -urNp linux-2.6.32.41/arch/x86/kernel/process_32.c linux-2.6.32.41/arch/x86/kernel/process_32.c
15320--- linux-2.6.32.41/arch/x86/kernel/process_32.c 2011-03-27 14:31:47.000000000 -0400
15321+++ linux-2.6.32.41/arch/x86/kernel/process_32.c 2011-05-16 21:46:57.000000000 -0400
15322@@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __as
15323 unsigned long thread_saved_pc(struct task_struct *tsk)
15324 {
15325 return ((unsigned long *)tsk->thread.sp)[3];
15326+//XXX return tsk->thread.eip;
15327 }
15328
15329 #ifndef CONFIG_SMP
15330@@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, i
15331 unsigned short ss, gs;
15332 const char *board;
15333
15334- if (user_mode_vm(regs)) {
15335+ if (user_mode(regs)) {
15336 sp = regs->sp;
15337 ss = regs->ss & 0xffff;
15338- gs = get_user_gs(regs);
15339 } else {
15340 sp = (unsigned long) (&regs->sp);
15341 savesegment(ss, ss);
15342- savesegment(gs, gs);
15343 }
15344+ gs = get_user_gs(regs);
15345
15346 printk("\n");
15347
15348@@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), voi
15349 regs.bx = (unsigned long) fn;
15350 regs.dx = (unsigned long) arg;
15351
15352- regs.ds = __USER_DS;
15353- regs.es = __USER_DS;
15354+ regs.ds = __KERNEL_DS;
15355+ regs.es = __KERNEL_DS;
15356 regs.fs = __KERNEL_PERCPU;
15357- regs.gs = __KERNEL_STACK_CANARY;
15358+ savesegment(gs, regs.gs);
15359 regs.orig_ax = -1;
15360 regs.ip = (unsigned long) kernel_thread_helper;
15361 regs.cs = __KERNEL_CS | get_kernel_rpl();
15362@@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flag
15363 struct task_struct *tsk;
15364 int err;
15365
15366- childregs = task_pt_regs(p);
15367+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
15368 *childregs = *regs;
15369 childregs->ax = 0;
15370 childregs->sp = sp;
15371
15372 p->thread.sp = (unsigned long) childregs;
15373 p->thread.sp0 = (unsigned long) (childregs+1);
15374+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
15375
15376 p->thread.ip = (unsigned long) ret_from_fork;
15377
15378@@ -346,7 +347,7 @@ __switch_to(struct task_struct *prev_p,
15379 struct thread_struct *prev = &prev_p->thread,
15380 *next = &next_p->thread;
15381 int cpu = smp_processor_id();
15382- struct tss_struct *tss = &per_cpu(init_tss, cpu);
15383+ struct tss_struct *tss = init_tss + cpu;
15384 bool preload_fpu;
15385
15386 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
15387@@ -381,6 +382,10 @@ __switch_to(struct task_struct *prev_p,
15388 */
15389 lazy_save_gs(prev->gs);
15390
15391+#ifdef CONFIG_PAX_MEMORY_UDEREF
15392+ __set_fs(task_thread_info(next_p)->addr_limit);
15393+#endif
15394+
15395 /*
15396 * Load the per-thread Thread-Local Storage descriptor.
15397 */
15398@@ -416,6 +421,9 @@ __switch_to(struct task_struct *prev_p,
15399 */
15400 arch_end_context_switch(next_p);
15401
15402+ percpu_write(current_task, next_p);
15403+ percpu_write(current_tinfo, &next_p->tinfo);
15404+
15405 if (preload_fpu)
15406 __math_state_restore();
15407
15408@@ -425,8 +433,6 @@ __switch_to(struct task_struct *prev_p,
15409 if (prev->gs | next->gs)
15410 lazy_load_gs(next->gs);
15411
15412- percpu_write(current_task, next_p);
15413-
15414 return prev_p;
15415 }
15416
15417@@ -496,4 +502,3 @@ unsigned long get_wchan(struct task_stru
15418 } while (count++ < 16);
15419 return 0;
15420 }
15421-
15422diff -urNp linux-2.6.32.41/arch/x86/kernel/process_64.c linux-2.6.32.41/arch/x86/kernel/process_64.c
15423--- linux-2.6.32.41/arch/x86/kernel/process_64.c 2011-03-27 14:31:47.000000000 -0400
15424+++ linux-2.6.32.41/arch/x86/kernel/process_64.c 2011-05-16 21:46:57.000000000 -0400
15425@@ -91,7 +91,7 @@ static void __exit_idle(void)
15426 void exit_idle(void)
15427 {
15428 /* idle loop has pid 0 */
15429- if (current->pid)
15430+ if (task_pid_nr(current))
15431 return;
15432 __exit_idle();
15433 }
15434@@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, i
15435 if (!board)
15436 board = "";
15437 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
15438- current->pid, current->comm, print_tainted(),
15439+ task_pid_nr(current), current->comm, print_tainted(),
15440 init_utsname()->release,
15441 (int)strcspn(init_utsname()->version, " "),
15442 init_utsname()->version, board);
15443@@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flag
15444 struct pt_regs *childregs;
15445 struct task_struct *me = current;
15446
15447- childregs = ((struct pt_regs *)
15448- (THREAD_SIZE + task_stack_page(p))) - 1;
15449+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
15450 *childregs = *regs;
15451
15452 childregs->ax = 0;
15453@@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flag
15454 p->thread.sp = (unsigned long) childregs;
15455 p->thread.sp0 = (unsigned long) (childregs+1);
15456 p->thread.usersp = me->thread.usersp;
15457+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
15458
15459 set_tsk_thread_flag(p, TIF_FORK);
15460
15461@@ -380,7 +380,7 @@ __switch_to(struct task_struct *prev_p,
15462 struct thread_struct *prev = &prev_p->thread;
15463 struct thread_struct *next = &next_p->thread;
15464 int cpu = smp_processor_id();
15465- struct tss_struct *tss = &per_cpu(init_tss, cpu);
15466+ struct tss_struct *tss = init_tss + cpu;
15467 unsigned fsindex, gsindex;
15468 bool preload_fpu;
15469
15470@@ -476,10 +476,9 @@ __switch_to(struct task_struct *prev_p,
15471 prev->usersp = percpu_read(old_rsp);
15472 percpu_write(old_rsp, next->usersp);
15473 percpu_write(current_task, next_p);
15474+ percpu_write(current_tinfo, &next_p->tinfo);
15475
15476- percpu_write(kernel_stack,
15477- (unsigned long)task_stack_page(next_p) +
15478- THREAD_SIZE - KERNEL_STACK_OFFSET);
15479+ percpu_write(kernel_stack, next->sp0);
15480
15481 /*
15482 * Now maybe reload the debug registers and handle I/O bitmaps
15483@@ -560,12 +559,11 @@ unsigned long get_wchan(struct task_stru
15484 if (!p || p == current || p->state == TASK_RUNNING)
15485 return 0;
15486 stack = (unsigned long)task_stack_page(p);
15487- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
15488+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
15489 return 0;
15490 fp = *(u64 *)(p->thread.sp);
15491 do {
15492- if (fp < (unsigned long)stack ||
15493- fp >= (unsigned long)stack+THREAD_SIZE)
15494+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
15495 return 0;
15496 ip = *(u64 *)(fp+8);
15497 if (!in_sched_functions(ip))
15498diff -urNp linux-2.6.32.41/arch/x86/kernel/process.c linux-2.6.32.41/arch/x86/kernel/process.c
15499--- linux-2.6.32.41/arch/x86/kernel/process.c 2011-04-22 19:16:29.000000000 -0400
15500+++ linux-2.6.32.41/arch/x86/kernel/process.c 2011-05-22 23:02:03.000000000 -0400
15501@@ -51,16 +51,33 @@ void free_thread_xstate(struct task_stru
15502
15503 void free_thread_info(struct thread_info *ti)
15504 {
15505- free_thread_xstate(ti->task);
15506 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
15507 }
15508
15509+static struct kmem_cache *task_struct_cachep;
15510+
15511 void arch_task_cache_init(void)
15512 {
15513- task_xstate_cachep =
15514- kmem_cache_create("task_xstate", xstate_size,
15515+ /* create a slab on which task_structs can be allocated */
15516+ task_struct_cachep =
15517+ kmem_cache_create("task_struct", sizeof(struct task_struct),
15518+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
15519+
15520+ task_xstate_cachep =
15521+ kmem_cache_create("task_xstate", xstate_size,
15522 __alignof__(union thread_xstate),
15523- SLAB_PANIC | SLAB_NOTRACK, NULL);
15524+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
15525+}
15526+
15527+struct task_struct *alloc_task_struct(void)
15528+{
15529+ return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
15530+}
15531+
15532+void free_task_struct(struct task_struct *task)
15533+{
15534+ free_thread_xstate(task);
15535+ kmem_cache_free(task_struct_cachep, task);
15536 }
15537
15538 /*
15539@@ -73,7 +90,7 @@ void exit_thread(void)
15540 unsigned long *bp = t->io_bitmap_ptr;
15541
15542 if (bp) {
15543- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
15544+ struct tss_struct *tss = init_tss + get_cpu();
15545
15546 t->io_bitmap_ptr = NULL;
15547 clear_thread_flag(TIF_IO_BITMAP);
15548@@ -93,6 +110,9 @@ void flush_thread(void)
15549
15550 clear_tsk_thread_flag(tsk, TIF_DEBUG);
15551
15552+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
15553+ loadsegment(gs, 0);
15554+#endif
15555 tsk->thread.debugreg0 = 0;
15556 tsk->thread.debugreg1 = 0;
15557 tsk->thread.debugreg2 = 0;
15558@@ -307,7 +327,7 @@ void default_idle(void)
15559 EXPORT_SYMBOL(default_idle);
15560 #endif
15561
15562-void stop_this_cpu(void *dummy)
15563+__noreturn void stop_this_cpu(void *dummy)
15564 {
15565 local_irq_disable();
15566 /*
15567@@ -568,16 +588,35 @@ static int __init idle_setup(char *str)
15568 }
15569 early_param("idle", idle_setup);
15570
15571-unsigned long arch_align_stack(unsigned long sp)
15572+#ifdef CONFIG_PAX_RANDKSTACK
15573+asmlinkage void pax_randomize_kstack(void)
15574 {
15575- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
15576- sp -= get_random_int() % 8192;
15577- return sp & ~0xf;
15578-}
15579+ struct thread_struct *thread = &current->thread;
15580+ unsigned long time;
15581
15582-unsigned long arch_randomize_brk(struct mm_struct *mm)
15583-{
15584- unsigned long range_end = mm->brk + 0x02000000;
15585- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
15586+ if (!randomize_va_space)
15587+ return;
15588+
15589+ rdtscl(time);
15590+
15591+ /* P4 seems to return a 0 LSB, ignore it */
15592+#ifdef CONFIG_MPENTIUM4
15593+ time &= 0x3EUL;
15594+ time <<= 2;
15595+#elif defined(CONFIG_X86_64)
15596+ time &= 0xFUL;
15597+ time <<= 4;
15598+#else
15599+ time &= 0x1FUL;
15600+ time <<= 3;
15601+#endif
15602+
15603+ thread->sp0 ^= time;
15604+ load_sp0(init_tss + smp_processor_id(), thread);
15605+
15606+#ifdef CONFIG_X86_64
15607+ percpu_write(kernel_stack, thread->sp0);
15608+#endif
15609 }
15610+#endif
15611
15612diff -urNp linux-2.6.32.41/arch/x86/kernel/ptrace.c linux-2.6.32.41/arch/x86/kernel/ptrace.c
15613--- linux-2.6.32.41/arch/x86/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
15614+++ linux-2.6.32.41/arch/x86/kernel/ptrace.c 2011-04-17 15:56:46.000000000 -0400
15615@@ -925,7 +925,7 @@ static const struct user_regset_view use
15616 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
15617 {
15618 int ret;
15619- unsigned long __user *datap = (unsigned long __user *)data;
15620+ unsigned long __user *datap = (__force unsigned long __user *)data;
15621
15622 switch (request) {
15623 /* read the word at location addr in the USER area. */
15624@@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *chi
15625 if (addr < 0)
15626 return -EIO;
15627 ret = do_get_thread_area(child, addr,
15628- (struct user_desc __user *) data);
15629+ (__force struct user_desc __user *) data);
15630 break;
15631
15632 case PTRACE_SET_THREAD_AREA:
15633 if (addr < 0)
15634 return -EIO;
15635 ret = do_set_thread_area(child, addr,
15636- (struct user_desc __user *) data, 0);
15637+ (__force struct user_desc __user *) data, 0);
15638 break;
15639 #endif
15640
15641@@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *chi
15642 #ifdef CONFIG_X86_PTRACE_BTS
15643 case PTRACE_BTS_CONFIG:
15644 ret = ptrace_bts_config
15645- (child, data, (struct ptrace_bts_config __user *)addr);
15646+ (child, data, (__force struct ptrace_bts_config __user *)addr);
15647 break;
15648
15649 case PTRACE_BTS_STATUS:
15650 ret = ptrace_bts_status
15651- (child, data, (struct ptrace_bts_config __user *)addr);
15652+ (child, data, (__force struct ptrace_bts_config __user *)addr);
15653 break;
15654
15655 case PTRACE_BTS_SIZE:
15656@@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *chi
15657
15658 case PTRACE_BTS_GET:
15659 ret = ptrace_bts_read_record
15660- (child, data, (struct bts_struct __user *) addr);
15661+ (child, data, (__force struct bts_struct __user *) addr);
15662 break;
15663
15664 case PTRACE_BTS_CLEAR:
15665@@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *chi
15666
15667 case PTRACE_BTS_DRAIN:
15668 ret = ptrace_bts_drain
15669- (child, data, (struct bts_struct __user *) addr);
15670+ (child, data, (__force struct bts_struct __user *) addr);
15671 break;
15672 #endif /* CONFIG_X86_PTRACE_BTS */
15673
15674@@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *ts
15675 info.si_code = si_code;
15676
15677 /* User-mode ip? */
15678- info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
15679+ info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
15680
15681 /* Send us the fake SIGTRAP */
15682 force_sig_info(SIGTRAP, &info, tsk);
15683@@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *ts
15684 * We must return the syscall number to actually look up in the table.
15685 * This can be -1L to skip running any syscall at all.
15686 */
15687-asmregparm long syscall_trace_enter(struct pt_regs *regs)
15688+long syscall_trace_enter(struct pt_regs *regs)
15689 {
15690 long ret = 0;
15691
15692@@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(stru
15693 return ret ?: regs->orig_ax;
15694 }
15695
15696-asmregparm void syscall_trace_leave(struct pt_regs *regs)
15697+void syscall_trace_leave(struct pt_regs *regs)
15698 {
15699 if (unlikely(current->audit_context))
15700 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
15701diff -urNp linux-2.6.32.41/arch/x86/kernel/reboot.c linux-2.6.32.41/arch/x86/kernel/reboot.c
15702--- linux-2.6.32.41/arch/x86/kernel/reboot.c 2011-03-27 14:31:47.000000000 -0400
15703+++ linux-2.6.32.41/arch/x86/kernel/reboot.c 2011-05-22 23:02:03.000000000 -0400
15704@@ -33,7 +33,7 @@ void (*pm_power_off)(void);
15705 EXPORT_SYMBOL(pm_power_off);
15706
15707 static const struct desc_ptr no_idt = {};
15708-static int reboot_mode;
15709+static unsigned short reboot_mode;
15710 enum reboot_type reboot_type = BOOT_KBD;
15711 int reboot_force;
15712
15713@@ -292,12 +292,12 @@ core_initcall(reboot_init);
15714 controller to pulse the CPU reset line, which is more thorough, but
15715 doesn't work with at least one type of 486 motherboard. It is easy
15716 to stop this code working; hence the copious comments. */
15717-static const unsigned long long
15718-real_mode_gdt_entries [3] =
15719+static struct desc_struct
15720+real_mode_gdt_entries [3] __read_only =
15721 {
15722- 0x0000000000000000ULL, /* Null descriptor */
15723- 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
15724- 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
15725+ GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
15726+ GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
15727+ GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
15728 };
15729
15730 static const struct desc_ptr
15731@@ -346,7 +346,7 @@ static const unsigned char jump_to_bios
15732 * specified by the code and length parameters.
15733 * We assume that length will aways be less that 100!
15734 */
15735-void machine_real_restart(const unsigned char *code, int length)
15736+__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
15737 {
15738 local_irq_disable();
15739
15740@@ -366,8 +366,8 @@ void machine_real_restart(const unsigned
15741 /* Remap the kernel at virtual address zero, as well as offset zero
15742 from the kernel segment. This assumes the kernel segment starts at
15743 virtual address PAGE_OFFSET. */
15744- memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
15745- sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
15746+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
15747+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
15748
15749 /*
15750 * Use `swapper_pg_dir' as our page directory.
15751@@ -379,16 +379,15 @@ void machine_real_restart(const unsigned
15752 boot)". This seems like a fairly standard thing that gets set by
15753 REBOOT.COM programs, and the previous reset routine did this
15754 too. */
15755- *((unsigned short *)0x472) = reboot_mode;
15756+ *(unsigned short *)(__va(0x472)) = reboot_mode;
15757
15758 /* For the switch to real mode, copy some code to low memory. It has
15759 to be in the first 64k because it is running in 16-bit mode, and it
15760 has to have the same physical and virtual address, because it turns
15761 off paging. Copy it near the end of the first page, out of the way
15762 of BIOS variables. */
15763- memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
15764- real_mode_switch, sizeof (real_mode_switch));
15765- memcpy((void *)(0x1000 - 100), code, length);
15766+ memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
15767+ memcpy(__va(0x1000 - 100), code, length);
15768
15769 /* Set up the IDT for real mode. */
15770 load_idt(&real_mode_idt);
15771@@ -416,6 +415,7 @@ void machine_real_restart(const unsigned
15772 __asm__ __volatile__ ("ljmp $0x0008,%0"
15773 :
15774 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
15775+ do { } while (1);
15776 }
15777 #ifdef CONFIG_APM_MODULE
15778 EXPORT_SYMBOL(machine_real_restart);
15779@@ -536,7 +536,7 @@ void __attribute__((weak)) mach_reboot_f
15780 {
15781 }
15782
15783-static void native_machine_emergency_restart(void)
15784+__noreturn static void native_machine_emergency_restart(void)
15785 {
15786 int i;
15787
15788@@ -651,13 +651,13 @@ void native_machine_shutdown(void)
15789 #endif
15790 }
15791
15792-static void __machine_emergency_restart(int emergency)
15793+static __noreturn void __machine_emergency_restart(int emergency)
15794 {
15795 reboot_emergency = emergency;
15796 machine_ops.emergency_restart();
15797 }
15798
15799-static void native_machine_restart(char *__unused)
15800+static __noreturn void native_machine_restart(char *__unused)
15801 {
15802 printk("machine restart\n");
15803
15804@@ -666,7 +666,7 @@ static void native_machine_restart(char
15805 __machine_emergency_restart(0);
15806 }
15807
15808-static void native_machine_halt(void)
15809+static __noreturn void native_machine_halt(void)
15810 {
15811 /* stop other cpus and apics */
15812 machine_shutdown();
15813@@ -677,7 +677,7 @@ static void native_machine_halt(void)
15814 stop_this_cpu(NULL);
15815 }
15816
15817-static void native_machine_power_off(void)
15818+__noreturn static void native_machine_power_off(void)
15819 {
15820 if (pm_power_off) {
15821 if (!reboot_force)
15822@@ -686,6 +686,7 @@ static void native_machine_power_off(voi
15823 }
15824 /* a fallback in case there is no PM info available */
15825 tboot_shutdown(TB_SHUTDOWN_HALT);
15826+ do { } while (1);
15827 }
15828
15829 struct machine_ops machine_ops = {
15830diff -urNp linux-2.6.32.41/arch/x86/kernel/setup.c linux-2.6.32.41/arch/x86/kernel/setup.c
15831--- linux-2.6.32.41/arch/x86/kernel/setup.c 2011-04-17 17:00:52.000000000 -0400
15832+++ linux-2.6.32.41/arch/x86/kernel/setup.c 2011-04-17 17:03:05.000000000 -0400
15833@@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
15834
15835 if (!boot_params.hdr.root_flags)
15836 root_mountflags &= ~MS_RDONLY;
15837- init_mm.start_code = (unsigned long) _text;
15838- init_mm.end_code = (unsigned long) _etext;
15839+ init_mm.start_code = ktla_ktva((unsigned long) _text);
15840+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
15841 init_mm.end_data = (unsigned long) _edata;
15842 init_mm.brk = _brk_end;
15843
15844- code_resource.start = virt_to_phys(_text);
15845- code_resource.end = virt_to_phys(_etext)-1;
15846- data_resource.start = virt_to_phys(_etext);
15847+ code_resource.start = virt_to_phys(ktla_ktva(_text));
15848+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
15849+ data_resource.start = virt_to_phys(_sdata);
15850 data_resource.end = virt_to_phys(_edata)-1;
15851 bss_resource.start = virt_to_phys(&__bss_start);
15852 bss_resource.end = virt_to_phys(&__bss_stop)-1;
15853diff -urNp linux-2.6.32.41/arch/x86/kernel/setup_percpu.c linux-2.6.32.41/arch/x86/kernel/setup_percpu.c
15854--- linux-2.6.32.41/arch/x86/kernel/setup_percpu.c 2011-03-27 14:31:47.000000000 -0400
15855+++ linux-2.6.32.41/arch/x86/kernel/setup_percpu.c 2011-04-17 15:56:46.000000000 -0400
15856@@ -25,19 +25,17 @@
15857 # define DBG(x...)
15858 #endif
15859
15860-DEFINE_PER_CPU(int, cpu_number);
15861+#ifdef CONFIG_SMP
15862+DEFINE_PER_CPU(unsigned int, cpu_number);
15863 EXPORT_PER_CPU_SYMBOL(cpu_number);
15864+#endif
15865
15866-#ifdef CONFIG_X86_64
15867 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
15868-#else
15869-#define BOOT_PERCPU_OFFSET 0
15870-#endif
15871
15872 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
15873 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
15874
15875-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
15876+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
15877 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
15878 };
15879 EXPORT_SYMBOL(__per_cpu_offset);
15880@@ -159,10 +157,10 @@ static inline void setup_percpu_segment(
15881 {
15882 #ifdef CONFIG_X86_32
15883 struct desc_struct gdt;
15884+ unsigned long base = per_cpu_offset(cpu);
15885
15886- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
15887- 0x2 | DESCTYPE_S, 0x8);
15888- gdt.s = 1;
15889+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
15890+ 0x83 | DESCTYPE_S, 0xC);
15891 write_gdt_entry(get_cpu_gdt_table(cpu),
15892 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
15893 #endif
15894@@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
15895 /* alrighty, percpu areas up and running */
15896 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
15897 for_each_possible_cpu(cpu) {
15898+#ifdef CONFIG_CC_STACKPROTECTOR
15899+#ifdef CONFIG_X86_32
15900+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
15901+#endif
15902+#endif
15903 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
15904 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
15905 per_cpu(cpu_number, cpu) = cpu;
15906@@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
15907 early_per_cpu_map(x86_cpu_to_node_map, cpu);
15908 #endif
15909 #endif
15910+#ifdef CONFIG_CC_STACKPROTECTOR
15911+#ifdef CONFIG_X86_32
15912+ if (cpu == boot_cpu_id)
15913+ per_cpu(stack_canary, cpu) = canary;
15914+#endif
15915+#endif
15916 /*
15917 * Up to this point, the boot CPU has been using .data.init
15918 * area. Reload any changed state for the boot CPU.
15919diff -urNp linux-2.6.32.41/arch/x86/kernel/signal.c linux-2.6.32.41/arch/x86/kernel/signal.c
15920--- linux-2.6.32.41/arch/x86/kernel/signal.c 2011-03-27 14:31:47.000000000 -0400
15921+++ linux-2.6.32.41/arch/x86/kernel/signal.c 2011-05-22 23:02:03.000000000 -0400
15922@@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsi
15923 * Align the stack pointer according to the i386 ABI,
15924 * i.e. so that on function entry ((sp + 4) & 15) == 0.
15925 */
15926- sp = ((sp + 4) & -16ul) - 4;
15927+ sp = ((sp - 12) & -16ul) - 4;
15928 #else /* !CONFIG_X86_32 */
15929 sp = round_down(sp, 16) - 8;
15930 #endif
15931@@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, str
15932 * Return an always-bogus address instead so we will die with SIGSEGV.
15933 */
15934 if (onsigstack && !likely(on_sig_stack(sp)))
15935- return (void __user *)-1L;
15936+ return (__force void __user *)-1L;
15937
15938 /* save i387 state */
15939 if (used_math() && save_i387_xstate(*fpstate) < 0)
15940- return (void __user *)-1L;
15941+ return (__force void __user *)-1L;
15942
15943 return (void __user *)sp;
15944 }
15945@@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigactio
15946 }
15947
15948 if (current->mm->context.vdso)
15949- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
15950+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
15951 else
15952- restorer = &frame->retcode;
15953+ restorer = (void __user *)&frame->retcode;
15954 if (ka->sa.sa_flags & SA_RESTORER)
15955 restorer = ka->sa.sa_restorer;
15956
15957@@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigactio
15958 * reasons and because gdb uses it as a signature to notice
15959 * signal handler stack frames.
15960 */
15961- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
15962+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
15963
15964 if (err)
15965 return -EFAULT;
15966@@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, str
15967 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
15968
15969 /* Set up to return from userspace. */
15970- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
15971+ if (current->mm->context.vdso)
15972+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
15973+ else
15974+ restorer = (void __user *)&frame->retcode;
15975 if (ka->sa.sa_flags & SA_RESTORER)
15976 restorer = ka->sa.sa_restorer;
15977 put_user_ex(restorer, &frame->pretcode);
15978@@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, str
15979 * reasons and because gdb uses it as a signature to notice
15980 * signal handler stack frames.
15981 */
15982- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
15983+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
15984 } put_user_catch(err);
15985
15986 if (err)
15987@@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *re
15988 int signr;
15989 sigset_t *oldset;
15990
15991+ pax_track_stack();
15992+
15993 /*
15994 * We want the common case to go fast, which is why we may in certain
15995 * cases get here from kernel mode. Just return without doing anything
15996@@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *re
15997 * X86_32: vm86 regs switched out by assembly code before reaching
15998 * here, so testing against kernel CS suffices.
15999 */
16000- if (!user_mode(regs))
16001+ if (!user_mode_novm(regs))
16002 return;
16003
16004 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
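Aside on the align_sigframe() hunk in the signal.c diff above: both the old and the new formula preserve the i386 ABI rule that ((sp + 4) & 15) == 0 at handler entry; the patched form additionally guarantees, as a matter of arithmetic, that the frame lands at least 16 bytes below the incoming sp instead of possibly landing on it. A minimal user-space check of that arithmetic (illustrative only, not kernel code):

	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		for (unsigned long sp = 0xbf800000UL; sp < 0xbf800040UL; sp++) {
			unsigned long old_sp = ((sp + 4) & -16UL) - 4;	/* original formula */
			unsigned long new_sp = ((sp - 12) & -16UL) - 4;	/* patched formula */
			assert(((old_sp + 4) & 15) == 0);	/* ABI alignment holds for both */
			assert(((new_sp + 4) & 15) == 0);
			assert(old_sp <= sp);			/* old form may land on sp itself */
			assert(new_sp <= sp - 16);		/* new form always leaves headroom */
		}
		printf("alignment invariant holds\n");
		return 0;
	}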
16005diff -urNp linux-2.6.32.41/arch/x86/kernel/smpboot.c linux-2.6.32.41/arch/x86/kernel/smpboot.c
16006--- linux-2.6.32.41/arch/x86/kernel/smpboot.c 2011-03-27 14:31:47.000000000 -0400
16007+++ linux-2.6.32.41/arch/x86/kernel/smpboot.c 2011-05-11 18:25:15.000000000 -0400
16008@@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct
16009 */
16010 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
16011
16012-void cpu_hotplug_driver_lock()
16013+void cpu_hotplug_driver_lock(void)
16014 {
16015- mutex_lock(&x86_cpu_hotplug_driver_mutex);
16016+ mutex_lock(&x86_cpu_hotplug_driver_mutex);
16017 }
16018
16019-void cpu_hotplug_driver_unlock()
16020+void cpu_hotplug_driver_unlock(void)
16021 {
16022- mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16023+ mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16024 }
16025
16026 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
16027@@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int api
16028 set_idle_for_cpu(cpu, c_idle.idle);
16029 do_rest:
16030 per_cpu(current_task, cpu) = c_idle.idle;
16031+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
16032 #ifdef CONFIG_X86_32
16033 /* Stack for startup_32 can be just as for start_secondary onwards */
16034 irq_ctx_init(cpu);
16035@@ -750,11 +751,13 @@ do_rest:
16036 #else
16037 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
16038 initial_gs = per_cpu_offset(cpu);
16039- per_cpu(kernel_stack, cpu) =
16040- (unsigned long)task_stack_page(c_idle.idle) -
16041- KERNEL_STACK_OFFSET + THREAD_SIZE;
16042+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
16043 #endif
16044+
16045+ pax_open_kernel();
16046 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
16047+ pax_close_kernel();
16048+
16049 initial_code = (unsigned long)start_secondary;
16050 stack_start.sp = (void *) c_idle.idle->thread.sp;
16051
16052@@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int
16053
16054 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
16055
16056+#ifdef CONFIG_PAX_PER_CPU_PGD
16057+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
16058+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16059+ KERNEL_PGD_PTRS);
16060+#endif
16061+
16062 err = do_boot_cpu(apicid, cpu);
16063
16064 if (err) {
16065diff -urNp linux-2.6.32.41/arch/x86/kernel/step.c linux-2.6.32.41/arch/x86/kernel/step.c
16066--- linux-2.6.32.41/arch/x86/kernel/step.c 2011-03-27 14:31:47.000000000 -0400
16067+++ linux-2.6.32.41/arch/x86/kernel/step.c 2011-04-17 15:56:46.000000000 -0400
16068@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
16069 struct desc_struct *desc;
16070 unsigned long base;
16071
16072- seg &= ~7UL;
16073+ seg >>= 3;
16074
16075 mutex_lock(&child->mm->context.lock);
16076- if (unlikely((seg >> 3) >= child->mm->context.size))
16077+ if (unlikely(seg >= child->mm->context.size))
16078 addr = -1L; /* bogus selector, access would fault */
16079 else {
16080 desc = child->mm->context.ldt + seg;
16081@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
16082 addr += base;
16083 }
16084 mutex_unlock(&child->mm->context.lock);
16085- }
16086+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
16087+ addr = ktla_ktva(addr);
16088
16089 return addr;
16090 }
16091@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
16092 unsigned char opcode[15];
16093 unsigned long addr = convert_ip_to_linear(child, regs);
16094
16095+ if (addr == -EINVAL)
16096+ return 0;
16097+
16098 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
16099 for (i = 0; i < copied; i++) {
16100 switch (opcode[i]) {
16101@@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
16102
16103 #ifdef CONFIG_X86_64
16104 case 0x40 ... 0x4f:
16105- if (regs->cs != __USER_CS)
16106+ if ((regs->cs & 0xffff) != __USER_CS)
16107 /* 32-bit mode: register increment */
16108 return 0;
16109 /* 64-bit mode: REX prefix */
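Aside on the convert_ip_to_linear() hunk above: the patch switches the LDT lookup from a masked selector (seg &= ~7UL) to a descriptor index (seg >>= 3), and additionally maps __KERNEL_CS / __KERNEXEC_KERNEL_CS code addresses through ktla_ktva(). For reference, a protected-mode segment selector packs three fields; a small sketch of the decomposition involved (helper names are illustrative, not from the kernel):

	/* selector layout: bits 15..3 = descriptor index, bit 2 = TI (0 = GDT, 1 = LDT), bits 1..0 = RPL */
	static inline unsigned int sel_index(unsigned short sel)    { return sel >> 3; }
	static inline unsigned int sel_uses_ldt(unsigned short sel) { return (sel >> 2) & 1; }
	static inline unsigned int sel_rpl(unsigned short sel)      { return sel & 3; }

The __KERNEXEC_KERNEL_CS comparison covers the alternate kernel code segment that the KERNEXEC feature introduces elsewhere in this patch.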
16110diff -urNp linux-2.6.32.41/arch/x86/kernel/syscall_table_32.S linux-2.6.32.41/arch/x86/kernel/syscall_table_32.S
16111--- linux-2.6.32.41/arch/x86/kernel/syscall_table_32.S 2011-03-27 14:31:47.000000000 -0400
16112+++ linux-2.6.32.41/arch/x86/kernel/syscall_table_32.S 2011-04-17 15:56:46.000000000 -0400
16113@@ -1,3 +1,4 @@
16114+.section .rodata,"a",@progbits
16115 ENTRY(sys_call_table)
16116 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
16117 .long sys_exit
16118diff -urNp linux-2.6.32.41/arch/x86/kernel/sys_i386_32.c linux-2.6.32.41/arch/x86/kernel/sys_i386_32.c
16119--- linux-2.6.32.41/arch/x86/kernel/sys_i386_32.c 2011-03-27 14:31:47.000000000 -0400
16120+++ linux-2.6.32.41/arch/x86/kernel/sys_i386_32.c 2011-04-17 15:56:46.000000000 -0400
16121@@ -24,6 +24,21 @@
16122
16123 #include <asm/syscalls.h>
16124
16125+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
16126+{
16127+ unsigned long pax_task_size = TASK_SIZE;
16128+
16129+#ifdef CONFIG_PAX_SEGMEXEC
16130+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
16131+ pax_task_size = SEGMEXEC_TASK_SIZE;
16132+#endif
16133+
16134+ if (len > pax_task_size || addr > pax_task_size - len)
16135+ return -EINVAL;
16136+
16137+ return 0;
16138+}
16139+
16140 /*
16141 * Perform the select(nd, in, out, ex, tv) and mmap() system
16142 * calls. Linux/i386 didn't use to be able to handle more than
16143@@ -58,6 +73,212 @@ out:
16144 return err;
16145 }
16146
16147+unsigned long
16148+arch_get_unmapped_area(struct file *filp, unsigned long addr,
16149+ unsigned long len, unsigned long pgoff, unsigned long flags)
16150+{
16151+ struct mm_struct *mm = current->mm;
16152+ struct vm_area_struct *vma;
16153+ unsigned long start_addr, pax_task_size = TASK_SIZE;
16154+
16155+#ifdef CONFIG_PAX_SEGMEXEC
16156+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
16157+ pax_task_size = SEGMEXEC_TASK_SIZE;
16158+#endif
16159+
16160+ pax_task_size -= PAGE_SIZE;
16161+
16162+ if (len > pax_task_size)
16163+ return -ENOMEM;
16164+
16165+ if (flags & MAP_FIXED)
16166+ return addr;
16167+
16168+#ifdef CONFIG_PAX_RANDMMAP
16169+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16170+#endif
16171+
16172+ if (addr) {
16173+ addr = PAGE_ALIGN(addr);
16174+ if (pax_task_size - len >= addr) {
16175+ vma = find_vma(mm, addr);
16176+ if (check_heap_stack_gap(vma, addr, len))
16177+ return addr;
16178+ }
16179+ }
16180+ if (len > mm->cached_hole_size) {
16181+ start_addr = addr = mm->free_area_cache;
16182+ } else {
16183+ start_addr = addr = mm->mmap_base;
16184+ mm->cached_hole_size = 0;
16185+ }
16186+
16187+#ifdef CONFIG_PAX_PAGEEXEC
16188+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
16189+ start_addr = 0x00110000UL;
16190+
16191+#ifdef CONFIG_PAX_RANDMMAP
16192+ if (mm->pax_flags & MF_PAX_RANDMMAP)
16193+ start_addr += mm->delta_mmap & 0x03FFF000UL;
16194+#endif
16195+
16196+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
16197+ start_addr = addr = mm->mmap_base;
16198+ else
16199+ addr = start_addr;
16200+ }
16201+#endif
16202+
16203+full_search:
16204+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
16205+ /* At this point: (!vma || addr < vma->vm_end). */
16206+ if (pax_task_size - len < addr) {
16207+ /*
16208+ * Start a new search - just in case we missed
16209+ * some holes.
16210+ */
16211+ if (start_addr != mm->mmap_base) {
16212+ start_addr = addr = mm->mmap_base;
16213+ mm->cached_hole_size = 0;
16214+ goto full_search;
16215+ }
16216+ return -ENOMEM;
16217+ }
16218+ if (check_heap_stack_gap(vma, addr, len))
16219+ break;
16220+ if (addr + mm->cached_hole_size < vma->vm_start)
16221+ mm->cached_hole_size = vma->vm_start - addr;
16222+ addr = vma->vm_end;
16223+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
16224+ start_addr = addr = mm->mmap_base;
16225+ mm->cached_hole_size = 0;
16226+ goto full_search;
16227+ }
16228+ }
16229+
16230+ /*
16231+ * Remember the place where we stopped the search:
16232+ */
16233+ mm->free_area_cache = addr + len;
16234+ return addr;
16235+}
16236+
16237+unsigned long
16238+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
16239+ const unsigned long len, const unsigned long pgoff,
16240+ const unsigned long flags)
16241+{
16242+ struct vm_area_struct *vma;
16243+ struct mm_struct *mm = current->mm;
16244+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
16245+
16246+#ifdef CONFIG_PAX_SEGMEXEC
16247+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
16248+ pax_task_size = SEGMEXEC_TASK_SIZE;
16249+#endif
16250+
16251+ pax_task_size -= PAGE_SIZE;
16252+
16253+ /* requested length too big for entire address space */
16254+ if (len > pax_task_size)
16255+ return -ENOMEM;
16256+
16257+ if (flags & MAP_FIXED)
16258+ return addr;
16259+
16260+#ifdef CONFIG_PAX_PAGEEXEC
16261+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
16262+ goto bottomup;
16263+#endif
16264+
16265+#ifdef CONFIG_PAX_RANDMMAP
16266+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16267+#endif
16268+
16269+ /* requesting a specific address */
16270+ if (addr) {
16271+ addr = PAGE_ALIGN(addr);
16272+ if (pax_task_size - len >= addr) {
16273+ vma = find_vma(mm, addr);
16274+ if (check_heap_stack_gap(vma, addr, len))
16275+ return addr;
16276+ }
16277+ }
16278+
16279+ /* check if free_area_cache is useful for us */
16280+ if (len <= mm->cached_hole_size) {
16281+ mm->cached_hole_size = 0;
16282+ mm->free_area_cache = mm->mmap_base;
16283+ }
16284+
16285+ /* either no address requested or can't fit in requested address hole */
16286+ addr = mm->free_area_cache;
16287+
16288+ /* make sure it can fit in the remaining address space */
16289+ if (addr > len) {
16290+ vma = find_vma(mm, addr-len);
16291+ if (check_heap_stack_gap(vma, addr - len, len))
16292+ /* remember the address as a hint for next time */
16293+ return (mm->free_area_cache = addr-len);
16294+ }
16295+
16296+ if (mm->mmap_base < len)
16297+ goto bottomup;
16298+
16299+ addr = mm->mmap_base-len;
16300+
16301+ do {
16302+ /*
16303+ * Lookup failure means no vma is above this address,
16304+ * else if new region fits below vma->vm_start,
16305+ * return with success:
16306+ */
16307+ vma = find_vma(mm, addr);
16308+ if (check_heap_stack_gap(vma, addr, len))
16309+ /* remember the address as a hint for next time */
16310+ return (mm->free_area_cache = addr);
16311+
16312+ /* remember the largest hole we saw so far */
16313+ if (addr + mm->cached_hole_size < vma->vm_start)
16314+ mm->cached_hole_size = vma->vm_start - addr;
16315+
16316+ /* try just below the current vma->vm_start */
16317+ addr = skip_heap_stack_gap(vma, len);
16318+ } while (!IS_ERR_VALUE(addr));
16319+
16320+bottomup:
16321+ /*
16322+ * A failed mmap() very likely causes application failure,
16323+ * so fall back to the bottom-up function here. This scenario
16324+ * can happen with large stack limits and large mmap()
16325+ * allocations.
16326+ */
16327+
16328+#ifdef CONFIG_PAX_SEGMEXEC
16329+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
16330+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
16331+ else
16332+#endif
16333+
16334+ mm->mmap_base = TASK_UNMAPPED_BASE;
16335+
16336+#ifdef CONFIG_PAX_RANDMMAP
16337+ if (mm->pax_flags & MF_PAX_RANDMMAP)
16338+ mm->mmap_base += mm->delta_mmap;
16339+#endif
16340+
16341+ mm->free_area_cache = mm->mmap_base;
16342+ mm->cached_hole_size = ~0UL;
16343+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
16344+ /*
16345+ * Restore the topdown base:
16346+ */
16347+ mm->mmap_base = base;
16348+ mm->free_area_cache = base;
16349+ mm->cached_hole_size = ~0UL;
16350+
16351+ return addr;
16352+}
16353
16354 struct sel_arg_struct {
16355 unsigned long n;
16356@@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int fi
16357 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
16358 case SEMTIMEDOP:
16359 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
16360- (const struct timespec __user *)fifth);
16361+ (__force const struct timespec __user *)fifth);
16362
16363 case SEMGET:
16364 return sys_semget(first, second, third);
16365@@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int fi
16366 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
16367 if (ret)
16368 return ret;
16369- return put_user(raddr, (ulong __user *) third);
16370+ return put_user(raddr, (__force ulong __user *) third);
16371 }
16372 case 1: /* iBCS2 emulator entry point */
16373 if (!segment_eq(get_fs(), get_ds()))
16374@@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldol
16375
16376 return error;
16377 }
16378-
16379-
16380-/*
16381- * Do a system call from kernel instead of calling sys_execve so we
16382- * end up with proper pt_regs.
16383- */
16384-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
16385-{
16386- long __res;
16387- asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
16388- : "=a" (__res)
16389- : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
16390- return __res;
16391-}
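Aside on the sys_i386_32.c additions above: the new arch_get_unmapped_area()/arch_get_unmapped_area_topdown() replace the usual '!vma || addr + len <= vma->vm_start' test with check_heap_stack_gap(), and walk downwards with skip_heap_stack_gap(). Both helpers are defined elsewhere in this patch; conceptually the gap check refuses candidate ranges that would end right underneath a downward-growing stack VMA. A rough sketch of that idea, assuming a fixed guard size (the helper's real definition and its tunable may differ):

	/* hypothetical illustration only -- not the patch's actual helper */
	#define ASSUMED_HEAP_STACK_GAP	(64UL << 10)	/* assumed 64 KiB guard */

	static int gap_ok(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
	{
		if (!vma)
			return 1;				/* no mapping above: fits */
		if (addr + len > vma->vm_start)
			return 0;				/* overlaps the next VMA */
		if (vma->vm_flags & VM_GROWSDOWN)		/* next VMA is a stack */
			return addr + len + ASSUMED_HEAP_STACK_GAP <= vma->vm_start;
		return 1;
	}

On this reading, skip_heap_stack_gap() would be the mirror image for the top-down walk, returning the next candidate below vma->vm_start (less the guard) or an error value once the search runs out of room.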
16392diff -urNp linux-2.6.32.41/arch/x86/kernel/sys_x86_64.c linux-2.6.32.41/arch/x86/kernel/sys_x86_64.c
16393--- linux-2.6.32.41/arch/x86/kernel/sys_x86_64.c 2011-03-27 14:31:47.000000000 -0400
16394+++ linux-2.6.32.41/arch/x86/kernel/sys_x86_64.c 2011-04-17 15:56:46.000000000 -0400
16395@@ -32,8 +32,8 @@ out:
16396 return error;
16397 }
16398
16399-static void find_start_end(unsigned long flags, unsigned long *begin,
16400- unsigned long *end)
16401+static void find_start_end(struct mm_struct *mm, unsigned long flags,
16402+ unsigned long *begin, unsigned long *end)
16403 {
16404 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
16405 unsigned long new_begin;
16406@@ -52,7 +52,7 @@ static void find_start_end(unsigned long
16407 *begin = new_begin;
16408 }
16409 } else {
16410- *begin = TASK_UNMAPPED_BASE;
16411+ *begin = mm->mmap_base;
16412 *end = TASK_SIZE;
16413 }
16414 }
16415@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
16416 if (flags & MAP_FIXED)
16417 return addr;
16418
16419- find_start_end(flags, &begin, &end);
16420+ find_start_end(mm, flags, &begin, &end);
16421
16422 if (len > end)
16423 return -ENOMEM;
16424
16425+#ifdef CONFIG_PAX_RANDMMAP
16426+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16427+#endif
16428+
16429 if (addr) {
16430 addr = PAGE_ALIGN(addr);
16431 vma = find_vma(mm, addr);
16432- if (end - len >= addr &&
16433- (!vma || addr + len <= vma->vm_start))
16434+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
16435 return addr;
16436 }
16437 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
16438@@ -106,7 +109,7 @@ full_search:
16439 }
16440 return -ENOMEM;
16441 }
16442- if (!vma || addr + len <= vma->vm_start) {
16443+ if (check_heap_stack_gap(vma, addr, len)) {
16444 /*
16445 * Remember the place where we stopped the search:
16446 */
16447@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
16448 {
16449 struct vm_area_struct *vma;
16450 struct mm_struct *mm = current->mm;
16451- unsigned long addr = addr0;
16452+ unsigned long base = mm->mmap_base, addr = addr0;
16453
16454 /* requested length too big for entire address space */
16455 if (len > TASK_SIZE)
16456@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
16457 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
16458 goto bottomup;
16459
16460+#ifdef CONFIG_PAX_RANDMMAP
16461+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16462+#endif
16463+
16464 /* requesting a specific address */
16465 if (addr) {
16466 addr = PAGE_ALIGN(addr);
16467- vma = find_vma(mm, addr);
16468- if (TASK_SIZE - len >= addr &&
16469- (!vma || addr + len <= vma->vm_start))
16470- return addr;
16471+ if (TASK_SIZE - len >= addr) {
16472+ vma = find_vma(mm, addr);
16473+ if (check_heap_stack_gap(vma, addr, len))
16474+ return addr;
16475+ }
16476 }
16477
16478 /* check if free_area_cache is useful for us */
16479@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
16480 /* make sure it can fit in the remaining address space */
16481 if (addr > len) {
16482 vma = find_vma(mm, addr-len);
16483- if (!vma || addr <= vma->vm_start)
16484+ if (check_heap_stack_gap(vma, addr - len, len))
16485 /* remember the address as a hint for next time */
16486 return mm->free_area_cache = addr-len;
16487 }
16488@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
16489 * return with success:
16490 */
16491 vma = find_vma(mm, addr);
16492- if (!vma || addr+len <= vma->vm_start)
16493+ if (check_heap_stack_gap(vma, addr, len))
16494 /* remember the address as a hint for next time */
16495 return mm->free_area_cache = addr;
16496
16497@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
16498 mm->cached_hole_size = vma->vm_start - addr;
16499
16500 /* try just below the current vma->vm_start */
16501- addr = vma->vm_start-len;
16502- } while (len < vma->vm_start);
16503+ addr = skip_heap_stack_gap(vma, len);
16504+ } while (!IS_ERR_VALUE(addr));
16505
16506 bottomup:
16507 /*
16508@@ -198,13 +206,21 @@ bottomup:
16509 * can happen with large stack limits and large mmap()
16510 * allocations.
16511 */
16512+ mm->mmap_base = TASK_UNMAPPED_BASE;
16513+
16514+#ifdef CONFIG_PAX_RANDMMAP
16515+ if (mm->pax_flags & MF_PAX_RANDMMAP)
16516+ mm->mmap_base += mm->delta_mmap;
16517+#endif
16518+
16519+ mm->free_area_cache = mm->mmap_base;
16520 mm->cached_hole_size = ~0UL;
16521- mm->free_area_cache = TASK_UNMAPPED_BASE;
16522 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
16523 /*
16524 * Restore the topdown base:
16525 */
16526- mm->free_area_cache = mm->mmap_base;
16527+ mm->mmap_base = base;
16528+ mm->free_area_cache = base;
16529 mm->cached_hole_size = ~0UL;
16530
16531 return addr;
16532diff -urNp linux-2.6.32.41/arch/x86/kernel/tboot.c linux-2.6.32.41/arch/x86/kernel/tboot.c
16533--- linux-2.6.32.41/arch/x86/kernel/tboot.c 2011-03-27 14:31:47.000000000 -0400
16534+++ linux-2.6.32.41/arch/x86/kernel/tboot.c 2011-05-22 23:02:03.000000000 -0400
16535@@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
16536
16537 void tboot_shutdown(u32 shutdown_type)
16538 {
16539- void (*shutdown)(void);
16540+ void (* __noreturn shutdown)(void);
16541
16542 if (!tboot_enabled())
16543 return;
16544@@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
16545
16546 switch_to_tboot_pt();
16547
16548- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
16549+ shutdown = (void *)tboot->shutdown_entry;
16550 shutdown();
16551
16552 /* should not reach here */
16553@@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
16554 tboot_shutdown(acpi_shutdown_map[sleep_state]);
16555 }
16556
16557-static atomic_t ap_wfs_count;
16558+static atomic_unchecked_t ap_wfs_count;
16559
16560 static int tboot_wait_for_aps(int num_aps)
16561 {
16562@@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(
16563 {
16564 switch (action) {
16565 case CPU_DYING:
16566- atomic_inc(&ap_wfs_count);
16567+ atomic_inc_unchecked(&ap_wfs_count);
16568 if (num_online_cpus() == 1)
16569- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
16570+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
16571 return NOTIFY_BAD;
16572 break;
16573 }
16574@@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
16575
16576 tboot_create_trampoline();
16577
16578- atomic_set(&ap_wfs_count, 0);
16579+ atomic_set_unchecked(&ap_wfs_count, 0);
16580 register_hotcpu_notifier(&tboot_cpu_notifier);
16581 return 0;
16582 }
16583diff -urNp linux-2.6.32.41/arch/x86/kernel/time.c linux-2.6.32.41/arch/x86/kernel/time.c
16584--- linux-2.6.32.41/arch/x86/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
16585+++ linux-2.6.32.41/arch/x86/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
16586@@ -26,17 +26,13 @@
16587 int timer_ack;
16588 #endif
16589
16590-#ifdef CONFIG_X86_64
16591-volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
16592-#endif
16593-
16594 unsigned long profile_pc(struct pt_regs *regs)
16595 {
16596 unsigned long pc = instruction_pointer(regs);
16597
16598- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
16599+ if (!user_mode(regs) && in_lock_functions(pc)) {
16600 #ifdef CONFIG_FRAME_POINTER
16601- return *(unsigned long *)(regs->bp + sizeof(long));
16602+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
16603 #else
16604 unsigned long *sp =
16605 (unsigned long *)kernel_stack_pointer(regs);
16606@@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
16607 * or above a saved flags. Eflags has bits 22-31 zero,
16608 * kernel addresses don't.
16609 */
16610+
16611+#ifdef CONFIG_PAX_KERNEXEC
16612+ return ktla_ktva(sp[0]);
16613+#else
16614 if (sp[0] >> 22)
16615 return sp[0];
16616 if (sp[1] >> 22)
16617 return sp[1];
16618 #endif
16619+
16620+#endif
16621 }
16622 return pc;
16623 }
16624diff -urNp linux-2.6.32.41/arch/x86/kernel/tls.c linux-2.6.32.41/arch/x86/kernel/tls.c
16625--- linux-2.6.32.41/arch/x86/kernel/tls.c 2011-03-27 14:31:47.000000000 -0400
16626+++ linux-2.6.32.41/arch/x86/kernel/tls.c 2011-04-17 15:56:46.000000000 -0400
16627@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
16628 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
16629 return -EINVAL;
16630
16631+#ifdef CONFIG_PAX_SEGMEXEC
16632+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
16633+ return -EINVAL;
16634+#endif
16635+
16636 set_tls_desc(p, idx, &info, 1);
16637
16638 return 0;
16639diff -urNp linux-2.6.32.41/arch/x86/kernel/trampoline_32.S linux-2.6.32.41/arch/x86/kernel/trampoline_32.S
16640--- linux-2.6.32.41/arch/x86/kernel/trampoline_32.S 2011-03-27 14:31:47.000000000 -0400
16641+++ linux-2.6.32.41/arch/x86/kernel/trampoline_32.S 2011-04-17 15:56:46.000000000 -0400
16642@@ -32,6 +32,12 @@
16643 #include <asm/segment.h>
16644 #include <asm/page_types.h>
16645
16646+#ifdef CONFIG_PAX_KERNEXEC
16647+#define ta(X) (X)
16648+#else
16649+#define ta(X) ((X) - __PAGE_OFFSET)
16650+#endif
16651+
16652 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
16653 __CPUINITRODATA
16654 .code16
16655@@ -60,7 +66,7 @@ r_base = .
16656 inc %ax # protected mode (PE) bit
16657 lmsw %ax # into protected mode
16658 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
16659- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
16660+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
16661
16662 # These need to be in the same 64K segment as the above;
16663 # hence we don't use the boot_gdt_descr defined in head.S
16664diff -urNp linux-2.6.32.41/arch/x86/kernel/trampoline_64.S linux-2.6.32.41/arch/x86/kernel/trampoline_64.S
16665--- linux-2.6.32.41/arch/x86/kernel/trampoline_64.S 2011-03-27 14:31:47.000000000 -0400
16666+++ linux-2.6.32.41/arch/x86/kernel/trampoline_64.S 2011-04-17 15:56:46.000000000 -0400
16667@@ -91,7 +91,7 @@ startup_32:
16668 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
16669 movl %eax, %ds
16670
16671- movl $X86_CR4_PAE, %eax
16672+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
16673 movl %eax, %cr4 # Enable PAE mode
16674
16675 # Setup trampoline 4 level pagetables
16676@@ -138,7 +138,7 @@ tidt:
16677 # so the kernel can live anywhere
16678 .balign 4
16679 tgdt:
16680- .short tgdt_end - tgdt # gdt limit
16681+ .short tgdt_end - tgdt - 1 # gdt limit
16682 .long tgdt - r_base
16683 .short 0
16684 .quad 0x00cf9b000000ffff # __KERNEL32_CS
16685diff -urNp linux-2.6.32.41/arch/x86/kernel/traps.c linux-2.6.32.41/arch/x86/kernel/traps.c
16686--- linux-2.6.32.41/arch/x86/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
16687+++ linux-2.6.32.41/arch/x86/kernel/traps.c 2011-04-17 15:56:46.000000000 -0400
16688@@ -69,12 +69,6 @@ asmlinkage int system_call(void);
16689
16690 /* Do we ignore FPU interrupts ? */
16691 char ignore_fpu_irq;
16692-
16693-/*
16694- * The IDT has to be page-aligned to simplify the Pentium
16695- * F0 0F bug workaround.
16696- */
16697-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
16698 #endif
16699
16700 DECLARE_BITMAP(used_vectors, NR_VECTORS);
16701@@ -112,19 +106,19 @@ static inline void preempt_conditional_c
16702 static inline void
16703 die_if_kernel(const char *str, struct pt_regs *regs, long err)
16704 {
16705- if (!user_mode_vm(regs))
16706+ if (!user_mode(regs))
16707 die(str, regs, err);
16708 }
16709 #endif
16710
16711 static void __kprobes
16712-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
16713+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
16714 long error_code, siginfo_t *info)
16715 {
16716 struct task_struct *tsk = current;
16717
16718 #ifdef CONFIG_X86_32
16719- if (regs->flags & X86_VM_MASK) {
16720+ if (v8086_mode(regs)) {
16721 /*
16722 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
16723 * On nmi (interrupt 2), do_trap should not be called.
16724@@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str
16725 }
16726 #endif
16727
16728- if (!user_mode(regs))
16729+ if (!user_mode_novm(regs))
16730 goto kernel_trap;
16731
16732 #ifdef CONFIG_X86_32
16733@@ -158,7 +152,7 @@ trap_signal:
16734 printk_ratelimit()) {
16735 printk(KERN_INFO
16736 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
16737- tsk->comm, tsk->pid, str,
16738+ tsk->comm, task_pid_nr(tsk), str,
16739 regs->ip, regs->sp, error_code);
16740 print_vma_addr(" in ", regs->ip);
16741 printk("\n");
16742@@ -175,8 +169,20 @@ kernel_trap:
16743 if (!fixup_exception(regs)) {
16744 tsk->thread.error_code = error_code;
16745 tsk->thread.trap_no = trapnr;
16746+
16747+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16748+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
16749+ str = "PAX: suspicious stack segment fault";
16750+#endif
16751+
16752 die(str, regs, error_code);
16753 }
16754+
16755+#ifdef CONFIG_PAX_REFCOUNT
16756+ if (trapnr == 4)
16757+ pax_report_refcount_overflow(regs);
16758+#endif
16759+
16760 return;
16761
16762 #ifdef CONFIG_X86_32
16763@@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *re
16764 conditional_sti(regs);
16765
16766 #ifdef CONFIG_X86_32
16767- if (regs->flags & X86_VM_MASK)
16768+ if (v8086_mode(regs))
16769 goto gp_in_vm86;
16770 #endif
16771
16772 tsk = current;
16773- if (!user_mode(regs))
16774+ if (!user_mode_novm(regs))
16775 goto gp_in_kernel;
16776
16777+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
16778+ if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
16779+ struct mm_struct *mm = tsk->mm;
16780+ unsigned long limit;
16781+
16782+ down_write(&mm->mmap_sem);
16783+ limit = mm->context.user_cs_limit;
16784+ if (limit < TASK_SIZE) {
16785+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
16786+ up_write(&mm->mmap_sem);
16787+ return;
16788+ }
16789+ up_write(&mm->mmap_sem);
16790+ }
16791+#endif
16792+
16793 tsk->thread.error_code = error_code;
16794 tsk->thread.trap_no = 13;
16795
16796@@ -305,6 +327,13 @@ gp_in_kernel:
16797 if (notify_die(DIE_GPF, "general protection fault", regs,
16798 error_code, 13, SIGSEGV) == NOTIFY_STOP)
16799 return;
16800+
16801+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16802+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
16803+ die("PAX: suspicious general protection fault", regs, error_code);
16804+ else
16805+#endif
16806+
16807 die("general protection fault", regs, error_code);
16808 }
16809
16810@@ -558,7 +587,7 @@ dotraplinkage void __kprobes do_debug(st
16811 }
16812
16813 #ifdef CONFIG_X86_32
16814- if (regs->flags & X86_VM_MASK)
16815+ if (v8086_mode(regs))
16816 goto debug_vm86;
16817 #endif
16818
16819@@ -570,7 +599,7 @@ dotraplinkage void __kprobes do_debug(st
16820 * kernel space (but re-enable TF when returning to user mode).
16821 */
16822 if (condition & DR_STEP) {
16823- if (!user_mode(regs))
16824+ if (!user_mode_novm(regs))
16825 goto clear_TF_reenable;
16826 }
16827
16828@@ -757,7 +786,7 @@ do_simd_coprocessor_error(struct pt_regs
16829 * Handle strange cache flush from user space exception
16830 * in all other cases. This is undocumented behaviour.
16831 */
16832- if (regs->flags & X86_VM_MASK) {
16833+ if (v8086_mode(regs)) {
16834 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
16835 return;
16836 }
16837@@ -798,7 +827,7 @@ asmlinkage void __attribute__((weak)) sm
16838 void __math_state_restore(void)
16839 {
16840 struct thread_info *thread = current_thread_info();
16841- struct task_struct *tsk = thread->task;
16842+ struct task_struct *tsk = current;
16843
16844 /*
16845 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
16846@@ -825,8 +854,7 @@ void __math_state_restore(void)
16847 */
16848 asmlinkage void math_state_restore(void)
16849 {
16850- struct thread_info *thread = current_thread_info();
16851- struct task_struct *tsk = thread->task;
16852+ struct task_struct *tsk = current;
16853
16854 if (!tsk_used_math(tsk)) {
16855 local_irq_enable();
16856diff -urNp linux-2.6.32.41/arch/x86/kernel/vm86_32.c linux-2.6.32.41/arch/x86/kernel/vm86_32.c
16857--- linux-2.6.32.41/arch/x86/kernel/vm86_32.c 2011-03-27 14:31:47.000000000 -0400
16858+++ linux-2.6.32.41/arch/x86/kernel/vm86_32.c 2011-04-17 15:56:46.000000000 -0400
16859@@ -41,6 +41,7 @@
16860 #include <linux/ptrace.h>
16861 #include <linux/audit.h>
16862 #include <linux/stddef.h>
16863+#include <linux/grsecurity.h>
16864
16865 #include <asm/uaccess.h>
16866 #include <asm/io.h>
16867@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
16868 do_exit(SIGSEGV);
16869 }
16870
16871- tss = &per_cpu(init_tss, get_cpu());
16872+ tss = init_tss + get_cpu();
16873 current->thread.sp0 = current->thread.saved_sp0;
16874 current->thread.sysenter_cs = __KERNEL_CS;
16875 load_sp0(tss, &current->thread);
16876@@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
16877 struct task_struct *tsk;
16878 int tmp, ret = -EPERM;
16879
16880+#ifdef CONFIG_GRKERNSEC_VM86
16881+ if (!capable(CAP_SYS_RAWIO)) {
16882+ gr_handle_vm86();
16883+ goto out;
16884+ }
16885+#endif
16886+
16887 tsk = current;
16888 if (tsk->thread.saved_sp0)
16889 goto out;
16890@@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
16891 int tmp, ret;
16892 struct vm86plus_struct __user *v86;
16893
16894+#ifdef CONFIG_GRKERNSEC_VM86
16895+ if (!capable(CAP_SYS_RAWIO)) {
16896+ gr_handle_vm86();
16897+ ret = -EPERM;
16898+ goto out;
16899+ }
16900+#endif
16901+
16902 tsk = current;
16903 switch (regs->bx) {
16904 case VM86_REQUEST_IRQ:
16905@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
16906 tsk->thread.saved_fs = info->regs32->fs;
16907 tsk->thread.saved_gs = get_user_gs(info->regs32);
16908
16909- tss = &per_cpu(init_tss, get_cpu());
16910+ tss = init_tss + get_cpu();
16911 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
16912 if (cpu_has_sep)
16913 tsk->thread.sysenter_cs = 0;
16914@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
16915 goto cannot_handle;
16916 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
16917 goto cannot_handle;
16918- intr_ptr = (unsigned long __user *) (i << 2);
16919+ intr_ptr = (__force unsigned long __user *) (i << 2);
16920 if (get_user(segoffs, intr_ptr))
16921 goto cannot_handle;
16922 if ((segoffs >> 16) == BIOSSEG)
16923diff -urNp linux-2.6.32.41/arch/x86/kernel/vmi_32.c linux-2.6.32.41/arch/x86/kernel/vmi_32.c
16924--- linux-2.6.32.41/arch/x86/kernel/vmi_32.c 2011-03-27 14:31:47.000000000 -0400
16925+++ linux-2.6.32.41/arch/x86/kernel/vmi_32.c 2011-04-17 15:56:46.000000000 -0400
16926@@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1)))
16927 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
16928
16929 #define call_vrom_func(rom,func) \
16930- (((VROMFUNC *)(rom->func))())
16931+ (((VROMFUNC *)(ktva_ktla(rom.func)))())
16932
16933 #define call_vrom_long_func(rom,func,arg) \
16934- (((VROMLONGFUNC *)(rom->func)) (arg))
16935+({\
16936+ u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
16937+ struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
16938+ __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
16939+ __reloc;\
16940+})
16941
16942-static struct vrom_header *vmi_rom;
16943+static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
16944 static int disable_pge;
16945 static int disable_pse;
16946 static int disable_sep;
16947@@ -76,10 +81,10 @@ static struct {
16948 void (*set_initial_ap_state)(int, int);
16949 void (*halt)(void);
16950 void (*set_lazy_mode)(int mode);
16951-} vmi_ops;
16952+} vmi_ops __read_only;
16953
16954 /* Cached VMI operations */
16955-struct vmi_timer_ops vmi_timer_ops;
16956+struct vmi_timer_ops vmi_timer_ops __read_only;
16957
16958 /*
16959 * VMI patching routines.
16960@@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
16961 static inline void patch_offset(void *insnbuf,
16962 unsigned long ip, unsigned long dest)
16963 {
16964- *(unsigned long *)(insnbuf+1) = dest-ip-5;
16965+ *(unsigned long *)(insnbuf+1) = dest-ip-5;
16966 }
16967
16968 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
16969@@ -102,6 +107,7 @@ static unsigned patch_internal(int call,
16970 {
16971 u64 reloc;
16972 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
16973+
16974 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
16975 switch(rel->type) {
16976 case VMI_RELOCATION_CALL_REL:
16977@@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud
16978
16979 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
16980 {
16981- const pte_t pte = { .pte = 0 };
16982+ const pte_t pte = __pte(0ULL);
16983 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
16984 }
16985
16986 static void vmi_pmd_clear(pmd_t *pmd)
16987 {
16988- const pte_t pte = { .pte = 0 };
16989+ const pte_t pte = __pte(0ULL);
16990 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
16991 }
16992 #endif
16993@@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, un
16994 ap.ss = __KERNEL_DS;
16995 ap.esp = (unsigned long) start_esp;
16996
16997- ap.ds = __USER_DS;
16998- ap.es = __USER_DS;
16999+ ap.ds = __KERNEL_DS;
17000+ ap.es = __KERNEL_DS;
17001 ap.fs = __KERNEL_PERCPU;
17002- ap.gs = __KERNEL_STACK_CANARY;
17003+ savesegment(gs, ap.gs);
17004
17005 ap.eflags = 0;
17006
17007@@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
17008 paravirt_leave_lazy_mmu();
17009 }
17010
17011+#ifdef CONFIG_PAX_KERNEXEC
17012+static unsigned long vmi_pax_open_kernel(void)
17013+{
17014+ return 0;
17015+}
17016+
17017+static unsigned long vmi_pax_close_kernel(void)
17018+{
17019+ return 0;
17020+}
17021+#endif
17022+
17023 static inline int __init check_vmi_rom(struct vrom_header *rom)
17024 {
17025 struct pci_header *pci;
17026@@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(s
17027 return 0;
17028 if (rom->vrom_signature != VMI_SIGNATURE)
17029 return 0;
17030+ if (rom->rom_length * 512 > sizeof(*rom)) {
17031+ printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
17032+ return 0;
17033+ }
17034 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
17035 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
17036 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
17037@@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(v
17038 struct vrom_header *romstart;
17039 romstart = (struct vrom_header *)isa_bus_to_virt(base);
17040 if (check_vmi_rom(romstart)) {
17041- vmi_rom = romstart;
17042+ vmi_rom = *romstart;
17043 return 1;
17044 }
17045 }
17046@@ -836,6 +858,11 @@ static inline int __init activate_vmi(vo
17047
17048 para_fill(pv_irq_ops.safe_halt, Halt);
17049
17050+#ifdef CONFIG_PAX_KERNEXEC
17051+ pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
17052+ pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
17053+#endif
17054+
17055 /*
17056 * Alternative instruction rewriting doesn't happen soon enough
17057 * to convert VMI_IRET to a call instead of a jump; so we have
17058@@ -853,16 +880,16 @@ static inline int __init activate_vmi(vo
17059
17060 void __init vmi_init(void)
17061 {
17062- if (!vmi_rom)
17063+ if (!vmi_rom.rom_signature)
17064 probe_vmi_rom();
17065 else
17066- check_vmi_rom(vmi_rom);
17067+ check_vmi_rom(&vmi_rom);
17068
17069 /* In case probing for or validating the ROM failed, basil */
17070- if (!vmi_rom)
17071+ if (!vmi_rom.rom_signature)
17072 return;
17073
17074- reserve_top_address(-vmi_rom->virtual_top);
17075+ reserve_top_address(-vmi_rom.virtual_top);
17076
17077 #ifdef CONFIG_X86_IO_APIC
17078 /* This is virtual hardware; timer routing is wired correctly */
17079@@ -874,7 +901,7 @@ void __init vmi_activate(void)
17080 {
17081 unsigned long flags;
17082
17083- if (!vmi_rom)
17084+ if (!vmi_rom.rom_signature)
17085 return;
17086
17087 local_irq_save(flags);
17088diff -urNp linux-2.6.32.41/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.41/arch/x86/kernel/vmlinux.lds.S
17089--- linux-2.6.32.41/arch/x86/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
17090+++ linux-2.6.32.41/arch/x86/kernel/vmlinux.lds.S 2011-04-17 15:56:46.000000000 -0400
17091@@ -26,6 +26,13 @@
17092 #include <asm/page_types.h>
17093 #include <asm/cache.h>
17094 #include <asm/boot.h>
17095+#include <asm/segment.h>
17096+
17097+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17098+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
17099+#else
17100+#define __KERNEL_TEXT_OFFSET 0
17101+#endif
17102
17103 #undef i386 /* in case the preprocessor is a 32bit one */
17104
17105@@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF
17106 #ifdef CONFIG_X86_32
17107 OUTPUT_ARCH(i386)
17108 ENTRY(phys_startup_32)
17109-jiffies = jiffies_64;
17110 #else
17111 OUTPUT_ARCH(i386:x86-64)
17112 ENTRY(phys_startup_64)
17113-jiffies_64 = jiffies;
17114 #endif
17115
17116 PHDRS {
17117 text PT_LOAD FLAGS(5); /* R_E */
17118- data PT_LOAD FLAGS(7); /* RWE */
17119+#ifdef CONFIG_X86_32
17120+ module PT_LOAD FLAGS(5); /* R_E */
17121+#endif
17122+#ifdef CONFIG_XEN
17123+ rodata PT_LOAD FLAGS(5); /* R_E */
17124+#else
17125+ rodata PT_LOAD FLAGS(4); /* R__ */
17126+#endif
17127+ data PT_LOAD FLAGS(6); /* RW_ */
17128 #ifdef CONFIG_X86_64
17129 user PT_LOAD FLAGS(5); /* R_E */
17130+#endif
17131+ init.begin PT_LOAD FLAGS(6); /* RW_ */
17132 #ifdef CONFIG_SMP
17133 percpu PT_LOAD FLAGS(6); /* RW_ */
17134 #endif
17135+ text.init PT_LOAD FLAGS(5); /* R_E */
17136+ text.exit PT_LOAD FLAGS(5); /* R_E */
17137 init PT_LOAD FLAGS(7); /* RWE */
17138-#endif
17139 note PT_NOTE FLAGS(0); /* ___ */
17140 }
17141
17142 SECTIONS
17143 {
17144 #ifdef CONFIG_X86_32
17145- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
17146- phys_startup_32 = startup_32 - LOAD_OFFSET;
17147+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
17148 #else
17149- . = __START_KERNEL;
17150- phys_startup_64 = startup_64 - LOAD_OFFSET;
17151+ . = __START_KERNEL;
17152 #endif
17153
17154 /* Text and read-only data */
17155- .text : AT(ADDR(.text) - LOAD_OFFSET) {
17156- _text = .;
17157+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17158 /* bootstrapping code */
17159+#ifdef CONFIG_X86_32
17160+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17161+#else
17162+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17163+#endif
17164+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17165+ _text = .;
17166 HEAD_TEXT
17167 #ifdef CONFIG_X86_32
17168 . = ALIGN(PAGE_SIZE);
17169@@ -82,28 +102,71 @@ SECTIONS
17170 IRQENTRY_TEXT
17171 *(.fixup)
17172 *(.gnu.warning)
17173- /* End of text section */
17174- _etext = .;
17175 } :text = 0x9090
17176
17177- NOTES :text :note
17178+ . += __KERNEL_TEXT_OFFSET;
17179+
17180+#ifdef CONFIG_X86_32
17181+ . = ALIGN(PAGE_SIZE);
17182+ .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
17183+ *(.vmi.rom)
17184+ } :module
17185+
17186+ . = ALIGN(PAGE_SIZE);
17187+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
17188+
17189+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
17190+ MODULES_EXEC_VADDR = .;
17191+ BYTE(0)
17192+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
17193+ . = ALIGN(HPAGE_SIZE);
17194+ MODULES_EXEC_END = . - 1;
17195+#endif
17196+
17197+ } :module
17198+#endif
17199
17200- EXCEPTION_TABLE(16) :text = 0x9090
17201+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
17202+ /* End of text section */
17203+ _etext = . - __KERNEL_TEXT_OFFSET;
17204+ }
17205+
17206+#ifdef CONFIG_X86_32
17207+ . = ALIGN(PAGE_SIZE);
17208+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
17209+ *(.idt)
17210+ . = ALIGN(PAGE_SIZE);
17211+ *(.empty_zero_page)
17212+ *(.swapper_pg_fixmap)
17213+ *(.swapper_pg_pmd)
17214+ *(.swapper_pg_dir)
17215+ *(.trampoline_pg_dir)
17216+ } :rodata
17217+#endif
17218+
17219+ . = ALIGN(PAGE_SIZE);
17220+ NOTES :rodata :note
17221+
17222+ EXCEPTION_TABLE(16) :rodata
17223
17224 RO_DATA(PAGE_SIZE)
17225
17226 /* Data */
17227 .data : AT(ADDR(.data) - LOAD_OFFSET) {
17228+
17229+#ifdef CONFIG_PAX_KERNEXEC
17230+ . = ALIGN(HPAGE_SIZE);
17231+#else
17232+ . = ALIGN(PAGE_SIZE);
17233+#endif
17234+
17235 /* Start of data section */
17236 _sdata = .;
17237
17238 /* init_task */
17239 INIT_TASK_DATA(THREAD_SIZE)
17240
17241-#ifdef CONFIG_X86_32
17242- /* 32 bit has nosave before _edata */
17243 NOSAVE_DATA
17244-#endif
17245
17246 PAGE_ALIGNED_DATA(PAGE_SIZE)
17247
17248@@ -112,6 +175,8 @@ SECTIONS
17249 DATA_DATA
17250 CONSTRUCTORS
17251
17252+ jiffies = jiffies_64;
17253+
17254 /* rarely changed data like cpu maps */
17255 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
17256
17257@@ -166,12 +231,6 @@ SECTIONS
17258 }
17259 vgetcpu_mode = VVIRT(.vgetcpu_mode);
17260
17261- . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
17262- .jiffies : AT(VLOAD(.jiffies)) {
17263- *(.jiffies)
17264- }
17265- jiffies = VVIRT(.jiffies);
17266-
17267 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
17268 *(.vsyscall_3)
17269 }
17270@@ -187,12 +246,19 @@ SECTIONS
17271 #endif /* CONFIG_X86_64 */
17272
17273 /* Init code and data - will be freed after init */
17274- . = ALIGN(PAGE_SIZE);
17275 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
17276+ BYTE(0)
17277+
17278+#ifdef CONFIG_PAX_KERNEXEC
17279+ . = ALIGN(HPAGE_SIZE);
17280+#else
17281+ . = ALIGN(PAGE_SIZE);
17282+#endif
17283+
17284 __init_begin = .; /* paired with __init_end */
17285- }
17286+ } :init.begin
17287
17288-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
17289+#ifdef CONFIG_SMP
17290 /*
17291 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
17292 * output PHDR, so the next output section - .init.text - should
17293@@ -201,12 +267,27 @@ SECTIONS
17294 PERCPU_VADDR(0, :percpu)
17295 #endif
17296
17297- INIT_TEXT_SECTION(PAGE_SIZE)
17298-#ifdef CONFIG_X86_64
17299- :init
17300-#endif
17301+ . = ALIGN(PAGE_SIZE);
17302+ init_begin = .;
17303+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
17304+ VMLINUX_SYMBOL(_sinittext) = .;
17305+ INIT_TEXT
17306+ VMLINUX_SYMBOL(_einittext) = .;
17307+ . = ALIGN(PAGE_SIZE);
17308+ } :text.init
17309
17310- INIT_DATA_SECTION(16)
17311+ /*
17312+	 * .exit.text is discarded at runtime, not link time, to deal with
17313+ * references from .altinstructions and .eh_frame
17314+ */
17315+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17316+ EXIT_TEXT
17317+ . = ALIGN(16);
17318+ } :text.exit
17319+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
17320+
17321+ . = ALIGN(PAGE_SIZE);
17322+ INIT_DATA_SECTION(16) :init
17323
17324 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
17325 __x86_cpu_dev_start = .;
17326@@ -232,19 +313,11 @@ SECTIONS
17327 *(.altinstr_replacement)
17328 }
17329
17330- /*
17331- * .exit.text is discard at runtime, not link time, to deal with
17332- * references from .altinstructions and .eh_frame
17333- */
17334- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
17335- EXIT_TEXT
17336- }
17337-
17338 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
17339 EXIT_DATA
17340 }
17341
17342-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
17343+#ifndef CONFIG_SMP
17344 PERCPU(PAGE_SIZE)
17345 #endif
17346
17347@@ -267,12 +340,6 @@ SECTIONS
17348 . = ALIGN(PAGE_SIZE);
17349 }
17350
17351-#ifdef CONFIG_X86_64
17352- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
17353- NOSAVE_DATA
17354- }
17355-#endif
17356-
17357 /* BSS */
17358 . = ALIGN(PAGE_SIZE);
17359 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
17360@@ -288,6 +355,7 @@ SECTIONS
17361 __brk_base = .;
17362 . += 64 * 1024; /* 64k alignment slop space */
17363 *(.brk_reservation) /* areas brk users have reserved */
17364+ . = ALIGN(HPAGE_SIZE);
17365 __brk_limit = .;
17366 }
17367
17368@@ -316,13 +384,12 @@ SECTIONS
17369 * for the boot processor.
17370 */
17371 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
17372-INIT_PER_CPU(gdt_page);
17373 INIT_PER_CPU(irq_stack_union);
17374
17375 /*
17376 * Build-time check on the image size:
17377 */
17378-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
17379+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
17380 "kernel image bigger than KERNEL_IMAGE_SIZE");
17381
17382 #ifdef CONFIG_SMP
17383diff -urNp linux-2.6.32.41/arch/x86/kernel/vsyscall_64.c linux-2.6.32.41/arch/x86/kernel/vsyscall_64.c
17384--- linux-2.6.32.41/arch/x86/kernel/vsyscall_64.c 2011-03-27 14:31:47.000000000 -0400
17385+++ linux-2.6.32.41/arch/x86/kernel/vsyscall_64.c 2011-04-23 12:56:10.000000000 -0400
17386@@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wa
17387
17388 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
17389 /* copy vsyscall data */
17390+ strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
17391 vsyscall_gtod_data.clock.vread = clock->vread;
17392 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
17393 vsyscall_gtod_data.clock.mask = clock->mask;
17394@@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, s
17395 We do this here because otherwise user space would do it on
17396 its own in a likely inferior way (no access to jiffies).
17397 If you don't like it pass NULL. */
17398- if (tcache && tcache->blob[0] == (j = __jiffies)) {
17399+ if (tcache && tcache->blob[0] == (j = jiffies)) {
17400 p = tcache->blob[1];
17401 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
17402 /* Load per CPU data from RDTSCP */
17403diff -urNp linux-2.6.32.41/arch/x86/kernel/x8664_ksyms_64.c linux-2.6.32.41/arch/x86/kernel/x8664_ksyms_64.c
17404--- linux-2.6.32.41/arch/x86/kernel/x8664_ksyms_64.c 2011-03-27 14:31:47.000000000 -0400
17405+++ linux-2.6.32.41/arch/x86/kernel/x8664_ksyms_64.c 2011-04-17 15:56:46.000000000 -0400
17406@@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
17407
17408 EXPORT_SYMBOL(copy_user_generic);
17409 EXPORT_SYMBOL(__copy_user_nocache);
17410-EXPORT_SYMBOL(copy_from_user);
17411-EXPORT_SYMBOL(copy_to_user);
17412 EXPORT_SYMBOL(__copy_from_user_inatomic);
17413
17414 EXPORT_SYMBOL(copy_page);
17415diff -urNp linux-2.6.32.41/arch/x86/kernel/xsave.c linux-2.6.32.41/arch/x86/kernel/xsave.c
17416--- linux-2.6.32.41/arch/x86/kernel/xsave.c 2011-03-27 14:31:47.000000000 -0400
17417+++ linux-2.6.32.41/arch/x86/kernel/xsave.c 2011-04-17 15:56:46.000000000 -0400
17418@@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_
17419 fx_sw_user->xstate_size > fx_sw_user->extended_size)
17420 return -1;
17421
17422- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
17423+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
17424 fx_sw_user->extended_size -
17425 FP_XSTATE_MAGIC2_SIZE));
17426 /*
17427@@ -196,7 +196,7 @@ fx_only:
17428 * the other extended state.
17429 */
17430 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
17431- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
17432+ return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
17433 }
17434
17435 /*
17436@@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf
17437 if (task_thread_info(tsk)->status & TS_XSAVE)
17438 err = restore_user_xstate(buf);
17439 else
17440- err = fxrstor_checking((__force struct i387_fxsave_struct *)
17441+ err = fxrstor_checking((struct i387_fxsave_struct __user *)
17442 buf);
17443 if (unlikely(err)) {
17444 /*
17445diff -urNp linux-2.6.32.41/arch/x86/kvm/emulate.c linux-2.6.32.41/arch/x86/kvm/emulate.c
17446--- linux-2.6.32.41/arch/x86/kvm/emulate.c 2011-03-27 14:31:47.000000000 -0400
17447+++ linux-2.6.32.41/arch/x86/kvm/emulate.c 2011-04-17 15:56:46.000000000 -0400
17448@@ -81,8 +81,8 @@
17449 #define Src2CL (1<<29)
17450 #define Src2ImmByte (2<<29)
17451 #define Src2One (3<<29)
17452-#define Src2Imm16 (4<<29)
17453-#define Src2Mask (7<<29)
17454+#define Src2Imm16 (4U<<29)
17455+#define Src2Mask (7U<<29)
17456
17457 enum {
17458 Group1_80, Group1_81, Group1_82, Group1_83,
17459@@ -411,6 +411,7 @@ static u32 group2_table[] = {
17460
17461 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
17462 do { \
17463+ unsigned long _tmp; \
17464 __asm__ __volatile__ ( \
17465 _PRE_EFLAGS("0", "4", "2") \
17466 _op _suffix " %"_x"3,%1; " \
17467@@ -424,8 +425,6 @@ static u32 group2_table[] = {
17468 /* Raw emulation: instruction has two explicit operands. */
17469 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
17470 do { \
17471- unsigned long _tmp; \
17472- \
17473 switch ((_dst).bytes) { \
17474 case 2: \
17475 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
17476@@ -441,7 +440,6 @@ static u32 group2_table[] = {
17477
17478 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
17479 do { \
17480- unsigned long _tmp; \
17481 switch ((_dst).bytes) { \
17482 case 1: \
17483 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
17484diff -urNp linux-2.6.32.41/arch/x86/kvm/lapic.c linux-2.6.32.41/arch/x86/kvm/lapic.c
17485--- linux-2.6.32.41/arch/x86/kvm/lapic.c 2011-03-27 14:31:47.000000000 -0400
17486+++ linux-2.6.32.41/arch/x86/kvm/lapic.c 2011-04-17 15:56:46.000000000 -0400
17487@@ -52,7 +52,7 @@
17488 #define APIC_BUS_CYCLE_NS 1
17489
17490 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
17491-#define apic_debug(fmt, arg...)
17492+#define apic_debug(fmt, arg...) do {} while (0)
17493
17494 #define APIC_LVT_NUM 6
17495 /* 14 is the version for Xeon and Pentium 8.4.8*/
17496diff -urNp linux-2.6.32.41/arch/x86/kvm/paging_tmpl.h linux-2.6.32.41/arch/x86/kvm/paging_tmpl.h
17497--- linux-2.6.32.41/arch/x86/kvm/paging_tmpl.h 2011-03-27 14:31:47.000000000 -0400
17498+++ linux-2.6.32.41/arch/x86/kvm/paging_tmpl.h 2011-05-16 21:46:57.000000000 -0400
17499@@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_
17500 int level = PT_PAGE_TABLE_LEVEL;
17501 unsigned long mmu_seq;
17502
17503+ pax_track_stack();
17504+
17505 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
17506 kvm_mmu_audit(vcpu, "pre page fault");
17507
17508diff -urNp linux-2.6.32.41/arch/x86/kvm/svm.c linux-2.6.32.41/arch/x86/kvm/svm.c
17509--- linux-2.6.32.41/arch/x86/kvm/svm.c 2011-03-27 14:31:47.000000000 -0400
17510+++ linux-2.6.32.41/arch/x86/kvm/svm.c 2011-04-17 15:56:46.000000000 -0400
17511@@ -2483,9 +2483,12 @@ static int handle_exit(struct kvm_run *k
17512 static void reload_tss(struct kvm_vcpu *vcpu)
17513 {
17514 int cpu = raw_smp_processor_id();
17515-
17516 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
17517+
17518+ pax_open_kernel();
17519 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
17520+ pax_close_kernel();
17521+
17522 load_TR_desc();
17523 }
17524
17525@@ -2946,7 +2949,7 @@ static bool svm_gb_page_enable(void)
17526 return true;
17527 }
17528
17529-static struct kvm_x86_ops svm_x86_ops = {
17530+static const struct kvm_x86_ops svm_x86_ops = {
17531 .cpu_has_kvm_support = has_svm,
17532 .disabled_by_bios = is_disabled,
17533 .hardware_setup = svm_hardware_setup,
17534diff -urNp linux-2.6.32.41/arch/x86/kvm/vmx.c linux-2.6.32.41/arch/x86/kvm/vmx.c
17535--- linux-2.6.32.41/arch/x86/kvm/vmx.c 2011-03-27 14:31:47.000000000 -0400
17536+++ linux-2.6.32.41/arch/x86/kvm/vmx.c 2011-05-04 17:56:20.000000000 -0400
17537@@ -570,7 +570,11 @@ static void reload_tss(void)
17538
17539 kvm_get_gdt(&gdt);
17540 descs = (void *)gdt.base;
17541+
17542+ pax_open_kernel();
17543 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
17544+ pax_close_kernel();
17545+
17546 load_TR_desc();
17547 }
17548
17549@@ -1409,8 +1413,11 @@ static __init int hardware_setup(void)
17550 if (!cpu_has_vmx_flexpriority())
17551 flexpriority_enabled = 0;
17552
17553- if (!cpu_has_vmx_tpr_shadow())
17554- kvm_x86_ops->update_cr8_intercept = NULL;
17555+ if (!cpu_has_vmx_tpr_shadow()) {
17556+ pax_open_kernel();
17557+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
17558+ pax_close_kernel();
17559+ }
17560
17561 if (enable_ept && !cpu_has_vmx_ept_2m_page())
17562 kvm_disable_largepages();
17563@@ -2361,7 +2368,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
17564 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
17565
17566 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
17567- vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
17568+ vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
17569 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
17570 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
17571 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
17572@@ -3717,6 +3724,12 @@ static void vmx_vcpu_run(struct kvm_vcpu
17573 "jmp .Lkvm_vmx_return \n\t"
17574 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
17575 ".Lkvm_vmx_return: "
17576+
17577+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17578+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
17579+ ".Lkvm_vmx_return2: "
17580+#endif
17581+
17582 /* Save guest registers, load host registers, keep flags */
17583 "xchg %0, (%%"R"sp) \n\t"
17584 "mov %%"R"ax, %c[rax](%0) \n\t"
17585@@ -3763,8 +3776,13 @@ static void vmx_vcpu_run(struct kvm_vcpu
17586 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
17587 #endif
17588 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
17589+
17590+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17591+ ,[cs]"i"(__KERNEL_CS)
17592+#endif
17593+
17594 : "cc", "memory"
17595- , R"bx", R"di", R"si"
17596+ , R"ax", R"bx", R"di", R"si"
17597 #ifdef CONFIG_X86_64
17598 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
17599 #endif
17600@@ -3781,7 +3799,16 @@ static void vmx_vcpu_run(struct kvm_vcpu
17601 if (vmx->rmode.irq.pending)
17602 fixup_rmode_irq(vmx);
17603
17604- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
17605+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
17606+
17607+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17608+ loadsegment(fs, __KERNEL_PERCPU);
17609+#endif
17610+
17611+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17612+ __set_fs(current_thread_info()->addr_limit);
17613+#endif
17614+
17615 vmx->launched = 1;
17616
17617 vmx_complete_interrupts(vmx);
17618@@ -3956,7 +3983,7 @@ static bool vmx_gb_page_enable(void)
17619 return false;
17620 }
17621
17622-static struct kvm_x86_ops vmx_x86_ops = {
17623+static const struct kvm_x86_ops vmx_x86_ops = {
17624 .cpu_has_kvm_support = cpu_has_kvm_support,
17625 .disabled_by_bios = vmx_disabled_by_bios,
17626 .hardware_setup = hardware_setup,
17627diff -urNp linux-2.6.32.41/arch/x86/kvm/x86.c linux-2.6.32.41/arch/x86/kvm/x86.c
17628--- linux-2.6.32.41/arch/x86/kvm/x86.c 2011-05-10 22:12:01.000000000 -0400
17629+++ linux-2.6.32.41/arch/x86/kvm/x86.c 2011-05-10 22:12:26.000000000 -0400
17630@@ -82,7 +82,7 @@ static void update_cr8_intercept(struct
17631 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
17632 struct kvm_cpuid_entry2 __user *entries);
17633
17634-struct kvm_x86_ops *kvm_x86_ops;
17635+const struct kvm_x86_ops *kvm_x86_ops;
17636 EXPORT_SYMBOL_GPL(kvm_x86_ops);
17637
17638 int ignore_msrs = 0;
17639@@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
17640 struct kvm_cpuid2 *cpuid,
17641 struct kvm_cpuid_entry2 __user *entries)
17642 {
17643- int r;
17644+ int r, i;
17645
17646 r = -E2BIG;
17647 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
17648 goto out;
17649 r = -EFAULT;
17650- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
17651- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
17652+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
17653 goto out;
17654+ for (i = 0; i < cpuid->nent; ++i) {
17655+ struct kvm_cpuid_entry2 cpuid_entry;
17656+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
17657+ goto out;
17658+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
17659+ }
17660 vcpu->arch.cpuid_nent = cpuid->nent;
17661 kvm_apic_set_version(vcpu);
17662 return 0;
17663@@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
17664 struct kvm_cpuid2 *cpuid,
17665 struct kvm_cpuid_entry2 __user *entries)
17666 {
17667- int r;
17668+ int r, i;
17669
17670 vcpu_load(vcpu);
17671 r = -E2BIG;
17672 if (cpuid->nent < vcpu->arch.cpuid_nent)
17673 goto out;
17674 r = -EFAULT;
17675- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
17676- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
17677+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
17678 goto out;
17679+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
17680+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
17681+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
17682+ goto out;
17683+ }
17684 return 0;
17685
17686 out:
17687@@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
17688 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
17689 struct kvm_interrupt *irq)
17690 {
17691- if (irq->irq < 0 || irq->irq >= 256)
17692+ if (irq->irq >= 256)
17693 return -EINVAL;
17694 if (irqchip_in_kernel(vcpu->kvm))
17695 return -ENXIO;
17696@@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cp
17697 .notifier_call = kvmclock_cpufreq_notifier
17698 };
17699
17700-int kvm_arch_init(void *opaque)
17701+int kvm_arch_init(const void *opaque)
17702 {
17703 int r, cpu;
17704- struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
17705+ const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
17706
17707 if (kvm_x86_ops) {
17708 printk(KERN_ERR "kvm: already loaded the other module\n");
17709diff -urNp linux-2.6.32.41/arch/x86/lib/atomic64_32.c linux-2.6.32.41/arch/x86/lib/atomic64_32.c
17710--- linux-2.6.32.41/arch/x86/lib/atomic64_32.c 2011-03-27 14:31:47.000000000 -0400
17711+++ linux-2.6.32.41/arch/x86/lib/atomic64_32.c 2011-05-04 17:56:28.000000000 -0400
17712@@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u6
17713 }
17714 EXPORT_SYMBOL(atomic64_cmpxchg);
17715
17716+u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
17717+{
17718+ return cmpxchg8b(&ptr->counter, old_val, new_val);
17719+}
17720+EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
17721+
17722 /**
17723 * atomic64_xchg - xchg atomic64 variable
17724 * @ptr: pointer to type atomic64_t
17725@@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 n
17726 EXPORT_SYMBOL(atomic64_xchg);
17727
17728 /**
17729+ * atomic64_xchg_unchecked - xchg atomic64 variable
17730+ * @ptr: pointer to type atomic64_unchecked_t
17731+ * @new_val: value to assign
17732+ *
17733+ * Atomically xchgs the value of @ptr to @new_val and returns
17734+ * the old value.
17735+ */
17736+u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
17737+{
17738+ /*
17739+ * Try first with a (possibly incorrect) assumption about
17740+ * what we have there. We'll do two loops most likely,
17741+ * but we'll get an ownership MESI transaction straight away
17742+ * instead of a read transaction followed by a
17743+ * flush-for-ownership transaction:
17744+ */
17745+ u64 old_val, real_val = 0;
17746+
17747+ do {
17748+ old_val = real_val;
17749+
17750+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
17751+
17752+ } while (real_val != old_val);
17753+
17754+ return old_val;
17755+}
17756+EXPORT_SYMBOL(atomic64_xchg_unchecked);
17757+
17758+/**
17759 * atomic64_set - set atomic64 variable
17760 * @ptr: pointer to type atomic64_t
17761 * @new_val: value to assign
17762@@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 n
17763 EXPORT_SYMBOL(atomic64_set);
17764
17765 /**
17766-EXPORT_SYMBOL(atomic64_read);
17767+ * atomic64_set_unchecked - set atomic64 variable
17768+ * @ptr: pointer to type atomic64_unchecked_t
17769+ * @new_val: value to assign
17770+ *
17771+ * Atomically sets the value of @ptr to @new_val.
17772+ */
17773+void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
17774+{
17775+ atomic64_xchg_unchecked(ptr, new_val);
17776+}
17777+EXPORT_SYMBOL(atomic64_set_unchecked);
17778+
17779+/**
17780 * atomic64_add_return - add and return
17781 * @delta: integer value to add
17782 * @ptr: pointer to type atomic64_t
17783@@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 del
17784 }
17785 EXPORT_SYMBOL(atomic64_add_return);
17786
17787+/**
17788+ * atomic64_add_return_unchecked - add and return
17789+ * @delta: integer value to add
17790+ * @ptr: pointer to type atomic64_unchecked_t
17791+ *
17792+ * Atomically adds @delta to @ptr and returns @delta + *@ptr
17793+ */
17794+noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
17795+{
17796+ /*
17797+ * Try first with a (possibly incorrect) assumption about
17798+ * what we have there. We'll do two loops most likely,
17799+ * but we'll get an ownership MESI transaction straight away
17800+ * instead of a read transaction followed by a
17801+ * flush-for-ownership transaction:
17802+ */
17803+ u64 old_val, new_val, real_val = 0;
17804+
17805+ do {
17806+ old_val = real_val;
17807+ new_val = old_val + delta;
17808+
17809+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
17810+
17811+ } while (real_val != old_val);
17812+
17813+ return new_val;
17814+}
17815+EXPORT_SYMBOL(atomic64_add_return_unchecked);
17816+
17817 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
17818 {
17819 return atomic64_add_return(-delta, ptr);
17820 }
17821 EXPORT_SYMBOL(atomic64_sub_return);
17822
17823+u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
17824+{
17825+ return atomic64_add_return_unchecked(-delta, ptr);
17826+}
17827+EXPORT_SYMBOL(atomic64_sub_return_unchecked);
17828+
17829 u64 atomic64_inc_return(atomic64_t *ptr)
17830 {
17831 return atomic64_add_return(1, ptr);
17832 }
17833 EXPORT_SYMBOL(atomic64_inc_return);
17834
17835+u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
17836+{
17837+ return atomic64_add_return_unchecked(1, ptr);
17838+}
17839+EXPORT_SYMBOL(atomic64_inc_return_unchecked);
17840+
17841 u64 atomic64_dec_return(atomic64_t *ptr)
17842 {
17843 return atomic64_sub_return(1, ptr);
17844 }
17845 EXPORT_SYMBOL(atomic64_dec_return);
17846
17847+u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
17848+{
17849+ return atomic64_sub_return_unchecked(1, ptr);
17850+}
17851+EXPORT_SYMBOL(atomic64_dec_return_unchecked);
17852+
17853 /**
17854 * atomic64_add - add integer to atomic64 variable
17855 * @delta: integer value to add
17856@@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t
17857 EXPORT_SYMBOL(atomic64_add);
17858
17859 /**
17860+ * atomic64_add_unchecked - add integer to atomic64 variable
17861+ * @delta: integer value to add
17862+ * @ptr: pointer to type atomic64_unchecked_t
17863+ *
17864+ * Atomically adds @delta to @ptr.
17865+ */
17866+void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
17867+{
17868+ atomic64_add_return_unchecked(delta, ptr);
17869+}
17870+EXPORT_SYMBOL(atomic64_add_unchecked);
17871+
17872+/**
17873 * atomic64_sub - subtract the atomic64 variable
17874 * @delta: integer value to subtract
17875 * @ptr: pointer to type atomic64_t
17876@@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t
17877 EXPORT_SYMBOL(atomic64_sub);
17878
17879 /**
17880+ * atomic64_sub_unchecked - subtract the atomic64 variable
17881+ * @delta: integer value to subtract
17882+ * @ptr: pointer to type atomic64_unchecked_t
17883+ *
17884+ * Atomically subtracts @delta from @ptr.
17885+ */
17886+void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
17887+{
17888+ atomic64_add_unchecked(-delta, ptr);
17889+}
17890+EXPORT_SYMBOL(atomic64_sub_unchecked);
17891+
17892+/**
17893 * atomic64_sub_and_test - subtract value from variable and test result
17894 * @delta: integer value to subtract
17895 * @ptr: pointer to type atomic64_t
17896@@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
17897 EXPORT_SYMBOL(atomic64_inc);
17898
17899 /**
17900+ * atomic64_inc_unchecked - increment atomic64 variable
17901+ * @ptr: pointer to type atomic64_unchecked_t
17902+ *
17903+ * Atomically increments @ptr by 1.
17904+ */
17905+void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
17906+{
17907+ atomic64_add_unchecked(1, ptr);
17908+}
17909+EXPORT_SYMBOL(atomic64_inc_unchecked);
17910+
17911+/**
17912 * atomic64_dec - decrement atomic64 variable
17913 * @ptr: pointer to type atomic64_t
17914 *
17915@@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
17916 EXPORT_SYMBOL(atomic64_dec);
17917
17918 /**
17919+ * atomic64_dec_unchecked - decrement atomic64 variable
17920+ * @ptr: pointer to type atomic64_unchecked_t
17921+ *
17922+ * Atomically decrements @ptr by 1.
17923+ */
17924+void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
17925+{
17926+ atomic64_sub_unchecked(1, ptr);
17927+}
17928+EXPORT_SYMBOL(atomic64_dec_unchecked);
17929+
17930+/**
17931 * atomic64_dec_and_test - decrement and test
17932 * @ptr: pointer to type atomic64_t
17933 *
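
The atomic64_*_unchecked helpers added in the hunks above all rely on the same compare-and-swap retry loop the file already uses for atomic64_add_return: guess the current value, attempt the exchange, and retry until the value read back equals the guess, so the very first cmpxchg already requests cache-line ownership instead of issuing a plain read followed by a flush-for-ownership transaction. A minimal user-space sketch of that pattern, with GCC's __sync_val_compare_and_swap standing in for the kernel's cmpxchg8b wrapper (atomic64_demo_t and demo_add_return are illustrative names only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for atomic64_unchecked_t. */
typedef struct { volatile uint64_t counter; } atomic64_demo_t;

/* Same retry loop as atomic64_add_return_unchecked above: start from a
 * guessed old value so the first compare-and-swap already acquires the
 * cache line for ownership rather than reading it first. */
static uint64_t demo_add_return(uint64_t delta, atomic64_demo_t *ptr)
{
	uint64_t old_val, new_val, real_val = 0;

	do {
		old_val = real_val;
		new_val = old_val + delta;
		real_val = __sync_val_compare_and_swap(&ptr->counter, old_val, new_val);
	} while (real_val != old_val);

	return new_val;
}

int main(void)
{
	atomic64_demo_t v = { 40 };
	printf("%llu\n", (unsigned long long)demo_add_return(2, &v));	/* prints 42 */
	return 0;
}
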
17934diff -urNp linux-2.6.32.41/arch/x86/lib/checksum_32.S linux-2.6.32.41/arch/x86/lib/checksum_32.S
17935--- linux-2.6.32.41/arch/x86/lib/checksum_32.S 2011-03-27 14:31:47.000000000 -0400
17936+++ linux-2.6.32.41/arch/x86/lib/checksum_32.S 2011-04-17 15:56:46.000000000 -0400
17937@@ -28,7 +28,8 @@
17938 #include <linux/linkage.h>
17939 #include <asm/dwarf2.h>
17940 #include <asm/errno.h>
17941-
17942+#include <asm/segment.h>
17943+
17944 /*
17945 * computes a partial checksum, e.g. for TCP/UDP fragments
17946 */
17947@@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (
17948
17949 #define ARGBASE 16
17950 #define FP 12
17951-
17952-ENTRY(csum_partial_copy_generic)
17953+
17954+ENTRY(csum_partial_copy_generic_to_user)
17955 CFI_STARTPROC
17956+
17957+#ifdef CONFIG_PAX_MEMORY_UDEREF
17958+ pushl %gs
17959+ CFI_ADJUST_CFA_OFFSET 4
17960+ popl %es
17961+ CFI_ADJUST_CFA_OFFSET -4
17962+ jmp csum_partial_copy_generic
17963+#endif
17964+
17965+ENTRY(csum_partial_copy_generic_from_user)
17966+
17967+#ifdef CONFIG_PAX_MEMORY_UDEREF
17968+ pushl %gs
17969+ CFI_ADJUST_CFA_OFFSET 4
17970+ popl %ds
17971+ CFI_ADJUST_CFA_OFFSET -4
17972+#endif
17973+
17974+ENTRY(csum_partial_copy_generic)
17975 subl $4,%esp
17976 CFI_ADJUST_CFA_OFFSET 4
17977 pushl %edi
17978@@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
17979 jmp 4f
17980 SRC(1: movw (%esi), %bx )
17981 addl $2, %esi
17982-DST( movw %bx, (%edi) )
17983+DST( movw %bx, %es:(%edi) )
17984 addl $2, %edi
17985 addw %bx, %ax
17986 adcl $0, %eax
17987@@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
17988 SRC(1: movl (%esi), %ebx )
17989 SRC( movl 4(%esi), %edx )
17990 adcl %ebx, %eax
17991-DST( movl %ebx, (%edi) )
17992+DST( movl %ebx, %es:(%edi) )
17993 adcl %edx, %eax
17994-DST( movl %edx, 4(%edi) )
17995+DST( movl %edx, %es:4(%edi) )
17996
17997 SRC( movl 8(%esi), %ebx )
17998 SRC( movl 12(%esi), %edx )
17999 adcl %ebx, %eax
18000-DST( movl %ebx, 8(%edi) )
18001+DST( movl %ebx, %es:8(%edi) )
18002 adcl %edx, %eax
18003-DST( movl %edx, 12(%edi) )
18004+DST( movl %edx, %es:12(%edi) )
18005
18006 SRC( movl 16(%esi), %ebx )
18007 SRC( movl 20(%esi), %edx )
18008 adcl %ebx, %eax
18009-DST( movl %ebx, 16(%edi) )
18010+DST( movl %ebx, %es:16(%edi) )
18011 adcl %edx, %eax
18012-DST( movl %edx, 20(%edi) )
18013+DST( movl %edx, %es:20(%edi) )
18014
18015 SRC( movl 24(%esi), %ebx )
18016 SRC( movl 28(%esi), %edx )
18017 adcl %ebx, %eax
18018-DST( movl %ebx, 24(%edi) )
18019+DST( movl %ebx, %es:24(%edi) )
18020 adcl %edx, %eax
18021-DST( movl %edx, 28(%edi) )
18022+DST( movl %edx, %es:28(%edi) )
18023
18024 lea 32(%esi), %esi
18025 lea 32(%edi), %edi
18026@@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
18027 shrl $2, %edx # This clears CF
18028 SRC(3: movl (%esi), %ebx )
18029 adcl %ebx, %eax
18030-DST( movl %ebx, (%edi) )
18031+DST( movl %ebx, %es:(%edi) )
18032 lea 4(%esi), %esi
18033 lea 4(%edi), %edi
18034 dec %edx
18035@@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
18036 jb 5f
18037 SRC( movw (%esi), %cx )
18038 leal 2(%esi), %esi
18039-DST( movw %cx, (%edi) )
18040+DST( movw %cx, %es:(%edi) )
18041 leal 2(%edi), %edi
18042 je 6f
18043 shll $16,%ecx
18044 SRC(5: movb (%esi), %cl )
18045-DST( movb %cl, (%edi) )
18046+DST( movb %cl, %es:(%edi) )
18047 6: addl %ecx, %eax
18048 adcl $0, %eax
18049 7:
18050@@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
18051
18052 6001:
18053 movl ARGBASE+20(%esp), %ebx # src_err_ptr
18054- movl $-EFAULT, (%ebx)
18055+ movl $-EFAULT, %ss:(%ebx)
18056
18057 # zero the complete destination - computing the rest
18058 # is too much work
18059@@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
18060
18061 6002:
18062 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
18063- movl $-EFAULT,(%ebx)
18064+ movl $-EFAULT,%ss:(%ebx)
18065 jmp 5000b
18066
18067 .previous
18068
18069+ pushl %ss
18070+ CFI_ADJUST_CFA_OFFSET 4
18071+ popl %ds
18072+ CFI_ADJUST_CFA_OFFSET -4
18073+ pushl %ss
18074+ CFI_ADJUST_CFA_OFFSET 4
18075+ popl %es
18076+ CFI_ADJUST_CFA_OFFSET -4
18077 popl %ebx
18078 CFI_ADJUST_CFA_OFFSET -4
18079 CFI_RESTORE ebx
18080@@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
18081 CFI_ADJUST_CFA_OFFSET -4
18082 ret
18083 CFI_ENDPROC
18084-ENDPROC(csum_partial_copy_generic)
18085+ENDPROC(csum_partial_copy_generic_to_user)
18086
18087 #else
18088
18089 /* Version for PentiumII/PPro */
18090
18091 #define ROUND1(x) \
18092+ nop; nop; nop; \
18093 SRC(movl x(%esi), %ebx ) ; \
18094 addl %ebx, %eax ; \
18095- DST(movl %ebx, x(%edi) ) ;
18096+ DST(movl %ebx, %es:x(%edi)) ;
18097
18098 #define ROUND(x) \
18099+ nop; nop; nop; \
18100 SRC(movl x(%esi), %ebx ) ; \
18101 adcl %ebx, %eax ; \
18102- DST(movl %ebx, x(%edi) ) ;
18103+ DST(movl %ebx, %es:x(%edi)) ;
18104
18105 #define ARGBASE 12
18106-
18107-ENTRY(csum_partial_copy_generic)
18108+
18109+ENTRY(csum_partial_copy_generic_to_user)
18110 CFI_STARTPROC
18111+
18112+#ifdef CONFIG_PAX_MEMORY_UDEREF
18113+ pushl %gs
18114+ CFI_ADJUST_CFA_OFFSET 4
18115+ popl %es
18116+ CFI_ADJUST_CFA_OFFSET -4
18117+ jmp csum_partial_copy_generic
18118+#endif
18119+
18120+ENTRY(csum_partial_copy_generic_from_user)
18121+
18122+#ifdef CONFIG_PAX_MEMORY_UDEREF
18123+ pushl %gs
18124+ CFI_ADJUST_CFA_OFFSET 4
18125+ popl %ds
18126+ CFI_ADJUST_CFA_OFFSET -4
18127+#endif
18128+
18129+ENTRY(csum_partial_copy_generic)
18130 pushl %ebx
18131 CFI_ADJUST_CFA_OFFSET 4
18132 CFI_REL_OFFSET ebx, 0
18133@@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
18134 subl %ebx, %edi
18135 lea -1(%esi),%edx
18136 andl $-32,%edx
18137- lea 3f(%ebx,%ebx), %ebx
18138+ lea 3f(%ebx,%ebx,2), %ebx
18139 testl %esi, %esi
18140 jmp *%ebx
18141 1: addl $64,%esi
18142@@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
18143 jb 5f
18144 SRC( movw (%esi), %dx )
18145 leal 2(%esi), %esi
18146-DST( movw %dx, (%edi) )
18147+DST( movw %dx, %es:(%edi) )
18148 leal 2(%edi), %edi
18149 je 6f
18150 shll $16,%edx
18151 5:
18152 SRC( movb (%esi), %dl )
18153-DST( movb %dl, (%edi) )
18154+DST( movb %dl, %es:(%edi) )
18155 6: addl %edx, %eax
18156 adcl $0, %eax
18157 7:
18158 .section .fixup, "ax"
18159 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
18160- movl $-EFAULT, (%ebx)
18161+ movl $-EFAULT, %ss:(%ebx)
18162 # zero the complete destination (computing the rest is too much work)
18163 movl ARGBASE+8(%esp),%edi # dst
18164 movl ARGBASE+12(%esp),%ecx # len
18165@@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
18166 rep; stosb
18167 jmp 7b
18168 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
18169- movl $-EFAULT, (%ebx)
18170+ movl $-EFAULT, %ss:(%ebx)
18171 jmp 7b
18172 .previous
18173
18174+#ifdef CONFIG_PAX_MEMORY_UDEREF
18175+ pushl %ss
18176+ CFI_ADJUST_CFA_OFFSET 4
18177+ popl %ds
18178+ CFI_ADJUST_CFA_OFFSET -4
18179+ pushl %ss
18180+ CFI_ADJUST_CFA_OFFSET 4
18181+ popl %es
18182+ CFI_ADJUST_CFA_OFFSET -4
18183+#endif
18184+
18185 popl %esi
18186 CFI_ADJUST_CFA_OFFSET -4
18187 CFI_RESTORE esi
18188@@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
18189 CFI_RESTORE ebx
18190 ret
18191 CFI_ENDPROC
18192-ENDPROC(csum_partial_copy_generic)
18193+ENDPROC(csum_partial_copy_generic_to_user)
18194
18195 #undef ROUND
18196 #undef ROUND1
18197diff -urNp linux-2.6.32.41/arch/x86/lib/clear_page_64.S linux-2.6.32.41/arch/x86/lib/clear_page_64.S
18198--- linux-2.6.32.41/arch/x86/lib/clear_page_64.S 2011-03-27 14:31:47.000000000 -0400
18199+++ linux-2.6.32.41/arch/x86/lib/clear_page_64.S 2011-04-17 15:56:46.000000000 -0400
18200@@ -43,7 +43,7 @@ ENDPROC(clear_page)
18201
18202 #include <asm/cpufeature.h>
18203
18204- .section .altinstr_replacement,"ax"
18205+ .section .altinstr_replacement,"a"
18206 1: .byte 0xeb /* jmp <disp8> */
18207 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
18208 2:
18209diff -urNp linux-2.6.32.41/arch/x86/lib/copy_page_64.S linux-2.6.32.41/arch/x86/lib/copy_page_64.S
18210--- linux-2.6.32.41/arch/x86/lib/copy_page_64.S 2011-03-27 14:31:47.000000000 -0400
18211+++ linux-2.6.32.41/arch/x86/lib/copy_page_64.S 2011-04-17 15:56:46.000000000 -0400
18212@@ -104,7 +104,7 @@ ENDPROC(copy_page)
18213
18214 #include <asm/cpufeature.h>
18215
18216- .section .altinstr_replacement,"ax"
18217+ .section .altinstr_replacement,"a"
18218 1: .byte 0xeb /* jmp <disp8> */
18219 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
18220 2:
18221diff -urNp linux-2.6.32.41/arch/x86/lib/copy_user_64.S linux-2.6.32.41/arch/x86/lib/copy_user_64.S
18222--- linux-2.6.32.41/arch/x86/lib/copy_user_64.S 2011-03-27 14:31:47.000000000 -0400
18223+++ linux-2.6.32.41/arch/x86/lib/copy_user_64.S 2011-04-17 15:56:46.000000000 -0400
18224@@ -15,13 +15,14 @@
18225 #include <asm/asm-offsets.h>
18226 #include <asm/thread_info.h>
18227 #include <asm/cpufeature.h>
18228+#include <asm/pgtable.h>
18229
18230 .macro ALTERNATIVE_JUMP feature,orig,alt
18231 0:
18232 .byte 0xe9 /* 32bit jump */
18233 .long \orig-1f /* by default jump to orig */
18234 1:
18235- .section .altinstr_replacement,"ax"
18236+ .section .altinstr_replacement,"a"
18237 2: .byte 0xe9 /* near jump with 32bit immediate */
18238 .long \alt-1b /* offset */ /* or alternatively to alt */
18239 .previous
18240@@ -64,49 +65,19 @@
18241 #endif
18242 .endm
18243
18244-/* Standard copy_to_user with segment limit checking */
18245-ENTRY(copy_to_user)
18246- CFI_STARTPROC
18247- GET_THREAD_INFO(%rax)
18248- movq %rdi,%rcx
18249- addq %rdx,%rcx
18250- jc bad_to_user
18251- cmpq TI_addr_limit(%rax),%rcx
18252- jae bad_to_user
18253- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
18254- CFI_ENDPROC
18255-ENDPROC(copy_to_user)
18256-
18257-/* Standard copy_from_user with segment limit checking */
18258-ENTRY(copy_from_user)
18259- CFI_STARTPROC
18260- GET_THREAD_INFO(%rax)
18261- movq %rsi,%rcx
18262- addq %rdx,%rcx
18263- jc bad_from_user
18264- cmpq TI_addr_limit(%rax),%rcx
18265- jae bad_from_user
18266- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
18267- CFI_ENDPROC
18268-ENDPROC(copy_from_user)
18269-
18270 ENTRY(copy_user_generic)
18271 CFI_STARTPROC
18272 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
18273 CFI_ENDPROC
18274 ENDPROC(copy_user_generic)
18275
18276-ENTRY(__copy_from_user_inatomic)
18277- CFI_STARTPROC
18278- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
18279- CFI_ENDPROC
18280-ENDPROC(__copy_from_user_inatomic)
18281-
18282 .section .fixup,"ax"
18283 /* must zero dest */
18284 ENTRY(bad_from_user)
18285 bad_from_user:
18286 CFI_STARTPROC
18287+ testl %edx,%edx
18288+ js bad_to_user
18289 movl %edx,%ecx
18290 xorl %eax,%eax
18291 rep
18292diff -urNp linux-2.6.32.41/arch/x86/lib/copy_user_nocache_64.S linux-2.6.32.41/arch/x86/lib/copy_user_nocache_64.S
18293--- linux-2.6.32.41/arch/x86/lib/copy_user_nocache_64.S 2011-03-27 14:31:47.000000000 -0400
18294+++ linux-2.6.32.41/arch/x86/lib/copy_user_nocache_64.S 2011-04-17 15:56:46.000000000 -0400
18295@@ -14,6 +14,7 @@
18296 #include <asm/current.h>
18297 #include <asm/asm-offsets.h>
18298 #include <asm/thread_info.h>
18299+#include <asm/pgtable.h>
18300
18301 .macro ALIGN_DESTINATION
18302 #ifdef FIX_ALIGNMENT
18303@@ -50,6 +51,15 @@
18304 */
18305 ENTRY(__copy_user_nocache)
18306 CFI_STARTPROC
18307+
18308+#ifdef CONFIG_PAX_MEMORY_UDEREF
18309+ mov $PAX_USER_SHADOW_BASE,%rcx
18310+ cmp %rcx,%rsi
18311+ jae 1f
18312+ add %rcx,%rsi
18313+1:
18314+#endif
18315+
18316 cmpl $8,%edx
18317 jb 20f /* less then 8 bytes, go to byte copy loop */
18318 ALIGN_DESTINATION
18319diff -urNp linux-2.6.32.41/arch/x86/lib/csum-wrappers_64.c linux-2.6.32.41/arch/x86/lib/csum-wrappers_64.c
18320--- linux-2.6.32.41/arch/x86/lib/csum-wrappers_64.c 2011-03-27 14:31:47.000000000 -0400
18321+++ linux-2.6.32.41/arch/x86/lib/csum-wrappers_64.c 2011-05-04 17:56:20.000000000 -0400
18322@@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
18323 len -= 2;
18324 }
18325 }
18326+
18327+#ifdef CONFIG_PAX_MEMORY_UDEREF
18328+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
18329+ src += PAX_USER_SHADOW_BASE;
18330+#endif
18331+
18332 isum = csum_partial_copy_generic((__force const void *)src,
18333 dst, len, isum, errp, NULL);
18334 if (unlikely(*errp))
18335@@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
18336 }
18337
18338 *errp = 0;
18339+
18340+#ifdef CONFIG_PAX_MEMORY_UDEREF
18341+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
18342+ dst += PAX_USER_SHADOW_BASE;
18343+#endif
18344+
18345 return csum_partial_copy_generic(src, (void __force *)dst,
18346 len, isum, NULL, errp);
18347 }
18348diff -urNp linux-2.6.32.41/arch/x86/lib/getuser.S linux-2.6.32.41/arch/x86/lib/getuser.S
18349--- linux-2.6.32.41/arch/x86/lib/getuser.S 2011-03-27 14:31:47.000000000 -0400
18350+++ linux-2.6.32.41/arch/x86/lib/getuser.S 2011-04-17 15:56:46.000000000 -0400
18351@@ -33,14 +33,35 @@
18352 #include <asm/asm-offsets.h>
18353 #include <asm/thread_info.h>
18354 #include <asm/asm.h>
18355+#include <asm/segment.h>
18356+#include <asm/pgtable.h>
18357+
18358+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18359+#define __copyuser_seg gs;
18360+#else
18361+#define __copyuser_seg
18362+#endif
18363
18364 .text
18365 ENTRY(__get_user_1)
18366 CFI_STARTPROC
18367+
18368+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18369 GET_THREAD_INFO(%_ASM_DX)
18370 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18371 jae bad_get_user
18372-1: movzb (%_ASM_AX),%edx
18373+
18374+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18375+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18376+ cmp %_ASM_DX,%_ASM_AX
18377+ jae 1234f
18378+ add %_ASM_DX,%_ASM_AX
18379+1234:
18380+#endif
18381+
18382+#endif
18383+
18384+1: __copyuser_seg movzb (%_ASM_AX),%edx
18385 xor %eax,%eax
18386 ret
18387 CFI_ENDPROC
18388@@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
18389 ENTRY(__get_user_2)
18390 CFI_STARTPROC
18391 add $1,%_ASM_AX
18392+
18393+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18394 jc bad_get_user
18395 GET_THREAD_INFO(%_ASM_DX)
18396 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18397 jae bad_get_user
18398-2: movzwl -1(%_ASM_AX),%edx
18399+
18400+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18401+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18402+ cmp %_ASM_DX,%_ASM_AX
18403+ jae 1234f
18404+ add %_ASM_DX,%_ASM_AX
18405+1234:
18406+#endif
18407+
18408+#endif
18409+
18410+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
18411 xor %eax,%eax
18412 ret
18413 CFI_ENDPROC
18414@@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
18415 ENTRY(__get_user_4)
18416 CFI_STARTPROC
18417 add $3,%_ASM_AX
18418+
18419+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18420 jc bad_get_user
18421 GET_THREAD_INFO(%_ASM_DX)
18422 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18423 jae bad_get_user
18424-3: mov -3(%_ASM_AX),%edx
18425+
18426+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18427+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18428+ cmp %_ASM_DX,%_ASM_AX
18429+ jae 1234f
18430+ add %_ASM_DX,%_ASM_AX
18431+1234:
18432+#endif
18433+
18434+#endif
18435+
18436+3: __copyuser_seg mov -3(%_ASM_AX),%edx
18437 xor %eax,%eax
18438 ret
18439 CFI_ENDPROC
18440@@ -80,6 +127,15 @@ ENTRY(__get_user_8)
18441 GET_THREAD_INFO(%_ASM_DX)
18442 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18443 jae bad_get_user
18444+
18445+#ifdef CONFIG_PAX_MEMORY_UDEREF
18446+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18447+ cmp %_ASM_DX,%_ASM_AX
18448+ jae 1234f
18449+ add %_ASM_DX,%_ASM_AX
18450+1234:
18451+#endif
18452+
18453 4: movq -7(%_ASM_AX),%_ASM_DX
18454 xor %eax,%eax
18455 ret
18456diff -urNp linux-2.6.32.41/arch/x86/lib/memcpy_64.S linux-2.6.32.41/arch/x86/lib/memcpy_64.S
18457--- linux-2.6.32.41/arch/x86/lib/memcpy_64.S 2011-03-27 14:31:47.000000000 -0400
18458+++ linux-2.6.32.41/arch/x86/lib/memcpy_64.S 2011-04-17 15:56:46.000000000 -0400
18459@@ -128,7 +128,7 @@ ENDPROC(__memcpy)
18460 * It is also a lot simpler. Use this when possible:
18461 */
18462
18463- .section .altinstr_replacement, "ax"
18464+ .section .altinstr_replacement, "a"
18465 1: .byte 0xeb /* jmp <disp8> */
18466 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
18467 2:
18468diff -urNp linux-2.6.32.41/arch/x86/lib/memset_64.S linux-2.6.32.41/arch/x86/lib/memset_64.S
18469--- linux-2.6.32.41/arch/x86/lib/memset_64.S 2011-03-27 14:31:47.000000000 -0400
18470+++ linux-2.6.32.41/arch/x86/lib/memset_64.S 2011-04-17 15:56:46.000000000 -0400
18471@@ -118,7 +118,7 @@ ENDPROC(__memset)
18472
18473 #include <asm/cpufeature.h>
18474
18475- .section .altinstr_replacement,"ax"
18476+ .section .altinstr_replacement,"a"
18477 1: .byte 0xeb /* jmp <disp8> */
18478 .byte (memset_c - memset) - (2f - 1b) /* offset */
18479 2:
18480diff -urNp linux-2.6.32.41/arch/x86/lib/mmx_32.c linux-2.6.32.41/arch/x86/lib/mmx_32.c
18481--- linux-2.6.32.41/arch/x86/lib/mmx_32.c 2011-03-27 14:31:47.000000000 -0400
18482+++ linux-2.6.32.41/arch/x86/lib/mmx_32.c 2011-04-17 15:56:46.000000000 -0400
18483@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
18484 {
18485 void *p;
18486 int i;
18487+ unsigned long cr0;
18488
18489 if (unlikely(in_interrupt()))
18490 return __memcpy(to, from, len);
18491@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
18492 kernel_fpu_begin();
18493
18494 __asm__ __volatile__ (
18495- "1: prefetch (%0)\n" /* This set is 28 bytes */
18496- " prefetch 64(%0)\n"
18497- " prefetch 128(%0)\n"
18498- " prefetch 192(%0)\n"
18499- " prefetch 256(%0)\n"
18500+ "1: prefetch (%1)\n" /* This set is 28 bytes */
18501+ " prefetch 64(%1)\n"
18502+ " prefetch 128(%1)\n"
18503+ " prefetch 192(%1)\n"
18504+ " prefetch 256(%1)\n"
18505 "2: \n"
18506 ".section .fixup, \"ax\"\n"
18507- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18508+ "3: \n"
18509+
18510+#ifdef CONFIG_PAX_KERNEXEC
18511+ " movl %%cr0, %0\n"
18512+ " movl %0, %%eax\n"
18513+ " andl $0xFFFEFFFF, %%eax\n"
18514+ " movl %%eax, %%cr0\n"
18515+#endif
18516+
18517+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18518+
18519+#ifdef CONFIG_PAX_KERNEXEC
18520+ " movl %0, %%cr0\n"
18521+#endif
18522+
18523 " jmp 2b\n"
18524 ".previous\n"
18525 _ASM_EXTABLE(1b, 3b)
18526- : : "r" (from));
18527+ : "=&r" (cr0) : "r" (from) : "ax");
18528
18529 for ( ; i > 5; i--) {
18530 __asm__ __volatile__ (
18531- "1: prefetch 320(%0)\n"
18532- "2: movq (%0), %%mm0\n"
18533- " movq 8(%0), %%mm1\n"
18534- " movq 16(%0), %%mm2\n"
18535- " movq 24(%0), %%mm3\n"
18536- " movq %%mm0, (%1)\n"
18537- " movq %%mm1, 8(%1)\n"
18538- " movq %%mm2, 16(%1)\n"
18539- " movq %%mm3, 24(%1)\n"
18540- " movq 32(%0), %%mm0\n"
18541- " movq 40(%0), %%mm1\n"
18542- " movq 48(%0), %%mm2\n"
18543- " movq 56(%0), %%mm3\n"
18544- " movq %%mm0, 32(%1)\n"
18545- " movq %%mm1, 40(%1)\n"
18546- " movq %%mm2, 48(%1)\n"
18547- " movq %%mm3, 56(%1)\n"
18548+ "1: prefetch 320(%1)\n"
18549+ "2: movq (%1), %%mm0\n"
18550+ " movq 8(%1), %%mm1\n"
18551+ " movq 16(%1), %%mm2\n"
18552+ " movq 24(%1), %%mm3\n"
18553+ " movq %%mm0, (%2)\n"
18554+ " movq %%mm1, 8(%2)\n"
18555+ " movq %%mm2, 16(%2)\n"
18556+ " movq %%mm3, 24(%2)\n"
18557+ " movq 32(%1), %%mm0\n"
18558+ " movq 40(%1), %%mm1\n"
18559+ " movq 48(%1), %%mm2\n"
18560+ " movq 56(%1), %%mm3\n"
18561+ " movq %%mm0, 32(%2)\n"
18562+ " movq %%mm1, 40(%2)\n"
18563+ " movq %%mm2, 48(%2)\n"
18564+ " movq %%mm3, 56(%2)\n"
18565 ".section .fixup, \"ax\"\n"
18566- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18567+ "3:\n"
18568+
18569+#ifdef CONFIG_PAX_KERNEXEC
18570+ " movl %%cr0, %0\n"
18571+ " movl %0, %%eax\n"
18572+ " andl $0xFFFEFFFF, %%eax\n"
18573+ " movl %%eax, %%cr0\n"
18574+#endif
18575+
18576+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18577+
18578+#ifdef CONFIG_PAX_KERNEXEC
18579+ " movl %0, %%cr0\n"
18580+#endif
18581+
18582 " jmp 2b\n"
18583 ".previous\n"
18584 _ASM_EXTABLE(1b, 3b)
18585- : : "r" (from), "r" (to) : "memory");
18586+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18587
18588 from += 64;
18589 to += 64;
18590@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
18591 static void fast_copy_page(void *to, void *from)
18592 {
18593 int i;
18594+ unsigned long cr0;
18595
18596 kernel_fpu_begin();
18597
18598@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
18599 * but that is for later. -AV
18600 */
18601 __asm__ __volatile__(
18602- "1: prefetch (%0)\n"
18603- " prefetch 64(%0)\n"
18604- " prefetch 128(%0)\n"
18605- " prefetch 192(%0)\n"
18606- " prefetch 256(%0)\n"
18607+ "1: prefetch (%1)\n"
18608+ " prefetch 64(%1)\n"
18609+ " prefetch 128(%1)\n"
18610+ " prefetch 192(%1)\n"
18611+ " prefetch 256(%1)\n"
18612 "2: \n"
18613 ".section .fixup, \"ax\"\n"
18614- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18615+ "3: \n"
18616+
18617+#ifdef CONFIG_PAX_KERNEXEC
18618+ " movl %%cr0, %0\n"
18619+ " movl %0, %%eax\n"
18620+ " andl $0xFFFEFFFF, %%eax\n"
18621+ " movl %%eax, %%cr0\n"
18622+#endif
18623+
18624+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18625+
18626+#ifdef CONFIG_PAX_KERNEXEC
18627+ " movl %0, %%cr0\n"
18628+#endif
18629+
18630 " jmp 2b\n"
18631 ".previous\n"
18632- _ASM_EXTABLE(1b, 3b) : : "r" (from));
18633+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
18634
18635 for (i = 0; i < (4096-320)/64; i++) {
18636 __asm__ __volatile__ (
18637- "1: prefetch 320(%0)\n"
18638- "2: movq (%0), %%mm0\n"
18639- " movntq %%mm0, (%1)\n"
18640- " movq 8(%0), %%mm1\n"
18641- " movntq %%mm1, 8(%1)\n"
18642- " movq 16(%0), %%mm2\n"
18643- " movntq %%mm2, 16(%1)\n"
18644- " movq 24(%0), %%mm3\n"
18645- " movntq %%mm3, 24(%1)\n"
18646- " movq 32(%0), %%mm4\n"
18647- " movntq %%mm4, 32(%1)\n"
18648- " movq 40(%0), %%mm5\n"
18649- " movntq %%mm5, 40(%1)\n"
18650- " movq 48(%0), %%mm6\n"
18651- " movntq %%mm6, 48(%1)\n"
18652- " movq 56(%0), %%mm7\n"
18653- " movntq %%mm7, 56(%1)\n"
18654+ "1: prefetch 320(%1)\n"
18655+ "2: movq (%1), %%mm0\n"
18656+ " movntq %%mm0, (%2)\n"
18657+ " movq 8(%1), %%mm1\n"
18658+ " movntq %%mm1, 8(%2)\n"
18659+ " movq 16(%1), %%mm2\n"
18660+ " movntq %%mm2, 16(%2)\n"
18661+ " movq 24(%1), %%mm3\n"
18662+ " movntq %%mm3, 24(%2)\n"
18663+ " movq 32(%1), %%mm4\n"
18664+ " movntq %%mm4, 32(%2)\n"
18665+ " movq 40(%1), %%mm5\n"
18666+ " movntq %%mm5, 40(%2)\n"
18667+ " movq 48(%1), %%mm6\n"
18668+ " movntq %%mm6, 48(%2)\n"
18669+ " movq 56(%1), %%mm7\n"
18670+ " movntq %%mm7, 56(%2)\n"
18671 ".section .fixup, \"ax\"\n"
18672- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18673+ "3:\n"
18674+
18675+#ifdef CONFIG_PAX_KERNEXEC
18676+ " movl %%cr0, %0\n"
18677+ " movl %0, %%eax\n"
18678+ " andl $0xFFFEFFFF, %%eax\n"
18679+ " movl %%eax, %%cr0\n"
18680+#endif
18681+
18682+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18683+
18684+#ifdef CONFIG_PAX_KERNEXEC
18685+ " movl %0, %%cr0\n"
18686+#endif
18687+
18688 " jmp 2b\n"
18689 ".previous\n"
18690- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
18691+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18692
18693 from += 64;
18694 to += 64;
18695@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
18696 static void fast_copy_page(void *to, void *from)
18697 {
18698 int i;
18699+ unsigned long cr0;
18700
18701 kernel_fpu_begin();
18702
18703 __asm__ __volatile__ (
18704- "1: prefetch (%0)\n"
18705- " prefetch 64(%0)\n"
18706- " prefetch 128(%0)\n"
18707- " prefetch 192(%0)\n"
18708- " prefetch 256(%0)\n"
18709+ "1: prefetch (%1)\n"
18710+ " prefetch 64(%1)\n"
18711+ " prefetch 128(%1)\n"
18712+ " prefetch 192(%1)\n"
18713+ " prefetch 256(%1)\n"
18714 "2: \n"
18715 ".section .fixup, \"ax\"\n"
18716- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18717+ "3: \n"
18718+
18719+#ifdef CONFIG_PAX_KERNEXEC
18720+ " movl %%cr0, %0\n"
18721+ " movl %0, %%eax\n"
18722+ " andl $0xFFFEFFFF, %%eax\n"
18723+ " movl %%eax, %%cr0\n"
18724+#endif
18725+
18726+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18727+
18728+#ifdef CONFIG_PAX_KERNEXEC
18729+ " movl %0, %%cr0\n"
18730+#endif
18731+
18732 " jmp 2b\n"
18733 ".previous\n"
18734- _ASM_EXTABLE(1b, 3b) : : "r" (from));
18735+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
18736
18737 for (i = 0; i < 4096/64; i++) {
18738 __asm__ __volatile__ (
18739- "1: prefetch 320(%0)\n"
18740- "2: movq (%0), %%mm0\n"
18741- " movq 8(%0), %%mm1\n"
18742- " movq 16(%0), %%mm2\n"
18743- " movq 24(%0), %%mm3\n"
18744- " movq %%mm0, (%1)\n"
18745- " movq %%mm1, 8(%1)\n"
18746- " movq %%mm2, 16(%1)\n"
18747- " movq %%mm3, 24(%1)\n"
18748- " movq 32(%0), %%mm0\n"
18749- " movq 40(%0), %%mm1\n"
18750- " movq 48(%0), %%mm2\n"
18751- " movq 56(%0), %%mm3\n"
18752- " movq %%mm0, 32(%1)\n"
18753- " movq %%mm1, 40(%1)\n"
18754- " movq %%mm2, 48(%1)\n"
18755- " movq %%mm3, 56(%1)\n"
18756+ "1: prefetch 320(%1)\n"
18757+ "2: movq (%1), %%mm0\n"
18758+ " movq 8(%1), %%mm1\n"
18759+ " movq 16(%1), %%mm2\n"
18760+ " movq 24(%1), %%mm3\n"
18761+ " movq %%mm0, (%2)\n"
18762+ " movq %%mm1, 8(%2)\n"
18763+ " movq %%mm2, 16(%2)\n"
18764+ " movq %%mm3, 24(%2)\n"
18765+ " movq 32(%1), %%mm0\n"
18766+ " movq 40(%1), %%mm1\n"
18767+ " movq 48(%1), %%mm2\n"
18768+ " movq 56(%1), %%mm3\n"
18769+ " movq %%mm0, 32(%2)\n"
18770+ " movq %%mm1, 40(%2)\n"
18771+ " movq %%mm2, 48(%2)\n"
18772+ " movq %%mm3, 56(%2)\n"
18773 ".section .fixup, \"ax\"\n"
18774- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18775+ "3:\n"
18776+
18777+#ifdef CONFIG_PAX_KERNEXEC
18778+ " movl %%cr0, %0\n"
18779+ " movl %0, %%eax\n"
18780+ " andl $0xFFFEFFFF, %%eax\n"
18781+ " movl %%eax, %%cr0\n"
18782+#endif
18783+
18784+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18785+
18786+#ifdef CONFIG_PAX_KERNEXEC
18787+ " movl %0, %%cr0\n"
18788+#endif
18789+
18790 " jmp 2b\n"
18791 ".previous\n"
18792 _ASM_EXTABLE(1b, 3b)
18793- : : "r" (from), "r" (to) : "memory");
18794+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18795
18796 from += 64;
18797 to += 64;
18798diff -urNp linux-2.6.32.41/arch/x86/lib/putuser.S linux-2.6.32.41/arch/x86/lib/putuser.S
18799--- linux-2.6.32.41/arch/x86/lib/putuser.S 2011-03-27 14:31:47.000000000 -0400
18800+++ linux-2.6.32.41/arch/x86/lib/putuser.S 2011-04-17 15:56:46.000000000 -0400
18801@@ -15,7 +15,8 @@
18802 #include <asm/thread_info.h>
18803 #include <asm/errno.h>
18804 #include <asm/asm.h>
18805-
18806+#include <asm/segment.h>
18807+#include <asm/pgtable.h>
18808
18809 /*
18810 * __put_user_X
18811@@ -29,52 +30,119 @@
18812 * as they get called from within inline assembly.
18813 */
18814
18815-#define ENTER CFI_STARTPROC ; \
18816- GET_THREAD_INFO(%_ASM_BX)
18817+#define ENTER CFI_STARTPROC
18818 #define EXIT ret ; \
18819 CFI_ENDPROC
18820
18821+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18822+#define _DEST %_ASM_CX,%_ASM_BX
18823+#else
18824+#define _DEST %_ASM_CX
18825+#endif
18826+
18827+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18828+#define __copyuser_seg gs;
18829+#else
18830+#define __copyuser_seg
18831+#endif
18832+
18833 .text
18834 ENTRY(__put_user_1)
18835 ENTER
18836+
18837+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18838+ GET_THREAD_INFO(%_ASM_BX)
18839 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
18840 jae bad_put_user
18841-1: movb %al,(%_ASM_CX)
18842+
18843+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18844+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18845+ cmp %_ASM_BX,%_ASM_CX
18846+ jb 1234f
18847+ xor %ebx,%ebx
18848+1234:
18849+#endif
18850+
18851+#endif
18852+
18853+1: __copyuser_seg movb %al,(_DEST)
18854 xor %eax,%eax
18855 EXIT
18856 ENDPROC(__put_user_1)
18857
18858 ENTRY(__put_user_2)
18859 ENTER
18860+
18861+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18862+ GET_THREAD_INFO(%_ASM_BX)
18863 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18864 sub $1,%_ASM_BX
18865 cmp %_ASM_BX,%_ASM_CX
18866 jae bad_put_user
18867-2: movw %ax,(%_ASM_CX)
18868+
18869+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18870+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18871+ cmp %_ASM_BX,%_ASM_CX
18872+ jb 1234f
18873+ xor %ebx,%ebx
18874+1234:
18875+#endif
18876+
18877+#endif
18878+
18879+2: __copyuser_seg movw %ax,(_DEST)
18880 xor %eax,%eax
18881 EXIT
18882 ENDPROC(__put_user_2)
18883
18884 ENTRY(__put_user_4)
18885 ENTER
18886+
18887+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18888+ GET_THREAD_INFO(%_ASM_BX)
18889 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18890 sub $3,%_ASM_BX
18891 cmp %_ASM_BX,%_ASM_CX
18892 jae bad_put_user
18893-3: movl %eax,(%_ASM_CX)
18894+
18895+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18896+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18897+ cmp %_ASM_BX,%_ASM_CX
18898+ jb 1234f
18899+ xor %ebx,%ebx
18900+1234:
18901+#endif
18902+
18903+#endif
18904+
18905+3: __copyuser_seg movl %eax,(_DEST)
18906 xor %eax,%eax
18907 EXIT
18908 ENDPROC(__put_user_4)
18909
18910 ENTRY(__put_user_8)
18911 ENTER
18912+
18913+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18914+ GET_THREAD_INFO(%_ASM_BX)
18915 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
18916 sub $7,%_ASM_BX
18917 cmp %_ASM_BX,%_ASM_CX
18918 jae bad_put_user
18919-4: mov %_ASM_AX,(%_ASM_CX)
18920+
18921+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18922+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
18923+ cmp %_ASM_BX,%_ASM_CX
18924+ jb 1234f
18925+ xor %ebx,%ebx
18926+1234:
18927+#endif
18928+
18929+#endif
18930+
18931+4: __copyuser_seg mov %_ASM_AX,(_DEST)
18932 #ifdef CONFIG_X86_32
18933-5: movl %edx,4(%_ASM_CX)
18934+5: __copyuser_seg movl %edx,4(_DEST)
18935 #endif
18936 xor %eax,%eax
18937 EXIT
18938diff -urNp linux-2.6.32.41/arch/x86/lib/usercopy_32.c linux-2.6.32.41/arch/x86/lib/usercopy_32.c
18939--- linux-2.6.32.41/arch/x86/lib/usercopy_32.c 2011-03-27 14:31:47.000000000 -0400
18940+++ linux-2.6.32.41/arch/x86/lib/usercopy_32.c 2011-04-23 21:12:28.000000000 -0400
18941@@ -43,7 +43,7 @@ do { \
18942 __asm__ __volatile__( \
18943 " testl %1,%1\n" \
18944 " jz 2f\n" \
18945- "0: lodsb\n" \
18946+ "0: "__copyuser_seg"lodsb\n" \
18947 " stosb\n" \
18948 " testb %%al,%%al\n" \
18949 " jz 1f\n" \
18950@@ -128,10 +128,12 @@ do { \
18951 int __d0; \
18952 might_fault(); \
18953 __asm__ __volatile__( \
18954+ __COPYUSER_SET_ES \
18955 "0: rep; stosl\n" \
18956 " movl %2,%0\n" \
18957 "1: rep; stosb\n" \
18958 "2:\n" \
18959+ __COPYUSER_RESTORE_ES \
18960 ".section .fixup,\"ax\"\n" \
18961 "3: lea 0(%2,%0,4),%0\n" \
18962 " jmp 2b\n" \
18963@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
18964 might_fault();
18965
18966 __asm__ __volatile__(
18967+ __COPYUSER_SET_ES
18968 " testl %0, %0\n"
18969 " jz 3f\n"
18970 " andl %0,%%ecx\n"
18971@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
18972 " subl %%ecx,%0\n"
18973 " addl %0,%%eax\n"
18974 "1:\n"
18975+ __COPYUSER_RESTORE_ES
18976 ".section .fixup,\"ax\"\n"
18977 "2: xorl %%eax,%%eax\n"
18978 " jmp 1b\n"
18979@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
18980
18981 #ifdef CONFIG_X86_INTEL_USERCOPY
18982 static unsigned long
18983-__copy_user_intel(void __user *to, const void *from, unsigned long size)
18984+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
18985 {
18986 int d0, d1;
18987 __asm__ __volatile__(
18988@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
18989 " .align 2,0x90\n"
18990 "3: movl 0(%4), %%eax\n"
18991 "4: movl 4(%4), %%edx\n"
18992- "5: movl %%eax, 0(%3)\n"
18993- "6: movl %%edx, 4(%3)\n"
18994+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
18995+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
18996 "7: movl 8(%4), %%eax\n"
18997 "8: movl 12(%4),%%edx\n"
18998- "9: movl %%eax, 8(%3)\n"
18999- "10: movl %%edx, 12(%3)\n"
19000+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
19001+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
19002 "11: movl 16(%4), %%eax\n"
19003 "12: movl 20(%4), %%edx\n"
19004- "13: movl %%eax, 16(%3)\n"
19005- "14: movl %%edx, 20(%3)\n"
19006+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
19007+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
19008 "15: movl 24(%4), %%eax\n"
19009 "16: movl 28(%4), %%edx\n"
19010- "17: movl %%eax, 24(%3)\n"
19011- "18: movl %%edx, 28(%3)\n"
19012+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
19013+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
19014 "19: movl 32(%4), %%eax\n"
19015 "20: movl 36(%4), %%edx\n"
19016- "21: movl %%eax, 32(%3)\n"
19017- "22: movl %%edx, 36(%3)\n"
19018+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
19019+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
19020 "23: movl 40(%4), %%eax\n"
19021 "24: movl 44(%4), %%edx\n"
19022- "25: movl %%eax, 40(%3)\n"
19023- "26: movl %%edx, 44(%3)\n"
19024+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
19025+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
19026 "27: movl 48(%4), %%eax\n"
19027 "28: movl 52(%4), %%edx\n"
19028- "29: movl %%eax, 48(%3)\n"
19029- "30: movl %%edx, 52(%3)\n"
19030+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
19031+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
19032 "31: movl 56(%4), %%eax\n"
19033 "32: movl 60(%4), %%edx\n"
19034- "33: movl %%eax, 56(%3)\n"
19035- "34: movl %%edx, 60(%3)\n"
19036+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
19037+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
19038 " addl $-64, %0\n"
19039 " addl $64, %4\n"
19040 " addl $64, %3\n"
19041@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
19042 " shrl $2, %0\n"
19043 " andl $3, %%eax\n"
19044 " cld\n"
19045+ __COPYUSER_SET_ES
19046 "99: rep; movsl\n"
19047 "36: movl %%eax, %0\n"
19048 "37: rep; movsb\n"
19049 "100:\n"
19050+ __COPYUSER_RESTORE_ES
19051+ ".section .fixup,\"ax\"\n"
19052+ "101: lea 0(%%eax,%0,4),%0\n"
19053+ " jmp 100b\n"
19054+ ".previous\n"
19055+ ".section __ex_table,\"a\"\n"
19056+ " .align 4\n"
19057+ " .long 1b,100b\n"
19058+ " .long 2b,100b\n"
19059+ " .long 3b,100b\n"
19060+ " .long 4b,100b\n"
19061+ " .long 5b,100b\n"
19062+ " .long 6b,100b\n"
19063+ " .long 7b,100b\n"
19064+ " .long 8b,100b\n"
19065+ " .long 9b,100b\n"
19066+ " .long 10b,100b\n"
19067+ " .long 11b,100b\n"
19068+ " .long 12b,100b\n"
19069+ " .long 13b,100b\n"
19070+ " .long 14b,100b\n"
19071+ " .long 15b,100b\n"
19072+ " .long 16b,100b\n"
19073+ " .long 17b,100b\n"
19074+ " .long 18b,100b\n"
19075+ " .long 19b,100b\n"
19076+ " .long 20b,100b\n"
19077+ " .long 21b,100b\n"
19078+ " .long 22b,100b\n"
19079+ " .long 23b,100b\n"
19080+ " .long 24b,100b\n"
19081+ " .long 25b,100b\n"
19082+ " .long 26b,100b\n"
19083+ " .long 27b,100b\n"
19084+ " .long 28b,100b\n"
19085+ " .long 29b,100b\n"
19086+ " .long 30b,100b\n"
19087+ " .long 31b,100b\n"
19088+ " .long 32b,100b\n"
19089+ " .long 33b,100b\n"
19090+ " .long 34b,100b\n"
19091+ " .long 35b,100b\n"
19092+ " .long 36b,100b\n"
19093+ " .long 37b,100b\n"
19094+ " .long 99b,101b\n"
19095+ ".previous"
19096+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
19097+ : "1"(to), "2"(from), "0"(size)
19098+ : "eax", "edx", "memory");
19099+ return size;
19100+}
19101+
19102+static unsigned long
19103+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
19104+{
19105+ int d0, d1;
19106+ __asm__ __volatile__(
19107+ " .align 2,0x90\n"
19108+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
19109+ " cmpl $67, %0\n"
19110+ " jbe 3f\n"
19111+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
19112+ " .align 2,0x90\n"
19113+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
19114+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
19115+ "5: movl %%eax, 0(%3)\n"
19116+ "6: movl %%edx, 4(%3)\n"
19117+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
19118+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
19119+ "9: movl %%eax, 8(%3)\n"
19120+ "10: movl %%edx, 12(%3)\n"
19121+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
19122+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
19123+ "13: movl %%eax, 16(%3)\n"
19124+ "14: movl %%edx, 20(%3)\n"
19125+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
19126+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
19127+ "17: movl %%eax, 24(%3)\n"
19128+ "18: movl %%edx, 28(%3)\n"
19129+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
19130+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
19131+ "21: movl %%eax, 32(%3)\n"
19132+ "22: movl %%edx, 36(%3)\n"
19133+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
19134+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
19135+ "25: movl %%eax, 40(%3)\n"
19136+ "26: movl %%edx, 44(%3)\n"
19137+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
19138+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
19139+ "29: movl %%eax, 48(%3)\n"
19140+ "30: movl %%edx, 52(%3)\n"
19141+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
19142+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
19143+ "33: movl %%eax, 56(%3)\n"
19144+ "34: movl %%edx, 60(%3)\n"
19145+ " addl $-64, %0\n"
19146+ " addl $64, %4\n"
19147+ " addl $64, %3\n"
19148+ " cmpl $63, %0\n"
19149+ " ja 1b\n"
19150+ "35: movl %0, %%eax\n"
19151+ " shrl $2, %0\n"
19152+ " andl $3, %%eax\n"
19153+ " cld\n"
19154+ "99: rep; "__copyuser_seg" movsl\n"
19155+ "36: movl %%eax, %0\n"
19156+ "37: rep; "__copyuser_seg" movsb\n"
19157+ "100:\n"
19158 ".section .fixup,\"ax\"\n"
19159 "101: lea 0(%%eax,%0,4),%0\n"
19160 " jmp 100b\n"
19161@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
19162 int d0, d1;
19163 __asm__ __volatile__(
19164 " .align 2,0x90\n"
19165- "0: movl 32(%4), %%eax\n"
19166+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19167 " cmpl $67, %0\n"
19168 " jbe 2f\n"
19169- "1: movl 64(%4), %%eax\n"
19170+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19171 " .align 2,0x90\n"
19172- "2: movl 0(%4), %%eax\n"
19173- "21: movl 4(%4), %%edx\n"
19174+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19175+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19176 " movl %%eax, 0(%3)\n"
19177 " movl %%edx, 4(%3)\n"
19178- "3: movl 8(%4), %%eax\n"
19179- "31: movl 12(%4),%%edx\n"
19180+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19181+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19182 " movl %%eax, 8(%3)\n"
19183 " movl %%edx, 12(%3)\n"
19184- "4: movl 16(%4), %%eax\n"
19185- "41: movl 20(%4), %%edx\n"
19186+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19187+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19188 " movl %%eax, 16(%3)\n"
19189 " movl %%edx, 20(%3)\n"
19190- "10: movl 24(%4), %%eax\n"
19191- "51: movl 28(%4), %%edx\n"
19192+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19193+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19194 " movl %%eax, 24(%3)\n"
19195 " movl %%edx, 28(%3)\n"
19196- "11: movl 32(%4), %%eax\n"
19197- "61: movl 36(%4), %%edx\n"
19198+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19199+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19200 " movl %%eax, 32(%3)\n"
19201 " movl %%edx, 36(%3)\n"
19202- "12: movl 40(%4), %%eax\n"
19203- "71: movl 44(%4), %%edx\n"
19204+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19205+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19206 " movl %%eax, 40(%3)\n"
19207 " movl %%edx, 44(%3)\n"
19208- "13: movl 48(%4), %%eax\n"
19209- "81: movl 52(%4), %%edx\n"
19210+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19211+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19212 " movl %%eax, 48(%3)\n"
19213 " movl %%edx, 52(%3)\n"
19214- "14: movl 56(%4), %%eax\n"
19215- "91: movl 60(%4), %%edx\n"
19216+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19217+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19218 " movl %%eax, 56(%3)\n"
19219 " movl %%edx, 60(%3)\n"
19220 " addl $-64, %0\n"
19221@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
19222 " shrl $2, %0\n"
19223 " andl $3, %%eax\n"
19224 " cld\n"
19225- "6: rep; movsl\n"
19226+ "6: rep; "__copyuser_seg" movsl\n"
19227 " movl %%eax,%0\n"
19228- "7: rep; movsb\n"
19229+ "7: rep; "__copyuser_seg" movsb\n"
19230 "8:\n"
19231 ".section .fixup,\"ax\"\n"
19232 "9: lea 0(%%eax,%0,4),%0\n"
19233@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
19234
19235 __asm__ __volatile__(
19236 " .align 2,0x90\n"
19237- "0: movl 32(%4), %%eax\n"
19238+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19239 " cmpl $67, %0\n"
19240 " jbe 2f\n"
19241- "1: movl 64(%4), %%eax\n"
19242+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19243 " .align 2,0x90\n"
19244- "2: movl 0(%4), %%eax\n"
19245- "21: movl 4(%4), %%edx\n"
19246+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19247+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19248 " movnti %%eax, 0(%3)\n"
19249 " movnti %%edx, 4(%3)\n"
19250- "3: movl 8(%4), %%eax\n"
19251- "31: movl 12(%4),%%edx\n"
19252+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19253+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19254 " movnti %%eax, 8(%3)\n"
19255 " movnti %%edx, 12(%3)\n"
19256- "4: movl 16(%4), %%eax\n"
19257- "41: movl 20(%4), %%edx\n"
19258+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19259+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19260 " movnti %%eax, 16(%3)\n"
19261 " movnti %%edx, 20(%3)\n"
19262- "10: movl 24(%4), %%eax\n"
19263- "51: movl 28(%4), %%edx\n"
19264+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19265+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19266 " movnti %%eax, 24(%3)\n"
19267 " movnti %%edx, 28(%3)\n"
19268- "11: movl 32(%4), %%eax\n"
19269- "61: movl 36(%4), %%edx\n"
19270+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19271+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19272 " movnti %%eax, 32(%3)\n"
19273 " movnti %%edx, 36(%3)\n"
19274- "12: movl 40(%4), %%eax\n"
19275- "71: movl 44(%4), %%edx\n"
19276+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19277+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19278 " movnti %%eax, 40(%3)\n"
19279 " movnti %%edx, 44(%3)\n"
19280- "13: movl 48(%4), %%eax\n"
19281- "81: movl 52(%4), %%edx\n"
19282+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19283+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19284 " movnti %%eax, 48(%3)\n"
19285 " movnti %%edx, 52(%3)\n"
19286- "14: movl 56(%4), %%eax\n"
19287- "91: movl 60(%4), %%edx\n"
19288+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19289+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19290 " movnti %%eax, 56(%3)\n"
19291 " movnti %%edx, 60(%3)\n"
19292 " addl $-64, %0\n"
19293@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
19294 " shrl $2, %0\n"
19295 " andl $3, %%eax\n"
19296 " cld\n"
19297- "6: rep; movsl\n"
19298+ "6: rep; "__copyuser_seg" movsl\n"
19299 " movl %%eax,%0\n"
19300- "7: rep; movsb\n"
19301+ "7: rep; "__copyuser_seg" movsb\n"
19302 "8:\n"
19303 ".section .fixup,\"ax\"\n"
19304 "9: lea 0(%%eax,%0,4),%0\n"
19305@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
19306
19307 __asm__ __volatile__(
19308 " .align 2,0x90\n"
19309- "0: movl 32(%4), %%eax\n"
19310+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19311 " cmpl $67, %0\n"
19312 " jbe 2f\n"
19313- "1: movl 64(%4), %%eax\n"
19314+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19315 " .align 2,0x90\n"
19316- "2: movl 0(%4), %%eax\n"
19317- "21: movl 4(%4), %%edx\n"
19318+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19319+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19320 " movnti %%eax, 0(%3)\n"
19321 " movnti %%edx, 4(%3)\n"
19322- "3: movl 8(%4), %%eax\n"
19323- "31: movl 12(%4),%%edx\n"
19324+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19325+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19326 " movnti %%eax, 8(%3)\n"
19327 " movnti %%edx, 12(%3)\n"
19328- "4: movl 16(%4), %%eax\n"
19329- "41: movl 20(%4), %%edx\n"
19330+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19331+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19332 " movnti %%eax, 16(%3)\n"
19333 " movnti %%edx, 20(%3)\n"
19334- "10: movl 24(%4), %%eax\n"
19335- "51: movl 28(%4), %%edx\n"
19336+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19337+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19338 " movnti %%eax, 24(%3)\n"
19339 " movnti %%edx, 28(%3)\n"
19340- "11: movl 32(%4), %%eax\n"
19341- "61: movl 36(%4), %%edx\n"
19342+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19343+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19344 " movnti %%eax, 32(%3)\n"
19345 " movnti %%edx, 36(%3)\n"
19346- "12: movl 40(%4), %%eax\n"
19347- "71: movl 44(%4), %%edx\n"
19348+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19349+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19350 " movnti %%eax, 40(%3)\n"
19351 " movnti %%edx, 44(%3)\n"
19352- "13: movl 48(%4), %%eax\n"
19353- "81: movl 52(%4), %%edx\n"
19354+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19355+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19356 " movnti %%eax, 48(%3)\n"
19357 " movnti %%edx, 52(%3)\n"
19358- "14: movl 56(%4), %%eax\n"
19359- "91: movl 60(%4), %%edx\n"
19360+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19361+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19362 " movnti %%eax, 56(%3)\n"
19363 " movnti %%edx, 60(%3)\n"
19364 " addl $-64, %0\n"
19365@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
19366 " shrl $2, %0\n"
19367 " andl $3, %%eax\n"
19368 " cld\n"
19369- "6: rep; movsl\n"
19370+ "6: rep; "__copyuser_seg" movsl\n"
19371 " movl %%eax,%0\n"
19372- "7: rep; movsb\n"
19373+ "7: rep; "__copyuser_seg" movsb\n"
19374 "8:\n"
19375 ".section .fixup,\"ax\"\n"
19376 "9: lea 0(%%eax,%0,4),%0\n"
19377@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
19378 */
19379 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
19380 unsigned long size);
19381-unsigned long __copy_user_intel(void __user *to, const void *from,
19382+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
19383+ unsigned long size);
19384+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
19385 unsigned long size);
19386 unsigned long __copy_user_zeroing_intel_nocache(void *to,
19387 const void __user *from, unsigned long size);
19388 #endif /* CONFIG_X86_INTEL_USERCOPY */
19389
19390 /* Generic arbitrary sized copy. */
19391-#define __copy_user(to, from, size) \
19392+#define __copy_user(to, from, size, prefix, set, restore) \
19393 do { \
19394 int __d0, __d1, __d2; \
19395 __asm__ __volatile__( \
19396+ set \
19397 " cmp $7,%0\n" \
19398 " jbe 1f\n" \
19399 " movl %1,%0\n" \
19400 " negl %0\n" \
19401 " andl $7,%0\n" \
19402 " subl %0,%3\n" \
19403- "4: rep; movsb\n" \
19404+ "4: rep; "prefix"movsb\n" \
19405 " movl %3,%0\n" \
19406 " shrl $2,%0\n" \
19407 " andl $3,%3\n" \
19408 " .align 2,0x90\n" \
19409- "0: rep; movsl\n" \
19410+ "0: rep; "prefix"movsl\n" \
19411 " movl %3,%0\n" \
19412- "1: rep; movsb\n" \
19413+ "1: rep; "prefix"movsb\n" \
19414 "2:\n" \
19415+ restore \
19416 ".section .fixup,\"ax\"\n" \
19417 "5: addl %3,%0\n" \
19418 " jmp 2b\n" \
19419@@ -682,14 +799,14 @@ do { \
19420 " negl %0\n" \
19421 " andl $7,%0\n" \
19422 " subl %0,%3\n" \
19423- "4: rep; movsb\n" \
19424+ "4: rep; "__copyuser_seg"movsb\n" \
19425 " movl %3,%0\n" \
19426 " shrl $2,%0\n" \
19427 " andl $3,%3\n" \
19428 " .align 2,0x90\n" \
19429- "0: rep; movsl\n" \
19430+ "0: rep; "__copyuser_seg"movsl\n" \
19431 " movl %3,%0\n" \
19432- "1: rep; movsb\n" \
19433+ "1: rep; "__copyuser_seg"movsb\n" \
19434 "2:\n" \
19435 ".section .fixup,\"ax\"\n" \
19436 "5: addl %3,%0\n" \
19437@@ -775,9 +892,9 @@ survive:
19438 }
19439 #endif
19440 if (movsl_is_ok(to, from, n))
19441- __copy_user(to, from, n);
19442+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
19443 else
19444- n = __copy_user_intel(to, from, n);
19445+ n = __generic_copy_to_user_intel(to, from, n);
19446 return n;
19447 }
19448 EXPORT_SYMBOL(__copy_to_user_ll);
19449@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
19450 unsigned long n)
19451 {
19452 if (movsl_is_ok(to, from, n))
19453- __copy_user(to, from, n);
19454+ __copy_user(to, from, n, __copyuser_seg, "", "");
19455 else
19456- n = __copy_user_intel((void __user *)to,
19457- (const void *)from, n);
19458+ n = __generic_copy_from_user_intel(to, from, n);
19459 return n;
19460 }
19461 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
19462@@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocach
19463 if (n > 64 && cpu_has_xmm2)
19464 n = __copy_user_intel_nocache(to, from, n);
19465 else
19466- __copy_user(to, from, n);
19467+ __copy_user(to, from, n, __copyuser_seg, "", "");
19468 #else
19469- __copy_user(to, from, n);
19470+ __copy_user(to, from, n, __copyuser_seg, "", "");
19471 #endif
19472 return n;
19473 }
19474 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
19475
19476-/**
19477- * copy_to_user: - Copy a block of data into user space.
19478- * @to: Destination address, in user space.
19479- * @from: Source address, in kernel space.
19480- * @n: Number of bytes to copy.
19481- *
19482- * Context: User context only. This function may sleep.
19483- *
19484- * Copy data from kernel space to user space.
19485- *
19486- * Returns number of bytes that could not be copied.
19487- * On success, this will be zero.
19488- */
19489-unsigned long
19490-copy_to_user(void __user *to, const void *from, unsigned long n)
19491+#ifdef CONFIG_PAX_MEMORY_UDEREF
19492+void __set_fs(mm_segment_t x)
19493 {
19494- if (access_ok(VERIFY_WRITE, to, n))
19495- n = __copy_to_user(to, from, n);
19496- return n;
19497+ switch (x.seg) {
19498+ case 0:
19499+ loadsegment(gs, 0);
19500+ break;
19501+ case TASK_SIZE_MAX:
19502+ loadsegment(gs, __USER_DS);
19503+ break;
19504+ case -1UL:
19505+ loadsegment(gs, __KERNEL_DS);
19506+ break;
19507+ default:
19508+ BUG();
19509+ }
19510+ return;
19511 }
19512-EXPORT_SYMBOL(copy_to_user);
19513+EXPORT_SYMBOL(__set_fs);
19514
19515-/**
19516- * copy_from_user: - Copy a block of data from user space.
19517- * @to: Destination address, in kernel space.
19518- * @from: Source address, in user space.
19519- * @n: Number of bytes to copy.
19520- *
19521- * Context: User context only. This function may sleep.
19522- *
19523- * Copy data from user space to kernel space.
19524- *
19525- * Returns number of bytes that could not be copied.
19526- * On success, this will be zero.
19527- *
19528- * If some data could not be copied, this function will pad the copied
19529- * data to the requested size using zero bytes.
19530- */
19531-unsigned long
19532-copy_from_user(void *to, const void __user *from, unsigned long n)
19533+void set_fs(mm_segment_t x)
19534 {
19535- if (access_ok(VERIFY_READ, from, n))
19536- n = __copy_from_user(to, from, n);
19537- else
19538- memset(to, 0, n);
19539- return n;
19540+ current_thread_info()->addr_limit = x;
19541+ __set_fs(x);
19542 }
19543-EXPORT_SYMBOL(copy_from_user);
19544+EXPORT_SYMBOL(set_fs);
19545+#endif
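
Note (illustrative sketch, not part of the patch): the usercopy_32.c hunks above thread a segment-override token into every user access by pasting a macro string into the asm template, and the reworked __copy_user() takes that prefix plus set/restore snippets as parameters. The following stand-alone, x86-only program shows just the string-pasting technique with an invented COPY_BYTES macro; the empty prefix in the call stands in for the __copyuser_seg token used above.

#include <stdio.h>
#include <string.h>

/*
 * The instruction template is built by string concatenation: whatever is
 * passed as "seg" is pasted between "rep; " and "movsb" at compile time,
 * so one macro body serves both the plain and the prefixed copy.
 */
#define COPY_BYTES(dst, src, n, seg)                               \
do {                                                               \
        unsigned long __cnt = (n);                                 \
        void *__dst = (dst);                                       \
        const void *__src = (src);                                 \
        __asm__ __volatile__(                                      \
                "rep; " seg "movsb"                                \
                : "+c" (__cnt), "+D" (__dst), "+S" (__src)         \
                :                                                  \
                : "memory");                                       \
} while (0)

int main(void)
{
        char src[] = "segment-prefixed copy";
        char dst[sizeof(src)];

        /* empty prefix here; the patch passes its __copyuser_seg token */
        COPY_BYTES(dst, src, sizeof(src), "");
        printf("%s\n", dst);
        return 0;
}
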
19546diff -urNp linux-2.6.32.41/arch/x86/lib/usercopy_64.c linux-2.6.32.41/arch/x86/lib/usercopy_64.c
19547--- linux-2.6.32.41/arch/x86/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
19548+++ linux-2.6.32.41/arch/x86/lib/usercopy_64.c 2011-05-04 17:56:20.000000000 -0400
19549@@ -42,6 +42,12 @@ long
19550 __strncpy_from_user(char *dst, const char __user *src, long count)
19551 {
19552 long res;
19553+
19554+#ifdef CONFIG_PAX_MEMORY_UDEREF
19555+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19556+ src += PAX_USER_SHADOW_BASE;
19557+#endif
19558+
19559 __do_strncpy_from_user(dst, src, count, res);
19560 return res;
19561 }
19562@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
19563 {
19564 long __d0;
19565 might_fault();
19566+
19567+#ifdef CONFIG_PAX_MEMORY_UDEREF
19568+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
19569+ addr += PAX_USER_SHADOW_BASE;
19570+#endif
19571+
19572 /* no memory constraint because it doesn't change any memory gcc knows
19573 about */
19574 asm volatile(
19575@@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
19576
19577 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
19578 {
19579- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
19580+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
19581+
19582+#ifdef CONFIG_PAX_MEMORY_UDEREF
19583+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
19584+ to += PAX_USER_SHADOW_BASE;
19585+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
19586+ from += PAX_USER_SHADOW_BASE;
19587+#endif
19588+
19589 return copy_user_generic((__force void *)to, (__force void *)from, len);
19590- }
19591- return len;
19592+ }
19593+ return len;
19594 }
19595 EXPORT_SYMBOL(copy_in_user);
19596
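
Note (illustrative sketch, not part of the patch): the usercopy_64.c hunks rebase user pointers that are still below PAX_USER_SHADOW_BASE into the shadow alias range before touching them. A toy user-space model of that test-and-shift follows; SHADOW_BASE is a made-up value and no claim is made about the real address layout.

#include <stdio.h>

#define SHADOW_BASE 0x100000UL          /* made-up stand-in value */

static unsigned long rebase(unsigned long addr)
{
        if (addr < SHADOW_BASE)         /* still a "low" user value? */
                addr += SHADOW_BASE;    /* shift into the alias window */
        return addr;
}

int main(void)
{
        printf("%#lx -> %#lx\n", 0x4000UL, rebase(0x4000UL));
        printf("%#lx -> %#lx\n", SHADOW_BASE + 0x4000UL,
               rebase(SHADOW_BASE + 0x4000UL));
        return 0;
}
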
19597diff -urNp linux-2.6.32.41/arch/x86/Makefile linux-2.6.32.41/arch/x86/Makefile
19598--- linux-2.6.32.41/arch/x86/Makefile 2011-03-27 14:31:47.000000000 -0400
19599+++ linux-2.6.32.41/arch/x86/Makefile 2011-04-17 15:56:46.000000000 -0400
19600@@ -189,3 +189,12 @@ define archhelp
19601 echo ' FDARGS="..." arguments for the booted kernel'
19602 echo ' FDINITRD=file initrd for the booted kernel'
19603 endef
19604+
19605+define OLD_LD
19606+
19607+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
19608+*** Please upgrade your binutils to 2.18 or newer
19609+endef
19610+
19611+archprepare:
19612+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
19613diff -urNp linux-2.6.32.41/arch/x86/mm/extable.c linux-2.6.32.41/arch/x86/mm/extable.c
19614--- linux-2.6.32.41/arch/x86/mm/extable.c 2011-03-27 14:31:47.000000000 -0400
19615+++ linux-2.6.32.41/arch/x86/mm/extable.c 2011-04-17 15:56:46.000000000 -0400
19616@@ -1,14 +1,71 @@
19617 #include <linux/module.h>
19618 #include <linux/spinlock.h>
19619+#include <linux/sort.h>
19620 #include <asm/uaccess.h>
19621+#include <asm/pgtable.h>
19622
19623+/*
19624+ * The exception table needs to be sorted so that the binary
19625+ * search that we use to find entries in it works properly.
19626+ * This is used both for the kernel exception table and for
19627+ * the exception tables of modules that get loaded.
19628+ */
19629+static int cmp_ex(const void *a, const void *b)
19630+{
19631+ const struct exception_table_entry *x = a, *y = b;
19632+
19633+ /* avoid overflow */
19634+ if (x->insn > y->insn)
19635+ return 1;
19636+ if (x->insn < y->insn)
19637+ return -1;
19638+ return 0;
19639+}
19640+
19641+static void swap_ex(void *a, void *b, int size)
19642+{
19643+ struct exception_table_entry t, *x = a, *y = b;
19644+
19645+ t = *x;
19646+
19647+ pax_open_kernel();
19648+ *x = *y;
19649+ *y = t;
19650+ pax_close_kernel();
19651+}
19652+
19653+void sort_extable(struct exception_table_entry *start,
19654+ struct exception_table_entry *finish)
19655+{
19656+ sort(start, finish - start, sizeof(struct exception_table_entry),
19657+ cmp_ex, swap_ex);
19658+}
19659+
19660+#ifdef CONFIG_MODULES
19661+/*
19662+ * If the exception table is sorted, any referring to the module init
19663+ * will be at the beginning or the end.
19664+ */
19665+void trim_init_extable(struct module *m)
19666+{
19667+ /*trim the beginning*/
19668+ while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
19669+ m->extable++;
19670+ m->num_exentries--;
19671+ }
19672+ /*trim the end*/
19673+ while (m->num_exentries &&
19674+ within_module_init(m->extable[m->num_exentries-1].insn, m))
19675+ m->num_exentries--;
19676+}
19677+#endif /* CONFIG_MODULES */
19678
19679 int fixup_exception(struct pt_regs *regs)
19680 {
19681 const struct exception_table_entry *fixup;
19682
19683 #ifdef CONFIG_PNPBIOS
19684- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
19685+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
19686 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
19687 extern u32 pnp_bios_is_utter_crap;
19688 pnp_bios_is_utter_crap = 1;
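
Note (illustrative sketch, not part of the patch): cmp_ex() above deliberately compares the two addresses instead of returning their difference, so the sort cannot be misled by subtraction overflow on addresses that are far apart. The same comparator idea in a stand-alone program, applied with qsort() to an invented ex_entry struct:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct ex_entry {
        uintptr_t insn;                 /* faulting instruction address */
        uintptr_t fixup;                /* where to resume */
};

static int cmp_ex(const void *a, const void *b)
{
        const struct ex_entry *x = a, *y = b;

        /* avoid overflow: compare, never subtract, the addresses */
        if (x->insn > y->insn)
                return 1;
        if (x->insn < y->insn)
                return -1;
        return 0;
}

int main(void)
{
        struct ex_entry table[] = {
                { 0xc0103000, 0xc0900010 },
                { 0xc0101000, 0xc0900020 },
                { 0xc0102000, 0xc0900030 },
        };
        size_t i, n = sizeof(table) / sizeof(table[0]);

        qsort(table, n, sizeof(table[0]), cmp_ex);
        for (i = 0; i < n; i++)
                printf("insn %#lx -> fixup %#lx\n",
                       (unsigned long)table[i].insn,
                       (unsigned long)table[i].fixup);
        return 0;
}
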
19689diff -urNp linux-2.6.32.41/arch/x86/mm/fault.c linux-2.6.32.41/arch/x86/mm/fault.c
19690--- linux-2.6.32.41/arch/x86/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
19691+++ linux-2.6.32.41/arch/x86/mm/fault.c 2011-04-17 15:56:46.000000000 -0400
19692@@ -11,10 +11,19 @@
19693 #include <linux/kprobes.h> /* __kprobes, ... */
19694 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
19695 #include <linux/perf_event.h> /* perf_sw_event */
19696+#include <linux/unistd.h>
19697+#include <linux/compiler.h>
19698
19699 #include <asm/traps.h> /* dotraplinkage, ... */
19700 #include <asm/pgalloc.h> /* pgd_*(), ... */
19701 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
19702+#include <asm/vsyscall.h>
19703+#include <asm/tlbflush.h>
19704+
19705+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19706+#include <asm/stacktrace.h>
19707+#include "../kernel/dumpstack.h"
19708+#endif
19709
19710 /*
19711 * Page fault error code bits:
19712@@ -51,7 +60,7 @@ static inline int notify_page_fault(stru
19713 int ret = 0;
19714
19715 /* kprobe_running() needs smp_processor_id() */
19716- if (kprobes_built_in() && !user_mode_vm(regs)) {
19717+ if (kprobes_built_in() && !user_mode(regs)) {
19718 preempt_disable();
19719 if (kprobe_running() && kprobe_fault_handler(regs, 14))
19720 ret = 1;
19721@@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *re
19722 return !instr_lo || (instr_lo>>1) == 1;
19723 case 0x00:
19724 /* Prefetch instruction is 0x0F0D or 0x0F18 */
19725- if (probe_kernel_address(instr, opcode))
19726+ if (user_mode(regs)) {
19727+ if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
19728+ return 0;
19729+ } else if (probe_kernel_address(instr, opcode))
19730 return 0;
19731
19732 *prefetch = (instr_lo == 0xF) &&
19733@@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsign
19734 while (instr < max_instr) {
19735 unsigned char opcode;
19736
19737- if (probe_kernel_address(instr, opcode))
19738+ if (user_mode(regs)) {
19739+ if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
19740+ break;
19741+ } else if (probe_kernel_address(instr, opcode))
19742 break;
19743
19744 instr++;
19745@@ -172,6 +187,30 @@ force_sig_info_fault(int si_signo, int s
19746 force_sig_info(si_signo, &info, tsk);
19747 }
19748
19749+#ifdef CONFIG_PAX_EMUTRAMP
19750+static int pax_handle_fetch_fault(struct pt_regs *regs);
19751+#endif
19752+
19753+#ifdef CONFIG_PAX_PAGEEXEC
19754+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
19755+{
19756+ pgd_t *pgd;
19757+ pud_t *pud;
19758+ pmd_t *pmd;
19759+
19760+ pgd = pgd_offset(mm, address);
19761+ if (!pgd_present(*pgd))
19762+ return NULL;
19763+ pud = pud_offset(pgd, address);
19764+ if (!pud_present(*pud))
19765+ return NULL;
19766+ pmd = pmd_offset(pud, address);
19767+ if (!pmd_present(*pmd))
19768+ return NULL;
19769+ return pmd;
19770+}
19771+#endif
19772+
19773 DEFINE_SPINLOCK(pgd_lock);
19774 LIST_HEAD(pgd_list);
19775
19776@@ -224,11 +263,24 @@ void vmalloc_sync_all(void)
19777 address += PMD_SIZE) {
19778
19779 unsigned long flags;
19780+
19781+#ifdef CONFIG_PAX_PER_CPU_PGD
19782+ unsigned long cpu;
19783+#else
19784 struct page *page;
19785+#endif
19786
19787 spin_lock_irqsave(&pgd_lock, flags);
19788+
19789+#ifdef CONFIG_PAX_PER_CPU_PGD
19790+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19791+ pgd_t *pgd = get_cpu_pgd(cpu);
19792+#else
19793 list_for_each_entry(page, &pgd_list, lru) {
19794- if (!vmalloc_sync_one(page_address(page), address))
19795+ pgd_t *pgd = page_address(page);
19796+#endif
19797+
19798+ if (!vmalloc_sync_one(pgd, address))
19799 break;
19800 }
19801 spin_unlock_irqrestore(&pgd_lock, flags);
19802@@ -258,6 +310,11 @@ static noinline int vmalloc_fault(unsign
19803 * an interrupt in the middle of a task switch..
19804 */
19805 pgd_paddr = read_cr3();
19806+
19807+#ifdef CONFIG_PAX_PER_CPU_PGD
19808+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
19809+#endif
19810+
19811 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
19812 if (!pmd_k)
19813 return -1;
19814@@ -332,15 +389,27 @@ void vmalloc_sync_all(void)
19815
19816 const pgd_t *pgd_ref = pgd_offset_k(address);
19817 unsigned long flags;
19818+
19819+#ifdef CONFIG_PAX_PER_CPU_PGD
19820+ unsigned long cpu;
19821+#else
19822 struct page *page;
19823+#endif
19824
19825 if (pgd_none(*pgd_ref))
19826 continue;
19827
19828 spin_lock_irqsave(&pgd_lock, flags);
19829+
19830+#ifdef CONFIG_PAX_PER_CPU_PGD
19831+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19832+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
19833+#else
19834 list_for_each_entry(page, &pgd_list, lru) {
19835 pgd_t *pgd;
19836 pgd = (pgd_t *)page_address(page) + pgd_index(address);
19837+#endif
19838+
19839 if (pgd_none(*pgd))
19840 set_pgd(pgd, *pgd_ref);
19841 else
19842@@ -373,7 +442,14 @@ static noinline int vmalloc_fault(unsign
19843 * happen within a race in page table update. In the later
19844 * case just flush:
19845 */
19846+
19847+#ifdef CONFIG_PAX_PER_CPU_PGD
19848+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
19849+ pgd = pgd_offset_cpu(smp_processor_id(), address);
19850+#else
19851 pgd = pgd_offset(current->active_mm, address);
19852+#endif
19853+
19854 pgd_ref = pgd_offset_k(address);
19855 if (pgd_none(*pgd_ref))
19856 return -1;
19857@@ -535,7 +611,7 @@ static int is_errata93(struct pt_regs *r
19858 static int is_errata100(struct pt_regs *regs, unsigned long address)
19859 {
19860 #ifdef CONFIG_X86_64
19861- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
19862+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
19863 return 1;
19864 #endif
19865 return 0;
19866@@ -562,7 +638,7 @@ static int is_f00f_bug(struct pt_regs *r
19867 }
19868
19869 static const char nx_warning[] = KERN_CRIT
19870-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
19871+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
19872
19873 static void
19874 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
19875@@ -571,15 +647,26 @@ show_fault_oops(struct pt_regs *regs, un
19876 if (!oops_may_print())
19877 return;
19878
19879- if (error_code & PF_INSTR) {
19880+ if (nx_enabled && (error_code & PF_INSTR)) {
19881 unsigned int level;
19882
19883 pte_t *pte = lookup_address(address, &level);
19884
19885 if (pte && pte_present(*pte) && !pte_exec(*pte))
19886- printk(nx_warning, current_uid());
19887+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
19888 }
19889
19890+#ifdef CONFIG_PAX_KERNEXEC
19891+ if (init_mm.start_code <= address && address < init_mm.end_code) {
19892+ if (current->signal->curr_ip)
19893+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
19894+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
19895+ else
19896+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
19897+ current->comm, task_pid_nr(current), current_uid(), current_euid());
19898+ }
19899+#endif
19900+
19901 printk(KERN_ALERT "BUG: unable to handle kernel ");
19902 if (address < PAGE_SIZE)
19903 printk(KERN_CONT "NULL pointer dereference");
19904@@ -704,6 +791,68 @@ __bad_area_nosemaphore(struct pt_regs *r
19905 unsigned long address, int si_code)
19906 {
19907 struct task_struct *tsk = current;
19908+ struct mm_struct *mm = tsk->mm;
19909+
19910+#ifdef CONFIG_X86_64
19911+ if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
19912+ if (regs->ip == (unsigned long)vgettimeofday) {
19913+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
19914+ return;
19915+ } else if (regs->ip == (unsigned long)vtime) {
19916+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
19917+ return;
19918+ } else if (regs->ip == (unsigned long)vgetcpu) {
19919+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
19920+ return;
19921+ }
19922+ }
19923+#endif
19924+
19925+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19926+ if (mm && (error_code & PF_USER)) {
19927+ unsigned long ip = regs->ip;
19928+
19929+ if (v8086_mode(regs))
19930+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
19931+
19932+ /*
19933+ * It's possible to have interrupts off here:
19934+ */
19935+ local_irq_enable();
19936+
19937+#ifdef CONFIG_PAX_PAGEEXEC
19938+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
19939+ ((nx_enabled && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
19940+
19941+#ifdef CONFIG_PAX_EMUTRAMP
19942+ switch (pax_handle_fetch_fault(regs)) {
19943+ case 2:
19944+ return;
19945+ }
19946+#endif
19947+
19948+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
19949+ do_group_exit(SIGKILL);
19950+ }
19951+#endif
19952+
19953+#ifdef CONFIG_PAX_SEGMEXEC
19954+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
19955+
19956+#ifdef CONFIG_PAX_EMUTRAMP
19957+ switch (pax_handle_fetch_fault(regs)) {
19958+ case 2:
19959+ return;
19960+ }
19961+#endif
19962+
19963+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
19964+ do_group_exit(SIGKILL);
19965+ }
19966+#endif
19967+
19968+ }
19969+#endif
19970
19971 /* User mode accesses just cause a SIGSEGV */
19972 if (error_code & PF_USER) {
19973@@ -857,6 +1006,99 @@ static int spurious_fault_check(unsigned
19974 return 1;
19975 }
19976
19977+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
19978+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
19979+{
19980+ pte_t *pte;
19981+ pmd_t *pmd;
19982+ spinlock_t *ptl;
19983+ unsigned char pte_mask;
19984+
19985+ if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
19986+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
19987+ return 0;
19988+
19989+ /* PaX: it's our fault, let's handle it if we can */
19990+
19991+ /* PaX: take a look at read faults before acquiring any locks */
19992+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
19993+ /* instruction fetch attempt from a protected page in user mode */
19994+ up_read(&mm->mmap_sem);
19995+
19996+#ifdef CONFIG_PAX_EMUTRAMP
19997+ switch (pax_handle_fetch_fault(regs)) {
19998+ case 2:
19999+ return 1;
20000+ }
20001+#endif
20002+
20003+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
20004+ do_group_exit(SIGKILL);
20005+ }
20006+
20007+ pmd = pax_get_pmd(mm, address);
20008+ if (unlikely(!pmd))
20009+ return 0;
20010+
20011+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
20012+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
20013+ pte_unmap_unlock(pte, ptl);
20014+ return 0;
20015+ }
20016+
20017+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
20018+ /* write attempt to a protected page in user mode */
20019+ pte_unmap_unlock(pte, ptl);
20020+ return 0;
20021+ }
20022+
20023+#ifdef CONFIG_SMP
20024+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
20025+#else
20026+ if (likely(address > get_limit(regs->cs)))
20027+#endif
20028+ {
20029+ set_pte(pte, pte_mkread(*pte));
20030+ __flush_tlb_one(address);
20031+ pte_unmap_unlock(pte, ptl);
20032+ up_read(&mm->mmap_sem);
20033+ return 1;
20034+ }
20035+
20036+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
20037+
20038+ /*
20039+ * PaX: fill DTLB with user rights and retry
20040+ */
20041+ __asm__ __volatile__ (
20042+ "orb %2,(%1)\n"
20043+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
20044+/*
20045+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
20046+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
20047+ * page fault when examined during a TLB load attempt. this is true not only
20048+ * for PTEs holding a non-present entry but also present entries that will
20049+ * raise a page fault (such as those set up by PaX, or the copy-on-write
20050+ * mechanism). in effect it means that we do *not* need to flush the TLBs
20051+ * for our target pages since their PTEs are simply not in the TLBs at all.
20052+
20053+ * the best thing in omitting it is that we gain around 15-20% speed in the
20054+ * fast path of the page fault handler and can get rid of tracing since we
20055+ * can no longer flush unintended entries.
20056+ */
20057+ "invlpg (%0)\n"
20058+#endif
20059+ __copyuser_seg"testb $0,(%0)\n"
20060+ "xorb %3,(%1)\n"
20061+ :
20062+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
20063+ : "memory", "cc");
20064+ pte_unmap_unlock(pte, ptl);
20065+ up_read(&mm->mmap_sem);
20066+ return 1;
20067+}
20068+#endif
20069+
20070 /*
20071 * Handle a spurious fault caused by a stale TLB entry.
20072 *
20073@@ -923,6 +1165,9 @@ int show_unhandled_signals = 1;
20074 static inline int
20075 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
20076 {
20077+ if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
20078+ return 1;
20079+
20080 if (write) {
20081 /* write, present and write, not present: */
20082 if (unlikely(!(vma->vm_flags & VM_WRITE)))
20083@@ -956,17 +1201,31 @@ do_page_fault(struct pt_regs *regs, unsi
20084 {
20085 struct vm_area_struct *vma;
20086 struct task_struct *tsk;
20087- unsigned long address;
20088 struct mm_struct *mm;
20089 int write;
20090 int fault;
20091
20092+ /* Get the faulting address: */
20093+ unsigned long address = read_cr2();
20094+
20095+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20096+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
20097+ if (!search_exception_tables(regs->ip)) {
20098+ bad_area_nosemaphore(regs, error_code, address);
20099+ return;
20100+ }
20101+ if (address < PAX_USER_SHADOW_BASE) {
20102+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
20103+ printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
20104+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
20105+ } else
20106+ address -= PAX_USER_SHADOW_BASE;
20107+ }
20108+#endif
20109+
20110 tsk = current;
20111 mm = tsk->mm;
20112
20113- /* Get the faulting address: */
20114- address = read_cr2();
20115-
20116 /*
20117 * Detect and handle instructions that would cause a page fault for
20118 * both a tracked kernel page and a userspace page.
20119@@ -1026,7 +1285,7 @@ do_page_fault(struct pt_regs *regs, unsi
20120 * User-mode registers count as a user access even for any
20121 * potential system fault or CPU buglet:
20122 */
20123- if (user_mode_vm(regs)) {
20124+ if (user_mode(regs)) {
20125 local_irq_enable();
20126 error_code |= PF_USER;
20127 } else {
20128@@ -1080,6 +1339,11 @@ do_page_fault(struct pt_regs *regs, unsi
20129 might_sleep();
20130 }
20131
20132+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20133+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
20134+ return;
20135+#endif
20136+
20137 vma = find_vma(mm, address);
20138 if (unlikely(!vma)) {
20139 bad_area(regs, error_code, address);
20140@@ -1091,18 +1355,24 @@ do_page_fault(struct pt_regs *regs, unsi
20141 bad_area(regs, error_code, address);
20142 return;
20143 }
20144- if (error_code & PF_USER) {
20145- /*
20146- * Accessing the stack below %sp is always a bug.
20147- * The large cushion allows instructions like enter
20148- * and pusha to work. ("enter $65535, $31" pushes
20149- * 32 pointers and then decrements %sp by 65535.)
20150- */
20151- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
20152- bad_area(regs, error_code, address);
20153- return;
20154- }
20155+ /*
20156+ * Accessing the stack below %sp is always a bug.
20157+ * The large cushion allows instructions like enter
20158+ * and pusha to work. ("enter $65535, $31" pushes
20159+ * 32 pointers and then decrements %sp by 65535.)
20160+ */
20161+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
20162+ bad_area(regs, error_code, address);
20163+ return;
20164+ }
20165+
20166+#ifdef CONFIG_PAX_SEGMEXEC
20167+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
20168+ bad_area(regs, error_code, address);
20169+ return;
20170 }
20171+#endif
20172+
20173 if (unlikely(expand_stack(vma, address))) {
20174 bad_area(regs, error_code, address);
20175 return;
20176@@ -1146,3 +1416,199 @@ good_area:
20177
20178 up_read(&mm->mmap_sem);
20179 }
20180+
20181+#ifdef CONFIG_PAX_EMUTRAMP
20182+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
20183+{
20184+ int err;
20185+
20186+ do { /* PaX: gcc trampoline emulation #1 */
20187+ unsigned char mov1, mov2;
20188+ unsigned short jmp;
20189+ unsigned int addr1, addr2;
20190+
20191+#ifdef CONFIG_X86_64
20192+ if ((regs->ip + 11) >> 32)
20193+ break;
20194+#endif
20195+
20196+ err = get_user(mov1, (unsigned char __user *)regs->ip);
20197+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
20198+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
20199+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
20200+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
20201+
20202+ if (err)
20203+ break;
20204+
20205+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
20206+ regs->cx = addr1;
20207+ regs->ax = addr2;
20208+ regs->ip = addr2;
20209+ return 2;
20210+ }
20211+ } while (0);
20212+
20213+ do { /* PaX: gcc trampoline emulation #2 */
20214+ unsigned char mov, jmp;
20215+ unsigned int addr1, addr2;
20216+
20217+#ifdef CONFIG_X86_64
20218+ if ((regs->ip + 9) >> 32)
20219+ break;
20220+#endif
20221+
20222+ err = get_user(mov, (unsigned char __user *)regs->ip);
20223+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
20224+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
20225+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
20226+
20227+ if (err)
20228+ break;
20229+
20230+ if (mov == 0xB9 && jmp == 0xE9) {
20231+ regs->cx = addr1;
20232+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
20233+ return 2;
20234+ }
20235+ } while (0);
20236+
20237+ return 1; /* PaX in action */
20238+}
20239+
20240+#ifdef CONFIG_X86_64
20241+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
20242+{
20243+ int err;
20244+
20245+ do { /* PaX: gcc trampoline emulation #1 */
20246+ unsigned short mov1, mov2, jmp1;
20247+ unsigned char jmp2;
20248+ unsigned int addr1;
20249+ unsigned long addr2;
20250+
20251+ err = get_user(mov1, (unsigned short __user *)regs->ip);
20252+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
20253+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
20254+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
20255+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
20256+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
20257+
20258+ if (err)
20259+ break;
20260+
20261+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
20262+ regs->r11 = addr1;
20263+ regs->r10 = addr2;
20264+ regs->ip = addr1;
20265+ return 2;
20266+ }
20267+ } while (0);
20268+
20269+ do { /* PaX: gcc trampoline emulation #2 */
20270+ unsigned short mov1, mov2, jmp1;
20271+ unsigned char jmp2;
20272+ unsigned long addr1, addr2;
20273+
20274+ err = get_user(mov1, (unsigned short __user *)regs->ip);
20275+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
20276+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
20277+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
20278+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
20279+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
20280+
20281+ if (err)
20282+ break;
20283+
20284+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
20285+ regs->r11 = addr1;
20286+ regs->r10 = addr2;
20287+ regs->ip = addr1;
20288+ return 2;
20289+ }
20290+ } while (0);
20291+
20292+ return 1; /* PaX in action */
20293+}
20294+#endif
20295+
20296+/*
20297+ * PaX: decide what to do with offenders (regs->ip = fault address)
20298+ *
20299+ * returns 1 when task should be killed
20300+ * 2 when gcc trampoline was detected
20301+ */
20302+static int pax_handle_fetch_fault(struct pt_regs *regs)
20303+{
20304+ if (v8086_mode(regs))
20305+ return 1;
20306+
20307+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
20308+ return 1;
20309+
20310+#ifdef CONFIG_X86_32
20311+ return pax_handle_fetch_fault_32(regs);
20312+#else
20313+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
20314+ return pax_handle_fetch_fault_32(regs);
20315+ else
20316+ return pax_handle_fetch_fault_64(regs);
20317+#endif
20318+}
20319+#endif
20320+
20321+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20322+void pax_report_insns(void *pc, void *sp)
20323+{
20324+ long i;
20325+
20326+ printk(KERN_ERR "PAX: bytes at PC: ");
20327+ for (i = 0; i < 20; i++) {
20328+ unsigned char c;
20329+ if (get_user(c, (__force unsigned char __user *)pc+i))
20330+ printk(KERN_CONT "?? ");
20331+ else
20332+ printk(KERN_CONT "%02x ", c);
20333+ }
20334+ printk("\n");
20335+
20336+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
20337+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
20338+ unsigned long c;
20339+ if (get_user(c, (__force unsigned long __user *)sp+i))
20340+#ifdef CONFIG_X86_32
20341+ printk(KERN_CONT "???????? ");
20342+#else
20343+ printk(KERN_CONT "???????????????? ");
20344+#endif
20345+ else
20346+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
20347+ }
20348+ printk("\n");
20349+}
20350+#endif
20351+
20352+/**
20353+ * probe_kernel_write(): safely attempt to write to a location
20354+ * @dst: address to write to
20355+ * @src: pointer to the data that shall be written
20356+ * @size: size of the data chunk
20357+ *
20358+ * Safely write to address @dst from the buffer at @src. If a kernel fault
20359+ * happens, handle that and return -EFAULT.
20360+ */
20361+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
20362+{
20363+ long ret;
20364+ mm_segment_t old_fs = get_fs();
20365+
20366+ set_fs(KERNEL_DS);
20367+ pagefault_disable();
20368+ pax_open_kernel();
20369+ ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
20370+ pax_close_kernel();
20371+ pagefault_enable();
20372+ set_fs(old_fs);
20373+
20374+ return ret ? -EFAULT : 0;
20375+}
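
Note (illustrative sketch, not part of the patch): pax_handle_fetch_fault_32() above recognizes fixed gcc trampoline byte sequences and emulates them instead of letting them execute; its second pattern is mov $imm32,%ecx (opcode 0xB9) followed by jmp rel32 (opcode 0xE9). The stand-alone decoder below applies the same recognition to a byte buffer. It assumes a little-endian host (like the x86 code it models), and the fake_regs struct and emulate_trampoline() name are invented for the demo.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct fake_regs {
        uint32_t cx;
        uint32_t ip;
};

/* Return values mirror the handler above: 2 = emulated, 1 = unknown. */
static int emulate_trampoline(const uint8_t *code, struct fake_regs *regs)
{
        uint32_t addr1, rel;

        if (code[0] != 0xB9 || code[5] != 0xE9)   /* mov $imm32,%ecx ; jmp rel32 */
                return 1;

        memcpy(&addr1, code + 1, 4);              /* little-endian imm32 */
        memcpy(&rel,   code + 6, 4);              /* little-endian rel32 */

        regs->cx = addr1;
        regs->ip = regs->ip + rel + 10;           /* 10 = trampoline length */
        return 2;
}

int main(void)
{
        /* movl $0x12345678, %ecx ; jmp with rel32 = 0x100 */
        const uint8_t tramp[10] = {
                0xB9, 0x78, 0x56, 0x34, 0x12,
                0xE9, 0x00, 0x01, 0x00, 0x00,
        };
        struct fake_regs regs = { 0, 0x08048000 };

        if (emulate_trampoline(tramp, &regs) == 2)
                printf("ecx = %#x, new ip = %#x\n", regs.cx, regs.ip);
        return 0;
}
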
20376diff -urNp linux-2.6.32.41/arch/x86/mm/gup.c linux-2.6.32.41/arch/x86/mm/gup.c
20377--- linux-2.6.32.41/arch/x86/mm/gup.c 2011-03-27 14:31:47.000000000 -0400
20378+++ linux-2.6.32.41/arch/x86/mm/gup.c 2011-04-17 15:56:46.000000000 -0400
20379@@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long
20380 addr = start;
20381 len = (unsigned long) nr_pages << PAGE_SHIFT;
20382 end = start + len;
20383- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
20384+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
20385 (void __user *)start, len)))
20386 return 0;
20387
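
Note (illustrative sketch, not part of the patch): the gup.c change swaps access_ok() for __access_ok() on the user range. For reference, the kind of check such a helper performs is a wrap-safe bounds test on addr and len; a toy version with a made-up limit:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define FAKE_TASK_SIZE 0xC0000000UL     /* demo value only */

static int range_ok(uintptr_t addr, size_t len)
{
        /* reject wrap-around and anything reaching past the limit */
        if (addr + len < addr)
                return 0;
        return addr + len <= FAKE_TASK_SIZE;
}

int main(void)
{
        printf("%d\n", range_ok(0x1000, 0x100));            /* 1 */
        printf("%d\n", range_ok(FAKE_TASK_SIZE - 8, 16));    /* 0: too high */
        printf("%d\n", range_ok((uintptr_t)-16, 32));        /* 0: wraps */
        return 0;
}
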
20388diff -urNp linux-2.6.32.41/arch/x86/mm/highmem_32.c linux-2.6.32.41/arch/x86/mm/highmem_32.c
20389--- linux-2.6.32.41/arch/x86/mm/highmem_32.c 2011-03-27 14:31:47.000000000 -0400
20390+++ linux-2.6.32.41/arch/x86/mm/highmem_32.c 2011-04-17 15:56:46.000000000 -0400
20391@@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page
20392 idx = type + KM_TYPE_NR*smp_processor_id();
20393 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
20394 BUG_ON(!pte_none(*(kmap_pte-idx)));
20395+
20396+ pax_open_kernel();
20397 set_pte(kmap_pte-idx, mk_pte(page, prot));
20398+ pax_close_kernel();
20399
20400 return (void *)vaddr;
20401 }
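
Note (illustrative sketch, not part of the patch): the highmem_32.c hunk brackets the PTE update with pax_open_kernel()/pax_close_kernel(), so the mapping is writable only for the duration of the update. A loose user-space analogue of that open-write-close bracketing, done with mprotect() on an ordinary page; this only illustrates the pattern, not the kernel mechanism.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
                return 1;
        strcpy(p, "initial");
        mprotect(p, pagesz, PROT_READ);                 /* normally read-only */

        /* "open": briefly writable, update, then "close" again */
        mprotect(p, pagesz, PROT_READ | PROT_WRITE);
        strcpy(p, "updated");
        mprotect(p, pagesz, PROT_READ);

        puts(p);
        munmap(p, pagesz);
        return 0;
}
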
20402diff -urNp linux-2.6.32.41/arch/x86/mm/hugetlbpage.c linux-2.6.32.41/arch/x86/mm/hugetlbpage.c
20403--- linux-2.6.32.41/arch/x86/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
20404+++ linux-2.6.32.41/arch/x86/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
20405@@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmappe
20406 struct hstate *h = hstate_file(file);
20407 struct mm_struct *mm = current->mm;
20408 struct vm_area_struct *vma;
20409- unsigned long start_addr;
20410+ unsigned long start_addr, pax_task_size = TASK_SIZE;
20411+
20412+#ifdef CONFIG_PAX_SEGMEXEC
20413+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20414+ pax_task_size = SEGMEXEC_TASK_SIZE;
20415+#endif
20416+
20417+ pax_task_size -= PAGE_SIZE;
20418
20419 if (len > mm->cached_hole_size) {
20420- start_addr = mm->free_area_cache;
20421+ start_addr = mm->free_area_cache;
20422 } else {
20423- start_addr = TASK_UNMAPPED_BASE;
20424- mm->cached_hole_size = 0;
20425+ start_addr = mm->mmap_base;
20426+ mm->cached_hole_size = 0;
20427 }
20428
20429 full_search:
20430@@ -281,26 +288,27 @@ full_search:
20431
20432 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
20433 /* At this point: (!vma || addr < vma->vm_end). */
20434- if (TASK_SIZE - len < addr) {
20435+ if (pax_task_size - len < addr) {
20436 /*
20437 * Start a new search - just in case we missed
20438 * some holes.
20439 */
20440- if (start_addr != TASK_UNMAPPED_BASE) {
20441- start_addr = TASK_UNMAPPED_BASE;
20442+ if (start_addr != mm->mmap_base) {
20443+ start_addr = mm->mmap_base;
20444 mm->cached_hole_size = 0;
20445 goto full_search;
20446 }
20447 return -ENOMEM;
20448 }
20449- if (!vma || addr + len <= vma->vm_start) {
20450- mm->free_area_cache = addr + len;
20451- return addr;
20452- }
20453+ if (check_heap_stack_gap(vma, addr, len))
20454+ break;
20455 if (addr + mm->cached_hole_size < vma->vm_start)
20456 mm->cached_hole_size = vma->vm_start - addr;
20457 addr = ALIGN(vma->vm_end, huge_page_size(h));
20458 }
20459+
20460+ mm->free_area_cache = addr + len;
20461+ return addr;
20462 }
20463
20464 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
20465@@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmappe
20466 {
20467 struct hstate *h = hstate_file(file);
20468 struct mm_struct *mm = current->mm;
20469- struct vm_area_struct *vma, *prev_vma;
20470- unsigned long base = mm->mmap_base, addr = addr0;
20471+ struct vm_area_struct *vma;
20472+ unsigned long base = mm->mmap_base, addr;
20473 unsigned long largest_hole = mm->cached_hole_size;
20474- int first_time = 1;
20475
20476 /* don't allow allocations above current base */
20477 if (mm->free_area_cache > base)
20478@@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmappe
20479 largest_hole = 0;
20480 mm->free_area_cache = base;
20481 }
20482-try_again:
20483+
20484 /* make sure it can fit in the remaining address space */
20485 if (mm->free_area_cache < len)
20486 goto fail;
20487
20488 /* either no address requested or cant fit in requested address hole */
20489- addr = (mm->free_area_cache - len) & huge_page_mask(h);
20490+ addr = (mm->free_area_cache - len);
20491 do {
20492+ addr &= huge_page_mask(h);
20493+ vma = find_vma(mm, addr);
20494 /*
20495 * Lookup failure means no vma is above this address,
20496 * i.e. return with success:
20497- */
20498- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
20499- return addr;
20500-
20501- /*
20502 * new region fits between prev_vma->vm_end and
20503 * vma->vm_start, use it:
20504 */
20505- if (addr + len <= vma->vm_start &&
20506- (!prev_vma || (addr >= prev_vma->vm_end))) {
20507+ if (check_heap_stack_gap(vma, addr, len)) {
20508 /* remember the address as a hint for next time */
20509- mm->cached_hole_size = largest_hole;
20510- return (mm->free_area_cache = addr);
20511- } else {
20512- /* pull free_area_cache down to the first hole */
20513- if (mm->free_area_cache == vma->vm_end) {
20514- mm->free_area_cache = vma->vm_start;
20515- mm->cached_hole_size = largest_hole;
20516- }
20517+ mm->cached_hole_size = largest_hole;
20518+ return (mm->free_area_cache = addr);
20519+ }
20520+ /* pull free_area_cache down to the first hole */
20521+ if (mm->free_area_cache == vma->vm_end) {
20522+ mm->free_area_cache = vma->vm_start;
20523+ mm->cached_hole_size = largest_hole;
20524 }
20525
20526 /* remember the largest hole we saw so far */
20527 if (addr + largest_hole < vma->vm_start)
20528- largest_hole = vma->vm_start - addr;
20529+ largest_hole = vma->vm_start - addr;
20530
20531 /* try just below the current vma->vm_start */
20532- addr = (vma->vm_start - len) & huge_page_mask(h);
20533- } while (len <= vma->vm_start);
20534+ addr = skip_heap_stack_gap(vma, len);
20535+ } while (!IS_ERR_VALUE(addr));
20536
20537 fail:
20538 /*
20539- * if hint left us with no space for the requested
20540- * mapping then try again:
20541- */
20542- if (first_time) {
20543- mm->free_area_cache = base;
20544- largest_hole = 0;
20545- first_time = 0;
20546- goto try_again;
20547- }
20548- /*
20549 * A failed mmap() very likely causes application failure,
20550 * so fall back to the bottom-up function here. This scenario
20551 * can happen with large stack limits and large mmap()
20552 * allocations.
20553 */
20554- mm->free_area_cache = TASK_UNMAPPED_BASE;
20555+
20556+#ifdef CONFIG_PAX_SEGMEXEC
20557+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20558+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
20559+ else
20560+#endif
20561+
20562+ mm->mmap_base = TASK_UNMAPPED_BASE;
20563+
20564+#ifdef CONFIG_PAX_RANDMMAP
20565+ if (mm->pax_flags & MF_PAX_RANDMMAP)
20566+ mm->mmap_base += mm->delta_mmap;
20567+#endif
20568+
20569+ mm->free_area_cache = mm->mmap_base;
20570 mm->cached_hole_size = ~0UL;
20571 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
20572 len, pgoff, flags);
20573@@ -387,6 +393,7 @@ fail:
20574 /*
20575 * Restore the topdown base:
20576 */
20577+ mm->mmap_base = base;
20578 mm->free_area_cache = base;
20579 mm->cached_hole_size = ~0UL;
20580
20581@@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *f
20582 struct hstate *h = hstate_file(file);
20583 struct mm_struct *mm = current->mm;
20584 struct vm_area_struct *vma;
20585+ unsigned long pax_task_size = TASK_SIZE;
20586
20587 if (len & ~huge_page_mask(h))
20588 return -EINVAL;
20589- if (len > TASK_SIZE)
20590+
20591+#ifdef CONFIG_PAX_SEGMEXEC
20592+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
20593+ pax_task_size = SEGMEXEC_TASK_SIZE;
20594+#endif
20595+
20596+ pax_task_size -= PAGE_SIZE;
20597+
20598+ if (len > pax_task_size)
20599 return -ENOMEM;
20600
20601 if (flags & MAP_FIXED) {
20602@@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *f
20603 if (addr) {
20604 addr = ALIGN(addr, huge_page_size(h));
20605 vma = find_vma(mm, addr);
20606- if (TASK_SIZE - len >= addr &&
20607- (!vma || addr + len <= vma->vm_start))
20608+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
20609 return addr;
20610 }
20611 if (mm->get_unmapped_area == arch_get_unmapped_area)
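
Note (illustrative sketch, not part of the patch): the hugetlbpage.c hunks replace the raw "addr + len <= vma->vm_start" tests with check_heap_stack_gap()/skip_heap_stack_gap(), but the bottom-up search keeps the same shape: walk the sorted vmas upward until a hole large enough is found. Below is a user-space model of that loop with an invented vma array; the stack-guard refinement of the real gap check is omitted.

#include <stdio.h>
#include <stddef.h>

struct vma { unsigned long start, end; };       /* sorted, non-overlapping */

static unsigned long find_area_bottomup(const struct vma *v, size_t n,
                                        unsigned long base,
                                        unsigned long limit,
                                        unsigned long len)
{
        unsigned long addr = base;
        size_t i = 0;

        /* skip vmas that end at or below the starting address */
        while (i < n && v[i].end <= addr)
                i++;

        for (;;) {
                if (limit - len < addr)
                        return 0;               /* out of address space */
                if (i == n || addr + len <= v[i].start)
                        return addr;            /* hole is big enough */
                addr = v[i].end;                /* try just above this vma */
                i++;
        }
}

int main(void)
{
        const struct vma maps[] = {
                { 0x10000000, 0x10400000 },
                { 0x10500000, 0x20000000 },
        };

        /* prints 0x10400000: the first hole that fits 512 KB */
        printf("%#lx\n", find_area_bottomup(maps, 2, 0x10000000,
                                            0xC0000000, 0x00080000));
        return 0;
}
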
20612diff -urNp linux-2.6.32.41/arch/x86/mm/init_32.c linux-2.6.32.41/arch/x86/mm/init_32.c
20613--- linux-2.6.32.41/arch/x86/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
20614+++ linux-2.6.32.41/arch/x86/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
20615@@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
20616 }
20617
20618 /*
20619- * Creates a middle page table and puts a pointer to it in the
20620- * given global directory entry. This only returns the gd entry
20621- * in non-PAE compilation mode, since the middle layer is folded.
20622- */
20623-static pmd_t * __init one_md_table_init(pgd_t *pgd)
20624-{
20625- pud_t *pud;
20626- pmd_t *pmd_table;
20627-
20628-#ifdef CONFIG_X86_PAE
20629- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
20630- if (after_bootmem)
20631- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
20632- else
20633- pmd_table = (pmd_t *)alloc_low_page();
20634- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
20635- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
20636- pud = pud_offset(pgd, 0);
20637- BUG_ON(pmd_table != pmd_offset(pud, 0));
20638-
20639- return pmd_table;
20640- }
20641-#endif
20642- pud = pud_offset(pgd, 0);
20643- pmd_table = pmd_offset(pud, 0);
20644-
20645- return pmd_table;
20646-}
20647-
20648-/*
20649 * Create a page table and place a pointer to it in a middle page
20650 * directory entry:
20651 */
20652@@ -121,13 +91,28 @@ static pte_t * __init one_page_table_ini
20653 page_table = (pte_t *)alloc_low_page();
20654
20655 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
20656+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20657+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
20658+#else
20659 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
20660+#endif
20661 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
20662 }
20663
20664 return pte_offset_kernel(pmd, 0);
20665 }
20666
20667+static pmd_t * __init one_md_table_init(pgd_t *pgd)
20668+{
20669+ pud_t *pud;
20670+ pmd_t *pmd_table;
20671+
20672+ pud = pud_offset(pgd, 0);
20673+ pmd_table = pmd_offset(pud, 0);
20674+
20675+ return pmd_table;
20676+}
20677+
20678 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
20679 {
20680 int pgd_idx = pgd_index(vaddr);
20681@@ -201,6 +186,7 @@ page_table_range_init(unsigned long star
20682 int pgd_idx, pmd_idx;
20683 unsigned long vaddr;
20684 pgd_t *pgd;
20685+ pud_t *pud;
20686 pmd_t *pmd;
20687 pte_t *pte = NULL;
20688
20689@@ -210,8 +196,13 @@ page_table_range_init(unsigned long star
20690 pgd = pgd_base + pgd_idx;
20691
20692 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
20693- pmd = one_md_table_init(pgd);
20694- pmd = pmd + pmd_index(vaddr);
20695+ pud = pud_offset(pgd, vaddr);
20696+ pmd = pmd_offset(pud, vaddr);
20697+
20698+#ifdef CONFIG_X86_PAE
20699+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
20700+#endif
20701+
20702 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
20703 pmd++, pmd_idx++) {
20704 pte = page_table_kmap_check(one_page_table_init(pmd),
20705@@ -223,11 +214,20 @@ page_table_range_init(unsigned long star
20706 }
20707 }
20708
20709-static inline int is_kernel_text(unsigned long addr)
20710+static inline int is_kernel_text(unsigned long start, unsigned long end)
20711 {
20712- if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
20713- return 1;
20714- return 0;
20715+ if ((start > ktla_ktva((unsigned long)_etext) ||
20716+ end <= ktla_ktva((unsigned long)_stext)) &&
20717+ (start > ktla_ktva((unsigned long)_einittext) ||
20718+ end <= ktla_ktva((unsigned long)_sinittext)) &&
20719+
20720+#ifdef CONFIG_ACPI_SLEEP
20721+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
20722+#endif
20723+
20724+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
20725+ return 0;
20726+ return 1;
20727 }
20728
20729 /*
20730@@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned lo
20731 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
20732 unsigned long start_pfn, end_pfn;
20733 pgd_t *pgd_base = swapper_pg_dir;
20734- int pgd_idx, pmd_idx, pte_ofs;
20735+ unsigned int pgd_idx, pmd_idx, pte_ofs;
20736 unsigned long pfn;
20737 pgd_t *pgd;
20738+ pud_t *pud;
20739 pmd_t *pmd;
20740 pte_t *pte;
20741 unsigned pages_2m, pages_4k;
20742@@ -278,8 +279,13 @@ repeat:
20743 pfn = start_pfn;
20744 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
20745 pgd = pgd_base + pgd_idx;
20746- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
20747- pmd = one_md_table_init(pgd);
20748+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
20749+ pud = pud_offset(pgd, 0);
20750+ pmd = pmd_offset(pud, 0);
20751+
20752+#ifdef CONFIG_X86_PAE
20753+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
20754+#endif
20755
20756 if (pfn >= end_pfn)
20757 continue;
20758@@ -291,14 +297,13 @@ repeat:
20759 #endif
20760 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
20761 pmd++, pmd_idx++) {
20762- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
20763+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
20764
20765 /*
20766 * Map with big pages if possible, otherwise
20767 * create normal page tables:
20768 */
20769 if (use_pse) {
20770- unsigned int addr2;
20771 pgprot_t prot = PAGE_KERNEL_LARGE;
20772 /*
20773 * first pass will use the same initial
20774@@ -308,11 +313,7 @@ repeat:
20775 __pgprot(PTE_IDENT_ATTR |
20776 _PAGE_PSE);
20777
20778- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
20779- PAGE_OFFSET + PAGE_SIZE-1;
20780-
20781- if (is_kernel_text(addr) ||
20782- is_kernel_text(addr2))
20783+ if (is_kernel_text(address, address + PMD_SIZE))
20784 prot = PAGE_KERNEL_LARGE_EXEC;
20785
20786 pages_2m++;
20787@@ -329,7 +330,7 @@ repeat:
20788 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
20789 pte += pte_ofs;
20790 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
20791- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
20792+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
20793 pgprot_t prot = PAGE_KERNEL;
20794 /*
20795 * first pass will use the same initial
20796@@ -337,7 +338,7 @@ repeat:
20797 */
20798 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
20799
20800- if (is_kernel_text(addr))
20801+ if (is_kernel_text(address, address + PAGE_SIZE))
20802 prot = PAGE_KERNEL_EXEC;
20803
20804 pages_4k++;
20805@@ -489,7 +490,7 @@ void __init native_pagetable_setup_start
20806
20807 pud = pud_offset(pgd, va);
20808 pmd = pmd_offset(pud, va);
20809- if (!pmd_present(*pmd))
20810+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
20811 break;
20812
20813 pte = pte_offset_kernel(pmd, va);
20814@@ -541,9 +542,7 @@ void __init early_ioremap_page_table_ran
20815
20816 static void __init pagetable_init(void)
20817 {
20818- pgd_t *pgd_base = swapper_pg_dir;
20819-
20820- permanent_kmaps_init(pgd_base);
20821+ permanent_kmaps_init(swapper_pg_dir);
20822 }
20823
20824 #ifdef CONFIG_ACPI_SLEEP
20825@@ -551,12 +550,12 @@ static void __init pagetable_init(void)
20826 * ACPI suspend needs this for resume, because things like the intel-agp
20827 * driver might have split up a kernel 4MB mapping.
20828 */
20829-char swsusp_pg_dir[PAGE_SIZE]
20830+pgd_t swsusp_pg_dir[PTRS_PER_PGD]
20831 __attribute__ ((aligned(PAGE_SIZE)));
20832
20833 static inline void save_pg_dir(void)
20834 {
20835- memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
20836+ clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
20837 }
20838 #else /* !CONFIG_ACPI_SLEEP */
20839 static inline void save_pg_dir(void)
20840@@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
20841 flush_tlb_all();
20842 }
20843
20844-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
20845+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
20846 EXPORT_SYMBOL_GPL(__supported_pte_mask);
20847
20848 /* user-defined highmem size */
20849@@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void
20850 * Initialize the boot-time allocator (with low memory only):
20851 */
20852 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
20853- bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
20854+ bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
20855 PAGE_SIZE);
20856 if (bootmap == -1L)
20857 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
20858@@ -864,6 +863,12 @@ void __init mem_init(void)
20859
20860 pci_iommu_alloc();
20861
20862+#ifdef CONFIG_PAX_PER_CPU_PGD
20863+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
20864+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20865+ KERNEL_PGD_PTRS);
20866+#endif
20867+
20868 #ifdef CONFIG_FLATMEM
20869 BUG_ON(!mem_map);
20870 #endif
20871@@ -881,7 +886,7 @@ void __init mem_init(void)
20872 set_highmem_pages_init();
20873
20874 codesize = (unsigned long) &_etext - (unsigned long) &_text;
20875- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
20876+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
20877 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
20878
20879 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
20880@@ -923,10 +928,10 @@ void __init mem_init(void)
20881 ((unsigned long)&__init_end -
20882 (unsigned long)&__init_begin) >> 10,
20883
20884- (unsigned long)&_etext, (unsigned long)&_edata,
20885- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
20886+ (unsigned long)&_sdata, (unsigned long)&_edata,
20887+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
20888
20889- (unsigned long)&_text, (unsigned long)&_etext,
20890+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
20891 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
20892
20893 /*
20894@@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
20895 if (!kernel_set_to_readonly)
20896 return;
20897
20898+ start = ktla_ktva(start);
20899 pr_debug("Set kernel text: %lx - %lx for read write\n",
20900 start, start+size);
20901
20902@@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
20903 if (!kernel_set_to_readonly)
20904 return;
20905
20906+ start = ktla_ktva(start);
20907 pr_debug("Set kernel text: %lx - %lx for read only\n",
20908 start, start+size);
20909
20910@@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
20911 unsigned long start = PFN_ALIGN(_text);
20912 unsigned long size = PFN_ALIGN(_etext) - start;
20913
20914+ start = ktla_ktva(start);
20915 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
20916 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
20917 size >> 10);
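
Note (illustrative sketch, not part of the patch): the rewritten is_kernel_text() above is an overlap test: a candidate range counts as kernel text if it intersects the kernel text, init text, ACPI wakeup or low-BIOS ranges. The same kind of predicate as a small stand-alone program, using half-open intervals and made-up range values:

#include <stdio.h>

struct range { unsigned long start, end; };     /* half-open [start, end) */

static int overlaps(struct range a, struct range b)
{
        /* "no overlap" means: a entirely above b, or entirely below it */
        return !(a.start >= b.end || a.end <= b.start);
}

static int is_protected(unsigned long start, unsigned long end,
                        const struct range *prot, int n)
{
        struct range a = { start, end };
        int i;

        for (i = 0; i < n; i++)
                if (overlaps(a, prot[i]))
                        return 1;
        return 0;
}

int main(void)
{
        const struct range prot[] = {
                { 0xc0100000, 0xc0400000 },     /* pretend _stext.._etext */
                { 0xc0600000, 0xc0680000 },     /* pretend init text */
        };

        printf("%d\n", is_protected(0xc0300000, 0xc0500000, prot, 2)); /* 1 */
        printf("%d\n", is_protected(0xc0400000, 0xc0600000, prot, 2)); /* 0 */
        return 0;
}
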
20918diff -urNp linux-2.6.32.41/arch/x86/mm/init_64.c linux-2.6.32.41/arch/x86/mm/init_64.c
20919--- linux-2.6.32.41/arch/x86/mm/init_64.c 2011-04-17 17:00:52.000000000 -0400
20920+++ linux-2.6.32.41/arch/x86/mm/init_64.c 2011-04-17 17:03:05.000000000 -0400
20921@@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
20922 pmd = fill_pmd(pud, vaddr);
20923 pte = fill_pte(pmd, vaddr);
20924
20925+ pax_open_kernel();
20926 set_pte(pte, new_pte);
20927+ pax_close_kernel();
20928
20929 /*
20930 * It's enough to flush this one mapping.
20931@@ -223,14 +225,12 @@ static void __init __init_extra_mapping(
20932 pgd = pgd_offset_k((unsigned long)__va(phys));
20933 if (pgd_none(*pgd)) {
20934 pud = (pud_t *) spp_getpage();
20935- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
20936- _PAGE_USER));
20937+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
20938 }
20939 pud = pud_offset(pgd, (unsigned long)__va(phys));
20940 if (pud_none(*pud)) {
20941 pmd = (pmd_t *) spp_getpage();
20942- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
20943- _PAGE_USER));
20944+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
20945 }
20946 pmd = pmd_offset(pud, phys);
20947 BUG_ON(!pmd_none(*pmd));
20948@@ -675,6 +675,12 @@ void __init mem_init(void)
20949
20950 pci_iommu_alloc();
20951
20952+#ifdef CONFIG_PAX_PER_CPU_PGD
20953+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
20954+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
20955+ KERNEL_PGD_PTRS);
20956+#endif
20957+
20958 /* clear_bss() already clear the empty_zero_page */
20959
20960 reservedpages = 0;
20961@@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
20962 static struct vm_area_struct gate_vma = {
20963 .vm_start = VSYSCALL_START,
20964 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
20965- .vm_page_prot = PAGE_READONLY_EXEC,
20966- .vm_flags = VM_READ | VM_EXEC
20967+ .vm_page_prot = PAGE_READONLY,
20968+ .vm_flags = VM_READ
20969 };
20970
20971 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
20972@@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long a
20973
20974 const char *arch_vma_name(struct vm_area_struct *vma)
20975 {
20976- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
20977+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
20978 return "[vdso]";
20979 if (vma == &gate_vma)
20980 return "[vsyscall]";
20981diff -urNp linux-2.6.32.41/arch/x86/mm/init.c linux-2.6.32.41/arch/x86/mm/init.c
20982--- linux-2.6.32.41/arch/x86/mm/init.c 2011-04-17 17:00:52.000000000 -0400
20983+++ linux-2.6.32.41/arch/x86/mm/init.c 2011-05-23 19:02:20.000000000 -0400
20984@@ -69,11 +69,7 @@ static void __init find_early_table_spac
20985 * cause a hotspot and fill up ZONE_DMA. The page tables
20986 * need roughly 0.5KB per GB.
20987 */
20988-#ifdef CONFIG_X86_32
20989- start = 0x7000;
20990-#else
20991- start = 0x8000;
20992-#endif
20993+ start = 0x100000;
20994 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
20995 tables, PAGE_SIZE);
20996 if (e820_table_start == -1UL)
20997@@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_m
20998 #endif
20999
21000 set_nx();
21001- if (nx_enabled)
21002+ if (nx_enabled && cpu_has_nx)
21003 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
21004
21005 /* Enable PSE if available */
21006@@ -331,7 +327,19 @@ unsigned long __init_refok init_memory_m
21007 */
21008 int devmem_is_allowed(unsigned long pagenr)
21009 {
21010- if (pagenr <= 256)
21011+#ifndef CONFIG_GRKERNSEC_KMEM
21012+ if (!pagenr)
21013+ return 1;
21014+#ifdef CONFIG_VM86
21015+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
21016+ return 1;
21017+#endif
21018+#else
21019+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
21020+ return 0;
21021+#endif
21022+
21023+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
21024 return 1;
21025 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
21026 return 0;
21027@@ -379,6 +387,86 @@ void free_init_pages(char *what, unsigne
21028
21029 void free_initmem(void)
21030 {
21031+
21032+#ifdef CONFIG_PAX_KERNEXEC
21033+#ifdef CONFIG_X86_32
21034+ /* PaX: limit KERNEL_CS to actual size */
21035+ unsigned long addr, limit;
21036+ struct desc_struct d;
21037+ int cpu;
21038+
21039+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
21040+ limit = (limit - 1UL) >> PAGE_SHIFT;
21041+
21042+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
21043+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
21044+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
21045+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
21046+ }
21047+
21048+ /* PaX: make KERNEL_CS read-only */
21049+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
21050+ if (!paravirt_enabled())
21051+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
21052+/*
21053+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
21054+ pgd = pgd_offset_k(addr);
21055+ pud = pud_offset(pgd, addr);
21056+ pmd = pmd_offset(pud, addr);
21057+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21058+ }
21059+*/
21060+#ifdef CONFIG_X86_PAE
21061+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
21062+/*
21063+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
21064+ pgd = pgd_offset_k(addr);
21065+ pud = pud_offset(pgd, addr);
21066+ pmd = pmd_offset(pud, addr);
21067+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
21068+ }
21069+*/
21070+#endif
21071+
21072+#ifdef CONFIG_MODULES
21073+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
21074+#endif
21075+
21076+#else
21077+ pgd_t *pgd;
21078+ pud_t *pud;
21079+ pmd_t *pmd;
21080+ unsigned long addr, end;
21081+
21082+ /* PaX: make kernel code/rodata read-only, rest non-executable */
21083+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
21084+ pgd = pgd_offset_k(addr);
21085+ pud = pud_offset(pgd, addr);
21086+ pmd = pmd_offset(pud, addr);
21087+ if (!pmd_present(*pmd))
21088+ continue;
21089+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
21090+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21091+ else
21092+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
21093+ }
21094+
21095+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
21096+ end = addr + KERNEL_IMAGE_SIZE;
21097+ for (; addr < end; addr += PMD_SIZE) {
21098+ pgd = pgd_offset_k(addr);
21099+ pud = pud_offset(pgd, addr);
21100+ pmd = pmd_offset(pud, addr);
21101+ if (!pmd_present(*pmd))
21102+ continue;
21103+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
21104+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21105+ }
21106+#endif
21107+
21108+ flush_tlb_all();
21109+#endif
21110+
21111 free_init_pages("unused kernel memory",
21112 (unsigned long)(&__init_begin),
21113 (unsigned long)(&__init_end));
21114diff -urNp linux-2.6.32.41/arch/x86/mm/iomap_32.c linux-2.6.32.41/arch/x86/mm/iomap_32.c
21115--- linux-2.6.32.41/arch/x86/mm/iomap_32.c 2011-03-27 14:31:47.000000000 -0400
21116+++ linux-2.6.32.41/arch/x86/mm/iomap_32.c 2011-04-17 15:56:46.000000000 -0400
21117@@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long
21118 debug_kmap_atomic(type);
21119 idx = type + KM_TYPE_NR * smp_processor_id();
21120 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
21121+
21122+ pax_open_kernel();
21123 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
21124+ pax_close_kernel();
21125+
21126 arch_flush_lazy_mmu_mode();
21127
21128 return (void *)vaddr;
21129diff -urNp linux-2.6.32.41/arch/x86/mm/ioremap.c linux-2.6.32.41/arch/x86/mm/ioremap.c
21130--- linux-2.6.32.41/arch/x86/mm/ioremap.c 2011-03-27 14:31:47.000000000 -0400
21131+++ linux-2.6.32.41/arch/x86/mm/ioremap.c 2011-04-17 15:56:46.000000000 -0400
21132@@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
21133 * Second special case: Some BIOSen report the PC BIOS
21134 * area (640->1Mb) as ram even though it is not.
21135 */
21136- if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
21137- pagenr < (BIOS_END >> PAGE_SHIFT))
21138+ if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
21139+ pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
21140 return 0;
21141
21142 for (i = 0; i < e820.nr_map; i++) {
21143@@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(re
21144 /*
21145 * Don't allow anybody to remap normal RAM that we're using..
21146 */
21147- for (pfn = phys_addr >> PAGE_SHIFT;
21148- (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
21149- pfn++) {
21150-
21151+ for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
21152 int is_ram = page_is_ram(pfn);
21153
21154- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
21155+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
21156 return NULL;
21157 WARN_ON_ONCE(is_ram);
21158 }
21159@@ -407,7 +404,7 @@ static int __init early_ioremap_debug_se
21160 early_param("early_ioremap_debug", early_ioremap_debug_setup);
21161
21162 static __initdata int after_paging_init;
21163-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
21164+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
21165
21166 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
21167 {
21168@@ -439,8 +436,7 @@ void __init early_ioremap_init(void)
21169 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
21170
21171 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
21172- memset(bm_pte, 0, sizeof(bm_pte));
21173- pmd_populate_kernel(&init_mm, pmd, bm_pte);
21174+ pmd_populate_user(&init_mm, pmd, bm_pte);
21175
21176 /*
21177 * The boot-ioremap range spans multiple pmds, for which
21178diff -urNp linux-2.6.32.41/arch/x86/mm/kmemcheck/kmemcheck.c linux-2.6.32.41/arch/x86/mm/kmemcheck/kmemcheck.c
21179--- linux-2.6.32.41/arch/x86/mm/kmemcheck/kmemcheck.c 2011-03-27 14:31:47.000000000 -0400
21180+++ linux-2.6.32.41/arch/x86/mm/kmemcheck/kmemcheck.c 2011-04-17 15:56:46.000000000 -0400
21181@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
21182 * memory (e.g. tracked pages)? For now, we need this to avoid
21183 * invoking kmemcheck for PnP BIOS calls.
21184 */
21185- if (regs->flags & X86_VM_MASK)
21186+ if (v8086_mode(regs))
21187 return false;
21188- if (regs->cs != __KERNEL_CS)
21189+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
21190 return false;
21191
21192 pte = kmemcheck_pte_lookup(address);
21193diff -urNp linux-2.6.32.41/arch/x86/mm/mmap.c linux-2.6.32.41/arch/x86/mm/mmap.c
21194--- linux-2.6.32.41/arch/x86/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
21195+++ linux-2.6.32.41/arch/x86/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
21196@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
21197 * Leave an at least ~128 MB hole with possible stack randomization.
21198 */
21199 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
21200-#define MAX_GAP (TASK_SIZE/6*5)
21201+#define MAX_GAP (pax_task_size/6*5)
21202
21203 /*
21204 * True on X86_32 or when emulating IA32 on X86_64
21205@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
21206 return rnd << PAGE_SHIFT;
21207 }
21208
21209-static unsigned long mmap_base(void)
21210+static unsigned long mmap_base(struct mm_struct *mm)
21211 {
21212 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
21213+ unsigned long pax_task_size = TASK_SIZE;
21214+
21215+#ifdef CONFIG_PAX_SEGMEXEC
21216+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21217+ pax_task_size = SEGMEXEC_TASK_SIZE;
21218+#endif
21219
21220 if (gap < MIN_GAP)
21221 gap = MIN_GAP;
21222 else if (gap > MAX_GAP)
21223 gap = MAX_GAP;
21224
21225- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
21226+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
21227 }
21228
21229 /*
21230 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
21231 * does, but not when emulating X86_32
21232 */
21233-static unsigned long mmap_legacy_base(void)
21234+static unsigned long mmap_legacy_base(struct mm_struct *mm)
21235 {
21236- if (mmap_is_ia32())
21237+ if (mmap_is_ia32()) {
21238+
21239+#ifdef CONFIG_PAX_SEGMEXEC
21240+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21241+ return SEGMEXEC_TASK_UNMAPPED_BASE;
21242+ else
21243+#endif
21244+
21245 return TASK_UNMAPPED_BASE;
21246- else
21247+ } else
21248 return TASK_UNMAPPED_BASE + mmap_rnd();
21249 }
21250
21251@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
21252 void arch_pick_mmap_layout(struct mm_struct *mm)
21253 {
21254 if (mmap_is_legacy()) {
21255- mm->mmap_base = mmap_legacy_base();
21256+ mm->mmap_base = mmap_legacy_base(mm);
21257+
21258+#ifdef CONFIG_PAX_RANDMMAP
21259+ if (mm->pax_flags & MF_PAX_RANDMMAP)
21260+ mm->mmap_base += mm->delta_mmap;
21261+#endif
21262+
21263 mm->get_unmapped_area = arch_get_unmapped_area;
21264 mm->unmap_area = arch_unmap_area;
21265 } else {
21266- mm->mmap_base = mmap_base();
21267+ mm->mmap_base = mmap_base(mm);
21268+
21269+#ifdef CONFIG_PAX_RANDMMAP
21270+ if (mm->pax_flags & MF_PAX_RANDMMAP)
21271+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
21272+#endif
21273+
21274 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
21275 mm->unmap_area = arch_unmap_area_topdown;
21276 }
21277diff -urNp linux-2.6.32.41/arch/x86/mm/mmio-mod.c linux-2.6.32.41/arch/x86/mm/mmio-mod.c
21278--- linux-2.6.32.41/arch/x86/mm/mmio-mod.c 2011-03-27 14:31:47.000000000 -0400
21279+++ linux-2.6.32.41/arch/x86/mm/mmio-mod.c 2011-05-04 17:56:28.000000000 -0400
21280@@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p,
21281 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
21282 void __iomem *addr)
21283 {
21284- static atomic_t next_id;
21285+ static atomic_unchecked_t next_id;
21286 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
21287 /* These are page-unaligned. */
21288 struct mmiotrace_map map = {
21289@@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_
21290 .private = trace
21291 },
21292 .phys = offset,
21293- .id = atomic_inc_return(&next_id)
21294+ .id = atomic_inc_return_unchecked(&next_id)
21295 };
21296 map.map_id = trace->id;
21297
21298diff -urNp linux-2.6.32.41/arch/x86/mm/numa_32.c linux-2.6.32.41/arch/x86/mm/numa_32.c
21299--- linux-2.6.32.41/arch/x86/mm/numa_32.c 2011-03-27 14:31:47.000000000 -0400
21300+++ linux-2.6.32.41/arch/x86/mm/numa_32.c 2011-04-17 15:56:46.000000000 -0400
21301@@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int
21302 }
21303 #endif
21304
21305-extern unsigned long find_max_low_pfn(void);
21306 extern unsigned long highend_pfn, highstart_pfn;
21307
21308 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
21309diff -urNp linux-2.6.32.41/arch/x86/mm/pageattr.c linux-2.6.32.41/arch/x86/mm/pageattr.c
21310--- linux-2.6.32.41/arch/x86/mm/pageattr.c 2011-03-27 14:31:47.000000000 -0400
21311+++ linux-2.6.32.41/arch/x86/mm/pageattr.c 2011-04-17 15:56:46.000000000 -0400
21312@@ -261,16 +261,17 @@ static inline pgprot_t static_protection
21313 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
21314 */
21315 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
21316- pgprot_val(forbidden) |= _PAGE_NX;
21317+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
21318
21319 /*
21320 * The kernel text needs to be executable for obvious reasons
21321 * Does not cover __inittext since that is gone later on. On
21322 * 64bit we do not enforce !NX on the low mapping
21323 */
21324- if (within(address, (unsigned long)_text, (unsigned long)_etext))
21325- pgprot_val(forbidden) |= _PAGE_NX;
21326+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
21327+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
21328
21329+#ifdef CONFIG_DEBUG_RODATA
21330 /*
21331 * The .rodata section needs to be read-only. Using the pfn
21332 * catches all aliases.
21333@@ -278,6 +279,14 @@ static inline pgprot_t static_protection
21334 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
21335 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
21336 pgprot_val(forbidden) |= _PAGE_RW;
21337+#endif
21338+
21339+#ifdef CONFIG_PAX_KERNEXEC
21340+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
21341+ pgprot_val(forbidden) |= _PAGE_RW;
21342+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
21343+ }
21344+#endif
21345
21346 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
21347
21348@@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
21349 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
21350 {
21351 /* change init_mm */
21352+ pax_open_kernel();
21353 set_pte_atomic(kpte, pte);
21354+
21355 #ifdef CONFIG_X86_32
21356 if (!SHARED_KERNEL_PMD) {
21357+
21358+#ifdef CONFIG_PAX_PER_CPU_PGD
21359+ unsigned long cpu;
21360+#else
21361 struct page *page;
21362+#endif
21363
21364+#ifdef CONFIG_PAX_PER_CPU_PGD
21365+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
21366+ pgd_t *pgd = get_cpu_pgd(cpu);
21367+#else
21368 list_for_each_entry(page, &pgd_list, lru) {
21369- pgd_t *pgd;
21370+ pgd_t *pgd = (pgd_t *)page_address(page);
21371+#endif
21372+
21373 pud_t *pud;
21374 pmd_t *pmd;
21375
21376- pgd = (pgd_t *)page_address(page) + pgd_index(address);
21377+ pgd += pgd_index(address);
21378 pud = pud_offset(pgd, address);
21379 pmd = pmd_offset(pud, address);
21380 set_pte_atomic((pte_t *)pmd, pte);
21381 }
21382 }
21383 #endif
21384+ pax_close_kernel();
21385 }
21386
21387 static int
21388diff -urNp linux-2.6.32.41/arch/x86/mm/pageattr-test.c linux-2.6.32.41/arch/x86/mm/pageattr-test.c
21389--- linux-2.6.32.41/arch/x86/mm/pageattr-test.c 2011-03-27 14:31:47.000000000 -0400
21390+++ linux-2.6.32.41/arch/x86/mm/pageattr-test.c 2011-04-17 15:56:46.000000000 -0400
21391@@ -36,7 +36,7 @@ enum {
21392
21393 static int pte_testbit(pte_t pte)
21394 {
21395- return pte_flags(pte) & _PAGE_UNUSED1;
21396+ return pte_flags(pte) & _PAGE_CPA_TEST;
21397 }
21398
21399 struct split_state {
21400diff -urNp linux-2.6.32.41/arch/x86/mm/pat.c linux-2.6.32.41/arch/x86/mm/pat.c
21401--- linux-2.6.32.41/arch/x86/mm/pat.c 2011-03-27 14:31:47.000000000 -0400
21402+++ linux-2.6.32.41/arch/x86/mm/pat.c 2011-04-17 15:56:46.000000000 -0400
21403@@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct
21404
21405 conflict:
21406 printk(KERN_INFO "%s:%d conflicting memory types "
21407- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
21408+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
21409 new->end, cattr_name(new->type), cattr_name(entry->type));
21410 return -EBUSY;
21411 }
21412@@ -559,7 +559,7 @@ unlock_ret:
21413
21414 if (err) {
21415 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
21416- current->comm, current->pid, start, end);
21417+ current->comm, task_pid_nr(current), start, end);
21418 }
21419
21420 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
21421@@ -689,8 +689,8 @@ static inline int range_is_allowed(unsig
21422 while (cursor < to) {
21423 if (!devmem_is_allowed(pfn)) {
21424 printk(KERN_INFO
21425- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
21426- current->comm, from, to);
21427+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
21428+ current->comm, from, to, cursor);
21429 return 0;
21430 }
21431 cursor += PAGE_SIZE;
21432@@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, un
21433 printk(KERN_INFO
21434 "%s:%d ioremap_change_attr failed %s "
21435 "for %Lx-%Lx\n",
21436- current->comm, current->pid,
21437+ current->comm, task_pid_nr(current),
21438 cattr_name(flags),
21439 base, (unsigned long long)(base + size));
21440 return -EINVAL;
21441@@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr,
21442 free_memtype(paddr, paddr + size);
21443 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
21444 " for %Lx-%Lx, got %s\n",
21445- current->comm, current->pid,
21446+ current->comm, task_pid_nr(current),
21447 cattr_name(want_flags),
21448 (unsigned long long)paddr,
21449 (unsigned long long)(paddr + size),
21450diff -urNp linux-2.6.32.41/arch/x86/mm/pgtable_32.c linux-2.6.32.41/arch/x86/mm/pgtable_32.c
21451--- linux-2.6.32.41/arch/x86/mm/pgtable_32.c 2011-03-27 14:31:47.000000000 -0400
21452+++ linux-2.6.32.41/arch/x86/mm/pgtable_32.c 2011-04-17 15:56:46.000000000 -0400
21453@@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr,
21454 return;
21455 }
21456 pte = pte_offset_kernel(pmd, vaddr);
21457+
21458+ pax_open_kernel();
21459 if (pte_val(pteval))
21460 set_pte_at(&init_mm, vaddr, pte, pteval);
21461 else
21462 pte_clear(&init_mm, vaddr, pte);
21463+ pax_close_kernel();
21464
21465 /*
21466 * It's enough to flush this one mapping.
21467diff -urNp linux-2.6.32.41/arch/x86/mm/pgtable.c linux-2.6.32.41/arch/x86/mm/pgtable.c
21468--- linux-2.6.32.41/arch/x86/mm/pgtable.c 2011-03-27 14:31:47.000000000 -0400
21469+++ linux-2.6.32.41/arch/x86/mm/pgtable.c 2011-05-11 18:25:15.000000000 -0400
21470@@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *p
21471 list_del(&page->lru);
21472 }
21473
21474-#define UNSHARED_PTRS_PER_PGD \
21475- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
21476+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21477+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
21478
21479+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
21480+{
21481+ while (count--)
21482+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
21483+}
21484+#endif
21485+
21486+#ifdef CONFIG_PAX_PER_CPU_PGD
21487+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
21488+{
21489+ while (count--)
21490+
21491+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21492+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
21493+#else
21494+ *dst++ = *src++;
21495+#endif
21496+
21497+}
21498+#endif
21499+
21500+#ifdef CONFIG_X86_64
21501+#define pxd_t pud_t
21502+#define pyd_t pgd_t
21503+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
21504+#define pxd_free(mm, pud) pud_free((mm), (pud))
21505+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
21506+#define pyd_offset(mm ,address) pgd_offset((mm), (address))
21507+#define PYD_SIZE PGDIR_SIZE
21508+#else
21509+#define pxd_t pmd_t
21510+#define pyd_t pud_t
21511+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
21512+#define pxd_free(mm, pud) pmd_free((mm), (pud))
21513+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
21514+#define pyd_offset(mm ,address) pud_offset((mm), (address))
21515+#define PYD_SIZE PUD_SIZE
21516+#endif
21517+
21518+#ifdef CONFIG_PAX_PER_CPU_PGD
21519+static inline void pgd_ctor(pgd_t *pgd) {}
21520+static inline void pgd_dtor(pgd_t *pgd) {}
21521+#else
21522 static void pgd_ctor(pgd_t *pgd)
21523 {
21524 /* If the pgd points to a shared pagetable level (either the
21525@@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
21526 pgd_list_del(pgd);
21527 spin_unlock_irqrestore(&pgd_lock, flags);
21528 }
21529+#endif
21530
21531 /*
21532 * List of all pgd's needed for non-PAE so it can invalidate entries
21533@@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
21534 * -- wli
21535 */
21536
21537-#ifdef CONFIG_X86_PAE
21538+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
21539 /*
21540 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
21541 * updating the top-level pagetable entries to guarantee the
21542@@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
21543 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
21544 * and initialize the kernel pmds here.
21545 */
21546-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
21547+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
21548
21549 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
21550 {
21551@@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm,
21552 */
21553 flush_tlb_mm(mm);
21554 }
21555+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
21556+#define PREALLOCATED_PXDS USER_PGD_PTRS
21557 #else /* !CONFIG_X86_PAE */
21558
21559 /* No need to prepopulate any pagetable entries in non-PAE modes. */
21560-#define PREALLOCATED_PMDS 0
21561+#define PREALLOCATED_PXDS 0
21562
21563 #endif /* CONFIG_X86_PAE */
21564
21565-static void free_pmds(pmd_t *pmds[])
21566+static void free_pxds(pxd_t *pxds[])
21567 {
21568 int i;
21569
21570- for(i = 0; i < PREALLOCATED_PMDS; i++)
21571- if (pmds[i])
21572- free_page((unsigned long)pmds[i]);
21573+ for(i = 0; i < PREALLOCATED_PXDS; i++)
21574+ if (pxds[i])
21575+ free_page((unsigned long)pxds[i]);
21576 }
21577
21578-static int preallocate_pmds(pmd_t *pmds[])
21579+static int preallocate_pxds(pxd_t *pxds[])
21580 {
21581 int i;
21582 bool failed = false;
21583
21584- for(i = 0; i < PREALLOCATED_PMDS; i++) {
21585- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
21586- if (pmd == NULL)
21587+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
21588+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
21589+ if (pxd == NULL)
21590 failed = true;
21591- pmds[i] = pmd;
21592+ pxds[i] = pxd;
21593 }
21594
21595 if (failed) {
21596- free_pmds(pmds);
21597+ free_pxds(pxds);
21598 return -ENOMEM;
21599 }
21600
21601@@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[
21602 * preallocate which never got a corresponding vma will need to be
21603 * freed manually.
21604 */
21605-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
21606+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
21607 {
21608 int i;
21609
21610- for(i = 0; i < PREALLOCATED_PMDS; i++) {
21611+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
21612 pgd_t pgd = pgdp[i];
21613
21614 if (pgd_val(pgd) != 0) {
21615- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
21616+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
21617
21618- pgdp[i] = native_make_pgd(0);
21619+ set_pgd(pgdp + i, native_make_pgd(0));
21620
21621- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
21622- pmd_free(mm, pmd);
21623+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
21624+ pxd_free(mm, pxd);
21625 }
21626 }
21627 }
21628
21629-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
21630+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
21631 {
21632- pud_t *pud;
21633+ pyd_t *pyd;
21634 unsigned long addr;
21635 int i;
21636
21637- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
21638+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
21639 return;
21640
21641- pud = pud_offset(pgd, 0);
21642+#ifdef CONFIG_X86_64
21643+ pyd = pyd_offset(mm, 0L);
21644+#else
21645+ pyd = pyd_offset(pgd, 0L);
21646+#endif
21647
21648- for (addr = i = 0; i < PREALLOCATED_PMDS;
21649- i++, pud++, addr += PUD_SIZE) {
21650- pmd_t *pmd = pmds[i];
21651+ for (addr = i = 0; i < PREALLOCATED_PXDS;
21652+ i++, pyd++, addr += PYD_SIZE) {
21653+ pxd_t *pxd = pxds[i];
21654
21655 if (i >= KERNEL_PGD_BOUNDARY)
21656- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
21657- sizeof(pmd_t) * PTRS_PER_PMD);
21658+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
21659+ sizeof(pxd_t) * PTRS_PER_PMD);
21660
21661- pud_populate(mm, pud, pmd);
21662+ pyd_populate(mm, pyd, pxd);
21663 }
21664 }
21665
21666 pgd_t *pgd_alloc(struct mm_struct *mm)
21667 {
21668 pgd_t *pgd;
21669- pmd_t *pmds[PREALLOCATED_PMDS];
21670+ pxd_t *pxds[PREALLOCATED_PXDS];
21671+
21672 unsigned long flags;
21673
21674 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
21675@@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
21676
21677 mm->pgd = pgd;
21678
21679- if (preallocate_pmds(pmds) != 0)
21680+ if (preallocate_pxds(pxds) != 0)
21681 goto out_free_pgd;
21682
21683 if (paravirt_pgd_alloc(mm) != 0)
21684- goto out_free_pmds;
21685+ goto out_free_pxds;
21686
21687 /*
21688 * Make sure that pre-populating the pmds is atomic with
21689@@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
21690 spin_lock_irqsave(&pgd_lock, flags);
21691
21692 pgd_ctor(pgd);
21693- pgd_prepopulate_pmd(mm, pgd, pmds);
21694+ pgd_prepopulate_pxd(mm, pgd, pxds);
21695
21696 spin_unlock_irqrestore(&pgd_lock, flags);
21697
21698 return pgd;
21699
21700-out_free_pmds:
21701- free_pmds(pmds);
21702+out_free_pxds:
21703+ free_pxds(pxds);
21704 out_free_pgd:
21705 free_page((unsigned long)pgd);
21706 out:
21707@@ -287,7 +338,7 @@ out:
21708
21709 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
21710 {
21711- pgd_mop_up_pmds(mm, pgd);
21712+ pgd_mop_up_pxds(mm, pgd);
21713 pgd_dtor(pgd);
21714 paravirt_pgd_free(mm, pgd);
21715 free_page((unsigned long)pgd);
21716diff -urNp linux-2.6.32.41/arch/x86/mm/setup_nx.c linux-2.6.32.41/arch/x86/mm/setup_nx.c
21717--- linux-2.6.32.41/arch/x86/mm/setup_nx.c 2011-03-27 14:31:47.000000000 -0400
21718+++ linux-2.6.32.41/arch/x86/mm/setup_nx.c 2011-04-17 15:56:46.000000000 -0400
21719@@ -4,11 +4,10 @@
21720
21721 #include <asm/pgtable.h>
21722
21723+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
21724 int nx_enabled;
21725
21726-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21727-static int disable_nx __cpuinitdata;
21728-
21729+#ifndef CONFIG_PAX_PAGEEXEC
21730 /*
21731 * noexec = on|off
21732 *
21733@@ -22,32 +21,26 @@ static int __init noexec_setup(char *str
21734 if (!str)
21735 return -EINVAL;
21736 if (!strncmp(str, "on", 2)) {
21737- __supported_pte_mask |= _PAGE_NX;
21738- disable_nx = 0;
21739+ nx_enabled = 1;
21740 } else if (!strncmp(str, "off", 3)) {
21741- disable_nx = 1;
21742- __supported_pte_mask &= ~_PAGE_NX;
21743+ nx_enabled = 0;
21744 }
21745 return 0;
21746 }
21747 early_param("noexec", noexec_setup);
21748 #endif
21749+#endif
21750
21751 #ifdef CONFIG_X86_PAE
21752 void __init set_nx(void)
21753 {
21754- unsigned int v[4], l, h;
21755+ if (!nx_enabled && cpu_has_nx) {
21756+ unsigned l, h;
21757
21758- if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
21759- cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
21760-
21761- if ((v[3] & (1 << 20)) && !disable_nx) {
21762- rdmsr(MSR_EFER, l, h);
21763- l |= EFER_NX;
21764- wrmsr(MSR_EFER, l, h);
21765- nx_enabled = 1;
21766- __supported_pte_mask |= _PAGE_NX;
21767- }
21768+ __supported_pte_mask &= ~_PAGE_NX;
21769+ rdmsr(MSR_EFER, l, h);
21770+ l &= ~EFER_NX;
21771+ wrmsr(MSR_EFER, l, h);
21772 }
21773 }
21774 #else
21775@@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
21776 unsigned long efer;
21777
21778 rdmsrl(MSR_EFER, efer);
21779- if (!(efer & EFER_NX) || disable_nx)
21780+ if (!(efer & EFER_NX) || !nx_enabled)
21781 __supported_pte_mask &= ~_PAGE_NX;
21782 }
21783 #endif
21784diff -urNp linux-2.6.32.41/arch/x86/mm/tlb.c linux-2.6.32.41/arch/x86/mm/tlb.c
21785--- linux-2.6.32.41/arch/x86/mm/tlb.c 2011-03-27 14:31:47.000000000 -0400
21786+++ linux-2.6.32.41/arch/x86/mm/tlb.c 2011-04-23 12:56:10.000000000 -0400
21787@@ -61,7 +61,11 @@ void leave_mm(int cpu)
21788 BUG();
21789 cpumask_clear_cpu(cpu,
21790 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
21791+
21792+#ifndef CONFIG_PAX_PER_CPU_PGD
21793 load_cr3(swapper_pg_dir);
21794+#endif
21795+
21796 }
21797 EXPORT_SYMBOL_GPL(leave_mm);
21798
21799diff -urNp linux-2.6.32.41/arch/x86/oprofile/backtrace.c linux-2.6.32.41/arch/x86/oprofile/backtrace.c
21800--- linux-2.6.32.41/arch/x86/oprofile/backtrace.c 2011-03-27 14:31:47.000000000 -0400
21801+++ linux-2.6.32.41/arch/x86/oprofile/backtrace.c 2011-04-17 15:56:46.000000000 -0400
21802@@ -57,7 +57,7 @@ static struct frame_head *dump_user_back
21803 struct frame_head bufhead[2];
21804
21805 /* Also check accessibility of one struct frame_head beyond */
21806- if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
21807+ if (!__access_ok(VERIFY_READ, head, sizeof(bufhead)))
21808 return NULL;
21809 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
21810 return NULL;
21811@@ -77,7 +77,7 @@ x86_backtrace(struct pt_regs * const reg
21812 {
21813 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
21814
21815- if (!user_mode_vm(regs)) {
21816+ if (!user_mode(regs)) {
21817 unsigned long stack = kernel_stack_pointer(regs);
21818 if (depth)
21819 dump_trace(NULL, regs, (unsigned long *)stack, 0,
21820diff -urNp linux-2.6.32.41/arch/x86/oprofile/op_model_p4.c linux-2.6.32.41/arch/x86/oprofile/op_model_p4.c
21821--- linux-2.6.32.41/arch/x86/oprofile/op_model_p4.c 2011-03-27 14:31:47.000000000 -0400
21822+++ linux-2.6.32.41/arch/x86/oprofile/op_model_p4.c 2011-04-17 15:56:46.000000000 -0400
21823@@ -50,7 +50,7 @@ static inline void setup_num_counters(vo
21824 #endif
21825 }
21826
21827-static int inline addr_increment(void)
21828+static inline int addr_increment(void)
21829 {
21830 #ifdef CONFIG_SMP
21831 return smp_num_siblings == 2 ? 2 : 1;
21832diff -urNp linux-2.6.32.41/arch/x86/pci/common.c linux-2.6.32.41/arch/x86/pci/common.c
21833--- linux-2.6.32.41/arch/x86/pci/common.c 2011-03-27 14:31:47.000000000 -0400
21834+++ linux-2.6.32.41/arch/x86/pci/common.c 2011-04-23 12:56:10.000000000 -0400
21835@@ -31,8 +31,8 @@ int noioapicreroute = 1;
21836 int pcibios_last_bus = -1;
21837 unsigned long pirq_table_addr;
21838 struct pci_bus *pci_root_bus;
21839-struct pci_raw_ops *raw_pci_ops;
21840-struct pci_raw_ops *raw_pci_ext_ops;
21841+const struct pci_raw_ops *raw_pci_ops;
21842+const struct pci_raw_ops *raw_pci_ext_ops;
21843
21844 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
21845 int reg, int len, u32 *val)
21846diff -urNp linux-2.6.32.41/arch/x86/pci/direct.c linux-2.6.32.41/arch/x86/pci/direct.c
21847--- linux-2.6.32.41/arch/x86/pci/direct.c 2011-03-27 14:31:47.000000000 -0400
21848+++ linux-2.6.32.41/arch/x86/pci/direct.c 2011-04-17 15:56:46.000000000 -0400
21849@@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int
21850
21851 #undef PCI_CONF1_ADDRESS
21852
21853-struct pci_raw_ops pci_direct_conf1 = {
21854+const struct pci_raw_ops pci_direct_conf1 = {
21855 .read = pci_conf1_read,
21856 .write = pci_conf1_write,
21857 };
21858@@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int
21859
21860 #undef PCI_CONF2_ADDRESS
21861
21862-struct pci_raw_ops pci_direct_conf2 = {
21863+const struct pci_raw_ops pci_direct_conf2 = {
21864 .read = pci_conf2_read,
21865 .write = pci_conf2_write,
21866 };
21867@@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
21868 * This should be close to trivial, but it isn't, because there are buggy
21869 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
21870 */
21871-static int __init pci_sanity_check(struct pci_raw_ops *o)
21872+static int __init pci_sanity_check(const struct pci_raw_ops *o)
21873 {
21874 u32 x = 0;
21875 int year, devfn;
21876diff -urNp linux-2.6.32.41/arch/x86/pci/mmconfig_32.c linux-2.6.32.41/arch/x86/pci/mmconfig_32.c
21877--- linux-2.6.32.41/arch/x86/pci/mmconfig_32.c 2011-03-27 14:31:47.000000000 -0400
21878+++ linux-2.6.32.41/arch/x86/pci/mmconfig_32.c 2011-04-17 15:56:46.000000000 -0400
21879@@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int
21880 return 0;
21881 }
21882
21883-static struct pci_raw_ops pci_mmcfg = {
21884+static const struct pci_raw_ops pci_mmcfg = {
21885 .read = pci_mmcfg_read,
21886 .write = pci_mmcfg_write,
21887 };
21888diff -urNp linux-2.6.32.41/arch/x86/pci/mmconfig_64.c linux-2.6.32.41/arch/x86/pci/mmconfig_64.c
21889--- linux-2.6.32.41/arch/x86/pci/mmconfig_64.c 2011-03-27 14:31:47.000000000 -0400
21890+++ linux-2.6.32.41/arch/x86/pci/mmconfig_64.c 2011-04-17 15:56:46.000000000 -0400
21891@@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int
21892 return 0;
21893 }
21894
21895-static struct pci_raw_ops pci_mmcfg = {
21896+static const struct pci_raw_ops pci_mmcfg = {
21897 .read = pci_mmcfg_read,
21898 .write = pci_mmcfg_write,
21899 };
21900diff -urNp linux-2.6.32.41/arch/x86/pci/numaq_32.c linux-2.6.32.41/arch/x86/pci/numaq_32.c
21901--- linux-2.6.32.41/arch/x86/pci/numaq_32.c 2011-03-27 14:31:47.000000000 -0400
21902+++ linux-2.6.32.41/arch/x86/pci/numaq_32.c 2011-04-17 15:56:46.000000000 -0400
21903@@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned i
21904
21905 #undef PCI_CONF1_MQ_ADDRESS
21906
21907-static struct pci_raw_ops pci_direct_conf1_mq = {
21908+static const struct pci_raw_ops pci_direct_conf1_mq = {
21909 .read = pci_conf1_mq_read,
21910 .write = pci_conf1_mq_write
21911 };
21912diff -urNp linux-2.6.32.41/arch/x86/pci/olpc.c linux-2.6.32.41/arch/x86/pci/olpc.c
21913--- linux-2.6.32.41/arch/x86/pci/olpc.c 2011-03-27 14:31:47.000000000 -0400
21914+++ linux-2.6.32.41/arch/x86/pci/olpc.c 2011-04-17 15:56:46.000000000 -0400
21915@@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int s
21916 return 0;
21917 }
21918
21919-static struct pci_raw_ops pci_olpc_conf = {
21920+static const struct pci_raw_ops pci_olpc_conf = {
21921 .read = pci_olpc_read,
21922 .write = pci_olpc_write,
21923 };
21924diff -urNp linux-2.6.32.41/arch/x86/pci/pcbios.c linux-2.6.32.41/arch/x86/pci/pcbios.c
21925--- linux-2.6.32.41/arch/x86/pci/pcbios.c 2011-03-27 14:31:47.000000000 -0400
21926+++ linux-2.6.32.41/arch/x86/pci/pcbios.c 2011-04-17 15:56:46.000000000 -0400
21927@@ -56,50 +56,93 @@ union bios32 {
21928 static struct {
21929 unsigned long address;
21930 unsigned short segment;
21931-} bios32_indirect = { 0, __KERNEL_CS };
21932+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
21933
21934 /*
21935 * Returns the entry point for the given service, NULL on error
21936 */
21937
21938-static unsigned long bios32_service(unsigned long service)
21939+static unsigned long __devinit bios32_service(unsigned long service)
21940 {
21941 unsigned char return_code; /* %al */
21942 unsigned long address; /* %ebx */
21943 unsigned long length; /* %ecx */
21944 unsigned long entry; /* %edx */
21945 unsigned long flags;
21946+ struct desc_struct d, *gdt;
21947
21948 local_irq_save(flags);
21949- __asm__("lcall *(%%edi); cld"
21950+
21951+ gdt = get_cpu_gdt_table(smp_processor_id());
21952+
21953+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
21954+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
21955+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
21956+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
21957+
21958+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
21959 : "=a" (return_code),
21960 "=b" (address),
21961 "=c" (length),
21962 "=d" (entry)
21963 : "0" (service),
21964 "1" (0),
21965- "D" (&bios32_indirect));
21966+ "D" (&bios32_indirect),
21967+ "r"(__PCIBIOS_DS)
21968+ : "memory");
21969+
21970+ pax_open_kernel();
21971+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
21972+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
21973+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
21974+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
21975+ pax_close_kernel();
21976+
21977 local_irq_restore(flags);
21978
21979 switch (return_code) {
21980- case 0:
21981- return address + entry;
21982- case 0x80: /* Not present */
21983- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
21984- return 0;
21985- default: /* Shouldn't happen */
21986- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
21987- service, return_code);
21988+ case 0: {
21989+ int cpu;
21990+ unsigned char flags;
21991+
21992+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
21993+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
21994+ printk(KERN_WARNING "bios32_service: not valid\n");
21995 return 0;
21996+ }
21997+ address = address + PAGE_OFFSET;
21998+ length += 16UL; /* some BIOSs underreport this... */
21999+ flags = 4;
22000+ if (length >= 64*1024*1024) {
22001+ length >>= PAGE_SHIFT;
22002+ flags |= 8;
22003+ }
22004+
22005+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
22006+ gdt = get_cpu_gdt_table(cpu);
22007+ pack_descriptor(&d, address, length, 0x9b, flags);
22008+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
22009+ pack_descriptor(&d, address, length, 0x93, flags);
22010+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
22011+ }
22012+ return entry;
22013+ }
22014+ case 0x80: /* Not present */
22015+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
22016+ return 0;
22017+ default: /* Shouldn't happen */
22018+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
22019+ service, return_code);
22020+ return 0;
22021 }
22022 }
22023
22024 static struct {
22025 unsigned long address;
22026 unsigned short segment;
22027-} pci_indirect = { 0, __KERNEL_CS };
22028+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
22029
22030-static int pci_bios_present;
22031+static int pci_bios_present __read_only;
22032
22033 static int __devinit check_pcibios(void)
22034 {
22035@@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
22036 unsigned long flags, pcibios_entry;
22037
22038 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
22039- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
22040+ pci_indirect.address = pcibios_entry;
22041
22042 local_irq_save(flags);
22043- __asm__(
22044- "lcall *(%%edi); cld\n\t"
22045+ __asm__("movw %w6, %%ds\n\t"
22046+ "lcall *%%ss:(%%edi); cld\n\t"
22047+ "push %%ss\n\t"
22048+ "pop %%ds\n\t"
22049 "jc 1f\n\t"
22050 "xor %%ah, %%ah\n"
22051 "1:"
22052@@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
22053 "=b" (ebx),
22054 "=c" (ecx)
22055 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
22056- "D" (&pci_indirect)
22057+ "D" (&pci_indirect),
22058+ "r" (__PCIBIOS_DS)
22059 : "memory");
22060 local_irq_restore(flags);
22061
22062@@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int se
22063
22064 switch (len) {
22065 case 1:
22066- __asm__("lcall *(%%esi); cld\n\t"
22067+ __asm__("movw %w6, %%ds\n\t"
22068+ "lcall *%%ss:(%%esi); cld\n\t"
22069+ "push %%ss\n\t"
22070+ "pop %%ds\n\t"
22071 "jc 1f\n\t"
22072 "xor %%ah, %%ah\n"
22073 "1:"
22074@@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int se
22075 : "1" (PCIBIOS_READ_CONFIG_BYTE),
22076 "b" (bx),
22077 "D" ((long)reg),
22078- "S" (&pci_indirect));
22079+ "S" (&pci_indirect),
22080+ "r" (__PCIBIOS_DS));
22081 /*
22082 * Zero-extend the result beyond 8 bits, do not trust the
22083 * BIOS having done it:
22084@@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int se
22085 *value &= 0xff;
22086 break;
22087 case 2:
22088- __asm__("lcall *(%%esi); cld\n\t"
22089+ __asm__("movw %w6, %%ds\n\t"
22090+ "lcall *%%ss:(%%esi); cld\n\t"
22091+ "push %%ss\n\t"
22092+ "pop %%ds\n\t"
22093 "jc 1f\n\t"
22094 "xor %%ah, %%ah\n"
22095 "1:"
22096@@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int se
22097 : "1" (PCIBIOS_READ_CONFIG_WORD),
22098 "b" (bx),
22099 "D" ((long)reg),
22100- "S" (&pci_indirect));
22101+ "S" (&pci_indirect),
22102+ "r" (__PCIBIOS_DS));
22103 /*
22104 * Zero-extend the result beyond 16 bits, do not trust the
22105 * BIOS having done it:
22106@@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int se
22107 *value &= 0xffff;
22108 break;
22109 case 4:
22110- __asm__("lcall *(%%esi); cld\n\t"
22111+ __asm__("movw %w6, %%ds\n\t"
22112+ "lcall *%%ss:(%%esi); cld\n\t"
22113+ "push %%ss\n\t"
22114+ "pop %%ds\n\t"
22115 "jc 1f\n\t"
22116 "xor %%ah, %%ah\n"
22117 "1:"
22118@@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int se
22119 : "1" (PCIBIOS_READ_CONFIG_DWORD),
22120 "b" (bx),
22121 "D" ((long)reg),
22122- "S" (&pci_indirect));
22123+ "S" (&pci_indirect),
22124+ "r" (__PCIBIOS_DS));
22125 break;
22126 }
22127
22128@@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int s
22129
22130 switch (len) {
22131 case 1:
22132- __asm__("lcall *(%%esi); cld\n\t"
22133+ __asm__("movw %w6, %%ds\n\t"
22134+ "lcall *%%ss:(%%esi); cld\n\t"
22135+ "push %%ss\n\t"
22136+ "pop %%ds\n\t"
22137 "jc 1f\n\t"
22138 "xor %%ah, %%ah\n"
22139 "1:"
22140@@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int s
22141 "c" (value),
22142 "b" (bx),
22143 "D" ((long)reg),
22144- "S" (&pci_indirect));
22145+ "S" (&pci_indirect),
22146+ "r" (__PCIBIOS_DS));
22147 break;
22148 case 2:
22149- __asm__("lcall *(%%esi); cld\n\t"
22150+ __asm__("movw %w6, %%ds\n\t"
22151+ "lcall *%%ss:(%%esi); cld\n\t"
22152+ "push %%ss\n\t"
22153+ "pop %%ds\n\t"
22154 "jc 1f\n\t"
22155 "xor %%ah, %%ah\n"
22156 "1:"
22157@@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int s
22158 "c" (value),
22159 "b" (bx),
22160 "D" ((long)reg),
22161- "S" (&pci_indirect));
22162+ "S" (&pci_indirect),
22163+ "r" (__PCIBIOS_DS));
22164 break;
22165 case 4:
22166- __asm__("lcall *(%%esi); cld\n\t"
22167+ __asm__("movw %w6, %%ds\n\t"
22168+ "lcall *%%ss:(%%esi); cld\n\t"
22169+ "push %%ss\n\t"
22170+ "pop %%ds\n\t"
22171 "jc 1f\n\t"
22172 "xor %%ah, %%ah\n"
22173 "1:"
22174@@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int s
22175 "c" (value),
22176 "b" (bx),
22177 "D" ((long)reg),
22178- "S" (&pci_indirect));
22179+ "S" (&pci_indirect),
22180+ "r" (__PCIBIOS_DS));
22181 break;
22182 }
22183
22184@@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int s
22185 * Function table for BIOS32 access
22186 */
22187
22188-static struct pci_raw_ops pci_bios_access = {
22189+static const struct pci_raw_ops pci_bios_access = {
22190 .read = pci_bios_read,
22191 .write = pci_bios_write
22192 };
22193@@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_acces
22194 * Try to find PCI BIOS.
22195 */
22196
22197-static struct pci_raw_ops * __devinit pci_find_bios(void)
22198+static const struct pci_raw_ops * __devinit pci_find_bios(void)
22199 {
22200 union bios32 *check;
22201 unsigned char sum;
22202@@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_i
22203
22204 DBG("PCI: Fetching IRQ routing table... ");
22205 __asm__("push %%es\n\t"
22206+ "movw %w8, %%ds\n\t"
22207 "push %%ds\n\t"
22208 "pop %%es\n\t"
22209- "lcall *(%%esi); cld\n\t"
22210+ "lcall *%%ss:(%%esi); cld\n\t"
22211 "pop %%es\n\t"
22212+ "push %%ss\n\t"
22213+ "pop %%ds\n"
22214 "jc 1f\n\t"
22215 "xor %%ah, %%ah\n"
22216 "1:"
22217@@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_i
22218 "1" (0),
22219 "D" ((long) &opt),
22220 "S" (&pci_indirect),
22221- "m" (opt)
22222+ "m" (opt),
22223+ "r" (__PCIBIOS_DS)
22224 : "memory");
22225 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
22226 if (ret & 0xff00)
22227@@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_d
22228 {
22229 int ret;
22230
22231- __asm__("lcall *(%%esi); cld\n\t"
22232+ __asm__("movw %w5, %%ds\n\t"
22233+ "lcall *%%ss:(%%esi); cld\n\t"
22234+ "push %%ss\n\t"
22235+ "pop %%ds\n"
22236 "jc 1f\n\t"
22237 "xor %%ah, %%ah\n"
22238 "1:"
22239@@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_d
22240 : "0" (PCIBIOS_SET_PCI_HW_INT),
22241 "b" ((dev->bus->number << 8) | dev->devfn),
22242 "c" ((irq << 8) | (pin + 10)),
22243- "S" (&pci_indirect));
22244+ "S" (&pci_indirect),
22245+ "r" (__PCIBIOS_DS));
22246 return !(ret & 0xff00);
22247 }
22248 EXPORT_SYMBOL(pcibios_set_irq_routing);
22249diff -urNp linux-2.6.32.41/arch/x86/power/cpu.c linux-2.6.32.41/arch/x86/power/cpu.c
22250--- linux-2.6.32.41/arch/x86/power/cpu.c 2011-03-27 14:31:47.000000000 -0400
22251+++ linux-2.6.32.41/arch/x86/power/cpu.c 2011-04-17 15:56:46.000000000 -0400
22252@@ -129,7 +129,7 @@ static void do_fpu_end(void)
22253 static void fix_processor_context(void)
22254 {
22255 int cpu = smp_processor_id();
22256- struct tss_struct *t = &per_cpu(init_tss, cpu);
22257+ struct tss_struct *t = init_tss + cpu;
22258
22259 set_tss_desc(cpu, t); /*
22260 * This just modifies memory; should not be
22261@@ -139,7 +139,9 @@ static void fix_processor_context(void)
22262 */
22263
22264 #ifdef CONFIG_X86_64
22265+ pax_open_kernel();
22266 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
22267+ pax_close_kernel();
22268
22269 syscall_init(); /* This sets MSR_*STAR and related */
22270 #endif
22271diff -urNp linux-2.6.32.41/arch/x86/vdso/Makefile linux-2.6.32.41/arch/x86/vdso/Makefile
22272--- linux-2.6.32.41/arch/x86/vdso/Makefile 2011-03-27 14:31:47.000000000 -0400
22273+++ linux-2.6.32.41/arch/x86/vdso/Makefile 2011-04-17 15:56:46.000000000 -0400
22274@@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
22275 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
22276 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
22277
22278-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
22279+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
22280 GCOV_PROFILE := n
22281
22282 #
22283diff -urNp linux-2.6.32.41/arch/x86/vdso/vclock_gettime.c linux-2.6.32.41/arch/x86/vdso/vclock_gettime.c
22284--- linux-2.6.32.41/arch/x86/vdso/vclock_gettime.c 2011-03-27 14:31:47.000000000 -0400
22285+++ linux-2.6.32.41/arch/x86/vdso/vclock_gettime.c 2011-04-17 15:56:46.000000000 -0400
22286@@ -22,24 +22,48 @@
22287 #include <asm/hpet.h>
22288 #include <asm/unistd.h>
22289 #include <asm/io.h>
22290+#include <asm/fixmap.h>
22291 #include "vextern.h"
22292
22293 #define gtod vdso_vsyscall_gtod_data
22294
22295+notrace noinline long __vdso_fallback_time(long *t)
22296+{
22297+ long secs;
22298+ asm volatile("syscall"
22299+ : "=a" (secs)
22300+ : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
22301+ return secs;
22302+}
22303+
22304 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
22305 {
22306 long ret;
22307 asm("syscall" : "=a" (ret) :
22308- "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
22309+ "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
22310 return ret;
22311 }
22312
22313+notrace static inline cycle_t __vdso_vread_hpet(void)
22314+{
22315+ return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
22316+}
22317+
22318+notrace static inline cycle_t __vdso_vread_tsc(void)
22319+{
22320+ cycle_t ret = (cycle_t)vget_cycles();
22321+
22322+ return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
22323+}
22324+
22325 notrace static inline long vgetns(void)
22326 {
22327 long v;
22328- cycles_t (*vread)(void);
22329- vread = gtod->clock.vread;
22330- v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
22331+ if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
22332+ v = __vdso_vread_tsc();
22333+ else
22334+ v = __vdso_vread_hpet();
22335+ v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
22336 return (v * gtod->clock.mult) >> gtod->clock.shift;
22337 }
22338
22339@@ -113,7 +137,9 @@ notrace static noinline int do_monotonic
22340
22341 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
22342 {
22343- if (likely(gtod->sysctl_enabled))
22344+ if (likely(gtod->sysctl_enabled &&
22345+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
22346+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
22347 switch (clock) {
22348 case CLOCK_REALTIME:
22349 if (likely(gtod->clock.vread))
22350@@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid
22351 int clock_gettime(clockid_t, struct timespec *)
22352 __attribute__((weak, alias("__vdso_clock_gettime")));
22353
22354-notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
22355+notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
22356 {
22357 long ret;
22358- if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
22359+ asm("syscall" : "=a" (ret) :
22360+ "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
22361+ return ret;
22362+}
22363+
22364+notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
22365+{
22366+ if (likely(gtod->sysctl_enabled &&
22367+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
22368+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
22369+ {
22370 if (likely(tv != NULL)) {
22371 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
22372 offsetof(struct timespec, tv_nsec) ||
22373@@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct t
22374 }
22375 return 0;
22376 }
22377- asm("syscall" : "=a" (ret) :
22378- "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
22379- return ret;
22380+ return __vdso_fallback_gettimeofday(tv, tz);
22381 }
22382 int gettimeofday(struct timeval *, struct timezone *)
22383 __attribute__((weak, alias("__vdso_gettimeofday")));
22384diff -urNp linux-2.6.32.41/arch/x86/vdso/vdso32-setup.c linux-2.6.32.41/arch/x86/vdso/vdso32-setup.c
22385--- linux-2.6.32.41/arch/x86/vdso/vdso32-setup.c 2011-03-27 14:31:47.000000000 -0400
22386+++ linux-2.6.32.41/arch/x86/vdso/vdso32-setup.c 2011-04-23 12:56:10.000000000 -0400
22387@@ -25,6 +25,7 @@
22388 #include <asm/tlbflush.h>
22389 #include <asm/vdso.h>
22390 #include <asm/proto.h>
22391+#include <asm/mman.h>
22392
22393 enum {
22394 VDSO_DISABLED = 0,
22395@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
22396 void enable_sep_cpu(void)
22397 {
22398 int cpu = get_cpu();
22399- struct tss_struct *tss = &per_cpu(init_tss, cpu);
22400+ struct tss_struct *tss = init_tss + cpu;
22401
22402 if (!boot_cpu_has(X86_FEATURE_SEP)) {
22403 put_cpu();
22404@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
22405 gate_vma.vm_start = FIXADDR_USER_START;
22406 gate_vma.vm_end = FIXADDR_USER_END;
22407 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
22408- gate_vma.vm_page_prot = __P101;
22409+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
22410 /*
22411 * Make sure the vDSO gets into every core dump.
22412 * Dumping its contents makes post-mortem fully interpretable later
22413@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
22414 if (compat)
22415 addr = VDSO_HIGH_BASE;
22416 else {
22417- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
22418+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
22419 if (IS_ERR_VALUE(addr)) {
22420 ret = addr;
22421 goto up_fail;
22422 }
22423 }
22424
22425- current->mm->context.vdso = (void *)addr;
22426+ current->mm->context.vdso = addr;
22427
22428 if (compat_uses_vma || !compat) {
22429 /*
22430@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
22431 }
22432
22433 current_thread_info()->sysenter_return =
22434- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
22435+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
22436
22437 up_fail:
22438 if (ret)
22439- current->mm->context.vdso = NULL;
22440+ current->mm->context.vdso = 0;
22441
22442 up_write(&mm->mmap_sem);
22443
22444@@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
22445
22446 const char *arch_vma_name(struct vm_area_struct *vma)
22447 {
22448- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
22449+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
22450 return "[vdso]";
22451+
22452+#ifdef CONFIG_PAX_SEGMEXEC
22453+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
22454+ return "[vdso]";
22455+#endif
22456+
22457 return NULL;
22458 }
22459
22460@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
22461 struct mm_struct *mm = tsk->mm;
22462
22463 /* Check to see if this task was created in compat vdso mode */
22464- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
22465+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
22466 return &gate_vma;
22467 return NULL;
22468 }
22469diff -urNp linux-2.6.32.41/arch/x86/vdso/vdso.lds.S linux-2.6.32.41/arch/x86/vdso/vdso.lds.S
22470--- linux-2.6.32.41/arch/x86/vdso/vdso.lds.S 2011-03-27 14:31:47.000000000 -0400
22471+++ linux-2.6.32.41/arch/x86/vdso/vdso.lds.S 2011-04-17 15:56:46.000000000 -0400
22472@@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
22473 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
22474 #include "vextern.h"
22475 #undef VEXTERN
22476+
22477+#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
22478+VEXTERN(fallback_gettimeofday)
22479+VEXTERN(fallback_time)
22480+VEXTERN(getcpu)
22481+#undef VEXTERN
22482diff -urNp linux-2.6.32.41/arch/x86/vdso/vextern.h linux-2.6.32.41/arch/x86/vdso/vextern.h
22483--- linux-2.6.32.41/arch/x86/vdso/vextern.h 2011-03-27 14:31:47.000000000 -0400
22484+++ linux-2.6.32.41/arch/x86/vdso/vextern.h 2011-04-17 15:56:46.000000000 -0400
22485@@ -11,6 +11,5 @@
22486 put into vextern.h and be referenced as a pointer with vdso prefix.
22487 The main kernel later fills in the values. */
22488
22489-VEXTERN(jiffies)
22490 VEXTERN(vgetcpu_mode)
22491 VEXTERN(vsyscall_gtod_data)
22492diff -urNp linux-2.6.32.41/arch/x86/vdso/vma.c linux-2.6.32.41/arch/x86/vdso/vma.c
22493--- linux-2.6.32.41/arch/x86/vdso/vma.c 2011-03-27 14:31:47.000000000 -0400
22494+++ linux-2.6.32.41/arch/x86/vdso/vma.c 2011-04-17 15:56:46.000000000 -0400
22495@@ -57,7 +57,7 @@ static int __init init_vdso_vars(void)
22496 if (!vbase)
22497 goto oom;
22498
22499- if (memcmp(vbase, "\177ELF", 4)) {
22500+ if (memcmp(vbase, ELFMAG, SELFMAG)) {
22501 printk("VDSO: I'm broken; not ELF\n");
22502 vdso_enabled = 0;
22503 }
22504@@ -66,6 +66,7 @@ static int __init init_vdso_vars(void)
22505 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
22506 #include "vextern.h"
22507 #undef VEXTERN
22508+ vunmap(vbase);
22509 return 0;
22510
22511 oom:
22512@@ -116,7 +117,7 @@ int arch_setup_additional_pages(struct l
22513 goto up_fail;
22514 }
22515
22516- current->mm->context.vdso = (void *)addr;
22517+ current->mm->context.vdso = addr;
22518
22519 ret = install_special_mapping(mm, addr, vdso_size,
22520 VM_READ|VM_EXEC|
22521@@ -124,7 +125,7 @@ int arch_setup_additional_pages(struct l
22522 VM_ALWAYSDUMP,
22523 vdso_pages);
22524 if (ret) {
22525- current->mm->context.vdso = NULL;
22526+ current->mm->context.vdso = 0;
22527 goto up_fail;
22528 }
22529
22530@@ -132,10 +133,3 @@ up_fail:
22531 up_write(&mm->mmap_sem);
22532 return ret;
22533 }
22534-
22535-static __init int vdso_setup(char *s)
22536-{
22537- vdso_enabled = simple_strtoul(s, NULL, 0);
22538- return 0;
22539-}
22540-__setup("vdso=", vdso_setup);
22541diff -urNp linux-2.6.32.41/arch/x86/xen/enlighten.c linux-2.6.32.41/arch/x86/xen/enlighten.c
22542--- linux-2.6.32.41/arch/x86/xen/enlighten.c 2011-03-27 14:31:47.000000000 -0400
22543+++ linux-2.6.32.41/arch/x86/xen/enlighten.c 2011-05-22 23:02:03.000000000 -0400
22544@@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
22545
22546 struct shared_info xen_dummy_shared_info;
22547
22548-void *xen_initial_gdt;
22549-
22550 /*
22551 * Point at some empty memory to start with. We map the real shared_info
22552 * page as soon as fixmap is up and running.
22553@@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_des
22554
22555 preempt_disable();
22556
22557- start = __get_cpu_var(idt_desc).address;
22558+ start = (unsigned long)__get_cpu_var(idt_desc).address;
22559 end = start + __get_cpu_var(idt_desc).size + 1;
22560
22561 xen_mc_flush();
22562@@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic
22563 #endif
22564 };
22565
22566-static void xen_reboot(int reason)
22567+static __noreturn void xen_reboot(int reason)
22568 {
22569 struct sched_shutdown r = { .reason = reason };
22570
22571@@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
22572 BUG();
22573 }
22574
22575-static void xen_restart(char *msg)
22576+static __noreturn void xen_restart(char *msg)
22577 {
22578 xen_reboot(SHUTDOWN_reboot);
22579 }
22580
22581-static void xen_emergency_restart(void)
22582+static __noreturn void xen_emergency_restart(void)
22583 {
22584 xen_reboot(SHUTDOWN_reboot);
22585 }
22586
22587-static void xen_machine_halt(void)
22588+static __noreturn void xen_machine_halt(void)
22589 {
22590 xen_reboot(SHUTDOWN_poweroff);
22591 }
22592@@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(
22593 */
22594 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
22595
22596-#ifdef CONFIG_X86_64
22597 /* Work out if we support NX */
22598- check_efer();
22599+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
22600+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
22601+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
22602+ unsigned l, h;
22603+
22604+#ifdef CONFIG_X86_PAE
22605+ nx_enabled = 1;
22606+#endif
22607+ __supported_pte_mask |= _PAGE_NX;
22608+ rdmsr(MSR_EFER, l, h);
22609+ l |= EFER_NX;
22610+ wrmsr(MSR_EFER, l, h);
22611+ }
22612 #endif
22613
22614 xen_setup_features();
22615@@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(
22616
22617 machine_ops = xen_machine_ops;
22618
22619- /*
22620- * The only reliable way to retain the initial address of the
22621- * percpu gdt_page is to remember it here, so we can go and
22622- * mark it RW later, when the initial percpu area is freed.
22623- */
22624- xen_initial_gdt = &per_cpu(gdt_page, 0);
22625-
22626 xen_smp_init();
22627
22628 pgd = (pgd_t *)xen_start_info->pt_base;
22629diff -urNp linux-2.6.32.41/arch/x86/xen/mmu.c linux-2.6.32.41/arch/x86/xen/mmu.c
22630--- linux-2.6.32.41/arch/x86/xen/mmu.c 2011-03-27 14:31:47.000000000 -0400
22631+++ linux-2.6.32.41/arch/x86/xen/mmu.c 2011-04-17 15:56:46.000000000 -0400
22632@@ -1714,6 +1714,8 @@ __init pgd_t *xen_setup_kernel_pagetable
22633 convert_pfn_mfn(init_level4_pgt);
22634 convert_pfn_mfn(level3_ident_pgt);
22635 convert_pfn_mfn(level3_kernel_pgt);
22636+ convert_pfn_mfn(level3_vmalloc_pgt);
22637+ convert_pfn_mfn(level3_vmemmap_pgt);
22638
22639 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
22640 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
22641@@ -1732,7 +1734,10 @@ __init pgd_t *xen_setup_kernel_pagetable
22642 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
22643 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
22644 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
22645+ set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
22646+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
22647 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
22648+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
22649 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
22650 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
22651
22652diff -urNp linux-2.6.32.41/arch/x86/xen/smp.c linux-2.6.32.41/arch/x86/xen/smp.c
22653--- linux-2.6.32.41/arch/x86/xen/smp.c 2011-03-27 14:31:47.000000000 -0400
22654+++ linux-2.6.32.41/arch/x86/xen/smp.c 2011-05-11 18:25:15.000000000 -0400
22655@@ -167,11 +167,6 @@ static void __init xen_smp_prepare_boot_
22656 {
22657 BUG_ON(smp_processor_id() != 0);
22658 native_smp_prepare_boot_cpu();
22659-
22660- /* We've switched to the "real" per-cpu gdt, so make sure the
22661- old memory can be recycled */
22662- make_lowmem_page_readwrite(xen_initial_gdt);
22663-
22664 xen_setup_vcpu_info_placement();
22665 }
22666
22667@@ -231,12 +226,12 @@ cpu_initialize_context(unsigned int cpu,
22668 gdt = get_cpu_gdt_table(cpu);
22669
22670 ctxt->flags = VGCF_IN_KERNEL;
22671- ctxt->user_regs.ds = __USER_DS;
22672- ctxt->user_regs.es = __USER_DS;
22673+ ctxt->user_regs.ds = __KERNEL_DS;
22674+ ctxt->user_regs.es = __KERNEL_DS;
22675 ctxt->user_regs.ss = __KERNEL_DS;
22676 #ifdef CONFIG_X86_32
22677 ctxt->user_regs.fs = __KERNEL_PERCPU;
22678- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
22679+ savesegment(gs, ctxt->user_regs.gs);
22680 #else
22681 ctxt->gs_base_kernel = per_cpu_offset(cpu);
22682 #endif
22683@@ -287,13 +282,12 @@ static int __cpuinit xen_cpu_up(unsigned
22684 int rc;
22685
22686 per_cpu(current_task, cpu) = idle;
22687+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
22688 #ifdef CONFIG_X86_32
22689 irq_ctx_init(cpu);
22690 #else
22691 clear_tsk_thread_flag(idle, TIF_FORK);
22692- per_cpu(kernel_stack, cpu) =
22693- (unsigned long)task_stack_page(idle) -
22694- KERNEL_STACK_OFFSET + THREAD_SIZE;
22695+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
22696 #endif
22697 xen_setup_runstate_info(cpu);
22698 xen_setup_timer(cpu);
22699diff -urNp linux-2.6.32.41/arch/x86/xen/xen-asm_32.S linux-2.6.32.41/arch/x86/xen/xen-asm_32.S
22700--- linux-2.6.32.41/arch/x86/xen/xen-asm_32.S 2011-03-27 14:31:47.000000000 -0400
22701+++ linux-2.6.32.41/arch/x86/xen/xen-asm_32.S 2011-04-22 19:13:13.000000000 -0400
22702@@ -83,14 +83,14 @@ ENTRY(xen_iret)
22703 ESP_OFFSET=4 # bytes pushed onto stack
22704
22705 /*
22706- * Store vcpu_info pointer for easy access. Do it this way to
22707- * avoid having to reload %fs
22708+ * Store vcpu_info pointer for easy access.
22709 */
22710 #ifdef CONFIG_SMP
22711- GET_THREAD_INFO(%eax)
22712- movl TI_cpu(%eax), %eax
22713- movl __per_cpu_offset(,%eax,4), %eax
22714- mov per_cpu__xen_vcpu(%eax), %eax
22715+ push %fs
22716+ mov $(__KERNEL_PERCPU), %eax
22717+ mov %eax, %fs
22718+ mov PER_CPU_VAR(xen_vcpu), %eax
22719+ pop %fs
22720 #else
22721 movl per_cpu__xen_vcpu, %eax
22722 #endif
22723diff -urNp linux-2.6.32.41/arch/x86/xen/xen-head.S linux-2.6.32.41/arch/x86/xen/xen-head.S
22724--- linux-2.6.32.41/arch/x86/xen/xen-head.S 2011-03-27 14:31:47.000000000 -0400
22725+++ linux-2.6.32.41/arch/x86/xen/xen-head.S 2011-04-17 15:56:46.000000000 -0400
22726@@ -19,6 +19,17 @@ ENTRY(startup_xen)
22727 #ifdef CONFIG_X86_32
22728 mov %esi,xen_start_info
22729 mov $init_thread_union+THREAD_SIZE,%esp
22730+#ifdef CONFIG_SMP
22731+ movl $cpu_gdt_table,%edi
22732+ movl $__per_cpu_load,%eax
22733+ movw %ax,__KERNEL_PERCPU + 2(%edi)
22734+ rorl $16,%eax
22735+ movb %al,__KERNEL_PERCPU + 4(%edi)
22736+ movb %ah,__KERNEL_PERCPU + 7(%edi)
22737+ movl $__per_cpu_end - 1,%eax
22738+ subl $__per_cpu_start,%eax
22739+ movw %ax,__KERNEL_PERCPU + 0(%edi)
22740+#endif
22741 #else
22742 mov %rsi,xen_start_info
22743 mov $init_thread_union+THREAD_SIZE,%rsp
22744diff -urNp linux-2.6.32.41/arch/x86/xen/xen-ops.h linux-2.6.32.41/arch/x86/xen/xen-ops.h
22745--- linux-2.6.32.41/arch/x86/xen/xen-ops.h 2011-03-27 14:31:47.000000000 -0400
22746+++ linux-2.6.32.41/arch/x86/xen/xen-ops.h 2011-04-17 15:56:46.000000000 -0400
22747@@ -10,8 +10,6 @@
22748 extern const char xen_hypervisor_callback[];
22749 extern const char xen_failsafe_callback[];
22750
22751-extern void *xen_initial_gdt;
22752-
22753 struct trap_info;
22754 void xen_copy_trap_info(struct trap_info *traps);
22755
22756diff -urNp linux-2.6.32.41/block/blk-integrity.c linux-2.6.32.41/block/blk-integrity.c
22757--- linux-2.6.32.41/block/blk-integrity.c 2011-03-27 14:31:47.000000000 -0400
22758+++ linux-2.6.32.41/block/blk-integrity.c 2011-04-17 15:56:46.000000000 -0400
22759@@ -278,7 +278,7 @@ static struct attribute *integrity_attrs
22760 NULL,
22761 };
22762
22763-static struct sysfs_ops integrity_ops = {
22764+static const struct sysfs_ops integrity_ops = {
22765 .show = &integrity_attr_show,
22766 .store = &integrity_attr_store,
22767 };
22768diff -urNp linux-2.6.32.41/block/blk-iopoll.c linux-2.6.32.41/block/blk-iopoll.c
22769--- linux-2.6.32.41/block/blk-iopoll.c 2011-03-27 14:31:47.000000000 -0400
22770+++ linux-2.6.32.41/block/blk-iopoll.c 2011-04-17 15:56:46.000000000 -0400
22771@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
22772 }
22773 EXPORT_SYMBOL(blk_iopoll_complete);
22774
22775-static void blk_iopoll_softirq(struct softirq_action *h)
22776+static void blk_iopoll_softirq(void)
22777 {
22778 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
22779 int rearm = 0, budget = blk_iopoll_budget;
22780diff -urNp linux-2.6.32.41/block/blk-map.c linux-2.6.32.41/block/blk-map.c
22781--- linux-2.6.32.41/block/blk-map.c 2011-03-27 14:31:47.000000000 -0400
22782+++ linux-2.6.32.41/block/blk-map.c 2011-04-18 16:57:33.000000000 -0400
22783@@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct requ
22784 * direct dma. else, set up kernel bounce buffers
22785 */
22786 uaddr = (unsigned long) ubuf;
22787- if (blk_rq_aligned(q, ubuf, len) && !map_data)
22788+ if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
22789 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
22790 else
22791 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
22792@@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_q
22793 for (i = 0; i < iov_count; i++) {
22794 unsigned long uaddr = (unsigned long)iov[i].iov_base;
22795
22796+ if (!iov[i].iov_len)
22797+ return -EINVAL;
22798+
22799 if (uaddr & queue_dma_alignment(q)) {
22800 unaligned = 1;
22801 break;
22802 }
22803- if (!iov[i].iov_len)
22804- return -EINVAL;
22805 }
22806
22807 if (unaligned || (q->dma_pad_mask & len) || map_data)
22808@@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue
22809 if (!len || !kbuf)
22810 return -EINVAL;
22811
22812- do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
22813+ do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
22814 if (do_copy)
22815 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
22816 else
22817diff -urNp linux-2.6.32.41/block/blk-softirq.c linux-2.6.32.41/block/blk-softirq.c
22818--- linux-2.6.32.41/block/blk-softirq.c 2011-03-27 14:31:47.000000000 -0400
22819+++ linux-2.6.32.41/block/blk-softirq.c 2011-04-17 15:56:46.000000000 -0400
22820@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
22821 * Softirq action handler - move entries to local list and loop over them
22822 * while passing them to the queue registered handler.
22823 */
22824-static void blk_done_softirq(struct softirq_action *h)
22825+static void blk_done_softirq(void)
22826 {
22827 struct list_head *cpu_list, local_list;
22828
22829diff -urNp linux-2.6.32.41/block/blk-sysfs.c linux-2.6.32.41/block/blk-sysfs.c
22830--- linux-2.6.32.41/block/blk-sysfs.c 2011-05-10 22:12:01.000000000 -0400
22831+++ linux-2.6.32.41/block/blk-sysfs.c 2011-05-10 22:12:26.000000000 -0400
22832@@ -414,7 +414,7 @@ static void blk_release_queue(struct kob
22833 kmem_cache_free(blk_requestq_cachep, q);
22834 }
22835
22836-static struct sysfs_ops queue_sysfs_ops = {
22837+static const struct sysfs_ops queue_sysfs_ops = {
22838 .show = queue_attr_show,
22839 .store = queue_attr_store,
22840 };
22841diff -urNp linux-2.6.32.41/block/bsg.c linux-2.6.32.41/block/bsg.c
22842--- linux-2.6.32.41/block/bsg.c 2011-03-27 14:31:47.000000000 -0400
22843+++ linux-2.6.32.41/block/bsg.c 2011-04-17 15:56:46.000000000 -0400
22844@@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
22845 struct sg_io_v4 *hdr, struct bsg_device *bd,
22846 fmode_t has_write_perm)
22847 {
22848+ unsigned char tmpcmd[sizeof(rq->__cmd)];
22849+ unsigned char *cmdptr;
22850+
22851 if (hdr->request_len > BLK_MAX_CDB) {
22852 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
22853 if (!rq->cmd)
22854 return -ENOMEM;
22855- }
22856+ cmdptr = rq->cmd;
22857+ } else
22858+ cmdptr = tmpcmd;
22859
22860- if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
22861+ if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
22862 hdr->request_len))
22863 return -EFAULT;
22864
22865+ if (cmdptr != rq->cmd)
22866+ memcpy(rq->cmd, cmdptr, hdr->request_len);
22867+
22868 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
22869 if (blk_verify_command(rq->cmd, has_write_perm))
22870 return -EPERM;
22871diff -urNp linux-2.6.32.41/block/elevator.c linux-2.6.32.41/block/elevator.c
22872--- linux-2.6.32.41/block/elevator.c 2011-03-27 14:31:47.000000000 -0400
22873+++ linux-2.6.32.41/block/elevator.c 2011-04-17 15:56:46.000000000 -0400
22874@@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, str
22875 return error;
22876 }
22877
22878-static struct sysfs_ops elv_sysfs_ops = {
22879+static const struct sysfs_ops elv_sysfs_ops = {
22880 .show = elv_attr_show,
22881 .store = elv_attr_store,
22882 };
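(Editor's sketch, not part of the patch.) The blk-integrity, blk-sysfs and elevator hunks above all apply the same change: sysfs_ops tables that are only ever read after registration are declared const so they can be placed in read-only data. A minimal, self-contained sketch of the idiom, assuming the 2.6.32 sysfs_ops callback signatures; the demo_* names are illustrative and not taken from the patch:

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t demo_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	return sprintf(buf, "demo\n");
}

static ssize_t demo_store(struct kobject *kobj, struct attribute *attr,
			  const char *buf, size_t count)
{
	return count;
}

/* never written after boot, so the table itself can live in .rodata */
static const struct sysfs_ops demo_sysfs_ops = {
	.show	= demo_show,
	.store	= demo_store,
};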
22883diff -urNp linux-2.6.32.41/block/scsi_ioctl.c linux-2.6.32.41/block/scsi_ioctl.c
22884--- linux-2.6.32.41/block/scsi_ioctl.c 2011-03-27 14:31:47.000000000 -0400
22885+++ linux-2.6.32.41/block/scsi_ioctl.c 2011-04-23 13:28:22.000000000 -0400
22886@@ -220,8 +220,20 @@ EXPORT_SYMBOL(blk_verify_command);
22887 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
22888 struct sg_io_hdr *hdr, fmode_t mode)
22889 {
22890- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
22891+ unsigned char tmpcmd[sizeof(rq->__cmd)];
22892+ unsigned char *cmdptr;
22893+
22894+ if (rq->cmd != rq->__cmd)
22895+ cmdptr = rq->cmd;
22896+ else
22897+ cmdptr = tmpcmd;
22898+
22899+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
22900 return -EFAULT;
22901+
22902+ if (cmdptr != rq->cmd)
22903+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
22904+
22905 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
22906 return -EPERM;
22907
22908@@ -430,6 +442,8 @@ int sg_scsi_ioctl(struct request_queue *
22909 int err;
22910 unsigned int in_len, out_len, bytes, opcode, cmdlen;
22911 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
22912+ unsigned char tmpcmd[sizeof(rq->__cmd)];
22913+ unsigned char *cmdptr;
22914
22915 if (!sic)
22916 return -EINVAL;
22917@@ -463,9 +477,18 @@ int sg_scsi_ioctl(struct request_queue *
22918 */
22919 err = -EFAULT;
22920 rq->cmd_len = cmdlen;
22921- if (copy_from_user(rq->cmd, sic->data, cmdlen))
22922+
22923+ if (rq->cmd != rq->__cmd)
22924+ cmdptr = rq->cmd;
22925+ else
22926+ cmdptr = tmpcmd;
22927+
22928+ if (copy_from_user(cmdptr, sic->data, cmdlen))
22929 goto error;
22930
22931+ if (rq->cmd != cmdptr)
22932+ memcpy(rq->cmd, cmdptr, cmdlen);
22933+
22934 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
22935 goto error;
22936
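(Editor's sketch, not part of the patch.) The bsg and scsi_ioctl hunks above apply the same hardening idiom: the user-supplied CDB is first copied into a correctly sized buffer and only then copied into rq->cmd, so a hostile length can never overrun the request's inline __cmd[] array. A condensed sketch of the pattern, assuming the 2.6.32 struct request layout used in the hunks; the function name is illustrative:

#include <linux/blkdev.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/*
 * Callers guarantee len <= sizeof(rq->__cmd) whenever rq->cmd still points
 * at the inline buffer; long CDBs have already switched rq->cmd to a
 * separately allocated buffer.
 */
static int demo_copy_user_cdb(struct request *rq, const void __user *ucdb,
			      unsigned int len)
{
	unsigned char tmpcmd[sizeof(rq->__cmd)];
	unsigned char *cmdptr;

	if (rq->cmd != rq->__cmd)	/* long CDB: separate allocation */
		cmdptr = rq->cmd;
	else
		cmdptr = tmpcmd;	/* short CDB: bounce via the stack */

	if (copy_from_user(cmdptr, ucdb, len))
		return -EFAULT;

	if (cmdptr != rq->cmd)
		memcpy(rq->cmd, cmdptr, len);

	return 0;
}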
22937diff -urNp linux-2.6.32.41/crypto/serpent.c linux-2.6.32.41/crypto/serpent.c
22938--- linux-2.6.32.41/crypto/serpent.c 2011-03-27 14:31:47.000000000 -0400
22939+++ linux-2.6.32.41/crypto/serpent.c 2011-05-16 21:46:57.000000000 -0400
22940@@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
22941 u32 r0,r1,r2,r3,r4;
22942 int i;
22943
22944+ pax_track_stack();
22945+
22946 /* Copy key, add padding */
22947
22948 for (i = 0; i < keylen; ++i)
22949diff -urNp linux-2.6.32.41/Documentation/dontdiff linux-2.6.32.41/Documentation/dontdiff
22950--- linux-2.6.32.41/Documentation/dontdiff 2011-03-27 14:31:47.000000000 -0400
22951+++ linux-2.6.32.41/Documentation/dontdiff 2011-05-18 20:09:36.000000000 -0400
22952@@ -1,13 +1,16 @@
22953 *.a
22954 *.aux
22955 *.bin
22956+*.cis
22957 *.cpio
22958 *.csp
22959+*.dbg
22960 *.dsp
22961 *.dvi
22962 *.elf
22963 *.eps
22964 *.fw
22965+*.gcno
22966 *.gen.S
22967 *.gif
22968 *.grep
22969@@ -38,8 +41,10 @@
22970 *.tab.h
22971 *.tex
22972 *.ver
22973+*.vim
22974 *.xml
22975 *_MODULES
22976+*_reg_safe.h
22977 *_vga16.c
22978 *~
22979 *.9
22980@@ -49,11 +54,16 @@
22981 53c700_d.h
22982 CVS
22983 ChangeSet
22984+GPATH
22985+GRTAGS
22986+GSYMS
22987+GTAGS
22988 Image
22989 Kerntypes
22990 Module.markers
22991 Module.symvers
22992 PENDING
22993+PERF*
22994 SCCS
22995 System.map*
22996 TAGS
22997@@ -76,7 +86,11 @@ btfixupprep
22998 build
22999 bvmlinux
23000 bzImage*
23001+capability_names.h
23002+capflags.c
23003 classlist.h*
23004+clut_vga16.c
23005+common-cmds.h
23006 comp*.log
23007 compile.h*
23008 conf
23009@@ -103,13 +117,14 @@ gen_crc32table
23010 gen_init_cpio
23011 genksyms
23012 *_gray256.c
23013+hash
23014 ihex2fw
23015 ikconfig.h*
23016 initramfs_data.cpio
23017+initramfs_data.cpio.bz2
23018 initramfs_data.cpio.gz
23019 initramfs_list
23020 kallsyms
23021-kconfig
23022 keywords.c
23023 ksym.c*
23024 ksym.h*
23025@@ -133,7 +148,9 @@ mkboot
23026 mkbugboot
23027 mkcpustr
23028 mkdep
23029+mkpiggy
23030 mkprep
23031+mkregtable
23032 mktables
23033 mktree
23034 modpost
23035@@ -149,6 +166,7 @@ patches*
23036 pca200e.bin
23037 pca200e_ecd.bin2
23038 piggy.gz
23039+piggy.S
23040 piggyback
23041 pnmtologo
23042 ppc_defs.h*
23043@@ -157,12 +175,15 @@ qconf
23044 raid6altivec*.c
23045 raid6int*.c
23046 raid6tables.c
23047+regdb.c
23048 relocs
23049+rlim_names.h
23050 series
23051 setup
23052 setup.bin
23053 setup.elf
23054 sImage
23055+slabinfo
23056 sm_tbl*
23057 split-include
23058 syscalltab.h
23059@@ -186,14 +207,20 @@ version.h*
23060 vmlinux
23061 vmlinux-*
23062 vmlinux.aout
23063+vmlinux.bin.all
23064+vmlinux.bin.bz2
23065 vmlinux.lds
23066+vmlinux.relocs
23067+voffset.h
23068 vsyscall.lds
23069 vsyscall_32.lds
23070 wanxlfw.inc
23071 uImage
23072 unifdef
23073+utsrelease.h
23074 wakeup.bin
23075 wakeup.elf
23076 wakeup.lds
23077 zImage*
23078 zconf.hash.c
23079+zoffset.h
23080diff -urNp linux-2.6.32.41/Documentation/kernel-parameters.txt linux-2.6.32.41/Documentation/kernel-parameters.txt
23081--- linux-2.6.32.41/Documentation/kernel-parameters.txt 2011-03-27 14:31:47.000000000 -0400
23082+++ linux-2.6.32.41/Documentation/kernel-parameters.txt 2011-04-17 15:56:45.000000000 -0400
23083@@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters.
23084 the specified number of seconds. This is to be used if
23085 your oopses keep scrolling off the screen.
23086
23087+ pax_nouderef [X86] disables UDEREF. Most likely needed in certain
23088+ virtualization environments that do not cope well with the
23089+ expand-down segment used by UDEREF on X86-32 or with the
23090+ frequent page table updates on X86-64.
23091+
23092+ pax_softmode= 0/1 to disable/enable PaX softmode at boot time.
23093+
23094 pcbit= [HW,ISDN]
23095
23096 pcd. [PARIDE]
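(Editor's note, not part of the patch.) Both options documented above are ordinary kernel command-line parameters. Assuming a GRUB-style boot loader, with illustrative paths, they would simply be appended to the kernel line, e.g.:

    kernel /boot/vmlinuz-2.6.32.41-grsec root=/dev/sda1 ro pax_softmode=1 pax_nouderef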
23097diff -urNp linux-2.6.32.41/drivers/acpi/acpi_pad.c linux-2.6.32.41/drivers/acpi/acpi_pad.c
23098--- linux-2.6.32.41/drivers/acpi/acpi_pad.c 2011-03-27 14:31:47.000000000 -0400
23099+++ linux-2.6.32.41/drivers/acpi/acpi_pad.c 2011-04-17 15:56:46.000000000 -0400
23100@@ -30,7 +30,7 @@
23101 #include <acpi/acpi_bus.h>
23102 #include <acpi/acpi_drivers.h>
23103
23104-#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
23105+#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
23106 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
23107 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
23108 static DEFINE_MUTEX(isolated_cpus_lock);
23109diff -urNp linux-2.6.32.41/drivers/acpi/battery.c linux-2.6.32.41/drivers/acpi/battery.c
23110--- linux-2.6.32.41/drivers/acpi/battery.c 2011-03-27 14:31:47.000000000 -0400
23111+++ linux-2.6.32.41/drivers/acpi/battery.c 2011-04-17 15:56:46.000000000 -0400
23112@@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
23113 }
23114
23115 static struct battery_file {
23116- struct file_operations ops;
23117+ const struct file_operations ops;
23118 mode_t mode;
23119 const char *name;
23120 } acpi_battery_file[] = {
23121diff -urNp linux-2.6.32.41/drivers/acpi/dock.c linux-2.6.32.41/drivers/acpi/dock.c
23122--- linux-2.6.32.41/drivers/acpi/dock.c 2011-03-27 14:31:47.000000000 -0400
23123+++ linux-2.6.32.41/drivers/acpi/dock.c 2011-04-17 15:56:46.000000000 -0400
23124@@ -77,7 +77,7 @@ struct dock_dependent_device {
23125 struct list_head list;
23126 struct list_head hotplug_list;
23127 acpi_handle handle;
23128- struct acpi_dock_ops *ops;
23129+ const struct acpi_dock_ops *ops;
23130 void *context;
23131 };
23132
23133@@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifi
23134 * the dock driver after _DCK is executed.
23135 */
23136 int
23137-register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
23138+register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
23139 void *context)
23140 {
23141 struct dock_dependent_device *dd;
23142diff -urNp linux-2.6.32.41/drivers/acpi/osl.c linux-2.6.32.41/drivers/acpi/osl.c
23143--- linux-2.6.32.41/drivers/acpi/osl.c 2011-03-27 14:31:47.000000000 -0400
23144+++ linux-2.6.32.41/drivers/acpi/osl.c 2011-04-17 15:56:46.000000000 -0400
23145@@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_addres
23146 void __iomem *virt_addr;
23147
23148 virt_addr = ioremap(phys_addr, width);
23149+ if (!virt_addr)
23150+ return AE_NO_MEMORY;
23151 if (!value)
23152 value = &dummy;
23153
23154@@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_addre
23155 void __iomem *virt_addr;
23156
23157 virt_addr = ioremap(phys_addr, width);
23158+ if (!virt_addr)
23159+ return AE_NO_MEMORY;
23160
23161 switch (width) {
23162 case 8:
23163diff -urNp linux-2.6.32.41/drivers/acpi/power_meter.c linux-2.6.32.41/drivers/acpi/power_meter.c
23164--- linux-2.6.32.41/drivers/acpi/power_meter.c 2011-03-27 14:31:47.000000000 -0400
23165+++ linux-2.6.32.41/drivers/acpi/power_meter.c 2011-04-17 15:56:46.000000000 -0400
23166@@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *d
23167 return res;
23168
23169 temp /= 1000;
23170- if (temp < 0)
23171- return -EINVAL;
23172
23173 mutex_lock(&resource->lock);
23174 resource->trip[attr->index - 7] = temp;
23175diff -urNp linux-2.6.32.41/drivers/acpi/proc.c linux-2.6.32.41/drivers/acpi/proc.c
23176--- linux-2.6.32.41/drivers/acpi/proc.c 2011-03-27 14:31:47.000000000 -0400
23177+++ linux-2.6.32.41/drivers/acpi/proc.c 2011-04-17 15:56:46.000000000 -0400
23178@@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct f
23179 size_t count, loff_t * ppos)
23180 {
23181 struct list_head *node, *next;
23182- char strbuf[5];
23183- char str[5] = "";
23184- unsigned int len = count;
23185+ char strbuf[5] = {0};
23186 struct acpi_device *found_dev = NULL;
23187
23188- if (len > 4)
23189- len = 4;
23190- if (len < 0)
23191- return -EFAULT;
23192+ if (count > 4)
23193+ count = 4;
23194
23195- if (copy_from_user(strbuf, buffer, len))
23196+ if (copy_from_user(strbuf, buffer, count))
23197 return -EFAULT;
23198- strbuf[len] = '\0';
23199- sscanf(strbuf, "%s", str);
23200+ strbuf[count] = '\0';
23201
23202 mutex_lock(&acpi_device_lock);
23203 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
23204@@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct f
23205 if (!dev->wakeup.flags.valid)
23206 continue;
23207
23208- if (!strncmp(dev->pnp.bus_id, str, 4)) {
23209+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
23210 dev->wakeup.state.enabled =
23211 dev->wakeup.state.enabled ? 0 : 1;
23212 found_dev = dev;
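(Editor's sketch, not part of the patch.) The proc.c hunk above replaces a signed length check that could never trip (count is a size_t) with a simple bound-then-copy sequence into a zero-initialized fixed-size buffer. A minimal sketch of the resulting pattern, with an illustrative handler name:

#include <linux/fs.h>
#include <linux/uaccess.h>

static ssize_t demo_proc_write(struct file *file, const char __user *buffer,
			       size_t count, loff_t *ppos)
{
	char strbuf[5] = {0};

	if (count > 4)			/* bound the unsigned length first */
		count = 4;
	if (copy_from_user(strbuf, buffer, count))
		return -EFAULT;
	strbuf[count] = '\0';		/* always NUL-terminated */
	/* ... compare strbuf against the 4-character bus id ... */
	return count;
}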
23213diff -urNp linux-2.6.32.41/drivers/acpi/processor_core.c linux-2.6.32.41/drivers/acpi/processor_core.c
23214--- linux-2.6.32.41/drivers/acpi/processor_core.c 2011-03-27 14:31:47.000000000 -0400
23215+++ linux-2.6.32.41/drivers/acpi/processor_core.c 2011-04-17 15:56:46.000000000 -0400
23216@@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(
23217 return 0;
23218 }
23219
23220- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
23221+ BUG_ON(pr->id >= nr_cpu_ids);
23222
23223 /*
23224 * Buggy BIOS check
23225diff -urNp linux-2.6.32.41/drivers/acpi/sbshc.c linux-2.6.32.41/drivers/acpi/sbshc.c
23226--- linux-2.6.32.41/drivers/acpi/sbshc.c 2011-03-27 14:31:47.000000000 -0400
23227+++ linux-2.6.32.41/drivers/acpi/sbshc.c 2011-04-17 15:56:46.000000000 -0400
23228@@ -17,7 +17,7 @@
23229
23230 #define PREFIX "ACPI: "
23231
23232-#define ACPI_SMB_HC_CLASS "smbus_host_controller"
23233+#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
23234 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
23235
23236 struct acpi_smb_hc {
23237diff -urNp linux-2.6.32.41/drivers/acpi/sleep.c linux-2.6.32.41/drivers/acpi/sleep.c
23238--- linux-2.6.32.41/drivers/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
23239+++ linux-2.6.32.41/drivers/acpi/sleep.c 2011-04-17 15:56:46.000000000 -0400
23240@@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(susp
23241 }
23242 }
23243
23244-static struct platform_suspend_ops acpi_suspend_ops = {
23245+static const struct platform_suspend_ops acpi_suspend_ops = {
23246 .valid = acpi_suspend_state_valid,
23247 .begin = acpi_suspend_begin,
23248 .prepare_late = acpi_pm_prepare,
23249@@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspen
23250 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
23251 * been requested.
23252 */
23253-static struct platform_suspend_ops acpi_suspend_ops_old = {
23254+static const struct platform_suspend_ops acpi_suspend_ops_old = {
23255 .valid = acpi_suspend_state_valid,
23256 .begin = acpi_suspend_begin_old,
23257 .prepare_late = acpi_pm_disable_gpes,
23258@@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
23259 acpi_enable_all_runtime_gpes();
23260 }
23261
23262-static struct platform_hibernation_ops acpi_hibernation_ops = {
23263+static const struct platform_hibernation_ops acpi_hibernation_ops = {
23264 .begin = acpi_hibernation_begin,
23265 .end = acpi_pm_end,
23266 .pre_snapshot = acpi_hibernation_pre_snapshot,
23267@@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot
23268 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
23269 * been requested.
23270 */
23271-static struct platform_hibernation_ops acpi_hibernation_ops_old = {
23272+static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
23273 .begin = acpi_hibernation_begin_old,
23274 .end = acpi_pm_end,
23275 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
23276diff -urNp linux-2.6.32.41/drivers/acpi/video.c linux-2.6.32.41/drivers/acpi/video.c
23277--- linux-2.6.32.41/drivers/acpi/video.c 2011-03-27 14:31:47.000000000 -0400
23278+++ linux-2.6.32.41/drivers/acpi/video.c 2011-04-17 15:56:46.000000000 -0400
23279@@ -359,7 +359,7 @@ static int acpi_video_set_brightness(str
23280 vd->brightness->levels[request_level]);
23281 }
23282
23283-static struct backlight_ops acpi_backlight_ops = {
23284+static const struct backlight_ops acpi_backlight_ops = {
23285 .get_brightness = acpi_video_get_brightness,
23286 .update_status = acpi_video_set_brightness,
23287 };
23288diff -urNp linux-2.6.32.41/drivers/ata/ahci.c linux-2.6.32.41/drivers/ata/ahci.c
23289--- linux-2.6.32.41/drivers/ata/ahci.c 2011-03-27 14:31:47.000000000 -0400
23290+++ linux-2.6.32.41/drivers/ata/ahci.c 2011-04-23 12:56:10.000000000 -0400
23291@@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sh
23292 .sdev_attrs = ahci_sdev_attrs,
23293 };
23294
23295-static struct ata_port_operations ahci_ops = {
23296+static const struct ata_port_operations ahci_ops = {
23297 .inherits = &sata_pmp_port_ops,
23298
23299 .qc_defer = sata_pmp_qc_defer_cmd_switch,
23300@@ -424,17 +424,17 @@ static struct ata_port_operations ahci_o
23301 .port_stop = ahci_port_stop,
23302 };
23303
23304-static struct ata_port_operations ahci_vt8251_ops = {
23305+static const struct ata_port_operations ahci_vt8251_ops = {
23306 .inherits = &ahci_ops,
23307 .hardreset = ahci_vt8251_hardreset,
23308 };
23309
23310-static struct ata_port_operations ahci_p5wdh_ops = {
23311+static const struct ata_port_operations ahci_p5wdh_ops = {
23312 .inherits = &ahci_ops,
23313 .hardreset = ahci_p5wdh_hardreset,
23314 };
23315
23316-static struct ata_port_operations ahci_sb600_ops = {
23317+static const struct ata_port_operations ahci_sb600_ops = {
23318 .inherits = &ahci_ops,
23319 .softreset = ahci_sb600_softreset,
23320 .pmp_softreset = ahci_sb600_softreset,
23321diff -urNp linux-2.6.32.41/drivers/ata/ata_generic.c linux-2.6.32.41/drivers/ata/ata_generic.c
23322--- linux-2.6.32.41/drivers/ata/ata_generic.c 2011-03-27 14:31:47.000000000 -0400
23323+++ linux-2.6.32.41/drivers/ata/ata_generic.c 2011-04-17 15:56:46.000000000 -0400
23324@@ -104,7 +104,7 @@ static struct scsi_host_template generic
23325 ATA_BMDMA_SHT(DRV_NAME),
23326 };
23327
23328-static struct ata_port_operations generic_port_ops = {
23329+static const struct ata_port_operations generic_port_ops = {
23330 .inherits = &ata_bmdma_port_ops,
23331 .cable_detect = ata_cable_unknown,
23332 .set_mode = generic_set_mode,
23333diff -urNp linux-2.6.32.41/drivers/ata/ata_piix.c linux-2.6.32.41/drivers/ata/ata_piix.c
23334--- linux-2.6.32.41/drivers/ata/ata_piix.c 2011-03-27 14:31:47.000000000 -0400
23335+++ linux-2.6.32.41/drivers/ata/ata_piix.c 2011-04-23 12:56:10.000000000 -0400
23336@@ -318,7 +318,7 @@ static struct scsi_host_template piix_sh
23337 ATA_BMDMA_SHT(DRV_NAME),
23338 };
23339
23340-static struct ata_port_operations piix_pata_ops = {
23341+static const struct ata_port_operations piix_pata_ops = {
23342 .inherits = &ata_bmdma32_port_ops,
23343 .cable_detect = ata_cable_40wire,
23344 .set_piomode = piix_set_piomode,
23345@@ -326,22 +326,22 @@ static struct ata_port_operations piix_p
23346 .prereset = piix_pata_prereset,
23347 };
23348
23349-static struct ata_port_operations piix_vmw_ops = {
23350+static const struct ata_port_operations piix_vmw_ops = {
23351 .inherits = &piix_pata_ops,
23352 .bmdma_status = piix_vmw_bmdma_status,
23353 };
23354
23355-static struct ata_port_operations ich_pata_ops = {
23356+static const struct ata_port_operations ich_pata_ops = {
23357 .inherits = &piix_pata_ops,
23358 .cable_detect = ich_pata_cable_detect,
23359 .set_dmamode = ich_set_dmamode,
23360 };
23361
23362-static struct ata_port_operations piix_sata_ops = {
23363+static const struct ata_port_operations piix_sata_ops = {
23364 .inherits = &ata_bmdma_port_ops,
23365 };
23366
23367-static struct ata_port_operations piix_sidpr_sata_ops = {
23368+static const struct ata_port_operations piix_sidpr_sata_ops = {
23369 .inherits = &piix_sata_ops,
23370 .hardreset = sata_std_hardreset,
23371 .scr_read = piix_sidpr_scr_read,
23372diff -urNp linux-2.6.32.41/drivers/ata/libata-acpi.c linux-2.6.32.41/drivers/ata/libata-acpi.c
23373--- linux-2.6.32.41/drivers/ata/libata-acpi.c 2011-03-27 14:31:47.000000000 -0400
23374+++ linux-2.6.32.41/drivers/ata/libata-acpi.c 2011-04-17 15:56:46.000000000 -0400
23375@@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_han
23376 ata_acpi_uevent(dev->link->ap, dev, event);
23377 }
23378
23379-static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
23380+static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
23381 .handler = ata_acpi_dev_notify_dock,
23382 .uevent = ata_acpi_dev_uevent,
23383 };
23384
23385-static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
23386+static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
23387 .handler = ata_acpi_ap_notify_dock,
23388 .uevent = ata_acpi_ap_uevent,
23389 };
23390diff -urNp linux-2.6.32.41/drivers/ata/libata-core.c linux-2.6.32.41/drivers/ata/libata-core.c
23391--- linux-2.6.32.41/drivers/ata/libata-core.c 2011-03-27 14:31:47.000000000 -0400
23392+++ linux-2.6.32.41/drivers/ata/libata-core.c 2011-04-23 12:56:10.000000000 -0400
23393@@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *
23394 struct ata_port *ap;
23395 unsigned int tag;
23396
23397- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23398+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23399 ap = qc->ap;
23400
23401 qc->flags = 0;
23402@@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued
23403 struct ata_port *ap;
23404 struct ata_link *link;
23405
23406- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23407+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23408 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
23409 ap = qc->ap;
23410 link = qc->dev->link;
23411@@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device
23412 * LOCKING:
23413 * None.
23414 */
23415-static void ata_finalize_port_ops(struct ata_port_operations *ops)
23416+static void ata_finalize_port_ops(const struct ata_port_operations *ops)
23417 {
23418 static DEFINE_SPINLOCK(lock);
23419 const struct ata_port_operations *cur;
23420@@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct
23421 return;
23422
23423 spin_lock(&lock);
23424+ pax_open_kernel();
23425
23426 for (cur = ops->inherits; cur; cur = cur->inherits) {
23427 void **inherit = (void **)cur;
23428@@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct
23429 if (IS_ERR(*pp))
23430 *pp = NULL;
23431
23432- ops->inherits = NULL;
23433+ ((struct ata_port_operations *)ops)->inherits = NULL;
23434
23435+ pax_close_kernel();
23436 spin_unlock(&lock);
23437 }
23438
23439@@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host
23440 */
23441 /* KILLME - the only user left is ipr */
23442 void ata_host_init(struct ata_host *host, struct device *dev,
23443- unsigned long flags, struct ata_port_operations *ops)
23444+ unsigned long flags, const struct ata_port_operations *ops)
23445 {
23446 spin_lock_init(&host->lock);
23447 host->dev = dev;
23448@@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(stru
23449 /* truly dummy */
23450 }
23451
23452-struct ata_port_operations ata_dummy_port_ops = {
23453+const struct ata_port_operations ata_dummy_port_ops = {
23454 .qc_prep = ata_noop_qc_prep,
23455 .qc_issue = ata_dummy_qc_issue,
23456 .error_handler = ata_dummy_error_handler,
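(Editor's sketch, not part of the patch.) The libata-core hunk above also shows how the patch handles the one case where a now-const ops table still needs a one-time write: ata_finalize_port_ops() casts the const away and brackets the store with pax_open_kernel()/pax_close_kernel(), the PaX helpers defined elsewhere in this patch that temporarily lift the write protection on otherwise read-only kernel data. A condensed sketch, assuming those helpers are available; the demo_* names are illustrative:

#include <linux/libata.h>

/* a driver ops table that lives in read-only data after this patch */
static const struct ata_port_operations demo_port_ops = {
	.inherits = &ata_bmdma_port_ops,
};

static void demo_finalize(const struct ata_port_operations *ops)
{
	/* one-time init write: briefly reopen read-only kernel data */
	pax_open_kernel();
	((struct ata_port_operations *)ops)->inherits = NULL;
	pax_close_kernel();
}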
23457diff -urNp linux-2.6.32.41/drivers/ata/libata-eh.c linux-2.6.32.41/drivers/ata/libata-eh.c
23458--- linux-2.6.32.41/drivers/ata/libata-eh.c 2011-03-27 14:31:47.000000000 -0400
23459+++ linux-2.6.32.41/drivers/ata/libata-eh.c 2011-05-16 21:46:57.000000000 -0400
23460@@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
23461 {
23462 struct ata_link *link;
23463
23464+ pax_track_stack();
23465+
23466 ata_for_each_link(link, ap, HOST_FIRST)
23467 ata_eh_link_report(link);
23468 }
23469@@ -3590,7 +3592,7 @@ void ata_do_eh(struct ata_port *ap, ata_
23470 */
23471 void ata_std_error_handler(struct ata_port *ap)
23472 {
23473- struct ata_port_operations *ops = ap->ops;
23474+ const struct ata_port_operations *ops = ap->ops;
23475 ata_reset_fn_t hardreset = ops->hardreset;
23476
23477 /* ignore built-in hardreset if SCR access is not available */
23478diff -urNp linux-2.6.32.41/drivers/ata/libata-pmp.c linux-2.6.32.41/drivers/ata/libata-pmp.c
23479--- linux-2.6.32.41/drivers/ata/libata-pmp.c 2011-03-27 14:31:47.000000000 -0400
23480+++ linux-2.6.32.41/drivers/ata/libata-pmp.c 2011-04-17 15:56:46.000000000 -0400
23481@@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(str
23482 */
23483 static int sata_pmp_eh_recover(struct ata_port *ap)
23484 {
23485- struct ata_port_operations *ops = ap->ops;
23486+ const struct ata_port_operations *ops = ap->ops;
23487 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
23488 struct ata_link *pmp_link = &ap->link;
23489 struct ata_device *pmp_dev = pmp_link->device;
23490diff -urNp linux-2.6.32.41/drivers/ata/pata_acpi.c linux-2.6.32.41/drivers/ata/pata_acpi.c
23491--- linux-2.6.32.41/drivers/ata/pata_acpi.c 2011-03-27 14:31:47.000000000 -0400
23492+++ linux-2.6.32.41/drivers/ata/pata_acpi.c 2011-04-17 15:56:46.000000000 -0400
23493@@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_s
23494 ATA_BMDMA_SHT(DRV_NAME),
23495 };
23496
23497-static struct ata_port_operations pacpi_ops = {
23498+static const struct ata_port_operations pacpi_ops = {
23499 .inherits = &ata_bmdma_port_ops,
23500 .qc_issue = pacpi_qc_issue,
23501 .cable_detect = pacpi_cable_detect,
23502diff -urNp linux-2.6.32.41/drivers/ata/pata_ali.c linux-2.6.32.41/drivers/ata/pata_ali.c
23503--- linux-2.6.32.41/drivers/ata/pata_ali.c 2011-03-27 14:31:47.000000000 -0400
23504+++ linux-2.6.32.41/drivers/ata/pata_ali.c 2011-04-17 15:56:46.000000000 -0400
23505@@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht
23506 * Port operations for PIO only ALi
23507 */
23508
23509-static struct ata_port_operations ali_early_port_ops = {
23510+static const struct ata_port_operations ali_early_port_ops = {
23511 .inherits = &ata_sff_port_ops,
23512 .cable_detect = ata_cable_40wire,
23513 .set_piomode = ali_set_piomode,
23514@@ -382,7 +382,7 @@ static const struct ata_port_operations
23515 * Port operations for DMA capable ALi without cable
23516 * detect
23517 */
23518-static struct ata_port_operations ali_20_port_ops = {
23519+static const struct ata_port_operations ali_20_port_ops = {
23520 .inherits = &ali_dma_base_ops,
23521 .cable_detect = ata_cable_40wire,
23522 .mode_filter = ali_20_filter,
23523@@ -393,7 +393,7 @@ static struct ata_port_operations ali_20
23524 /*
23525 * Port operations for DMA capable ALi with cable detect
23526 */
23527-static struct ata_port_operations ali_c2_port_ops = {
23528+static const struct ata_port_operations ali_c2_port_ops = {
23529 .inherits = &ali_dma_base_ops,
23530 .check_atapi_dma = ali_check_atapi_dma,
23531 .cable_detect = ali_c2_cable_detect,
23532@@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2
23533 /*
23534 * Port operations for DMA capable ALi with cable detect
23535 */
23536-static struct ata_port_operations ali_c4_port_ops = {
23537+static const struct ata_port_operations ali_c4_port_ops = {
23538 .inherits = &ali_dma_base_ops,
23539 .check_atapi_dma = ali_check_atapi_dma,
23540 .cable_detect = ali_c2_cable_detect,
23541@@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4
23542 /*
23543 * Port operations for DMA capable ALi with cable detect and LBA48
23544 */
23545-static struct ata_port_operations ali_c5_port_ops = {
23546+static const struct ata_port_operations ali_c5_port_ops = {
23547 .inherits = &ali_dma_base_ops,
23548 .check_atapi_dma = ali_check_atapi_dma,
23549 .dev_config = ali_warn_atapi_dma,
23550diff -urNp linux-2.6.32.41/drivers/ata/pata_amd.c linux-2.6.32.41/drivers/ata/pata_amd.c
23551--- linux-2.6.32.41/drivers/ata/pata_amd.c 2011-03-27 14:31:47.000000000 -0400
23552+++ linux-2.6.32.41/drivers/ata/pata_amd.c 2011-04-17 15:56:46.000000000 -0400
23553@@ -397,28 +397,28 @@ static const struct ata_port_operations
23554 .prereset = amd_pre_reset,
23555 };
23556
23557-static struct ata_port_operations amd33_port_ops = {
23558+static const struct ata_port_operations amd33_port_ops = {
23559 .inherits = &amd_base_port_ops,
23560 .cable_detect = ata_cable_40wire,
23561 .set_piomode = amd33_set_piomode,
23562 .set_dmamode = amd33_set_dmamode,
23563 };
23564
23565-static struct ata_port_operations amd66_port_ops = {
23566+static const struct ata_port_operations amd66_port_ops = {
23567 .inherits = &amd_base_port_ops,
23568 .cable_detect = ata_cable_unknown,
23569 .set_piomode = amd66_set_piomode,
23570 .set_dmamode = amd66_set_dmamode,
23571 };
23572
23573-static struct ata_port_operations amd100_port_ops = {
23574+static const struct ata_port_operations amd100_port_ops = {
23575 .inherits = &amd_base_port_ops,
23576 .cable_detect = ata_cable_unknown,
23577 .set_piomode = amd100_set_piomode,
23578 .set_dmamode = amd100_set_dmamode,
23579 };
23580
23581-static struct ata_port_operations amd133_port_ops = {
23582+static const struct ata_port_operations amd133_port_ops = {
23583 .inherits = &amd_base_port_ops,
23584 .cable_detect = amd_cable_detect,
23585 .set_piomode = amd133_set_piomode,
23586@@ -433,13 +433,13 @@ static const struct ata_port_operations
23587 .host_stop = nv_host_stop,
23588 };
23589
23590-static struct ata_port_operations nv100_port_ops = {
23591+static const struct ata_port_operations nv100_port_ops = {
23592 .inherits = &nv_base_port_ops,
23593 .set_piomode = nv100_set_piomode,
23594 .set_dmamode = nv100_set_dmamode,
23595 };
23596
23597-static struct ata_port_operations nv133_port_ops = {
23598+static const struct ata_port_operations nv133_port_ops = {
23599 .inherits = &nv_base_port_ops,
23600 .set_piomode = nv133_set_piomode,
23601 .set_dmamode = nv133_set_dmamode,
23602diff -urNp linux-2.6.32.41/drivers/ata/pata_artop.c linux-2.6.32.41/drivers/ata/pata_artop.c
23603--- linux-2.6.32.41/drivers/ata/pata_artop.c 2011-03-27 14:31:47.000000000 -0400
23604+++ linux-2.6.32.41/drivers/ata/pata_artop.c 2011-04-17 15:56:46.000000000 -0400
23605@@ -311,7 +311,7 @@ static struct scsi_host_template artop_s
23606 ATA_BMDMA_SHT(DRV_NAME),
23607 };
23608
23609-static struct ata_port_operations artop6210_ops = {
23610+static const struct ata_port_operations artop6210_ops = {
23611 .inherits = &ata_bmdma_port_ops,
23612 .cable_detect = ata_cable_40wire,
23613 .set_piomode = artop6210_set_piomode,
23614@@ -320,7 +320,7 @@ static struct ata_port_operations artop6
23615 .qc_defer = artop6210_qc_defer,
23616 };
23617
23618-static struct ata_port_operations artop6260_ops = {
23619+static const struct ata_port_operations artop6260_ops = {
23620 .inherits = &ata_bmdma_port_ops,
23621 .cable_detect = artop6260_cable_detect,
23622 .set_piomode = artop6260_set_piomode,
23623diff -urNp linux-2.6.32.41/drivers/ata/pata_at32.c linux-2.6.32.41/drivers/ata/pata_at32.c
23624--- linux-2.6.32.41/drivers/ata/pata_at32.c 2011-03-27 14:31:47.000000000 -0400
23625+++ linux-2.6.32.41/drivers/ata/pata_at32.c 2011-04-17 15:56:46.000000000 -0400
23626@@ -172,7 +172,7 @@ static struct scsi_host_template at32_sh
23627 ATA_PIO_SHT(DRV_NAME),
23628 };
23629
23630-static struct ata_port_operations at32_port_ops = {
23631+static const struct ata_port_operations at32_port_ops = {
23632 .inherits = &ata_sff_port_ops,
23633 .cable_detect = ata_cable_40wire,
23634 .set_piomode = pata_at32_set_piomode,
23635diff -urNp linux-2.6.32.41/drivers/ata/pata_at91.c linux-2.6.32.41/drivers/ata/pata_at91.c
23636--- linux-2.6.32.41/drivers/ata/pata_at91.c 2011-03-27 14:31:47.000000000 -0400
23637+++ linux-2.6.32.41/drivers/ata/pata_at91.c 2011-04-17 15:56:46.000000000 -0400
23638@@ -195,7 +195,7 @@ static struct scsi_host_template pata_at
23639 ATA_PIO_SHT(DRV_NAME),
23640 };
23641
23642-static struct ata_port_operations pata_at91_port_ops = {
23643+static const struct ata_port_operations pata_at91_port_ops = {
23644 .inherits = &ata_sff_port_ops,
23645
23646 .sff_data_xfer = pata_at91_data_xfer_noirq,
23647diff -urNp linux-2.6.32.41/drivers/ata/pata_atiixp.c linux-2.6.32.41/drivers/ata/pata_atiixp.c
23648--- linux-2.6.32.41/drivers/ata/pata_atiixp.c 2011-03-27 14:31:47.000000000 -0400
23649+++ linux-2.6.32.41/drivers/ata/pata_atiixp.c 2011-04-17 15:56:46.000000000 -0400
23650@@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_
23651 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
23652 };
23653
23654-static struct ata_port_operations atiixp_port_ops = {
23655+static const struct ata_port_operations atiixp_port_ops = {
23656 .inherits = &ata_bmdma_port_ops,
23657
23658 .qc_prep = ata_sff_dumb_qc_prep,
23659diff -urNp linux-2.6.32.41/drivers/ata/pata_atp867x.c linux-2.6.32.41/drivers/ata/pata_atp867x.c
23660--- linux-2.6.32.41/drivers/ata/pata_atp867x.c 2011-03-27 14:31:47.000000000 -0400
23661+++ linux-2.6.32.41/drivers/ata/pata_atp867x.c 2011-04-17 15:56:46.000000000 -0400
23662@@ -274,7 +274,7 @@ static struct scsi_host_template atp867x
23663 ATA_BMDMA_SHT(DRV_NAME),
23664 };
23665
23666-static struct ata_port_operations atp867x_ops = {
23667+static const struct ata_port_operations atp867x_ops = {
23668 .inherits = &ata_bmdma_port_ops,
23669 .cable_detect = atp867x_cable_detect,
23670 .set_piomode = atp867x_set_piomode,
23671diff -urNp linux-2.6.32.41/drivers/ata/pata_bf54x.c linux-2.6.32.41/drivers/ata/pata_bf54x.c
23672--- linux-2.6.32.41/drivers/ata/pata_bf54x.c 2011-03-27 14:31:47.000000000 -0400
23673+++ linux-2.6.32.41/drivers/ata/pata_bf54x.c 2011-04-17 15:56:46.000000000 -0400
23674@@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sh
23675 .dma_boundary = ATA_DMA_BOUNDARY,
23676 };
23677
23678-static struct ata_port_operations bfin_pata_ops = {
23679+static const struct ata_port_operations bfin_pata_ops = {
23680 .inherits = &ata_sff_port_ops,
23681
23682 .set_piomode = bfin_set_piomode,
23683diff -urNp linux-2.6.32.41/drivers/ata/pata_cmd640.c linux-2.6.32.41/drivers/ata/pata_cmd640.c
23684--- linux-2.6.32.41/drivers/ata/pata_cmd640.c 2011-03-27 14:31:47.000000000 -0400
23685+++ linux-2.6.32.41/drivers/ata/pata_cmd640.c 2011-04-17 15:56:46.000000000 -0400
23686@@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_
23687 ATA_BMDMA_SHT(DRV_NAME),
23688 };
23689
23690-static struct ata_port_operations cmd640_port_ops = {
23691+static const struct ata_port_operations cmd640_port_ops = {
23692 .inherits = &ata_bmdma_port_ops,
23693 /* In theory xfer_noirq is not needed once we kill the prefetcher */
23694 .sff_data_xfer = ata_sff_data_xfer_noirq,
23695diff -urNp linux-2.6.32.41/drivers/ata/pata_cmd64x.c linux-2.6.32.41/drivers/ata/pata_cmd64x.c
23696--- linux-2.6.32.41/drivers/ata/pata_cmd64x.c 2011-03-27 14:31:47.000000000 -0400
23697+++ linux-2.6.32.41/drivers/ata/pata_cmd64x.c 2011-04-17 15:56:46.000000000 -0400
23698@@ -275,18 +275,18 @@ static const struct ata_port_operations
23699 .set_dmamode = cmd64x_set_dmamode,
23700 };
23701
23702-static struct ata_port_operations cmd64x_port_ops = {
23703+static const struct ata_port_operations cmd64x_port_ops = {
23704 .inherits = &cmd64x_base_ops,
23705 .cable_detect = ata_cable_40wire,
23706 };
23707
23708-static struct ata_port_operations cmd646r1_port_ops = {
23709+static const struct ata_port_operations cmd646r1_port_ops = {
23710 .inherits = &cmd64x_base_ops,
23711 .bmdma_stop = cmd646r1_bmdma_stop,
23712 .cable_detect = ata_cable_40wire,
23713 };
23714
23715-static struct ata_port_operations cmd648_port_ops = {
23716+static const struct ata_port_operations cmd648_port_ops = {
23717 .inherits = &cmd64x_base_ops,
23718 .bmdma_stop = cmd648_bmdma_stop,
23719 .cable_detect = cmd648_cable_detect,
23720diff -urNp linux-2.6.32.41/drivers/ata/pata_cs5520.c linux-2.6.32.41/drivers/ata/pata_cs5520.c
23721--- linux-2.6.32.41/drivers/ata/pata_cs5520.c 2011-03-27 14:31:47.000000000 -0400
23722+++ linux-2.6.32.41/drivers/ata/pata_cs5520.c 2011-04-17 15:56:46.000000000 -0400
23723@@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_
23724 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
23725 };
23726
23727-static struct ata_port_operations cs5520_port_ops = {
23728+static const struct ata_port_operations cs5520_port_ops = {
23729 .inherits = &ata_bmdma_port_ops,
23730 .qc_prep = ata_sff_dumb_qc_prep,
23731 .cable_detect = ata_cable_40wire,
23732diff -urNp linux-2.6.32.41/drivers/ata/pata_cs5530.c linux-2.6.32.41/drivers/ata/pata_cs5530.c
23733--- linux-2.6.32.41/drivers/ata/pata_cs5530.c 2011-03-27 14:31:47.000000000 -0400
23734+++ linux-2.6.32.41/drivers/ata/pata_cs5530.c 2011-04-17 15:56:46.000000000 -0400
23735@@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_
23736 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
23737 };
23738
23739-static struct ata_port_operations cs5530_port_ops = {
23740+static const struct ata_port_operations cs5530_port_ops = {
23741 .inherits = &ata_bmdma_port_ops,
23742
23743 .qc_prep = ata_sff_dumb_qc_prep,
23744diff -urNp linux-2.6.32.41/drivers/ata/pata_cs5535.c linux-2.6.32.41/drivers/ata/pata_cs5535.c
23745--- linux-2.6.32.41/drivers/ata/pata_cs5535.c 2011-03-27 14:31:47.000000000 -0400
23746+++ linux-2.6.32.41/drivers/ata/pata_cs5535.c 2011-04-17 15:56:46.000000000 -0400
23747@@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_
23748 ATA_BMDMA_SHT(DRV_NAME),
23749 };
23750
23751-static struct ata_port_operations cs5535_port_ops = {
23752+static const struct ata_port_operations cs5535_port_ops = {
23753 .inherits = &ata_bmdma_port_ops,
23754 .cable_detect = cs5535_cable_detect,
23755 .set_piomode = cs5535_set_piomode,
23756diff -urNp linux-2.6.32.41/drivers/ata/pata_cs5536.c linux-2.6.32.41/drivers/ata/pata_cs5536.c
23757--- linux-2.6.32.41/drivers/ata/pata_cs5536.c 2011-03-27 14:31:47.000000000 -0400
23758+++ linux-2.6.32.41/drivers/ata/pata_cs5536.c 2011-04-17 15:56:46.000000000 -0400
23759@@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_
23760 ATA_BMDMA_SHT(DRV_NAME),
23761 };
23762
23763-static struct ata_port_operations cs5536_port_ops = {
23764+static const struct ata_port_operations cs5536_port_ops = {
23765 .inherits = &ata_bmdma_port_ops,
23766 .cable_detect = cs5536_cable_detect,
23767 .set_piomode = cs5536_set_piomode,
23768diff -urNp linux-2.6.32.41/drivers/ata/pata_cypress.c linux-2.6.32.41/drivers/ata/pata_cypress.c
23769--- linux-2.6.32.41/drivers/ata/pata_cypress.c 2011-03-27 14:31:47.000000000 -0400
23770+++ linux-2.6.32.41/drivers/ata/pata_cypress.c 2011-04-17 15:56:46.000000000 -0400
23771@@ -113,7 +113,7 @@ static struct scsi_host_template cy82c69
23772 ATA_BMDMA_SHT(DRV_NAME),
23773 };
23774
23775-static struct ata_port_operations cy82c693_port_ops = {
23776+static const struct ata_port_operations cy82c693_port_ops = {
23777 .inherits = &ata_bmdma_port_ops,
23778 .cable_detect = ata_cable_40wire,
23779 .set_piomode = cy82c693_set_piomode,
23780diff -urNp linux-2.6.32.41/drivers/ata/pata_efar.c linux-2.6.32.41/drivers/ata/pata_efar.c
23781--- linux-2.6.32.41/drivers/ata/pata_efar.c 2011-03-27 14:31:47.000000000 -0400
23782+++ linux-2.6.32.41/drivers/ata/pata_efar.c 2011-04-17 15:56:46.000000000 -0400
23783@@ -222,7 +222,7 @@ static struct scsi_host_template efar_sh
23784 ATA_BMDMA_SHT(DRV_NAME),
23785 };
23786
23787-static struct ata_port_operations efar_ops = {
23788+static const struct ata_port_operations efar_ops = {
23789 .inherits = &ata_bmdma_port_ops,
23790 .cable_detect = efar_cable_detect,
23791 .set_piomode = efar_set_piomode,
23792diff -urNp linux-2.6.32.41/drivers/ata/pata_hpt366.c linux-2.6.32.41/drivers/ata/pata_hpt366.c
23793--- linux-2.6.32.41/drivers/ata/pata_hpt366.c 2011-03-27 14:31:47.000000000 -0400
23794+++ linux-2.6.32.41/drivers/ata/pata_hpt366.c 2011-04-17 15:56:46.000000000 -0400
23795@@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_
23796 * Configuration for HPT366/68
23797 */
23798
23799-static struct ata_port_operations hpt366_port_ops = {
23800+static const struct ata_port_operations hpt366_port_ops = {
23801 .inherits = &ata_bmdma_port_ops,
23802 .cable_detect = hpt36x_cable_detect,
23803 .mode_filter = hpt366_filter,
23804diff -urNp linux-2.6.32.41/drivers/ata/pata_hpt37x.c linux-2.6.32.41/drivers/ata/pata_hpt37x.c
23805--- linux-2.6.32.41/drivers/ata/pata_hpt37x.c 2011-03-27 14:31:47.000000000 -0400
23806+++ linux-2.6.32.41/drivers/ata/pata_hpt37x.c 2011-04-17 15:56:46.000000000 -0400
23807@@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_
23808 * Configuration for HPT370
23809 */
23810
23811-static struct ata_port_operations hpt370_port_ops = {
23812+static const struct ata_port_operations hpt370_port_ops = {
23813 .inherits = &ata_bmdma_port_ops,
23814
23815 .bmdma_stop = hpt370_bmdma_stop,
23816@@ -591,7 +591,7 @@ static struct ata_port_operations hpt370
23817 * Configuration for HPT370A. Close to 370 but less filters
23818 */
23819
23820-static struct ata_port_operations hpt370a_port_ops = {
23821+static const struct ata_port_operations hpt370a_port_ops = {
23822 .inherits = &hpt370_port_ops,
23823 .mode_filter = hpt370a_filter,
23824 };
23825@@ -601,7 +601,7 @@ static struct ata_port_operations hpt370
23826 * and DMA mode setting functionality.
23827 */
23828
23829-static struct ata_port_operations hpt372_port_ops = {
23830+static const struct ata_port_operations hpt372_port_ops = {
23831 .inherits = &ata_bmdma_port_ops,
23832
23833 .bmdma_stop = hpt37x_bmdma_stop,
23834@@ -616,7 +616,7 @@ static struct ata_port_operations hpt372
23835 * but we have a different cable detection procedure for function 1.
23836 */
23837
23838-static struct ata_port_operations hpt374_fn1_port_ops = {
23839+static const struct ata_port_operations hpt374_fn1_port_ops = {
23840 .inherits = &hpt372_port_ops,
23841 .prereset = hpt374_fn1_pre_reset,
23842 };
23843diff -urNp linux-2.6.32.41/drivers/ata/pata_hpt3x2n.c linux-2.6.32.41/drivers/ata/pata_hpt3x2n.c
23844--- linux-2.6.32.41/drivers/ata/pata_hpt3x2n.c 2011-03-27 14:31:47.000000000 -0400
23845+++ linux-2.6.32.41/drivers/ata/pata_hpt3x2n.c 2011-04-17 15:56:46.000000000 -0400
23846@@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n
23847 * Configuration for HPT3x2n.
23848 */
23849
23850-static struct ata_port_operations hpt3x2n_port_ops = {
23851+static const struct ata_port_operations hpt3x2n_port_ops = {
23852 .inherits = &ata_bmdma_port_ops,
23853
23854 .bmdma_stop = hpt3x2n_bmdma_stop,
23855diff -urNp linux-2.6.32.41/drivers/ata/pata_hpt3x3.c linux-2.6.32.41/drivers/ata/pata_hpt3x3.c
23856--- linux-2.6.32.41/drivers/ata/pata_hpt3x3.c 2011-03-27 14:31:47.000000000 -0400
23857+++ linux-2.6.32.41/drivers/ata/pata_hpt3x3.c 2011-04-17 15:56:46.000000000 -0400
23858@@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_
23859 ATA_BMDMA_SHT(DRV_NAME),
23860 };
23861
23862-static struct ata_port_operations hpt3x3_port_ops = {
23863+static const struct ata_port_operations hpt3x3_port_ops = {
23864 .inherits = &ata_bmdma_port_ops,
23865 .cable_detect = ata_cable_40wire,
23866 .set_piomode = hpt3x3_set_piomode,
23867diff -urNp linux-2.6.32.41/drivers/ata/pata_icside.c linux-2.6.32.41/drivers/ata/pata_icside.c
23868--- linux-2.6.32.41/drivers/ata/pata_icside.c 2011-03-27 14:31:47.000000000 -0400
23869+++ linux-2.6.32.41/drivers/ata/pata_icside.c 2011-04-17 15:56:46.000000000 -0400
23870@@ -319,7 +319,7 @@ static void pata_icside_postreset(struct
23871 }
23872 }
23873
23874-static struct ata_port_operations pata_icside_port_ops = {
23875+static const struct ata_port_operations pata_icside_port_ops = {
23876 .inherits = &ata_sff_port_ops,
23877 /* no need to build any PRD tables for DMA */
23878 .qc_prep = ata_noop_qc_prep,
23879diff -urNp linux-2.6.32.41/drivers/ata/pata_isapnp.c linux-2.6.32.41/drivers/ata/pata_isapnp.c
23880--- linux-2.6.32.41/drivers/ata/pata_isapnp.c 2011-03-27 14:31:47.000000000 -0400
23881+++ linux-2.6.32.41/drivers/ata/pata_isapnp.c 2011-04-17 15:56:46.000000000 -0400
23882@@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_
23883 ATA_PIO_SHT(DRV_NAME),
23884 };
23885
23886-static struct ata_port_operations isapnp_port_ops = {
23887+static const struct ata_port_operations isapnp_port_ops = {
23888 .inherits = &ata_sff_port_ops,
23889 .cable_detect = ata_cable_40wire,
23890 };
23891
23892-static struct ata_port_operations isapnp_noalt_port_ops = {
23893+static const struct ata_port_operations isapnp_noalt_port_ops = {
23894 .inherits = &ata_sff_port_ops,
23895 .cable_detect = ata_cable_40wire,
23896 /* No altstatus so we don't want to use the lost interrupt poll */
23897diff -urNp linux-2.6.32.41/drivers/ata/pata_it8213.c linux-2.6.32.41/drivers/ata/pata_it8213.c
23898--- linux-2.6.32.41/drivers/ata/pata_it8213.c 2011-03-27 14:31:47.000000000 -0400
23899+++ linux-2.6.32.41/drivers/ata/pata_it8213.c 2011-04-17 15:56:46.000000000 -0400
23900@@ -234,7 +234,7 @@ static struct scsi_host_template it8213_
23901 };
23902
23903
23904-static struct ata_port_operations it8213_ops = {
23905+static const struct ata_port_operations it8213_ops = {
23906 .inherits = &ata_bmdma_port_ops,
23907 .cable_detect = it8213_cable_detect,
23908 .set_piomode = it8213_set_piomode,
23909diff -urNp linux-2.6.32.41/drivers/ata/pata_it821x.c linux-2.6.32.41/drivers/ata/pata_it821x.c
23910--- linux-2.6.32.41/drivers/ata/pata_it821x.c 2011-03-27 14:31:47.000000000 -0400
23911+++ linux-2.6.32.41/drivers/ata/pata_it821x.c 2011-04-17 15:56:46.000000000 -0400
23912@@ -800,7 +800,7 @@ static struct scsi_host_template it821x_
23913 ATA_BMDMA_SHT(DRV_NAME),
23914 };
23915
23916-static struct ata_port_operations it821x_smart_port_ops = {
23917+static const struct ata_port_operations it821x_smart_port_ops = {
23918 .inherits = &ata_bmdma_port_ops,
23919
23920 .check_atapi_dma= it821x_check_atapi_dma,
23921@@ -814,7 +814,7 @@ static struct ata_port_operations it821x
23922 .port_start = it821x_port_start,
23923 };
23924
23925-static struct ata_port_operations it821x_passthru_port_ops = {
23926+static const struct ata_port_operations it821x_passthru_port_ops = {
23927 .inherits = &ata_bmdma_port_ops,
23928
23929 .check_atapi_dma= it821x_check_atapi_dma,
23930@@ -830,7 +830,7 @@ static struct ata_port_operations it821x
23931 .port_start = it821x_port_start,
23932 };
23933
23934-static struct ata_port_operations it821x_rdc_port_ops = {
23935+static const struct ata_port_operations it821x_rdc_port_ops = {
23936 .inherits = &ata_bmdma_port_ops,
23937
23938 .check_atapi_dma= it821x_check_atapi_dma,
23939diff -urNp linux-2.6.32.41/drivers/ata/pata_ixp4xx_cf.c linux-2.6.32.41/drivers/ata/pata_ixp4xx_cf.c
23940--- linux-2.6.32.41/drivers/ata/pata_ixp4xx_cf.c 2011-03-27 14:31:47.000000000 -0400
23941+++ linux-2.6.32.41/drivers/ata/pata_ixp4xx_cf.c 2011-04-17 15:56:46.000000000 -0400
23942@@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_
23943 ATA_PIO_SHT(DRV_NAME),
23944 };
23945
23946-static struct ata_port_operations ixp4xx_port_ops = {
23947+static const struct ata_port_operations ixp4xx_port_ops = {
23948 .inherits = &ata_sff_port_ops,
23949 .sff_data_xfer = ixp4xx_mmio_data_xfer,
23950 .cable_detect = ata_cable_40wire,
23951diff -urNp linux-2.6.32.41/drivers/ata/pata_jmicron.c linux-2.6.32.41/drivers/ata/pata_jmicron.c
23952--- linux-2.6.32.41/drivers/ata/pata_jmicron.c 2011-03-27 14:31:47.000000000 -0400
23953+++ linux-2.6.32.41/drivers/ata/pata_jmicron.c 2011-04-17 15:56:46.000000000 -0400
23954@@ -111,7 +111,7 @@ static struct scsi_host_template jmicron
23955 ATA_BMDMA_SHT(DRV_NAME),
23956 };
23957
23958-static struct ata_port_operations jmicron_ops = {
23959+static const struct ata_port_operations jmicron_ops = {
23960 .inherits = &ata_bmdma_port_ops,
23961 .prereset = jmicron_pre_reset,
23962 };
23963diff -urNp linux-2.6.32.41/drivers/ata/pata_legacy.c linux-2.6.32.41/drivers/ata/pata_legacy.c
23964--- linux-2.6.32.41/drivers/ata/pata_legacy.c 2011-03-27 14:31:47.000000000 -0400
23965+++ linux-2.6.32.41/drivers/ata/pata_legacy.c 2011-04-17 15:56:46.000000000 -0400
23966@@ -106,7 +106,7 @@ struct legacy_probe {
23967
23968 struct legacy_controller {
23969 const char *name;
23970- struct ata_port_operations *ops;
23971+ const struct ata_port_operations *ops;
23972 unsigned int pio_mask;
23973 unsigned int flags;
23974 unsigned int pflags;
23975@@ -223,12 +223,12 @@ static const struct ata_port_operations
23976 * pio_mask as well.
23977 */
23978
23979-static struct ata_port_operations simple_port_ops = {
23980+static const struct ata_port_operations simple_port_ops = {
23981 .inherits = &legacy_base_port_ops,
23982 .sff_data_xfer = ata_sff_data_xfer_noirq,
23983 };
23984
23985-static struct ata_port_operations legacy_port_ops = {
23986+static const struct ata_port_operations legacy_port_ops = {
23987 .inherits = &legacy_base_port_ops,
23988 .sff_data_xfer = ata_sff_data_xfer_noirq,
23989 .set_mode = legacy_set_mode,
23990@@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(st
23991 return buflen;
23992 }
23993
23994-static struct ata_port_operations pdc20230_port_ops = {
23995+static const struct ata_port_operations pdc20230_port_ops = {
23996 .inherits = &legacy_base_port_ops,
23997 .set_piomode = pdc20230_set_piomode,
23998 .sff_data_xfer = pdc_data_xfer_vlb,
23999@@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct a
24000 ioread8(ap->ioaddr.status_addr);
24001 }
24002
24003-static struct ata_port_operations ht6560a_port_ops = {
24004+static const struct ata_port_operations ht6560a_port_ops = {
24005 .inherits = &legacy_base_port_ops,
24006 .set_piomode = ht6560a_set_piomode,
24007 };
24008@@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct a
24009 ioread8(ap->ioaddr.status_addr);
24010 }
24011
24012-static struct ata_port_operations ht6560b_port_ops = {
24013+static const struct ata_port_operations ht6560b_port_ops = {
24014 .inherits = &legacy_base_port_ops,
24015 .set_piomode = ht6560b_set_piomode,
24016 };
24017@@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(stru
24018 }
24019
24020
24021-static struct ata_port_operations opti82c611a_port_ops = {
24022+static const struct ata_port_operations opti82c611a_port_ops = {
24023 .inherits = &legacy_base_port_ops,
24024 .set_piomode = opti82c611a_set_piomode,
24025 };
24026@@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(
24027 return ata_sff_qc_issue(qc);
24028 }
24029
24030-static struct ata_port_operations opti82c46x_port_ops = {
24031+static const struct ata_port_operations opti82c46x_port_ops = {
24032 .inherits = &legacy_base_port_ops,
24033 .set_piomode = opti82c46x_set_piomode,
24034 .qc_issue = opti82c46x_qc_issue,
24035@@ -771,20 +771,20 @@ static int qdi_port(struct platform_devi
24036 return 0;
24037 }
24038
24039-static struct ata_port_operations qdi6500_port_ops = {
24040+static const struct ata_port_operations qdi6500_port_ops = {
24041 .inherits = &legacy_base_port_ops,
24042 .set_piomode = qdi6500_set_piomode,
24043 .qc_issue = qdi_qc_issue,
24044 .sff_data_xfer = vlb32_data_xfer,
24045 };
24046
24047-static struct ata_port_operations qdi6580_port_ops = {
24048+static const struct ata_port_operations qdi6580_port_ops = {
24049 .inherits = &legacy_base_port_ops,
24050 .set_piomode = qdi6580_set_piomode,
24051 .sff_data_xfer = vlb32_data_xfer,
24052 };
24053
24054-static struct ata_port_operations qdi6580dp_port_ops = {
24055+static const struct ata_port_operations qdi6580dp_port_ops = {
24056 .inherits = &legacy_base_port_ops,
24057 .set_piomode = qdi6580dp_set_piomode,
24058 .sff_data_xfer = vlb32_data_xfer,
24059@@ -855,7 +855,7 @@ static int winbond_port(struct platform_
24060 return 0;
24061 }
24062
24063-static struct ata_port_operations winbond_port_ops = {
24064+static const struct ata_port_operations winbond_port_ops = {
24065 .inherits = &legacy_base_port_ops,
24066 .set_piomode = winbond_set_piomode,
24067 .sff_data_xfer = vlb32_data_xfer,
24068@@ -978,7 +978,7 @@ static __init int legacy_init_one(struct
24069 int pio_modes = controller->pio_mask;
24070 unsigned long io = probe->port;
24071 u32 mask = (1 << probe->slot);
24072- struct ata_port_operations *ops = controller->ops;
24073+ const struct ata_port_operations *ops = controller->ops;
24074 struct legacy_data *ld = &legacy_data[probe->slot];
24075 struct ata_host *host = NULL;
24076 struct ata_port *ap;
24077diff -urNp linux-2.6.32.41/drivers/ata/pata_marvell.c linux-2.6.32.41/drivers/ata/pata_marvell.c
24078--- linux-2.6.32.41/drivers/ata/pata_marvell.c 2011-03-27 14:31:47.000000000 -0400
24079+++ linux-2.6.32.41/drivers/ata/pata_marvell.c 2011-04-17 15:56:46.000000000 -0400
24080@@ -100,7 +100,7 @@ static struct scsi_host_template marvell
24081 ATA_BMDMA_SHT(DRV_NAME),
24082 };
24083
24084-static struct ata_port_operations marvell_ops = {
24085+static const struct ata_port_operations marvell_ops = {
24086 .inherits = &ata_bmdma_port_ops,
24087 .cable_detect = marvell_cable_detect,
24088 .prereset = marvell_pre_reset,
24089diff -urNp linux-2.6.32.41/drivers/ata/pata_mpc52xx.c linux-2.6.32.41/drivers/ata/pata_mpc52xx.c
24090--- linux-2.6.32.41/drivers/ata/pata_mpc52xx.c 2011-03-27 14:31:47.000000000 -0400
24091+++ linux-2.6.32.41/drivers/ata/pata_mpc52xx.c 2011-04-17 15:56:46.000000000 -0400
24092@@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx
24093 ATA_PIO_SHT(DRV_NAME),
24094 };
24095
24096-static struct ata_port_operations mpc52xx_ata_port_ops = {
24097+static const struct ata_port_operations mpc52xx_ata_port_ops = {
24098 .inherits = &ata_bmdma_port_ops,
24099 .sff_dev_select = mpc52xx_ata_dev_select,
24100 .set_piomode = mpc52xx_ata_set_piomode,
24101diff -urNp linux-2.6.32.41/drivers/ata/pata_mpiix.c linux-2.6.32.41/drivers/ata/pata_mpiix.c
24102--- linux-2.6.32.41/drivers/ata/pata_mpiix.c 2011-03-27 14:31:47.000000000 -0400
24103+++ linux-2.6.32.41/drivers/ata/pata_mpiix.c 2011-04-17 15:56:46.000000000 -0400
24104@@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_s
24105 ATA_PIO_SHT(DRV_NAME),
24106 };
24107
24108-static struct ata_port_operations mpiix_port_ops = {
24109+static const struct ata_port_operations mpiix_port_ops = {
24110 .inherits = &ata_sff_port_ops,
24111 .qc_issue = mpiix_qc_issue,
24112 .cable_detect = ata_cable_40wire,
24113diff -urNp linux-2.6.32.41/drivers/ata/pata_netcell.c linux-2.6.32.41/drivers/ata/pata_netcell.c
24114--- linux-2.6.32.41/drivers/ata/pata_netcell.c 2011-03-27 14:31:47.000000000 -0400
24115+++ linux-2.6.32.41/drivers/ata/pata_netcell.c 2011-04-17 15:56:46.000000000 -0400
24116@@ -34,7 +34,7 @@ static struct scsi_host_template netcell
24117 ATA_BMDMA_SHT(DRV_NAME),
24118 };
24119
24120-static struct ata_port_operations netcell_ops = {
24121+static const struct ata_port_operations netcell_ops = {
24122 .inherits = &ata_bmdma_port_ops,
24123 .cable_detect = ata_cable_80wire,
24124 .read_id = netcell_read_id,
24125diff -urNp linux-2.6.32.41/drivers/ata/pata_ninja32.c linux-2.6.32.41/drivers/ata/pata_ninja32.c
24126--- linux-2.6.32.41/drivers/ata/pata_ninja32.c 2011-03-27 14:31:47.000000000 -0400
24127+++ linux-2.6.32.41/drivers/ata/pata_ninja32.c 2011-04-17 15:56:46.000000000 -0400
24128@@ -81,7 +81,7 @@ static struct scsi_host_template ninja32
24129 ATA_BMDMA_SHT(DRV_NAME),
24130 };
24131
24132-static struct ata_port_operations ninja32_port_ops = {
24133+static const struct ata_port_operations ninja32_port_ops = {
24134 .inherits = &ata_bmdma_port_ops,
24135 .sff_dev_select = ninja32_dev_select,
24136 .cable_detect = ata_cable_40wire,
24137diff -urNp linux-2.6.32.41/drivers/ata/pata_ns87410.c linux-2.6.32.41/drivers/ata/pata_ns87410.c
24138--- linux-2.6.32.41/drivers/ata/pata_ns87410.c 2011-03-27 14:31:47.000000000 -0400
24139+++ linux-2.6.32.41/drivers/ata/pata_ns87410.c 2011-04-17 15:56:46.000000000 -0400
24140@@ -132,7 +132,7 @@ static struct scsi_host_template ns87410
24141 ATA_PIO_SHT(DRV_NAME),
24142 };
24143
24144-static struct ata_port_operations ns87410_port_ops = {
24145+static const struct ata_port_operations ns87410_port_ops = {
24146 .inherits = &ata_sff_port_ops,
24147 .qc_issue = ns87410_qc_issue,
24148 .cable_detect = ata_cable_40wire,
24149diff -urNp linux-2.6.32.41/drivers/ata/pata_ns87415.c linux-2.6.32.41/drivers/ata/pata_ns87415.c
24150--- linux-2.6.32.41/drivers/ata/pata_ns87415.c 2011-03-27 14:31:47.000000000 -0400
24151+++ linux-2.6.32.41/drivers/ata/pata_ns87415.c 2011-04-17 15:56:46.000000000 -0400
24152@@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct at
24153 }
24154 #endif /* 87560 SuperIO Support */
24155
24156-static struct ata_port_operations ns87415_pata_ops = {
24157+static const struct ata_port_operations ns87415_pata_ops = {
24158 .inherits = &ata_bmdma_port_ops,
24159
24160 .check_atapi_dma = ns87415_check_atapi_dma,
24161@@ -313,7 +313,7 @@ static struct ata_port_operations ns8741
24162 };
24163
24164 #if defined(CONFIG_SUPERIO)
24165-static struct ata_port_operations ns87560_pata_ops = {
24166+static const struct ata_port_operations ns87560_pata_ops = {
24167 .inherits = &ns87415_pata_ops,
24168 .sff_tf_read = ns87560_tf_read,
24169 .sff_check_status = ns87560_check_status,
24170diff -urNp linux-2.6.32.41/drivers/ata/pata_octeon_cf.c linux-2.6.32.41/drivers/ata/pata_octeon_cf.c
24171--- linux-2.6.32.41/drivers/ata/pata_octeon_cf.c 2011-03-27 14:31:47.000000000 -0400
24172+++ linux-2.6.32.41/drivers/ata/pata_octeon_cf.c 2011-04-17 15:56:46.000000000 -0400
24173@@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(s
24174 return 0;
24175 }
24176
24177+/* cannot be const */
24178 static struct ata_port_operations octeon_cf_ops = {
24179 .inherits = &ata_sff_port_ops,
24180 .check_atapi_dma = octeon_cf_check_atapi_dma,
24181diff -urNp linux-2.6.32.41/drivers/ata/pata_oldpiix.c linux-2.6.32.41/drivers/ata/pata_oldpiix.c
24182--- linux-2.6.32.41/drivers/ata/pata_oldpiix.c 2011-03-27 14:31:47.000000000 -0400
24183+++ linux-2.6.32.41/drivers/ata/pata_oldpiix.c 2011-04-17 15:56:46.000000000 -0400
24184@@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix
24185 ATA_BMDMA_SHT(DRV_NAME),
24186 };
24187
24188-static struct ata_port_operations oldpiix_pata_ops = {
24189+static const struct ata_port_operations oldpiix_pata_ops = {
24190 .inherits = &ata_bmdma_port_ops,
24191 .qc_issue = oldpiix_qc_issue,
24192 .cable_detect = ata_cable_40wire,
24193diff -urNp linux-2.6.32.41/drivers/ata/pata_opti.c linux-2.6.32.41/drivers/ata/pata_opti.c
24194--- linux-2.6.32.41/drivers/ata/pata_opti.c 2011-03-27 14:31:47.000000000 -0400
24195+++ linux-2.6.32.41/drivers/ata/pata_opti.c 2011-04-17 15:56:46.000000000 -0400
24196@@ -152,7 +152,7 @@ static struct scsi_host_template opti_sh
24197 ATA_PIO_SHT(DRV_NAME),
24198 };
24199
24200-static struct ata_port_operations opti_port_ops = {
24201+static const struct ata_port_operations opti_port_ops = {
24202 .inherits = &ata_sff_port_ops,
24203 .cable_detect = ata_cable_40wire,
24204 .set_piomode = opti_set_piomode,
24205diff -urNp linux-2.6.32.41/drivers/ata/pata_optidma.c linux-2.6.32.41/drivers/ata/pata_optidma.c
24206--- linux-2.6.32.41/drivers/ata/pata_optidma.c 2011-03-27 14:31:47.000000000 -0400
24207+++ linux-2.6.32.41/drivers/ata/pata_optidma.c 2011-04-17 15:56:46.000000000 -0400
24208@@ -337,7 +337,7 @@ static struct scsi_host_template optidma
24209 ATA_BMDMA_SHT(DRV_NAME),
24210 };
24211
24212-static struct ata_port_operations optidma_port_ops = {
24213+static const struct ata_port_operations optidma_port_ops = {
24214 .inherits = &ata_bmdma_port_ops,
24215 .cable_detect = ata_cable_40wire,
24216 .set_piomode = optidma_set_pio_mode,
24217@@ -346,7 +346,7 @@ static struct ata_port_operations optidm
24218 .prereset = optidma_pre_reset,
24219 };
24220
24221-static struct ata_port_operations optiplus_port_ops = {
24222+static const struct ata_port_operations optiplus_port_ops = {
24223 .inherits = &optidma_port_ops,
24224 .set_piomode = optiplus_set_pio_mode,
24225 .set_dmamode = optiplus_set_dma_mode,
24226diff -urNp linux-2.6.32.41/drivers/ata/pata_palmld.c linux-2.6.32.41/drivers/ata/pata_palmld.c
24227--- linux-2.6.32.41/drivers/ata/pata_palmld.c 2011-03-27 14:31:47.000000000 -0400
24228+++ linux-2.6.32.41/drivers/ata/pata_palmld.c 2011-04-17 15:56:46.000000000 -0400
24229@@ -37,7 +37,7 @@ static struct scsi_host_template palmld_
24230 ATA_PIO_SHT(DRV_NAME),
24231 };
24232
24233-static struct ata_port_operations palmld_port_ops = {
24234+static const struct ata_port_operations palmld_port_ops = {
24235 .inherits = &ata_sff_port_ops,
24236 .sff_data_xfer = ata_sff_data_xfer_noirq,
24237 .cable_detect = ata_cable_40wire,
24238diff -urNp linux-2.6.32.41/drivers/ata/pata_pcmcia.c linux-2.6.32.41/drivers/ata/pata_pcmcia.c
24239--- linux-2.6.32.41/drivers/ata/pata_pcmcia.c 2011-03-27 14:31:47.000000000 -0400
24240+++ linux-2.6.32.41/drivers/ata/pata_pcmcia.c 2011-04-17 15:56:46.000000000 -0400
24241@@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_
24242 ATA_PIO_SHT(DRV_NAME),
24243 };
24244
24245-static struct ata_port_operations pcmcia_port_ops = {
24246+static const struct ata_port_operations pcmcia_port_ops = {
24247 .inherits = &ata_sff_port_ops,
24248 .sff_data_xfer = ata_sff_data_xfer_noirq,
24249 .cable_detect = ata_cable_40wire,
24250 .set_mode = pcmcia_set_mode,
24251 };
24252
24253-static struct ata_port_operations pcmcia_8bit_port_ops = {
24254+static const struct ata_port_operations pcmcia_8bit_port_ops = {
24255 .inherits = &ata_sff_port_ops,
24256 .sff_data_xfer = ata_data_xfer_8bit,
24257 .cable_detect = ata_cable_40wire,
24258@@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia
24259 unsigned long io_base, ctl_base;
24260 void __iomem *io_addr, *ctl_addr;
24261 int n_ports = 1;
24262- struct ata_port_operations *ops = &pcmcia_port_ops;
24263+ const struct ata_port_operations *ops = &pcmcia_port_ops;
24264
24265 info = kzalloc(sizeof(*info), GFP_KERNEL);
24266 if (info == NULL)
24267diff -urNp linux-2.6.32.41/drivers/ata/pata_pdc2027x.c linux-2.6.32.41/drivers/ata/pata_pdc2027x.c
24268--- linux-2.6.32.41/drivers/ata/pata_pdc2027x.c 2011-03-27 14:31:47.000000000 -0400
24269+++ linux-2.6.32.41/drivers/ata/pata_pdc2027x.c 2011-04-17 15:56:46.000000000 -0400
24270@@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027
24271 ATA_BMDMA_SHT(DRV_NAME),
24272 };
24273
24274-static struct ata_port_operations pdc2027x_pata100_ops = {
24275+static const struct ata_port_operations pdc2027x_pata100_ops = {
24276 .inherits = &ata_bmdma_port_ops,
24277 .check_atapi_dma = pdc2027x_check_atapi_dma,
24278 .cable_detect = pdc2027x_cable_detect,
24279 .prereset = pdc2027x_prereset,
24280 };
24281
24282-static struct ata_port_operations pdc2027x_pata133_ops = {
24283+static const struct ata_port_operations pdc2027x_pata133_ops = {
24284 .inherits = &pdc2027x_pata100_ops,
24285 .mode_filter = pdc2027x_mode_filter,
24286 .set_piomode = pdc2027x_set_piomode,
24287diff -urNp linux-2.6.32.41/drivers/ata/pata_pdc202xx_old.c linux-2.6.32.41/drivers/ata/pata_pdc202xx_old.c
24288--- linux-2.6.32.41/drivers/ata/pata_pdc202xx_old.c 2011-03-27 14:31:47.000000000 -0400
24289+++ linux-2.6.32.41/drivers/ata/pata_pdc202xx_old.c 2011-04-17 15:56:46.000000000 -0400
24290@@ -274,7 +274,7 @@ static struct scsi_host_template pdc202x
24291 ATA_BMDMA_SHT(DRV_NAME),
24292 };
24293
24294-static struct ata_port_operations pdc2024x_port_ops = {
24295+static const struct ata_port_operations pdc2024x_port_ops = {
24296 .inherits = &ata_bmdma_port_ops,
24297
24298 .cable_detect = ata_cable_40wire,
24299@@ -284,7 +284,7 @@ static struct ata_port_operations pdc202
24300 .sff_exec_command = pdc202xx_exec_command,
24301 };
24302
24303-static struct ata_port_operations pdc2026x_port_ops = {
24304+static const struct ata_port_operations pdc2026x_port_ops = {
24305 .inherits = &pdc2024x_port_ops,
24306
24307 .check_atapi_dma = pdc2026x_check_atapi_dma,
24308diff -urNp linux-2.6.32.41/drivers/ata/pata_platform.c linux-2.6.32.41/drivers/ata/pata_platform.c
24309--- linux-2.6.32.41/drivers/ata/pata_platform.c 2011-03-27 14:31:47.000000000 -0400
24310+++ linux-2.6.32.41/drivers/ata/pata_platform.c 2011-04-17 15:56:46.000000000 -0400
24311@@ -48,7 +48,7 @@ static struct scsi_host_template pata_pl
24312 ATA_PIO_SHT(DRV_NAME),
24313 };
24314
24315-static struct ata_port_operations pata_platform_port_ops = {
24316+static const struct ata_port_operations pata_platform_port_ops = {
24317 .inherits = &ata_sff_port_ops,
24318 .sff_data_xfer = ata_sff_data_xfer_noirq,
24319 .cable_detect = ata_cable_unknown,
24320diff -urNp linux-2.6.32.41/drivers/ata/pata_qdi.c linux-2.6.32.41/drivers/ata/pata_qdi.c
24321--- linux-2.6.32.41/drivers/ata/pata_qdi.c 2011-03-27 14:31:47.000000000 -0400
24322+++ linux-2.6.32.41/drivers/ata/pata_qdi.c 2011-04-17 15:56:46.000000000 -0400
24323@@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht
24324 ATA_PIO_SHT(DRV_NAME),
24325 };
24326
24327-static struct ata_port_operations qdi6500_port_ops = {
24328+static const struct ata_port_operations qdi6500_port_ops = {
24329 .inherits = &ata_sff_port_ops,
24330 .qc_issue = qdi_qc_issue,
24331 .sff_data_xfer = qdi_data_xfer,
24332@@ -165,7 +165,7 @@ static struct ata_port_operations qdi650
24333 .set_piomode = qdi6500_set_piomode,
24334 };
24335
24336-static struct ata_port_operations qdi6580_port_ops = {
24337+static const struct ata_port_operations qdi6580_port_ops = {
24338 .inherits = &qdi6500_port_ops,
24339 .set_piomode = qdi6580_set_piomode,
24340 };
24341diff -urNp linux-2.6.32.41/drivers/ata/pata_radisys.c linux-2.6.32.41/drivers/ata/pata_radisys.c
24342--- linux-2.6.32.41/drivers/ata/pata_radisys.c 2011-03-27 14:31:47.000000000 -0400
24343+++ linux-2.6.32.41/drivers/ata/pata_radisys.c 2011-04-17 15:56:46.000000000 -0400
24344@@ -187,7 +187,7 @@ static struct scsi_host_template radisys
24345 ATA_BMDMA_SHT(DRV_NAME),
24346 };
24347
24348-static struct ata_port_operations radisys_pata_ops = {
24349+static const struct ata_port_operations radisys_pata_ops = {
24350 .inherits = &ata_bmdma_port_ops,
24351 .qc_issue = radisys_qc_issue,
24352 .cable_detect = ata_cable_unknown,
24353diff -urNp linux-2.6.32.41/drivers/ata/pata_rb532_cf.c linux-2.6.32.41/drivers/ata/pata_rb532_cf.c
24354--- linux-2.6.32.41/drivers/ata/pata_rb532_cf.c 2011-03-27 14:31:47.000000000 -0400
24355+++ linux-2.6.32.41/drivers/ata/pata_rb532_cf.c 2011-04-17 15:56:46.000000000 -0400
24356@@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handle
24357 return IRQ_HANDLED;
24358 }
24359
24360-static struct ata_port_operations rb532_pata_port_ops = {
24361+static const struct ata_port_operations rb532_pata_port_ops = {
24362 .inherits = &ata_sff_port_ops,
24363 .sff_data_xfer = ata_sff_data_xfer32,
24364 };
24365diff -urNp linux-2.6.32.41/drivers/ata/pata_rdc.c linux-2.6.32.41/drivers/ata/pata_rdc.c
24366--- linux-2.6.32.41/drivers/ata/pata_rdc.c 2011-03-27 14:31:47.000000000 -0400
24367+++ linux-2.6.32.41/drivers/ata/pata_rdc.c 2011-04-17 15:56:46.000000000 -0400
24368@@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_p
24369 pci_write_config_byte(dev, 0x48, udma_enable);
24370 }
24371
24372-static struct ata_port_operations rdc_pata_ops = {
24373+static const struct ata_port_operations rdc_pata_ops = {
24374 .inherits = &ata_bmdma32_port_ops,
24375 .cable_detect = rdc_pata_cable_detect,
24376 .set_piomode = rdc_set_piomode,
24377diff -urNp linux-2.6.32.41/drivers/ata/pata_rz1000.c linux-2.6.32.41/drivers/ata/pata_rz1000.c
24378--- linux-2.6.32.41/drivers/ata/pata_rz1000.c 2011-03-27 14:31:47.000000000 -0400
24379+++ linux-2.6.32.41/drivers/ata/pata_rz1000.c 2011-04-17 15:56:46.000000000 -0400
24380@@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_
24381 ATA_PIO_SHT(DRV_NAME),
24382 };
24383
24384-static struct ata_port_operations rz1000_port_ops = {
24385+static const struct ata_port_operations rz1000_port_ops = {
24386 .inherits = &ata_sff_port_ops,
24387 .cable_detect = ata_cable_40wire,
24388 .set_mode = rz1000_set_mode,
24389diff -urNp linux-2.6.32.41/drivers/ata/pata_sc1200.c linux-2.6.32.41/drivers/ata/pata_sc1200.c
24390--- linux-2.6.32.41/drivers/ata/pata_sc1200.c 2011-03-27 14:31:47.000000000 -0400
24391+++ linux-2.6.32.41/drivers/ata/pata_sc1200.c 2011-04-17 15:56:46.000000000 -0400
24392@@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_
24393 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24394 };
24395
24396-static struct ata_port_operations sc1200_port_ops = {
24397+static const struct ata_port_operations sc1200_port_ops = {
24398 .inherits = &ata_bmdma_port_ops,
24399 .qc_prep = ata_sff_dumb_qc_prep,
24400 .qc_issue = sc1200_qc_issue,
24401diff -urNp linux-2.6.32.41/drivers/ata/pata_scc.c linux-2.6.32.41/drivers/ata/pata_scc.c
24402--- linux-2.6.32.41/drivers/ata/pata_scc.c 2011-03-27 14:31:47.000000000 -0400
24403+++ linux-2.6.32.41/drivers/ata/pata_scc.c 2011-04-17 15:56:46.000000000 -0400
24404@@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht
24405 ATA_BMDMA_SHT(DRV_NAME),
24406 };
24407
24408-static struct ata_port_operations scc_pata_ops = {
24409+static const struct ata_port_operations scc_pata_ops = {
24410 .inherits = &ata_bmdma_port_ops,
24411
24412 .set_piomode = scc_set_piomode,
24413diff -urNp linux-2.6.32.41/drivers/ata/pata_sch.c linux-2.6.32.41/drivers/ata/pata_sch.c
24414--- linux-2.6.32.41/drivers/ata/pata_sch.c 2011-03-27 14:31:47.000000000 -0400
24415+++ linux-2.6.32.41/drivers/ata/pata_sch.c 2011-04-17 15:56:46.000000000 -0400
24416@@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht
24417 ATA_BMDMA_SHT(DRV_NAME),
24418 };
24419
24420-static struct ata_port_operations sch_pata_ops = {
24421+static const struct ata_port_operations sch_pata_ops = {
24422 .inherits = &ata_bmdma_port_ops,
24423 .cable_detect = ata_cable_unknown,
24424 .set_piomode = sch_set_piomode,
24425diff -urNp linux-2.6.32.41/drivers/ata/pata_serverworks.c linux-2.6.32.41/drivers/ata/pata_serverworks.c
24426--- linux-2.6.32.41/drivers/ata/pata_serverworks.c 2011-03-27 14:31:47.000000000 -0400
24427+++ linux-2.6.32.41/drivers/ata/pata_serverworks.c 2011-04-17 15:56:46.000000000 -0400
24428@@ -299,7 +299,7 @@ static struct scsi_host_template serverw
24429 ATA_BMDMA_SHT(DRV_NAME),
24430 };
24431
24432-static struct ata_port_operations serverworks_osb4_port_ops = {
24433+static const struct ata_port_operations serverworks_osb4_port_ops = {
24434 .inherits = &ata_bmdma_port_ops,
24435 .cable_detect = serverworks_cable_detect,
24436 .mode_filter = serverworks_osb4_filter,
24437@@ -307,7 +307,7 @@ static struct ata_port_operations server
24438 .set_dmamode = serverworks_set_dmamode,
24439 };
24440
24441-static struct ata_port_operations serverworks_csb_port_ops = {
24442+static const struct ata_port_operations serverworks_csb_port_ops = {
24443 .inherits = &serverworks_osb4_port_ops,
24444 .mode_filter = serverworks_csb_filter,
24445 };
24446diff -urNp linux-2.6.32.41/drivers/ata/pata_sil680.c linux-2.6.32.41/drivers/ata/pata_sil680.c
24447--- linux-2.6.32.41/drivers/ata/pata_sil680.c 2011-03-27 14:31:47.000000000 -0400
24448+++ linux-2.6.32.41/drivers/ata/pata_sil680.c 2011-04-17 15:56:46.000000000 -0400
24449@@ -194,7 +194,7 @@ static struct scsi_host_template sil680_
24450 ATA_BMDMA_SHT(DRV_NAME),
24451 };
24452
24453-static struct ata_port_operations sil680_port_ops = {
24454+static const struct ata_port_operations sil680_port_ops = {
24455 .inherits = &ata_bmdma32_port_ops,
24456 .cable_detect = sil680_cable_detect,
24457 .set_piomode = sil680_set_piomode,
24458diff -urNp linux-2.6.32.41/drivers/ata/pata_sis.c linux-2.6.32.41/drivers/ata/pata_sis.c
24459--- linux-2.6.32.41/drivers/ata/pata_sis.c 2011-03-27 14:31:47.000000000 -0400
24460+++ linux-2.6.32.41/drivers/ata/pata_sis.c 2011-04-17 15:56:46.000000000 -0400
24461@@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht
24462 ATA_BMDMA_SHT(DRV_NAME),
24463 };
24464
24465-static struct ata_port_operations sis_133_for_sata_ops = {
24466+static const struct ata_port_operations sis_133_for_sata_ops = {
24467 .inherits = &ata_bmdma_port_ops,
24468 .set_piomode = sis_133_set_piomode,
24469 .set_dmamode = sis_133_set_dmamode,
24470 .cable_detect = sis_133_cable_detect,
24471 };
24472
24473-static struct ata_port_operations sis_base_ops = {
24474+static const struct ata_port_operations sis_base_ops = {
24475 .inherits = &ata_bmdma_port_ops,
24476 .prereset = sis_pre_reset,
24477 };
24478
24479-static struct ata_port_operations sis_133_ops = {
24480+static const struct ata_port_operations sis_133_ops = {
24481 .inherits = &sis_base_ops,
24482 .set_piomode = sis_133_set_piomode,
24483 .set_dmamode = sis_133_set_dmamode,
24484 .cable_detect = sis_133_cable_detect,
24485 };
24486
24487-static struct ata_port_operations sis_133_early_ops = {
24488+static const struct ata_port_operations sis_133_early_ops = {
24489 .inherits = &sis_base_ops,
24490 .set_piomode = sis_100_set_piomode,
24491 .set_dmamode = sis_133_early_set_dmamode,
24492 .cable_detect = sis_66_cable_detect,
24493 };
24494
24495-static struct ata_port_operations sis_100_ops = {
24496+static const struct ata_port_operations sis_100_ops = {
24497 .inherits = &sis_base_ops,
24498 .set_piomode = sis_100_set_piomode,
24499 .set_dmamode = sis_100_set_dmamode,
24500 .cable_detect = sis_66_cable_detect,
24501 };
24502
24503-static struct ata_port_operations sis_66_ops = {
24504+static const struct ata_port_operations sis_66_ops = {
24505 .inherits = &sis_base_ops,
24506 .set_piomode = sis_old_set_piomode,
24507 .set_dmamode = sis_66_set_dmamode,
24508 .cable_detect = sis_66_cable_detect,
24509 };
24510
24511-static struct ata_port_operations sis_old_ops = {
24512+static const struct ata_port_operations sis_old_ops = {
24513 .inherits = &sis_base_ops,
24514 .set_piomode = sis_old_set_piomode,
24515 .set_dmamode = sis_old_set_dmamode,
24516diff -urNp linux-2.6.32.41/drivers/ata/pata_sl82c105.c linux-2.6.32.41/drivers/ata/pata_sl82c105.c
24517--- linux-2.6.32.41/drivers/ata/pata_sl82c105.c 2011-03-27 14:31:47.000000000 -0400
24518+++ linux-2.6.32.41/drivers/ata/pata_sl82c105.c 2011-04-17 15:56:46.000000000 -0400
24519@@ -231,7 +231,7 @@ static struct scsi_host_template sl82c10
24520 ATA_BMDMA_SHT(DRV_NAME),
24521 };
24522
24523-static struct ata_port_operations sl82c105_port_ops = {
24524+static const struct ata_port_operations sl82c105_port_ops = {
24525 .inherits = &ata_bmdma_port_ops,
24526 .qc_defer = sl82c105_qc_defer,
24527 .bmdma_start = sl82c105_bmdma_start,
24528diff -urNp linux-2.6.32.41/drivers/ata/pata_triflex.c linux-2.6.32.41/drivers/ata/pata_triflex.c
24529--- linux-2.6.32.41/drivers/ata/pata_triflex.c 2011-03-27 14:31:47.000000000 -0400
24530+++ linux-2.6.32.41/drivers/ata/pata_triflex.c 2011-04-17 15:56:46.000000000 -0400
24531@@ -178,7 +178,7 @@ static struct scsi_host_template triflex
24532 ATA_BMDMA_SHT(DRV_NAME),
24533 };
24534
24535-static struct ata_port_operations triflex_port_ops = {
24536+static const struct ata_port_operations triflex_port_ops = {
24537 .inherits = &ata_bmdma_port_ops,
24538 .bmdma_start = triflex_bmdma_start,
24539 .bmdma_stop = triflex_bmdma_stop,
24540diff -urNp linux-2.6.32.41/drivers/ata/pata_via.c linux-2.6.32.41/drivers/ata/pata_via.c
24541--- linux-2.6.32.41/drivers/ata/pata_via.c 2011-03-27 14:31:47.000000000 -0400
24542+++ linux-2.6.32.41/drivers/ata/pata_via.c 2011-04-17 15:56:46.000000000 -0400
24543@@ -419,7 +419,7 @@ static struct scsi_host_template via_sht
24544 ATA_BMDMA_SHT(DRV_NAME),
24545 };
24546
24547-static struct ata_port_operations via_port_ops = {
24548+static const struct ata_port_operations via_port_ops = {
24549 .inherits = &ata_bmdma_port_ops,
24550 .cable_detect = via_cable_detect,
24551 .set_piomode = via_set_piomode,
24552@@ -429,7 +429,7 @@ static struct ata_port_operations via_po
24553 .port_start = via_port_start,
24554 };
24555
24556-static struct ata_port_operations via_port_ops_noirq = {
24557+static const struct ata_port_operations via_port_ops_noirq = {
24558 .inherits = &via_port_ops,
24559 .sff_data_xfer = ata_sff_data_xfer_noirq,
24560 };
24561diff -urNp linux-2.6.32.41/drivers/ata/pata_winbond.c linux-2.6.32.41/drivers/ata/pata_winbond.c
24562--- linux-2.6.32.41/drivers/ata/pata_winbond.c 2011-03-27 14:31:47.000000000 -0400
24563+++ linux-2.6.32.41/drivers/ata/pata_winbond.c 2011-04-17 15:56:46.000000000 -0400
24564@@ -125,7 +125,7 @@ static struct scsi_host_template winbond
24565 ATA_PIO_SHT(DRV_NAME),
24566 };
24567
24568-static struct ata_port_operations winbond_port_ops = {
24569+static const struct ata_port_operations winbond_port_ops = {
24570 .inherits = &ata_sff_port_ops,
24571 .sff_data_xfer = winbond_data_xfer,
24572 .cable_detect = ata_cable_40wire,
24573diff -urNp linux-2.6.32.41/drivers/ata/pdc_adma.c linux-2.6.32.41/drivers/ata/pdc_adma.c
24574--- linux-2.6.32.41/drivers/ata/pdc_adma.c 2011-03-27 14:31:47.000000000 -0400
24575+++ linux-2.6.32.41/drivers/ata/pdc_adma.c 2011-04-17 15:56:46.000000000 -0400
24576@@ -145,7 +145,7 @@ static struct scsi_host_template adma_at
24577 .dma_boundary = ADMA_DMA_BOUNDARY,
24578 };
24579
24580-static struct ata_port_operations adma_ata_ops = {
24581+static const struct ata_port_operations adma_ata_ops = {
24582 .inherits = &ata_sff_port_ops,
24583
24584 .lost_interrupt = ATA_OP_NULL,
24585diff -urNp linux-2.6.32.41/drivers/ata/sata_fsl.c linux-2.6.32.41/drivers/ata/sata_fsl.c
24586--- linux-2.6.32.41/drivers/ata/sata_fsl.c 2011-03-27 14:31:47.000000000 -0400
24587+++ linux-2.6.32.41/drivers/ata/sata_fsl.c 2011-04-17 15:56:46.000000000 -0400
24588@@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fs
24589 .dma_boundary = ATA_DMA_BOUNDARY,
24590 };
24591
24592-static struct ata_port_operations sata_fsl_ops = {
24593+static const struct ata_port_operations sata_fsl_ops = {
24594 .inherits = &sata_pmp_port_ops,
24595
24596 .qc_defer = ata_std_qc_defer,
24597diff -urNp linux-2.6.32.41/drivers/ata/sata_inic162x.c linux-2.6.32.41/drivers/ata/sata_inic162x.c
24598--- linux-2.6.32.41/drivers/ata/sata_inic162x.c 2011-03-27 14:31:47.000000000 -0400
24599+++ linux-2.6.32.41/drivers/ata/sata_inic162x.c 2011-04-17 15:56:46.000000000 -0400
24600@@ -721,7 +721,7 @@ static int inic_port_start(struct ata_po
24601 return 0;
24602 }
24603
24604-static struct ata_port_operations inic_port_ops = {
24605+static const struct ata_port_operations inic_port_ops = {
24606 .inherits = &sata_port_ops,
24607
24608 .check_atapi_dma = inic_check_atapi_dma,
24609diff -urNp linux-2.6.32.41/drivers/ata/sata_mv.c linux-2.6.32.41/drivers/ata/sata_mv.c
24610--- linux-2.6.32.41/drivers/ata/sata_mv.c 2011-03-27 14:31:47.000000000 -0400
24611+++ linux-2.6.32.41/drivers/ata/sata_mv.c 2011-04-17 15:56:46.000000000 -0400
24612@@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht
24613 .dma_boundary = MV_DMA_BOUNDARY,
24614 };
24615
24616-static struct ata_port_operations mv5_ops = {
24617+static const struct ata_port_operations mv5_ops = {
24618 .inherits = &ata_sff_port_ops,
24619
24620 .lost_interrupt = ATA_OP_NULL,
24621@@ -678,7 +678,7 @@ static struct ata_port_operations mv5_op
24622 .port_stop = mv_port_stop,
24623 };
24624
24625-static struct ata_port_operations mv6_ops = {
24626+static const struct ata_port_operations mv6_ops = {
24627 .inherits = &mv5_ops,
24628 .dev_config = mv6_dev_config,
24629 .scr_read = mv_scr_read,
24630@@ -698,7 +698,7 @@ static struct ata_port_operations mv6_op
24631 .bmdma_status = mv_bmdma_status,
24632 };
24633
24634-static struct ata_port_operations mv_iie_ops = {
24635+static const struct ata_port_operations mv_iie_ops = {
24636 .inherits = &mv6_ops,
24637 .dev_config = ATA_OP_NULL,
24638 .qc_prep = mv_qc_prep_iie,
24639diff -urNp linux-2.6.32.41/drivers/ata/sata_nv.c linux-2.6.32.41/drivers/ata/sata_nv.c
24640--- linux-2.6.32.41/drivers/ata/sata_nv.c 2011-03-27 14:31:47.000000000 -0400
24641+++ linux-2.6.32.41/drivers/ata/sata_nv.c 2011-04-17 15:56:46.000000000 -0400
24642@@ -464,7 +464,7 @@ static struct scsi_host_template nv_swnc
24643 * cases. Define nv_hardreset() which only kicks in for post-boot
24644 * probing and use it for all variants.
24645 */
24646-static struct ata_port_operations nv_generic_ops = {
24647+static const struct ata_port_operations nv_generic_ops = {
24648 .inherits = &ata_bmdma_port_ops,
24649 .lost_interrupt = ATA_OP_NULL,
24650 .scr_read = nv_scr_read,
24651@@ -472,20 +472,20 @@ static struct ata_port_operations nv_gen
24652 .hardreset = nv_hardreset,
24653 };
24654
24655-static struct ata_port_operations nv_nf2_ops = {
24656+static const struct ata_port_operations nv_nf2_ops = {
24657 .inherits = &nv_generic_ops,
24658 .freeze = nv_nf2_freeze,
24659 .thaw = nv_nf2_thaw,
24660 };
24661
24662-static struct ata_port_operations nv_ck804_ops = {
24663+static const struct ata_port_operations nv_ck804_ops = {
24664 .inherits = &nv_generic_ops,
24665 .freeze = nv_ck804_freeze,
24666 .thaw = nv_ck804_thaw,
24667 .host_stop = nv_ck804_host_stop,
24668 };
24669
24670-static struct ata_port_operations nv_adma_ops = {
24671+static const struct ata_port_operations nv_adma_ops = {
24672 .inherits = &nv_ck804_ops,
24673
24674 .check_atapi_dma = nv_adma_check_atapi_dma,
24675@@ -509,7 +509,7 @@ static struct ata_port_operations nv_adm
24676 .host_stop = nv_adma_host_stop,
24677 };
24678
24679-static struct ata_port_operations nv_swncq_ops = {
24680+static const struct ata_port_operations nv_swncq_ops = {
24681 .inherits = &nv_generic_ops,
24682
24683 .qc_defer = ata_std_qc_defer,
24684diff -urNp linux-2.6.32.41/drivers/ata/sata_promise.c linux-2.6.32.41/drivers/ata/sata_promise.c
24685--- linux-2.6.32.41/drivers/ata/sata_promise.c 2011-03-27 14:31:47.000000000 -0400
24686+++ linux-2.6.32.41/drivers/ata/sata_promise.c 2011-04-17 15:56:46.000000000 -0400
24687@@ -195,7 +195,7 @@ static const struct ata_port_operations
24688 .error_handler = pdc_error_handler,
24689 };
24690
24691-static struct ata_port_operations pdc_sata_ops = {
24692+static const struct ata_port_operations pdc_sata_ops = {
24693 .inherits = &pdc_common_ops,
24694 .cable_detect = pdc_sata_cable_detect,
24695 .freeze = pdc_sata_freeze,
24696@@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sa
24697
24698 /* First-generation chips need a more restrictive ->check_atapi_dma op,
24699 and ->freeze/thaw that ignore the hotplug controls. */
24700-static struct ata_port_operations pdc_old_sata_ops = {
24701+static const struct ata_port_operations pdc_old_sata_ops = {
24702 .inherits = &pdc_sata_ops,
24703 .freeze = pdc_freeze,
24704 .thaw = pdc_thaw,
24705 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
24706 };
24707
24708-static struct ata_port_operations pdc_pata_ops = {
24709+static const struct ata_port_operations pdc_pata_ops = {
24710 .inherits = &pdc_common_ops,
24711 .cable_detect = pdc_pata_cable_detect,
24712 .freeze = pdc_freeze,
24713diff -urNp linux-2.6.32.41/drivers/ata/sata_qstor.c linux-2.6.32.41/drivers/ata/sata_qstor.c
24714--- linux-2.6.32.41/drivers/ata/sata_qstor.c 2011-03-27 14:31:47.000000000 -0400
24715+++ linux-2.6.32.41/drivers/ata/sata_qstor.c 2011-04-17 15:56:46.000000000 -0400
24716@@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_
24717 .dma_boundary = QS_DMA_BOUNDARY,
24718 };
24719
24720-static struct ata_port_operations qs_ata_ops = {
24721+static const struct ata_port_operations qs_ata_ops = {
24722 .inherits = &ata_sff_port_ops,
24723
24724 .check_atapi_dma = qs_check_atapi_dma,
24725diff -urNp linux-2.6.32.41/drivers/ata/sata_sil24.c linux-2.6.32.41/drivers/ata/sata_sil24.c
24726--- linux-2.6.32.41/drivers/ata/sata_sil24.c 2011-03-27 14:31:47.000000000 -0400
24727+++ linux-2.6.32.41/drivers/ata/sata_sil24.c 2011-04-17 15:56:46.000000000 -0400
24728@@ -388,7 +388,7 @@ static struct scsi_host_template sil24_s
24729 .dma_boundary = ATA_DMA_BOUNDARY,
24730 };
24731
24732-static struct ata_port_operations sil24_ops = {
24733+static const struct ata_port_operations sil24_ops = {
24734 .inherits = &sata_pmp_port_ops,
24735
24736 .qc_defer = sil24_qc_defer,
24737diff -urNp linux-2.6.32.41/drivers/ata/sata_sil.c linux-2.6.32.41/drivers/ata/sata_sil.c
24738--- linux-2.6.32.41/drivers/ata/sata_sil.c 2011-03-27 14:31:47.000000000 -0400
24739+++ linux-2.6.32.41/drivers/ata/sata_sil.c 2011-04-17 15:56:46.000000000 -0400
24740@@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht
24741 .sg_tablesize = ATA_MAX_PRD
24742 };
24743
24744-static struct ata_port_operations sil_ops = {
24745+static const struct ata_port_operations sil_ops = {
24746 .inherits = &ata_bmdma32_port_ops,
24747 .dev_config = sil_dev_config,
24748 .set_mode = sil_set_mode,
24749diff -urNp linux-2.6.32.41/drivers/ata/sata_sis.c linux-2.6.32.41/drivers/ata/sata_sis.c
24750--- linux-2.6.32.41/drivers/ata/sata_sis.c 2011-03-27 14:31:47.000000000 -0400
24751+++ linux-2.6.32.41/drivers/ata/sata_sis.c 2011-04-17 15:56:46.000000000 -0400
24752@@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht
24753 ATA_BMDMA_SHT(DRV_NAME),
24754 };
24755
24756-static struct ata_port_operations sis_ops = {
24757+static const struct ata_port_operations sis_ops = {
24758 .inherits = &ata_bmdma_port_ops,
24759 .scr_read = sis_scr_read,
24760 .scr_write = sis_scr_write,
24761diff -urNp linux-2.6.32.41/drivers/ata/sata_svw.c linux-2.6.32.41/drivers/ata/sata_svw.c
24762--- linux-2.6.32.41/drivers/ata/sata_svw.c 2011-03-27 14:31:47.000000000 -0400
24763+++ linux-2.6.32.41/drivers/ata/sata_svw.c 2011-04-17 15:56:46.000000000 -0400
24764@@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata
24765 };
24766
24767
24768-static struct ata_port_operations k2_sata_ops = {
24769+static const struct ata_port_operations k2_sata_ops = {
24770 .inherits = &ata_bmdma_port_ops,
24771 .sff_tf_load = k2_sata_tf_load,
24772 .sff_tf_read = k2_sata_tf_read,
24773diff -urNp linux-2.6.32.41/drivers/ata/sata_sx4.c linux-2.6.32.41/drivers/ata/sata_sx4.c
24774--- linux-2.6.32.41/drivers/ata/sata_sx4.c 2011-03-27 14:31:47.000000000 -0400
24775+++ linux-2.6.32.41/drivers/ata/sata_sx4.c 2011-04-17 15:56:46.000000000 -0400
24776@@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sat
24777 };
24778
24779 /* TODO: inherit from base port_ops after converting to new EH */
24780-static struct ata_port_operations pdc_20621_ops = {
24781+static const struct ata_port_operations pdc_20621_ops = {
24782 .inherits = &ata_sff_port_ops,
24783
24784 .check_atapi_dma = pdc_check_atapi_dma,
24785diff -urNp linux-2.6.32.41/drivers/ata/sata_uli.c linux-2.6.32.41/drivers/ata/sata_uli.c
24786--- linux-2.6.32.41/drivers/ata/sata_uli.c 2011-03-27 14:31:47.000000000 -0400
24787+++ linux-2.6.32.41/drivers/ata/sata_uli.c 2011-04-17 15:56:46.000000000 -0400
24788@@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht
24789 ATA_BMDMA_SHT(DRV_NAME),
24790 };
24791
24792-static struct ata_port_operations uli_ops = {
24793+static const struct ata_port_operations uli_ops = {
24794 .inherits = &ata_bmdma_port_ops,
24795 .scr_read = uli_scr_read,
24796 .scr_write = uli_scr_write,
24797diff -urNp linux-2.6.32.41/drivers/ata/sata_via.c linux-2.6.32.41/drivers/ata/sata_via.c
24798--- linux-2.6.32.41/drivers/ata/sata_via.c 2011-05-10 22:12:01.000000000 -0400
24799+++ linux-2.6.32.41/drivers/ata/sata_via.c 2011-05-10 22:15:08.000000000 -0400
24800@@ -115,32 +115,32 @@ static struct scsi_host_template svia_sh
24801 ATA_BMDMA_SHT(DRV_NAME),
24802 };
24803
24804-static struct ata_port_operations svia_base_ops = {
24805+static const struct ata_port_operations svia_base_ops = {
24806 .inherits = &ata_bmdma_port_ops,
24807 .sff_tf_load = svia_tf_load,
24808 };
24809
24810-static struct ata_port_operations vt6420_sata_ops = {
24811+static const struct ata_port_operations vt6420_sata_ops = {
24812 .inherits = &svia_base_ops,
24813 .freeze = svia_noop_freeze,
24814 .prereset = vt6420_prereset,
24815 .bmdma_start = vt6420_bmdma_start,
24816 };
24817
24818-static struct ata_port_operations vt6421_pata_ops = {
24819+static const struct ata_port_operations vt6421_pata_ops = {
24820 .inherits = &svia_base_ops,
24821 .cable_detect = vt6421_pata_cable_detect,
24822 .set_piomode = vt6421_set_pio_mode,
24823 .set_dmamode = vt6421_set_dma_mode,
24824 };
24825
24826-static struct ata_port_operations vt6421_sata_ops = {
24827+static const struct ata_port_operations vt6421_sata_ops = {
24828 .inherits = &svia_base_ops,
24829 .scr_read = svia_scr_read,
24830 .scr_write = svia_scr_write,
24831 };
24832
24833-static struct ata_port_operations vt8251_ops = {
24834+static const struct ata_port_operations vt8251_ops = {
24835 .inherits = &svia_base_ops,
24836 .hardreset = sata_std_hardreset,
24837 .scr_read = vt8251_scr_read,
24838diff -urNp linux-2.6.32.41/drivers/ata/sata_vsc.c linux-2.6.32.41/drivers/ata/sata_vsc.c
24839--- linux-2.6.32.41/drivers/ata/sata_vsc.c 2011-03-27 14:31:47.000000000 -0400
24840+++ linux-2.6.32.41/drivers/ata/sata_vsc.c 2011-04-17 15:56:46.000000000 -0400
24841@@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sat
24842 };
24843
24844
24845-static struct ata_port_operations vsc_sata_ops = {
24846+static const struct ata_port_operations vsc_sata_ops = {
24847 .inherits = &ata_bmdma_port_ops,
24848 /* The IRQ handling is not quite standard SFF behaviour so we
24849 cannot use the default lost interrupt handler */
24850diff -urNp linux-2.6.32.41/drivers/atm/adummy.c linux-2.6.32.41/drivers/atm/adummy.c
24851--- linux-2.6.32.41/drivers/atm/adummy.c 2011-03-27 14:31:47.000000000 -0400
24852+++ linux-2.6.32.41/drivers/atm/adummy.c 2011-04-17 15:56:46.000000000 -0400
24853@@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct
24854 vcc->pop(vcc, skb);
24855 else
24856 dev_kfree_skb_any(skb);
24857- atomic_inc(&vcc->stats->tx);
24858+ atomic_inc_unchecked(&vcc->stats->tx);
24859
24860 return 0;
24861 }
24862diff -urNp linux-2.6.32.41/drivers/atm/ambassador.c linux-2.6.32.41/drivers/atm/ambassador.c
24863--- linux-2.6.32.41/drivers/atm/ambassador.c 2011-03-27 14:31:47.000000000 -0400
24864+++ linux-2.6.32.41/drivers/atm/ambassador.c 2011-04-17 15:56:46.000000000 -0400
24865@@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev,
24866 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
24867
24868 // VC layer stats
24869- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
24870+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
24871
24872 // free the descriptor
24873 kfree (tx_descr);
24874@@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev,
24875 dump_skb ("<<<", vc, skb);
24876
24877 // VC layer stats
24878- atomic_inc(&atm_vcc->stats->rx);
24879+ atomic_inc_unchecked(&atm_vcc->stats->rx);
24880 __net_timestamp(skb);
24881 // end of our responsability
24882 atm_vcc->push (atm_vcc, skb);
24883@@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev,
24884 } else {
24885 PRINTK (KERN_INFO, "dropped over-size frame");
24886 // should we count this?
24887- atomic_inc(&atm_vcc->stats->rx_drop);
24888+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
24889 }
24890
24891 } else {
24892@@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * at
24893 }
24894
24895 if (check_area (skb->data, skb->len)) {
24896- atomic_inc(&atm_vcc->stats->tx_err);
24897+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
24898 return -ENOMEM; // ?
24899 }
24900
24901diff -urNp linux-2.6.32.41/drivers/atm/atmtcp.c linux-2.6.32.41/drivers/atm/atmtcp.c
24902--- linux-2.6.32.41/drivers/atm/atmtcp.c 2011-03-27 14:31:47.000000000 -0400
24903+++ linux-2.6.32.41/drivers/atm/atmtcp.c 2011-04-17 15:56:46.000000000 -0400
24904@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc
24905 if (vcc->pop) vcc->pop(vcc,skb);
24906 else dev_kfree_skb(skb);
24907 if (dev_data) return 0;
24908- atomic_inc(&vcc->stats->tx_err);
24909+ atomic_inc_unchecked(&vcc->stats->tx_err);
24910 return -ENOLINK;
24911 }
24912 size = skb->len+sizeof(struct atmtcp_hdr);
24913@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc
24914 if (!new_skb) {
24915 if (vcc->pop) vcc->pop(vcc,skb);
24916 else dev_kfree_skb(skb);
24917- atomic_inc(&vcc->stats->tx_err);
24918+ atomic_inc_unchecked(&vcc->stats->tx_err);
24919 return -ENOBUFS;
24920 }
24921 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
24922@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc
24923 if (vcc->pop) vcc->pop(vcc,skb);
24924 else dev_kfree_skb(skb);
24925 out_vcc->push(out_vcc,new_skb);
24926- atomic_inc(&vcc->stats->tx);
24927- atomic_inc(&out_vcc->stats->rx);
24928+ atomic_inc_unchecked(&vcc->stats->tx);
24929+ atomic_inc_unchecked(&out_vcc->stats->rx);
24930 return 0;
24931 }
24932
24933@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc
24934 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
24935 read_unlock(&vcc_sklist_lock);
24936 if (!out_vcc) {
24937- atomic_inc(&vcc->stats->tx_err);
24938+ atomic_inc_unchecked(&vcc->stats->tx_err);
24939 goto done;
24940 }
24941 skb_pull(skb,sizeof(struct atmtcp_hdr));
24942@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc
24943 __net_timestamp(new_skb);
24944 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
24945 out_vcc->push(out_vcc,new_skb);
24946- atomic_inc(&vcc->stats->tx);
24947- atomic_inc(&out_vcc->stats->rx);
24948+ atomic_inc_unchecked(&vcc->stats->tx);
24949+ atomic_inc_unchecked(&out_vcc->stats->rx);
24950 done:
24951 if (vcc->pop) vcc->pop(vcc,skb);
24952 else dev_kfree_skb(skb);
24953diff -urNp linux-2.6.32.41/drivers/atm/eni.c linux-2.6.32.41/drivers/atm/eni.c
24954--- linux-2.6.32.41/drivers/atm/eni.c 2011-03-27 14:31:47.000000000 -0400
24955+++ linux-2.6.32.41/drivers/atm/eni.c 2011-04-17 15:56:46.000000000 -0400
24956@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
24957 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
24958 vcc->dev->number);
24959 length = 0;
24960- atomic_inc(&vcc->stats->rx_err);
24961+ atomic_inc_unchecked(&vcc->stats->rx_err);
24962 }
24963 else {
24964 length = ATM_CELL_SIZE-1; /* no HEC */
24965@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
24966 size);
24967 }
24968 eff = length = 0;
24969- atomic_inc(&vcc->stats->rx_err);
24970+ atomic_inc_unchecked(&vcc->stats->rx_err);
24971 }
24972 else {
24973 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
24974@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
24975 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
24976 vcc->dev->number,vcc->vci,length,size << 2,descr);
24977 length = eff = 0;
24978- atomic_inc(&vcc->stats->rx_err);
24979+ atomic_inc_unchecked(&vcc->stats->rx_err);
24980 }
24981 }
24982 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
24983@@ -770,7 +770,7 @@ rx_dequeued++;
24984 vcc->push(vcc,skb);
24985 pushed++;
24986 }
24987- atomic_inc(&vcc->stats->rx);
24988+ atomic_inc_unchecked(&vcc->stats->rx);
24989 }
24990 wake_up(&eni_dev->rx_wait);
24991 }
24992@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *d
24993 PCI_DMA_TODEVICE);
24994 if (vcc->pop) vcc->pop(vcc,skb);
24995 else dev_kfree_skb_irq(skb);
24996- atomic_inc(&vcc->stats->tx);
24997+ atomic_inc_unchecked(&vcc->stats->tx);
24998 wake_up(&eni_dev->tx_wait);
24999 dma_complete++;
25000 }
25001diff -urNp linux-2.6.32.41/drivers/atm/firestream.c linux-2.6.32.41/drivers/atm/firestream.c
25002--- linux-2.6.32.41/drivers/atm/firestream.c 2011-03-27 14:31:47.000000000 -0400
25003+++ linux-2.6.32.41/drivers/atm/firestream.c 2011-04-17 15:56:46.000000000 -0400
25004@@ -748,7 +748,7 @@ static void process_txdone_queue (struct
25005 }
25006 }
25007
25008- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25009+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25010
25011 fs_dprintk (FS_DEBUG_TXMEM, "i");
25012 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
25013@@ -815,7 +815,7 @@ static void process_incoming (struct fs_
25014 #endif
25015 skb_put (skb, qe->p1 & 0xffff);
25016 ATM_SKB(skb)->vcc = atm_vcc;
25017- atomic_inc(&atm_vcc->stats->rx);
25018+ atomic_inc_unchecked(&atm_vcc->stats->rx);
25019 __net_timestamp(skb);
25020 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
25021 atm_vcc->push (atm_vcc, skb);
25022@@ -836,12 +836,12 @@ static void process_incoming (struct fs_
25023 kfree (pe);
25024 }
25025 if (atm_vcc)
25026- atomic_inc(&atm_vcc->stats->rx_drop);
25027+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25028 break;
25029 case 0x1f: /* Reassembly abort: no buffers. */
25030 /* Silently increment error counter. */
25031 if (atm_vcc)
25032- atomic_inc(&atm_vcc->stats->rx_drop);
25033+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25034 break;
25035 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
25036 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
25037diff -urNp linux-2.6.32.41/drivers/atm/fore200e.c linux-2.6.32.41/drivers/atm/fore200e.c
25038--- linux-2.6.32.41/drivers/atm/fore200e.c 2011-03-27 14:31:47.000000000 -0400
25039+++ linux-2.6.32.41/drivers/atm/fore200e.c 2011-04-17 15:56:46.000000000 -0400
25040@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200
25041 #endif
25042 /* check error condition */
25043 if (*entry->status & STATUS_ERROR)
25044- atomic_inc(&vcc->stats->tx_err);
25045+ atomic_inc_unchecked(&vcc->stats->tx_err);
25046 else
25047- atomic_inc(&vcc->stats->tx);
25048+ atomic_inc_unchecked(&vcc->stats->tx);
25049 }
25050 }
25051
25052@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore2
25053 if (skb == NULL) {
25054 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
25055
25056- atomic_inc(&vcc->stats->rx_drop);
25057+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25058 return -ENOMEM;
25059 }
25060
25061@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore2
25062
25063 dev_kfree_skb_any(skb);
25064
25065- atomic_inc(&vcc->stats->rx_drop);
25066+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25067 return -ENOMEM;
25068 }
25069
25070 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25071
25072 vcc->push(vcc, skb);
25073- atomic_inc(&vcc->stats->rx);
25074+ atomic_inc_unchecked(&vcc->stats->rx);
25075
25076 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25077
25078@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200
25079 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
25080 fore200e->atm_dev->number,
25081 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
25082- atomic_inc(&vcc->stats->rx_err);
25083+ atomic_inc_unchecked(&vcc->stats->rx_err);
25084 }
25085 }
25086
25087@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struc
25088 goto retry_here;
25089 }
25090
25091- atomic_inc(&vcc->stats->tx_err);
25092+ atomic_inc_unchecked(&vcc->stats->tx_err);
25093
25094 fore200e->tx_sat++;
25095 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
25096diff -urNp linux-2.6.32.41/drivers/atm/he.c linux-2.6.32.41/drivers/atm/he.c
25097--- linux-2.6.32.41/drivers/atm/he.c 2011-03-27 14:31:47.000000000 -0400
25098+++ linux-2.6.32.41/drivers/atm/he.c 2011-04-17 15:56:46.000000000 -0400
25099@@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, i
25100
25101 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
25102 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
25103- atomic_inc(&vcc->stats->rx_drop);
25104+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25105 goto return_host_buffers;
25106 }
25107
25108@@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, i
25109 RBRQ_LEN_ERR(he_dev->rbrq_head)
25110 ? "LEN_ERR" : "",
25111 vcc->vpi, vcc->vci);
25112- atomic_inc(&vcc->stats->rx_err);
25113+ atomic_inc_unchecked(&vcc->stats->rx_err);
25114 goto return_host_buffers;
25115 }
25116
25117@@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, i
25118 vcc->push(vcc, skb);
25119 spin_lock(&he_dev->global_lock);
25120
25121- atomic_inc(&vcc->stats->rx);
25122+ atomic_inc_unchecked(&vcc->stats->rx);
25123
25124 return_host_buffers:
25125 ++pdus_assembled;
25126@@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
25127 tpd->vcc->pop(tpd->vcc, tpd->skb);
25128 else
25129 dev_kfree_skb_any(tpd->skb);
25130- atomic_inc(&tpd->vcc->stats->tx_err);
25131+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
25132 }
25133 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
25134 return;
25135@@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25136 vcc->pop(vcc, skb);
25137 else
25138 dev_kfree_skb_any(skb);
25139- atomic_inc(&vcc->stats->tx_err);
25140+ atomic_inc_unchecked(&vcc->stats->tx_err);
25141 return -EINVAL;
25142 }
25143
25144@@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25145 vcc->pop(vcc, skb);
25146 else
25147 dev_kfree_skb_any(skb);
25148- atomic_inc(&vcc->stats->tx_err);
25149+ atomic_inc_unchecked(&vcc->stats->tx_err);
25150 return -EINVAL;
25151 }
25152 #endif
25153@@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25154 vcc->pop(vcc, skb);
25155 else
25156 dev_kfree_skb_any(skb);
25157- atomic_inc(&vcc->stats->tx_err);
25158+ atomic_inc_unchecked(&vcc->stats->tx_err);
25159 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25160 return -ENOMEM;
25161 }
25162@@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25163 vcc->pop(vcc, skb);
25164 else
25165 dev_kfree_skb_any(skb);
25166- atomic_inc(&vcc->stats->tx_err);
25167+ atomic_inc_unchecked(&vcc->stats->tx_err);
25168 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25169 return -ENOMEM;
25170 }
25171@@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25172 __enqueue_tpd(he_dev, tpd, cid);
25173 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25174
25175- atomic_inc(&vcc->stats->tx);
25176+ atomic_inc_unchecked(&vcc->stats->tx);
25177
25178 return 0;
25179 }
25180diff -urNp linux-2.6.32.41/drivers/atm/horizon.c linux-2.6.32.41/drivers/atm/horizon.c
25181--- linux-2.6.32.41/drivers/atm/horizon.c 2011-03-27 14:31:47.000000000 -0400
25182+++ linux-2.6.32.41/drivers/atm/horizon.c 2011-04-17 15:56:46.000000000 -0400
25183@@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev,
25184 {
25185 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
25186 // VC layer stats
25187- atomic_inc(&vcc->stats->rx);
25188+ atomic_inc_unchecked(&vcc->stats->rx);
25189 __net_timestamp(skb);
25190 // end of our responsability
25191 vcc->push (vcc, skb);
25192@@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const
25193 dev->tx_iovec = NULL;
25194
25195 // VC layer stats
25196- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25197+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25198
25199 // free the skb
25200 hrz_kfree_skb (skb);
25201diff -urNp linux-2.6.32.41/drivers/atm/idt77252.c linux-2.6.32.41/drivers/atm/idt77252.c
25202--- linux-2.6.32.41/drivers/atm/idt77252.c 2011-03-27 14:31:47.000000000 -0400
25203+++ linux-2.6.32.41/drivers/atm/idt77252.c 2011-04-17 15:56:46.000000000 -0400
25204@@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, str
25205 else
25206 dev_kfree_skb(skb);
25207
25208- atomic_inc(&vcc->stats->tx);
25209+ atomic_inc_unchecked(&vcc->stats->tx);
25210 }
25211
25212 atomic_dec(&scq->used);
25213@@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, st
25214 if ((sb = dev_alloc_skb(64)) == NULL) {
25215 printk("%s: Can't allocate buffers for aal0.\n",
25216 card->name);
25217- atomic_add(i, &vcc->stats->rx_drop);
25218+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
25219 break;
25220 }
25221 if (!atm_charge(vcc, sb->truesize)) {
25222 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
25223 card->name);
25224- atomic_add(i - 1, &vcc->stats->rx_drop);
25225+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
25226 dev_kfree_skb(sb);
25227 break;
25228 }
25229@@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, st
25230 ATM_SKB(sb)->vcc = vcc;
25231 __net_timestamp(sb);
25232 vcc->push(vcc, sb);
25233- atomic_inc(&vcc->stats->rx);
25234+ atomic_inc_unchecked(&vcc->stats->rx);
25235
25236 cell += ATM_CELL_PAYLOAD;
25237 }
25238@@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, st
25239 "(CDC: %08x)\n",
25240 card->name, len, rpp->len, readl(SAR_REG_CDC));
25241 recycle_rx_pool_skb(card, rpp);
25242- atomic_inc(&vcc->stats->rx_err);
25243+ atomic_inc_unchecked(&vcc->stats->rx_err);
25244 return;
25245 }
25246 if (stat & SAR_RSQE_CRC) {
25247 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
25248 recycle_rx_pool_skb(card, rpp);
25249- atomic_inc(&vcc->stats->rx_err);
25250+ atomic_inc_unchecked(&vcc->stats->rx_err);
25251 return;
25252 }
25253 if (skb_queue_len(&rpp->queue) > 1) {
25254@@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, st
25255 RXPRINTK("%s: Can't alloc RX skb.\n",
25256 card->name);
25257 recycle_rx_pool_skb(card, rpp);
25258- atomic_inc(&vcc->stats->rx_err);
25259+ atomic_inc_unchecked(&vcc->stats->rx_err);
25260 return;
25261 }
25262 if (!atm_charge(vcc, skb->truesize)) {
25263@@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, st
25264 __net_timestamp(skb);
25265
25266 vcc->push(vcc, skb);
25267- atomic_inc(&vcc->stats->rx);
25268+ atomic_inc_unchecked(&vcc->stats->rx);
25269
25270 return;
25271 }
25272@@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, st
25273 __net_timestamp(skb);
25274
25275 vcc->push(vcc, skb);
25276- atomic_inc(&vcc->stats->rx);
25277+ atomic_inc_unchecked(&vcc->stats->rx);
25278
25279 if (skb->truesize > SAR_FB_SIZE_3)
25280 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
25281@@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
25282 if (vcc->qos.aal != ATM_AAL0) {
25283 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
25284 card->name, vpi, vci);
25285- atomic_inc(&vcc->stats->rx_drop);
25286+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25287 goto drop;
25288 }
25289
25290 if ((sb = dev_alloc_skb(64)) == NULL) {
25291 printk("%s: Can't allocate buffers for AAL0.\n",
25292 card->name);
25293- atomic_inc(&vcc->stats->rx_err);
25294+ atomic_inc_unchecked(&vcc->stats->rx_err);
25295 goto drop;
25296 }
25297
25298@@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
25299 ATM_SKB(sb)->vcc = vcc;
25300 __net_timestamp(sb);
25301 vcc->push(vcc, sb);
25302- atomic_inc(&vcc->stats->rx);
25303+ atomic_inc_unchecked(&vcc->stats->rx);
25304
25305 drop:
25306 skb_pull(queue, 64);
25307@@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
25308
25309 if (vc == NULL) {
25310 printk("%s: NULL connection in send().\n", card->name);
25311- atomic_inc(&vcc->stats->tx_err);
25312+ atomic_inc_unchecked(&vcc->stats->tx_err);
25313 dev_kfree_skb(skb);
25314 return -EINVAL;
25315 }
25316 if (!test_bit(VCF_TX, &vc->flags)) {
25317 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
25318- atomic_inc(&vcc->stats->tx_err);
25319+ atomic_inc_unchecked(&vcc->stats->tx_err);
25320 dev_kfree_skb(skb);
25321 return -EINVAL;
25322 }
25323@@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
25324 break;
25325 default:
25326 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
25327- atomic_inc(&vcc->stats->tx_err);
25328+ atomic_inc_unchecked(&vcc->stats->tx_err);
25329 dev_kfree_skb(skb);
25330 return -EINVAL;
25331 }
25332
25333 if (skb_shinfo(skb)->nr_frags != 0) {
25334 printk("%s: No scatter-gather yet.\n", card->name);
25335- atomic_inc(&vcc->stats->tx_err);
25336+ atomic_inc_unchecked(&vcc->stats->tx_err);
25337 dev_kfree_skb(skb);
25338 return -EINVAL;
25339 }
25340@@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
25341
25342 err = queue_skb(card, vc, skb, oam);
25343 if (err) {
25344- atomic_inc(&vcc->stats->tx_err);
25345+ atomic_inc_unchecked(&vcc->stats->tx_err);
25346 dev_kfree_skb(skb);
25347 return err;
25348 }
25349@@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
25350 skb = dev_alloc_skb(64);
25351 if (!skb) {
25352 printk("%s: Out of memory in send_oam().\n", card->name);
25353- atomic_inc(&vcc->stats->tx_err);
25354+ atomic_inc_unchecked(&vcc->stats->tx_err);
25355 return -ENOMEM;
25356 }
25357 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
25358diff -urNp linux-2.6.32.41/drivers/atm/iphase.c linux-2.6.32.41/drivers/atm/iphase.c
25359--- linux-2.6.32.41/drivers/atm/iphase.c 2011-03-27 14:31:47.000000000 -0400
25360+++ linux-2.6.32.41/drivers/atm/iphase.c 2011-04-17 15:56:46.000000000 -0400
25361@@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
25362 status = (u_short) (buf_desc_ptr->desc_mode);
25363 if (status & (RX_CER | RX_PTE | RX_OFL))
25364 {
25365- atomic_inc(&vcc->stats->rx_err);
25366+ atomic_inc_unchecked(&vcc->stats->rx_err);
25367 IF_ERR(printk("IA: bad packet, dropping it");)
25368 if (status & RX_CER) {
25369 IF_ERR(printk(" cause: packet CRC error\n");)
25370@@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
25371 len = dma_addr - buf_addr;
25372 if (len > iadev->rx_buf_sz) {
25373 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
25374- atomic_inc(&vcc->stats->rx_err);
25375+ atomic_inc_unchecked(&vcc->stats->rx_err);
25376 goto out_free_desc;
25377 }
25378
25379@@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *
25380 ia_vcc = INPH_IA_VCC(vcc);
25381 if (ia_vcc == NULL)
25382 {
25383- atomic_inc(&vcc->stats->rx_err);
25384+ atomic_inc_unchecked(&vcc->stats->rx_err);
25385 dev_kfree_skb_any(skb);
25386 atm_return(vcc, atm_guess_pdu2truesize(len));
25387 goto INCR_DLE;
25388@@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *
25389 if ((length > iadev->rx_buf_sz) || (length >
25390 (skb->len - sizeof(struct cpcs_trailer))))
25391 {
25392- atomic_inc(&vcc->stats->rx_err);
25393+ atomic_inc_unchecked(&vcc->stats->rx_err);
25394 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
25395 length, skb->len);)
25396 dev_kfree_skb_any(skb);
25397@@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *
25398
25399 IF_RX(printk("rx_dle_intr: skb push");)
25400 vcc->push(vcc,skb);
25401- atomic_inc(&vcc->stats->rx);
25402+ atomic_inc_unchecked(&vcc->stats->rx);
25403 iadev->rx_pkt_cnt++;
25404 }
25405 INCR_DLE:
25406@@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev,
25407 {
25408 struct k_sonet_stats *stats;
25409 stats = &PRIV(_ia_dev[board])->sonet_stats;
25410- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
25411- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
25412- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
25413- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
25414- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
25415- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
25416- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
25417- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
25418- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
25419+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
25420+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
25421+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
25422+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
25423+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
25424+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
25425+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
25426+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
25427+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
25428 }
25429 ia_cmds.status = 0;
25430 break;
25431@@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
25432 if ((desc == 0) || (desc > iadev->num_tx_desc))
25433 {
25434 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
25435- atomic_inc(&vcc->stats->tx);
25436+ atomic_inc_unchecked(&vcc->stats->tx);
25437 if (vcc->pop)
25438 vcc->pop(vcc, skb);
25439 else
25440@@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
25441 ATM_DESC(skb) = vcc->vci;
25442 skb_queue_tail(&iadev->tx_dma_q, skb);
25443
25444- atomic_inc(&vcc->stats->tx);
25445+ atomic_inc_unchecked(&vcc->stats->tx);
25446 iadev->tx_pkt_cnt++;
25447 /* Increment transaction counter */
25448 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
25449
25450 #if 0
25451 /* add flow control logic */
25452- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
25453+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
25454 if (iavcc->vc_desc_cnt > 10) {
25455 vcc->tx_quota = vcc->tx_quota * 3 / 4;
25456 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
25457diff -urNp linux-2.6.32.41/drivers/atm/lanai.c linux-2.6.32.41/drivers/atm/lanai.c
25458--- linux-2.6.32.41/drivers/atm/lanai.c 2011-03-27 14:31:47.000000000 -0400
25459+++ linux-2.6.32.41/drivers/atm/lanai.c 2011-04-17 15:56:46.000000000 -0400
25460@@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct l
25461 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
25462 lanai_endtx(lanai, lvcc);
25463 lanai_free_skb(lvcc->tx.atmvcc, skb);
25464- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
25465+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
25466 }
25467
25468 /* Try to fill the buffer - don't call unless there is backlog */
25469@@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc
25470 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
25471 __net_timestamp(skb);
25472 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
25473- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
25474+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
25475 out:
25476 lvcc->rx.buf.ptr = end;
25477 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
25478@@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_d
25479 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
25480 "vcc %d\n", lanai->number, (unsigned int) s, vci);
25481 lanai->stats.service_rxnotaal5++;
25482- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25483+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25484 return 0;
25485 }
25486 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
25487@@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_d
25488 int bytes;
25489 read_unlock(&vcc_sklist_lock);
25490 DPRINTK("got trashed rx pdu on vci %d\n", vci);
25491- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25492+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25493 lvcc->stats.x.aal5.service_trash++;
25494 bytes = (SERVICE_GET_END(s) * 16) -
25495 (((unsigned long) lvcc->rx.buf.ptr) -
25496@@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_d
25497 }
25498 if (s & SERVICE_STREAM) {
25499 read_unlock(&vcc_sklist_lock);
25500- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25501+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25502 lvcc->stats.x.aal5.service_stream++;
25503 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
25504 "PDU on VCI %d!\n", lanai->number, vci);
25505@@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_d
25506 return 0;
25507 }
25508 DPRINTK("got rx crc error on vci %d\n", vci);
25509- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25510+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25511 lvcc->stats.x.aal5.service_rxcrc++;
25512 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
25513 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
25514diff -urNp linux-2.6.32.41/drivers/atm/nicstar.c linux-2.6.32.41/drivers/atm/nicstar.c
25515--- linux-2.6.32.41/drivers/atm/nicstar.c 2011-03-27 14:31:47.000000000 -0400
25516+++ linux-2.6.32.41/drivers/atm/nicstar.c 2011-04-17 15:56:46.000000000 -0400
25517@@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc,
25518 if ((vc = (vc_map *) vcc->dev_data) == NULL)
25519 {
25520 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
25521- atomic_inc(&vcc->stats->tx_err);
25522+ atomic_inc_unchecked(&vcc->stats->tx_err);
25523 dev_kfree_skb_any(skb);
25524 return -EINVAL;
25525 }
25526@@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc,
25527 if (!vc->tx)
25528 {
25529 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
25530- atomic_inc(&vcc->stats->tx_err);
25531+ atomic_inc_unchecked(&vcc->stats->tx_err);
25532 dev_kfree_skb_any(skb);
25533 return -EINVAL;
25534 }
25535@@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc,
25536 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
25537 {
25538 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
25539- atomic_inc(&vcc->stats->tx_err);
25540+ atomic_inc_unchecked(&vcc->stats->tx_err);
25541 dev_kfree_skb_any(skb);
25542 return -EINVAL;
25543 }
25544@@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc,
25545 if (skb_shinfo(skb)->nr_frags != 0)
25546 {
25547 printk("nicstar%d: No scatter-gather yet.\n", card->index);
25548- atomic_inc(&vcc->stats->tx_err);
25549+ atomic_inc_unchecked(&vcc->stats->tx_err);
25550 dev_kfree_skb_any(skb);
25551 return -EINVAL;
25552 }
25553@@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc,
25554
25555 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
25556 {
25557- atomic_inc(&vcc->stats->tx_err);
25558+ atomic_inc_unchecked(&vcc->stats->tx_err);
25559 dev_kfree_skb_any(skb);
25560 return -EIO;
25561 }
25562- atomic_inc(&vcc->stats->tx);
25563+ atomic_inc_unchecked(&vcc->stats->tx);
25564
25565 return 0;
25566 }
25567@@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_
25568 {
25569 printk("nicstar%d: Can't allocate buffers for aal0.\n",
25570 card->index);
25571- atomic_add(i,&vcc->stats->rx_drop);
25572+ atomic_add_unchecked(i,&vcc->stats->rx_drop);
25573 break;
25574 }
25575 if (!atm_charge(vcc, sb->truesize))
25576 {
25577 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
25578 card->index);
25579- atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
25580+ atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
25581 dev_kfree_skb_any(sb);
25582 break;
25583 }
25584@@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_
25585 ATM_SKB(sb)->vcc = vcc;
25586 __net_timestamp(sb);
25587 vcc->push(vcc, sb);
25588- atomic_inc(&vcc->stats->rx);
25589+ atomic_inc_unchecked(&vcc->stats->rx);
25590 cell += ATM_CELL_PAYLOAD;
25591 }
25592
25593@@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_
25594 if (iovb == NULL)
25595 {
25596 printk("nicstar%d: Out of iovec buffers.\n", card->index);
25597- atomic_inc(&vcc->stats->rx_drop);
25598+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25599 recycle_rx_buf(card, skb);
25600 return;
25601 }
25602@@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_
25603 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
25604 {
25605 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
25606- atomic_inc(&vcc->stats->rx_err);
25607+ atomic_inc_unchecked(&vcc->stats->rx_err);
25608 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
25609 NS_SKB(iovb)->iovcnt = 0;
25610 iovb->len = 0;
25611@@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_
25612 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
25613 card->index);
25614 which_list(card, skb);
25615- atomic_inc(&vcc->stats->rx_err);
25616+ atomic_inc_unchecked(&vcc->stats->rx_err);
25617 recycle_rx_buf(card, skb);
25618 vc->rx_iov = NULL;
25619 recycle_iov_buf(card, iovb);
25620@@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_
25621 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
25622 card->index);
25623 which_list(card, skb);
25624- atomic_inc(&vcc->stats->rx_err);
25625+ atomic_inc_unchecked(&vcc->stats->rx_err);
25626 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
25627 NS_SKB(iovb)->iovcnt);
25628 vc->rx_iov = NULL;
25629@@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_
25630 printk(" - PDU size mismatch.\n");
25631 else
25632 printk(".\n");
25633- atomic_inc(&vcc->stats->rx_err);
25634+ atomic_inc_unchecked(&vcc->stats->rx_err);
25635 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
25636 NS_SKB(iovb)->iovcnt);
25637 vc->rx_iov = NULL;
25638@@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_
25639 if (!atm_charge(vcc, skb->truesize))
25640 {
25641 push_rxbufs(card, skb);
25642- atomic_inc(&vcc->stats->rx_drop);
25643+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25644 }
25645 else
25646 {
25647@@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_
25648 ATM_SKB(skb)->vcc = vcc;
25649 __net_timestamp(skb);
25650 vcc->push(vcc, skb);
25651- atomic_inc(&vcc->stats->rx);
25652+ atomic_inc_unchecked(&vcc->stats->rx);
25653 }
25654 }
25655 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
25656@@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_
25657 if (!atm_charge(vcc, sb->truesize))
25658 {
25659 push_rxbufs(card, sb);
25660- atomic_inc(&vcc->stats->rx_drop);
25661+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25662 }
25663 else
25664 {
25665@@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_
25666 ATM_SKB(sb)->vcc = vcc;
25667 __net_timestamp(sb);
25668 vcc->push(vcc, sb);
25669- atomic_inc(&vcc->stats->rx);
25670+ atomic_inc_unchecked(&vcc->stats->rx);
25671 }
25672
25673 push_rxbufs(card, skb);
25674@@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_
25675 if (!atm_charge(vcc, skb->truesize))
25676 {
25677 push_rxbufs(card, skb);
25678- atomic_inc(&vcc->stats->rx_drop);
25679+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25680 }
25681 else
25682 {
25683@@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_
25684 ATM_SKB(skb)->vcc = vcc;
25685 __net_timestamp(skb);
25686 vcc->push(vcc, skb);
25687- atomic_inc(&vcc->stats->rx);
25688+ atomic_inc_unchecked(&vcc->stats->rx);
25689 }
25690
25691 push_rxbufs(card, sb);
25692@@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_
25693 if (hb == NULL)
25694 {
25695 printk("nicstar%d: Out of huge buffers.\n", card->index);
25696- atomic_inc(&vcc->stats->rx_drop);
25697+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25698 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
25699 NS_SKB(iovb)->iovcnt);
25700 vc->rx_iov = NULL;
25701@@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_
25702 }
25703 else
25704 dev_kfree_skb_any(hb);
25705- atomic_inc(&vcc->stats->rx_drop);
25706+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25707 }
25708 else
25709 {
25710@@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_
25711 #endif /* NS_USE_DESTRUCTORS */
25712 __net_timestamp(hb);
25713 vcc->push(vcc, hb);
25714- atomic_inc(&vcc->stats->rx);
25715+ atomic_inc_unchecked(&vcc->stats->rx);
25716 }
25717 }
25718
25719diff -urNp linux-2.6.32.41/drivers/atm/solos-pci.c linux-2.6.32.41/drivers/atm/solos-pci.c
25720--- linux-2.6.32.41/drivers/atm/solos-pci.c 2011-04-17 17:00:52.000000000 -0400
25721+++ linux-2.6.32.41/drivers/atm/solos-pci.c 2011-05-16 21:46:57.000000000 -0400
25722@@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
25723 }
25724 atm_charge(vcc, skb->truesize);
25725 vcc->push(vcc, skb);
25726- atomic_inc(&vcc->stats->rx);
25727+ atomic_inc_unchecked(&vcc->stats->rx);
25728 break;
25729
25730 case PKT_STATUS:
25731@@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *
25732 char msg[500];
25733 char item[10];
25734
25735+ pax_track_stack();
25736+
25737 len = buf->len;
25738 for (i = 0; i < len; i++){
25739 if(i % 8 == 0)
25740@@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_car
25741 vcc = SKB_CB(oldskb)->vcc;
25742
25743 if (vcc) {
25744- atomic_inc(&vcc->stats->tx);
25745+ atomic_inc_unchecked(&vcc->stats->tx);
25746 solos_pop(vcc, oldskb);
25747 } else
25748 dev_kfree_skb_irq(oldskb);
25749diff -urNp linux-2.6.32.41/drivers/atm/suni.c linux-2.6.32.41/drivers/atm/suni.c
25750--- linux-2.6.32.41/drivers/atm/suni.c 2011-03-27 14:31:47.000000000 -0400
25751+++ linux-2.6.32.41/drivers/atm/suni.c 2011-04-17 15:56:46.000000000 -0400
25752@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
25753
25754
25755 #define ADD_LIMITED(s,v) \
25756- atomic_add((v),&stats->s); \
25757- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
25758+ atomic_add_unchecked((v),&stats->s); \
25759+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
25760
25761
25762 static void suni_hz(unsigned long from_timer)
25763diff -urNp linux-2.6.32.41/drivers/atm/uPD98402.c linux-2.6.32.41/drivers/atm/uPD98402.c
25764--- linux-2.6.32.41/drivers/atm/uPD98402.c 2011-03-27 14:31:47.000000000 -0400
25765+++ linux-2.6.32.41/drivers/atm/uPD98402.c 2011-04-17 15:56:46.000000000 -0400
25766@@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *d
25767 struct sonet_stats tmp;
25768 int error = 0;
25769
25770- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
25771+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
25772 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
25773 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
25774 if (zero && !error) {
25775@@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev
25776
25777
25778 #define ADD_LIMITED(s,v) \
25779- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
25780- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
25781- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
25782+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
25783+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
25784+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
25785
25786
25787 static void stat_event(struct atm_dev *dev)
25788@@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev
25789 if (reason & uPD98402_INT_PFM) stat_event(dev);
25790 if (reason & uPD98402_INT_PCO) {
25791 (void) GET(PCOCR); /* clear interrupt cause */
25792- atomic_add(GET(HECCT),
25793+ atomic_add_unchecked(GET(HECCT),
25794 &PRIV(dev)->sonet_stats.uncorr_hcs);
25795 }
25796 if ((reason & uPD98402_INT_RFO) &&
25797@@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev
25798 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
25799 uPD98402_INT_LOS),PIMR); /* enable them */
25800 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
25801- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
25802- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
25803- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
25804+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
25805+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
25806+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
25807 return 0;
25808 }
25809
25810diff -urNp linux-2.6.32.41/drivers/atm/zatm.c linux-2.6.32.41/drivers/atm/zatm.c
25811--- linux-2.6.32.41/drivers/atm/zatm.c 2011-03-27 14:31:47.000000000 -0400
25812+++ linux-2.6.32.41/drivers/atm/zatm.c 2011-04-17 15:56:46.000000000 -0400
25813@@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
25814 }
25815 if (!size) {
25816 dev_kfree_skb_irq(skb);
25817- if (vcc) atomic_inc(&vcc->stats->rx_err);
25818+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
25819 continue;
25820 }
25821 if (!atm_charge(vcc,skb->truesize)) {
25822@@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
25823 skb->len = size;
25824 ATM_SKB(skb)->vcc = vcc;
25825 vcc->push(vcc,skb);
25826- atomic_inc(&vcc->stats->rx);
25827+ atomic_inc_unchecked(&vcc->stats->rx);
25828 }
25829 zout(pos & 0xffff,MTA(mbx));
25830 #if 0 /* probably a stupid idea */
25831@@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
25832 skb_queue_head(&zatm_vcc->backlog,skb);
25833 break;
25834 }
25835- atomic_inc(&vcc->stats->tx);
25836+ atomic_inc_unchecked(&vcc->stats->tx);
25837 wake_up(&zatm_vcc->tx_wait);
25838 }
25839
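
The ATM driver hunks above (horizon, idt77252, iphase, lanai, nicstar, solos-pci, suni, uPD98402, zatm) all apply one transformation: per-VCC statistics counters move from atomic_inc()/atomic_add()/atomic_read()/atomic_set() to their *_unchecked variants. Under PaX's REFCOUNT hardening the ordinary atomic_t operations trap on signed overflow; pure statistics may wrap harmlessly, so they are switched to the unchecked type to avoid false positives. A rough, self-contained illustration of the distinction, with hypothetical *_sketch names standing in for the PaX definitions:

    /* conceptual sketch only, not the real PaX implementation */
    typedef struct { int counter; } atomic_unchecked_t_sketch;

    /* statistics increment: plain wrapping add, no overflow trap */
    static inline void atomic_inc_unchecked_sketch(atomic_unchecked_t_sketch *v)
    {
            __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
    }

    /* reference-count style increment: the checked flavour additionally
     * detects the wrap and reports it instead of silently overflowing */
    static inline void atomic_inc_checked_sketch(atomic_unchecked_t_sketch *v)
    {
            int old = __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);

            if (old == __INT_MAX__)        /* wrapped: treat as a refcount bug */
                    __builtin_trap();
    }
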
25840diff -urNp linux-2.6.32.41/drivers/base/bus.c linux-2.6.32.41/drivers/base/bus.c
25841--- linux-2.6.32.41/drivers/base/bus.c 2011-03-27 14:31:47.000000000 -0400
25842+++ linux-2.6.32.41/drivers/base/bus.c 2011-04-17 15:56:46.000000000 -0400
25843@@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kob
25844 return ret;
25845 }
25846
25847-static struct sysfs_ops driver_sysfs_ops = {
25848+static const struct sysfs_ops driver_sysfs_ops = {
25849 .show = drv_attr_show,
25850 .store = drv_attr_store,
25851 };
25852@@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kob
25853 return ret;
25854 }
25855
25856-static struct sysfs_ops bus_sysfs_ops = {
25857+static const struct sysfs_ops bus_sysfs_ops = {
25858 .show = bus_attr_show,
25859 .store = bus_attr_store,
25860 };
25861@@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset
25862 return 0;
25863 }
25864
25865-static struct kset_uevent_ops bus_uevent_ops = {
25866+static const struct kset_uevent_ops bus_uevent_ops = {
25867 .filter = bus_uevent_filter,
25868 };
25869
25870diff -urNp linux-2.6.32.41/drivers/base/class.c linux-2.6.32.41/drivers/base/class.c
25871--- linux-2.6.32.41/drivers/base/class.c 2011-03-27 14:31:47.000000000 -0400
25872+++ linux-2.6.32.41/drivers/base/class.c 2011-04-17 15:56:46.000000000 -0400
25873@@ -63,7 +63,7 @@ static void class_release(struct kobject
25874 kfree(cp);
25875 }
25876
25877-static struct sysfs_ops class_sysfs_ops = {
25878+static const struct sysfs_ops class_sysfs_ops = {
25879 .show = class_attr_show,
25880 .store = class_attr_store,
25881 };
25882diff -urNp linux-2.6.32.41/drivers/base/core.c linux-2.6.32.41/drivers/base/core.c
25883--- linux-2.6.32.41/drivers/base/core.c 2011-03-27 14:31:47.000000000 -0400
25884+++ linux-2.6.32.41/drivers/base/core.c 2011-04-17 15:56:46.000000000 -0400
25885@@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kob
25886 return ret;
25887 }
25888
25889-static struct sysfs_ops dev_sysfs_ops = {
25890+static const struct sysfs_ops dev_sysfs_ops = {
25891 .show = dev_attr_show,
25892 .store = dev_attr_store,
25893 };
25894@@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset,
25895 return retval;
25896 }
25897
25898-static struct kset_uevent_ops device_uevent_ops = {
25899+static const struct kset_uevent_ops device_uevent_ops = {
25900 .filter = dev_uevent_filter,
25901 .name = dev_uevent_name,
25902 .uevent = dev_uevent,
25903diff -urNp linux-2.6.32.41/drivers/base/memory.c linux-2.6.32.41/drivers/base/memory.c
25904--- linux-2.6.32.41/drivers/base/memory.c 2011-03-27 14:31:47.000000000 -0400
25905+++ linux-2.6.32.41/drivers/base/memory.c 2011-04-17 15:56:46.000000000 -0400
25906@@ -44,7 +44,7 @@ static int memory_uevent(struct kset *ks
25907 return retval;
25908 }
25909
25910-static struct kset_uevent_ops memory_uevent_ops = {
25911+static const struct kset_uevent_ops memory_uevent_ops = {
25912 .name = memory_uevent_name,
25913 .uevent = memory_uevent,
25914 };
25915diff -urNp linux-2.6.32.41/drivers/base/sys.c linux-2.6.32.41/drivers/base/sys.c
25916--- linux-2.6.32.41/drivers/base/sys.c 2011-03-27 14:31:47.000000000 -0400
25917+++ linux-2.6.32.41/drivers/base/sys.c 2011-04-17 15:56:46.000000000 -0400
25918@@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struc
25919 return -EIO;
25920 }
25921
25922-static struct sysfs_ops sysfs_ops = {
25923+static const struct sysfs_ops sysfs_ops = {
25924 .show = sysdev_show,
25925 .store = sysdev_store,
25926 };
25927@@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct
25928 return -EIO;
25929 }
25930
25931-static struct sysfs_ops sysfs_class_ops = {
25932+static const struct sysfs_ops sysfs_class_ops = {
25933 .show = sysdev_class_show,
25934 .store = sysdev_class_store,
25935 };
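
The drivers/base hunks above mark the sysfs_ops and kset_uevent_ops tables const (the pktcdvd, hvc and pty hunks further down do the same for their ops structures). A table of function pointers declared const can be placed in read-only memory, so a stray write or a deliberate exploit cannot retarget those callbacks at run time. A minimal illustration, with invented names:

    struct show_store_ops {
            int (*show)(char *buf);
            int (*store)(const char *buf, int len);
    };

    static int demo_show(char *buf)                 { buf[0] = '\0'; return 0; }
    static int demo_store(const char *buf, int len) { (void)buf; return len; }

    /* "const" puts the pointer table in .rodata; a write to it faults
     * instead of silently redirecting the callbacks */
    static const struct show_store_ops demo_ops = {
            .show  = demo_show,
            .store = demo_store,
    };
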
25936diff -urNp linux-2.6.32.41/drivers/block/cciss.c linux-2.6.32.41/drivers/block/cciss.c
25937--- linux-2.6.32.41/drivers/block/cciss.c 2011-03-27 14:31:47.000000000 -0400
25938+++ linux-2.6.32.41/drivers/block/cciss.c 2011-04-17 15:56:46.000000000 -0400
25939@@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct
25940 int err;
25941 u32 cp;
25942
25943+ memset(&arg64, 0, sizeof(arg64));
25944+
25945 err = 0;
25946 err |=
25947 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
25948diff -urNp linux-2.6.32.41/drivers/block/cpqarray.c linux-2.6.32.41/drivers/block/cpqarray.c
25949--- linux-2.6.32.41/drivers/block/cpqarray.c 2011-03-27 14:31:47.000000000 -0400
25950+++ linux-2.6.32.41/drivers/block/cpqarray.c 2011-05-16 21:46:57.000000000 -0400
25951@@ -896,6 +896,8 @@ static void do_ida_request(struct reques
25952 struct scatterlist tmp_sg[SG_MAX];
25953 int i, dir, seg;
25954
25955+ pax_track_stack();
25956+
25957 if (blk_queue_plugged(q))
25958 goto startio;
25959
25960diff -urNp linux-2.6.32.41/drivers/block/DAC960.c linux-2.6.32.41/drivers/block/DAC960.c
25961--- linux-2.6.32.41/drivers/block/DAC960.c 2011-03-27 14:31:47.000000000 -0400
25962+++ linux-2.6.32.41/drivers/block/DAC960.c 2011-05-16 21:46:57.000000000 -0400
25963@@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfigur
25964 unsigned long flags;
25965 int Channel, TargetID;
25966
25967+ pax_track_stack();
25968+
25969 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
25970 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
25971 sizeof(DAC960_SCSI_Inquiry_T) +
25972diff -urNp linux-2.6.32.41/drivers/block/nbd.c linux-2.6.32.41/drivers/block/nbd.c
25973--- linux-2.6.32.41/drivers/block/nbd.c 2011-03-27 14:31:47.000000000 -0400
25974+++ linux-2.6.32.41/drivers/block/nbd.c 2011-05-16 21:46:57.000000000 -0400
25975@@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *
25976 struct kvec iov;
25977 sigset_t blocked, oldset;
25978
25979+ pax_track_stack();
25980+
25981 if (unlikely(!sock)) {
25982 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
25983 lo->disk->disk_name, (send ? "send" : "recv"));
25984@@ -569,6 +571,8 @@ static void do_nbd_request(struct reques
25985 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
25986 unsigned int cmd, unsigned long arg)
25987 {
25988+ pax_track_stack();
25989+
25990 switch (cmd) {
25991 case NBD_DISCONNECT: {
25992 struct request sreq;
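
The cpqarray, DAC960 and nbd hunks (and several later ones, e.g. ipmi, istallion and rocket) insert a pax_track_stack() call at the top of functions with unusually large stack frames. The helper is supplied by the PaX patch itself; as used here it records how deep the kernel stack has grown so the stack-sanitizing code knows how much to clear before returning to user space. A hypothetical stand-in that only illustrates the call pattern (the real state is kept per task):

    static unsigned long lowest_stack_seen;

    static inline void pax_track_stack_sketch(void)
    {
            unsigned long sp = (unsigned long)__builtin_frame_address(0);

            if (!lowest_stack_seen || sp < lowest_stack_seen)
                    lowest_stack_seen = sp;   /* remember the deepest point reached */
    }

    static void do_big_request_sketch(void)
    {
            char scratch[4096];               /* large on-stack object */

            pax_track_stack_sketch();         /* placed first, as in the hunks above */
            __builtin_memset(scratch, 0, sizeof(scratch));
    }
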
25993diff -urNp linux-2.6.32.41/drivers/block/pktcdvd.c linux-2.6.32.41/drivers/block/pktcdvd.c
25994--- linux-2.6.32.41/drivers/block/pktcdvd.c 2011-03-27 14:31:47.000000000 -0400
25995+++ linux-2.6.32.41/drivers/block/pktcdvd.c 2011-04-17 15:56:46.000000000 -0400
25996@@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kob
25997 return len;
25998 }
25999
26000-static struct sysfs_ops kobj_pkt_ops = {
26001+static const struct sysfs_ops kobj_pkt_ops = {
26002 .show = kobj_pkt_show,
26003 .store = kobj_pkt_store
26004 };
26005diff -urNp linux-2.6.32.41/drivers/char/agp/frontend.c linux-2.6.32.41/drivers/char/agp/frontend.c
26006--- linux-2.6.32.41/drivers/char/agp/frontend.c 2011-03-27 14:31:47.000000000 -0400
26007+++ linux-2.6.32.41/drivers/char/agp/frontend.c 2011-04-17 15:56:46.000000000 -0400
26008@@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct ag
26009 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
26010 return -EFAULT;
26011
26012- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
26013+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
26014 return -EFAULT;
26015
26016 client = agp_find_client_by_pid(reserve.pid);
26017diff -urNp linux-2.6.32.41/drivers/char/briq_panel.c linux-2.6.32.41/drivers/char/briq_panel.c
26018--- linux-2.6.32.41/drivers/char/briq_panel.c 2011-03-27 14:31:47.000000000 -0400
26019+++ linux-2.6.32.41/drivers/char/briq_panel.c 2011-04-18 19:48:57.000000000 -0400
26020@@ -10,6 +10,7 @@
26021 #include <linux/types.h>
26022 #include <linux/errno.h>
26023 #include <linux/tty.h>
26024+#include <linux/mutex.h>
26025 #include <linux/timer.h>
26026 #include <linux/kernel.h>
26027 #include <linux/wait.h>
26028@@ -36,6 +37,7 @@ static int vfd_is_open;
26029 static unsigned char vfd[40];
26030 static int vfd_cursor;
26031 static unsigned char ledpb, led;
26032+static DEFINE_MUTEX(vfd_mutex);
26033
26034 static void update_vfd(void)
26035 {
26036@@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct f
26037 if (!vfd_is_open)
26038 return -EBUSY;
26039
26040+ mutex_lock(&vfd_mutex);
26041 for (;;) {
26042 char c;
26043 if (!indx)
26044 break;
26045- if (get_user(c, buf))
26046+ if (get_user(c, buf)) {
26047+ mutex_unlock(&vfd_mutex);
26048 return -EFAULT;
26049+ }
26050 if (esc) {
26051 set_led(c);
26052 esc = 0;
26053@@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct f
26054 buf++;
26055 }
26056 update_vfd();
26057+ mutex_unlock(&vfd_mutex);
26058
26059 return len;
26060 }
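
The briq_panel hunk above is a plain race fix rather than a PaX conversion: the write path updates the shared vfd[] buffer and cursor, so the patch wraps it in a new vfd_mutex and takes care to drop the lock on the early -EFAULT return. The shape of the fix, sketched in kernel-style C with invented names:

    static DEFINE_MUTEX(demo_mutex);
    static unsigned char demo_buf[40];
    static int demo_cursor;

    static ssize_t demo_write(const char __user *buf, size_t len)
    {
            size_t i;

            mutex_lock(&demo_mutex);
            for (i = 0; i < len; i++) {
                    char c;

                    if (get_user(c, buf + i)) {
                            mutex_unlock(&demo_mutex);  /* unlock on every exit path */
                            return -EFAULT;
                    }
                    demo_buf[demo_cursor] = c;
                    demo_cursor = (demo_cursor + 1) % (int)sizeof(demo_buf);
            }
            mutex_unlock(&demo_mutex);
            return len;
    }
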
26061diff -urNp linux-2.6.32.41/drivers/char/genrtc.c linux-2.6.32.41/drivers/char/genrtc.c
26062--- linux-2.6.32.41/drivers/char/genrtc.c 2011-03-27 14:31:47.000000000 -0400
26063+++ linux-2.6.32.41/drivers/char/genrtc.c 2011-04-18 19:45:42.000000000 -0400
26064@@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *i
26065 switch (cmd) {
26066
26067 case RTC_PLL_GET:
26068+ memset(&pll, 0, sizeof(pll));
26069 if (get_rtc_pll(&pll))
26070 return -EINVAL;
26071 else
26072diff -urNp linux-2.6.32.41/drivers/char/hpet.c linux-2.6.32.41/drivers/char/hpet.c
26073--- linux-2.6.32.41/drivers/char/hpet.c 2011-03-27 14:31:47.000000000 -0400
26074+++ linux-2.6.32.41/drivers/char/hpet.c 2011-04-23 12:56:11.000000000 -0400
26075@@ -430,7 +430,7 @@ static int hpet_release(struct inode *in
26076 return 0;
26077 }
26078
26079-static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
26080+static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
26081
26082 static int
26083 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
26084@@ -565,7 +565,7 @@ static inline unsigned long hpet_time_di
26085 }
26086
26087 static int
26088-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
26089+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
26090 {
26091 struct hpet_timer __iomem *timer;
26092 struct hpet __iomem *hpet;
26093@@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp,
26094 {
26095 struct hpet_info info;
26096
26097+ memset(&info, 0, sizeof(info));
26098+
26099 if (devp->hd_ireqfreq)
26100 info.hi_ireqfreq =
26101 hpet_time_div(hpetp, devp->hd_ireqfreq);
26102- else
26103- info.hi_ireqfreq = 0;
26104 info.hi_flags =
26105 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
26106 info.hi_hpet = hpetp->hp_which;
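
The genrtc and hpet hunks above both zero a stack structure (memset(&pll, 0, ...), memset(&info, 0, ...)) before it is filled in and copied to user space. Without that, structure padding and any field left unset would carry whatever happened to be on the kernel stack out through copy_to_user(), a classic information leak. A small kernel-style sketch of the pattern, using a made-up struct:

    struct demo_info {                 /* hypothetical, for illustration only */
            unsigned long freq;
            unsigned short flags;      /* padding typically follows this field */
    };

    static int demo_fill_and_copy(struct demo_info __user *uaddr, unsigned long freq)
    {
            struct demo_info info;

            memset(&info, 0, sizeof(info));   /* padding and unset fields become 0 */
            info.freq = freq;
            return copy_to_user(uaddr, &info, sizeof(info)) ? -EFAULT : 0;
    }
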
26107diff -urNp linux-2.6.32.41/drivers/char/hvc_beat.c linux-2.6.32.41/drivers/char/hvc_beat.c
26108--- linux-2.6.32.41/drivers/char/hvc_beat.c 2011-03-27 14:31:47.000000000 -0400
26109+++ linux-2.6.32.41/drivers/char/hvc_beat.c 2011-04-17 15:56:46.000000000 -0400
26110@@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t v
26111 return cnt;
26112 }
26113
26114-static struct hv_ops hvc_beat_get_put_ops = {
26115+static const struct hv_ops hvc_beat_get_put_ops = {
26116 .get_chars = hvc_beat_get_chars,
26117 .put_chars = hvc_beat_put_chars,
26118 };
26119diff -urNp linux-2.6.32.41/drivers/char/hvc_console.c linux-2.6.32.41/drivers/char/hvc_console.c
26120--- linux-2.6.32.41/drivers/char/hvc_console.c 2011-03-27 14:31:47.000000000 -0400
26121+++ linux-2.6.32.41/drivers/char/hvc_console.c 2011-04-17 15:56:46.000000000 -0400
26122@@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_ind
26123 * console interfaces but can still be used as a tty device. This has to be
26124 * static because kmalloc will not work during early console init.
26125 */
26126-static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
26127+static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
26128 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
26129 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
26130
26131@@ -247,7 +247,7 @@ static void destroy_hvc_struct(struct kr
26132 * vty adapters do NOT get an hvc_instantiate() callback since they
26133 * appear after early console init.
26134 */
26135-int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
26136+int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
26137 {
26138 struct hvc_struct *hp;
26139
26140@@ -756,7 +756,7 @@ static const struct tty_operations hvc_o
26141 };
26142
26143 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
26144- struct hv_ops *ops, int outbuf_size)
26145+ const struct hv_ops *ops, int outbuf_size)
26146 {
26147 struct hvc_struct *hp;
26148 int i;
26149diff -urNp linux-2.6.32.41/drivers/char/hvc_console.h linux-2.6.32.41/drivers/char/hvc_console.h
26150--- linux-2.6.32.41/drivers/char/hvc_console.h 2011-03-27 14:31:47.000000000 -0400
26151+++ linux-2.6.32.41/drivers/char/hvc_console.h 2011-04-17 15:56:46.000000000 -0400
26152@@ -55,7 +55,7 @@ struct hvc_struct {
26153 int outbuf_size;
26154 int n_outbuf;
26155 uint32_t vtermno;
26156- struct hv_ops *ops;
26157+ const struct hv_ops *ops;
26158 int irq_requested;
26159 int data;
26160 struct winsize ws;
26161@@ -76,11 +76,11 @@ struct hv_ops {
26162 };
26163
26164 /* Register a vterm and a slot index for use as a console (console_init) */
26165-extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
26166+extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
26167
26168 /* register a vterm for hvc tty operation (module_init or hotplug add) */
26169 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
26170- struct hv_ops *ops, int outbuf_size);
26171+ const struct hv_ops *ops, int outbuf_size);
26172 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
26173 extern int hvc_remove(struct hvc_struct *hp);
26174
26175diff -urNp linux-2.6.32.41/drivers/char/hvc_iseries.c linux-2.6.32.41/drivers/char/hvc_iseries.c
26176--- linux-2.6.32.41/drivers/char/hvc_iseries.c 2011-03-27 14:31:47.000000000 -0400
26177+++ linux-2.6.32.41/drivers/char/hvc_iseries.c 2011-04-17 15:56:46.000000000 -0400
26178@@ -197,7 +197,7 @@ done:
26179 return sent;
26180 }
26181
26182-static struct hv_ops hvc_get_put_ops = {
26183+static const struct hv_ops hvc_get_put_ops = {
26184 .get_chars = get_chars,
26185 .put_chars = put_chars,
26186 .notifier_add = notifier_add_irq,
26187diff -urNp linux-2.6.32.41/drivers/char/hvc_iucv.c linux-2.6.32.41/drivers/char/hvc_iucv.c
26188--- linux-2.6.32.41/drivers/char/hvc_iucv.c 2011-03-27 14:31:47.000000000 -0400
26189+++ linux-2.6.32.41/drivers/char/hvc_iucv.c 2011-04-17 15:56:46.000000000 -0400
26190@@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(stru
26191
26192
26193 /* HVC operations */
26194-static struct hv_ops hvc_iucv_ops = {
26195+static const struct hv_ops hvc_iucv_ops = {
26196 .get_chars = hvc_iucv_get_chars,
26197 .put_chars = hvc_iucv_put_chars,
26198 .notifier_add = hvc_iucv_notifier_add,
26199diff -urNp linux-2.6.32.41/drivers/char/hvc_rtas.c linux-2.6.32.41/drivers/char/hvc_rtas.c
26200--- linux-2.6.32.41/drivers/char/hvc_rtas.c 2011-03-27 14:31:47.000000000 -0400
26201+++ linux-2.6.32.41/drivers/char/hvc_rtas.c 2011-04-17 15:56:46.000000000 -0400
26202@@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_
26203 return i;
26204 }
26205
26206-static struct hv_ops hvc_rtas_get_put_ops = {
26207+static const struct hv_ops hvc_rtas_get_put_ops = {
26208 .get_chars = hvc_rtas_read_console,
26209 .put_chars = hvc_rtas_write_console,
26210 };
26211diff -urNp linux-2.6.32.41/drivers/char/hvcs.c linux-2.6.32.41/drivers/char/hvcs.c
26212--- linux-2.6.32.41/drivers/char/hvcs.c 2011-03-27 14:31:47.000000000 -0400
26213+++ linux-2.6.32.41/drivers/char/hvcs.c 2011-04-17 15:56:46.000000000 -0400
26214@@ -82,6 +82,7 @@
26215 #include <asm/hvcserver.h>
26216 #include <asm/uaccess.h>
26217 #include <asm/vio.h>
26218+#include <asm/local.h>
26219
26220 /*
26221 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
26222@@ -269,7 +270,7 @@ struct hvcs_struct {
26223 unsigned int index;
26224
26225 struct tty_struct *tty;
26226- int open_count;
26227+ local_t open_count;
26228
26229 /*
26230 * Used to tell the driver kernel_thread what operations need to take
26231@@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(st
26232
26233 spin_lock_irqsave(&hvcsd->lock, flags);
26234
26235- if (hvcsd->open_count > 0) {
26236+ if (local_read(&hvcsd->open_count) > 0) {
26237 spin_unlock_irqrestore(&hvcsd->lock, flags);
26238 printk(KERN_INFO "HVCS: vterm state unchanged. "
26239 "The hvcs device node is still in use.\n");
26240@@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *
26241 if ((retval = hvcs_partner_connect(hvcsd)))
26242 goto error_release;
26243
26244- hvcsd->open_count = 1;
26245+ local_set(&hvcsd->open_count, 1);
26246 hvcsd->tty = tty;
26247 tty->driver_data = hvcsd;
26248
26249@@ -1169,7 +1170,7 @@ fast_open:
26250
26251 spin_lock_irqsave(&hvcsd->lock, flags);
26252 kref_get(&hvcsd->kref);
26253- hvcsd->open_count++;
26254+ local_inc(&hvcsd->open_count);
26255 hvcsd->todo_mask |= HVCS_SCHED_READ;
26256 spin_unlock_irqrestore(&hvcsd->lock, flags);
26257
26258@@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct
26259 hvcsd = tty->driver_data;
26260
26261 spin_lock_irqsave(&hvcsd->lock, flags);
26262- if (--hvcsd->open_count == 0) {
26263+ if (local_dec_and_test(&hvcsd->open_count)) {
26264
26265 vio_disable_interrupts(hvcsd->vdev);
26266
26267@@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct
26268 free_irq(irq, hvcsd);
26269 kref_put(&hvcsd->kref, destroy_hvcs_struct);
26270 return;
26271- } else if (hvcsd->open_count < 0) {
26272+ } else if (local_read(&hvcsd->open_count) < 0) {
26273 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
26274 " is missmanaged.\n",
26275- hvcsd->vdev->unit_address, hvcsd->open_count);
26276+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
26277 }
26278
26279 spin_unlock_irqrestore(&hvcsd->lock, flags);
26280@@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struc
26281
26282 spin_lock_irqsave(&hvcsd->lock, flags);
26283 /* Preserve this so that we know how many kref refs to put */
26284- temp_open_count = hvcsd->open_count;
26285+ temp_open_count = local_read(&hvcsd->open_count);
26286
26287 /*
26288 * Don't kref put inside the spinlock because the destruction
26289@@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struc
26290 hvcsd->tty->driver_data = NULL;
26291 hvcsd->tty = NULL;
26292
26293- hvcsd->open_count = 0;
26294+ local_set(&hvcsd->open_count, 0);
26295
26296 /* This will drop any buffered data on the floor which is OK in a hangup
26297 * scenario. */
26298@@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct
26299 * the middle of a write operation? This is a crummy place to do this
26300 * but we want to keep it all in the spinlock.
26301 */
26302- if (hvcsd->open_count <= 0) {
26303+ if (local_read(&hvcsd->open_count) <= 0) {
26304 spin_unlock_irqrestore(&hvcsd->lock, flags);
26305 return -ENODEV;
26306 }
26307@@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_st
26308 {
26309 struct hvcs_struct *hvcsd = tty->driver_data;
26310
26311- if (!hvcsd || hvcsd->open_count <= 0)
26312+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
26313 return 0;
26314
26315 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
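
The hvcs hunk above (and the ipwireless tty hunk further down) converts a plain int open_count into a local_t from asm/local.h, replacing ++/-- with local_inc()/local_dec_and_test() and direct reads with local_read(). local_t gives cheap atomic updates for a counter that behaves like a reference count, which fits the open/close balance tracked here. A kernel-style sketch of the before/after shape, with invented names:

    #include <asm/local.h>             /* kernel header; sketch assumes kernel context */

    struct demo_port {
            spinlock_t lock;
            local_t    open_count;     /* was: int open_count */
    };

    static void demo_open(struct demo_port *p)
    {
            unsigned long flags;

            spin_lock_irqsave(&p->lock, flags);
            local_inc(&p->open_count);                 /* was: p->open_count++ */
            spin_unlock_irqrestore(&p->lock, flags);
    }

    static int demo_close(struct demo_port *p)
    {
            unsigned long flags;
            int last;

            spin_lock_irqsave(&p->lock, flags);
            last = local_dec_and_test(&p->open_count); /* was: --p->open_count == 0 */
            spin_unlock_irqrestore(&p->lock, flags);
            return last;                               /* non-zero on the final close */
    }
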
26316diff -urNp linux-2.6.32.41/drivers/char/hvc_udbg.c linux-2.6.32.41/drivers/char/hvc_udbg.c
26317--- linux-2.6.32.41/drivers/char/hvc_udbg.c 2011-03-27 14:31:47.000000000 -0400
26318+++ linux-2.6.32.41/drivers/char/hvc_udbg.c 2011-04-17 15:56:46.000000000 -0400
26319@@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno
26320 return i;
26321 }
26322
26323-static struct hv_ops hvc_udbg_ops = {
26324+static const struct hv_ops hvc_udbg_ops = {
26325 .get_chars = hvc_udbg_get,
26326 .put_chars = hvc_udbg_put,
26327 };
26328diff -urNp linux-2.6.32.41/drivers/char/hvc_vio.c linux-2.6.32.41/drivers/char/hvc_vio.c
26329--- linux-2.6.32.41/drivers/char/hvc_vio.c 2011-03-27 14:31:47.000000000 -0400
26330+++ linux-2.6.32.41/drivers/char/hvc_vio.c 2011-04-17 15:56:46.000000000 -0400
26331@@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t v
26332 return got;
26333 }
26334
26335-static struct hv_ops hvc_get_put_ops = {
26336+static const struct hv_ops hvc_get_put_ops = {
26337 .get_chars = filtered_get_chars,
26338 .put_chars = hvc_put_chars,
26339 .notifier_add = notifier_add_irq,
26340diff -urNp linux-2.6.32.41/drivers/char/hvc_xen.c linux-2.6.32.41/drivers/char/hvc_xen.c
26341--- linux-2.6.32.41/drivers/char/hvc_xen.c 2011-03-27 14:31:47.000000000 -0400
26342+++ linux-2.6.32.41/drivers/char/hvc_xen.c 2011-04-17 15:56:46.000000000 -0400
26343@@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno
26344 return recv;
26345 }
26346
26347-static struct hv_ops hvc_ops = {
26348+static const struct hv_ops hvc_ops = {
26349 .get_chars = read_console,
26350 .put_chars = write_console,
26351 .notifier_add = notifier_add_irq,
26352diff -urNp linux-2.6.32.41/drivers/char/ipmi/ipmi_msghandler.c linux-2.6.32.41/drivers/char/ipmi/ipmi_msghandler.c
26353--- linux-2.6.32.41/drivers/char/ipmi/ipmi_msghandler.c 2011-03-27 14:31:47.000000000 -0400
26354+++ linux-2.6.32.41/drivers/char/ipmi/ipmi_msghandler.c 2011-05-16 21:46:57.000000000 -0400
26355@@ -414,7 +414,7 @@ struct ipmi_smi {
26356 struct proc_dir_entry *proc_dir;
26357 char proc_dir_name[10];
26358
26359- atomic_t stats[IPMI_NUM_STATS];
26360+ atomic_unchecked_t stats[IPMI_NUM_STATS];
26361
26362 /*
26363 * run_to_completion duplicate of smb_info, smi_info
26364@@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
26365
26366
26367 #define ipmi_inc_stat(intf, stat) \
26368- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
26369+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
26370 #define ipmi_get_stat(intf, stat) \
26371- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
26372+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
26373
26374 static int is_lan_addr(struct ipmi_addr *addr)
26375 {
26376@@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
26377 INIT_LIST_HEAD(&intf->cmd_rcvrs);
26378 init_waitqueue_head(&intf->waitq);
26379 for (i = 0; i < IPMI_NUM_STATS; i++)
26380- atomic_set(&intf->stats[i], 0);
26381+ atomic_set_unchecked(&intf->stats[i], 0);
26382
26383 intf->proc_dir = NULL;
26384
26385@@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
26386 struct ipmi_smi_msg smi_msg;
26387 struct ipmi_recv_msg recv_msg;
26388
26389+ pax_track_stack();
26390+
26391 si = (struct ipmi_system_interface_addr *) &addr;
26392 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
26393 si->channel = IPMI_BMC_CHANNEL;
26394diff -urNp linux-2.6.32.41/drivers/char/ipmi/ipmi_si_intf.c linux-2.6.32.41/drivers/char/ipmi/ipmi_si_intf.c
26395--- linux-2.6.32.41/drivers/char/ipmi/ipmi_si_intf.c 2011-03-27 14:31:47.000000000 -0400
26396+++ linux-2.6.32.41/drivers/char/ipmi/ipmi_si_intf.c 2011-04-17 15:56:46.000000000 -0400
26397@@ -277,7 +277,7 @@ struct smi_info {
26398 unsigned char slave_addr;
26399
26400 /* Counters and things for the proc filesystem. */
26401- atomic_t stats[SI_NUM_STATS];
26402+ atomic_unchecked_t stats[SI_NUM_STATS];
26403
26404 struct task_struct *thread;
26405
26406@@ -285,9 +285,9 @@ struct smi_info {
26407 };
26408
26409 #define smi_inc_stat(smi, stat) \
26410- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
26411+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
26412 #define smi_get_stat(smi, stat) \
26413- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
26414+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
26415
26416 #define SI_MAX_PARMS 4
26417
26418@@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info
26419 atomic_set(&new_smi->req_events, 0);
26420 new_smi->run_to_completion = 0;
26421 for (i = 0; i < SI_NUM_STATS; i++)
26422- atomic_set(&new_smi->stats[i], 0);
26423+ atomic_set_unchecked(&new_smi->stats[i], 0);
26424
26425 new_smi->interrupt_disabled = 0;
26426 atomic_set(&new_smi->stop_operation, 0);
26427diff -urNp linux-2.6.32.41/drivers/char/istallion.c linux-2.6.32.41/drivers/char/istallion.c
26428--- linux-2.6.32.41/drivers/char/istallion.c 2011-03-27 14:31:47.000000000 -0400
26429+++ linux-2.6.32.41/drivers/char/istallion.c 2011-05-16 21:46:57.000000000 -0400
26430@@ -187,7 +187,6 @@ static struct ktermios stli_deftermios
26431 * re-used for each stats call.
26432 */
26433 static comstats_t stli_comstats;
26434-static combrd_t stli_brdstats;
26435 static struct asystats stli_cdkstats;
26436
26437 /*****************************************************************************/
26438@@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __u
26439 {
26440 struct stlibrd *brdp;
26441 unsigned int i;
26442+ combrd_t stli_brdstats;
26443
26444 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
26445 return -EFAULT;
26446@@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stl
26447 struct stliport stli_dummyport;
26448 struct stliport *portp;
26449
26450+ pax_track_stack();
26451+
26452 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
26453 return -EFAULT;
26454 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
26455@@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stli
26456 struct stlibrd stli_dummybrd;
26457 struct stlibrd *brdp;
26458
26459+ pax_track_stack();
26460+
26461 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
26462 return -EFAULT;
26463 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
26464diff -urNp linux-2.6.32.41/drivers/char/Kconfig linux-2.6.32.41/drivers/char/Kconfig
26465--- linux-2.6.32.41/drivers/char/Kconfig 2011-03-27 14:31:47.000000000 -0400
26466+++ linux-2.6.32.41/drivers/char/Kconfig 2011-04-18 19:20:15.000000000 -0400
26467@@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
26468
26469 config DEVKMEM
26470 bool "/dev/kmem virtual device support"
26471- default y
26472+ default n
26473+ depends on !GRKERNSEC_KMEM
26474 help
26475 Say Y here if you want to support the /dev/kmem device. The
26476 /dev/kmem device is rarely used, but can be used for certain
26477@@ -1114,6 +1115,7 @@ config DEVPORT
26478 bool
26479 depends on !M68K
26480 depends on ISA || PCI
26481+ depends on !GRKERNSEC_KMEM
26482 default y
26483
26484 source "drivers/s390/char/Kconfig"
26485diff -urNp linux-2.6.32.41/drivers/char/keyboard.c linux-2.6.32.41/drivers/char/keyboard.c
26486--- linux-2.6.32.41/drivers/char/keyboard.c 2011-03-27 14:31:47.000000000 -0400
26487+++ linux-2.6.32.41/drivers/char/keyboard.c 2011-04-17 15:56:46.000000000 -0400
26488@@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, u
26489 kbd->kbdmode == VC_MEDIUMRAW) &&
26490 value != KVAL(K_SAK))
26491 return; /* SAK is allowed even in raw mode */
26492+
26493+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
26494+ {
26495+ void *func = fn_handler[value];
26496+ if (func == fn_show_state || func == fn_show_ptregs ||
26497+ func == fn_show_mem)
26498+ return;
26499+ }
26500+#endif
26501+
26502 fn_handler[value](vc);
26503 }
26504
26505@@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_
26506 .evbit = { BIT_MASK(EV_SND) },
26507 },
26508
26509- { }, /* Terminating entry */
26510+ { 0 }, /* Terminating entry */
26511 };
26512
26513 MODULE_DEVICE_TABLE(input, kbd_ids);
26514diff -urNp linux-2.6.32.41/drivers/char/mem.c linux-2.6.32.41/drivers/char/mem.c
26515--- linux-2.6.32.41/drivers/char/mem.c 2011-03-27 14:31:47.000000000 -0400
26516+++ linux-2.6.32.41/drivers/char/mem.c 2011-04-17 15:56:46.000000000 -0400
26517@@ -18,6 +18,7 @@
26518 #include <linux/raw.h>
26519 #include <linux/tty.h>
26520 #include <linux/capability.h>
26521+#include <linux/security.h>
26522 #include <linux/ptrace.h>
26523 #include <linux/device.h>
26524 #include <linux/highmem.h>
26525@@ -35,6 +36,10 @@
26526 # include <linux/efi.h>
26527 #endif
26528
26529+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
26530+extern struct file_operations grsec_fops;
26531+#endif
26532+
26533 static inline unsigned long size_inside_page(unsigned long start,
26534 unsigned long size)
26535 {
26536@@ -102,9 +107,13 @@ static inline int range_is_allowed(unsig
26537
26538 while (cursor < to) {
26539 if (!devmem_is_allowed(pfn)) {
26540+#ifdef CONFIG_GRKERNSEC_KMEM
26541+ gr_handle_mem_readwrite(from, to);
26542+#else
26543 printk(KERN_INFO
26544 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
26545 current->comm, from, to);
26546+#endif
26547 return 0;
26548 }
26549 cursor += PAGE_SIZE;
26550@@ -112,6 +121,11 @@ static inline int range_is_allowed(unsig
26551 }
26552 return 1;
26553 }
26554+#elif defined(CONFIG_GRKERNSEC_KMEM)
26555+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
26556+{
26557+ return 0;
26558+}
26559 #else
26560 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
26561 {
26562@@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * fi
26563 #endif
26564
26565 while (count > 0) {
26566+ char *temp;
26567+
26568 /*
26569 * Handle first page in case it's not aligned
26570 */
26571@@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * fi
26572 if (!ptr)
26573 return -EFAULT;
26574
26575- if (copy_to_user(buf, ptr, sz)) {
26576+#ifdef CONFIG_PAX_USERCOPY
26577+ temp = kmalloc(sz, GFP_KERNEL);
26578+ if (!temp) {
26579+ unxlate_dev_mem_ptr(p, ptr);
26580+ return -ENOMEM;
26581+ }
26582+ memcpy(temp, ptr, sz);
26583+#else
26584+ temp = ptr;
26585+#endif
26586+
26587+ if (copy_to_user(buf, temp, sz)) {
26588+
26589+#ifdef CONFIG_PAX_USERCOPY
26590+ kfree(temp);
26591+#endif
26592+
26593 unxlate_dev_mem_ptr(p, ptr);
26594 return -EFAULT;
26595 }
26596
26597+#ifdef CONFIG_PAX_USERCOPY
26598+ kfree(temp);
26599+#endif
26600+
26601 unxlate_dev_mem_ptr(p, ptr);
26602
26603 buf += sz;
26604@@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *fi
26605 size_t count, loff_t *ppos)
26606 {
26607 unsigned long p = *ppos;
26608- ssize_t low_count, read, sz;
26609+ ssize_t low_count, read, sz, err = 0;
26610 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
26611- int err = 0;
26612
26613 read = 0;
26614 if (p < (unsigned long) high_memory) {
26615@@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *fi
26616 }
26617 #endif
26618 while (low_count > 0) {
26619+ char *temp;
26620+
26621 sz = size_inside_page(p, low_count);
26622
26623 /*
26624@@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *fi
26625 */
26626 kbuf = xlate_dev_kmem_ptr((char *)p);
26627
26628- if (copy_to_user(buf, kbuf, sz))
26629+#ifdef CONFIG_PAX_USERCOPY
26630+ temp = kmalloc(sz, GFP_KERNEL);
26631+ if (!temp)
26632+ return -ENOMEM;
26633+ memcpy(temp, kbuf, sz);
26634+#else
26635+ temp = kbuf;
26636+#endif
26637+
26638+ err = copy_to_user(buf, temp, sz);
26639+
26640+#ifdef CONFIG_PAX_USERCOPY
26641+ kfree(temp);
26642+#endif
26643+
26644+ if (err)
26645 return -EFAULT;
26646 buf += sz;
26647 p += sz;
26648@@ -889,6 +941,9 @@ static const struct memdev {
26649 #ifdef CONFIG_CRASH_DUMP
26650 [12] = { "oldmem", 0, &oldmem_fops, NULL },
26651 #endif
26652+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
26653+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
26654+#endif
26655 };
26656
26657 static int memory_open(struct inode *inode, struct file *filp)
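
The /dev/mem and /dev/kmem hunks above do two separate things: under CONFIG_GRKERNSEC_KMEM the range_is_allowed() path is either reported through gr_handle_mem_readwrite() or denied outright, and under CONFIG_PAX_USERCOPY the read paths bounce data through a freshly allocated kmalloc() buffer before copy_to_user(). The bounce matters because the usercopy checker validates copies against the size of the source heap object; copying straight out of an arbitrary kernel mapping gives it nothing to check against. A minimal kernel-style sketch of the bounce, assuming only standard kmalloc/copy_to_user semantics:

    static ssize_t demo_copy_region_to_user(void __user *ubuf, const void *src, size_t sz)
    {
            void *tmp;
            ssize_t ret = 0;

            tmp = kmalloc(sz, GFP_KERNEL);     /* known-size slab object */
            if (!tmp)
                    return -ENOMEM;

            memcpy(tmp, src, sz);              /* stage the data in the bounce buffer */
            if (copy_to_user(ubuf, tmp, sz))   /* usercopy check sees a sz-byte object */
                    ret = -EFAULT;

            kfree(tmp);
            return ret;
    }
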
26658diff -urNp linux-2.6.32.41/drivers/char/pcmcia/ipwireless/tty.c linux-2.6.32.41/drivers/char/pcmcia/ipwireless/tty.c
26659--- linux-2.6.32.41/drivers/char/pcmcia/ipwireless/tty.c 2011-03-27 14:31:47.000000000 -0400
26660+++ linux-2.6.32.41/drivers/char/pcmcia/ipwireless/tty.c 2011-04-17 15:56:46.000000000 -0400
26661@@ -29,6 +29,7 @@
26662 #include <linux/tty_driver.h>
26663 #include <linux/tty_flip.h>
26664 #include <linux/uaccess.h>
26665+#include <asm/local.h>
26666
26667 #include "tty.h"
26668 #include "network.h"
26669@@ -51,7 +52,7 @@ struct ipw_tty {
26670 int tty_type;
26671 struct ipw_network *network;
26672 struct tty_struct *linux_tty;
26673- int open_count;
26674+ local_t open_count;
26675 unsigned int control_lines;
26676 struct mutex ipw_tty_mutex;
26677 int tx_bytes_queued;
26678@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
26679 mutex_unlock(&tty->ipw_tty_mutex);
26680 return -ENODEV;
26681 }
26682- if (tty->open_count == 0)
26683+ if (local_read(&tty->open_count) == 0)
26684 tty->tx_bytes_queued = 0;
26685
26686- tty->open_count++;
26687+ local_inc(&tty->open_count);
26688
26689 tty->linux_tty = linux_tty;
26690 linux_tty->driver_data = tty;
26691@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
26692
26693 static void do_ipw_close(struct ipw_tty *tty)
26694 {
26695- tty->open_count--;
26696-
26697- if (tty->open_count == 0) {
26698+ if (local_dec_return(&tty->open_count) == 0) {
26699 struct tty_struct *linux_tty = tty->linux_tty;
26700
26701 if (linux_tty != NULL) {
26702@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
26703 return;
26704
26705 mutex_lock(&tty->ipw_tty_mutex);
26706- if (tty->open_count == 0) {
26707+ if (local_read(&tty->open_count) == 0) {
26708 mutex_unlock(&tty->ipw_tty_mutex);
26709 return;
26710 }
26711@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
26712 return;
26713 }
26714
26715- if (!tty->open_count) {
26716+ if (!local_read(&tty->open_count)) {
26717 mutex_unlock(&tty->ipw_tty_mutex);
26718 return;
26719 }
26720@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
26721 return -ENODEV;
26722
26723 mutex_lock(&tty->ipw_tty_mutex);
26724- if (!tty->open_count) {
26725+ if (!local_read(&tty->open_count)) {
26726 mutex_unlock(&tty->ipw_tty_mutex);
26727 return -EINVAL;
26728 }
26729@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
26730 if (!tty)
26731 return -ENODEV;
26732
26733- if (!tty->open_count)
26734+ if (!local_read(&tty->open_count))
26735 return -EINVAL;
26736
26737 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
26738@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
26739 if (!tty)
26740 return 0;
26741
26742- if (!tty->open_count)
26743+ if (!local_read(&tty->open_count))
26744 return 0;
26745
26746 return tty->tx_bytes_queued;
26747@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
26748 if (!tty)
26749 return -ENODEV;
26750
26751- if (!tty->open_count)
26752+ if (!local_read(&tty->open_count))
26753 return -EINVAL;
26754
26755 return get_control_lines(tty);
26756@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
26757 if (!tty)
26758 return -ENODEV;
26759
26760- if (!tty->open_count)
26761+ if (!local_read(&tty->open_count))
26762 return -EINVAL;
26763
26764 return set_control_lines(tty, set, clear);
26765@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
26766 if (!tty)
26767 return -ENODEV;
26768
26769- if (!tty->open_count)
26770+ if (!local_read(&tty->open_count))
26771 return -EINVAL;
26772
26773 /* FIXME: Exactly how is the tty object locked here .. */
26774@@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty
26775 against a parallel ioctl etc */
26776 mutex_lock(&ttyj->ipw_tty_mutex);
26777 }
26778- while (ttyj->open_count)
26779+ while (local_read(&ttyj->open_count))
26780 do_ipw_close(ttyj);
26781 ipwireless_disassociate_network_ttys(network,
26782 ttyj->channel_idx);
26783diff -urNp linux-2.6.32.41/drivers/char/pty.c linux-2.6.32.41/drivers/char/pty.c
26784--- linux-2.6.32.41/drivers/char/pty.c 2011-03-27 14:31:47.000000000 -0400
26785+++ linux-2.6.32.41/drivers/char/pty.c 2011-04-17 15:56:46.000000000 -0400
26786@@ -682,7 +682,18 @@ static int ptmx_open(struct inode *inode
26787 return ret;
26788 }
26789
26790-static struct file_operations ptmx_fops;
26791+static const struct file_operations ptmx_fops = {
26792+ .llseek = no_llseek,
26793+ .read = tty_read,
26794+ .write = tty_write,
26795+ .poll = tty_poll,
26796+ .unlocked_ioctl = tty_ioctl,
26797+ .compat_ioctl = tty_compat_ioctl,
26798+ .open = ptmx_open,
26799+ .release = tty_release,
26800+ .fasync = tty_fasync,
26801+};
26802+
26803
26804 static void __init unix98_pty_init(void)
26805 {
26806@@ -736,9 +747,6 @@ static void __init unix98_pty_init(void)
26807 register_sysctl_table(pty_root_table);
26808
26809 /* Now create the /dev/ptmx special device */
26810- tty_default_fops(&ptmx_fops);
26811- ptmx_fops.open = ptmx_open;
26812-
26813 cdev_init(&ptmx_cdev, &ptmx_fops);
26814 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
26815 register_chrdev_region(MKDEV(TTYAUX_MAJOR, 2), 1, "/dev/ptmx") < 0)
26816diff -urNp linux-2.6.32.41/drivers/char/random.c linux-2.6.32.41/drivers/char/random.c
26817--- linux-2.6.32.41/drivers/char/random.c 2011-03-27 14:31:47.000000000 -0400
26818+++ linux-2.6.32.41/drivers/char/random.c 2011-04-17 15:56:46.000000000 -0400
26819@@ -254,8 +254,13 @@
26820 /*
26821 * Configuration information
26822 */
26823+#ifdef CONFIG_GRKERNSEC_RANDNET
26824+#define INPUT_POOL_WORDS 512
26825+#define OUTPUT_POOL_WORDS 128
26826+#else
26827 #define INPUT_POOL_WORDS 128
26828 #define OUTPUT_POOL_WORDS 32
26829+#endif
26830 #define SEC_XFER_SIZE 512
26831
26832 /*
26833@@ -292,10 +297,17 @@ static struct poolinfo {
26834 int poolwords;
26835 int tap1, tap2, tap3, tap4, tap5;
26836 } poolinfo_table[] = {
26837+#ifdef CONFIG_GRKERNSEC_RANDNET
26838+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
26839+ { 512, 411, 308, 208, 104, 1 },
26840+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
26841+ { 128, 103, 76, 51, 25, 1 },
26842+#else
26843 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
26844 { 128, 103, 76, 51, 25, 1 },
26845 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
26846 { 32, 26, 20, 14, 7, 1 },
26847+#endif
26848 #if 0
26849 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
26850 { 2048, 1638, 1231, 819, 411, 1 },
26851@@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
26852 #include <linux/sysctl.h>
26853
26854 static int min_read_thresh = 8, min_write_thresh;
26855-static int max_read_thresh = INPUT_POOL_WORDS * 32;
26856+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
26857 static int max_write_thresh = INPUT_POOL_WORDS * 32;
26858 static char sysctl_bootid[16];
26859
26860diff -urNp linux-2.6.32.41/drivers/char/rocket.c linux-2.6.32.41/drivers/char/rocket.c
26861--- linux-2.6.32.41/drivers/char/rocket.c 2011-03-27 14:31:47.000000000 -0400
26862+++ linux-2.6.32.41/drivers/char/rocket.c 2011-05-16 21:46:57.000000000 -0400
26863@@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info
26864 struct rocket_ports tmp;
26865 int board;
26866
26867+ pax_track_stack();
26868+
26869 if (!retports)
26870 return -EFAULT;
26871 memset(&tmp, 0, sizeof (tmp));
26872diff -urNp linux-2.6.32.41/drivers/char/sonypi.c linux-2.6.32.41/drivers/char/sonypi.c
26873--- linux-2.6.32.41/drivers/char/sonypi.c 2011-03-27 14:31:47.000000000 -0400
26874+++ linux-2.6.32.41/drivers/char/sonypi.c 2011-04-17 15:56:46.000000000 -0400
26875@@ -55,6 +55,7 @@
26876 #include <asm/uaccess.h>
26877 #include <asm/io.h>
26878 #include <asm/system.h>
26879+#include <asm/local.h>
26880
26881 #include <linux/sonypi.h>
26882
26883@@ -491,7 +492,7 @@ static struct sonypi_device {
26884 spinlock_t fifo_lock;
26885 wait_queue_head_t fifo_proc_list;
26886 struct fasync_struct *fifo_async;
26887- int open_count;
26888+ local_t open_count;
26889 int model;
26890 struct input_dev *input_jog_dev;
26891 struct input_dev *input_key_dev;
26892@@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, st
26893 static int sonypi_misc_release(struct inode *inode, struct file *file)
26894 {
26895 mutex_lock(&sonypi_device.lock);
26896- sonypi_device.open_count--;
26897+ local_dec(&sonypi_device.open_count);
26898 mutex_unlock(&sonypi_device.lock);
26899 return 0;
26900 }
26901@@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode
26902 lock_kernel();
26903 mutex_lock(&sonypi_device.lock);
26904 /* Flush input queue on first open */
26905- if (!sonypi_device.open_count)
26906+ if (!local_read(&sonypi_device.open_count))
26907 kfifo_reset(sonypi_device.fifo);
26908- sonypi_device.open_count++;
26909+ local_inc(&sonypi_device.open_count);
26910 mutex_unlock(&sonypi_device.lock);
26911 unlock_kernel();
26912 return 0;
26913diff -urNp linux-2.6.32.41/drivers/char/stallion.c linux-2.6.32.41/drivers/char/stallion.c
26914--- linux-2.6.32.41/drivers/char/stallion.c 2011-03-27 14:31:47.000000000 -0400
26915+++ linux-2.6.32.41/drivers/char/stallion.c 2011-05-16 21:46:57.000000000 -0400
26916@@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlp
26917 struct stlport stl_dummyport;
26918 struct stlport *portp;
26919
26920+ pax_track_stack();
26921+
26922 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
26923 return -EFAULT;
26924 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
26925diff -urNp linux-2.6.32.41/drivers/char/tpm/tpm_bios.c linux-2.6.32.41/drivers/char/tpm/tpm_bios.c
26926--- linux-2.6.32.41/drivers/char/tpm/tpm_bios.c 2011-03-27 14:31:47.000000000 -0400
26927+++ linux-2.6.32.41/drivers/char/tpm/tpm_bios.c 2011-04-17 15:56:46.000000000 -0400
26928@@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start
26929 event = addr;
26930
26931 if ((event->event_type == 0 && event->event_size == 0) ||
26932- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
26933+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
26934 return NULL;
26935
26936 return addr;
26937@@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(
26938 return NULL;
26939
26940 if ((event->event_type == 0 && event->event_size == 0) ||
26941- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
26942+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
26943 return NULL;
26944
26945 (*pos)++;
26946@@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_
26947 int i;
26948
26949 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
26950- seq_putc(m, data[i]);
26951+ if (!seq_putc(m, data[i]))
26952+ return -EFAULT;
26953
26954 return 0;
26955 }
26956@@ -409,6 +410,11 @@ static int read_log(struct tpm_bios_log
26957 log->bios_event_log_end = log->bios_event_log + len;
26958
26959 virt = acpi_os_map_memory(start, len);
26960+ if (!virt) {
26961+ kfree(log->bios_event_log);
26962+ log->bios_event_log = NULL;
26963+ return -EFAULT;
26964+ }
26965
26966 memcpy(log->bios_event_log, virt, len);
26967
26968diff -urNp linux-2.6.32.41/drivers/char/tpm/tpm.c linux-2.6.32.41/drivers/char/tpm/tpm.c
26969--- linux-2.6.32.41/drivers/char/tpm/tpm.c 2011-04-17 17:00:52.000000000 -0400
26970+++ linux-2.6.32.41/drivers/char/tpm/tpm.c 2011-05-16 21:46:57.000000000 -0400
26971@@ -402,7 +402,7 @@ static ssize_t tpm_transmit(struct tpm_c
26972 chip->vendor.req_complete_val)
26973 goto out_recv;
26974
26975- if ((status == chip->vendor.req_canceled)) {
26976+ if (status == chip->vendor.req_canceled) {
26977 dev_err(chip->dev, "Operation Canceled\n");
26978 rc = -ECANCELED;
26979 goto out;
26980@@ -821,6 +821,8 @@ ssize_t tpm_show_pubek(struct device *de
26981
26982 struct tpm_chip *chip = dev_get_drvdata(dev);
26983
26984+ pax_track_stack();
26985+
26986 tpm_cmd.header.in = tpm_readpubek_header;
26987 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
26988 "attempting to read the PUBEK");
26989diff -urNp linux-2.6.32.41/drivers/char/tty_io.c linux-2.6.32.41/drivers/char/tty_io.c
26990--- linux-2.6.32.41/drivers/char/tty_io.c 2011-03-27 14:31:47.000000000 -0400
26991+++ linux-2.6.32.41/drivers/char/tty_io.c 2011-04-17 15:56:46.000000000 -0400
26992@@ -136,21 +136,10 @@ LIST_HEAD(tty_drivers); /* linked list
26993 DEFINE_MUTEX(tty_mutex);
26994 EXPORT_SYMBOL(tty_mutex);
26995
26996-static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
26997-static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
26998 ssize_t redirected_tty_write(struct file *, const char __user *,
26999 size_t, loff_t *);
27000-static unsigned int tty_poll(struct file *, poll_table *);
27001 static int tty_open(struct inode *, struct file *);
27002-static int tty_release(struct inode *, struct file *);
27003 long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
27004-#ifdef CONFIG_COMPAT
27005-static long tty_compat_ioctl(struct file *file, unsigned int cmd,
27006- unsigned long arg);
27007-#else
27008-#define tty_compat_ioctl NULL
27009-#endif
27010-static int tty_fasync(int fd, struct file *filp, int on);
27011 static void release_tty(struct tty_struct *tty, int idx);
27012 static void __proc_set_tty(struct task_struct *tsk, struct tty_struct *tty);
27013 static void proc_set_tty(struct task_struct *tsk, struct tty_struct *tty);
27014@@ -870,7 +859,7 @@ EXPORT_SYMBOL(start_tty);
27015 * read calls may be outstanding in parallel.
27016 */
27017
27018-static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
27019+ssize_t tty_read(struct file *file, char __user *buf, size_t count,
27020 loff_t *ppos)
27021 {
27022 int i;
27023@@ -898,6 +887,8 @@ static ssize_t tty_read(struct file *fil
27024 return i;
27025 }
27026
27027+EXPORT_SYMBOL(tty_read);
27028+
27029 void tty_write_unlock(struct tty_struct *tty)
27030 {
27031 mutex_unlock(&tty->atomic_write_lock);
27032@@ -1045,7 +1036,7 @@ void tty_write_message(struct tty_struct
27033 * write method will not be invoked in parallel for each device.
27034 */
27035
27036-static ssize_t tty_write(struct file *file, const char __user *buf,
27037+ssize_t tty_write(struct file *file, const char __user *buf,
27038 size_t count, loff_t *ppos)
27039 {
27040 struct tty_struct *tty;
27041@@ -1072,6 +1063,8 @@ static ssize_t tty_write(struct file *fi
27042 return ret;
27043 }
27044
27045+EXPORT_SYMBOL(tty_write);
27046+
27047 ssize_t redirected_tty_write(struct file *file, const char __user *buf,
27048 size_t count, loff_t *ppos)
27049 {
27050@@ -1867,7 +1860,7 @@ static int tty_open(struct inode *inode,
27051 * Takes bkl. See tty_release_dev
27052 */
27053
27054-static int tty_release(struct inode *inode, struct file *filp)
27055+int tty_release(struct inode *inode, struct file *filp)
27056 {
27057 lock_kernel();
27058 tty_release_dev(filp);
27059@@ -1875,6 +1868,8 @@ static int tty_release(struct inode *ino
27060 return 0;
27061 }
27062
27063+EXPORT_SYMBOL(tty_release);
27064+
27065 /**
27066 * tty_poll - check tty status
27067 * @filp: file being polled
27068@@ -1887,7 +1882,7 @@ static int tty_release(struct inode *ino
27069 * may be re-entered freely by other callers.
27070 */
27071
27072-static unsigned int tty_poll(struct file *filp, poll_table *wait)
27073+unsigned int tty_poll(struct file *filp, poll_table *wait)
27074 {
27075 struct tty_struct *tty;
27076 struct tty_ldisc *ld;
27077@@ -1904,7 +1899,9 @@ static unsigned int tty_poll(struct file
27078 return ret;
27079 }
27080
27081-static int tty_fasync(int fd, struct file *filp, int on)
27082+EXPORT_SYMBOL(tty_poll);
27083+
27084+int tty_fasync(int fd, struct file *filp, int on)
27085 {
27086 struct tty_struct *tty;
27087 unsigned long flags;
27088@@ -1948,6 +1945,8 @@ out:
27089 return retval;
27090 }
27091
27092+EXPORT_SYMBOL(tty_fasync);
27093+
27094 /**
27095 * tiocsti - fake input character
27096 * @tty: tty to fake input into
27097@@ -2582,8 +2581,10 @@ long tty_ioctl(struct file *file, unsign
27098 return retval;
27099 }
27100
27101+EXPORT_SYMBOL(tty_ioctl);
27102+
27103 #ifdef CONFIG_COMPAT
27104-static long tty_compat_ioctl(struct file *file, unsigned int cmd,
27105+long tty_compat_ioctl(struct file *file, unsigned int cmd,
27106 unsigned long arg)
27107 {
27108 struct inode *inode = file->f_dentry->d_inode;
27109@@ -2607,6 +2608,8 @@ static long tty_compat_ioctl(struct file
27110
27111 return retval;
27112 }
27113+
27114+EXPORT_SYMBOL(tty_compat_ioctl);
27115 #endif
27116
27117 /*
27118@@ -3050,11 +3053,6 @@ struct tty_struct *get_current_tty(void)
27119 }
27120 EXPORT_SYMBOL_GPL(get_current_tty);
27121
27122-void tty_default_fops(struct file_operations *fops)
27123-{
27124- *fops = tty_fops;
27125-}
27126-
27127 /*
27128 * Initialize the console device. This is called *early*, so
27129 * we can't necessarily depend on lots of kernel help here.
27130diff -urNp linux-2.6.32.41/drivers/char/tty_ldisc.c linux-2.6.32.41/drivers/char/tty_ldisc.c
27131--- linux-2.6.32.41/drivers/char/tty_ldisc.c 2011-03-27 14:31:47.000000000 -0400
27132+++ linux-2.6.32.41/drivers/char/tty_ldisc.c 2011-04-17 15:56:46.000000000 -0400
27133@@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
27134 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
27135 struct tty_ldisc_ops *ldo = ld->ops;
27136
27137- ldo->refcount--;
27138+ atomic_dec(&ldo->refcount);
27139 module_put(ldo->owner);
27140 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
27141
27142@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
27143 spin_lock_irqsave(&tty_ldisc_lock, flags);
27144 tty_ldiscs[disc] = new_ldisc;
27145 new_ldisc->num = disc;
27146- new_ldisc->refcount = 0;
27147+ atomic_set(&new_ldisc->refcount, 0);
27148 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
27149
27150 return ret;
27151@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
27152 return -EINVAL;
27153
27154 spin_lock_irqsave(&tty_ldisc_lock, flags);
27155- if (tty_ldiscs[disc]->refcount)
27156+ if (atomic_read(&tty_ldiscs[disc]->refcount))
27157 ret = -EBUSY;
27158 else
27159 tty_ldiscs[disc] = NULL;
27160@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
27161 if (ldops) {
27162 ret = ERR_PTR(-EAGAIN);
27163 if (try_module_get(ldops->owner)) {
27164- ldops->refcount++;
27165+ atomic_inc(&ldops->refcount);
27166 ret = ldops;
27167 }
27168 }
27169@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
27170 unsigned long flags;
27171
27172 spin_lock_irqsave(&tty_ldisc_lock, flags);
27173- ldops->refcount--;
27174+ atomic_dec(&ldops->refcount);
27175 module_put(ldops->owner);
27176 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
27177 }
27178diff -urNp linux-2.6.32.41/drivers/char/virtio_console.c linux-2.6.32.41/drivers/char/virtio_console.c
27179--- linux-2.6.32.41/drivers/char/virtio_console.c 2011-03-27 14:31:47.000000000 -0400
27180+++ linux-2.6.32.41/drivers/char/virtio_console.c 2011-04-17 15:56:46.000000000 -0400
27181@@ -44,6 +44,7 @@ static unsigned int in_len;
27182 static char *in, *inbuf;
27183
27184 /* The operations for our console. */
27185+/* cannot be const */
27186 static struct hv_ops virtio_cons;
27187
27188 /* The hvc device */
27189diff -urNp linux-2.6.32.41/drivers/char/vt.c linux-2.6.32.41/drivers/char/vt.c
27190--- linux-2.6.32.41/drivers/char/vt.c 2011-03-27 14:31:47.000000000 -0400
27191+++ linux-2.6.32.41/drivers/char/vt.c 2011-04-17 15:56:46.000000000 -0400
27192@@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
27193
27194 static void notify_write(struct vc_data *vc, unsigned int unicode)
27195 {
27196- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
27197+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
27198 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
27199 }
27200
27201diff -urNp linux-2.6.32.41/drivers/char/vt_ioctl.c linux-2.6.32.41/drivers/char/vt_ioctl.c
27202--- linux-2.6.32.41/drivers/char/vt_ioctl.c 2011-03-27 14:31:47.000000000 -0400
27203+++ linux-2.6.32.41/drivers/char/vt_ioctl.c 2011-04-17 15:56:46.000000000 -0400
27204@@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
27205 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
27206 return -EFAULT;
27207
27208- if (!capable(CAP_SYS_TTY_CONFIG))
27209- perm = 0;
27210-
27211 switch (cmd) {
27212 case KDGKBENT:
27213 key_map = key_maps[s];
27214@@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __
27215 val = (i ? K_HOLE : K_NOSUCHMAP);
27216 return put_user(val, &user_kbe->kb_value);
27217 case KDSKBENT:
27218+ if (!capable(CAP_SYS_TTY_CONFIG))
27219+ perm = 0;
27220+
27221 if (!perm)
27222 return -EPERM;
27223+
27224 if (!i && v == K_NOSUCHMAP) {
27225 /* deallocate map */
27226 key_map = key_maps[s];
27227@@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
27228 int i, j, k;
27229 int ret;
27230
27231- if (!capable(CAP_SYS_TTY_CONFIG))
27232- perm = 0;
27233-
27234 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
27235 if (!kbs) {
27236 ret = -ENOMEM;
27237@@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
27238 kfree(kbs);
27239 return ((p && *p) ? -EOVERFLOW : 0);
27240 case KDSKBSENT:
27241+ if (!capable(CAP_SYS_TTY_CONFIG))
27242+ perm = 0;
27243+
27244 if (!perm) {
27245 ret = -EPERM;
27246 goto reterr;
27247diff -urNp linux-2.6.32.41/drivers/cpufreq/cpufreq.c linux-2.6.32.41/drivers/cpufreq/cpufreq.c
27248--- linux-2.6.32.41/drivers/cpufreq/cpufreq.c 2011-03-27 14:31:47.000000000 -0400
27249+++ linux-2.6.32.41/drivers/cpufreq/cpufreq.c 2011-04-17 15:56:46.000000000 -0400
27250@@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct
27251 complete(&policy->kobj_unregister);
27252 }
27253
27254-static struct sysfs_ops sysfs_ops = {
27255+static const struct sysfs_ops sysfs_ops = {
27256 .show = show,
27257 .store = store,
27258 };
27259diff -urNp linux-2.6.32.41/drivers/cpuidle/sysfs.c linux-2.6.32.41/drivers/cpuidle/sysfs.c
27260--- linux-2.6.32.41/drivers/cpuidle/sysfs.c 2011-03-27 14:31:47.000000000 -0400
27261+++ linux-2.6.32.41/drivers/cpuidle/sysfs.c 2011-04-17 15:56:46.000000000 -0400
27262@@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobj
27263 return ret;
27264 }
27265
27266-static struct sysfs_ops cpuidle_sysfs_ops = {
27267+static const struct sysfs_ops cpuidle_sysfs_ops = {
27268 .show = cpuidle_show,
27269 .store = cpuidle_store,
27270 };
27271@@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct
27272 return ret;
27273 }
27274
27275-static struct sysfs_ops cpuidle_state_sysfs_ops = {
27276+static const struct sysfs_ops cpuidle_state_sysfs_ops = {
27277 .show = cpuidle_state_show,
27278 };
27279
27280@@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpui
27281 .release = cpuidle_state_sysfs_release,
27282 };
27283
27284-static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
27285+static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
27286 {
27287 kobject_put(&device->kobjs[i]->kobj);
27288 wait_for_completion(&device->kobjs[i]->kobj_unregister);
27289diff -urNp linux-2.6.32.41/drivers/crypto/hifn_795x.c linux-2.6.32.41/drivers/crypto/hifn_795x.c
27290--- linux-2.6.32.41/drivers/crypto/hifn_795x.c 2011-03-27 14:31:47.000000000 -0400
27291+++ linux-2.6.32.41/drivers/crypto/hifn_795x.c 2011-05-16 21:46:57.000000000 -0400
27292@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
27293 0xCA, 0x34, 0x2B, 0x2E};
27294 struct scatterlist sg;
27295
27296+ pax_track_stack();
27297+
27298 memset(src, 0, sizeof(src));
27299 memset(ctx.key, 0, sizeof(ctx.key));
27300
27301diff -urNp linux-2.6.32.41/drivers/crypto/padlock-aes.c linux-2.6.32.41/drivers/crypto/padlock-aes.c
27302--- linux-2.6.32.41/drivers/crypto/padlock-aes.c 2011-03-27 14:31:47.000000000 -0400
27303+++ linux-2.6.32.41/drivers/crypto/padlock-aes.c 2011-05-16 21:46:57.000000000 -0400
27304@@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm
27305 struct crypto_aes_ctx gen_aes;
27306 int cpu;
27307
27308+ pax_track_stack();
27309+
27310 if (key_len % 8) {
27311 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
27312 return -EINVAL;
27313diff -urNp linux-2.6.32.41/drivers/dma/ioat/dma.c linux-2.6.32.41/drivers/dma/ioat/dma.c
27314--- linux-2.6.32.41/drivers/dma/ioat/dma.c 2011-03-27 14:31:47.000000000 -0400
27315+++ linux-2.6.32.41/drivers/dma/ioat/dma.c 2011-04-17 15:56:46.000000000 -0400
27316@@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, str
27317 return entry->show(&chan->common, page);
27318 }
27319
27320-struct sysfs_ops ioat_sysfs_ops = {
27321+const struct sysfs_ops ioat_sysfs_ops = {
27322 .show = ioat_attr_show,
27323 };
27324
27325diff -urNp linux-2.6.32.41/drivers/dma/ioat/dma.h linux-2.6.32.41/drivers/dma/ioat/dma.h
27326--- linux-2.6.32.41/drivers/dma/ioat/dma.h 2011-03-27 14:31:47.000000000 -0400
27327+++ linux-2.6.32.41/drivers/dma/ioat/dma.h 2011-04-17 15:56:46.000000000 -0400
27328@@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_c
27329 unsigned long *phys_complete);
27330 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
27331 void ioat_kobject_del(struct ioatdma_device *device);
27332-extern struct sysfs_ops ioat_sysfs_ops;
27333+extern const struct sysfs_ops ioat_sysfs_ops;
27334 extern struct ioat_sysfs_entry ioat_version_attr;
27335 extern struct ioat_sysfs_entry ioat_cap_attr;
27336 #endif /* IOATDMA_H */
27337diff -urNp linux-2.6.32.41/drivers/edac/edac_device_sysfs.c linux-2.6.32.41/drivers/edac/edac_device_sysfs.c
27338--- linux-2.6.32.41/drivers/edac/edac_device_sysfs.c 2011-03-27 14:31:47.000000000 -0400
27339+++ linux-2.6.32.41/drivers/edac/edac_device_sysfs.c 2011-04-17 15:56:46.000000000 -0400
27340@@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(s
27341 }
27342
27343 /* edac_dev file operations for an 'ctl_info' */
27344-static struct sysfs_ops device_ctl_info_ops = {
27345+static const struct sysfs_ops device_ctl_info_ops = {
27346 .show = edac_dev_ctl_info_show,
27347 .store = edac_dev_ctl_info_store
27348 };
27349@@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(s
27350 }
27351
27352 /* edac_dev file operations for an 'instance' */
27353-static struct sysfs_ops device_instance_ops = {
27354+static const struct sysfs_ops device_instance_ops = {
27355 .show = edac_dev_instance_show,
27356 .store = edac_dev_instance_store
27357 };
27358@@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(stru
27359 }
27360
27361 /* edac_dev file operations for a 'block' */
27362-static struct sysfs_ops device_block_ops = {
27363+static const struct sysfs_ops device_block_ops = {
27364 .show = edac_dev_block_show,
27365 .store = edac_dev_block_store
27366 };
27367diff -urNp linux-2.6.32.41/drivers/edac/edac_mc_sysfs.c linux-2.6.32.41/drivers/edac/edac_mc_sysfs.c
27368--- linux-2.6.32.41/drivers/edac/edac_mc_sysfs.c 2011-03-27 14:31:47.000000000 -0400
27369+++ linux-2.6.32.41/drivers/edac/edac_mc_sysfs.c 2011-04-17 15:56:46.000000000 -0400
27370@@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kob
27371 return -EIO;
27372 }
27373
27374-static struct sysfs_ops csrowfs_ops = {
27375+static const struct sysfs_ops csrowfs_ops = {
27376 .show = csrowdev_show,
27377 .store = csrowdev_store
27378 };
27379@@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobje
27380 }
27381
27382 /* Intermediate show/store table */
27383-static struct sysfs_ops mci_ops = {
27384+static const struct sysfs_ops mci_ops = {
27385 .show = mcidev_show,
27386 .store = mcidev_store
27387 };
27388diff -urNp linux-2.6.32.41/drivers/edac/edac_pci_sysfs.c linux-2.6.32.41/drivers/edac/edac_pci_sysfs.c
27389--- linux-2.6.32.41/drivers/edac/edac_pci_sysfs.c 2011-03-27 14:31:47.000000000 -0400
27390+++ linux-2.6.32.41/drivers/edac/edac_pci_sysfs.c 2011-05-04 17:56:20.000000000 -0400
27391@@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log
27392 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
27393 static int edac_pci_poll_msec = 1000; /* one second workq period */
27394
27395-static atomic_t pci_parity_count = ATOMIC_INIT(0);
27396-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
27397+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
27398+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
27399
27400 static struct kobject *edac_pci_top_main_kobj;
27401 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
27402@@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(s
27403 }
27404
27405 /* fs_ops table */
27406-static struct sysfs_ops pci_instance_ops = {
27407+static const struct sysfs_ops pci_instance_ops = {
27408 .show = edac_pci_instance_show,
27409 .store = edac_pci_instance_store
27410 };
27411@@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct
27412 return -EIO;
27413 }
27414
27415-static struct sysfs_ops edac_pci_sysfs_ops = {
27416+static const struct sysfs_ops edac_pci_sysfs_ops = {
27417 .show = edac_pci_dev_show,
27418 .store = edac_pci_dev_store
27419 };
27420@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(str
27421 edac_printk(KERN_CRIT, EDAC_PCI,
27422 "Signaled System Error on %s\n",
27423 pci_name(dev));
27424- atomic_inc(&pci_nonparity_count);
27425+ atomic_inc_unchecked(&pci_nonparity_count);
27426 }
27427
27428 if (status & (PCI_STATUS_PARITY)) {
27429@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(str
27430 "Master Data Parity Error on %s\n",
27431 pci_name(dev));
27432
27433- atomic_inc(&pci_parity_count);
27434+ atomic_inc_unchecked(&pci_parity_count);
27435 }
27436
27437 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27438@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(str
27439 "Detected Parity Error on %s\n",
27440 pci_name(dev));
27441
27442- atomic_inc(&pci_parity_count);
27443+ atomic_inc_unchecked(&pci_parity_count);
27444 }
27445 }
27446
27447@@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(str
27448 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
27449 "Signaled System Error on %s\n",
27450 pci_name(dev));
27451- atomic_inc(&pci_nonparity_count);
27452+ atomic_inc_unchecked(&pci_nonparity_count);
27453 }
27454
27455 if (status & (PCI_STATUS_PARITY)) {
27456@@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(str
27457 "Master Data Parity Error on "
27458 "%s\n", pci_name(dev));
27459
27460- atomic_inc(&pci_parity_count);
27461+ atomic_inc_unchecked(&pci_parity_count);
27462 }
27463
27464 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27465@@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(str
27466 "Detected Parity Error on %s\n",
27467 pci_name(dev));
27468
27469- atomic_inc(&pci_parity_count);
27470+ atomic_inc_unchecked(&pci_parity_count);
27471 }
27472 }
27473 }
27474@@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
27475 if (!check_pci_errors)
27476 return;
27477
27478- before_count = atomic_read(&pci_parity_count);
27479+ before_count = atomic_read_unchecked(&pci_parity_count);
27480
27481 /* scan all PCI devices looking for a Parity Error on devices and
27482 * bridges.
27483@@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
27484 /* Only if operator has selected panic on PCI Error */
27485 if (edac_pci_get_panic_on_pe()) {
27486 /* If the count is different 'after' from 'before' */
27487- if (before_count != atomic_read(&pci_parity_count))
27488+ if (before_count != atomic_read_unchecked(&pci_parity_count))
27489 panic("EDAC: PCI Parity Error");
27490 }
27491 }
27492diff -urNp linux-2.6.32.41/drivers/firewire/core-cdev.c linux-2.6.32.41/drivers/firewire/core-cdev.c
27493--- linux-2.6.32.41/drivers/firewire/core-cdev.c 2011-03-27 14:31:47.000000000 -0400
27494+++ linux-2.6.32.41/drivers/firewire/core-cdev.c 2011-04-17 15:56:46.000000000 -0400
27495@@ -1141,8 +1141,7 @@ static int init_iso_resource(struct clie
27496 int ret;
27497
27498 if ((request->channels == 0 && request->bandwidth == 0) ||
27499- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
27500- request->bandwidth < 0)
27501+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
27502 return -EINVAL;
27503
27504 r = kmalloc(sizeof(*r), GFP_KERNEL);
27505diff -urNp linux-2.6.32.41/drivers/firewire/core-transaction.c linux-2.6.32.41/drivers/firewire/core-transaction.c
27506--- linux-2.6.32.41/drivers/firewire/core-transaction.c 2011-03-27 14:31:47.000000000 -0400
27507+++ linux-2.6.32.41/drivers/firewire/core-transaction.c 2011-05-16 21:46:57.000000000 -0400
27508@@ -36,6 +36,7 @@
27509 #include <linux/string.h>
27510 #include <linux/timer.h>
27511 #include <linux/types.h>
27512+#include <linux/sched.h>
27513
27514 #include <asm/byteorder.h>
27515
27516@@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *c
27517 struct transaction_callback_data d;
27518 struct fw_transaction t;
27519
27520+ pax_track_stack();
27521+
27522 init_completion(&d.done);
27523 d.payload = payload;
27524 fw_send_request(card, &t, tcode, destination_id, generation, speed,
27525diff -urNp linux-2.6.32.41/drivers/firmware/dmi_scan.c linux-2.6.32.41/drivers/firmware/dmi_scan.c
27526--- linux-2.6.32.41/drivers/firmware/dmi_scan.c 2011-03-27 14:31:47.000000000 -0400
27527+++ linux-2.6.32.41/drivers/firmware/dmi_scan.c 2011-04-17 15:56:46.000000000 -0400
27528@@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
27529 }
27530 }
27531 else {
27532- /*
27533- * no iounmap() for that ioremap(); it would be a no-op, but
27534- * it's so early in setup that sucker gets confused into doing
27535- * what it shouldn't if we actually call it.
27536- */
27537 p = dmi_ioremap(0xF0000, 0x10000);
27538 if (p == NULL)
27539 goto error;
27540diff -urNp linux-2.6.32.41/drivers/firmware/edd.c linux-2.6.32.41/drivers/firmware/edd.c
27541--- linux-2.6.32.41/drivers/firmware/edd.c 2011-03-27 14:31:47.000000000 -0400
27542+++ linux-2.6.32.41/drivers/firmware/edd.c 2011-04-17 15:56:46.000000000 -0400
27543@@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, str
27544 return ret;
27545 }
27546
27547-static struct sysfs_ops edd_attr_ops = {
27548+static const struct sysfs_ops edd_attr_ops = {
27549 .show = edd_attr_show,
27550 };
27551
27552diff -urNp linux-2.6.32.41/drivers/firmware/efivars.c linux-2.6.32.41/drivers/firmware/efivars.c
27553--- linux-2.6.32.41/drivers/firmware/efivars.c 2011-03-27 14:31:47.000000000 -0400
27554+++ linux-2.6.32.41/drivers/firmware/efivars.c 2011-04-17 15:56:46.000000000 -0400
27555@@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct
27556 return ret;
27557 }
27558
27559-static struct sysfs_ops efivar_attr_ops = {
27560+static const struct sysfs_ops efivar_attr_ops = {
27561 .show = efivar_attr_show,
27562 .store = efivar_attr_store,
27563 };
27564diff -urNp linux-2.6.32.41/drivers/firmware/iscsi_ibft.c linux-2.6.32.41/drivers/firmware/iscsi_ibft.c
27565--- linux-2.6.32.41/drivers/firmware/iscsi_ibft.c 2011-03-27 14:31:47.000000000 -0400
27566+++ linux-2.6.32.41/drivers/firmware/iscsi_ibft.c 2011-04-17 15:56:46.000000000 -0400
27567@@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struc
27568 return ret;
27569 }
27570
27571-static struct sysfs_ops ibft_attr_ops = {
27572+static const struct sysfs_ops ibft_attr_ops = {
27573 .show = ibft_show_attribute,
27574 };
27575
27576diff -urNp linux-2.6.32.41/drivers/firmware/memmap.c linux-2.6.32.41/drivers/firmware/memmap.c
27577--- linux-2.6.32.41/drivers/firmware/memmap.c 2011-03-27 14:31:47.000000000 -0400
27578+++ linux-2.6.32.41/drivers/firmware/memmap.c 2011-04-17 15:56:46.000000000 -0400
27579@@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
27580 NULL
27581 };
27582
27583-static struct sysfs_ops memmap_attr_ops = {
27584+static const struct sysfs_ops memmap_attr_ops = {
27585 .show = memmap_attr_show,
27586 };
27587
27588diff -urNp linux-2.6.32.41/drivers/gpio/vr41xx_giu.c linux-2.6.32.41/drivers/gpio/vr41xx_giu.c
27589--- linux-2.6.32.41/drivers/gpio/vr41xx_giu.c 2011-03-27 14:31:47.000000000 -0400
27590+++ linux-2.6.32.41/drivers/gpio/vr41xx_giu.c 2011-05-04 17:56:28.000000000 -0400
27591@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
27592 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
27593 maskl, pendl, maskh, pendh);
27594
27595- atomic_inc(&irq_err_count);
27596+ atomic_inc_unchecked(&irq_err_count);
27597
27598 return -EINVAL;
27599 }
27600diff -urNp linux-2.6.32.41/drivers/gpu/drm/drm_crtc_helper.c linux-2.6.32.41/drivers/gpu/drm/drm_crtc_helper.c
27601--- linux-2.6.32.41/drivers/gpu/drm/drm_crtc_helper.c 2011-03-27 14:31:47.000000000 -0400
27602+++ linux-2.6.32.41/drivers/gpu/drm/drm_crtc_helper.c 2011-05-16 21:46:57.000000000 -0400
27603@@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct d
27604 struct drm_crtc *tmp;
27605 int crtc_mask = 1;
27606
27607- WARN(!crtc, "checking null crtc?");
27608+ BUG_ON(!crtc);
27609
27610 dev = crtc->dev;
27611
27612@@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm
27613
27614 adjusted_mode = drm_mode_duplicate(dev, mode);
27615
27616+ pax_track_stack();
27617+
27618 crtc->enabled = drm_helper_crtc_in_use(crtc);
27619
27620 if (!crtc->enabled)
27621diff -urNp linux-2.6.32.41/drivers/gpu/drm/drm_drv.c linux-2.6.32.41/drivers/gpu/drm/drm_drv.c
27622--- linux-2.6.32.41/drivers/gpu/drm/drm_drv.c 2011-03-27 14:31:47.000000000 -0400
27623+++ linux-2.6.32.41/drivers/gpu/drm/drm_drv.c 2011-04-17 15:56:46.000000000 -0400
27624@@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struc
27625 char *kdata = NULL;
27626
27627 atomic_inc(&dev->ioctl_count);
27628- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
27629+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
27630 ++file_priv->ioctl_count;
27631
27632 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
27633diff -urNp linux-2.6.32.41/drivers/gpu/drm/drm_fops.c linux-2.6.32.41/drivers/gpu/drm/drm_fops.c
27634--- linux-2.6.32.41/drivers/gpu/drm/drm_fops.c 2011-03-27 14:31:47.000000000 -0400
27635+++ linux-2.6.32.41/drivers/gpu/drm/drm_fops.c 2011-04-17 15:56:46.000000000 -0400
27636@@ -66,7 +66,7 @@ static int drm_setup(struct drm_device *
27637 }
27638
27639 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
27640- atomic_set(&dev->counts[i], 0);
27641+ atomic_set_unchecked(&dev->counts[i], 0);
27642
27643 dev->sigdata.lock = NULL;
27644
27645@@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct
27646
27647 retcode = drm_open_helper(inode, filp, dev);
27648 if (!retcode) {
27649- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
27650+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
27651 spin_lock(&dev->count_lock);
27652- if (!dev->open_count++) {
27653+ if (local_inc_return(&dev->open_count) == 1) {
27654 spin_unlock(&dev->count_lock);
27655 retcode = drm_setup(dev);
27656 goto out;
27657@@ -435,7 +435,7 @@ int drm_release(struct inode *inode, str
27658
27659 lock_kernel();
27660
27661- DRM_DEBUG("open_count = %d\n", dev->open_count);
27662+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
27663
27664 if (dev->driver->preclose)
27665 dev->driver->preclose(dev, file_priv);
27666@@ -447,7 +447,7 @@ int drm_release(struct inode *inode, str
27667 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
27668 task_pid_nr(current),
27669 (long)old_encode_dev(file_priv->minor->device),
27670- dev->open_count);
27671+ local_read(&dev->open_count));
27672
27673 /* if the master has gone away we can't do anything with the lock */
27674 if (file_priv->minor->master)
27675@@ -524,9 +524,9 @@ int drm_release(struct inode *inode, str
27676 * End inline drm_release
27677 */
27678
27679- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
27680+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
27681 spin_lock(&dev->count_lock);
27682- if (!--dev->open_count) {
27683+ if (local_dec_and_test(&dev->open_count)) {
27684 if (atomic_read(&dev->ioctl_count)) {
27685 DRM_ERROR("Device busy: %d\n",
27686 atomic_read(&dev->ioctl_count));
27687diff -urNp linux-2.6.32.41/drivers/gpu/drm/drm_gem.c linux-2.6.32.41/drivers/gpu/drm/drm_gem.c
27688--- linux-2.6.32.41/drivers/gpu/drm/drm_gem.c 2011-03-27 14:31:47.000000000 -0400
27689+++ linux-2.6.32.41/drivers/gpu/drm/drm_gem.c 2011-04-17 15:56:46.000000000 -0400
27690@@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
27691 spin_lock_init(&dev->object_name_lock);
27692 idr_init(&dev->object_name_idr);
27693 atomic_set(&dev->object_count, 0);
27694- atomic_set(&dev->object_memory, 0);
27695+ atomic_set_unchecked(&dev->object_memory, 0);
27696 atomic_set(&dev->pin_count, 0);
27697- atomic_set(&dev->pin_memory, 0);
27698+ atomic_set_unchecked(&dev->pin_memory, 0);
27699 atomic_set(&dev->gtt_count, 0);
27700- atomic_set(&dev->gtt_memory, 0);
27701+ atomic_set_unchecked(&dev->gtt_memory, 0);
27702
27703 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
27704 if (!mm) {
27705@@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *
27706 goto fput;
27707 }
27708 atomic_inc(&dev->object_count);
27709- atomic_add(obj->size, &dev->object_memory);
27710+ atomic_add_unchecked(obj->size, &dev->object_memory);
27711 return obj;
27712 fput:
27713 fput(obj->filp);
27714@@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
27715
27716 fput(obj->filp);
27717 atomic_dec(&dev->object_count);
27718- atomic_sub(obj->size, &dev->object_memory);
27719+ atomic_sub_unchecked(obj->size, &dev->object_memory);
27720 kfree(obj);
27721 }
27722 EXPORT_SYMBOL(drm_gem_object_free);
27723diff -urNp linux-2.6.32.41/drivers/gpu/drm/drm_info.c linux-2.6.32.41/drivers/gpu/drm/drm_info.c
27724--- linux-2.6.32.41/drivers/gpu/drm/drm_info.c 2011-03-27 14:31:47.000000000 -0400
27725+++ linux-2.6.32.41/drivers/gpu/drm/drm_info.c 2011-04-17 15:56:46.000000000 -0400
27726@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
27727 struct drm_local_map *map;
27728 struct drm_map_list *r_list;
27729
27730- /* Hardcoded from _DRM_FRAME_BUFFER,
27731- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
27732- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
27733- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
27734+ static const char * const types[] = {
27735+ [_DRM_FRAME_BUFFER] = "FB",
27736+ [_DRM_REGISTERS] = "REG",
27737+ [_DRM_SHM] = "SHM",
27738+ [_DRM_AGP] = "AGP",
27739+ [_DRM_SCATTER_GATHER] = "SG",
27740+ [_DRM_CONSISTENT] = "PCI",
27741+ [_DRM_GEM] = "GEM" };
27742 const char *type;
27743 int i;
27744
27745@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
27746 map = r_list->map;
27747 if (!map)
27748 continue;
27749- if (map->type < 0 || map->type > 5)
27750+ if (map->type >= ARRAY_SIZE(types))
27751 type = "??";
27752 else
27753 type = types[map->type];
27754@@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file
27755 struct drm_device *dev = node->minor->dev;
27756
27757 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
27758- seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
27759+ seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
27760 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
27761- seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
27762- seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
27763+ seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
27764+ seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
27765 seq_printf(m, "%d gtt total\n", dev->gtt_total);
27766 return 0;
27767 }
27768@@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, voi
27769 mutex_lock(&dev->struct_mutex);
27770 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
27771 atomic_read(&dev->vma_count),
27772+#ifdef CONFIG_GRKERNSEC_HIDESYM
27773+ NULL, 0);
27774+#else
27775 high_memory, (u64)virt_to_phys(high_memory));
27776+#endif
27777
27778 list_for_each_entry(pt, &dev->vmalist, head) {
27779 vma = pt->vma;
27780@@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, voi
27781 continue;
27782 seq_printf(m,
27783 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
27784- pt->pid, vma->vm_start, vma->vm_end,
27785+ pt->pid,
27786+#ifdef CONFIG_GRKERNSEC_HIDESYM
27787+ 0, 0,
27788+#else
27789+ vma->vm_start, vma->vm_end,
27790+#endif
27791 vma->vm_flags & VM_READ ? 'r' : '-',
27792 vma->vm_flags & VM_WRITE ? 'w' : '-',
27793 vma->vm_flags & VM_EXEC ? 'x' : '-',
27794 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
27795 vma->vm_flags & VM_LOCKED ? 'l' : '-',
27796 vma->vm_flags & VM_IO ? 'i' : '-',
27797+#ifdef CONFIG_GRKERNSEC_HIDESYM
27798+ 0);
27799+#else
27800 vma->vm_pgoff);
27801+#endif
27802
27803 #if defined(__i386__)
27804 pgprot = pgprot_val(vma->vm_page_prot);
27805diff -urNp linux-2.6.32.41/drivers/gpu/drm/drm_ioctl.c linux-2.6.32.41/drivers/gpu/drm/drm_ioctl.c
27806--- linux-2.6.32.41/drivers/gpu/drm/drm_ioctl.c 2011-03-27 14:31:47.000000000 -0400
27807+++ linux-2.6.32.41/drivers/gpu/drm/drm_ioctl.c 2011-04-17 15:56:46.000000000 -0400
27808@@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev,
27809 stats->data[i].value =
27810 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
27811 else
27812- stats->data[i].value = atomic_read(&dev->counts[i]);
27813+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
27814 stats->data[i].type = dev->types[i];
27815 }
27816
27817diff -urNp linux-2.6.32.41/drivers/gpu/drm/drm_lock.c linux-2.6.32.41/drivers/gpu/drm/drm_lock.c
27818--- linux-2.6.32.41/drivers/gpu/drm/drm_lock.c 2011-03-27 14:31:47.000000000 -0400
27819+++ linux-2.6.32.41/drivers/gpu/drm/drm_lock.c 2011-04-17 15:56:46.000000000 -0400
27820@@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, voi
27821 if (drm_lock_take(&master->lock, lock->context)) {
27822 master->lock.file_priv = file_priv;
27823 master->lock.lock_time = jiffies;
27824- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
27825+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
27826 break; /* Got lock */
27827 }
27828
27829@@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, v
27830 return -EINVAL;
27831 }
27832
27833- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
27834+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
27835
27836 /* kernel_context_switch isn't used by any of the x86 drm
27837 * modules but is required by the Sparc driver.
27838diff -urNp linux-2.6.32.41/drivers/gpu/drm/i810/i810_dma.c linux-2.6.32.41/drivers/gpu/drm/i810/i810_dma.c
27839--- linux-2.6.32.41/drivers/gpu/drm/i810/i810_dma.c 2011-03-27 14:31:47.000000000 -0400
27840+++ linux-2.6.32.41/drivers/gpu/drm/i810/i810_dma.c 2011-04-17 15:56:46.000000000 -0400
27841@@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_de
27842 dma->buflist[vertex->idx],
27843 vertex->discard, vertex->used);
27844
27845- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
27846- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
27847+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
27848+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
27849 sarea_priv->last_enqueue = dev_priv->counter - 1;
27850 sarea_priv->last_dispatch = (int)hw_status[5];
27851
27852@@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device
27853 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
27854 mc->last_render);
27855
27856- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
27857- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
27858+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
27859+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
27860 sarea_priv->last_enqueue = dev_priv->counter - 1;
27861 sarea_priv->last_dispatch = (int)hw_status[5];
27862
27863diff -urNp linux-2.6.32.41/drivers/gpu/drm/i810/i810_drv.h linux-2.6.32.41/drivers/gpu/drm/i810/i810_drv.h
27864--- linux-2.6.32.41/drivers/gpu/drm/i810/i810_drv.h 2011-03-27 14:31:47.000000000 -0400
27865+++ linux-2.6.32.41/drivers/gpu/drm/i810/i810_drv.h 2011-05-04 17:56:28.000000000 -0400
27866@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
27867 int page_flipping;
27868
27869 wait_queue_head_t irq_queue;
27870- atomic_t irq_received;
27871- atomic_t irq_emitted;
27872+ atomic_unchecked_t irq_received;
27873+ atomic_unchecked_t irq_emitted;
27874
27875 int front_offset;
27876 } drm_i810_private_t;
27877diff -urNp linux-2.6.32.41/drivers/gpu/drm/i830/i830_drv.h linux-2.6.32.41/drivers/gpu/drm/i830/i830_drv.h
27878--- linux-2.6.32.41/drivers/gpu/drm/i830/i830_drv.h 2011-03-27 14:31:47.000000000 -0400
27879+++ linux-2.6.32.41/drivers/gpu/drm/i830/i830_drv.h 2011-05-04 17:56:28.000000000 -0400
27880@@ -115,8 +115,8 @@ typedef struct drm_i830_private {
27881 int page_flipping;
27882
27883 wait_queue_head_t irq_queue;
27884- atomic_t irq_received;
27885- atomic_t irq_emitted;
27886+ atomic_unchecked_t irq_received;
27887+ atomic_unchecked_t irq_emitted;
27888
27889 int use_mi_batchbuffer_start;
27890
27891diff -urNp linux-2.6.32.41/drivers/gpu/drm/i830/i830_irq.c linux-2.6.32.41/drivers/gpu/drm/i830/i830_irq.c
27892--- linux-2.6.32.41/drivers/gpu/drm/i830/i830_irq.c 2011-03-27 14:31:47.000000000 -0400
27893+++ linux-2.6.32.41/drivers/gpu/drm/i830/i830_irq.c 2011-05-04 17:56:28.000000000 -0400
27894@@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_
27895
27896 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
27897
27898- atomic_inc(&dev_priv->irq_received);
27899+ atomic_inc_unchecked(&dev_priv->irq_received);
27900 wake_up_interruptible(&dev_priv->irq_queue);
27901
27902 return IRQ_HANDLED;
27903@@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_devi
27904
27905 DRM_DEBUG("%s\n", __func__);
27906
27907- atomic_inc(&dev_priv->irq_emitted);
27908+ atomic_inc_unchecked(&dev_priv->irq_emitted);
27909
27910 BEGIN_LP_RING(2);
27911 OUT_RING(0);
27912 OUT_RING(GFX_OP_USER_INTERRUPT);
27913 ADVANCE_LP_RING();
27914
27915- return atomic_read(&dev_priv->irq_emitted);
27916+ return atomic_read_unchecked(&dev_priv->irq_emitted);
27917 }
27918
27919 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
27920@@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_devi
27921
27922 DRM_DEBUG("%s\n", __func__);
27923
27924- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
27925+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
27926 return 0;
27927
27928 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
27929@@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_devi
27930
27931 for (;;) {
27932 __set_current_state(TASK_INTERRUPTIBLE);
27933- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
27934+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
27935 break;
27936 if ((signed)(end - jiffies) <= 0) {
27937 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
27938@@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct d
27939 I830_WRITE16(I830REG_HWSTAM, 0xffff);
27940 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
27941 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
27942- atomic_set(&dev_priv->irq_received, 0);
27943- atomic_set(&dev_priv->irq_emitted, 0);
27944+ atomic_set_unchecked(&dev_priv->irq_received, 0);
27945+ atomic_set_unchecked(&dev_priv->irq_emitted, 0);
27946 init_waitqueue_head(&dev_priv->irq_queue);
27947 }
27948
27949diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ch7017.c linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ch7017.c
27950--- linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ch7017.c 2011-03-27 14:31:47.000000000 -0400
27951+++ linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ch7017.c 2011-04-17 15:56:46.000000000 -0400
27952@@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_
27953 }
27954 }
27955
27956-struct intel_dvo_dev_ops ch7017_ops = {
27957+const struct intel_dvo_dev_ops ch7017_ops = {
27958 .init = ch7017_init,
27959 .detect = ch7017_detect,
27960 .mode_valid = ch7017_mode_valid,
27961diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ch7xxx.c linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ch7xxx.c
27962--- linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-03-27 14:31:47.000000000 -0400
27963+++ linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-04-17 15:56:46.000000000 -0400
27964@@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_
27965 }
27966 }
27967
27968-struct intel_dvo_dev_ops ch7xxx_ops = {
27969+const struct intel_dvo_dev_ops ch7xxx_ops = {
27970 .init = ch7xxx_init,
27971 .detect = ch7xxx_detect,
27972 .mode_valid = ch7xxx_mode_valid,
27973diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/dvo.h linux-2.6.32.41/drivers/gpu/drm/i915/dvo.h
27974--- linux-2.6.32.41/drivers/gpu/drm/i915/dvo.h 2011-03-27 14:31:47.000000000 -0400
27975+++ linux-2.6.32.41/drivers/gpu/drm/i915/dvo.h 2011-04-17 15:56:46.000000000 -0400
27976@@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
27977 *
27978 * \return singly-linked list of modes or NULL if no modes found.
27979 */
27980- struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
27981+ struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
27982
27983 /**
27984 * Clean up driver-specific bits of the output
27985 */
27986- void (*destroy) (struct intel_dvo_device *dvo);
27987+ void (* const destroy) (struct intel_dvo_device *dvo);
27988
27989 /**
27990 * Debugging hook to dump device registers to log file
27991 */
27992- void (*dump_regs)(struct intel_dvo_device *dvo);
27993+ void (* const dump_regs)(struct intel_dvo_device *dvo);
27994 };
27995
27996-extern struct intel_dvo_dev_ops sil164_ops;
27997-extern struct intel_dvo_dev_ops ch7xxx_ops;
27998-extern struct intel_dvo_dev_ops ivch_ops;
27999-extern struct intel_dvo_dev_ops tfp410_ops;
28000-extern struct intel_dvo_dev_ops ch7017_ops;
28001+extern const struct intel_dvo_dev_ops sil164_ops;
28002+extern const struct intel_dvo_dev_ops ch7xxx_ops;
28003+extern const struct intel_dvo_dev_ops ivch_ops;
28004+extern const struct intel_dvo_dev_ops tfp410_ops;
28005+extern const struct intel_dvo_dev_ops ch7017_ops;
28006
28007 #endif /* _INTEL_DVO_H */
28008diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ivch.c linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ivch.c
28009--- linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ivch.c 2011-03-27 14:31:47.000000000 -0400
28010+++ linux-2.6.32.41/drivers/gpu/drm/i915/dvo_ivch.c 2011-04-17 15:56:46.000000000 -0400
28011@@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dv
28012 }
28013 }
28014
28015-struct intel_dvo_dev_ops ivch_ops= {
28016+const struct intel_dvo_dev_ops ivch_ops= {
28017 .init = ivch_init,
28018 .dpms = ivch_dpms,
28019 .save = ivch_save,
28020diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/dvo_sil164.c linux-2.6.32.41/drivers/gpu/drm/i915/dvo_sil164.c
28021--- linux-2.6.32.41/drivers/gpu/drm/i915/dvo_sil164.c 2011-03-27 14:31:47.000000000 -0400
28022+++ linux-2.6.32.41/drivers/gpu/drm/i915/dvo_sil164.c 2011-04-17 15:56:46.000000000 -0400
28023@@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_
28024 }
28025 }
28026
28027-struct intel_dvo_dev_ops sil164_ops = {
28028+const struct intel_dvo_dev_ops sil164_ops = {
28029 .init = sil164_init,
28030 .detect = sil164_detect,
28031 .mode_valid = sil164_mode_valid,
28032diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/dvo_tfp410.c linux-2.6.32.41/drivers/gpu/drm/i915/dvo_tfp410.c
28033--- linux-2.6.32.41/drivers/gpu/drm/i915/dvo_tfp410.c 2011-03-27 14:31:47.000000000 -0400
28034+++ linux-2.6.32.41/drivers/gpu/drm/i915/dvo_tfp410.c 2011-04-17 15:56:46.000000000 -0400
28035@@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_
28036 }
28037 }
28038
28039-struct intel_dvo_dev_ops tfp410_ops = {
28040+const struct intel_dvo_dev_ops tfp410_ops = {
28041 .init = tfp410_init,
28042 .detect = tfp410_detect,
28043 .mode_valid = tfp410_mode_valid,
28044diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/i915_debugfs.c linux-2.6.32.41/drivers/gpu/drm/i915/i915_debugfs.c
28045--- linux-2.6.32.41/drivers/gpu/drm/i915/i915_debugfs.c 2011-03-27 14:31:47.000000000 -0400
28046+++ linux-2.6.32.41/drivers/gpu/drm/i915/i915_debugfs.c 2011-05-04 17:56:28.000000000 -0400
28047@@ -192,7 +192,7 @@ static int i915_interrupt_info(struct se
28048 I915_READ(GTIMR));
28049 }
28050 seq_printf(m, "Interrupts received: %d\n",
28051- atomic_read(&dev_priv->irq_received));
28052+ atomic_read_unchecked(&dev_priv->irq_received));
28053 if (dev_priv->hw_status_page != NULL) {
28054 seq_printf(m, "Current sequence: %d\n",
28055 i915_get_gem_seqno(dev));
28056diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/i915_drv.c linux-2.6.32.41/drivers/gpu/drm/i915/i915_drv.c
28057--- linux-2.6.32.41/drivers/gpu/drm/i915/i915_drv.c 2011-03-27 14:31:47.000000000 -0400
28058+++ linux-2.6.32.41/drivers/gpu/drm/i915/i915_drv.c 2011-04-17 15:56:46.000000000 -0400
28059@@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
28060 return i915_resume(dev);
28061 }
28062
28063-static struct vm_operations_struct i915_gem_vm_ops = {
28064+static const struct vm_operations_struct i915_gem_vm_ops = {
28065 .fault = i915_gem_fault,
28066 .open = drm_gem_vm_open,
28067 .close = drm_gem_vm_close,
28068diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/i915_drv.h linux-2.6.32.41/drivers/gpu/drm/i915/i915_drv.h
28069--- linux-2.6.32.41/drivers/gpu/drm/i915/i915_drv.h 2011-03-27 14:31:47.000000000 -0400
28070+++ linux-2.6.32.41/drivers/gpu/drm/i915/i915_drv.h 2011-05-04 17:56:28.000000000 -0400
28071@@ -197,7 +197,7 @@ typedef struct drm_i915_private {
28072 int page_flipping;
28073
28074 wait_queue_head_t irq_queue;
28075- atomic_t irq_received;
28076+ atomic_unchecked_t irq_received;
28077 /** Protects user_irq_refcount and irq_mask_reg */
28078 spinlock_t user_irq_lock;
28079 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
28080diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/i915_gem.c linux-2.6.32.41/drivers/gpu/drm/i915/i915_gem.c
28081--- linux-2.6.32.41/drivers/gpu/drm/i915/i915_gem.c 2011-03-27 14:31:47.000000000 -0400
28082+++ linux-2.6.32.41/drivers/gpu/drm/i915/i915_gem.c 2011-04-17 15:56:46.000000000 -0400
28083@@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_d
28084
28085 args->aper_size = dev->gtt_total;
28086 args->aper_available_size = (args->aper_size -
28087- atomic_read(&dev->pin_memory));
28088+ atomic_read_unchecked(&dev->pin_memory));
28089
28090 return 0;
28091 }
28092@@ -492,6 +492,11 @@ i915_gem_pread_ioctl(struct drm_device *
28093 return -EINVAL;
28094 }
28095
28096+ if (!access_ok(VERIFY_WRITE, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
28097+ drm_gem_object_unreference(obj);
28098+ return -EFAULT;
28099+ }
28100+
28101 if (i915_gem_object_needs_bit17_swizzle(obj)) {
28102 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
28103 } else {
28104@@ -965,6 +970,11 @@ i915_gem_pwrite_ioctl(struct drm_device
28105 return -EINVAL;
28106 }
28107
28108+ if (!access_ok(VERIFY_READ, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
28109+ drm_gem_object_unreference(obj);
28110+ return -EFAULT;
28111+ }
28112+
28113 /* We can only do the GTT pwrite on untiled buffers, as otherwise
28114 * it would end up going through the fenced access, and we'll get
28115 * different detiling behavior between reading and writing.
28116@@ -2054,7 +2064,7 @@ i915_gem_object_unbind(struct drm_gem_ob
28117
28118 if (obj_priv->gtt_space) {
28119 atomic_dec(&dev->gtt_count);
28120- atomic_sub(obj->size, &dev->gtt_memory);
28121+ atomic_sub_unchecked(obj->size, &dev->gtt_memory);
28122
28123 drm_mm_put_block(obj_priv->gtt_space);
28124 obj_priv->gtt_space = NULL;
28125@@ -2697,7 +2707,7 @@ i915_gem_object_bind_to_gtt(struct drm_g
28126 goto search_free;
28127 }
28128 atomic_inc(&dev->gtt_count);
28129- atomic_add(obj->size, &dev->gtt_memory);
28130+ atomic_add_unchecked(obj->size, &dev->gtt_memory);
28131
28132 /* Assert that the object is not currently in any GPU domain. As it
28133 * wasn't in the GTT, there shouldn't be any way it could have been in
28134@@ -3751,9 +3761,9 @@ i915_gem_execbuffer(struct drm_device *d
28135 "%d/%d gtt bytes\n",
28136 atomic_read(&dev->object_count),
28137 atomic_read(&dev->pin_count),
28138- atomic_read(&dev->object_memory),
28139- atomic_read(&dev->pin_memory),
28140- atomic_read(&dev->gtt_memory),
28141+ atomic_read_unchecked(&dev->object_memory),
28142+ atomic_read_unchecked(&dev->pin_memory),
28143+ atomic_read_unchecked(&dev->gtt_memory),
28144 dev->gtt_total);
28145 }
28146 goto err;
28147@@ -3985,7 +3995,7 @@ i915_gem_object_pin(struct drm_gem_objec
28148 */
28149 if (obj_priv->pin_count == 1) {
28150 atomic_inc(&dev->pin_count);
28151- atomic_add(obj->size, &dev->pin_memory);
28152+ atomic_add_unchecked(obj->size, &dev->pin_memory);
28153 if (!obj_priv->active &&
28154 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
28155 !list_empty(&obj_priv->list))
28156@@ -4018,7 +4028,7 @@ i915_gem_object_unpin(struct drm_gem_obj
28157 list_move_tail(&obj_priv->list,
28158 &dev_priv->mm.inactive_list);
28159 atomic_dec(&dev->pin_count);
28160- atomic_sub(obj->size, &dev->pin_memory);
28161+ atomic_sub_unchecked(obj->size, &dev->pin_memory);
28162 }
28163 i915_verify_inactive(dev, __FILE__, __LINE__);
28164 }
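
The two pread/pwrite hunks above add an access_ok() check on the user-supplied pointer/length pair before the slow copy paths run, dropping the object reference and returning -EFAULT when the range is not valid userspace memory. A minimal sketch of the same idea, using the 2.6.32-era access_ok(type, addr, size) interface; the helper name and argument layout are illustrative, not taken from the driver:

    #include <linux/uaccess.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    /* Reject an ioctl argument block whose data pointer does not describe a
     * valid, writable userspace range, so later __copy_to_user() fast paths
     * can never be aimed at kernel memory.
     */
    static int validate_user_buffer(u64 data_ptr, u64 size)
    {
            void __user *ptr = (void __user *)(uintptr_t)data_ptr;

            if (!access_ok(VERIFY_WRITE, ptr, size))
                    return -EFAULT;
            return 0;
    }
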
28165diff -urNp linux-2.6.32.41/drivers/gpu/drm/i915/i915_irq.c linux-2.6.32.41/drivers/gpu/drm/i915/i915_irq.c
28166--- linux-2.6.32.41/drivers/gpu/drm/i915/i915_irq.c 2011-03-27 14:31:47.000000000 -0400
28167+++ linux-2.6.32.41/drivers/gpu/drm/i915/i915_irq.c 2011-05-04 17:56:28.000000000 -0400
28168@@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_
28169 int irq_received;
28170 int ret = IRQ_NONE;
28171
28172- atomic_inc(&dev_priv->irq_received);
28173+ atomic_inc_unchecked(&dev_priv->irq_received);
28174
28175 if (IS_IGDNG(dev))
28176 return igdng_irq_handler(dev);
28177@@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct d
28178 {
28179 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28180
28181- atomic_set(&dev_priv->irq_received, 0);
28182+ atomic_set_unchecked(&dev_priv->irq_received, 0);
28183
28184 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28185 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
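
Throughout the DRM drivers in this patch, counters that exist purely for statistics or debugging (irq_received above, and the vblank and fence counters in the mga, r128, radeon and via hunks that follow) are converted from atomic_t to atomic_unchecked_t. The unchecked variants come from the PaX refcount-overflow protection carried in this patch; they behave like ordinary atomics but are exempt from overflow detection, which stays reserved for real reference counts. A sketch of the pattern, assuming that PaX API (atomic_unchecked_t and the *_unchecked helpers are not part of mainline 2.6.32):

    #include <asm/atomic.h>   /* the PaX-patched header is assumed to provide
                               * the *_unchecked variants used below */

    struct irq_stats {
            atomic_unchecked_t irq_received;  /* statistic only, free to wrap */
            atomic_t           users;         /* real refcount, stays checked */
    };

    static void note_irq(struct irq_stats *s)
    {
            atomic_inc_unchecked(&s->irq_received);
    }

    static int irqs_seen(struct irq_stats *s)
    {
            return atomic_read_unchecked(&s->irq_received);
    }
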
28186diff -urNp linux-2.6.32.41/drivers/gpu/drm/mga/mga_drv.h linux-2.6.32.41/drivers/gpu/drm/mga/mga_drv.h
28187--- linux-2.6.32.41/drivers/gpu/drm/mga/mga_drv.h 2011-03-27 14:31:47.000000000 -0400
28188+++ linux-2.6.32.41/drivers/gpu/drm/mga/mga_drv.h 2011-05-04 17:56:28.000000000 -0400
28189@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
28190 u32 clear_cmd;
28191 u32 maccess;
28192
28193- atomic_t vbl_received; /**< Number of vblanks received. */
28194+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
28195 wait_queue_head_t fence_queue;
28196- atomic_t last_fence_retired;
28197+ atomic_unchecked_t last_fence_retired;
28198 u32 next_fence_to_post;
28199
28200 unsigned int fb_cpp;
28201diff -urNp linux-2.6.32.41/drivers/gpu/drm/mga/mga_irq.c linux-2.6.32.41/drivers/gpu/drm/mga/mga_irq.c
28202--- linux-2.6.32.41/drivers/gpu/drm/mga/mga_irq.c 2011-03-27 14:31:47.000000000 -0400
28203+++ linux-2.6.32.41/drivers/gpu/drm/mga/mga_irq.c 2011-05-04 17:56:28.000000000 -0400
28204@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
28205 if (crtc != 0)
28206 return 0;
28207
28208- return atomic_read(&dev_priv->vbl_received);
28209+ return atomic_read_unchecked(&dev_priv->vbl_received);
28210 }
28211
28212
28213@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
28214 /* VBLANK interrupt */
28215 if (status & MGA_VLINEPEN) {
28216 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
28217- atomic_inc(&dev_priv->vbl_received);
28218+ atomic_inc_unchecked(&dev_priv->vbl_received);
28219 drm_handle_vblank(dev, 0);
28220 handled = 1;
28221 }
28222@@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
28223 MGA_WRITE(MGA_PRIMEND, prim_end);
28224 }
28225
28226- atomic_inc(&dev_priv->last_fence_retired);
28227+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
28228 DRM_WAKEUP(&dev_priv->fence_queue);
28229 handled = 1;
28230 }
28231@@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_dev
28232 * using fences.
28233 */
28234 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
28235- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
28236+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
28237 - *sequence) <= (1 << 23)));
28238
28239 *sequence = cur_fence;
28240diff -urNp linux-2.6.32.41/drivers/gpu/drm/r128/r128_cce.c linux-2.6.32.41/drivers/gpu/drm/r128/r128_cce.c
28241--- linux-2.6.32.41/drivers/gpu/drm/r128/r128_cce.c 2011-03-27 14:31:47.000000000 -0400
28242+++ linux-2.6.32.41/drivers/gpu/drm/r128/r128_cce.c 2011-05-04 17:56:28.000000000 -0400
28243@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
28244
28245 /* GH: Simple idle check.
28246 */
28247- atomic_set(&dev_priv->idle_count, 0);
28248+ atomic_set_unchecked(&dev_priv->idle_count, 0);
28249
28250 /* We don't support anything other than bus-mastering ring mode,
28251 * but the ring can be in either AGP or PCI space for the ring
28252diff -urNp linux-2.6.32.41/drivers/gpu/drm/r128/r128_drv.h linux-2.6.32.41/drivers/gpu/drm/r128/r128_drv.h
28253--- linux-2.6.32.41/drivers/gpu/drm/r128/r128_drv.h 2011-03-27 14:31:47.000000000 -0400
28254+++ linux-2.6.32.41/drivers/gpu/drm/r128/r128_drv.h 2011-05-04 17:56:28.000000000 -0400
28255@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
28256 int is_pci;
28257 unsigned long cce_buffers_offset;
28258
28259- atomic_t idle_count;
28260+ atomic_unchecked_t idle_count;
28261
28262 int page_flipping;
28263 int current_page;
28264 u32 crtc_offset;
28265 u32 crtc_offset_cntl;
28266
28267- atomic_t vbl_received;
28268+ atomic_unchecked_t vbl_received;
28269
28270 u32 color_fmt;
28271 unsigned int front_offset;
28272diff -urNp linux-2.6.32.41/drivers/gpu/drm/r128/r128_irq.c linux-2.6.32.41/drivers/gpu/drm/r128/r128_irq.c
28273--- linux-2.6.32.41/drivers/gpu/drm/r128/r128_irq.c 2011-03-27 14:31:47.000000000 -0400
28274+++ linux-2.6.32.41/drivers/gpu/drm/r128/r128_irq.c 2011-05-04 17:56:28.000000000 -0400
28275@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
28276 if (crtc != 0)
28277 return 0;
28278
28279- return atomic_read(&dev_priv->vbl_received);
28280+ return atomic_read_unchecked(&dev_priv->vbl_received);
28281 }
28282
28283 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28284@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
28285 /* VBLANK interrupt */
28286 if (status & R128_CRTC_VBLANK_INT) {
28287 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
28288- atomic_inc(&dev_priv->vbl_received);
28289+ atomic_inc_unchecked(&dev_priv->vbl_received);
28290 drm_handle_vblank(dev, 0);
28291 return IRQ_HANDLED;
28292 }
28293diff -urNp linux-2.6.32.41/drivers/gpu/drm/r128/r128_state.c linux-2.6.32.41/drivers/gpu/drm/r128/r128_state.c
28294--- linux-2.6.32.41/drivers/gpu/drm/r128/r128_state.c 2011-03-27 14:31:47.000000000 -0400
28295+++ linux-2.6.32.41/drivers/gpu/drm/r128/r128_state.c 2011-05-04 17:56:28.000000000 -0400
28296@@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_priv
28297
28298 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
28299 {
28300- if (atomic_read(&dev_priv->idle_count) == 0) {
28301+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
28302 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
28303 } else {
28304- atomic_set(&dev_priv->idle_count, 0);
28305+ atomic_set_unchecked(&dev_priv->idle_count, 0);
28306 }
28307 }
28308
28309diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/atom.c linux-2.6.32.41/drivers/gpu/drm/radeon/atom.c
28310--- linux-2.6.32.41/drivers/gpu/drm/radeon/atom.c 2011-05-10 22:12:01.000000000 -0400
28311+++ linux-2.6.32.41/drivers/gpu/drm/radeon/atom.c 2011-05-16 21:46:57.000000000 -0400
28312@@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct c
28313 char name[512];
28314 int i;
28315
28316+ pax_track_stack();
28317+
28318 ctx->card = card;
28319 ctx->bios = bios;
28320
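
atom_parse() keeps a 512-byte name buffer on its stack, and the pax_track_stack() call added above appears to be the hook this patch inserts into functions with unusually large frames so the PaX stack-depth tracking can account for them; the helper is defined by the PaX portion of this patch, not by mainline 2.6.32. The same treatment shows up below in the radeon_atombios.c connector-table parser, ide_floppy_get_capacity(), ide_pci_init_two() and others. The shape of such a function, with the PaX call hedged as an assumption:

    #include <linux/string.h>

    /* Illustrative only: a function with a large on-stack buffer gets
     * pax_track_stack() (assumed to be provided by the patched headers)
     * as its first statement.
     */
    static void parse_with_big_frame(const char *src)
    {
            char name[512];

            pax_track_stack();

            strlcpy(name, src, sizeof(name));
            /* ... work on name ... */
    }
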
28321diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/mkregtable.c linux-2.6.32.41/drivers/gpu/drm/radeon/mkregtable.c
28322--- linux-2.6.32.41/drivers/gpu/drm/radeon/mkregtable.c 2011-03-27 14:31:47.000000000 -0400
28323+++ linux-2.6.32.41/drivers/gpu/drm/radeon/mkregtable.c 2011-04-17 15:56:46.000000000 -0400
28324@@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
28325 regex_t mask_rex;
28326 regmatch_t match[4];
28327 char buf[1024];
28328- size_t end;
28329+ long end;
28330 int len;
28331 int done = 0;
28332 int r;
28333 unsigned o;
28334 struct offset *offset;
28335 char last_reg_s[10];
28336- int last_reg;
28337+ unsigned long last_reg;
28338
28339 if (regcomp
28340 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
28341diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_atombios.c linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_atombios.c
28342--- linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_atombios.c 2011-03-27 14:31:47.000000000 -0400
28343+++ linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_atombios.c 2011-05-16 21:46:57.000000000 -0400
28344@@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from
28345 bool linkb;
28346 struct radeon_i2c_bus_rec ddc_bus;
28347
28348+ pax_track_stack();
28349+
28350 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
28351
28352 if (data_offset == 0)
28353@@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_o
28354 }
28355 }
28356
28357-struct bios_connector {
28358+static struct bios_connector {
28359 bool valid;
28360 uint16_t line_mux;
28361 uint16_t devices;
28362 int connector_type;
28363 struct radeon_i2c_bus_rec ddc_bus;
28364-};
28365+} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
28366
28367 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
28368 drm_device
28369@@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from
28370 uint8_t dac;
28371 union atom_supported_devices *supported_devices;
28372 int i, j;
28373- struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
28374
28375 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
28376
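
The hunk above also moves bios_connectors[ATOM_MAX_SUPPORTED_DEVICE] off the stack of radeon_get_atom_connector_info_from_supported_devices_table() and into a single static object, shrinking another large frame. The cost is that the function is no longer reentrant with respect to that buffer, which is tolerable here because the table is only parsed during device initialisation. The pattern in isolation, with illustrative names and sizes:

    #include <linux/string.h>
    #include <linux/types.h>

    #define MAX_ENTRIES 16                    /* illustrative bound */

    struct entry {
            bool     valid;
            uint16_t line_mux;
    };

    /* One static scratch table instead of sizeof(scratch) bytes of stack;
     * callers must not run this concurrently.
     */
    static struct entry scratch[MAX_ENTRIES];

    static void fill_table(void)
    {
            memset(scratch, 0, sizeof(scratch));
            /* ... populate scratch[] ... */
    }
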
28377diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_display.c linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_display.c
28378--- linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_display.c 2011-03-27 14:31:47.000000000 -0400
28379+++ linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_display.c 2011-04-17 15:56:46.000000000 -0400
28380@@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pl
28381
28382 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
28383 error = freq - current_freq;
28384- error = error < 0 ? 0xffffffff : error;
28385+ error = (int32_t)error < 0 ? 0xffffffff : error;
28386 } else
28387 error = abs(current_freq - freq);
28388 vco_diff = abs(vco - best_vco);
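
In radeon_compute_pll() above, error is an unsigned 32-bit quantity (hence the cast the patch adds), so freq - current_freq can never be negative as written: when current_freq exceeds freq the subtraction wraps to a huge positive value and the old test "error < 0" was dead code. Casting to int32_t before the comparison restores the intended clamp-negative-differences behaviour. A small, self-contained illustration of the wraparound:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t freq = 100, current_freq = 150;
            uint32_t error = freq - current_freq;   /* wraps to 0xffffffce */

            printf("error < 0          : %d\n", error < 0);           /* 0, never true */
            printf("(int32_t)error < 0 : %d\n", (int32_t)error < 0);  /* 1, as intended */
            return 0;
    }
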
28389diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_drv.h linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_drv.h
28390--- linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_drv.h 2011-03-27 14:31:47.000000000 -0400
28391+++ linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_drv.h 2011-05-04 17:56:28.000000000 -0400
28392@@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
28393
28394 /* SW interrupt */
28395 wait_queue_head_t swi_queue;
28396- atomic_t swi_emitted;
28397+ atomic_unchecked_t swi_emitted;
28398 int vblank_crtc;
28399 uint32_t irq_enable_reg;
28400 uint32_t r500_disp_irq_reg;
28401diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_fence.c linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_fence.c
28402--- linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_fence.c 2011-03-27 14:31:47.000000000 -0400
28403+++ linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_fence.c 2011-05-04 17:56:28.000000000 -0400
28404@@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_devi
28405 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
28406 return 0;
28407 }
28408- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
28409+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
28410 if (!rdev->cp.ready) {
28411 /* FIXME: cp is not running assume everythings is done right
28412 * away
28413@@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct rade
28414 return r;
28415 }
28416 WREG32(rdev->fence_drv.scratch_reg, 0);
28417- atomic_set(&rdev->fence_drv.seq, 0);
28418+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
28419 INIT_LIST_HEAD(&rdev->fence_drv.created);
28420 INIT_LIST_HEAD(&rdev->fence_drv.emited);
28421 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
28422diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/radeon.h linux-2.6.32.41/drivers/gpu/drm/radeon/radeon.h
28423--- linux-2.6.32.41/drivers/gpu/drm/radeon/radeon.h 2011-03-27 14:31:47.000000000 -0400
28424+++ linux-2.6.32.41/drivers/gpu/drm/radeon/radeon.h 2011-05-04 17:56:28.000000000 -0400
28425@@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device
28426 */
28427 struct radeon_fence_driver {
28428 uint32_t scratch_reg;
28429- atomic_t seq;
28430+ atomic_unchecked_t seq;
28431 uint32_t last_seq;
28432 unsigned long count_timeout;
28433 wait_queue_head_t queue;
28434diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_ioc32.c linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_ioc32.c
28435--- linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-03-27 14:31:47.000000000 -0400
28436+++ linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-04-23 13:57:24.000000000 -0400
28437@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(str
28438 request = compat_alloc_user_space(sizeof(*request));
28439 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
28440 || __put_user(req32.param, &request->param)
28441- || __put_user((void __user *)(unsigned long)req32.value,
28442+ || __put_user((unsigned long)req32.value,
28443 &request->value))
28444 return -EFAULT;
28445
28446diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_irq.c linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_irq.c
28447--- linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_irq.c 2011-03-27 14:31:47.000000000 -0400
28448+++ linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_irq.c 2011-05-04 17:56:28.000000000 -0400
28449@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
28450 unsigned int ret;
28451 RING_LOCALS;
28452
28453- atomic_inc(&dev_priv->swi_emitted);
28454- ret = atomic_read(&dev_priv->swi_emitted);
28455+ atomic_inc_unchecked(&dev_priv->swi_emitted);
28456+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
28457
28458 BEGIN_RING(4);
28459 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
28460@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
28461 drm_radeon_private_t *dev_priv =
28462 (drm_radeon_private_t *) dev->dev_private;
28463
28464- atomic_set(&dev_priv->swi_emitted, 0);
28465+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
28466 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
28467
28468 dev->max_vblank_count = 0x001fffff;
28469diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_state.c linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_state.c
28470--- linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_state.c 2011-03-27 14:31:47.000000000 -0400
28471+++ linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_state.c 2011-04-17 15:56:46.000000000 -0400
28472@@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm
28473 {
28474 drm_radeon_private_t *dev_priv = dev->dev_private;
28475 drm_radeon_getparam_t *param = data;
28476- int value;
28477+ int value = 0;
28478
28479 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
28480
28481diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_ttm.c linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_ttm.c
28482--- linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_ttm.c 2011-03-27 14:31:47.000000000 -0400
28483+++ linux-2.6.32.41/drivers/gpu/drm/radeon/radeon_ttm.c 2011-04-17 15:56:46.000000000 -0400
28484@@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_devic
28485 DRM_INFO("radeon: ttm finalized\n");
28486 }
28487
28488-static struct vm_operations_struct radeon_ttm_vm_ops;
28489-static const struct vm_operations_struct *ttm_vm_ops = NULL;
28490-
28491-static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
28492-{
28493- struct ttm_buffer_object *bo;
28494- int r;
28495-
28496- bo = (struct ttm_buffer_object *)vma->vm_private_data;
28497- if (bo == NULL) {
28498- return VM_FAULT_NOPAGE;
28499- }
28500- r = ttm_vm_ops->fault(vma, vmf);
28501- return r;
28502-}
28503-
28504 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
28505 {
28506 struct drm_file *file_priv;
28507 struct radeon_device *rdev;
28508- int r;
28509
28510 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
28511 return drm_mmap(filp, vma);
28512@@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struc
28513
28514 file_priv = (struct drm_file *)filp->private_data;
28515 rdev = file_priv->minor->dev->dev_private;
28516- if (rdev == NULL) {
28517+ if (!rdev)
28518 return -EINVAL;
28519- }
28520- r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
28521- if (unlikely(r != 0)) {
28522- return r;
28523- }
28524- if (unlikely(ttm_vm_ops == NULL)) {
28525- ttm_vm_ops = vma->vm_ops;
28526- radeon_ttm_vm_ops = *ttm_vm_ops;
28527- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
28528- }
28529- vma->vm_ops = &radeon_ttm_vm_ops;
28530- return 0;
28531+ return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
28532 }
28533
28534
28535diff -urNp linux-2.6.32.41/drivers/gpu/drm/radeon/rs690.c linux-2.6.32.41/drivers/gpu/drm/radeon/rs690.c
28536--- linux-2.6.32.41/drivers/gpu/drm/radeon/rs690.c 2011-03-27 14:31:47.000000000 -0400
28537+++ linux-2.6.32.41/drivers/gpu/drm/radeon/rs690.c 2011-04-17 15:56:46.000000000 -0400
28538@@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct
28539 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
28540 rdev->pm.sideport_bandwidth.full)
28541 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
28542- read_delay_latency.full = rfixed_const(370 * 800 * 1000);
28543+ read_delay_latency.full = rfixed_const(800 * 1000);
28544 read_delay_latency.full = rfixed_div(read_delay_latency,
28545 rdev->pm.igp_sideport_mclk);
28546+ a.full = rfixed_const(370);
28547+ read_delay_latency.full = rfixed_mul(read_delay_latency, a);
28548 } else {
28549 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
28550 rdev->pm.k8_bandwidth.full)
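
The rs690.c change splits the constant 370 * 800 * 1000 out of a single rfixed_const() call. The radeon fixed-point type in this kernel (fixed20_12) stores the value shifted left by 12 bits in a u32, and 370 * 800 * 1000 = 296,000,000 no longer fits once shifted, so the old constant silently overflowed. Converting only 800 * 1000 (3,276,800,000 after the shift, still within a u32), dividing by the sideport memory clock, and then multiplying by 370 as a separate fixed-point factor keeps every intermediate in range. A quick check of the arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t a = (uint64_t)370 * 800 * 1000;  /* 296,000,000 */
            uint64_t b = (uint64_t)800 * 1000;        /*     800,000 */

            printf("a << 12 = %llu (overflows u32: %d)\n",
                   (unsigned long long)(a << 12), (a << 12) > 0xffffffffULL);
            printf("b << 12 = %llu (fits in u32:   %d)\n",
                   (unsigned long long)(b << 12), (b << 12) <= 0xffffffffULL);
            return 0;
    }
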
28551diff -urNp linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_bo.c linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_bo.c
28552--- linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_bo.c 2011-03-27 14:31:47.000000000 -0400
28553+++ linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_bo.c 2011-04-23 12:56:11.000000000 -0400
28554@@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_a
28555 NULL
28556 };
28557
28558-static struct sysfs_ops ttm_bo_global_ops = {
28559+static const struct sysfs_ops ttm_bo_global_ops = {
28560 .show = &ttm_bo_global_show
28561 };
28562
28563diff -urNp linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_bo_vm.c linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_bo_vm.c
28564--- linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-03-27 14:31:47.000000000 -0400
28565+++ linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-04-17 15:56:46.000000000 -0400
28566@@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_are
28567 {
28568 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
28569 vma->vm_private_data;
28570- struct ttm_bo_device *bdev = bo->bdev;
28571+ struct ttm_bo_device *bdev;
28572 unsigned long bus_base;
28573 unsigned long bus_offset;
28574 unsigned long bus_size;
28575@@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_are
28576 unsigned long address = (unsigned long)vmf->virtual_address;
28577 int retval = VM_FAULT_NOPAGE;
28578
28579+ if (!bo)
28580+ return VM_FAULT_NOPAGE;
28581+ bdev = bo->bdev;
28582+
28583 /*
28584 * Work around locking order reversal in fault / nopfn
28585 * between mmap_sem and bo_reserve: Perform a trylock operation
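
Paired with the radeon_ttm.c hunk above, this moves the check that vm_private_data actually carries a buffer object out of the radeon-only fault wrapper and into ttm_bo_vm_fault() itself, and defers the bo->bdev dereference until after that check, so every TTM-based driver gets the defensive behaviour. The ordering issue in isolation, with illustrative types:

    /* Illustrative: fetch the back-pointer only after the object has been
     * validated; reading it in the declaration, as the old code did, would
     * dereference NULL whenever no object is attached to the mapping.
     */
    struct device_priv;

    struct buffer {
            struct device_priv *owner;
    };

    static struct device_priv *owner_of(struct buffer *b)
    {
            struct device_priv *owner;

            if (!b)
                    return NULL;
            owner = b->owner;
            return owner;
    }
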
28586diff -urNp linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_global.c linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_global.c
28587--- linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_global.c 2011-03-27 14:31:47.000000000 -0400
28588+++ linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_global.c 2011-04-17 15:56:46.000000000 -0400
28589@@ -36,7 +36,7 @@
28590 struct ttm_global_item {
28591 struct mutex mutex;
28592 void *object;
28593- int refcount;
28594+ atomic_t refcount;
28595 };
28596
28597 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
28598@@ -49,7 +49,7 @@ void ttm_global_init(void)
28599 struct ttm_global_item *item = &glob[i];
28600 mutex_init(&item->mutex);
28601 item->object = NULL;
28602- item->refcount = 0;
28603+ atomic_set(&item->refcount, 0);
28604 }
28605 }
28606
28607@@ -59,7 +59,7 @@ void ttm_global_release(void)
28608 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
28609 struct ttm_global_item *item = &glob[i];
28610 BUG_ON(item->object != NULL);
28611- BUG_ON(item->refcount != 0);
28612+ BUG_ON(atomic_read(&item->refcount) != 0);
28613 }
28614 }
28615
28616@@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_globa
28617 void *object;
28618
28619 mutex_lock(&item->mutex);
28620- if (item->refcount == 0) {
28621+ if (atomic_read(&item->refcount) == 0) {
28622 item->object = kzalloc(ref->size, GFP_KERNEL);
28623 if (unlikely(item->object == NULL)) {
28624 ret = -ENOMEM;
28625@@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_globa
28626 goto out_err;
28627
28628 }
28629- ++item->refcount;
28630+ atomic_inc(&item->refcount);
28631 ref->object = item->object;
28632 object = item->object;
28633 mutex_unlock(&item->mutex);
28634@@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_gl
28635 struct ttm_global_item *item = &glob[ref->global_type];
28636
28637 mutex_lock(&item->mutex);
28638- BUG_ON(item->refcount == 0);
28639+ BUG_ON(atomic_read(&item->refcount) == 0);
28640 BUG_ON(ref->object != item->object);
28641- if (--item->refcount == 0) {
28642+ if (atomic_dec_and_test(&item->refcount)) {
28643 ref->release(ref);
28644 item->object = NULL;
28645 }
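
ttm_global previously kept each item's reference count as a plain int manipulated under item->mutex; the hunks above switch it to atomic_t, so the count is consistent independently of the lock and the release path becomes the idiomatic atomic_dec_and_test(). The core of that pattern:

    #include <asm/atomic.h>

    struct item {
            atomic_t refcount;
            void (*release)(struct item *);
    };

    static void item_get(struct item *it)
    {
            atomic_inc(&it->refcount);
    }

    static void item_put(struct item *it)
    {
            /* atomic_dec_and_test() returns true only for the final put */
            if (atomic_dec_and_test(&it->refcount))
                    it->release(it);
    }
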
28646diff -urNp linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_memory.c linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_memory.c
28647--- linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_memory.c 2011-03-27 14:31:47.000000000 -0400
28648+++ linux-2.6.32.41/drivers/gpu/drm/ttm/ttm_memory.c 2011-04-17 15:56:46.000000000 -0400
28649@@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_at
28650 NULL
28651 };
28652
28653-static struct sysfs_ops ttm_mem_zone_ops = {
28654+static const struct sysfs_ops ttm_mem_zone_ops = {
28655 .show = &ttm_mem_zone_show,
28656 .store = &ttm_mem_zone_store
28657 };
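
The ttm_bo.c and ttm_memory.c hunks (and cm_counter_ops further down in the InfiniBand CM code) add const to sysfs_ops tables that are never written after initialisation. Together with the constification done elsewhere in this patch, that lets the structures live in read-only memory, so a stray or attacker-controlled kernel write cannot redirect the .show/.store callbacks. The declaration change in isolation:

    #include <linux/sysfs.h>

    static ssize_t my_show(struct kobject *kobj, struct attribute *attr, char *buf)
    {
            return 0;
    }

    /* const: the ops table can be placed in .rodata rather than writable data */
    static const struct sysfs_ops my_sysfs_ops = {
            .show = my_show,
    };
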
28658diff -urNp linux-2.6.32.41/drivers/gpu/drm/via/via_drv.h linux-2.6.32.41/drivers/gpu/drm/via/via_drv.h
28659--- linux-2.6.32.41/drivers/gpu/drm/via/via_drv.h 2011-03-27 14:31:47.000000000 -0400
28660+++ linux-2.6.32.41/drivers/gpu/drm/via/via_drv.h 2011-05-04 17:56:28.000000000 -0400
28661@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
28662 typedef uint32_t maskarray_t[5];
28663
28664 typedef struct drm_via_irq {
28665- atomic_t irq_received;
28666+ atomic_unchecked_t irq_received;
28667 uint32_t pending_mask;
28668 uint32_t enable_mask;
28669 wait_queue_head_t irq_queue;
28670@@ -75,7 +75,7 @@ typedef struct drm_via_private {
28671 struct timeval last_vblank;
28672 int last_vblank_valid;
28673 unsigned usec_per_vblank;
28674- atomic_t vbl_received;
28675+ atomic_unchecked_t vbl_received;
28676 drm_via_state_t hc_state;
28677 char pci_buf[VIA_PCI_BUF_SIZE];
28678 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
28679diff -urNp linux-2.6.32.41/drivers/gpu/drm/via/via_irq.c linux-2.6.32.41/drivers/gpu/drm/via/via_irq.c
28680--- linux-2.6.32.41/drivers/gpu/drm/via/via_irq.c 2011-03-27 14:31:47.000000000 -0400
28681+++ linux-2.6.32.41/drivers/gpu/drm/via/via_irq.c 2011-05-04 17:56:28.000000000 -0400
28682@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
28683 if (crtc != 0)
28684 return 0;
28685
28686- return atomic_read(&dev_priv->vbl_received);
28687+ return atomic_read_unchecked(&dev_priv->vbl_received);
28688 }
28689
28690 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28691@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
28692
28693 status = VIA_READ(VIA_REG_INTERRUPT);
28694 if (status & VIA_IRQ_VBLANK_PENDING) {
28695- atomic_inc(&dev_priv->vbl_received);
28696- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
28697+ atomic_inc_unchecked(&dev_priv->vbl_received);
28698+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
28699 do_gettimeofday(&cur_vblank);
28700 if (dev_priv->last_vblank_valid) {
28701 dev_priv->usec_per_vblank =
28702@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
28703 dev_priv->last_vblank = cur_vblank;
28704 dev_priv->last_vblank_valid = 1;
28705 }
28706- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
28707+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
28708 DRM_DEBUG("US per vblank is: %u\n",
28709 dev_priv->usec_per_vblank);
28710 }
28711@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
28712
28713 for (i = 0; i < dev_priv->num_irqs; ++i) {
28714 if (status & cur_irq->pending_mask) {
28715- atomic_inc(&cur_irq->irq_received);
28716+ atomic_inc_unchecked(&cur_irq->irq_received);
28717 DRM_WAKEUP(&cur_irq->irq_queue);
28718 handled = 1;
28719 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
28720@@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device *
28721 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
28722 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
28723 masks[irq][4]));
28724- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
28725+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
28726 } else {
28727 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
28728 (((cur_irq_sequence =
28729- atomic_read(&cur_irq->irq_received)) -
28730+ atomic_read_unchecked(&cur_irq->irq_received)) -
28731 *sequence) <= (1 << 23)));
28732 }
28733 *sequence = cur_irq_sequence;
28734@@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct dr
28735 }
28736
28737 for (i = 0; i < dev_priv->num_irqs; ++i) {
28738- atomic_set(&cur_irq->irq_received, 0);
28739+ atomic_set_unchecked(&cur_irq->irq_received, 0);
28740 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
28741 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
28742 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
28743@@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev,
28744 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
28745 case VIA_IRQ_RELATIVE:
28746 irqwait->request.sequence +=
28747- atomic_read(&cur_irq->irq_received);
28748+ atomic_read_unchecked(&cur_irq->irq_received);
28749 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
28750 case VIA_IRQ_ABSOLUTE:
28751 break;
28752diff -urNp linux-2.6.32.41/drivers/hid/hid-core.c linux-2.6.32.41/drivers/hid/hid-core.c
28753--- linux-2.6.32.41/drivers/hid/hid-core.c 2011-05-10 22:12:01.000000000 -0400
28754+++ linux-2.6.32.41/drivers/hid/hid-core.c 2011-05-10 22:12:32.000000000 -0400
28755@@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device
28756
28757 int hid_add_device(struct hid_device *hdev)
28758 {
28759- static atomic_t id = ATOMIC_INIT(0);
28760+ static atomic_unchecked_t id = ATOMIC_INIT(0);
28761 int ret;
28762
28763 if (WARN_ON(hdev->status & HID_STAT_ADDED))
28764@@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hd
28765 /* XXX hack, any other cleaner solution after the driver core
28766 * is converted to allow more than 20 bytes as the device name? */
28767 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
28768- hdev->vendor, hdev->product, atomic_inc_return(&id));
28769+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
28770
28771 ret = device_add(&hdev->dev);
28772 if (!ret)
28773diff -urNp linux-2.6.32.41/drivers/hid/usbhid/hiddev.c linux-2.6.32.41/drivers/hid/usbhid/hiddev.c
28774--- linux-2.6.32.41/drivers/hid/usbhid/hiddev.c 2011-03-27 14:31:47.000000000 -0400
28775+++ linux-2.6.32.41/drivers/hid/usbhid/hiddev.c 2011-04-17 15:56:46.000000000 -0400
28776@@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *fi
28777 return put_user(HID_VERSION, (int __user *)arg);
28778
28779 case HIDIOCAPPLICATION:
28780- if (arg < 0 || arg >= hid->maxapplication)
28781+ if (arg >= hid->maxapplication)
28782 return -EINVAL;
28783
28784 for (i = 0; i < hid->maxcollection; i++)
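
In hiddev_ioctl() above, arg is the unsigned long ioctl argument, so the test "arg < 0" can never be true; the compiler is entitled to discard it (and warns under -Wtype-limits), which is why only the real upper-bound check is kept. A two-line illustration:

    #include <stdio.h>

    int main(void)
    {
            unsigned long arg = (unsigned long)-5;  /* "negative" input wraps to a huge value */

            printf("%d\n", arg < 0);     /* always 0 for an unsigned type */
            printf("%d\n", arg >= 100);  /* the remaining bound check still rejects it */
            return 0;
    }
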
28785diff -urNp linux-2.6.32.41/drivers/hwmon/lis3lv02d.c linux-2.6.32.41/drivers/hwmon/lis3lv02d.c
28786--- linux-2.6.32.41/drivers/hwmon/lis3lv02d.c 2011-03-27 14:31:47.000000000 -0400
28787+++ linux-2.6.32.41/drivers/hwmon/lis3lv02d.c 2011-05-04 17:56:28.000000000 -0400
28788@@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(in
28789 * the lid is closed. This leads to interrupts as soon as a little move
28790 * is done.
28791 */
28792- atomic_inc(&lis3_dev.count);
28793+ atomic_inc_unchecked(&lis3_dev.count);
28794
28795 wake_up_interruptible(&lis3_dev.misc_wait);
28796 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
28797@@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct in
28798 if (test_and_set_bit(0, &lis3_dev.misc_opened))
28799 return -EBUSY; /* already open */
28800
28801- atomic_set(&lis3_dev.count, 0);
28802+ atomic_set_unchecked(&lis3_dev.count, 0);
28803
28804 /*
28805 * The sensor can generate interrupts for free-fall and direction
28806@@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struc
28807 add_wait_queue(&lis3_dev.misc_wait, &wait);
28808 while (true) {
28809 set_current_state(TASK_INTERRUPTIBLE);
28810- data = atomic_xchg(&lis3_dev.count, 0);
28811+ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
28812 if (data)
28813 break;
28814
28815@@ -244,7 +244,7 @@ out:
28816 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
28817 {
28818 poll_wait(file, &lis3_dev.misc_wait, wait);
28819- if (atomic_read(&lis3_dev.count))
28820+ if (atomic_read_unchecked(&lis3_dev.count))
28821 return POLLIN | POLLRDNORM;
28822 return 0;
28823 }
28824diff -urNp linux-2.6.32.41/drivers/hwmon/lis3lv02d.h linux-2.6.32.41/drivers/hwmon/lis3lv02d.h
28825--- linux-2.6.32.41/drivers/hwmon/lis3lv02d.h 2011-03-27 14:31:47.000000000 -0400
28826+++ linux-2.6.32.41/drivers/hwmon/lis3lv02d.h 2011-05-04 17:56:28.000000000 -0400
28827@@ -201,7 +201,7 @@ struct lis3lv02d {
28828
28829 struct input_polled_dev *idev; /* input device */
28830 struct platform_device *pdev; /* platform device */
28831- atomic_t count; /* interrupt count after last read */
28832+ atomic_unchecked_t count; /* interrupt count after last read */
28833 int xcalib; /* calibrated null value for x */
28834 int ycalib; /* calibrated null value for y */
28835 int zcalib; /* calibrated null value for z */
28836diff -urNp linux-2.6.32.41/drivers/hwmon/sht15.c linux-2.6.32.41/drivers/hwmon/sht15.c
28837--- linux-2.6.32.41/drivers/hwmon/sht15.c 2011-03-27 14:31:47.000000000 -0400
28838+++ linux-2.6.32.41/drivers/hwmon/sht15.c 2011-05-04 17:56:28.000000000 -0400
28839@@ -112,7 +112,7 @@ struct sht15_data {
28840 int supply_uV;
28841 int supply_uV_valid;
28842 struct work_struct update_supply_work;
28843- atomic_t interrupt_handled;
28844+ atomic_unchecked_t interrupt_handled;
28845 };
28846
28847 /**
28848@@ -245,13 +245,13 @@ static inline int sht15_update_single_va
28849 return ret;
28850
28851 gpio_direction_input(data->pdata->gpio_data);
28852- atomic_set(&data->interrupt_handled, 0);
28853+ atomic_set_unchecked(&data->interrupt_handled, 0);
28854
28855 enable_irq(gpio_to_irq(data->pdata->gpio_data));
28856 if (gpio_get_value(data->pdata->gpio_data) == 0) {
28857 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
28858 /* Only relevant if the interrupt hasn't occured. */
28859- if (!atomic_read(&data->interrupt_handled))
28860+ if (!atomic_read_unchecked(&data->interrupt_handled))
28861 schedule_work(&data->read_work);
28862 }
28863 ret = wait_event_timeout(data->wait_queue,
28864@@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired
28865 struct sht15_data *data = d;
28866 /* First disable the interrupt */
28867 disable_irq_nosync(irq);
28868- atomic_inc(&data->interrupt_handled);
28869+ atomic_inc_unchecked(&data->interrupt_handled);
28870 /* Then schedule a reading work struct */
28871 if (data->flag != SHT15_READING_NOTHING)
28872 schedule_work(&data->read_work);
28873@@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct wo
28874 here as could have gone low in meantime so verify
28875 it hasn't!
28876 */
28877- atomic_set(&data->interrupt_handled, 0);
28878+ atomic_set_unchecked(&data->interrupt_handled, 0);
28879 enable_irq(gpio_to_irq(data->pdata->gpio_data));
28880 /* If still not occured or another handler has been scheduled */
28881 if (gpio_get_value(data->pdata->gpio_data)
28882- || atomic_read(&data->interrupt_handled))
28883+ || atomic_read_unchecked(&data->interrupt_handled))
28884 return;
28885 }
28886 /* Read the data back from the device */
28887diff -urNp linux-2.6.32.41/drivers/hwmon/w83791d.c linux-2.6.32.41/drivers/hwmon/w83791d.c
28888--- linux-2.6.32.41/drivers/hwmon/w83791d.c 2011-03-27 14:31:47.000000000 -0400
28889+++ linux-2.6.32.41/drivers/hwmon/w83791d.c 2011-04-17 15:56:46.000000000 -0400
28890@@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_cli
28891 struct i2c_board_info *info);
28892 static int w83791d_remove(struct i2c_client *client);
28893
28894-static int w83791d_read(struct i2c_client *client, u8 register);
28895-static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
28896+static int w83791d_read(struct i2c_client *client, u8 reg);
28897+static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
28898 static struct w83791d_data *w83791d_update_device(struct device *dev);
28899
28900 #ifdef DEBUG
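
The w83791d.c prototypes above named a parameter "register", which C parses as a storage-class specifier rather than an identifier, leaving an anonymous, register-qualified parameter that reads nothing like the matching definitions. Renaming it to reg makes the prototypes say what they mean. Side by side, with generic names:

    typedef unsigned char u8;

    /* Before: "register" is a storage-class specifier here, so the second
     * parameter is anonymous and merely register-qualified.
     */
    int read_reg_old(void *client, u8 register);

    /* After: the parameter actually has a name. */
    int read_reg_new(void *client, u8 reg);
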
28901diff -urNp linux-2.6.32.41/drivers/ide/ide-cd.c linux-2.6.32.41/drivers/ide/ide-cd.c
28902--- linux-2.6.32.41/drivers/ide/ide-cd.c 2011-03-27 14:31:47.000000000 -0400
28903+++ linux-2.6.32.41/drivers/ide/ide-cd.c 2011-04-17 15:56:46.000000000 -0400
28904@@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_
28905 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
28906 if ((unsigned long)buf & alignment
28907 || blk_rq_bytes(rq) & q->dma_pad_mask
28908- || object_is_on_stack(buf))
28909+ || object_starts_on_stack(buf))
28910 drive->dma = 0;
28911 }
28912 }
28913diff -urNp linux-2.6.32.41/drivers/ide/ide-floppy.c linux-2.6.32.41/drivers/ide/ide-floppy.c
28914--- linux-2.6.32.41/drivers/ide/ide-floppy.c 2011-03-27 14:31:47.000000000 -0400
28915+++ linux-2.6.32.41/drivers/ide/ide-floppy.c 2011-05-16 21:46:57.000000000 -0400
28916@@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_d
28917 u8 pc_buf[256], header_len, desc_cnt;
28918 int i, rc = 1, blocks, length;
28919
28920+ pax_track_stack();
28921+
28922 ide_debug_log(IDE_DBG_FUNC, "enter");
28923
28924 drive->bios_cyl = 0;
28925diff -urNp linux-2.6.32.41/drivers/ide/setup-pci.c linux-2.6.32.41/drivers/ide/setup-pci.c
28926--- linux-2.6.32.41/drivers/ide/setup-pci.c 2011-03-27 14:31:47.000000000 -0400
28927+++ linux-2.6.32.41/drivers/ide/setup-pci.c 2011-05-16 21:46:57.000000000 -0400
28928@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
28929 int ret, i, n_ports = dev2 ? 4 : 2;
28930 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
28931
28932+ pax_track_stack();
28933+
28934 for (i = 0; i < n_ports / 2; i++) {
28935 ret = ide_setup_pci_controller(pdev[i], d, !i);
28936 if (ret < 0)
28937diff -urNp linux-2.6.32.41/drivers/ieee1394/dv1394.c linux-2.6.32.41/drivers/ieee1394/dv1394.c
28938--- linux-2.6.32.41/drivers/ieee1394/dv1394.c 2011-03-27 14:31:47.000000000 -0400
28939+++ linux-2.6.32.41/drivers/ieee1394/dv1394.c 2011-04-23 12:56:11.000000000 -0400
28940@@ -739,7 +739,7 @@ static void frame_prepare(struct video_c
28941 based upon DIF section and sequence
28942 */
28943
28944-static void inline
28945+static inline void
28946 frame_put_packet (struct frame *f, struct packet *p)
28947 {
28948 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
28949diff -urNp linux-2.6.32.41/drivers/ieee1394/hosts.c linux-2.6.32.41/drivers/ieee1394/hosts.c
28950--- linux-2.6.32.41/drivers/ieee1394/hosts.c 2011-03-27 14:31:47.000000000 -0400
28951+++ linux-2.6.32.41/drivers/ieee1394/hosts.c 2011-04-17 15:56:46.000000000 -0400
28952@@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso
28953 }
28954
28955 static struct hpsb_host_driver dummy_driver = {
28956+ .name = "dummy",
28957 .transmit_packet = dummy_transmit_packet,
28958 .devctl = dummy_devctl,
28959 .isoctl = dummy_isoctl
28960diff -urNp linux-2.6.32.41/drivers/ieee1394/init_ohci1394_dma.c linux-2.6.32.41/drivers/ieee1394/init_ohci1394_dma.c
28961--- linux-2.6.32.41/drivers/ieee1394/init_ohci1394_dma.c 2011-03-27 14:31:47.000000000 -0400
28962+++ linux-2.6.32.41/drivers/ieee1394/init_ohci1394_dma.c 2011-04-17 15:56:46.000000000 -0400
28963@@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_con
28964 for (func = 0; func < 8; func++) {
28965 u32 class = read_pci_config(num,slot,func,
28966 PCI_CLASS_REVISION);
28967- if ((class == 0xffffffff))
28968+ if (class == 0xffffffff)
28969 continue; /* No device at this func */
28970
28971 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
28972diff -urNp linux-2.6.32.41/drivers/ieee1394/ohci1394.c linux-2.6.32.41/drivers/ieee1394/ohci1394.c
28973--- linux-2.6.32.41/drivers/ieee1394/ohci1394.c 2011-03-27 14:31:47.000000000 -0400
28974+++ linux-2.6.32.41/drivers/ieee1394/ohci1394.c 2011-04-23 12:56:11.000000000 -0400
28975@@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_
28976 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
28977
28978 /* Module Parameters */
28979-static int phys_dma = 1;
28980+static int phys_dma;
28981 module_param(phys_dma, int, 0444);
28982-MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
28983+MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
28984
28985 static void dma_trm_tasklet(unsigned long data);
28986 static void dma_trm_reset(struct dma_trm_ctx *d);
28987diff -urNp linux-2.6.32.41/drivers/ieee1394/sbp2.c linux-2.6.32.41/drivers/ieee1394/sbp2.c
28988--- linux-2.6.32.41/drivers/ieee1394/sbp2.c 2011-03-27 14:31:47.000000000 -0400
28989+++ linux-2.6.32.41/drivers/ieee1394/sbp2.c 2011-04-23 12:56:11.000000000 -0400
28990@@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 prot
28991 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
28992 MODULE_LICENSE("GPL");
28993
28994-static int sbp2_module_init(void)
28995+static int __init sbp2_module_init(void)
28996 {
28997 int ret;
28998
28999diff -urNp linux-2.6.32.41/drivers/infiniband/core/cm.c linux-2.6.32.41/drivers/infiniband/core/cm.c
29000--- linux-2.6.32.41/drivers/infiniband/core/cm.c 2011-03-27 14:31:47.000000000 -0400
29001+++ linux-2.6.32.41/drivers/infiniband/core/cm.c 2011-04-17 15:56:46.000000000 -0400
29002@@ -112,7 +112,7 @@ static char const counter_group_names[CM
29003
29004 struct cm_counter_group {
29005 struct kobject obj;
29006- atomic_long_t counter[CM_ATTR_COUNT];
29007+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
29008 };
29009
29010 struct cm_counter_attribute {
29011@@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm
29012 struct ib_mad_send_buf *msg = NULL;
29013 int ret;
29014
29015- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29016+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29017 counter[CM_REQ_COUNTER]);
29018
29019 /* Quick state check to discard duplicate REQs. */
29020@@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm
29021 if (!cm_id_priv)
29022 return;
29023
29024- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29025+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29026 counter[CM_REP_COUNTER]);
29027 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
29028 if (ret)
29029@@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work
29030 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
29031 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
29032 spin_unlock_irq(&cm_id_priv->lock);
29033- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29034+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29035 counter[CM_RTU_COUNTER]);
29036 goto out;
29037 }
29038@@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_wor
29039 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
29040 dreq_msg->local_comm_id);
29041 if (!cm_id_priv) {
29042- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29043+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29044 counter[CM_DREQ_COUNTER]);
29045 cm_issue_drep(work->port, work->mad_recv_wc);
29046 return -EINVAL;
29047@@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_wor
29048 case IB_CM_MRA_REP_RCVD:
29049 break;
29050 case IB_CM_TIMEWAIT:
29051- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29052+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29053 counter[CM_DREQ_COUNTER]);
29054 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29055 goto unlock;
29056@@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_wor
29057 cm_free_msg(msg);
29058 goto deref;
29059 case IB_CM_DREQ_RCVD:
29060- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29061+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29062 counter[CM_DREQ_COUNTER]);
29063 goto unlock;
29064 default:
29065@@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work
29066 ib_modify_mad(cm_id_priv->av.port->mad_agent,
29067 cm_id_priv->msg, timeout)) {
29068 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
29069- atomic_long_inc(&work->port->
29070+ atomic_long_inc_unchecked(&work->port->
29071 counter_group[CM_RECV_DUPLICATES].
29072 counter[CM_MRA_COUNTER]);
29073 goto out;
29074@@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work
29075 break;
29076 case IB_CM_MRA_REQ_RCVD:
29077 case IB_CM_MRA_REP_RCVD:
29078- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29079+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29080 counter[CM_MRA_COUNTER]);
29081 /* fall through */
29082 default:
29083@@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work
29084 case IB_CM_LAP_IDLE:
29085 break;
29086 case IB_CM_MRA_LAP_SENT:
29087- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29088+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29089 counter[CM_LAP_COUNTER]);
29090 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29091 goto unlock;
29092@@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work
29093 cm_free_msg(msg);
29094 goto deref;
29095 case IB_CM_LAP_RCVD:
29096- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29097+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29098 counter[CM_LAP_COUNTER]);
29099 goto unlock;
29100 default:
29101@@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm
29102 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
29103 if (cur_cm_id_priv) {
29104 spin_unlock_irq(&cm.lock);
29105- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29106+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29107 counter[CM_SIDR_REQ_COUNTER]);
29108 goto out; /* Duplicate message. */
29109 }
29110@@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_ma
29111 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
29112 msg->retries = 1;
29113
29114- atomic_long_add(1 + msg->retries,
29115+ atomic_long_add_unchecked(1 + msg->retries,
29116 &port->counter_group[CM_XMIT].counter[attr_index]);
29117 if (msg->retries)
29118- atomic_long_add(msg->retries,
29119+ atomic_long_add_unchecked(msg->retries,
29120 &port->counter_group[CM_XMIT_RETRIES].
29121 counter[attr_index]);
29122
29123@@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_ma
29124 }
29125
29126 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
29127- atomic_long_inc(&port->counter_group[CM_RECV].
29128+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
29129 counter[attr_id - CM_ATTR_ID_OFFSET]);
29130
29131 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
29132@@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct ko
29133 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
29134
29135 return sprintf(buf, "%ld\n",
29136- atomic_long_read(&group->counter[cm_attr->index]));
29137+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
29138 }
29139
29140-static struct sysfs_ops cm_counter_ops = {
29141+static const struct sysfs_ops cm_counter_ops = {
29142 .show = cm_show_counter
29143 };
29144
29145diff -urNp linux-2.6.32.41/drivers/infiniband/core/fmr_pool.c linux-2.6.32.41/drivers/infiniband/core/fmr_pool.c
29146--- linux-2.6.32.41/drivers/infiniband/core/fmr_pool.c 2011-03-27 14:31:47.000000000 -0400
29147+++ linux-2.6.32.41/drivers/infiniband/core/fmr_pool.c 2011-05-04 17:56:28.000000000 -0400
29148@@ -97,8 +97,8 @@ struct ib_fmr_pool {
29149
29150 struct task_struct *thread;
29151
29152- atomic_t req_ser;
29153- atomic_t flush_ser;
29154+ atomic_unchecked_t req_ser;
29155+ atomic_unchecked_t flush_ser;
29156
29157 wait_queue_head_t force_wait;
29158 };
29159@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
29160 struct ib_fmr_pool *pool = pool_ptr;
29161
29162 do {
29163- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
29164+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
29165 ib_fmr_batch_release(pool);
29166
29167- atomic_inc(&pool->flush_ser);
29168+ atomic_inc_unchecked(&pool->flush_ser);
29169 wake_up_interruptible(&pool->force_wait);
29170
29171 if (pool->flush_function)
29172@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
29173 }
29174
29175 set_current_state(TASK_INTERRUPTIBLE);
29176- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
29177+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
29178 !kthread_should_stop())
29179 schedule();
29180 __set_current_state(TASK_RUNNING);
29181@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
29182 pool->dirty_watermark = params->dirty_watermark;
29183 pool->dirty_len = 0;
29184 spin_lock_init(&pool->pool_lock);
29185- atomic_set(&pool->req_ser, 0);
29186- atomic_set(&pool->flush_ser, 0);
29187+ atomic_set_unchecked(&pool->req_ser, 0);
29188+ atomic_set_unchecked(&pool->flush_ser, 0);
29189 init_waitqueue_head(&pool->force_wait);
29190
29191 pool->thread = kthread_run(ib_fmr_cleanup_thread,
29192@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
29193 }
29194 spin_unlock_irq(&pool->pool_lock);
29195
29196- serial = atomic_inc_return(&pool->req_ser);
29197+ serial = atomic_inc_return_unchecked(&pool->req_ser);
29198 wake_up_process(pool->thread);
29199
29200 if (wait_event_interruptible(pool->force_wait,
29201- atomic_read(&pool->flush_ser) - serial >= 0))
29202+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
29203 return -EINTR;
29204
29205 return 0;
29206@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
29207 } else {
29208 list_add_tail(&fmr->list, &pool->dirty_list);
29209 if (++pool->dirty_len >= pool->dirty_watermark) {
29210- atomic_inc(&pool->req_ser);
29211+ atomic_inc_unchecked(&pool->req_ser);
29212 wake_up_process(pool->thread);
29213 }
29214 }
29215diff -urNp linux-2.6.32.41/drivers/infiniband/core/sysfs.c linux-2.6.32.41/drivers/infiniband/core/sysfs.c
29216--- linux-2.6.32.41/drivers/infiniband/core/sysfs.c 2011-03-27 14:31:47.000000000 -0400
29217+++ linux-2.6.32.41/drivers/infiniband/core/sysfs.c 2011-04-17 15:56:46.000000000 -0400
29218@@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kob
29219 return port_attr->show(p, port_attr, buf);
29220 }
29221
29222-static struct sysfs_ops port_sysfs_ops = {
29223+static const struct sysfs_ops port_sysfs_ops = {
29224 .show = port_attr_show
29225 };
29226
29227diff -urNp linux-2.6.32.41/drivers/infiniband/core/uverbs_marshall.c linux-2.6.32.41/drivers/infiniband/core/uverbs_marshall.c
29228--- linux-2.6.32.41/drivers/infiniband/core/uverbs_marshall.c 2011-03-27 14:31:47.000000000 -0400
29229+++ linux-2.6.32.41/drivers/infiniband/core/uverbs_marshall.c 2011-04-17 15:56:46.000000000 -0400
29230@@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_u
29231 dst->grh.sgid_index = src->grh.sgid_index;
29232 dst->grh.hop_limit = src->grh.hop_limit;
29233 dst->grh.traffic_class = src->grh.traffic_class;
29234+ memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
29235 dst->dlid = src->dlid;
29236 dst->sl = src->sl;
29237 dst->src_path_bits = src->src_path_bits;
29238 dst->static_rate = src->static_rate;
29239 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
29240 dst->port_num = src->port_num;
29241+ dst->reserved = 0;
29242 }
29243 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
29244
29245 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
29246 struct ib_qp_attr *src)
29247 {
29248+ dst->qp_state = src->qp_state;
29249 dst->cur_qp_state = src->cur_qp_state;
29250 dst->path_mtu = src->path_mtu;
29251 dst->path_mig_state = src->path_mig_state;
29252@@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_u
29253 dst->rnr_retry = src->rnr_retry;
29254 dst->alt_port_num = src->alt_port_num;
29255 dst->alt_timeout = src->alt_timeout;
29256+ memset(dst->reserved, 0, sizeof(dst->reserved));
29257 }
29258 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
29259
29260diff -urNp linux-2.6.32.41/drivers/infiniband/hw/ipath/ipath_fs.c linux-2.6.32.41/drivers/infiniband/hw/ipath/ipath_fs.c
29261--- linux-2.6.32.41/drivers/infiniband/hw/ipath/ipath_fs.c 2011-03-27 14:31:47.000000000 -0400
29262+++ linux-2.6.32.41/drivers/infiniband/hw/ipath/ipath_fs.c 2011-05-16 21:46:57.000000000 -0400
29263@@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(stru
29264 struct infinipath_counters counters;
29265 struct ipath_devdata *dd;
29266
29267+ pax_track_stack();
29268+
29269 dd = file->f_path.dentry->d_inode->i_private;
29270 dd->ipath_f_read_counters(dd, &counters);
29271
29272diff -urNp linux-2.6.32.41/drivers/infiniband/hw/nes/nes.c linux-2.6.32.41/drivers/infiniband/hw/nes/nes.c
29273--- linux-2.6.32.41/drivers/infiniband/hw/nes/nes.c 2011-03-27 14:31:47.000000000 -0400
29274+++ linux-2.6.32.41/drivers/infiniband/hw/nes/nes.c 2011-05-04 17:56:28.000000000 -0400
29275@@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
29276 LIST_HEAD(nes_adapter_list);
29277 static LIST_HEAD(nes_dev_list);
29278
29279-atomic_t qps_destroyed;
29280+atomic_unchecked_t qps_destroyed;
29281
29282 static unsigned int ee_flsh_adapter;
29283 static unsigned int sysfs_nonidx_addr;
29284@@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(str
29285 struct nes_adapter *nesadapter = nesdev->nesadapter;
29286 u32 qp_id;
29287
29288- atomic_inc(&qps_destroyed);
29289+ atomic_inc_unchecked(&qps_destroyed);
29290
29291 /* Free the control structures */
29292
29293diff -urNp linux-2.6.32.41/drivers/infiniband/hw/nes/nes_cm.c linux-2.6.32.41/drivers/infiniband/hw/nes/nes_cm.c
29294--- linux-2.6.32.41/drivers/infiniband/hw/nes/nes_cm.c 2011-03-27 14:31:47.000000000 -0400
29295+++ linux-2.6.32.41/drivers/infiniband/hw/nes/nes_cm.c 2011-05-04 17:56:28.000000000 -0400
29296@@ -69,11 +69,11 @@ u32 cm_packets_received;
29297 u32 cm_listens_created;
29298 u32 cm_listens_destroyed;
29299 u32 cm_backlog_drops;
29300-atomic_t cm_loopbacks;
29301-atomic_t cm_nodes_created;
29302-atomic_t cm_nodes_destroyed;
29303-atomic_t cm_accel_dropped_pkts;
29304-atomic_t cm_resets_recvd;
29305+atomic_unchecked_t cm_loopbacks;
29306+atomic_unchecked_t cm_nodes_created;
29307+atomic_unchecked_t cm_nodes_destroyed;
29308+atomic_unchecked_t cm_accel_dropped_pkts;
29309+atomic_unchecked_t cm_resets_recvd;
29310
29311 static inline int mini_cm_accelerated(struct nes_cm_core *,
29312 struct nes_cm_node *);
29313@@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
29314
29315 static struct nes_cm_core *g_cm_core;
29316
29317-atomic_t cm_connects;
29318-atomic_t cm_accepts;
29319-atomic_t cm_disconnects;
29320-atomic_t cm_closes;
29321-atomic_t cm_connecteds;
29322-atomic_t cm_connect_reqs;
29323-atomic_t cm_rejects;
29324+atomic_unchecked_t cm_connects;
29325+atomic_unchecked_t cm_accepts;
29326+atomic_unchecked_t cm_disconnects;
29327+atomic_unchecked_t cm_closes;
29328+atomic_unchecked_t cm_connecteds;
29329+atomic_unchecked_t cm_connect_reqs;
29330+atomic_unchecked_t cm_rejects;
29331
29332
29333 /**
29334@@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(
29335 cm_node->rem_mac);
29336
29337 add_hte_node(cm_core, cm_node);
29338- atomic_inc(&cm_nodes_created);
29339+ atomic_inc_unchecked(&cm_nodes_created);
29340
29341 return cm_node;
29342 }
29343@@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm
29344 }
29345
29346 atomic_dec(&cm_core->node_cnt);
29347- atomic_inc(&cm_nodes_destroyed);
29348+ atomic_inc_unchecked(&cm_nodes_destroyed);
29349 nesqp = cm_node->nesqp;
29350 if (nesqp) {
29351 nesqp->cm_node = NULL;
29352@@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm
29353
29354 static void drop_packet(struct sk_buff *skb)
29355 {
29356- atomic_inc(&cm_accel_dropped_pkts);
29357+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
29358 dev_kfree_skb_any(skb);
29359 }
29360
29361@@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm
29362
29363 int reset = 0; /* whether to send reset in case of err.. */
29364 int passive_state;
29365- atomic_inc(&cm_resets_recvd);
29366+ atomic_inc_unchecked(&cm_resets_recvd);
29367 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
29368 " refcnt=%d\n", cm_node, cm_node->state,
29369 atomic_read(&cm_node->ref_count));
29370@@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_conne
29371 rem_ref_cm_node(cm_node->cm_core, cm_node);
29372 return NULL;
29373 }
29374- atomic_inc(&cm_loopbacks);
29375+ atomic_inc_unchecked(&cm_loopbacks);
29376 loopbackremotenode->loopbackpartner = cm_node;
29377 loopbackremotenode->tcp_cntxt.rcv_wscale =
29378 NES_CM_DEFAULT_RCV_WND_SCALE;
29379@@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_c
29380 add_ref_cm_node(cm_node);
29381 } else if (cm_node->state == NES_CM_STATE_TSA) {
29382 rem_ref_cm_node(cm_core, cm_node);
29383- atomic_inc(&cm_accel_dropped_pkts);
29384+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
29385 dev_kfree_skb_any(skb);
29386 break;
29387 }
29388@@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct ne
29389
29390 if ((cm_id) && (cm_id->event_handler)) {
29391 if (issue_disconn) {
29392- atomic_inc(&cm_disconnects);
29393+ atomic_inc_unchecked(&cm_disconnects);
29394 cm_event.event = IW_CM_EVENT_DISCONNECT;
29395 cm_event.status = disconn_status;
29396 cm_event.local_addr = cm_id->local_addr;
29397@@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct ne
29398 }
29399
29400 if (issue_close) {
29401- atomic_inc(&cm_closes);
29402+ atomic_inc_unchecked(&cm_closes);
29403 nes_disconnect(nesqp, 1);
29404
29405 cm_id->provider_data = nesqp;
29406@@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
29407
29408 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
29409 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
29410- atomic_inc(&cm_accepts);
29411+ atomic_inc_unchecked(&cm_accepts);
29412
29413 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
29414 atomic_read(&nesvnic->netdev->refcnt));
29415@@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
29416
29417 struct nes_cm_core *cm_core;
29418
29419- atomic_inc(&cm_rejects);
29420+ atomic_inc_unchecked(&cm_rejects);
29421 cm_node = (struct nes_cm_node *) cm_id->provider_data;
29422 loopback = cm_node->loopbackpartner;
29423 cm_core = cm_node->cm_core;
29424@@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id,
29425 ntohl(cm_id->local_addr.sin_addr.s_addr),
29426 ntohs(cm_id->local_addr.sin_port));
29427
29428- atomic_inc(&cm_connects);
29429+ atomic_inc_unchecked(&cm_connects);
29430 nesqp->active_conn = 1;
29431
29432 /* cache the cm_id in the qp */
29433@@ -3195,7 +3195,7 @@ static void cm_event_connected(struct ne
29434 if (nesqp->destroyed) {
29435 return;
29436 }
29437- atomic_inc(&cm_connecteds);
29438+ atomic_inc_unchecked(&cm_connecteds);
29439 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
29440 " local port 0x%04X. jiffies = %lu.\n",
29441 nesqp->hwqp.qp_id,
29442@@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm
29443
29444 ret = cm_id->event_handler(cm_id, &cm_event);
29445 cm_id->add_ref(cm_id);
29446- atomic_inc(&cm_closes);
29447+ atomic_inc_unchecked(&cm_closes);
29448 cm_event.event = IW_CM_EVENT_CLOSE;
29449 cm_event.status = IW_CM_EVENT_STATUS_OK;
29450 cm_event.provider_data = cm_id->provider_data;
29451@@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_
29452 return;
29453 cm_id = cm_node->cm_id;
29454
29455- atomic_inc(&cm_connect_reqs);
29456+ atomic_inc_unchecked(&cm_connect_reqs);
29457 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
29458 cm_node, cm_id, jiffies);
29459
29460@@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct n
29461 return;
29462 cm_id = cm_node->cm_id;
29463
29464- atomic_inc(&cm_connect_reqs);
29465+ atomic_inc_unchecked(&cm_connect_reqs);
29466 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
29467 cm_node, cm_id, jiffies);
29468
29469diff -urNp linux-2.6.32.41/drivers/infiniband/hw/nes/nes.h linux-2.6.32.41/drivers/infiniband/hw/nes/nes.h
29470--- linux-2.6.32.41/drivers/infiniband/hw/nes/nes.h 2011-03-27 14:31:47.000000000 -0400
29471+++ linux-2.6.32.41/drivers/infiniband/hw/nes/nes.h 2011-05-04 17:56:28.000000000 -0400
29472@@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
29473 extern unsigned int wqm_quanta;
29474 extern struct list_head nes_adapter_list;
29475
29476-extern atomic_t cm_connects;
29477-extern atomic_t cm_accepts;
29478-extern atomic_t cm_disconnects;
29479-extern atomic_t cm_closes;
29480-extern atomic_t cm_connecteds;
29481-extern atomic_t cm_connect_reqs;
29482-extern atomic_t cm_rejects;
29483-extern atomic_t mod_qp_timouts;
29484-extern atomic_t qps_created;
29485-extern atomic_t qps_destroyed;
29486-extern atomic_t sw_qps_destroyed;
29487+extern atomic_unchecked_t cm_connects;
29488+extern atomic_unchecked_t cm_accepts;
29489+extern atomic_unchecked_t cm_disconnects;
29490+extern atomic_unchecked_t cm_closes;
29491+extern atomic_unchecked_t cm_connecteds;
29492+extern atomic_unchecked_t cm_connect_reqs;
29493+extern atomic_unchecked_t cm_rejects;
29494+extern atomic_unchecked_t mod_qp_timouts;
29495+extern atomic_unchecked_t qps_created;
29496+extern atomic_unchecked_t qps_destroyed;
29497+extern atomic_unchecked_t sw_qps_destroyed;
29498 extern u32 mh_detected;
29499 extern u32 mh_pauses_sent;
29500 extern u32 cm_packets_sent;
29501@@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
29502 extern u32 cm_listens_created;
29503 extern u32 cm_listens_destroyed;
29504 extern u32 cm_backlog_drops;
29505-extern atomic_t cm_loopbacks;
29506-extern atomic_t cm_nodes_created;
29507-extern atomic_t cm_nodes_destroyed;
29508-extern atomic_t cm_accel_dropped_pkts;
29509-extern atomic_t cm_resets_recvd;
29510+extern atomic_unchecked_t cm_loopbacks;
29511+extern atomic_unchecked_t cm_nodes_created;
29512+extern atomic_unchecked_t cm_nodes_destroyed;
29513+extern atomic_unchecked_t cm_accel_dropped_pkts;
29514+extern atomic_unchecked_t cm_resets_recvd;
29515
29516 extern u32 int_mod_timer_init;
29517 extern u32 int_mod_cq_depth_256;
29518diff -urNp linux-2.6.32.41/drivers/infiniband/hw/nes/nes_nic.c linux-2.6.32.41/drivers/infiniband/hw/nes/nes_nic.c
29519--- linux-2.6.32.41/drivers/infiniband/hw/nes/nes_nic.c 2011-03-27 14:31:47.000000000 -0400
29520+++ linux-2.6.32.41/drivers/infiniband/hw/nes/nes_nic.c 2011-05-04 17:56:28.000000000 -0400
29521@@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats
29522 target_stat_values[++index] = mh_detected;
29523 target_stat_values[++index] = mh_pauses_sent;
29524 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
29525- target_stat_values[++index] = atomic_read(&cm_connects);
29526- target_stat_values[++index] = atomic_read(&cm_accepts);
29527- target_stat_values[++index] = atomic_read(&cm_disconnects);
29528- target_stat_values[++index] = atomic_read(&cm_connecteds);
29529- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
29530- target_stat_values[++index] = atomic_read(&cm_rejects);
29531- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
29532- target_stat_values[++index] = atomic_read(&qps_created);
29533- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
29534- target_stat_values[++index] = atomic_read(&qps_destroyed);
29535- target_stat_values[++index] = atomic_read(&cm_closes);
29536+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
29537+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
29538+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
29539+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
29540+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
29541+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
29542+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
29543+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
29544+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
29545+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
29546+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
29547 target_stat_values[++index] = cm_packets_sent;
29548 target_stat_values[++index] = cm_packets_bounced;
29549 target_stat_values[++index] = cm_packets_created;
29550@@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats
29551 target_stat_values[++index] = cm_listens_created;
29552 target_stat_values[++index] = cm_listens_destroyed;
29553 target_stat_values[++index] = cm_backlog_drops;
29554- target_stat_values[++index] = atomic_read(&cm_loopbacks);
29555- target_stat_values[++index] = atomic_read(&cm_nodes_created);
29556- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
29557- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
29558- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
29559+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
29560+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
29561+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
29562+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
29563+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
29564 target_stat_values[++index] = int_mod_timer_init;
29565 target_stat_values[++index] = int_mod_cq_depth_1;
29566 target_stat_values[++index] = int_mod_cq_depth_4;
29567diff -urNp linux-2.6.32.41/drivers/infiniband/hw/nes/nes_verbs.c linux-2.6.32.41/drivers/infiniband/hw/nes/nes_verbs.c
29568--- linux-2.6.32.41/drivers/infiniband/hw/nes/nes_verbs.c 2011-03-27 14:31:47.000000000 -0400
29569+++ linux-2.6.32.41/drivers/infiniband/hw/nes/nes_verbs.c 2011-05-04 17:56:28.000000000 -0400
29570@@ -45,9 +45,9 @@
29571
29572 #include <rdma/ib_umem.h>
29573
29574-atomic_t mod_qp_timouts;
29575-atomic_t qps_created;
29576-atomic_t sw_qps_destroyed;
29577+atomic_unchecked_t mod_qp_timouts;
29578+atomic_unchecked_t qps_created;
29579+atomic_unchecked_t sw_qps_destroyed;
29580
29581 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
29582
29583@@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struc
29584 if (init_attr->create_flags)
29585 return ERR_PTR(-EINVAL);
29586
29587- atomic_inc(&qps_created);
29588+ atomic_inc_unchecked(&qps_created);
29589 switch (init_attr->qp_type) {
29590 case IB_QPT_RC:
29591 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
29592@@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *
29593 struct iw_cm_event cm_event;
29594 int ret;
29595
29596- atomic_inc(&sw_qps_destroyed);
29597+ atomic_inc_unchecked(&sw_qps_destroyed);
29598 nesqp->destroyed = 1;
29599
29600 /* Blow away the connection if it exists. */
29601diff -urNp linux-2.6.32.41/drivers/input/gameport/gameport.c linux-2.6.32.41/drivers/input/gameport/gameport.c
29602--- linux-2.6.32.41/drivers/input/gameport/gameport.c 2011-03-27 14:31:47.000000000 -0400
29603+++ linux-2.6.32.41/drivers/input/gameport/gameport.c 2011-05-04 17:56:28.000000000 -0400
29604@@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
29605 */
29606 static void gameport_init_port(struct gameport *gameport)
29607 {
29608- static atomic_t gameport_no = ATOMIC_INIT(0);
29609+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
29610
29611 __module_get(THIS_MODULE);
29612
29613 mutex_init(&gameport->drv_mutex);
29614 device_initialize(&gameport->dev);
29615- dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
29616+ dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
29617 gameport->dev.bus = &gameport_bus;
29618 gameport->dev.release = gameport_release_port;
29619 if (gameport->parent)
29620diff -urNp linux-2.6.32.41/drivers/input/input.c linux-2.6.32.41/drivers/input/input.c
29621--- linux-2.6.32.41/drivers/input/input.c 2011-03-27 14:31:47.000000000 -0400
29622+++ linux-2.6.32.41/drivers/input/input.c 2011-05-04 17:56:28.000000000 -0400
29623@@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
29624 */
29625 int input_register_device(struct input_dev *dev)
29626 {
29627- static atomic_t input_no = ATOMIC_INIT(0);
29628+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
29629 struct input_handler *handler;
29630 const char *path;
29631 int error;
29632@@ -1585,7 +1585,7 @@ int input_register_device(struct input_d
29633 dev->setkeycode = input_default_setkeycode;
29634
29635 dev_set_name(&dev->dev, "input%ld",
29636- (unsigned long) atomic_inc_return(&input_no) - 1);
29637+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
29638
29639 error = device_add(&dev->dev);
29640 if (error)
29641diff -urNp linux-2.6.32.41/drivers/input/joystick/sidewinder.c linux-2.6.32.41/drivers/input/joystick/sidewinder.c
29642--- linux-2.6.32.41/drivers/input/joystick/sidewinder.c 2011-03-27 14:31:47.000000000 -0400
29643+++ linux-2.6.32.41/drivers/input/joystick/sidewinder.c 2011-05-18 20:09:36.000000000 -0400
29644@@ -30,6 +30,7 @@
29645 #include <linux/kernel.h>
29646 #include <linux/module.h>
29647 #include <linux/slab.h>
29648+#include <linux/sched.h>
29649 #include <linux/init.h>
29650 #include <linux/input.h>
29651 #include <linux/gameport.h>
29652@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
29653 unsigned char buf[SW_LENGTH];
29654 int i;
29655
29656+ pax_track_stack();
29657+
29658 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
29659
29660 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
29661diff -urNp linux-2.6.32.41/drivers/input/joystick/xpad.c linux-2.6.32.41/drivers/input/joystick/xpad.c
29662--- linux-2.6.32.41/drivers/input/joystick/xpad.c 2011-03-27 14:31:47.000000000 -0400
29663+++ linux-2.6.32.41/drivers/input/joystick/xpad.c 2011-05-04 17:56:28.000000000 -0400
29664@@ -621,7 +621,7 @@ static void xpad_led_set(struct led_clas
29665
29666 static int xpad_led_probe(struct usb_xpad *xpad)
29667 {
29668- static atomic_t led_seq = ATOMIC_INIT(0);
29669+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
29670 long led_no;
29671 struct xpad_led *led;
29672 struct led_classdev *led_cdev;
29673@@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpa
29674 if (!led)
29675 return -ENOMEM;
29676
29677- led_no = (long)atomic_inc_return(&led_seq) - 1;
29678+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
29679
29680 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
29681 led->xpad = xpad;
29682diff -urNp linux-2.6.32.41/drivers/input/serio/serio.c linux-2.6.32.41/drivers/input/serio/serio.c
29683--- linux-2.6.32.41/drivers/input/serio/serio.c 2011-03-27 14:31:47.000000000 -0400
29684+++ linux-2.6.32.41/drivers/input/serio/serio.c 2011-05-04 17:56:28.000000000 -0400
29685@@ -527,7 +527,7 @@ static void serio_release_port(struct de
29686 */
29687 static void serio_init_port(struct serio *serio)
29688 {
29689- static atomic_t serio_no = ATOMIC_INIT(0);
29690+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
29691
29692 __module_get(THIS_MODULE);
29693
29694@@ -536,7 +536,7 @@ static void serio_init_port(struct serio
29695 mutex_init(&serio->drv_mutex);
29696 device_initialize(&serio->dev);
29697 dev_set_name(&serio->dev, "serio%ld",
29698- (long)atomic_inc_return(&serio_no) - 1);
29699+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
29700 serio->dev.bus = &serio_bus;
29701 serio->dev.release = serio_release_port;
29702 if (serio->parent) {
29703diff -urNp linux-2.6.32.41/drivers/isdn/gigaset/common.c linux-2.6.32.41/drivers/isdn/gigaset/common.c
29704--- linux-2.6.32.41/drivers/isdn/gigaset/common.c 2011-03-27 14:31:47.000000000 -0400
29705+++ linux-2.6.32.41/drivers/isdn/gigaset/common.c 2011-04-17 15:56:46.000000000 -0400
29706@@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct
29707 cs->commands_pending = 0;
29708 cs->cur_at_seq = 0;
29709 cs->gotfwver = -1;
29710- cs->open_count = 0;
29711+ local_set(&cs->open_count, 0);
29712 cs->dev = NULL;
29713 cs->tty = NULL;
29714 cs->tty_dev = NULL;
29715diff -urNp linux-2.6.32.41/drivers/isdn/gigaset/gigaset.h linux-2.6.32.41/drivers/isdn/gigaset/gigaset.h
29716--- linux-2.6.32.41/drivers/isdn/gigaset/gigaset.h 2011-03-27 14:31:47.000000000 -0400
29717+++ linux-2.6.32.41/drivers/isdn/gigaset/gigaset.h 2011-04-17 15:56:46.000000000 -0400
29718@@ -34,6 +34,7 @@
29719 #include <linux/tty_driver.h>
29720 #include <linux/list.h>
29721 #include <asm/atomic.h>
29722+#include <asm/local.h>
29723
29724 #define GIG_VERSION {0,5,0,0}
29725 #define GIG_COMPAT {0,4,0,0}
29726@@ -446,7 +447,7 @@ struct cardstate {
29727 spinlock_t cmdlock;
29728 unsigned curlen, cmdbytes;
29729
29730- unsigned open_count;
29731+ local_t open_count;
29732 struct tty_struct *tty;
29733 struct tasklet_struct if_wake_tasklet;
29734 unsigned control_state;
29735diff -urNp linux-2.6.32.41/drivers/isdn/gigaset/interface.c linux-2.6.32.41/drivers/isdn/gigaset/interface.c
29736--- linux-2.6.32.41/drivers/isdn/gigaset/interface.c 2011-03-27 14:31:47.000000000 -0400
29737+++ linux-2.6.32.41/drivers/isdn/gigaset/interface.c 2011-04-17 15:56:46.000000000 -0400
29738@@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tt
29739 return -ERESTARTSYS; // FIXME -EINTR?
29740 tty->driver_data = cs;
29741
29742- ++cs->open_count;
29743-
29744- if (cs->open_count == 1) {
29745+ if (local_inc_return(&cs->open_count) == 1) {
29746 spin_lock_irqsave(&cs->lock, flags);
29747 cs->tty = tty;
29748 spin_unlock_irqrestore(&cs->lock, flags);
29749@@ -195,10 +193,10 @@ static void if_close(struct tty_struct *
29750
29751 if (!cs->connected)
29752 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
29753- else if (!cs->open_count)
29754+ else if (!local_read(&cs->open_count))
29755 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29756 else {
29757- if (!--cs->open_count) {
29758+ if (!local_dec_return(&cs->open_count)) {
29759 spin_lock_irqsave(&cs->lock, flags);
29760 cs->tty = NULL;
29761 spin_unlock_irqrestore(&cs->lock, flags);
29762@@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *t
29763 if (!cs->connected) {
29764 gig_dbg(DEBUG_IF, "not connected");
29765 retval = -ENODEV;
29766- } else if (!cs->open_count)
29767+ } else if (!local_read(&cs->open_count))
29768 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29769 else {
29770 retval = 0;
29771@@ -361,7 +359,7 @@ static int if_write(struct tty_struct *t
29772 if (!cs->connected) {
29773 gig_dbg(DEBUG_IF, "not connected");
29774 retval = -ENODEV;
29775- } else if (!cs->open_count)
29776+ } else if (!local_read(&cs->open_count))
29777 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29778 else if (cs->mstate != MS_LOCKED) {
29779 dev_warn(cs->dev, "can't write to unlocked device\n");
29780@@ -395,7 +393,7 @@ static int if_write_room(struct tty_stru
29781 if (!cs->connected) {
29782 gig_dbg(DEBUG_IF, "not connected");
29783 retval = -ENODEV;
29784- } else if (!cs->open_count)
29785+ } else if (!local_read(&cs->open_count))
29786 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29787 else if (cs->mstate != MS_LOCKED) {
29788 dev_warn(cs->dev, "can't write to unlocked device\n");
29789@@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty
29790
29791 if (!cs->connected)
29792 gig_dbg(DEBUG_IF, "not connected");
29793- else if (!cs->open_count)
29794+ else if (!local_read(&cs->open_count))
29795 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29796 else if (cs->mstate != MS_LOCKED)
29797 dev_warn(cs->dev, "can't write to unlocked device\n");
29798@@ -453,7 +451,7 @@ static void if_throttle(struct tty_struc
29799
29800 if (!cs->connected)
29801 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
29802- else if (!cs->open_count)
29803+ else if (!local_read(&cs->open_count))
29804 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29805 else {
29806 //FIXME
29807@@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_str
29808
29809 if (!cs->connected)
29810 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
29811- else if (!cs->open_count)
29812+ else if (!local_read(&cs->open_count))
29813 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29814 else {
29815 //FIXME
29816@@ -510,7 +508,7 @@ static void if_set_termios(struct tty_st
29817 goto out;
29818 }
29819
29820- if (!cs->open_count) {
29821+ if (!local_read(&cs->open_count)) {
29822 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29823 goto out;
29824 }
29825diff -urNp linux-2.6.32.41/drivers/isdn/hardware/avm/b1.c linux-2.6.32.41/drivers/isdn/hardware/avm/b1.c
29826--- linux-2.6.32.41/drivers/isdn/hardware/avm/b1.c 2011-03-27 14:31:47.000000000 -0400
29827+++ linux-2.6.32.41/drivers/isdn/hardware/avm/b1.c 2011-04-17 15:56:46.000000000 -0400
29828@@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capilo
29829 }
29830 if (left) {
29831 if (t4file->user) {
29832- if (copy_from_user(buf, dp, left))
29833+ if (left > sizeof buf || copy_from_user(buf, dp, left))
29834 return -EFAULT;
29835 } else {
29836 memcpy(buf, dp, left);
29837@@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capilo
29838 }
29839 if (left) {
29840 if (config->user) {
29841- if (copy_from_user(buf, dp, left))
29842+ if (left > sizeof buf || copy_from_user(buf, dp, left))
29843 return -EFAULT;
29844 } else {
29845 memcpy(buf, dp, left);
29846diff -urNp linux-2.6.32.41/drivers/isdn/hardware/eicon/capidtmf.c linux-2.6.32.41/drivers/isdn/hardware/eicon/capidtmf.c
29847--- linux-2.6.32.41/drivers/isdn/hardware/eicon/capidtmf.c 2011-03-27 14:31:47.000000000 -0400
29848+++ linux-2.6.32.41/drivers/isdn/hardware/eicon/capidtmf.c 2011-05-16 21:46:57.000000000 -0400
29849@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
29850 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
29851 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
29852
29853+ pax_track_stack();
29854
29855 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
29856 {
29857diff -urNp linux-2.6.32.41/drivers/isdn/hardware/eicon/capifunc.c linux-2.6.32.41/drivers/isdn/hardware/eicon/capifunc.c
29858--- linux-2.6.32.41/drivers/isdn/hardware/eicon/capifunc.c 2011-03-27 14:31:47.000000000 -0400
29859+++ linux-2.6.32.41/drivers/isdn/hardware/eicon/capifunc.c 2011-05-16 21:46:57.000000000 -0400
29860@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
29861 IDI_SYNC_REQ req;
29862 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29863
29864+ pax_track_stack();
29865+
29866 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29867
29868 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29869diff -urNp linux-2.6.32.41/drivers/isdn/hardware/eicon/diddfunc.c linux-2.6.32.41/drivers/isdn/hardware/eicon/diddfunc.c
29870--- linux-2.6.32.41/drivers/isdn/hardware/eicon/diddfunc.c 2011-03-27 14:31:47.000000000 -0400
29871+++ linux-2.6.32.41/drivers/isdn/hardware/eicon/diddfunc.c 2011-05-16 21:46:57.000000000 -0400
29872@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
29873 IDI_SYNC_REQ req;
29874 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29875
29876+ pax_track_stack();
29877+
29878 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29879
29880 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29881diff -urNp linux-2.6.32.41/drivers/isdn/hardware/eicon/divasfunc.c linux-2.6.32.41/drivers/isdn/hardware/eicon/divasfunc.c
29882--- linux-2.6.32.41/drivers/isdn/hardware/eicon/divasfunc.c 2011-03-27 14:31:47.000000000 -0400
29883+++ linux-2.6.32.41/drivers/isdn/hardware/eicon/divasfunc.c 2011-05-16 21:46:57.000000000 -0400
29884@@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_di
29885 IDI_SYNC_REQ req;
29886 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29887
29888+ pax_track_stack();
29889+
29890 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29891
29892 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29893diff -urNp linux-2.6.32.41/drivers/isdn/hardware/eicon/idifunc.c linux-2.6.32.41/drivers/isdn/hardware/eicon/idifunc.c
29894--- linux-2.6.32.41/drivers/isdn/hardware/eicon/idifunc.c 2011-03-27 14:31:47.000000000 -0400
29895+++ linux-2.6.32.41/drivers/isdn/hardware/eicon/idifunc.c 2011-05-16 21:46:57.000000000 -0400
29896@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
29897 IDI_SYNC_REQ req;
29898 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29899
29900+ pax_track_stack();
29901+
29902 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29903
29904 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29905diff -urNp linux-2.6.32.41/drivers/isdn/hardware/eicon/message.c linux-2.6.32.41/drivers/isdn/hardware/eicon/message.c
29906--- linux-2.6.32.41/drivers/isdn/hardware/eicon/message.c 2011-03-27 14:31:47.000000000 -0400
29907+++ linux-2.6.32.41/drivers/isdn/hardware/eicon/message.c 2011-05-16 21:46:57.000000000 -0400
29908@@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
29909 dword d;
29910 word w;
29911
29912+ pax_track_stack();
29913+
29914 a = plci->adapter;
29915 Id = ((word)plci->Id<<8)|a->Id;
29916 PUT_WORD(&SS_Ind[4],0x0000);
29917@@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE
29918 word j, n, w;
29919 dword d;
29920
29921+ pax_track_stack();
29922+
29923
29924 for(i=0;i<8;i++) bp_parms[i].length = 0;
29925 for(i=0;i<2;i++) global_config[i].length = 0;
29926@@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARS
29927 const byte llc3[] = {4,3,2,2,6,6,0};
29928 const byte header[] = {0,2,3,3,0,0,0};
29929
29930+ pax_track_stack();
29931+
29932 for(i=0;i<8;i++) bp_parms[i].length = 0;
29933 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
29934 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
29935@@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI
29936 word appl_number_group_type[MAX_APPL];
29937 PLCI *auxplci;
29938
29939+ pax_track_stack();
29940+
29941 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
29942
29943 if(!a->group_optimization_enabled)
29944diff -urNp linux-2.6.32.41/drivers/isdn/hardware/eicon/mntfunc.c linux-2.6.32.41/drivers/isdn/hardware/eicon/mntfunc.c
29945--- linux-2.6.32.41/drivers/isdn/hardware/eicon/mntfunc.c 2011-03-27 14:31:47.000000000 -0400
29946+++ linux-2.6.32.41/drivers/isdn/hardware/eicon/mntfunc.c 2011-05-16 21:46:57.000000000 -0400
29947@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
29948 IDI_SYNC_REQ req;
29949 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
29950
29951+ pax_track_stack();
29952+
29953 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
29954
29955 for (x = 0; x < MAX_DESCRIPTORS; x++) {
29956diff -urNp linux-2.6.32.41/drivers/isdn/i4l/isdn_common.c linux-2.6.32.41/drivers/isdn/i4l/isdn_common.c
29957--- linux-2.6.32.41/drivers/isdn/i4l/isdn_common.c 2011-03-27 14:31:47.000000000 -0400
29958+++ linux-2.6.32.41/drivers/isdn/i4l/isdn_common.c 2011-05-16 21:46:57.000000000 -0400
29959@@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct f
29960 } iocpar;
29961 void __user *argp = (void __user *)arg;
29962
29963+ pax_track_stack();
29964+
29965 #define name iocpar.name
29966 #define bname iocpar.bname
29967 #define iocts iocpar.iocts
29968diff -urNp linux-2.6.32.41/drivers/isdn/icn/icn.c linux-2.6.32.41/drivers/isdn/icn/icn.c
29969--- linux-2.6.32.41/drivers/isdn/icn/icn.c 2011-03-27 14:31:47.000000000 -0400
29970+++ linux-2.6.32.41/drivers/isdn/icn/icn.c 2011-04-17 15:56:46.000000000 -0400
29971@@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len
29972 if (count > len)
29973 count = len;
29974 if (user) {
29975- if (copy_from_user(msg, buf, count))
29976+ if (count > sizeof msg || copy_from_user(msg, buf, count))
29977 return -EFAULT;
29978 } else
29979 memcpy(msg, buf, count);
29980diff -urNp linux-2.6.32.41/drivers/isdn/mISDN/socket.c linux-2.6.32.41/drivers/isdn/mISDN/socket.c
29981--- linux-2.6.32.41/drivers/isdn/mISDN/socket.c 2011-03-27 14:31:47.000000000 -0400
29982+++ linux-2.6.32.41/drivers/isdn/mISDN/socket.c 2011-04-17 15:56:46.000000000 -0400
29983@@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, uns
29984 if (dev) {
29985 struct mISDN_devinfo di;
29986
29987+ memset(&di, 0, sizeof(di));
29988 di.id = dev->id;
29989 di.Dprotocols = dev->Dprotocols;
29990 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
29991@@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, uns
29992 if (dev) {
29993 struct mISDN_devinfo di;
29994
29995+ memset(&di, 0, sizeof(di));
29996 di.id = dev->id;
29997 di.Dprotocols = dev->Dprotocols;
29998 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
29999diff -urNp linux-2.6.32.41/drivers/isdn/sc/interrupt.c linux-2.6.32.41/drivers/isdn/sc/interrupt.c
30000--- linux-2.6.32.41/drivers/isdn/sc/interrupt.c 2011-03-27 14:31:47.000000000 -0400
30001+++ linux-2.6.32.41/drivers/isdn/sc/interrupt.c 2011-04-17 15:56:46.000000000 -0400
30002@@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy,
30003 }
30004 else if(callid>=0x0000 && callid<=0x7FFF)
30005 {
30006+ int len;
30007+
30008 pr_debug("%s: Got Incoming Call\n",
30009 sc_adapter[card]->devicename);
30010- strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
30011- strcpy(setup.eazmsn,
30012- sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
30013+ len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
30014+ sizeof(setup.phone));
30015+ if (len >= sizeof(setup.phone))
30016+ continue;
30017+ len = strlcpy(setup.eazmsn,
30018+ sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
30019+ sizeof(setup.eazmsn));
30020+ if (len >= sizeof(setup.eazmsn))
30021+ continue;
30022 setup.si1 = 7;
30023 setup.si2 = 0;
30024 setup.plan = 0;
30025@@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy,
30026 * Handle a GetMyNumber Rsp
30027 */
30028 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
30029- strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
30030+ strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
30031+ rcvmsg.msg_data.byte_array,
30032+ sizeof(rcvmsg.msg_data.byte_array));
30033 continue;
30034 }
30035
30036diff -urNp linux-2.6.32.41/drivers/lguest/core.c linux-2.6.32.41/drivers/lguest/core.c
30037--- linux-2.6.32.41/drivers/lguest/core.c 2011-03-27 14:31:47.000000000 -0400
30038+++ linux-2.6.32.41/drivers/lguest/core.c 2011-04-17 15:56:46.000000000 -0400
30039@@ -91,9 +91,17 @@ static __init int map_switcher(void)
30040 * it's worked so far. The end address needs +1 because __get_vm_area
30041 * allocates an extra guard page, so we need space for that.
30042 */
30043+
30044+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
30045+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30046+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
30047+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30048+#else
30049 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30050 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
30051 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30052+#endif
30053+
30054 if (!switcher_vma) {
30055 err = -ENOMEM;
30056 printk("lguest: could not map switcher pages high\n");
30057@@ -118,7 +126,7 @@ static __init int map_switcher(void)
30058 * Now the Switcher is mapped at the right address, we can't fail!
30059 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
30060 */
30061- memcpy(switcher_vma->addr, start_switcher_text,
30062+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
30063 end_switcher_text - start_switcher_text);
30064
30065 printk(KERN_INFO "lguest: mapped switcher at %p\n",
30066diff -urNp linux-2.6.32.41/drivers/lguest/x86/core.c linux-2.6.32.41/drivers/lguest/x86/core.c
30067--- linux-2.6.32.41/drivers/lguest/x86/core.c 2011-03-27 14:31:47.000000000 -0400
30068+++ linux-2.6.32.41/drivers/lguest/x86/core.c 2011-04-17 15:56:46.000000000 -0400
30069@@ -59,7 +59,7 @@ static struct {
30070 /* Offset from where switcher.S was compiled to where we've copied it */
30071 static unsigned long switcher_offset(void)
30072 {
30073- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
30074+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
30075 }
30076
30077 /* This cpu's struct lguest_pages. */
30078@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
30079 * These copies are pretty cheap, so we do them unconditionally: */
30080 /* Save the current Host top-level page directory.
30081 */
30082+
30083+#ifdef CONFIG_PAX_PER_CPU_PGD
30084+ pages->state.host_cr3 = read_cr3();
30085+#else
30086 pages->state.host_cr3 = __pa(current->mm->pgd);
30087+#endif
30088+
30089 /*
30090 * Set up the Guest's page tables to see this CPU's pages (and no
30091 * other CPU's pages).
30092@@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
30093 * compiled-in switcher code and the high-mapped copy we just made.
30094 */
30095 for (i = 0; i < IDT_ENTRIES; i++)
30096- default_idt_entries[i] += switcher_offset();
30097+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
30098
30099 /*
30100 * Set up the Switcher's per-cpu areas.
30101@@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
30102 * it will be undisturbed when we switch. To change %cs and jump we
30103 * need this structure to feed to Intel's "lcall" instruction.
30104 */
30105- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
30106+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
30107 lguest_entry.segment = LGUEST_CS;
30108
30109 /*
30110diff -urNp linux-2.6.32.41/drivers/lguest/x86/switcher_32.S linux-2.6.32.41/drivers/lguest/x86/switcher_32.S
30111--- linux-2.6.32.41/drivers/lguest/x86/switcher_32.S 2011-03-27 14:31:47.000000000 -0400
30112+++ linux-2.6.32.41/drivers/lguest/x86/switcher_32.S 2011-04-17 15:56:46.000000000 -0400
30113@@ -87,6 +87,7 @@
30114 #include <asm/page.h>
30115 #include <asm/segment.h>
30116 #include <asm/lguest.h>
30117+#include <asm/processor-flags.h>
30118
30119 // We mark the start of the code to copy
30120 // It's placed in .text tho it's never run here
30121@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
30122 // Changes type when we load it: damn Intel!
30123 // For after we switch over our page tables
30124 // That entry will be read-only: we'd crash.
30125+
30126+#ifdef CONFIG_PAX_KERNEXEC
30127+ mov %cr0, %edx
30128+ xor $X86_CR0_WP, %edx
30129+ mov %edx, %cr0
30130+#endif
30131+
30132 movl $(GDT_ENTRY_TSS*8), %edx
30133 ltr %dx
30134
30135@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
30136 // Let's clear it again for our return.
30137 // The GDT descriptor of the Host
30138 // Points to the table after two "size" bytes
30139- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
30140+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
30141 // Clear "used" from type field (byte 5, bit 2)
30142- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
30143+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
30144+
30145+#ifdef CONFIG_PAX_KERNEXEC
30146+ mov %cr0, %eax
30147+ xor $X86_CR0_WP, %eax
30148+ mov %eax, %cr0
30149+#endif
30150
30151 // Once our page table's switched, the Guest is live!
30152 // The Host fades as we run this final step.
30153@@ -295,13 +309,12 @@ deliver_to_host:
30154 // I consulted gcc, and it gave
30155 // These instructions, which I gladly credit:
30156 leal (%edx,%ebx,8), %eax
30157- movzwl (%eax),%edx
30158- movl 4(%eax), %eax
30159- xorw %ax, %ax
30160- orl %eax, %edx
30161+ movl 4(%eax), %edx
30162+ movw (%eax), %dx
30163 // Now the address of the handler's in %edx
30164 // We call it now: its "iret" drops us home.
30165- jmp *%edx
30166+ ljmp $__KERNEL_CS, $1f
30167+1: jmp *%edx
30168
30169 // Every interrupt can come to us here
30170 // But we must truly tell each apart.
30171diff -urNp linux-2.6.32.41/drivers/macintosh/via-pmu-backlight.c linux-2.6.32.41/drivers/macintosh/via-pmu-backlight.c
30172--- linux-2.6.32.41/drivers/macintosh/via-pmu-backlight.c 2011-03-27 14:31:47.000000000 -0400
30173+++ linux-2.6.32.41/drivers/macintosh/via-pmu-backlight.c 2011-04-17 15:56:46.000000000 -0400
30174@@ -15,7 +15,7 @@
30175
30176 #define MAX_PMU_LEVEL 0xFF
30177
30178-static struct backlight_ops pmu_backlight_data;
30179+static const struct backlight_ops pmu_backlight_data;
30180 static DEFINE_SPINLOCK(pmu_backlight_lock);
30181 static int sleeping, uses_pmu_bl;
30182 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
30183@@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(
30184 return bd->props.brightness;
30185 }
30186
30187-static struct backlight_ops pmu_backlight_data = {
30188+static const struct backlight_ops pmu_backlight_data = {
30189 .get_brightness = pmu_backlight_get_brightness,
30190 .update_status = pmu_backlight_update_status,
30191
30192diff -urNp linux-2.6.32.41/drivers/macintosh/via-pmu.c linux-2.6.32.41/drivers/macintosh/via-pmu.c
30193--- linux-2.6.32.41/drivers/macintosh/via-pmu.c 2011-03-27 14:31:47.000000000 -0400
30194+++ linux-2.6.32.41/drivers/macintosh/via-pmu.c 2011-04-17 15:56:46.000000000 -0400
30195@@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state
30196 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
30197 }
30198
30199-static struct platform_suspend_ops pmu_pm_ops = {
30200+static const struct platform_suspend_ops pmu_pm_ops = {
30201 .enter = powerbook_sleep,
30202 .valid = pmu_sleep_valid,
30203 };
30204diff -urNp linux-2.6.32.41/drivers/md/dm.c linux-2.6.32.41/drivers/md/dm.c
30205--- linux-2.6.32.41/drivers/md/dm.c 2011-03-27 14:31:47.000000000 -0400
30206+++ linux-2.6.32.41/drivers/md/dm.c 2011-05-04 17:56:28.000000000 -0400
30207@@ -163,9 +163,9 @@ struct mapped_device {
30208 /*
30209 * Event handling.
30210 */
30211- atomic_t event_nr;
30212+ atomic_unchecked_t event_nr;
30213 wait_queue_head_t eventq;
30214- atomic_t uevent_seq;
30215+ atomic_unchecked_t uevent_seq;
30216 struct list_head uevent_list;
30217 spinlock_t uevent_lock; /* Protect access to uevent_list */
30218
30219@@ -1770,8 +1770,8 @@ static struct mapped_device *alloc_dev(i
30220 rwlock_init(&md->map_lock);
30221 atomic_set(&md->holders, 1);
30222 atomic_set(&md->open_count, 0);
30223- atomic_set(&md->event_nr, 0);
30224- atomic_set(&md->uevent_seq, 0);
30225+ atomic_set_unchecked(&md->event_nr, 0);
30226+ atomic_set_unchecked(&md->uevent_seq, 0);
30227 INIT_LIST_HEAD(&md->uevent_list);
30228 spin_lock_init(&md->uevent_lock);
30229
30230@@ -1921,7 +1921,7 @@ static void event_callback(void *context
30231
30232 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
30233
30234- atomic_inc(&md->event_nr);
30235+ atomic_inc_unchecked(&md->event_nr);
30236 wake_up(&md->eventq);
30237 }
30238
30239@@ -2556,18 +2556,18 @@ void dm_kobject_uevent(struct mapped_dev
30240
30241 uint32_t dm_next_uevent_seq(struct mapped_device *md)
30242 {
30243- return atomic_add_return(1, &md->uevent_seq);
30244+ return atomic_add_return_unchecked(1, &md->uevent_seq);
30245 }
30246
30247 uint32_t dm_get_event_nr(struct mapped_device *md)
30248 {
30249- return atomic_read(&md->event_nr);
30250+ return atomic_read_unchecked(&md->event_nr);
30251 }
30252
30253 int dm_wait_event(struct mapped_device *md, int event_nr)
30254 {
30255 return wait_event_interruptible(md->eventq,
30256- (event_nr != atomic_read(&md->event_nr)));
30257+ (event_nr != atomic_read_unchecked(&md->event_nr)));
30258 }
30259
30260 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
30261diff -urNp linux-2.6.32.41/drivers/md/dm-ioctl.c linux-2.6.32.41/drivers/md/dm-ioctl.c
30262--- linux-2.6.32.41/drivers/md/dm-ioctl.c 2011-03-27 14:31:47.000000000 -0400
30263+++ linux-2.6.32.41/drivers/md/dm-ioctl.c 2011-04-17 15:56:46.000000000 -0400
30264@@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, str
30265 cmd == DM_LIST_VERSIONS_CMD)
30266 return 0;
30267
30268- if ((cmd == DM_DEV_CREATE_CMD)) {
30269+ if (cmd == DM_DEV_CREATE_CMD) {
30270 if (!*param->name) {
30271 DMWARN("name not supplied when creating device");
30272 return -EINVAL;
30273diff -urNp linux-2.6.32.41/drivers/md/dm-raid1.c linux-2.6.32.41/drivers/md/dm-raid1.c
30274--- linux-2.6.32.41/drivers/md/dm-raid1.c 2011-03-27 14:31:47.000000000 -0400
30275+++ linux-2.6.32.41/drivers/md/dm-raid1.c 2011-05-04 17:56:28.000000000 -0400
30276@@ -41,7 +41,7 @@ enum dm_raid1_error {
30277
30278 struct mirror {
30279 struct mirror_set *ms;
30280- atomic_t error_count;
30281+ atomic_unchecked_t error_count;
30282 unsigned long error_type;
30283 struct dm_dev *dev;
30284 sector_t offset;
30285@@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m
30286 * simple way to tell if a device has encountered
30287 * errors.
30288 */
30289- atomic_inc(&m->error_count);
30290+ atomic_inc_unchecked(&m->error_count);
30291
30292 if (test_and_set_bit(error_type, &m->error_type))
30293 return;
30294@@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m
30295 }
30296
30297 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
30298- if (!atomic_read(&new->error_count)) {
30299+ if (!atomic_read_unchecked(&new->error_count)) {
30300 set_default_mirror(new);
30301 break;
30302 }
30303@@ -363,7 +363,7 @@ static struct mirror *choose_mirror(stru
30304 struct mirror *m = get_default_mirror(ms);
30305
30306 do {
30307- if (likely(!atomic_read(&m->error_count)))
30308+ if (likely(!atomic_read_unchecked(&m->error_count)))
30309 return m;
30310
30311 if (m-- == ms->mirror)
30312@@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
30313 {
30314 struct mirror *default_mirror = get_default_mirror(m->ms);
30315
30316- return !atomic_read(&default_mirror->error_count);
30317+ return !atomic_read_unchecked(&default_mirror->error_count);
30318 }
30319
30320 static int mirror_available(struct mirror_set *ms, struct bio *bio)
30321@@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *
30322 */
30323 if (likely(region_in_sync(ms, region, 1)))
30324 m = choose_mirror(ms, bio->bi_sector);
30325- else if (m && atomic_read(&m->error_count))
30326+ else if (m && atomic_read_unchecked(&m->error_count))
30327 m = NULL;
30328
30329 if (likely(m))
30330@@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set
30331 }
30332
30333 ms->mirror[mirror].ms = ms;
30334- atomic_set(&(ms->mirror[mirror].error_count), 0);
30335+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
30336 ms->mirror[mirror].error_type = 0;
30337 ms->mirror[mirror].offset = offset;
30338
30339@@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_targ
30340 */
30341 static char device_status_char(struct mirror *m)
30342 {
30343- if (!atomic_read(&(m->error_count)))
30344+ if (!atomic_read_unchecked(&(m->error_count)))
30345 return 'A';
30346
30347 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
30348diff -urNp linux-2.6.32.41/drivers/md/dm-stripe.c linux-2.6.32.41/drivers/md/dm-stripe.c
30349--- linux-2.6.32.41/drivers/md/dm-stripe.c 2011-03-27 14:31:47.000000000 -0400
30350+++ linux-2.6.32.41/drivers/md/dm-stripe.c 2011-05-04 17:56:28.000000000 -0400
30351@@ -20,7 +20,7 @@ struct stripe {
30352 struct dm_dev *dev;
30353 sector_t physical_start;
30354
30355- atomic_t error_count;
30356+ atomic_unchecked_t error_count;
30357 };
30358
30359 struct stripe_c {
30360@@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *
30361 kfree(sc);
30362 return r;
30363 }
30364- atomic_set(&(sc->stripe[i].error_count), 0);
30365+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
30366 }
30367
30368 ti->private = sc;
30369@@ -257,7 +257,7 @@ static int stripe_status(struct dm_targe
30370 DMEMIT("%d ", sc->stripes);
30371 for (i = 0; i < sc->stripes; i++) {
30372 DMEMIT("%s ", sc->stripe[i].dev->name);
30373- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
30374+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
30375 'D' : 'A';
30376 }
30377 buffer[i] = '\0';
30378@@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_targe
30379 */
30380 for (i = 0; i < sc->stripes; i++)
30381 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
30382- atomic_inc(&(sc->stripe[i].error_count));
30383- if (atomic_read(&(sc->stripe[i].error_count)) <
30384+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
30385+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
30386 DM_IO_ERROR_THRESHOLD)
30387 queue_work(kstriped, &sc->kstriped_ws);
30388 }
30389diff -urNp linux-2.6.32.41/drivers/md/dm-sysfs.c linux-2.6.32.41/drivers/md/dm-sysfs.c
30390--- linux-2.6.32.41/drivers/md/dm-sysfs.c 2011-03-27 14:31:47.000000000 -0400
30391+++ linux-2.6.32.41/drivers/md/dm-sysfs.c 2011-04-17 15:56:46.000000000 -0400
30392@@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
30393 NULL,
30394 };
30395
30396-static struct sysfs_ops dm_sysfs_ops = {
30397+static const struct sysfs_ops dm_sysfs_ops = {
30398 .show = dm_attr_show,
30399 };
30400
30401diff -urNp linux-2.6.32.41/drivers/md/dm-table.c linux-2.6.32.41/drivers/md/dm-table.c
30402--- linux-2.6.32.41/drivers/md/dm-table.c 2011-03-27 14:31:47.000000000 -0400
30403+++ linux-2.6.32.41/drivers/md/dm-table.c 2011-04-17 15:56:46.000000000 -0400
30404@@ -359,7 +359,7 @@ static int device_area_is_invalid(struct
30405 if (!dev_size)
30406 return 0;
30407
30408- if ((start >= dev_size) || (start + len > dev_size)) {
30409+ if ((start >= dev_size) || (len > dev_size - start)) {
30410 DMWARN("%s: %s too small for target: "
30411 "start=%llu, len=%llu, dev_size=%llu",
30412 dm_device_name(ti->table->md), bdevname(bdev, b),
30413diff -urNp linux-2.6.32.41/drivers/md/md.c linux-2.6.32.41/drivers/md/md.c
30414--- linux-2.6.32.41/drivers/md/md.c 2011-03-27 14:31:47.000000000 -0400
30415+++ linux-2.6.32.41/drivers/md/md.c 2011-05-04 17:56:20.000000000 -0400
30416@@ -153,10 +153,10 @@ static int start_readonly;
30417 * start build, activate spare
30418 */
30419 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
30420-static atomic_t md_event_count;
30421+static atomic_unchecked_t md_event_count;
30422 void md_new_event(mddev_t *mddev)
30423 {
30424- atomic_inc(&md_event_count);
30425+ atomic_inc_unchecked(&md_event_count);
30426 wake_up(&md_event_waiters);
30427 }
30428 EXPORT_SYMBOL_GPL(md_new_event);
30429@@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
30430 */
30431 static void md_new_event_inintr(mddev_t *mddev)
30432 {
30433- atomic_inc(&md_event_count);
30434+ atomic_inc_unchecked(&md_event_count);
30435 wake_up(&md_event_waiters);
30436 }
30437
30438@@ -1218,7 +1218,7 @@ static int super_1_load(mdk_rdev_t *rdev
30439
30440 rdev->preferred_minor = 0xffff;
30441 rdev->data_offset = le64_to_cpu(sb->data_offset);
30442- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
30443+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
30444
30445 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
30446 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
30447@@ -1392,7 +1392,7 @@ static void super_1_sync(mddev_t *mddev,
30448 else
30449 sb->resync_offset = cpu_to_le64(0);
30450
30451- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
30452+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
30453
30454 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
30455 sb->size = cpu_to_le64(mddev->dev_sectors);
30456@@ -2214,7 +2214,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
30457 static ssize_t
30458 errors_show(mdk_rdev_t *rdev, char *page)
30459 {
30460- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
30461+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
30462 }
30463
30464 static ssize_t
30465@@ -2223,7 +2223,7 @@ errors_store(mdk_rdev_t *rdev, const cha
30466 char *e;
30467 unsigned long n = simple_strtoul(buf, &e, 10);
30468 if (*buf && (*e == 0 || *e == '\n')) {
30469- atomic_set(&rdev->corrected_errors, n);
30470+ atomic_set_unchecked(&rdev->corrected_errors, n);
30471 return len;
30472 }
30473 return -EINVAL;
30474@@ -2517,7 +2517,7 @@ static void rdev_free(struct kobject *ko
30475 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
30476 kfree(rdev);
30477 }
30478-static struct sysfs_ops rdev_sysfs_ops = {
30479+static const struct sysfs_ops rdev_sysfs_ops = {
30480 .show = rdev_attr_show,
30481 .store = rdev_attr_store,
30482 };
30483@@ -2566,8 +2566,8 @@ static mdk_rdev_t *md_import_device(dev_
30484 rdev->data_offset = 0;
30485 rdev->sb_events = 0;
30486 atomic_set(&rdev->nr_pending, 0);
30487- atomic_set(&rdev->read_errors, 0);
30488- atomic_set(&rdev->corrected_errors, 0);
30489+ atomic_set_unchecked(&rdev->read_errors, 0);
30490+ atomic_set_unchecked(&rdev->corrected_errors, 0);
30491
30492 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
30493 if (!size) {
30494@@ -3887,7 +3887,7 @@ static void md_free(struct kobject *ko)
30495 kfree(mddev);
30496 }
30497
30498-static struct sysfs_ops md_sysfs_ops = {
30499+static const struct sysfs_ops md_sysfs_ops = {
30500 .show = md_attr_show,
30501 .store = md_attr_store,
30502 };
30503@@ -4474,7 +4474,8 @@ out:
30504 err = 0;
30505 blk_integrity_unregister(disk);
30506 md_new_event(mddev);
30507- sysfs_notify_dirent(mddev->sysfs_state);
30508+ if (mddev->sysfs_state)
30509+ sysfs_notify_dirent(mddev->sysfs_state);
30510 return err;
30511 }
30512
30513@@ -5954,7 +5955,7 @@ static int md_seq_show(struct seq_file *
30514
30515 spin_unlock(&pers_lock);
30516 seq_printf(seq, "\n");
30517- mi->event = atomic_read(&md_event_count);
30518+ mi->event = atomic_read_unchecked(&md_event_count);
30519 return 0;
30520 }
30521 if (v == (void*)2) {
30522@@ -6043,7 +6044,7 @@ static int md_seq_show(struct seq_file *
30523 chunk_kb ? "KB" : "B");
30524 if (bitmap->file) {
30525 seq_printf(seq, ", file: ");
30526- seq_path(seq, &bitmap->file->f_path, " \t\n");
30527+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
30528 }
30529
30530 seq_printf(seq, "\n");
30531@@ -6077,7 +6078,7 @@ static int md_seq_open(struct inode *ino
30532 else {
30533 struct seq_file *p = file->private_data;
30534 p->private = mi;
30535- mi->event = atomic_read(&md_event_count);
30536+ mi->event = atomic_read_unchecked(&md_event_count);
30537 }
30538 return error;
30539 }
30540@@ -6093,7 +6094,7 @@ static unsigned int mdstat_poll(struct f
30541 /* always allow read */
30542 mask = POLLIN | POLLRDNORM;
30543
30544- if (mi->event != atomic_read(&md_event_count))
30545+ if (mi->event != atomic_read_unchecked(&md_event_count))
30546 mask |= POLLERR | POLLPRI;
30547 return mask;
30548 }
30549@@ -6137,7 +6138,7 @@ static int is_mddev_idle(mddev_t *mddev,
30550 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
30551 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
30552 (int)part_stat_read(&disk->part0, sectors[1]) -
30553- atomic_read(&disk->sync_io);
30554+ atomic_read_unchecked(&disk->sync_io);
30555 /* sync IO will cause sync_io to increase before the disk_stats
30556 * as sync_io is counted when a request starts, and
30557 * disk_stats is counted when it completes.
30558diff -urNp linux-2.6.32.41/drivers/md/md.h linux-2.6.32.41/drivers/md/md.h
30559--- linux-2.6.32.41/drivers/md/md.h 2011-03-27 14:31:47.000000000 -0400
30560+++ linux-2.6.32.41/drivers/md/md.h 2011-05-04 17:56:20.000000000 -0400
30561@@ -94,10 +94,10 @@ struct mdk_rdev_s
30562 * only maintained for arrays that
30563 * support hot removal
30564 */
30565- atomic_t read_errors; /* number of consecutive read errors that
30566+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
30567 * we have tried to ignore.
30568 */
30569- atomic_t corrected_errors; /* number of corrected read errors,
30570+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
30571 * for reporting to userspace and storing
30572 * in superblock.
30573 */
30574@@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_
30575
30576 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
30577 {
30578- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
30579+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
30580 }
30581
30582 struct mdk_personality
30583diff -urNp linux-2.6.32.41/drivers/md/raid10.c linux-2.6.32.41/drivers/md/raid10.c
30584--- linux-2.6.32.41/drivers/md/raid10.c 2011-03-27 14:31:47.000000000 -0400
30585+++ linux-2.6.32.41/drivers/md/raid10.c 2011-05-04 17:56:28.000000000 -0400
30586@@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bi
30587 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
30588 set_bit(R10BIO_Uptodate, &r10_bio->state);
30589 else {
30590- atomic_add(r10_bio->sectors,
30591+ atomic_add_unchecked(r10_bio->sectors,
30592 &conf->mirrors[d].rdev->corrected_errors);
30593 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
30594 md_error(r10_bio->mddev,
30595@@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
30596 test_bit(In_sync, &rdev->flags)) {
30597 atomic_inc(&rdev->nr_pending);
30598 rcu_read_unlock();
30599- atomic_add(s, &rdev->corrected_errors);
30600+ atomic_add_unchecked(s, &rdev->corrected_errors);
30601 if (sync_page_io(rdev->bdev,
30602 r10_bio->devs[sl].addr +
30603 sect + rdev->data_offset,
30604diff -urNp linux-2.6.32.41/drivers/md/raid1.c linux-2.6.32.41/drivers/md/raid1.c
30605--- linux-2.6.32.41/drivers/md/raid1.c 2011-03-27 14:31:47.000000000 -0400
30606+++ linux-2.6.32.41/drivers/md/raid1.c 2011-05-04 17:56:28.000000000 -0400
30607@@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *
30608 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
30609 continue;
30610 rdev = conf->mirrors[d].rdev;
30611- atomic_add(s, &rdev->corrected_errors);
30612+ atomic_add_unchecked(s, &rdev->corrected_errors);
30613 if (sync_page_io(rdev->bdev,
30614 sect + rdev->data_offset,
30615 s<<9,
30616@@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf,
30617 /* Well, this device is dead */
30618 md_error(mddev, rdev);
30619 else {
30620- atomic_add(s, &rdev->corrected_errors);
30621+ atomic_add_unchecked(s, &rdev->corrected_errors);
30622 printk(KERN_INFO
30623 "raid1:%s: read error corrected "
30624 "(%d sectors at %llu on %s)\n",
30625diff -urNp linux-2.6.32.41/drivers/md/raid5.c linux-2.6.32.41/drivers/md/raid5.c
30626--- linux-2.6.32.41/drivers/md/raid5.c 2011-03-27 14:31:47.000000000 -0400
30627+++ linux-2.6.32.41/drivers/md/raid5.c 2011-05-16 21:46:57.000000000 -0400
30628@@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_hea
30629 bi->bi_next = NULL;
30630 if (rw == WRITE &&
30631 test_bit(R5_ReWrite, &sh->dev[i].flags))
30632- atomic_add(STRIPE_SECTORS,
30633+ atomic_add_unchecked(STRIPE_SECTORS,
30634 &rdev->corrected_errors);
30635 generic_make_request(bi);
30636 } else {
30637@@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struc
30638 clear_bit(R5_ReadError, &sh->dev[i].flags);
30639 clear_bit(R5_ReWrite, &sh->dev[i].flags);
30640 }
30641- if (atomic_read(&conf->disks[i].rdev->read_errors))
30642- atomic_set(&conf->disks[i].rdev->read_errors, 0);
30643+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
30644+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
30645 } else {
30646 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
30647 int retry = 0;
30648 rdev = conf->disks[i].rdev;
30649
30650 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
30651- atomic_inc(&rdev->read_errors);
30652+ atomic_inc_unchecked(&rdev->read_errors);
30653 if (conf->mddev->degraded >= conf->max_degraded)
30654 printk_rl(KERN_WARNING
30655 "raid5:%s: read error not correctable "
30656@@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struc
30657 (unsigned long long)(sh->sector
30658 + rdev->data_offset),
30659 bdn);
30660- else if (atomic_read(&rdev->read_errors)
30661+ else if (atomic_read_unchecked(&rdev->read_errors)
30662 > conf->max_nr_stripes)
30663 printk(KERN_WARNING
30664 "raid5:%s: Too many read errors, failing device %s.\n",
30665@@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct s
30666 sector_t r_sector;
30667 struct stripe_head sh2;
30668
30669+ pax_track_stack();
30670
30671 chunk_offset = sector_div(new_sector, sectors_per_chunk);
30672 stripe = new_sector;
30673diff -urNp linux-2.6.32.41/drivers/media/common/saa7146_hlp.c linux-2.6.32.41/drivers/media/common/saa7146_hlp.c
30674--- linux-2.6.32.41/drivers/media/common/saa7146_hlp.c 2011-03-27 14:31:47.000000000 -0400
30675+++ linux-2.6.32.41/drivers/media/common/saa7146_hlp.c 2011-05-16 21:46:57.000000000 -0400
30676@@ -353,6 +353,8 @@ static void calculate_clipping_registers
30677
30678 int x[32], y[32], w[32], h[32];
30679
30680+ pax_track_stack();
30681+
30682 /* clear out memory */
30683 memset(&line_list[0], 0x00, sizeof(u32)*32);
30684 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
30685diff -urNp linux-2.6.32.41/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-2.6.32.41/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
30686--- linux-2.6.32.41/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-03-27 14:31:47.000000000 -0400
30687+++ linux-2.6.32.41/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-05-16 21:46:57.000000000 -0400
30688@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
30689 u8 buf[HOST_LINK_BUF_SIZE];
30690 int i;
30691
30692+ pax_track_stack();
30693+
30694 dprintk("%s\n", __func__);
30695
30696 /* check if we have space for a link buf in the rx_buffer */
30697@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
30698 unsigned long timeout;
30699 int written;
30700
30701+ pax_track_stack();
30702+
30703 dprintk("%s\n", __func__);
30704
30705 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
30706diff -urNp linux-2.6.32.41/drivers/media/dvb/dvb-core/dvbdev.c linux-2.6.32.41/drivers/media/dvb/dvb-core/dvbdev.c
30707--- linux-2.6.32.41/drivers/media/dvb/dvb-core/dvbdev.c 2011-03-27 14:31:47.000000000 -0400
30708+++ linux-2.6.32.41/drivers/media/dvb/dvb-core/dvbdev.c 2011-04-17 15:56:46.000000000 -0400
30709@@ -191,6 +191,7 @@ int dvb_register_device(struct dvb_adapt
30710 const struct dvb_device *template, void *priv, int type)
30711 {
30712 struct dvb_device *dvbdev;
30713+ /* cannot be const */
30714 struct file_operations *dvbdevfops;
30715 struct device *clsdev;
30716 int minor;
30717diff -urNp linux-2.6.32.41/drivers/media/dvb/dvb-usb/dib0700_core.c linux-2.6.32.41/drivers/media/dvb/dvb-usb/dib0700_core.c
30718--- linux-2.6.32.41/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-03-27 14:31:47.000000000 -0400
30719+++ linux-2.6.32.41/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-05-16 21:46:57.000000000 -0400
30720@@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb
30721
30722 u8 buf[260];
30723
30724+ pax_track_stack();
30725+
30726 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
30727 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
30728
30729diff -urNp linux-2.6.32.41/drivers/media/dvb/frontends/or51211.c linux-2.6.32.41/drivers/media/dvb/frontends/or51211.c
30730--- linux-2.6.32.41/drivers/media/dvb/frontends/or51211.c 2011-03-27 14:31:47.000000000 -0400
30731+++ linux-2.6.32.41/drivers/media/dvb/frontends/or51211.c 2011-05-16 21:46:57.000000000 -0400
30732@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
30733 u8 tudata[585];
30734 int i;
30735
30736+ pax_track_stack();
30737+
30738 dprintk("Firmware is %zd bytes\n",fw->size);
30739
30740 /* Get eprom data */
30741diff -urNp linux-2.6.32.41/drivers/media/radio/radio-cadet.c linux-2.6.32.41/drivers/media/radio/radio-cadet.c
30742--- linux-2.6.32.41/drivers/media/radio/radio-cadet.c 2011-03-27 14:31:47.000000000 -0400
30743+++ linux-2.6.32.41/drivers/media/radio/radio-cadet.c 2011-04-17 15:56:46.000000000 -0400
30744@@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *f
30745 while (i < count && dev->rdsin != dev->rdsout)
30746 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
30747
30748- if (copy_to_user(data, readbuf, i))
30749+ if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
30750 return -EFAULT;
30751 return i;
30752 }
30753diff -urNp linux-2.6.32.41/drivers/media/video/cx18/cx18-driver.c linux-2.6.32.41/drivers/media/video/cx18/cx18-driver.c
30754--- linux-2.6.32.41/drivers/media/video/cx18/cx18-driver.c 2011-03-27 14:31:47.000000000 -0400
30755+++ linux-2.6.32.41/drivers/media/video/cx18/cx18-driver.c 2011-05-16 21:46:57.000000000 -0400
30756@@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl
30757
30758 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
30759
30760-static atomic_t cx18_instance = ATOMIC_INIT(0);
30761+static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
30762
30763 /* Parameter declarations */
30764 static int cardtype[CX18_MAX_CARDS];
30765@@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
30766 struct i2c_client c;
30767 u8 eedata[256];
30768
30769+ pax_track_stack();
30770+
30771 memset(&c, 0, sizeof(c));
30772 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
30773 c.adapter = &cx->i2c_adap[0];
30774@@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct p
30775 struct cx18 *cx;
30776
30777 /* FIXME - module parameter arrays constrain max instances */
30778- i = atomic_inc_return(&cx18_instance) - 1;
30779+ i = atomic_inc_return_unchecked(&cx18_instance) - 1;
30780 if (i >= CX18_MAX_CARDS) {
30781 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
30782 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
30783diff -urNp linux-2.6.32.41/drivers/media/video/ivtv/ivtv-driver.c linux-2.6.32.41/drivers/media/video/ivtv/ivtv-driver.c
30784--- linux-2.6.32.41/drivers/media/video/ivtv/ivtv-driver.c 2011-03-27 14:31:47.000000000 -0400
30785+++ linux-2.6.32.41/drivers/media/video/ivtv/ivtv-driver.c 2011-05-04 17:56:28.000000000 -0400
30786@@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl
30787 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
30788
30789 /* ivtv instance counter */
30790-static atomic_t ivtv_instance = ATOMIC_INIT(0);
30791+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
30792
30793 /* Parameter declarations */
30794 static int cardtype[IVTV_MAX_CARDS];
30795diff -urNp linux-2.6.32.41/drivers/media/video/omap24xxcam.c linux-2.6.32.41/drivers/media/video/omap24xxcam.c
30796--- linux-2.6.32.41/drivers/media/video/omap24xxcam.c 2011-03-27 14:31:47.000000000 -0400
30797+++ linux-2.6.32.41/drivers/media/video/omap24xxcam.c 2011-05-04 17:56:28.000000000 -0400
30798@@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(str
30799 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
30800
30801 do_gettimeofday(&vb->ts);
30802- vb->field_count = atomic_add_return(2, &fh->field_count);
30803+ vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
30804 if (csr & csr_error) {
30805 vb->state = VIDEOBUF_ERROR;
30806 if (!atomic_read(&fh->cam->in_reset)) {
30807diff -urNp linux-2.6.32.41/drivers/media/video/omap24xxcam.h linux-2.6.32.41/drivers/media/video/omap24xxcam.h
30808--- linux-2.6.32.41/drivers/media/video/omap24xxcam.h 2011-03-27 14:31:47.000000000 -0400
30809+++ linux-2.6.32.41/drivers/media/video/omap24xxcam.h 2011-05-04 17:56:28.000000000 -0400
30810@@ -533,7 +533,7 @@ struct omap24xxcam_fh {
30811 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
30812 struct videobuf_queue vbq;
30813 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
30814- atomic_t field_count; /* field counter for videobuf_buffer */
30815+ atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
30816 /* accessing cam here doesn't need serialisation: it's constant */
30817 struct omap24xxcam_device *cam;
30818 };
30819diff -urNp linux-2.6.32.41/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-2.6.32.41/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
30820--- linux-2.6.32.41/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-03-27 14:31:47.000000000 -0400
30821+++ linux-2.6.32.41/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-05-16 21:46:57.000000000 -0400
30822@@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
30823 u8 *eeprom;
30824 struct tveeprom tvdata;
30825
30826+ pax_track_stack();
30827+
30828 memset(&tvdata,0,sizeof(tvdata));
30829
30830 eeprom = pvr2_eeprom_fetch(hdw);
30831diff -urNp linux-2.6.32.41/drivers/media/video/saa7134/saa6752hs.c linux-2.6.32.41/drivers/media/video/saa7134/saa6752hs.c
30832--- linux-2.6.32.41/drivers/media/video/saa7134/saa6752hs.c 2011-03-27 14:31:47.000000000 -0400
30833+++ linux-2.6.32.41/drivers/media/video/saa7134/saa6752hs.c 2011-05-16 21:46:57.000000000 -0400
30834@@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_su
30835 unsigned char localPAT[256];
30836 unsigned char localPMT[256];
30837
30838+ pax_track_stack();
30839+
30840 /* Set video format - must be done first as it resets other settings */
30841 set_reg8(client, 0x41, h->video_format);
30842
30843diff -urNp linux-2.6.32.41/drivers/media/video/saa7164/saa7164-cmd.c linux-2.6.32.41/drivers/media/video/saa7164/saa7164-cmd.c
30844--- linux-2.6.32.41/drivers/media/video/saa7164/saa7164-cmd.c 2011-03-27 14:31:47.000000000 -0400
30845+++ linux-2.6.32.41/drivers/media/video/saa7164/saa7164-cmd.c 2011-05-16 21:46:57.000000000 -0400
30846@@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_d
30847 wait_queue_head_t *q = 0;
30848 dprintk(DBGLVL_CMD, "%s()\n", __func__);
30849
30850+ pax_track_stack();
30851+
30852 /* While any outstand message on the bus exists... */
30853 do {
30854
30855@@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
30856 u8 tmp[512];
30857 dprintk(DBGLVL_CMD, "%s()\n", __func__);
30858
30859+ pax_track_stack();
30860+
30861 while (loop) {
30862
30863 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
30864diff -urNp linux-2.6.32.41/drivers/media/video/usbvideo/konicawc.c linux-2.6.32.41/drivers/media/video/usbvideo/konicawc.c
30865--- linux-2.6.32.41/drivers/media/video/usbvideo/konicawc.c 2011-03-27 14:31:47.000000000 -0400
30866+++ linux-2.6.32.41/drivers/media/video/usbvideo/konicawc.c 2011-04-17 15:56:46.000000000 -0400
30867@@ -225,7 +225,7 @@ static void konicawc_register_input(stru
30868 int error;
30869
30870 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
30871- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
30872+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
30873
30874 cam->input = input_dev = input_allocate_device();
30875 if (!input_dev) {
30876diff -urNp linux-2.6.32.41/drivers/media/video/usbvideo/quickcam_messenger.c linux-2.6.32.41/drivers/media/video/usbvideo/quickcam_messenger.c
30877--- linux-2.6.32.41/drivers/media/video/usbvideo/quickcam_messenger.c 2011-03-27 14:31:47.000000000 -0400
30878+++ linux-2.6.32.41/drivers/media/video/usbvideo/quickcam_messenger.c 2011-04-17 15:56:46.000000000 -0400
30879@@ -89,7 +89,7 @@ static void qcm_register_input(struct qc
30880 int error;
30881
30882 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
30883- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
30884+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
30885
30886 cam->input = input_dev = input_allocate_device();
30887 if (!input_dev) {
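Both webcam drivers above passed sizeof(dest) as the third argument of strncat(), but that argument limits how many bytes are appended, not the total size of the destination, so a nearly full buffer can still overflow; strlcat() bounds the whole destination instead. strlcat() is a BSD/kernel helper rather than ISO C, so the sketch below carries a minimal local definition purely for illustration:

#include <stdio.h>
#include <string.h>

/* Minimal strlcat(): append src to dst without writing past siz bytes total. */
static size_t my_strlcat(char *dst, const char *src, size_t siz)
{
	size_t dlen = strnlen(dst, siz);
	size_t slen = strlen(src);

	if (dlen == siz)			/* destination already full/unterminated */
		return siz + slen;

	if (slen < siz - dlen) {
		memcpy(dst + dlen, src, slen + 1);
	} else {
		memcpy(dst + dlen, src, siz - dlen - 1);
		dst[siz - 1] = '\0';		/* always NUL-terminate */
	}
	return dlen + slen;
}

int main(void)
{
	char physname[16] = "usb-0000:00:1d";	/* 14 chars plus NUL in a 16-byte buffer */

	/* strncat(physname, "/input0", sizeof(physname)) could write up to
	 * 14 + 7 + 1 = 22 bytes here; the destination-bounded append truncates safely. */
	my_strlcat(physname, "/input0", sizeof(physname));
	puts(physname);
	return 0;
}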
30888diff -urNp linux-2.6.32.41/drivers/media/video/usbvision/usbvision-core.c linux-2.6.32.41/drivers/media/video/usbvision/usbvision-core.c
30889--- linux-2.6.32.41/drivers/media/video/usbvision/usbvision-core.c 2011-03-27 14:31:47.000000000 -0400
30890+++ linux-2.6.32.41/drivers/media/video/usbvision/usbvision-core.c 2011-05-16 21:46:57.000000000 -0400
30891@@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_c
30892 unsigned char rv, gv, bv;
30893 static unsigned char *Y, *U, *V;
30894
30895+ pax_track_stack();
30896+
30897 frame = usbvision->curFrame;
30898 imageSize = frame->frmwidth * frame->frmheight;
30899 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
30900diff -urNp linux-2.6.32.41/drivers/media/video/v4l2-device.c linux-2.6.32.41/drivers/media/video/v4l2-device.c
30901--- linux-2.6.32.41/drivers/media/video/v4l2-device.c 2011-03-27 14:31:47.000000000 -0400
30902+++ linux-2.6.32.41/drivers/media/video/v4l2-device.c 2011-05-04 17:56:28.000000000 -0400
30903@@ -50,9 +50,9 @@ int v4l2_device_register(struct device *
30904 EXPORT_SYMBOL_GPL(v4l2_device_register);
30905
30906 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
30907- atomic_t *instance)
30908+ atomic_unchecked_t *instance)
30909 {
30910- int num = atomic_inc_return(instance) - 1;
30911+ int num = atomic_inc_return_unchecked(instance) - 1;
30912 int len = strlen(basename);
30913
30914 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
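The _unchecked atomic variants used above (and throughout the patch) mark counters whose wraparound is harmless, such as device instance numbers and statistics, so that PaX's reference-counter overflow protection stays armed only for atomics that really are refcounts. A rough userland sketch of the distinction, assuming hypothetical checked_inc()/unchecked_inc() helpers rather than the real PaX primitives:

#include <assert.h>
#include <limits.h>
#include <stdio.h>

/* Refcount-style increment: overflow here is a bug worth trapping,
 * because a wrapped refcount leads to premature frees (use-after-free). */
static int checked_inc(int *v)
{
	assert(*v != INT_MAX);		/* stand-in for the overflow trap */
	return ++*v;
}

/* Instance/statistics counter: wraparound is benign, so no trap. */
static unsigned int unchecked_inc(unsigned int *v)
{
	return ++*v;			/* unsigned wraparound is well defined */
}

int main(void)
{
	int refcount = 1;
	unsigned int instance = UINT_MAX;	/* about to wrap, and that is fine */

	printf("refcount=%d instance=%u\n",
	       checked_inc(&refcount), unchecked_inc(&instance));
	return 0;
}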
30915diff -urNp linux-2.6.32.41/drivers/media/video/videobuf-dma-sg.c linux-2.6.32.41/drivers/media/video/videobuf-dma-sg.c
30916--- linux-2.6.32.41/drivers/media/video/videobuf-dma-sg.c 2011-03-27 14:31:47.000000000 -0400
30917+++ linux-2.6.32.41/drivers/media/video/videobuf-dma-sg.c 2011-05-16 21:46:57.000000000 -0400
30918@@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
30919 {
30920 struct videobuf_queue q;
30921
30922+ pax_track_stack();
30923+
30924 /* Required to make generic handler to call __videobuf_alloc */
30925 q.int_ops = &sg_ops;
30926
30927diff -urNp linux-2.6.32.41/drivers/message/fusion/mptbase.c linux-2.6.32.41/drivers/message/fusion/mptbase.c
30928--- linux-2.6.32.41/drivers/message/fusion/mptbase.c 2011-03-27 14:31:47.000000000 -0400
30929+++ linux-2.6.32.41/drivers/message/fusion/mptbase.c 2011-04-17 15:56:46.000000000 -0400
30930@@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **s
30931 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
30932 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
30933
30934+#ifdef CONFIG_GRKERNSEC_HIDESYM
30935+ len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
30936+ NULL, NULL);
30937+#else
30938 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
30939 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
30940+#endif
30941+
30942 /*
30943 * Rounding UP to nearest 4-kB boundary here...
30944 */
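Under CONFIG_GRKERNSEC_HIDESYM the /proc output above prints NULL instead of the real request-frame addresses, since kernel pointers leaked to unprivileged readers undermine kernel address randomization and ease exploitation. A compressed sketch of the pattern, with a hypothetical HIDE_KERNEL_POINTERS macro standing in for the grsecurity option:

#include <stdio.h>

#define HIDE_KERNEL_POINTERS 1		/* stand-in for CONFIG_GRKERNSEC_HIDESYM */

static void show_frames(const void *frames, unsigned long frames_dma)
{
#if HIDE_KERNEL_POINTERS
	(void)frames;
	(void)frames_dma;
	/* Unprivileged readers learn nothing about kernel memory layout. */
	printf("RequestFrames @ %p (Dma @ %p)\n", (void *)NULL, (void *)NULL);
#else
	printf("RequestFrames @ %p (Dma @ %p)\n", frames, (void *)frames_dma);
#endif
}

int main(void)
{
	int dummy = 0;

	show_frames(&dummy, 0x1000);
	return 0;
}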
30945diff -urNp linux-2.6.32.41/drivers/message/fusion/mptsas.c linux-2.6.32.41/drivers/message/fusion/mptsas.c
30946--- linux-2.6.32.41/drivers/message/fusion/mptsas.c 2011-03-27 14:31:47.000000000 -0400
30947+++ linux-2.6.32.41/drivers/message/fusion/mptsas.c 2011-04-17 15:56:46.000000000 -0400
30948@@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devin
30949 return 0;
30950 }
30951
30952+static inline void
30953+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
30954+{
30955+ if (phy_info->port_details) {
30956+ phy_info->port_details->rphy = rphy;
30957+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
30958+ ioc->name, rphy));
30959+ }
30960+
30961+ if (rphy) {
30962+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
30963+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
30964+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
30965+ ioc->name, rphy, rphy->dev.release));
30966+ }
30967+}
30968+
30969 /* no mutex */
30970 static void
30971 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
30972@@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
30973 return NULL;
30974 }
30975
30976-static inline void
30977-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
30978-{
30979- if (phy_info->port_details) {
30980- phy_info->port_details->rphy = rphy;
30981- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
30982- ioc->name, rphy));
30983- }
30984-
30985- if (rphy) {
30986- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
30987- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
30988- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
30989- ioc->name, rphy, rphy->dev.release));
30990- }
30991-}
30992-
30993 static inline struct sas_port *
30994 mptsas_get_port(struct mptsas_phyinfo *phy_info)
30995 {
30996diff -urNp linux-2.6.32.41/drivers/message/fusion/mptscsih.c linux-2.6.32.41/drivers/message/fusion/mptscsih.c
30997--- linux-2.6.32.41/drivers/message/fusion/mptscsih.c 2011-03-27 14:31:47.000000000 -0400
30998+++ linux-2.6.32.41/drivers/message/fusion/mptscsih.c 2011-04-17 15:56:46.000000000 -0400
30999@@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
31000
31001 h = shost_priv(SChost);
31002
31003- if (h) {
31004- if (h->info_kbuf == NULL)
31005- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31006- return h->info_kbuf;
31007- h->info_kbuf[0] = '\0';
31008+ if (!h)
31009+ return NULL;
31010
31011- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31012- h->info_kbuf[size-1] = '\0';
31013- }
31014+ if (h->info_kbuf == NULL)
31015+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31016+ return h->info_kbuf;
31017+ h->info_kbuf[0] = '\0';
31018+
31019+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31020+ h->info_kbuf[size-1] = '\0';
31021
31022 return h->info_kbuf;
31023 }
31024diff -urNp linux-2.6.32.41/drivers/message/i2o/i2o_config.c linux-2.6.32.41/drivers/message/i2o/i2o_config.c
31025--- linux-2.6.32.41/drivers/message/i2o/i2o_config.c 2011-03-27 14:31:47.000000000 -0400
31026+++ linux-2.6.32.41/drivers/message/i2o/i2o_config.c 2011-05-16 21:46:57.000000000 -0400
31027@@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned lon
31028 struct i2o_message *msg;
31029 unsigned int iop;
31030
31031+ pax_track_stack();
31032+
31033 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
31034 return -EFAULT;
31035
31036diff -urNp linux-2.6.32.41/drivers/message/i2o/i2o_proc.c linux-2.6.32.41/drivers/message/i2o/i2o_proc.c
31037--- linux-2.6.32.41/drivers/message/i2o/i2o_proc.c 2011-03-27 14:31:47.000000000 -0400
31038+++ linux-2.6.32.41/drivers/message/i2o/i2o_proc.c 2011-04-17 15:56:46.000000000 -0400
31039@@ -259,13 +259,6 @@ static char *scsi_devices[] = {
31040 "Array Controller Device"
31041 };
31042
31043-static char *chtostr(u8 * chars, int n)
31044-{
31045- char tmp[256];
31046- tmp[0] = 0;
31047- return strncat(tmp, (char *)chars, n);
31048-}
31049-
31050 static int i2o_report_query_status(struct seq_file *seq, int block_status,
31051 char *group)
31052 {
31053@@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct
31054
31055 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
31056 seq_printf(seq, "%-#8x", ddm_table.module_id);
31057- seq_printf(seq, "%-29s",
31058- chtostr(ddm_table.module_name_version, 28));
31059+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
31060 seq_printf(seq, "%9d ", ddm_table.data_size);
31061 seq_printf(seq, "%8d", ddm_table.code_size);
31062
31063@@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(s
31064
31065 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
31066 seq_printf(seq, "%-#8x", dst->module_id);
31067- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
31068- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
31069+ seq_printf(seq, "%-.28s", dst->module_name_version);
31070+ seq_printf(seq, "%-.8s", dst->date);
31071 seq_printf(seq, "%8d ", dst->module_size);
31072 seq_printf(seq, "%8d ", dst->mpb_size);
31073 seq_printf(seq, "0x%04x", dst->module_flags);
31074@@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(str
31075 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
31076 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
31077 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
31078- seq_printf(seq, "Vendor info : %s\n",
31079- chtostr((u8 *) (work32 + 2), 16));
31080- seq_printf(seq, "Product info : %s\n",
31081- chtostr((u8 *) (work32 + 6), 16));
31082- seq_printf(seq, "Description : %s\n",
31083- chtostr((u8 *) (work32 + 10), 16));
31084- seq_printf(seq, "Product rev. : %s\n",
31085- chtostr((u8 *) (work32 + 14), 8));
31086+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
31087+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
31088+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
31089+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
31090
31091 seq_printf(seq, "Serial number : ");
31092 print_serial_number(seq, (u8 *) (work32 + 16),
31093@@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(str
31094 }
31095
31096 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
31097- seq_printf(seq, "Module name : %s\n",
31098- chtostr(result.module_name, 24));
31099- seq_printf(seq, "Module revision : %s\n",
31100- chtostr(result.module_rev, 8));
31101+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
31102+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
31103
31104 seq_printf(seq, "Serial number : ");
31105 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
31106@@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq
31107 return 0;
31108 }
31109
31110- seq_printf(seq, "Device name : %s\n",
31111- chtostr(result.device_name, 64));
31112- seq_printf(seq, "Service name : %s\n",
31113- chtostr(result.service_name, 64));
31114- seq_printf(seq, "Physical name : %s\n",
31115- chtostr(result.physical_location, 64));
31116- seq_printf(seq, "Instance number : %s\n",
31117- chtostr(result.instance_number, 4));
31118+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
31119+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
31120+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
31121+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
31122
31123 return 0;
31124 }
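The removed chtostr() helper strncat()'ed unterminated fixed-width firmware fields into a 256-byte local buffer and returned a pointer to that stack storage, which is dead once the helper returns; a printf precision such as "%.28s" prints at most that many bytes straight from the source, with no copy and no terminator requirement. A small sketch, assuming a 24-byte module_name field like the I2O structures above:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Fixed-width field as it arrives from hardware: space padded,
	 * not guaranteed to be NUL terminated. */
	char module_name[24];

	memset(module_name, ' ', sizeof(module_name));
	memcpy(module_name, "i2o_block", 9);

	/* "%.24s" stops after 24 bytes even without a terminator,
	 * so no temporary buffer (and no strncat misuse) is needed. */
	printf("Module name : %.24s\n", module_name);
	return 0;
}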
31125diff -urNp linux-2.6.32.41/drivers/message/i2o/iop.c linux-2.6.32.41/drivers/message/i2o/iop.c
31126--- linux-2.6.32.41/drivers/message/i2o/iop.c 2011-03-27 14:31:47.000000000 -0400
31127+++ linux-2.6.32.41/drivers/message/i2o/iop.c 2011-05-04 17:56:28.000000000 -0400
31128@@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
31129
31130 spin_lock_irqsave(&c->context_list_lock, flags);
31131
31132- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
31133- atomic_inc(&c->context_list_counter);
31134+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
31135+ atomic_inc_unchecked(&c->context_list_counter);
31136
31137- entry->context = atomic_read(&c->context_list_counter);
31138+ entry->context = atomic_read_unchecked(&c->context_list_counter);
31139
31140 list_add(&entry->list, &c->context_list);
31141
31142@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(voi
31143
31144 #if BITS_PER_LONG == 64
31145 spin_lock_init(&c->context_list_lock);
31146- atomic_set(&c->context_list_counter, 0);
31147+ atomic_set_unchecked(&c->context_list_counter, 0);
31148 INIT_LIST_HEAD(&c->context_list);
31149 #endif
31150
31151diff -urNp linux-2.6.32.41/drivers/mfd/wm8350-i2c.c linux-2.6.32.41/drivers/mfd/wm8350-i2c.c
31152--- linux-2.6.32.41/drivers/mfd/wm8350-i2c.c 2011-03-27 14:31:47.000000000 -0400
31153+++ linux-2.6.32.41/drivers/mfd/wm8350-i2c.c 2011-05-16 21:46:57.000000000 -0400
31154@@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struc
31155 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
31156 int ret;
31157
31158+ pax_track_stack();
31159+
31160 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
31161 return -EINVAL;
31162
31163diff -urNp linux-2.6.32.41/drivers/misc/kgdbts.c linux-2.6.32.41/drivers/misc/kgdbts.c
31164--- linux-2.6.32.41/drivers/misc/kgdbts.c 2011-03-27 14:31:47.000000000 -0400
31165+++ linux-2.6.32.41/drivers/misc/kgdbts.c 2011-04-17 15:56:46.000000000 -0400
31166@@ -118,7 +118,7 @@
31167 } while (0)
31168 #define MAX_CONFIG_LEN 40
31169
31170-static struct kgdb_io kgdbts_io_ops;
31171+static const struct kgdb_io kgdbts_io_ops;
31172 static char get_buf[BUFMAX];
31173 static int get_buf_cnt;
31174 static char put_buf[BUFMAX];
31175@@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void
31176 module_put(THIS_MODULE);
31177 }
31178
31179-static struct kgdb_io kgdbts_io_ops = {
31180+static const struct kgdb_io kgdbts_io_ops = {
31181 .name = "kgdbts",
31182 .read_char = kgdbts_get_char,
31183 .write_char = kgdbts_put_char,
31184diff -urNp linux-2.6.32.41/drivers/misc/sgi-gru/gruhandles.c linux-2.6.32.41/drivers/misc/sgi-gru/gruhandles.c
31185--- linux-2.6.32.41/drivers/misc/sgi-gru/gruhandles.c 2011-03-27 14:31:47.000000000 -0400
31186+++ linux-2.6.32.41/drivers/misc/sgi-gru/gruhandles.c 2011-04-17 15:56:46.000000000 -0400
31187@@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistic
31188
31189 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
31190 {
31191- atomic_long_inc(&mcs_op_statistics[op].count);
31192- atomic_long_add(clks, &mcs_op_statistics[op].total);
31193+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
31194+ atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
31195 if (mcs_op_statistics[op].max < clks)
31196 mcs_op_statistics[op].max = clks;
31197 }
31198diff -urNp linux-2.6.32.41/drivers/misc/sgi-gru/gruprocfs.c linux-2.6.32.41/drivers/misc/sgi-gru/gruprocfs.c
31199--- linux-2.6.32.41/drivers/misc/sgi-gru/gruprocfs.c 2011-03-27 14:31:47.000000000 -0400
31200+++ linux-2.6.32.41/drivers/misc/sgi-gru/gruprocfs.c 2011-04-17 15:56:46.000000000 -0400
31201@@ -32,9 +32,9 @@
31202
31203 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
31204
31205-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
31206+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
31207 {
31208- unsigned long val = atomic_long_read(v);
31209+ unsigned long val = atomic_long_read_unchecked(v);
31210
31211 if (val)
31212 seq_printf(s, "%16lu %s\n", val, id);
31213@@ -136,8 +136,8 @@ static int mcs_statistics_show(struct se
31214 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
31215
31216 for (op = 0; op < mcsop_last; op++) {
31217- count = atomic_long_read(&mcs_op_statistics[op].count);
31218- total = atomic_long_read(&mcs_op_statistics[op].total);
31219+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
31220+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
31221 max = mcs_op_statistics[op].max;
31222 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
31223 count ? total / count : 0, max);
31224diff -urNp linux-2.6.32.41/drivers/misc/sgi-gru/grutables.h linux-2.6.32.41/drivers/misc/sgi-gru/grutables.h
31225--- linux-2.6.32.41/drivers/misc/sgi-gru/grutables.h 2011-03-27 14:31:47.000000000 -0400
31226+++ linux-2.6.32.41/drivers/misc/sgi-gru/grutables.h 2011-04-17 15:56:46.000000000 -0400
31227@@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
31228 * GRU statistics.
31229 */
31230 struct gru_stats_s {
31231- atomic_long_t vdata_alloc;
31232- atomic_long_t vdata_free;
31233- atomic_long_t gts_alloc;
31234- atomic_long_t gts_free;
31235- atomic_long_t vdata_double_alloc;
31236- atomic_long_t gts_double_allocate;
31237- atomic_long_t assign_context;
31238- atomic_long_t assign_context_failed;
31239- atomic_long_t free_context;
31240- atomic_long_t load_user_context;
31241- atomic_long_t load_kernel_context;
31242- atomic_long_t lock_kernel_context;
31243- atomic_long_t unlock_kernel_context;
31244- atomic_long_t steal_user_context;
31245- atomic_long_t steal_kernel_context;
31246- atomic_long_t steal_context_failed;
31247- atomic_long_t nopfn;
31248- atomic_long_t break_cow;
31249- atomic_long_t asid_new;
31250- atomic_long_t asid_next;
31251- atomic_long_t asid_wrap;
31252- atomic_long_t asid_reuse;
31253- atomic_long_t intr;
31254- atomic_long_t intr_mm_lock_failed;
31255- atomic_long_t call_os;
31256- atomic_long_t call_os_offnode_reference;
31257- atomic_long_t call_os_check_for_bug;
31258- atomic_long_t call_os_wait_queue;
31259- atomic_long_t user_flush_tlb;
31260- atomic_long_t user_unload_context;
31261- atomic_long_t user_exception;
31262- atomic_long_t set_context_option;
31263- atomic_long_t migrate_check;
31264- atomic_long_t migrated_retarget;
31265- atomic_long_t migrated_unload;
31266- atomic_long_t migrated_unload_delay;
31267- atomic_long_t migrated_nopfn_retarget;
31268- atomic_long_t migrated_nopfn_unload;
31269- atomic_long_t tlb_dropin;
31270- atomic_long_t tlb_dropin_fail_no_asid;
31271- atomic_long_t tlb_dropin_fail_upm;
31272- atomic_long_t tlb_dropin_fail_invalid;
31273- atomic_long_t tlb_dropin_fail_range_active;
31274- atomic_long_t tlb_dropin_fail_idle;
31275- atomic_long_t tlb_dropin_fail_fmm;
31276- atomic_long_t tlb_dropin_fail_no_exception;
31277- atomic_long_t tlb_dropin_fail_no_exception_war;
31278- atomic_long_t tfh_stale_on_fault;
31279- atomic_long_t mmu_invalidate_range;
31280- atomic_long_t mmu_invalidate_page;
31281- atomic_long_t mmu_clear_flush_young;
31282- atomic_long_t flush_tlb;
31283- atomic_long_t flush_tlb_gru;
31284- atomic_long_t flush_tlb_gru_tgh;
31285- atomic_long_t flush_tlb_gru_zero_asid;
31286-
31287- atomic_long_t copy_gpa;
31288-
31289- atomic_long_t mesq_receive;
31290- atomic_long_t mesq_receive_none;
31291- atomic_long_t mesq_send;
31292- atomic_long_t mesq_send_failed;
31293- atomic_long_t mesq_noop;
31294- atomic_long_t mesq_send_unexpected_error;
31295- atomic_long_t mesq_send_lb_overflow;
31296- atomic_long_t mesq_send_qlimit_reached;
31297- atomic_long_t mesq_send_amo_nacked;
31298- atomic_long_t mesq_send_put_nacked;
31299- atomic_long_t mesq_qf_not_full;
31300- atomic_long_t mesq_qf_locked;
31301- atomic_long_t mesq_qf_noop_not_full;
31302- atomic_long_t mesq_qf_switch_head_failed;
31303- atomic_long_t mesq_qf_unexpected_error;
31304- atomic_long_t mesq_noop_unexpected_error;
31305- atomic_long_t mesq_noop_lb_overflow;
31306- atomic_long_t mesq_noop_qlimit_reached;
31307- atomic_long_t mesq_noop_amo_nacked;
31308- atomic_long_t mesq_noop_put_nacked;
31309+ atomic_long_unchecked_t vdata_alloc;
31310+ atomic_long_unchecked_t vdata_free;
31311+ atomic_long_unchecked_t gts_alloc;
31312+ atomic_long_unchecked_t gts_free;
31313+ atomic_long_unchecked_t vdata_double_alloc;
31314+ atomic_long_unchecked_t gts_double_allocate;
31315+ atomic_long_unchecked_t assign_context;
31316+ atomic_long_unchecked_t assign_context_failed;
31317+ atomic_long_unchecked_t free_context;
31318+ atomic_long_unchecked_t load_user_context;
31319+ atomic_long_unchecked_t load_kernel_context;
31320+ atomic_long_unchecked_t lock_kernel_context;
31321+ atomic_long_unchecked_t unlock_kernel_context;
31322+ atomic_long_unchecked_t steal_user_context;
31323+ atomic_long_unchecked_t steal_kernel_context;
31324+ atomic_long_unchecked_t steal_context_failed;
31325+ atomic_long_unchecked_t nopfn;
31326+ atomic_long_unchecked_t break_cow;
31327+ atomic_long_unchecked_t asid_new;
31328+ atomic_long_unchecked_t asid_next;
31329+ atomic_long_unchecked_t asid_wrap;
31330+ atomic_long_unchecked_t asid_reuse;
31331+ atomic_long_unchecked_t intr;
31332+ atomic_long_unchecked_t intr_mm_lock_failed;
31333+ atomic_long_unchecked_t call_os;
31334+ atomic_long_unchecked_t call_os_offnode_reference;
31335+ atomic_long_unchecked_t call_os_check_for_bug;
31336+ atomic_long_unchecked_t call_os_wait_queue;
31337+ atomic_long_unchecked_t user_flush_tlb;
31338+ atomic_long_unchecked_t user_unload_context;
31339+ atomic_long_unchecked_t user_exception;
31340+ atomic_long_unchecked_t set_context_option;
31341+ atomic_long_unchecked_t migrate_check;
31342+ atomic_long_unchecked_t migrated_retarget;
31343+ atomic_long_unchecked_t migrated_unload;
31344+ atomic_long_unchecked_t migrated_unload_delay;
31345+ atomic_long_unchecked_t migrated_nopfn_retarget;
31346+ atomic_long_unchecked_t migrated_nopfn_unload;
31347+ atomic_long_unchecked_t tlb_dropin;
31348+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
31349+ atomic_long_unchecked_t tlb_dropin_fail_upm;
31350+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
31351+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
31352+ atomic_long_unchecked_t tlb_dropin_fail_idle;
31353+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
31354+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
31355+ atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
31356+ atomic_long_unchecked_t tfh_stale_on_fault;
31357+ atomic_long_unchecked_t mmu_invalidate_range;
31358+ atomic_long_unchecked_t mmu_invalidate_page;
31359+ atomic_long_unchecked_t mmu_clear_flush_young;
31360+ atomic_long_unchecked_t flush_tlb;
31361+ atomic_long_unchecked_t flush_tlb_gru;
31362+ atomic_long_unchecked_t flush_tlb_gru_tgh;
31363+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
31364+
31365+ atomic_long_unchecked_t copy_gpa;
31366+
31367+ atomic_long_unchecked_t mesq_receive;
31368+ atomic_long_unchecked_t mesq_receive_none;
31369+ atomic_long_unchecked_t mesq_send;
31370+ atomic_long_unchecked_t mesq_send_failed;
31371+ atomic_long_unchecked_t mesq_noop;
31372+ atomic_long_unchecked_t mesq_send_unexpected_error;
31373+ atomic_long_unchecked_t mesq_send_lb_overflow;
31374+ atomic_long_unchecked_t mesq_send_qlimit_reached;
31375+ atomic_long_unchecked_t mesq_send_amo_nacked;
31376+ atomic_long_unchecked_t mesq_send_put_nacked;
31377+ atomic_long_unchecked_t mesq_qf_not_full;
31378+ atomic_long_unchecked_t mesq_qf_locked;
31379+ atomic_long_unchecked_t mesq_qf_noop_not_full;
31380+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
31381+ atomic_long_unchecked_t mesq_qf_unexpected_error;
31382+ atomic_long_unchecked_t mesq_noop_unexpected_error;
31383+ atomic_long_unchecked_t mesq_noop_lb_overflow;
31384+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
31385+ atomic_long_unchecked_t mesq_noop_amo_nacked;
31386+ atomic_long_unchecked_t mesq_noop_put_nacked;
31387
31388 };
31389
31390@@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start
31391 cchop_deallocate, tghop_invalidate, mcsop_last};
31392
31393 struct mcs_op_statistic {
31394- atomic_long_t count;
31395- atomic_long_t total;
31396+ atomic_long_unchecked_t count;
31397+ atomic_long_unchecked_t total;
31398 unsigned long max;
31399 };
31400
31401@@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_st
31402
31403 #define STAT(id) do { \
31404 if (gru_options & OPT_STATS) \
31405- atomic_long_inc(&gru_stats.id); \
31406+ atomic_long_inc_unchecked(&gru_stats.id); \
31407 } while (0)
31408
31409 #ifdef CONFIG_SGI_GRU_DEBUG
31410diff -urNp linux-2.6.32.41/drivers/mtd/chips/cfi_cmdset_0001.c linux-2.6.32.41/drivers/mtd/chips/cfi_cmdset_0001.c
31411--- linux-2.6.32.41/drivers/mtd/chips/cfi_cmdset_0001.c 2011-03-27 14:31:47.000000000 -0400
31412+++ linux-2.6.32.41/drivers/mtd/chips/cfi_cmdset_0001.c 2011-05-16 21:46:57.000000000 -0400
31413@@ -743,6 +743,8 @@ static int chip_ready (struct map_info *
31414 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
31415 unsigned long timeo = jiffies + HZ;
31416
31417+ pax_track_stack();
31418+
31419 /* Prevent setting state FL_SYNCING for chip in suspended state. */
31420 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
31421 goto sleep;
31422@@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(stru
31423 unsigned long initial_adr;
31424 int initial_len = len;
31425
31426+ pax_track_stack();
31427+
31428 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
31429 adr += chip->start;
31430 initial_adr = adr;
31431@@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(st
31432 int retries = 3;
31433 int ret;
31434
31435+ pax_track_stack();
31436+
31437 adr += chip->start;
31438
31439 retry:
31440diff -urNp linux-2.6.32.41/drivers/mtd/chips/cfi_cmdset_0020.c linux-2.6.32.41/drivers/mtd/chips/cfi_cmdset_0020.c
31441--- linux-2.6.32.41/drivers/mtd/chips/cfi_cmdset_0020.c 2011-03-27 14:31:47.000000000 -0400
31442+++ linux-2.6.32.41/drivers/mtd/chips/cfi_cmdset_0020.c 2011-05-16 21:46:57.000000000 -0400
31443@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
31444 unsigned long cmd_addr;
31445 struct cfi_private *cfi = map->fldrv_priv;
31446
31447+ pax_track_stack();
31448+
31449 adr += chip->start;
31450
31451 /* Ensure cmd read/writes are aligned. */
31452@@ -428,6 +430,8 @@ static inline int do_write_buffer(struct
31453 DECLARE_WAITQUEUE(wait, current);
31454 int wbufsize, z;
31455
31456+ pax_track_stack();
31457+
31458 /* M58LW064A requires bus alignment for buffer wriets -- saw */
31459 if (adr & (map_bankwidth(map)-1))
31460 return -EINVAL;
31461@@ -742,6 +746,8 @@ static inline int do_erase_oneblock(stru
31462 DECLARE_WAITQUEUE(wait, current);
31463 int ret = 0;
31464
31465+ pax_track_stack();
31466+
31467 adr += chip->start;
31468
31469 /* Let's determine this according to the interleave only once */
31470@@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struc
31471 unsigned long timeo = jiffies + HZ;
31472 DECLARE_WAITQUEUE(wait, current);
31473
31474+ pax_track_stack();
31475+
31476 adr += chip->start;
31477
31478 /* Let's determine this according to the interleave only once */
31479@@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(str
31480 unsigned long timeo = jiffies + HZ;
31481 DECLARE_WAITQUEUE(wait, current);
31482
31483+ pax_track_stack();
31484+
31485 adr += chip->start;
31486
31487 /* Let's determine this according to the interleave only once */
31488diff -urNp linux-2.6.32.41/drivers/mtd/devices/doc2000.c linux-2.6.32.41/drivers/mtd/devices/doc2000.c
31489--- linux-2.6.32.41/drivers/mtd/devices/doc2000.c 2011-03-27 14:31:47.000000000 -0400
31490+++ linux-2.6.32.41/drivers/mtd/devices/doc2000.c 2011-04-17 15:56:46.000000000 -0400
31491@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
31492
31493 /* The ECC will not be calculated correctly if less than 512 is written */
31494 /* DBB-
31495- if (len != 0x200 && eccbuf)
31496+ if (len != 0x200)
31497 printk(KERN_WARNING
31498 "ECC needs a full sector write (adr: %lx size %lx)\n",
31499 (long) to, (long) len);
31500diff -urNp linux-2.6.32.41/drivers/mtd/devices/doc2001.c linux-2.6.32.41/drivers/mtd/devices/doc2001.c
31501--- linux-2.6.32.41/drivers/mtd/devices/doc2001.c 2011-03-27 14:31:47.000000000 -0400
31502+++ linux-2.6.32.41/drivers/mtd/devices/doc2001.c 2011-04-17 15:56:46.000000000 -0400
31503@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
31504 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
31505
31506 /* Don't allow read past end of device */
31507- if (from >= this->totlen)
31508+ if (from >= this->totlen || !len)
31509 return -EINVAL;
31510
31511 /* Don't allow a single read to cross a 512-byte block boundary */
31512diff -urNp linux-2.6.32.41/drivers/mtd/ftl.c linux-2.6.32.41/drivers/mtd/ftl.c
31513--- linux-2.6.32.41/drivers/mtd/ftl.c 2011-03-27 14:31:47.000000000 -0400
31514+++ linux-2.6.32.41/drivers/mtd/ftl.c 2011-05-16 21:46:57.000000000 -0400
31515@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
31516 loff_t offset;
31517 uint16_t srcunitswap = cpu_to_le16(srcunit);
31518
31519+ pax_track_stack();
31520+
31521 eun = &part->EUNInfo[srcunit];
31522 xfer = &part->XferInfo[xferunit];
31523 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
31524diff -urNp linux-2.6.32.41/drivers/mtd/inftlcore.c linux-2.6.32.41/drivers/mtd/inftlcore.c
31525--- linux-2.6.32.41/drivers/mtd/inftlcore.c 2011-03-27 14:31:47.000000000 -0400
31526+++ linux-2.6.32.41/drivers/mtd/inftlcore.c 2011-05-16 21:46:57.000000000 -0400
31527@@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLr
31528 struct inftl_oob oob;
31529 size_t retlen;
31530
31531+ pax_track_stack();
31532+
31533 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
31534 "pending=%d)\n", inftl, thisVUC, pendingblock);
31535
31536diff -urNp linux-2.6.32.41/drivers/mtd/inftlmount.c linux-2.6.32.41/drivers/mtd/inftlmount.c
31537--- linux-2.6.32.41/drivers/mtd/inftlmount.c 2011-03-27 14:31:47.000000000 -0400
31538+++ linux-2.6.32.41/drivers/mtd/inftlmount.c 2011-05-16 21:46:57.000000000 -0400
31539@@ -54,6 +54,8 @@ static int find_boot_record(struct INFTL
31540 struct INFTLPartition *ip;
31541 size_t retlen;
31542
31543+ pax_track_stack();
31544+
31545 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
31546
31547 /*
31548diff -urNp linux-2.6.32.41/drivers/mtd/lpddr/qinfo_probe.c linux-2.6.32.41/drivers/mtd/lpddr/qinfo_probe.c
31549--- linux-2.6.32.41/drivers/mtd/lpddr/qinfo_probe.c 2011-03-27 14:31:47.000000000 -0400
31550+++ linux-2.6.32.41/drivers/mtd/lpddr/qinfo_probe.c 2011-05-16 21:46:57.000000000 -0400
31551@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
31552 {
31553 map_word pfow_val[4];
31554
31555+ pax_track_stack();
31556+
31557 /* Check identification string */
31558 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
31559 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
31560diff -urNp linux-2.6.32.41/drivers/mtd/mtdchar.c linux-2.6.32.41/drivers/mtd/mtdchar.c
31561--- linux-2.6.32.41/drivers/mtd/mtdchar.c 2011-03-27 14:31:47.000000000 -0400
31562+++ linux-2.6.32.41/drivers/mtd/mtdchar.c 2011-05-16 21:46:57.000000000 -0400
31563@@ -460,6 +460,8 @@ static int mtd_ioctl(struct inode *inode
31564 u_long size;
31565 struct mtd_info_user info;
31566
31567+ pax_track_stack();
31568+
31569 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
31570
31571 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
31572diff -urNp linux-2.6.32.41/drivers/mtd/nftlcore.c linux-2.6.32.41/drivers/mtd/nftlcore.c
31573--- linux-2.6.32.41/drivers/mtd/nftlcore.c 2011-03-27 14:31:47.000000000 -0400
31574+++ linux-2.6.32.41/drivers/mtd/nftlcore.c 2011-05-16 21:46:57.000000000 -0400
31575@@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLre
31576 int inplace = 1;
31577 size_t retlen;
31578
31579+ pax_track_stack();
31580+
31581 memset(BlockMap, 0xff, sizeof(BlockMap));
31582 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
31583
31584diff -urNp linux-2.6.32.41/drivers/mtd/nftlmount.c linux-2.6.32.41/drivers/mtd/nftlmount.c
31585--- linux-2.6.32.41/drivers/mtd/nftlmount.c 2011-03-27 14:31:47.000000000 -0400
31586+++ linux-2.6.32.41/drivers/mtd/nftlmount.c 2011-05-18 20:09:37.000000000 -0400
31587@@ -23,6 +23,7 @@
31588 #include <asm/errno.h>
31589 #include <linux/delay.h>
31590 #include <linux/slab.h>
31591+#include <linux/sched.h>
31592 #include <linux/mtd/mtd.h>
31593 #include <linux/mtd/nand.h>
31594 #include <linux/mtd/nftl.h>
31595@@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLr
31596 struct mtd_info *mtd = nftl->mbd.mtd;
31597 unsigned int i;
31598
31599+ pax_track_stack();
31600+
31601 /* Assume logical EraseSize == physical erasesize for starting the scan.
31602 We'll sort it out later if we find a MediaHeader which says otherwise */
31603 /* Actually, we won't. The new DiskOnChip driver has already scanned
31604diff -urNp linux-2.6.32.41/drivers/mtd/ubi/build.c linux-2.6.32.41/drivers/mtd/ubi/build.c
31605--- linux-2.6.32.41/drivers/mtd/ubi/build.c 2011-03-27 14:31:47.000000000 -0400
31606+++ linux-2.6.32.41/drivers/mtd/ubi/build.c 2011-04-17 15:56:46.000000000 -0400
31607@@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
31608 static int __init bytes_str_to_int(const char *str)
31609 {
31610 char *endp;
31611- unsigned long result;
31612+ unsigned long result, scale = 1;
31613
31614 result = simple_strtoul(str, &endp, 0);
31615 if (str == endp || result >= INT_MAX) {
31616@@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const
31617
31618 switch (*endp) {
31619 case 'G':
31620- result *= 1024;
31621+ scale *= 1024;
31622 case 'M':
31623- result *= 1024;
31624+ scale *= 1024;
31625 case 'K':
31626- result *= 1024;
31627+ scale *= 1024;
31628 if (endp[1] == 'i' && endp[2] == 'B')
31629 endp += 2;
31630 case '\0':
31631@@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const
31632 return -EINVAL;
31633 }
31634
31635- return result;
31636+ if ((intoverflow_t)result*scale >= INT_MAX) {
31637+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
31638+ str);
31639+ return -EINVAL;
31640+ }
31641+
31642+ return result*scale;
31643 }
31644
31645 /**
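The UBI fix above stops multiplying the parsed value in place for each K/M/G suffix and instead accumulates a separate scale, then checks result*scale against INT_MAX in a wider type before returning; otherwise a large 'G' value silently wraps. A standalone sketch of the same idea, using unsigned long long as the wide type in place of the patch's intoverflow_t:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse "<number>[K|M|G]" into bytes, rejecting anything that does not fit in int. */
static int bytes_str_to_int(const char *str)
{
	char *endp;
	unsigned long result = strtoul(str, &endp, 0);
	unsigned long scale = 1;

	if (endp == str || result >= INT_MAX)
		return -1;

	switch (*endp) {		/* deliberate fall-through, as in the kernel code */
	case 'G':
		scale *= 1024;
	case 'M':
		scale *= 1024;
	case 'K':
		scale *= 1024;
	case '\0':
		break;
	default:
		return -1;
	}

	/* Do the final multiplication in a wider type so overflow is detectable. */
	if ((unsigned long long)result * scale >= INT_MAX)
		return -1;

	return (int)(result * scale);
}

int main(void)
{
	printf("%d %d\n", bytes_str_to_int("512K"), bytes_str_to_int("8G"));
	return 0;
}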
31646diff -urNp linux-2.6.32.41/drivers/net/bnx2.c linux-2.6.32.41/drivers/net/bnx2.c
31647--- linux-2.6.32.41/drivers/net/bnx2.c 2011-03-27 14:31:47.000000000 -0400
31648+++ linux-2.6.32.41/drivers/net/bnx2.c 2011-05-16 21:46:57.000000000 -0400
31649@@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
31650 int rc = 0;
31651 u32 magic, csum;
31652
31653+ pax_track_stack();
31654+
31655 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
31656 goto test_nvram_done;
31657
31658diff -urNp linux-2.6.32.41/drivers/net/cxgb3/t3_hw.c linux-2.6.32.41/drivers/net/cxgb3/t3_hw.c
31659--- linux-2.6.32.41/drivers/net/cxgb3/t3_hw.c 2011-03-27 14:31:47.000000000 -0400
31660+++ linux-2.6.32.41/drivers/net/cxgb3/t3_hw.c 2011-05-16 21:46:57.000000000 -0400
31661@@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter
31662 int i, addr, ret;
31663 struct t3_vpd vpd;
31664
31665+ pax_track_stack();
31666+
31667 /*
31668 * Card information is normally at VPD_BASE but some early cards had
31669 * it at 0.
31670diff -urNp linux-2.6.32.41/drivers/net/e1000e/82571.c linux-2.6.32.41/drivers/net/e1000e/82571.c
31671--- linux-2.6.32.41/drivers/net/e1000e/82571.c 2011-03-27 14:31:47.000000000 -0400
31672+++ linux-2.6.32.41/drivers/net/e1000e/82571.c 2011-04-17 15:56:46.000000000 -0400
31673@@ -212,6 +212,7 @@ static s32 e1000_init_mac_params_82571(s
31674 {
31675 struct e1000_hw *hw = &adapter->hw;
31676 struct e1000_mac_info *mac = &hw->mac;
31677+ /* cannot be const */
31678 struct e1000_mac_operations *func = &mac->ops;
31679 u32 swsm = 0;
31680 u32 swsm2 = 0;
31681@@ -1656,7 +1657,7 @@ static void e1000_clear_hw_cntrs_82571(s
31682 temp = er32(ICRXDMTC);
31683 }
31684
31685-static struct e1000_mac_operations e82571_mac_ops = {
31686+static const struct e1000_mac_operations e82571_mac_ops = {
31687 /* .check_mng_mode: mac type dependent */
31688 /* .check_for_link: media type dependent */
31689 .id_led_init = e1000e_id_led_init,
31690@@ -1674,7 +1675,7 @@ static struct e1000_mac_operations e8257
31691 .setup_led = e1000e_setup_led_generic,
31692 };
31693
31694-static struct e1000_phy_operations e82_phy_ops_igp = {
31695+static const struct e1000_phy_operations e82_phy_ops_igp = {
31696 .acquire_phy = e1000_get_hw_semaphore_82571,
31697 .check_reset_block = e1000e_check_reset_block_generic,
31698 .commit_phy = NULL,
31699@@ -1691,7 +1692,7 @@ static struct e1000_phy_operations e82_p
31700 .cfg_on_link_up = NULL,
31701 };
31702
31703-static struct e1000_phy_operations e82_phy_ops_m88 = {
31704+static const struct e1000_phy_operations e82_phy_ops_m88 = {
31705 .acquire_phy = e1000_get_hw_semaphore_82571,
31706 .check_reset_block = e1000e_check_reset_block_generic,
31707 .commit_phy = e1000e_phy_sw_reset,
31708@@ -1708,7 +1709,7 @@ static struct e1000_phy_operations e82_p
31709 .cfg_on_link_up = NULL,
31710 };
31711
31712-static struct e1000_phy_operations e82_phy_ops_bm = {
31713+static const struct e1000_phy_operations e82_phy_ops_bm = {
31714 .acquire_phy = e1000_get_hw_semaphore_82571,
31715 .check_reset_block = e1000e_check_reset_block_generic,
31716 .commit_phy = e1000e_phy_sw_reset,
31717@@ -1725,7 +1726,7 @@ static struct e1000_phy_operations e82_p
31718 .cfg_on_link_up = NULL,
31719 };
31720
31721-static struct e1000_nvm_operations e82571_nvm_ops = {
31722+static const struct e1000_nvm_operations e82571_nvm_ops = {
31723 .acquire_nvm = e1000_acquire_nvm_82571,
31724 .read_nvm = e1000e_read_nvm_eerd,
31725 .release_nvm = e1000_release_nvm_82571,
31726diff -urNp linux-2.6.32.41/drivers/net/e1000e/e1000.h linux-2.6.32.41/drivers/net/e1000e/e1000.h
31727--- linux-2.6.32.41/drivers/net/e1000e/e1000.h 2011-03-27 14:31:47.000000000 -0400
31728+++ linux-2.6.32.41/drivers/net/e1000e/e1000.h 2011-04-17 15:56:46.000000000 -0400
31729@@ -375,9 +375,9 @@ struct e1000_info {
31730 u32 pba;
31731 u32 max_hw_frame_size;
31732 s32 (*get_variants)(struct e1000_adapter *);
31733- struct e1000_mac_operations *mac_ops;
31734- struct e1000_phy_operations *phy_ops;
31735- struct e1000_nvm_operations *nvm_ops;
31736+ const struct e1000_mac_operations *mac_ops;
31737+ const struct e1000_phy_operations *phy_ops;
31738+ const struct e1000_nvm_operations *nvm_ops;
31739 };
31740
31741 /* hardware capability, feature, and workaround flags */
31742diff -urNp linux-2.6.32.41/drivers/net/e1000e/es2lan.c linux-2.6.32.41/drivers/net/e1000e/es2lan.c
31743--- linux-2.6.32.41/drivers/net/e1000e/es2lan.c 2011-03-27 14:31:47.000000000 -0400
31744+++ linux-2.6.32.41/drivers/net/e1000e/es2lan.c 2011-04-17 15:56:46.000000000 -0400
31745@@ -207,6 +207,7 @@ static s32 e1000_init_mac_params_80003es
31746 {
31747 struct e1000_hw *hw = &adapter->hw;
31748 struct e1000_mac_info *mac = &hw->mac;
31749+ /* cannot be const */
31750 struct e1000_mac_operations *func = &mac->ops;
31751
31752 /* Set media type */
31753@@ -1365,7 +1366,7 @@ static void e1000_clear_hw_cntrs_80003es
31754 temp = er32(ICRXDMTC);
31755 }
31756
31757-static struct e1000_mac_operations es2_mac_ops = {
31758+static const struct e1000_mac_operations es2_mac_ops = {
31759 .id_led_init = e1000e_id_led_init,
31760 .check_mng_mode = e1000e_check_mng_mode_generic,
31761 /* check_for_link dependent on media type */
31762@@ -1383,7 +1384,7 @@ static struct e1000_mac_operations es2_m
31763 .setup_led = e1000e_setup_led_generic,
31764 };
31765
31766-static struct e1000_phy_operations es2_phy_ops = {
31767+static const struct e1000_phy_operations es2_phy_ops = {
31768 .acquire_phy = e1000_acquire_phy_80003es2lan,
31769 .check_reset_block = e1000e_check_reset_block_generic,
31770 .commit_phy = e1000e_phy_sw_reset,
31771@@ -1400,7 +1401,7 @@ static struct e1000_phy_operations es2_p
31772 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
31773 };
31774
31775-static struct e1000_nvm_operations es2_nvm_ops = {
31776+static const struct e1000_nvm_operations es2_nvm_ops = {
31777 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
31778 .read_nvm = e1000e_read_nvm_eerd,
31779 .release_nvm = e1000_release_nvm_80003es2lan,
31780diff -urNp linux-2.6.32.41/drivers/net/e1000e/hw.h linux-2.6.32.41/drivers/net/e1000e/hw.h
31781--- linux-2.6.32.41/drivers/net/e1000e/hw.h 2011-03-27 14:31:47.000000000 -0400
31782+++ linux-2.6.32.41/drivers/net/e1000e/hw.h 2011-04-17 15:56:46.000000000 -0400
31783@@ -756,34 +756,34 @@ struct e1000_mac_operations {
31784
31785 /* Function pointers for the PHY. */
31786 struct e1000_phy_operations {
31787- s32 (*acquire_phy)(struct e1000_hw *);
31788- s32 (*check_polarity)(struct e1000_hw *);
31789- s32 (*check_reset_block)(struct e1000_hw *);
31790- s32 (*commit_phy)(struct e1000_hw *);
31791- s32 (*force_speed_duplex)(struct e1000_hw *);
31792- s32 (*get_cfg_done)(struct e1000_hw *hw);
31793- s32 (*get_cable_length)(struct e1000_hw *);
31794- s32 (*get_phy_info)(struct e1000_hw *);
31795- s32 (*read_phy_reg)(struct e1000_hw *, u32, u16 *);
31796- s32 (*read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
31797- void (*release_phy)(struct e1000_hw *);
31798- s32 (*reset_phy)(struct e1000_hw *);
31799- s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
31800- s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
31801- s32 (*write_phy_reg)(struct e1000_hw *, u32, u16);
31802- s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
31803- s32 (*cfg_on_link_up)(struct e1000_hw *);
31804+ s32 (* acquire_phy)(struct e1000_hw *);
31805+ s32 (* check_polarity)(struct e1000_hw *);
31806+ s32 (* check_reset_block)(struct e1000_hw *);
31807+ s32 (* commit_phy)(struct e1000_hw *);
31808+ s32 (* force_speed_duplex)(struct e1000_hw *);
31809+ s32 (* get_cfg_done)(struct e1000_hw *hw);
31810+ s32 (* get_cable_length)(struct e1000_hw *);
31811+ s32 (* get_phy_info)(struct e1000_hw *);
31812+ s32 (* read_phy_reg)(struct e1000_hw *, u32, u16 *);
31813+ s32 (* read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
31814+ void (* release_phy)(struct e1000_hw *);
31815+ s32 (* reset_phy)(struct e1000_hw *);
31816+ s32 (* set_d0_lplu_state)(struct e1000_hw *, bool);
31817+ s32 (* set_d3_lplu_state)(struct e1000_hw *, bool);
31818+ s32 (* write_phy_reg)(struct e1000_hw *, u32, u16);
31819+ s32 (* write_phy_reg_locked)(struct e1000_hw *, u32, u16);
31820+ s32 (* cfg_on_link_up)(struct e1000_hw *);
31821 };
31822
31823 /* Function pointers for the NVM. */
31824 struct e1000_nvm_operations {
31825- s32 (*acquire_nvm)(struct e1000_hw *);
31826- s32 (*read_nvm)(struct e1000_hw *, u16, u16, u16 *);
31827- void (*release_nvm)(struct e1000_hw *);
31828- s32 (*update_nvm)(struct e1000_hw *);
31829- s32 (*valid_led_default)(struct e1000_hw *, u16 *);
31830- s32 (*validate_nvm)(struct e1000_hw *);
31831- s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
31832+ s32 (* const acquire_nvm)(struct e1000_hw *);
31833+ s32 (* const read_nvm)(struct e1000_hw *, u16, u16, u16 *);
31834+ void (* const release_nvm)(struct e1000_hw *);
31835+ s32 (* const update_nvm)(struct e1000_hw *);
31836+ s32 (* const valid_led_default)(struct e1000_hw *, u16 *);
31837+ s32 (* const validate_nvm)(struct e1000_hw *);
31838+ s32 (* const write_nvm)(struct e1000_hw *, u16, u16, u16 *);
31839 };
31840
31841 struct e1000_mac_info {
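Constifying the e1000e operations tables, and the function pointers inside e1000_nvm_operations, lets the linker place them in read-only data, so a kernel-memory write primitive cannot simply redirect those pointers to attacker-controlled code. The same pattern in a self-contained sketch, with hypothetical names standing in for the driver's ops:

#include <stdio.h>

struct nvm_ops {
	int (* const read)(unsigned offset);		/* pointer itself is immutable */
	int (* const write)(unsigned offset, int val);
};

static int fake_read(unsigned offset)           { return (int)offset ^ 0x5a; }
static int fake_write(unsigned offset, int val) { (void)offset; return val; }

/* Whole table is const, so it lands in .rodata rather than writable data. */
static const struct nvm_ops ops = {
	.read  = fake_read,
	.write = fake_write,
};

int main(void)
{
	/* ops.read = something_else;  -- rejected at compile time */
	printf("%d\n", ops.read(0x10));
	return 0;
}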
31842diff -urNp linux-2.6.32.41/drivers/net/e1000e/ich8lan.c linux-2.6.32.41/drivers/net/e1000e/ich8lan.c
31843--- linux-2.6.32.41/drivers/net/e1000e/ich8lan.c 2011-05-10 22:12:01.000000000 -0400
31844+++ linux-2.6.32.41/drivers/net/e1000e/ich8lan.c 2011-05-10 22:12:32.000000000 -0400
31845@@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan
31846 }
31847 }
31848
31849-static struct e1000_mac_operations ich8_mac_ops = {
31850+static const struct e1000_mac_operations ich8_mac_ops = {
31851 .id_led_init = e1000e_id_led_init,
31852 .check_mng_mode = e1000_check_mng_mode_ich8lan,
31853 .check_for_link = e1000_check_for_copper_link_ich8lan,
31854@@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_
31855 /* id_led_init dependent on mac type */
31856 };
31857
31858-static struct e1000_phy_operations ich8_phy_ops = {
31859+static const struct e1000_phy_operations ich8_phy_ops = {
31860 .acquire_phy = e1000_acquire_swflag_ich8lan,
31861 .check_reset_block = e1000_check_reset_block_ich8lan,
31862 .commit_phy = NULL,
31863@@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_
31864 .write_phy_reg = e1000e_write_phy_reg_igp,
31865 };
31866
31867-static struct e1000_nvm_operations ich8_nvm_ops = {
31868+static const struct e1000_nvm_operations ich8_nvm_ops = {
31869 .acquire_nvm = e1000_acquire_nvm_ich8lan,
31870 .read_nvm = e1000_read_nvm_ich8lan,
31871 .release_nvm = e1000_release_nvm_ich8lan,
31872diff -urNp linux-2.6.32.41/drivers/net/hamradio/6pack.c linux-2.6.32.41/drivers/net/hamradio/6pack.c
31873--- linux-2.6.32.41/drivers/net/hamradio/6pack.c 2011-03-27 14:31:47.000000000 -0400
31874+++ linux-2.6.32.41/drivers/net/hamradio/6pack.c 2011-05-16 21:46:57.000000000 -0400
31875@@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct t
31876 unsigned char buf[512];
31877 int count1;
31878
31879+ pax_track_stack();
31880+
31881 if (!count)
31882 return;
31883
31884diff -urNp linux-2.6.32.41/drivers/net/ibmveth.c linux-2.6.32.41/drivers/net/ibmveth.c
31885--- linux-2.6.32.41/drivers/net/ibmveth.c 2011-03-27 14:31:47.000000000 -0400
31886+++ linux-2.6.32.41/drivers/net/ibmveth.c 2011-04-17 15:56:46.000000000 -0400
31887@@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attr
31888 NULL,
31889 };
31890
31891-static struct sysfs_ops veth_pool_ops = {
31892+static const struct sysfs_ops veth_pool_ops = {
31893 .show = veth_pool_show,
31894 .store = veth_pool_store,
31895 };
31896diff -urNp linux-2.6.32.41/drivers/net/igb/e1000_82575.c linux-2.6.32.41/drivers/net/igb/e1000_82575.c
31897--- linux-2.6.32.41/drivers/net/igb/e1000_82575.c 2011-03-27 14:31:47.000000000 -0400
31898+++ linux-2.6.32.41/drivers/net/igb/e1000_82575.c 2011-04-17 15:56:46.000000000 -0400
31899@@ -1410,7 +1410,7 @@ void igb_vmdq_set_replication_pf(struct
31900 wr32(E1000_VT_CTL, vt_ctl);
31901 }
31902
31903-static struct e1000_mac_operations e1000_mac_ops_82575 = {
31904+static const struct e1000_mac_operations e1000_mac_ops_82575 = {
31905 .reset_hw = igb_reset_hw_82575,
31906 .init_hw = igb_init_hw_82575,
31907 .check_for_link = igb_check_for_link_82575,
31908@@ -1419,13 +1419,13 @@ static struct e1000_mac_operations e1000
31909 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
31910 };
31911
31912-static struct e1000_phy_operations e1000_phy_ops_82575 = {
31913+static const struct e1000_phy_operations e1000_phy_ops_82575 = {
31914 .acquire = igb_acquire_phy_82575,
31915 .get_cfg_done = igb_get_cfg_done_82575,
31916 .release = igb_release_phy_82575,
31917 };
31918
31919-static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
31920+static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
31921 .acquire = igb_acquire_nvm_82575,
31922 .read = igb_read_nvm_eerd,
31923 .release = igb_release_nvm_82575,
31924diff -urNp linux-2.6.32.41/drivers/net/igb/e1000_hw.h linux-2.6.32.41/drivers/net/igb/e1000_hw.h
31925--- linux-2.6.32.41/drivers/net/igb/e1000_hw.h 2011-03-27 14:31:47.000000000 -0400
31926+++ linux-2.6.32.41/drivers/net/igb/e1000_hw.h 2011-04-17 15:56:46.000000000 -0400
31927@@ -305,17 +305,17 @@ struct e1000_phy_operations {
31928 };
31929
31930 struct e1000_nvm_operations {
31931- s32 (*acquire)(struct e1000_hw *);
31932- s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
31933- void (*release)(struct e1000_hw *);
31934- s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
31935+ s32 (* const acquire)(struct e1000_hw *);
31936+ s32 (* const read)(struct e1000_hw *, u16, u16, u16 *);
31937+ void (* const release)(struct e1000_hw *);
31938+ s32 (* const write)(struct e1000_hw *, u16, u16, u16 *);
31939 };
31940
31941 struct e1000_info {
31942 s32 (*get_invariants)(struct e1000_hw *);
31943- struct e1000_mac_operations *mac_ops;
31944- struct e1000_phy_operations *phy_ops;
31945- struct e1000_nvm_operations *nvm_ops;
31946+ const struct e1000_mac_operations *mac_ops;
31947+ const struct e1000_phy_operations *phy_ops;
31948+ const struct e1000_nvm_operations *nvm_ops;
31949 };
31950
31951 extern const struct e1000_info e1000_82575_info;
31952diff -urNp linux-2.6.32.41/drivers/net/iseries_veth.c linux-2.6.32.41/drivers/net/iseries_veth.c
31953--- linux-2.6.32.41/drivers/net/iseries_veth.c 2011-03-27 14:31:47.000000000 -0400
31954+++ linux-2.6.32.41/drivers/net/iseries_veth.c 2011-04-17 15:56:46.000000000 -0400
31955@@ -384,7 +384,7 @@ static struct attribute *veth_cnx_defaul
31956 NULL
31957 };
31958
31959-static struct sysfs_ops veth_cnx_sysfs_ops = {
31960+static const struct sysfs_ops veth_cnx_sysfs_ops = {
31961 .show = veth_cnx_attribute_show
31962 };
31963
31964@@ -441,7 +441,7 @@ static struct attribute *veth_port_defau
31965 NULL
31966 };
31967
31968-static struct sysfs_ops veth_port_sysfs_ops = {
31969+static const struct sysfs_ops veth_port_sysfs_ops = {
31970 .show = veth_port_attribute_show
31971 };
31972
31973diff -urNp linux-2.6.32.41/drivers/net/ixgb/ixgb_main.c linux-2.6.32.41/drivers/net/ixgb/ixgb_main.c
31974--- linux-2.6.32.41/drivers/net/ixgb/ixgb_main.c 2011-03-27 14:31:47.000000000 -0400
31975+++ linux-2.6.32.41/drivers/net/ixgb/ixgb_main.c 2011-05-16 21:46:57.000000000 -0400
31976@@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev
31977 u32 rctl;
31978 int i;
31979
31980+ pax_track_stack();
31981+
31982 /* Check for Promiscuous and All Multicast modes */
31983
31984 rctl = IXGB_READ_REG(hw, RCTL);
31985diff -urNp linux-2.6.32.41/drivers/net/ixgb/ixgb_param.c linux-2.6.32.41/drivers/net/ixgb/ixgb_param.c
31986--- linux-2.6.32.41/drivers/net/ixgb/ixgb_param.c 2011-03-27 14:31:47.000000000 -0400
31987+++ linux-2.6.32.41/drivers/net/ixgb/ixgb_param.c 2011-05-16 21:46:57.000000000 -0400
31988@@ -260,6 +260,9 @@ void __devinit
31989 ixgb_check_options(struct ixgb_adapter *adapter)
31990 {
31991 int bd = adapter->bd_number;
31992+
31993+ pax_track_stack();
31994+
31995 if (bd >= IXGB_MAX_NIC) {
31996 printk(KERN_NOTICE
31997 "Warning: no configuration for board #%i\n", bd);
31998diff -urNp linux-2.6.32.41/drivers/net/mlx4/main.c linux-2.6.32.41/drivers/net/mlx4/main.c
31999--- linux-2.6.32.41/drivers/net/mlx4/main.c 2011-03-27 14:31:47.000000000 -0400
32000+++ linux-2.6.32.41/drivers/net/mlx4/main.c 2011-05-18 20:09:37.000000000 -0400
32001@@ -38,6 +38,7 @@
32002 #include <linux/errno.h>
32003 #include <linux/pci.h>
32004 #include <linux/dma-mapping.h>
32005+#include <linux/sched.h>
32006
32007 #include <linux/mlx4/device.h>
32008 #include <linux/mlx4/doorbell.h>
32009@@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev
32010 u64 icm_size;
32011 int err;
32012
32013+ pax_track_stack();
32014+
32015 err = mlx4_QUERY_FW(dev);
32016 if (err) {
32017 if (err == -EACCES)
32018diff -urNp linux-2.6.32.41/drivers/net/niu.c linux-2.6.32.41/drivers/net/niu.c
32019--- linux-2.6.32.41/drivers/net/niu.c 2011-05-10 22:12:01.000000000 -0400
32020+++ linux-2.6.32.41/drivers/net/niu.c 2011-05-16 21:46:57.000000000 -0400
32021@@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struc
32022 int i, num_irqs, err;
32023 u8 first_ldg;
32024
32025+ pax_track_stack();
32026+
32027 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
32028 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
32029 ldg_num_map[i] = first_ldg + i;
32030diff -urNp linux-2.6.32.41/drivers/net/pcnet32.c linux-2.6.32.41/drivers/net/pcnet32.c
32031--- linux-2.6.32.41/drivers/net/pcnet32.c 2011-03-27 14:31:47.000000000 -0400
32032+++ linux-2.6.32.41/drivers/net/pcnet32.c 2011-04-17 15:56:46.000000000 -0400
32033@@ -79,7 +79,7 @@ static int cards_found;
32034 /*
32035 * VLB I/O addresses
32036 */
32037-static unsigned int pcnet32_portlist[] __initdata =
32038+static unsigned int pcnet32_portlist[] __devinitdata =
32039 { 0x300, 0x320, 0x340, 0x360, 0 };
32040
32041 static int pcnet32_debug = 0;
32042diff -urNp linux-2.6.32.41/drivers/net/tg3.h linux-2.6.32.41/drivers/net/tg3.h
32043--- linux-2.6.32.41/drivers/net/tg3.h 2011-03-27 14:31:47.000000000 -0400
32044+++ linux-2.6.32.41/drivers/net/tg3.h 2011-04-17 15:56:46.000000000 -0400
32045@@ -95,6 +95,7 @@
32046 #define CHIPREV_ID_5750_A0 0x4000
32047 #define CHIPREV_ID_5750_A1 0x4001
32048 #define CHIPREV_ID_5750_A3 0x4003
32049+#define CHIPREV_ID_5750_C1 0x4201
32050 #define CHIPREV_ID_5750_C2 0x4202
32051 #define CHIPREV_ID_5752_A0_HW 0x5000
32052 #define CHIPREV_ID_5752_A0 0x6000
32053diff -urNp linux-2.6.32.41/drivers/net/tulip/de2104x.c linux-2.6.32.41/drivers/net/tulip/de2104x.c
32054--- linux-2.6.32.41/drivers/net/tulip/de2104x.c 2011-03-27 14:31:47.000000000 -0400
32055+++ linux-2.6.32.41/drivers/net/tulip/de2104x.c 2011-05-16 21:46:57.000000000 -0400
32056@@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_i
32057 struct de_srom_info_leaf *il;
32058 void *bufp;
32059
32060+ pax_track_stack();
32061+
32062 /* download entire eeprom */
32063 for (i = 0; i < DE_EEPROM_WORDS; i++)
32064 ((__le16 *)ee_data)[i] =
32065diff -urNp linux-2.6.32.41/drivers/net/tulip/de4x5.c linux-2.6.32.41/drivers/net/tulip/de4x5.c
32066--- linux-2.6.32.41/drivers/net/tulip/de4x5.c 2011-03-27 14:31:47.000000000 -0400
32067+++ linux-2.6.32.41/drivers/net/tulip/de4x5.c 2011-04-17 15:56:46.000000000 -0400
32068@@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, stru
32069 for (i=0; i<ETH_ALEN; i++) {
32070 tmp.addr[i] = dev->dev_addr[i];
32071 }
32072- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32073+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32074 break;
32075
32076 case DE4X5_SET_HWADDR: /* Set the hardware address */
32077@@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, stru
32078 spin_lock_irqsave(&lp->lock, flags);
32079 memcpy(&statbuf, &lp->pktStats, ioc->len);
32080 spin_unlock_irqrestore(&lp->lock, flags);
32081- if (copy_to_user(ioc->data, &statbuf, ioc->len))
32082+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
32083 return -EFAULT;
32084 break;
32085 }
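
The two de4x5 ioctl hunks above bound the caller-supplied ioc->len against the size of the kernel-side buffer before copy_to_user(), closing a potential out-of-bounds read of kernel stack memory. A minimal userspace sketch of the pattern, where memcpy() stands in for copy_to_user() and all names are invented:

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>

    static int copy_out(void *dst, const void *src, size_t len, size_t src_size)
    {
            if (len > src_size)        /* the check the patch adds */
                    return -EFAULT;
            memcpy(dst, src, len);     /* stands in for copy_to_user() */
            return 0;
    }

    int main(void)
    {
            char kernel_buf[6] = "hello";
            char user_buf[64];

            printf("ok:  %d\n", copy_out(user_buf, kernel_buf, 5,
                                         sizeof(kernel_buf)));
            printf("bad: %d\n", copy_out(user_buf, kernel_buf, 4096,
                                         sizeof(kernel_buf)));
            return 0;
    }

The parport procfs hunks further down apply the same bound before their copy_to_user() calls.
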
32086diff -urNp linux-2.6.32.41/drivers/net/usb/hso.c linux-2.6.32.41/drivers/net/usb/hso.c
32087--- linux-2.6.32.41/drivers/net/usb/hso.c 2011-03-27 14:31:47.000000000 -0400
32088+++ linux-2.6.32.41/drivers/net/usb/hso.c 2011-04-17 15:56:46.000000000 -0400
32089@@ -71,7 +71,7 @@
32090 #include <asm/byteorder.h>
32091 #include <linux/serial_core.h>
32092 #include <linux/serial.h>
32093-
32094+#include <asm/local.h>
32095
32096 #define DRIVER_VERSION "1.2"
32097 #define MOD_AUTHOR "Option Wireless"
32098@@ -258,7 +258,7 @@ struct hso_serial {
32099
32100 /* from usb_serial_port */
32101 struct tty_struct *tty;
32102- int open_count;
32103+ local_t open_count;
32104 spinlock_t serial_lock;
32105
32106 int (*write_data) (struct hso_serial *serial);
32107@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_
32108 struct urb *urb;
32109
32110 urb = serial->rx_urb[0];
32111- if (serial->open_count > 0) {
32112+ if (local_read(&serial->open_count) > 0) {
32113 count = put_rxbuf_data(urb, serial);
32114 if (count == -1)
32115 return;
32116@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_cal
32117 DUMP1(urb->transfer_buffer, urb->actual_length);
32118
32119 /* Anyone listening? */
32120- if (serial->open_count == 0)
32121+ if (local_read(&serial->open_count) == 0)
32122 return;
32123
32124 if (status == 0) {
32125@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
32126 spin_unlock_irq(&serial->serial_lock);
32127
32128 /* check for port already opened, if not set the termios */
32129- serial->open_count++;
32130- if (serial->open_count == 1) {
32131+ if (local_inc_return(&serial->open_count) == 1) {
32132 tty->low_latency = 1;
32133 serial->rx_state = RX_IDLE;
32134 /* Force default termio settings */
32135@@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_st
32136 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
32137 if (result) {
32138 hso_stop_serial_device(serial->parent);
32139- serial->open_count--;
32140+ local_dec(&serial->open_count);
32141 kref_put(&serial->parent->ref, hso_serial_ref_free);
32142 }
32143 } else {
32144@@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_
32145
32146 /* reset the rts and dtr */
32147 /* do the actual close */
32148- serial->open_count--;
32149+ local_dec(&serial->open_count);
32150
32151- if (serial->open_count <= 0) {
32152- serial->open_count = 0;
32153+ if (local_read(&serial->open_count) <= 0) {
32154+ local_set(&serial->open_count, 0);
32155 spin_lock_irq(&serial->serial_lock);
32156 if (serial->tty == tty) {
32157 serial->tty->driver_data = NULL;
32158@@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struc
32159
32160 /* the actual setup */
32161 spin_lock_irqsave(&serial->serial_lock, flags);
32162- if (serial->open_count)
32163+ if (local_read(&serial->open_count))
32164 _hso_serial_set_termios(tty, old);
32165 else
32166 tty->termios = old;
32167@@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interfa
32168 /* Start all serial ports */
32169 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
32170 if (serial_table[i] && (serial_table[i]->interface == iface)) {
32171- if (dev2ser(serial_table[i])->open_count) {
32172+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
32173 result =
32174 hso_start_serial_device(serial_table[i], GFP_NOIO);
32175 hso_kick_transmit(dev2ser(serial_table[i]));
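
The hso hunks above replace the plain int open_count with a local_t and the separate read/modify/write sequences with local_inc_return()/local_dec()/local_read(), so the open/close bookkeeping is a single increment-and-test rather than a racy ++ followed by a comparison. A loose userspace analogue using C11 atomics; the kernel's local_t has CPU-local semantics, so this only illustrates the shape of the change:

    #include <stdio.h>
    #include <stdatomic.h>

    static atomic_int open_count;            /* plays the role of local_t */

    static void serial_open(void)
    {
            /* like local_inc_return(&serial->open_count) == 1 */
            if (atomic_fetch_add(&open_count, 1) + 1 == 1)
                    puts("first open: initialise the port");
            else
                    puts("already open: reuse existing state");
    }

    static void serial_close(void)
    {
            /* like local_dec() followed by the <= 0 check */
            if (atomic_fetch_sub(&open_count, 1) - 1 <= 0) {
                    atomic_store(&open_count, 0);
                    puts("last close: tear the port down");
            } else {
                    puts("still open elsewhere");
            }
    }

    int main(void)
    {
            serial_open();
            serial_open();
            serial_close();
            serial_close();
            return 0;
    }
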
32176diff -urNp linux-2.6.32.41/drivers/net/vxge/vxge-main.c linux-2.6.32.41/drivers/net/vxge/vxge-main.c
32177--- linux-2.6.32.41/drivers/net/vxge/vxge-main.c 2011-03-27 14:31:47.000000000 -0400
32178+++ linux-2.6.32.41/drivers/net/vxge/vxge-main.c 2011-05-16 21:46:57.000000000 -0400
32179@@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_T
32180 struct sk_buff *completed[NR_SKB_COMPLETED];
32181 int more;
32182
32183+ pax_track_stack();
32184+
32185 do {
32186 more = 0;
32187 skb_ptr = completed;
32188@@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_conf
32189 u8 mtable[256] = {0}; /* CPU to vpath mapping */
32190 int index;
32191
32192+ pax_track_stack();
32193+
32194 /*
32195 * Filling
32196 * - itable with bucket numbers
32197diff -urNp linux-2.6.32.41/drivers/net/wan/cycx_x25.c linux-2.6.32.41/drivers/net/wan/cycx_x25.c
32198--- linux-2.6.32.41/drivers/net/wan/cycx_x25.c 2011-03-27 14:31:47.000000000 -0400
32199+++ linux-2.6.32.41/drivers/net/wan/cycx_x25.c 2011-05-16 21:46:57.000000000 -0400
32200@@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned
32201 unsigned char hex[1024],
32202 * phex = hex;
32203
32204+ pax_track_stack();
32205+
32206 if (len >= (sizeof(hex) / 2))
32207 len = (sizeof(hex) / 2) - 1;
32208
32209diff -urNp linux-2.6.32.41/drivers/net/wimax/i2400m/usb-fw.c linux-2.6.32.41/drivers/net/wimax/i2400m/usb-fw.c
32210--- linux-2.6.32.41/drivers/net/wimax/i2400m/usb-fw.c 2011-03-27 14:31:47.000000000 -0400
32211+++ linux-2.6.32.41/drivers/net/wimax/i2400m/usb-fw.c 2011-05-16 21:46:57.000000000 -0400
32212@@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
32213 int do_autopm = 1;
32214 DECLARE_COMPLETION_ONSTACK(notif_completion);
32215
32216+ pax_track_stack();
32217+
32218 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
32219 i2400m, ack, ack_size);
32220 BUG_ON(_ack == i2400m->bm_ack_buf);
32221diff -urNp linux-2.6.32.41/drivers/net/wireless/airo.c linux-2.6.32.41/drivers/net/wireless/airo.c
32222--- linux-2.6.32.41/drivers/net/wireless/airo.c 2011-03-27 14:31:47.000000000 -0400
32223+++ linux-2.6.32.41/drivers/net/wireless/airo.c 2011-05-16 21:46:57.000000000 -0400
32224@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
32225 BSSListElement * loop_net;
32226 BSSListElement * tmp_net;
32227
32228+ pax_track_stack();
32229+
32230 /* Blow away current list of scan results */
32231 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
32232 list_move_tail (&loop_net->list, &ai->network_free_list);
32233@@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *
32234 WepKeyRid wkr;
32235 int rc;
32236
32237+ pax_track_stack();
32238+
32239 memset( &mySsid, 0, sizeof( mySsid ) );
32240 kfree (ai->flash);
32241 ai->flash = NULL;
32242@@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct i
32243 __le32 *vals = stats.vals;
32244 int len;
32245
32246+ pax_track_stack();
32247+
32248 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
32249 return -ENOMEM;
32250 data = (struct proc_data *)file->private_data;
32251@@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct ino
32252 /* If doLoseSync is not 1, we won't do a Lose Sync */
32253 int doLoseSync = -1;
32254
32255+ pax_track_stack();
32256+
32257 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
32258 return -ENOMEM;
32259 data = (struct proc_data *)file->private_data;
32260@@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_de
32261 int i;
32262 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
32263
32264+ pax_track_stack();
32265+
32266 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
32267 if (!qual)
32268 return -ENOMEM;
32269@@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(str
32270 CapabilityRid cap_rid;
32271 __le32 *vals = stats_rid.vals;
32272
32273+ pax_track_stack();
32274+
32275 /* Get stats out of the card */
32276 clear_bit(JOB_WSTATS, &local->jobs);
32277 if (local->power.event) {
32278diff -urNp linux-2.6.32.41/drivers/net/wireless/ath/ath5k/debug.c linux-2.6.32.41/drivers/net/wireless/ath/ath5k/debug.c
32279--- linux-2.6.32.41/drivers/net/wireless/ath/ath5k/debug.c 2011-03-27 14:31:47.000000000 -0400
32280+++ linux-2.6.32.41/drivers/net/wireless/ath/ath5k/debug.c 2011-05-16 21:46:57.000000000 -0400
32281@@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct f
32282 unsigned int v;
32283 u64 tsf;
32284
32285+ pax_track_stack();
32286+
32287 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
32288 len += snprintf(buf+len, sizeof(buf)-len,
32289 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
32290@@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct fi
32291 unsigned int len = 0;
32292 unsigned int i;
32293
32294+ pax_track_stack();
32295+
32296 len += snprintf(buf+len, sizeof(buf)-len,
32297 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
32298
32299diff -urNp linux-2.6.32.41/drivers/net/wireless/ath/ath9k/debug.c linux-2.6.32.41/drivers/net/wireless/ath/ath9k/debug.c
32300--- linux-2.6.32.41/drivers/net/wireless/ath/ath9k/debug.c 2011-03-27 14:31:47.000000000 -0400
32301+++ linux-2.6.32.41/drivers/net/wireless/ath/ath9k/debug.c 2011-05-16 21:46:57.000000000 -0400
32302@@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struc
32303 char buf[512];
32304 unsigned int len = 0;
32305
32306+ pax_track_stack();
32307+
32308 len += snprintf(buf + len, sizeof(buf) - len,
32309 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
32310 len += snprintf(buf + len, sizeof(buf) - len,
32311@@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct fi
32312 int i;
32313 u8 addr[ETH_ALEN];
32314
32315+ pax_track_stack();
32316+
32317 len += snprintf(buf + len, sizeof(buf) - len,
32318 "primary: %s (%s chan=%d ht=%d)\n",
32319 wiphy_name(sc->pri_wiphy->hw->wiphy),
32320diff -urNp linux-2.6.32.41/drivers/net/wireless/b43/debugfs.c linux-2.6.32.41/drivers/net/wireless/b43/debugfs.c
32321--- linux-2.6.32.41/drivers/net/wireless/b43/debugfs.c 2011-03-27 14:31:47.000000000 -0400
32322+++ linux-2.6.32.41/drivers/net/wireless/b43/debugfs.c 2011-04-17 15:56:46.000000000 -0400
32323@@ -43,7 +43,7 @@ static struct dentry *rootdir;
32324 struct b43_debugfs_fops {
32325 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
32326 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
32327- struct file_operations fops;
32328+ const struct file_operations fops;
32329 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
32330 size_t file_struct_offset;
32331 };
32332diff -urNp linux-2.6.32.41/drivers/net/wireless/b43legacy/debugfs.c linux-2.6.32.41/drivers/net/wireless/b43legacy/debugfs.c
32333--- linux-2.6.32.41/drivers/net/wireless/b43legacy/debugfs.c 2011-03-27 14:31:47.000000000 -0400
32334+++ linux-2.6.32.41/drivers/net/wireless/b43legacy/debugfs.c 2011-04-17 15:56:46.000000000 -0400
32335@@ -44,7 +44,7 @@ static struct dentry *rootdir;
32336 struct b43legacy_debugfs_fops {
32337 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
32338 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
32339- struct file_operations fops;
32340+ const struct file_operations fops;
32341 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
32342 size_t file_struct_offset;
32343 /* Take wl->irq_lock before calling read/write? */
32344diff -urNp linux-2.6.32.41/drivers/net/wireless/ipw2x00/ipw2100.c linux-2.6.32.41/drivers/net/wireless/ipw2x00/ipw2100.c
32345--- linux-2.6.32.41/drivers/net/wireless/ipw2x00/ipw2100.c 2011-03-27 14:31:47.000000000 -0400
32346+++ linux-2.6.32.41/drivers/net/wireless/ipw2x00/ipw2100.c 2011-05-16 21:46:57.000000000 -0400
32347@@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2
32348 int err;
32349 DECLARE_SSID_BUF(ssid);
32350
32351+ pax_track_stack();
32352+
32353 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
32354
32355 if (ssid_len)
32356@@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw210
32357 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
32358 int err;
32359
32360+ pax_track_stack();
32361+
32362 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
32363 idx, keylen, len);
32364
32365diff -urNp linux-2.6.32.41/drivers/net/wireless/ipw2x00/libipw_rx.c linux-2.6.32.41/drivers/net/wireless/ipw2x00/libipw_rx.c
32366--- linux-2.6.32.41/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-03-27 14:31:47.000000000 -0400
32367+++ linux-2.6.32.41/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-05-16 21:46:57.000000000 -0400
32368@@ -1566,6 +1566,8 @@ static void libipw_process_probe_respons
32369 unsigned long flags;
32370 DECLARE_SSID_BUF(ssid);
32371
32372+ pax_track_stack();
32373+
32374 LIBIPW_DEBUG_SCAN("'%s' (%pM"
32375 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
32376 print_ssid(ssid, info_element->data, info_element->len),
32377diff -urNp linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-1000.c linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-1000.c
32378--- linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-03-27 14:31:47.000000000 -0400
32379+++ linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-04-17 15:56:46.000000000 -0400
32380@@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib =
32381 },
32382 };
32383
32384-static struct iwl_ops iwl1000_ops = {
32385+static const struct iwl_ops iwl1000_ops = {
32386 .ucode = &iwl5000_ucode,
32387 .lib = &iwl1000_lib,
32388 .hcmd = &iwl5000_hcmd,
32389diff -urNp linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-3945.c linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-3945.c
32390--- linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-03-27 14:31:47.000000000 -0400
32391+++ linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-04-17 15:56:46.000000000 -0400
32392@@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945
32393 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
32394 };
32395
32396-static struct iwl_ops iwl3945_ops = {
32397+static const struct iwl_ops iwl3945_ops = {
32398 .ucode = &iwl3945_ucode,
32399 .lib = &iwl3945_lib,
32400 .hcmd = &iwl3945_hcmd,
32401diff -urNp linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-4965.c linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-4965.c
32402--- linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-03-27 14:31:47.000000000 -0400
32403+++ linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-04-17 15:56:46.000000000 -0400
32404@@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib =
32405 },
32406 };
32407
32408-static struct iwl_ops iwl4965_ops = {
32409+static const struct iwl_ops iwl4965_ops = {
32410 .ucode = &iwl4965_ucode,
32411 .lib = &iwl4965_lib,
32412 .hcmd = &iwl4965_hcmd,
32413diff -urNp linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-5000.c linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-5000.c
32414--- linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-05-10 22:12:01.000000000 -0400
32415+++ linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-05-10 22:12:32.000000000 -0400
32416@@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib =
32417 },
32418 };
32419
32420-struct iwl_ops iwl5000_ops = {
32421+const struct iwl_ops iwl5000_ops = {
32422 .ucode = &iwl5000_ucode,
32423 .lib = &iwl5000_lib,
32424 .hcmd = &iwl5000_hcmd,
32425 .utils = &iwl5000_hcmd_utils,
32426 };
32427
32428-static struct iwl_ops iwl5150_ops = {
32429+static const struct iwl_ops iwl5150_ops = {
32430 .ucode = &iwl5000_ucode,
32431 .lib = &iwl5150_lib,
32432 .hcmd = &iwl5000_hcmd,
32433diff -urNp linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-6000.c linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-6000.c
32434--- linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-03-27 14:31:47.000000000 -0400
32435+++ linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-04-17 15:56:46.000000000 -0400
32436@@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000
32437 .calc_rssi = iwl5000_calc_rssi,
32438 };
32439
32440-static struct iwl_ops iwl6000_ops = {
32441+static const struct iwl_ops iwl6000_ops = {
32442 .ucode = &iwl5000_ucode,
32443 .lib = &iwl6000_lib,
32444 .hcmd = &iwl5000_hcmd,
32445diff -urNp linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
32446--- linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-03-27 14:31:47.000000000 -0400
32447+++ linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-05-16 21:46:57.000000000 -0400
32448@@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, s
32449 u8 active_index = 0;
32450 s32 tpt = 0;
32451
32452+ pax_track_stack();
32453+
32454 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
32455
32456 if (!ieee80211_is_data(hdr->frame_control) ||
32457@@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_
32458 u8 valid_tx_ant = 0;
32459 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
32460
32461+ pax_track_stack();
32462+
32463 /* Override starting rate (index 0) if needed for debug purposes */
32464 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
32465
32466diff -urNp linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-debugfs.c
32467--- linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-03-27 14:31:47.000000000 -0400
32468+++ linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-05-16 21:46:57.000000000 -0400
32469@@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(str
32470 int pos = 0;
32471 const size_t bufsz = sizeof(buf);
32472
32473+ pax_track_stack();
32474+
32475 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
32476 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
32477 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
32478@@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
32479 const size_t bufsz = sizeof(buf);
32480 ssize_t ret;
32481
32482+ pax_track_stack();
32483+
32484 for (i = 0; i < AC_NUM; i++) {
32485 pos += scnprintf(buf + pos, bufsz - pos,
32486 "\tcw_min\tcw_max\taifsn\ttxop\n");
32487diff -urNp linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-debug.h linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-debug.h
32488--- linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-03-27 14:31:47.000000000 -0400
32489+++ linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-04-17 15:56:46.000000000 -0400
32490@@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_pri
32491 #endif
32492
32493 #else
32494-#define IWL_DEBUG(__priv, level, fmt, args...)
32495-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
32496+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
32497+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
32498 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
32499 void *p, u32 len)
32500 {}
32501diff -urNp linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-dev.h linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-dev.h
32502--- linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-03-27 14:31:47.000000000 -0400
32503+++ linux-2.6.32.41/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-04-17 15:56:46.000000000 -0400
32504@@ -68,7 +68,7 @@ struct iwl_tx_queue;
32505
32506 /* shared structures from iwl-5000.c */
32507 extern struct iwl_mod_params iwl50_mod_params;
32508-extern struct iwl_ops iwl5000_ops;
32509+extern const struct iwl_ops iwl5000_ops;
32510 extern struct iwl_ucode_ops iwl5000_ucode;
32511 extern struct iwl_lib_ops iwl5000_lib;
32512 extern struct iwl_hcmd_ops iwl5000_hcmd;
32513diff -urNp linux-2.6.32.41/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-2.6.32.41/drivers/net/wireless/iwmc3200wifi/debugfs.c
32514--- linux-2.6.32.41/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-03-27 14:31:47.000000000 -0400
32515+++ linux-2.6.32.41/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-05-16 21:46:57.000000000 -0400
32516@@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
32517 int buf_len = 512;
32518 size_t len = 0;
32519
32520+ pax_track_stack();
32521+
32522 if (*ppos != 0)
32523 return 0;
32524 if (count < sizeof(buf))
32525diff -urNp linux-2.6.32.41/drivers/net/wireless/libertas/debugfs.c linux-2.6.32.41/drivers/net/wireless/libertas/debugfs.c
32526--- linux-2.6.32.41/drivers/net/wireless/libertas/debugfs.c 2011-03-27 14:31:47.000000000 -0400
32527+++ linux-2.6.32.41/drivers/net/wireless/libertas/debugfs.c 2011-04-17 15:56:46.000000000 -0400
32528@@ -708,7 +708,7 @@ out_unlock:
32529 struct lbs_debugfs_files {
32530 const char *name;
32531 int perm;
32532- struct file_operations fops;
32533+ const struct file_operations fops;
32534 };
32535
32536 static const struct lbs_debugfs_files debugfs_files[] = {
32537diff -urNp linux-2.6.32.41/drivers/net/wireless/rndis_wlan.c linux-2.6.32.41/drivers/net/wireless/rndis_wlan.c
32538--- linux-2.6.32.41/drivers/net/wireless/rndis_wlan.c 2011-03-27 14:31:47.000000000 -0400
32539+++ linux-2.6.32.41/drivers/net/wireless/rndis_wlan.c 2011-04-17 15:56:46.000000000 -0400
32540@@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbn
32541
32542 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
32543
32544- if (rts_threshold < 0 || rts_threshold > 2347)
32545+ if (rts_threshold > 2347)
32546 rts_threshold = 2347;
32547
32548 tmp = cpu_to_le32(rts_threshold);
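
The rndis_wlan hunk drops the rts_threshold < 0 half of the range check: the value is unsigned, so that comparison can never be true and only produces a compiler warning. The same cleanup appears later in pnp_check_irq() and pnp_check_dma(). A two-line demonstration; build with gcc -Wextra or -Wtype-limits to see the diagnostic the dead test triggers:

    #include <stdio.h>

    int main(void)
    {
            unsigned int rts_threshold = 3000;

            if (rts_threshold > 2347)        /* the only check that can fire */
                    rts_threshold = 2347;

            printf("clamped to %u\n", rts_threshold);
            return 0;
    }
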
32549diff -urNp linux-2.6.32.41/drivers/oprofile/buffer_sync.c linux-2.6.32.41/drivers/oprofile/buffer_sync.c
32550--- linux-2.6.32.41/drivers/oprofile/buffer_sync.c 2011-03-27 14:31:47.000000000 -0400
32551+++ linux-2.6.32.41/drivers/oprofile/buffer_sync.c 2011-04-17 15:56:46.000000000 -0400
32552@@ -341,7 +341,7 @@ static void add_data(struct op_entry *en
32553 if (cookie == NO_COOKIE)
32554 offset = pc;
32555 if (cookie == INVALID_COOKIE) {
32556- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
32557+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
32558 offset = pc;
32559 }
32560 if (cookie != last_cookie) {
32561@@ -385,14 +385,14 @@ add_sample(struct mm_struct *mm, struct
32562 /* add userspace sample */
32563
32564 if (!mm) {
32565- atomic_inc(&oprofile_stats.sample_lost_no_mm);
32566+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
32567 return 0;
32568 }
32569
32570 cookie = lookup_dcookie(mm, s->eip, &offset);
32571
32572 if (cookie == INVALID_COOKIE) {
32573- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
32574+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
32575 return 0;
32576 }
32577
32578@@ -561,7 +561,7 @@ void sync_buffer(int cpu)
32579 /* ignore backtraces if failed to add a sample */
32580 if (state == sb_bt_start) {
32581 state = sb_bt_ignore;
32582- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
32583+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
32584 }
32585 }
32586 release_mm(mm);
32587diff -urNp linux-2.6.32.41/drivers/oprofile/event_buffer.c linux-2.6.32.41/drivers/oprofile/event_buffer.c
32588--- linux-2.6.32.41/drivers/oprofile/event_buffer.c 2011-03-27 14:31:47.000000000 -0400
32589+++ linux-2.6.32.41/drivers/oprofile/event_buffer.c 2011-04-17 15:56:46.000000000 -0400
32590@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
32591 }
32592
32593 if (buffer_pos == buffer_size) {
32594- atomic_inc(&oprofile_stats.event_lost_overflow);
32595+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
32596 return;
32597 }
32598
32599diff -urNp linux-2.6.32.41/drivers/oprofile/oprof.c linux-2.6.32.41/drivers/oprofile/oprof.c
32600--- linux-2.6.32.41/drivers/oprofile/oprof.c 2011-03-27 14:31:47.000000000 -0400
32601+++ linux-2.6.32.41/drivers/oprofile/oprof.c 2011-04-17 15:56:46.000000000 -0400
32602@@ -110,7 +110,7 @@ static void switch_worker(struct work_st
32603 if (oprofile_ops.switch_events())
32604 return;
32605
32606- atomic_inc(&oprofile_stats.multiplex_counter);
32607+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
32608 start_switch_worker();
32609 }
32610
32611diff -urNp linux-2.6.32.41/drivers/oprofile/oprofilefs.c linux-2.6.32.41/drivers/oprofile/oprofilefs.c
32612--- linux-2.6.32.41/drivers/oprofile/oprofilefs.c 2011-03-27 14:31:47.000000000 -0400
32613+++ linux-2.6.32.41/drivers/oprofile/oprofilefs.c 2011-04-17 15:56:46.000000000 -0400
32614@@ -187,7 +187,7 @@ static const struct file_operations atom
32615
32616
32617 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
32618- char const *name, atomic_t *val)
32619+ char const *name, atomic_unchecked_t *val)
32620 {
32621 struct dentry *d = __oprofilefs_create_file(sb, root, name,
32622 &atomic_ro_fops, 0444);
32623diff -urNp linux-2.6.32.41/drivers/oprofile/oprofile_stats.c linux-2.6.32.41/drivers/oprofile/oprofile_stats.c
32624--- linux-2.6.32.41/drivers/oprofile/oprofile_stats.c 2011-03-27 14:31:47.000000000 -0400
32625+++ linux-2.6.32.41/drivers/oprofile/oprofile_stats.c 2011-04-17 15:56:46.000000000 -0400
32626@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
32627 cpu_buf->sample_invalid_eip = 0;
32628 }
32629
32630- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
32631- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
32632- atomic_set(&oprofile_stats.event_lost_overflow, 0);
32633- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
32634- atomic_set(&oprofile_stats.multiplex_counter, 0);
32635+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
32636+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
32637+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
32638+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
32639+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
32640 }
32641
32642
32643diff -urNp linux-2.6.32.41/drivers/oprofile/oprofile_stats.h linux-2.6.32.41/drivers/oprofile/oprofile_stats.h
32644--- linux-2.6.32.41/drivers/oprofile/oprofile_stats.h 2011-03-27 14:31:47.000000000 -0400
32645+++ linux-2.6.32.41/drivers/oprofile/oprofile_stats.h 2011-04-17 15:56:46.000000000 -0400
32646@@ -13,11 +13,11 @@
32647 #include <asm/atomic.h>
32648
32649 struct oprofile_stat_struct {
32650- atomic_t sample_lost_no_mm;
32651- atomic_t sample_lost_no_mapping;
32652- atomic_t bt_lost_no_mapping;
32653- atomic_t event_lost_overflow;
32654- atomic_t multiplex_counter;
32655+ atomic_unchecked_t sample_lost_no_mm;
32656+ atomic_unchecked_t sample_lost_no_mapping;
32657+ atomic_unchecked_t bt_lost_no_mapping;
32658+ atomic_unchecked_t event_lost_overflow;
32659+ atomic_unchecked_t multiplex_counter;
32660 };
32661
32662 extern struct oprofile_stat_struct oprofile_stats;
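
The oprofile hunks above (and the s390 qdio ones below) convert pure statistics counters from atomic_t/atomic_long_t to the *_unchecked variants this patch introduces: under PaX's reference-count overflow protection an ordinary atomic increment is checked, and counters whose overflow is harmless are explicitly opted out. A userspace sketch of the distinction, with made-up helpers that are not the real PaX code:

    #include <stdio.h>
    #include <limits.h>
    #include <stdlib.h>

    static void checked_inc(int *v)       /* models atomic_inc() under REFCOUNT */
    {
            if (*v == INT_MAX) {
                    fprintf(stderr, "refcount overflow detected\n");
                    abort();
            }
            (*v)++;
    }

    static void unchecked_inc(unsigned int *v)  /* models atomic_inc_unchecked() */
    {
            (*v)++;                       /* wrapping is fine for statistics */
    }

    int main(void)
    {
            int ref = INT_MAX - 1;
            unsigned int stat = UINT_MAX;

            checked_inc(&ref);            /* ok, not yet at the limit */
            unchecked_inc(&stat);         /* wraps to 0, by design */
            printf("ref=%d stat=%u\n", ref, stat);
            return 0;
    }
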
32663diff -urNp linux-2.6.32.41/drivers/parisc/pdc_stable.c linux-2.6.32.41/drivers/parisc/pdc_stable.c
32664--- linux-2.6.32.41/drivers/parisc/pdc_stable.c 2011-03-27 14:31:47.000000000 -0400
32665+++ linux-2.6.32.41/drivers/parisc/pdc_stable.c 2011-04-17 15:56:46.000000000 -0400
32666@@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj
32667 return ret;
32668 }
32669
32670-static struct sysfs_ops pdcspath_attr_ops = {
32671+static const struct sysfs_ops pdcspath_attr_ops = {
32672 .show = pdcspath_attr_show,
32673 .store = pdcspath_attr_store,
32674 };
32675diff -urNp linux-2.6.32.41/drivers/parport/procfs.c linux-2.6.32.41/drivers/parport/procfs.c
32676--- linux-2.6.32.41/drivers/parport/procfs.c 2011-03-27 14:31:47.000000000 -0400
32677+++ linux-2.6.32.41/drivers/parport/procfs.c 2011-04-17 15:56:46.000000000 -0400
32678@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
32679
32680 *ppos += len;
32681
32682- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
32683+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
32684 }
32685
32686 #ifdef CONFIG_PARPORT_1284
32687@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
32688
32689 *ppos += len;
32690
32691- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
32692+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
32693 }
32694 #endif /* IEEE1284.3 support. */
32695
32696diff -urNp linux-2.6.32.41/drivers/pci/hotplug/acpiphp_glue.c linux-2.6.32.41/drivers/pci/hotplug/acpiphp_glue.c
32697--- linux-2.6.32.41/drivers/pci/hotplug/acpiphp_glue.c 2011-03-27 14:31:47.000000000 -0400
32698+++ linux-2.6.32.41/drivers/pci/hotplug/acpiphp_glue.c 2011-04-17 15:56:46.000000000 -0400
32699@@ -111,7 +111,7 @@ static int post_dock_fixups(struct notif
32700 }
32701
32702
32703-static struct acpi_dock_ops acpiphp_dock_ops = {
32704+static const struct acpi_dock_ops acpiphp_dock_ops = {
32705 .handler = handle_hotplug_event_func,
32706 };
32707
32708diff -urNp linux-2.6.32.41/drivers/pci/hotplug/cpqphp_nvram.c linux-2.6.32.41/drivers/pci/hotplug/cpqphp_nvram.c
32709--- linux-2.6.32.41/drivers/pci/hotplug/cpqphp_nvram.c 2011-03-27 14:31:47.000000000 -0400
32710+++ linux-2.6.32.41/drivers/pci/hotplug/cpqphp_nvram.c 2011-04-17 15:56:46.000000000 -0400
32711@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
32712
32713 void compaq_nvram_init (void __iomem *rom_start)
32714 {
32715+
32716+#ifndef CONFIG_PAX_KERNEXEC
32717 if (rom_start) {
32718 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
32719 }
32720+#endif
32721+
32722 dbg("int15 entry = %p\n", compaq_int15_entry_point);
32723
32724 /* initialize our int15 lock */
32725diff -urNp linux-2.6.32.41/drivers/pci/hotplug/fakephp.c linux-2.6.32.41/drivers/pci/hotplug/fakephp.c
32726--- linux-2.6.32.41/drivers/pci/hotplug/fakephp.c 2011-03-27 14:31:47.000000000 -0400
32727+++ linux-2.6.32.41/drivers/pci/hotplug/fakephp.c 2011-04-17 15:56:46.000000000 -0400
32728@@ -73,7 +73,7 @@ static void legacy_release(struct kobjec
32729 }
32730
32731 static struct kobj_type legacy_ktype = {
32732- .sysfs_ops = &(struct sysfs_ops){
32733+ .sysfs_ops = &(const struct sysfs_ops){
32734 .store = legacy_store, .show = legacy_show
32735 },
32736 .release = &legacy_release,
32737diff -urNp linux-2.6.32.41/drivers/pci/intel-iommu.c linux-2.6.32.41/drivers/pci/intel-iommu.c
32738--- linux-2.6.32.41/drivers/pci/intel-iommu.c 2011-05-10 22:12:01.000000000 -0400
32739+++ linux-2.6.32.41/drivers/pci/intel-iommu.c 2011-05-10 22:12:33.000000000 -0400
32740@@ -2643,7 +2643,7 @@ error:
32741 return 0;
32742 }
32743
32744-static dma_addr_t intel_map_page(struct device *dev, struct page *page,
32745+dma_addr_t intel_map_page(struct device *dev, struct page *page,
32746 unsigned long offset, size_t size,
32747 enum dma_data_direction dir,
32748 struct dma_attrs *attrs)
32749@@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain
32750 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
32751 }
32752
32753-static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
32754+void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
32755 size_t size, enum dma_data_direction dir,
32756 struct dma_attrs *attrs)
32757 {
32758@@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct devi
32759 }
32760 }
32761
32762-static void *intel_alloc_coherent(struct device *hwdev, size_t size,
32763+void *intel_alloc_coherent(struct device *hwdev, size_t size,
32764 dma_addr_t *dma_handle, gfp_t flags)
32765 {
32766 void *vaddr;
32767@@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct
32768 return NULL;
32769 }
32770
32771-static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
32772+void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
32773 dma_addr_t dma_handle)
32774 {
32775 int order;
32776@@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct d
32777 free_pages((unsigned long)vaddr, order);
32778 }
32779
32780-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
32781+void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
32782 int nelems, enum dma_data_direction dir,
32783 struct dma_attrs *attrs)
32784 {
32785@@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(str
32786 return nelems;
32787 }
32788
32789-static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
32790+int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
32791 enum dma_data_direction dir, struct dma_attrs *attrs)
32792 {
32793 int i;
32794@@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *h
32795 return nelems;
32796 }
32797
32798-static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
32799+int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
32800 {
32801 return !dma_addr;
32802 }
32803
32804-struct dma_map_ops intel_dma_ops = {
32805+const struct dma_map_ops intel_dma_ops = {
32806 .alloc_coherent = intel_alloc_coherent,
32807 .free_coherent = intel_free_coherent,
32808 .map_sg = intel_map_sg,
32809diff -urNp linux-2.6.32.41/drivers/pci/pcie/aspm.c linux-2.6.32.41/drivers/pci/pcie/aspm.c
32810--- linux-2.6.32.41/drivers/pci/pcie/aspm.c 2011-03-27 14:31:47.000000000 -0400
32811+++ linux-2.6.32.41/drivers/pci/pcie/aspm.c 2011-04-17 15:56:46.000000000 -0400
32812@@ -27,9 +27,9 @@
32813 #define MODULE_PARAM_PREFIX "pcie_aspm."
32814
32815 /* Note: those are not register definitions */
32816-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
32817-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
32818-#define ASPM_STATE_L1 (4) /* L1 state */
32819+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
32820+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
32821+#define ASPM_STATE_L1 (4U) /* L1 state */
32822 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
32823 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
32824
32825diff -urNp linux-2.6.32.41/drivers/pci/probe.c linux-2.6.32.41/drivers/pci/probe.c
32826--- linux-2.6.32.41/drivers/pci/probe.c 2011-03-27 14:31:47.000000000 -0400
32827+++ linux-2.6.32.41/drivers/pci/probe.c 2011-04-17 15:56:46.000000000 -0400
32828@@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(
32829 return ret;
32830 }
32831
32832-static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
32833+static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
32834 struct device_attribute *attr,
32835 char *buf)
32836 {
32837 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
32838 }
32839
32840-static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
32841+static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
32842 struct device_attribute *attr,
32843 char *buf)
32844 {
32845diff -urNp linux-2.6.32.41/drivers/pci/proc.c linux-2.6.32.41/drivers/pci/proc.c
32846--- linux-2.6.32.41/drivers/pci/proc.c 2011-03-27 14:31:47.000000000 -0400
32847+++ linux-2.6.32.41/drivers/pci/proc.c 2011-04-17 15:56:46.000000000 -0400
32848@@ -480,7 +480,16 @@ static const struct file_operations proc
32849 static int __init pci_proc_init(void)
32850 {
32851 struct pci_dev *dev = NULL;
32852+
32853+#ifdef CONFIG_GRKERNSEC_PROC_ADD
32854+#ifdef CONFIG_GRKERNSEC_PROC_USER
32855+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
32856+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
32857+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
32858+#endif
32859+#else
32860 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
32861+#endif
32862 proc_create("devices", 0, proc_bus_pci_dir,
32863 &proc_bus_pci_dev_operations);
32864 proc_initialized = 1;
32865diff -urNp linux-2.6.32.41/drivers/pci/slot.c linux-2.6.32.41/drivers/pci/slot.c
32866--- linux-2.6.32.41/drivers/pci/slot.c 2011-03-27 14:31:47.000000000 -0400
32867+++ linux-2.6.32.41/drivers/pci/slot.c 2011-04-17 15:56:46.000000000 -0400
32868@@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struc
32869 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
32870 }
32871
32872-static struct sysfs_ops pci_slot_sysfs_ops = {
32873+static const struct sysfs_ops pci_slot_sysfs_ops = {
32874 .show = pci_slot_attr_show,
32875 .store = pci_slot_attr_store,
32876 };
32877diff -urNp linux-2.6.32.41/drivers/pcmcia/pcmcia_ioctl.c linux-2.6.32.41/drivers/pcmcia/pcmcia_ioctl.c
32878--- linux-2.6.32.41/drivers/pcmcia/pcmcia_ioctl.c 2011-03-27 14:31:47.000000000 -0400
32879+++ linux-2.6.32.41/drivers/pcmcia/pcmcia_ioctl.c 2011-04-17 15:56:46.000000000 -0400
32880@@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode
32881 return -EFAULT;
32882 }
32883 }
32884- buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
32885+ buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
32886 if (!buf)
32887 return -ENOMEM;
32888
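
The pcmcia_ioctl hunk switches the ioctl argument buffer from kmalloc() to kzalloc(), so fields the handler never fills are zero rather than stale heap contents when the structure is later copied back to userspace. The userspace equivalent of the change is calloc() over malloc():

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            size_t sz = 64;
            unsigned char *buf = calloc(1, sz);   /* kzalloc() analogue */

            if (!buf)
                    return 1;
            printf("first byte after allocation: %u\n", buf[0]);  /* always 0 */
            free(buf);
            return 0;
    }
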
32889diff -urNp linux-2.6.32.41/drivers/platform/x86/acer-wmi.c linux-2.6.32.41/drivers/platform/x86/acer-wmi.c
32890--- linux-2.6.32.41/drivers/platform/x86/acer-wmi.c 2011-03-27 14:31:47.000000000 -0400
32891+++ linux-2.6.32.41/drivers/platform/x86/acer-wmi.c 2011-04-17 15:56:46.000000000 -0400
32892@@ -918,7 +918,7 @@ static int update_bl_status(struct backl
32893 return 0;
32894 }
32895
32896-static struct backlight_ops acer_bl_ops = {
32897+static const struct backlight_ops acer_bl_ops = {
32898 .get_brightness = read_brightness,
32899 .update_status = update_bl_status,
32900 };
32901diff -urNp linux-2.6.32.41/drivers/platform/x86/asus_acpi.c linux-2.6.32.41/drivers/platform/x86/asus_acpi.c
32902--- linux-2.6.32.41/drivers/platform/x86/asus_acpi.c 2011-03-27 14:31:47.000000000 -0400
32903+++ linux-2.6.32.41/drivers/platform/x86/asus_acpi.c 2011-04-17 15:56:46.000000000 -0400
32904@@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_
32905 return 0;
32906 }
32907
32908-static struct backlight_ops asus_backlight_data = {
32909+static const struct backlight_ops asus_backlight_data = {
32910 .get_brightness = read_brightness,
32911 .update_status = set_brightness_status,
32912 };
32913diff -urNp linux-2.6.32.41/drivers/platform/x86/asus-laptop.c linux-2.6.32.41/drivers/platform/x86/asus-laptop.c
32914--- linux-2.6.32.41/drivers/platform/x86/asus-laptop.c 2011-03-27 14:31:47.000000000 -0400
32915+++ linux-2.6.32.41/drivers/platform/x86/asus-laptop.c 2011-04-17 15:56:46.000000000 -0400
32916@@ -250,7 +250,7 @@ static struct backlight_device *asus_bac
32917 */
32918 static int read_brightness(struct backlight_device *bd);
32919 static int update_bl_status(struct backlight_device *bd);
32920-static struct backlight_ops asusbl_ops = {
32921+static const struct backlight_ops asusbl_ops = {
32922 .get_brightness = read_brightness,
32923 .update_status = update_bl_status,
32924 };
32925diff -urNp linux-2.6.32.41/drivers/platform/x86/compal-laptop.c linux-2.6.32.41/drivers/platform/x86/compal-laptop.c
32926--- linux-2.6.32.41/drivers/platform/x86/compal-laptop.c 2011-03-27 14:31:47.000000000 -0400
32927+++ linux-2.6.32.41/drivers/platform/x86/compal-laptop.c 2011-04-17 15:56:46.000000000 -0400
32928@@ -163,7 +163,7 @@ static int bl_update_status(struct backl
32929 return set_lcd_level(b->props.brightness);
32930 }
32931
32932-static struct backlight_ops compalbl_ops = {
32933+static const struct backlight_ops compalbl_ops = {
32934 .get_brightness = bl_get_brightness,
32935 .update_status = bl_update_status,
32936 };
32937diff -urNp linux-2.6.32.41/drivers/platform/x86/dell-laptop.c linux-2.6.32.41/drivers/platform/x86/dell-laptop.c
32938--- linux-2.6.32.41/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:01.000000000 -0400
32939+++ linux-2.6.32.41/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:33.000000000 -0400
32940@@ -318,7 +318,7 @@ static int dell_get_intensity(struct bac
32941 return buffer.output[1];
32942 }
32943
32944-static struct backlight_ops dell_ops = {
32945+static const struct backlight_ops dell_ops = {
32946 .get_brightness = dell_get_intensity,
32947 .update_status = dell_send_intensity,
32948 };
32949diff -urNp linux-2.6.32.41/drivers/platform/x86/eeepc-laptop.c linux-2.6.32.41/drivers/platform/x86/eeepc-laptop.c
32950--- linux-2.6.32.41/drivers/platform/x86/eeepc-laptop.c 2011-03-27 14:31:47.000000000 -0400
32951+++ linux-2.6.32.41/drivers/platform/x86/eeepc-laptop.c 2011-04-17 15:56:46.000000000 -0400
32952@@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device
32953 */
32954 static int read_brightness(struct backlight_device *bd);
32955 static int update_bl_status(struct backlight_device *bd);
32956-static struct backlight_ops eeepcbl_ops = {
32957+static const struct backlight_ops eeepcbl_ops = {
32958 .get_brightness = read_brightness,
32959 .update_status = update_bl_status,
32960 };
32961diff -urNp linux-2.6.32.41/drivers/platform/x86/fujitsu-laptop.c linux-2.6.32.41/drivers/platform/x86/fujitsu-laptop.c
32962--- linux-2.6.32.41/drivers/platform/x86/fujitsu-laptop.c 2011-03-27 14:31:47.000000000 -0400
32963+++ linux-2.6.32.41/drivers/platform/x86/fujitsu-laptop.c 2011-04-17 15:56:46.000000000 -0400
32964@@ -436,7 +436,7 @@ static int bl_update_status(struct backl
32965 return ret;
32966 }
32967
32968-static struct backlight_ops fujitsubl_ops = {
32969+static const struct backlight_ops fujitsubl_ops = {
32970 .get_brightness = bl_get_brightness,
32971 .update_status = bl_update_status,
32972 };
32973diff -urNp linux-2.6.32.41/drivers/platform/x86/msi-laptop.c linux-2.6.32.41/drivers/platform/x86/msi-laptop.c
32974--- linux-2.6.32.41/drivers/platform/x86/msi-laptop.c 2011-03-27 14:31:47.000000000 -0400
32975+++ linux-2.6.32.41/drivers/platform/x86/msi-laptop.c 2011-04-17 15:56:46.000000000 -0400
32976@@ -161,7 +161,7 @@ static int bl_update_status(struct backl
32977 return set_lcd_level(b->props.brightness);
32978 }
32979
32980-static struct backlight_ops msibl_ops = {
32981+static const struct backlight_ops msibl_ops = {
32982 .get_brightness = bl_get_brightness,
32983 .update_status = bl_update_status,
32984 };
32985diff -urNp linux-2.6.32.41/drivers/platform/x86/panasonic-laptop.c linux-2.6.32.41/drivers/platform/x86/panasonic-laptop.c
32986--- linux-2.6.32.41/drivers/platform/x86/panasonic-laptop.c 2011-03-27 14:31:47.000000000 -0400
32987+++ linux-2.6.32.41/drivers/platform/x86/panasonic-laptop.c 2011-04-17 15:56:46.000000000 -0400
32988@@ -352,7 +352,7 @@ static int bl_set_status(struct backligh
32989 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
32990 }
32991
32992-static struct backlight_ops pcc_backlight_ops = {
32993+static const struct backlight_ops pcc_backlight_ops = {
32994 .get_brightness = bl_get,
32995 .update_status = bl_set_status,
32996 };
32997diff -urNp linux-2.6.32.41/drivers/platform/x86/sony-laptop.c linux-2.6.32.41/drivers/platform/x86/sony-laptop.c
32998--- linux-2.6.32.41/drivers/platform/x86/sony-laptop.c 2011-03-27 14:31:47.000000000 -0400
32999+++ linux-2.6.32.41/drivers/platform/x86/sony-laptop.c 2011-04-17 15:56:46.000000000 -0400
33000@@ -850,7 +850,7 @@ static int sony_backlight_get_brightness
33001 }
33002
33003 static struct backlight_device *sony_backlight_device;
33004-static struct backlight_ops sony_backlight_ops = {
33005+static const struct backlight_ops sony_backlight_ops = {
33006 .update_status = sony_backlight_update_status,
33007 .get_brightness = sony_backlight_get_brightness,
33008 };
33009diff -urNp linux-2.6.32.41/drivers/platform/x86/thinkpad_acpi.c linux-2.6.32.41/drivers/platform/x86/thinkpad_acpi.c
33010--- linux-2.6.32.41/drivers/platform/x86/thinkpad_acpi.c 2011-03-27 14:31:47.000000000 -0400
33011+++ linux-2.6.32.41/drivers/platform/x86/thinkpad_acpi.c 2011-04-17 15:56:46.000000000 -0400
33012@@ -6122,7 +6122,7 @@ static void tpacpi_brightness_notify_cha
33013 BACKLIGHT_UPDATE_HOTKEY);
33014 }
33015
33016-static struct backlight_ops ibm_backlight_data = {
33017+static const struct backlight_ops ibm_backlight_data = {
33018 .get_brightness = brightness_get,
33019 .update_status = brightness_update_status,
33020 };
33021diff -urNp linux-2.6.32.41/drivers/platform/x86/toshiba_acpi.c linux-2.6.32.41/drivers/platform/x86/toshiba_acpi.c
33022--- linux-2.6.32.41/drivers/platform/x86/toshiba_acpi.c 2011-03-27 14:31:47.000000000 -0400
33023+++ linux-2.6.32.41/drivers/platform/x86/toshiba_acpi.c 2011-04-17 15:56:46.000000000 -0400
33024@@ -671,7 +671,7 @@ static acpi_status remove_device(void)
33025 return AE_OK;
33026 }
33027
33028-static struct backlight_ops toshiba_backlight_data = {
33029+static const struct backlight_ops toshiba_backlight_data = {
33030 .get_brightness = get_lcd,
33031 .update_status = set_lcd_status,
33032 };
33033diff -urNp linux-2.6.32.41/drivers/pnp/pnpbios/bioscalls.c linux-2.6.32.41/drivers/pnp/pnpbios/bioscalls.c
33034--- linux-2.6.32.41/drivers/pnp/pnpbios/bioscalls.c 2011-03-27 14:31:47.000000000 -0400
33035+++ linux-2.6.32.41/drivers/pnp/pnpbios/bioscalls.c 2011-04-17 15:56:46.000000000 -0400
33036@@ -60,7 +60,7 @@ do { \
33037 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
33038 } while(0)
33039
33040-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
33041+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
33042 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
33043
33044 /*
33045@@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func
33046
33047 cpu = get_cpu();
33048 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
33049+
33050+ pax_open_kernel();
33051 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
33052+ pax_close_kernel();
33053
33054 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
33055 spin_lock_irqsave(&pnp_bios_lock, flags);
33056@@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func
33057 :"memory");
33058 spin_unlock_irqrestore(&pnp_bios_lock, flags);
33059
33060+ pax_open_kernel();
33061 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
33062+ pax_close_kernel();
33063+
33064 put_cpu();
33065
33066 /* If we get here and this is set then the PnP BIOS faulted on us. */
33067@@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 n
33068 return status;
33069 }
33070
33071-void pnpbios_calls_init(union pnp_bios_install_struct *header)
33072+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
33073 {
33074 int i;
33075
33076@@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_i
33077 pnp_bios_callpoint.offset = header->fields.pm16offset;
33078 pnp_bios_callpoint.segment = PNP_CS16;
33079
33080+ pax_open_kernel();
33081+
33082 for_each_possible_cpu(i) {
33083 struct desc_struct *gdt = get_cpu_gdt_table(i);
33084 if (!gdt)
33085@@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_i
33086 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
33087 (unsigned long)__va(header->fields.pm16dseg));
33088 }
33089+
33090+ pax_close_kernel();
33091 }
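
The pnpbios hunks wrap the GDT updates in pax_open_kernel()/pax_close_kernel(), the pairing this patch uses whenever normally read-only kernel data has to be modified deliberately; bad_bios_desc is also made const, and its type byte changes from 0x4092 to 0x4093, presumably pre-setting the accessed bit so the CPU never needs to write into the now read-only descriptor. A userspace analogue of the open/write/close idea using mprotect(), not the real mechanism:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            long page = sysconf(_SC_PAGESIZE);
            char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (p == MAP_FAILED)
                    return 1;
            strcpy(p, "descriptor v1");
            mprotect(p, page, PROT_READ);              /* normal state: read-only */

            mprotect(p, page, PROT_READ | PROT_WRITE); /* "pax_open_kernel()"     */
            strcpy(p, "descriptor v2");                /* the deliberate update   */
            mprotect(p, page, PROT_READ);              /* "pax_close_kernel()"    */

            puts(p);
            munmap(p, page);
            return 0;
    }
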
33092diff -urNp linux-2.6.32.41/drivers/pnp/resource.c linux-2.6.32.41/drivers/pnp/resource.c
33093--- linux-2.6.32.41/drivers/pnp/resource.c 2011-03-27 14:31:47.000000000 -0400
33094+++ linux-2.6.32.41/drivers/pnp/resource.c 2011-04-17 15:56:46.000000000 -0400
33095@@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
33096 return 1;
33097
33098 /* check if the resource is valid */
33099- if (*irq < 0 || *irq > 15)
33100+ if (*irq > 15)
33101 return 0;
33102
33103 /* check if the resource is reserved */
33104@@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
33105 return 1;
33106
33107 /* check if the resource is valid */
33108- if (*dma < 0 || *dma == 4 || *dma > 7)
33109+ if (*dma == 4 || *dma > 7)
33110 return 0;
33111
33112 /* check if the resource is reserved */
33113diff -urNp linux-2.6.32.41/drivers/rtc/rtc-dev.c linux-2.6.32.41/drivers/rtc/rtc-dev.c
33114--- linux-2.6.32.41/drivers/rtc/rtc-dev.c 2011-03-27 14:31:47.000000000 -0400
33115+++ linux-2.6.32.41/drivers/rtc/rtc-dev.c 2011-04-17 15:56:46.000000000 -0400
33116@@ -14,6 +14,7 @@
33117 #include <linux/module.h>
33118 #include <linux/rtc.h>
33119 #include <linux/sched.h>
33120+#include <linux/grsecurity.h>
33121 #include "rtc-core.h"
33122
33123 static dev_t rtc_devt;
33124@@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *f
33125 if (copy_from_user(&tm, uarg, sizeof(tm)))
33126 return -EFAULT;
33127
33128+ gr_log_timechange();
33129+
33130 return rtc_set_time(rtc, &tm);
33131
33132 case RTC_PIE_ON:
33133diff -urNp linux-2.6.32.41/drivers/s390/cio/qdio_perf.c linux-2.6.32.41/drivers/s390/cio/qdio_perf.c
33134--- linux-2.6.32.41/drivers/s390/cio/qdio_perf.c 2011-03-27 14:31:47.000000000 -0400
33135+++ linux-2.6.32.41/drivers/s390/cio/qdio_perf.c 2011-04-17 15:56:46.000000000 -0400
33136@@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_
33137 static int qdio_perf_proc_show(struct seq_file *m, void *v)
33138 {
33139 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
33140- (long)atomic_long_read(&perf_stats.qdio_int));
33141+ (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
33142 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
33143- (long)atomic_long_read(&perf_stats.pci_int));
33144+ (long)atomic_long_read_unchecked(&perf_stats.pci_int));
33145 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
33146- (long)atomic_long_read(&perf_stats.thin_int));
33147+ (long)atomic_long_read_unchecked(&perf_stats.thin_int));
33148 seq_printf(m, "\n");
33149 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
33150- (long)atomic_long_read(&perf_stats.tasklet_inbound));
33151+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
33152 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
33153- (long)atomic_long_read(&perf_stats.tasklet_outbound));
33154+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
33155 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
33156- (long)atomic_long_read(&perf_stats.tasklet_thinint),
33157- (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
33158+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
33159+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
33160 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
33161- (long)atomic_long_read(&perf_stats.thinint_inbound),
33162- (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
33163+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
33164+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
33165 seq_printf(m, "\n");
33166 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
33167- (long)atomic_long_read(&perf_stats.siga_in));
33168+ (long)atomic_long_read_unchecked(&perf_stats.siga_in));
33169 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
33170- (long)atomic_long_read(&perf_stats.siga_out));
33171+ (long)atomic_long_read_unchecked(&perf_stats.siga_out));
33172 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
33173- (long)atomic_long_read(&perf_stats.siga_sync));
33174+ (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
33175 seq_printf(m, "\n");
33176 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
33177- (long)atomic_long_read(&perf_stats.inbound_handler));
33178+ (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
33179 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
33180- (long)atomic_long_read(&perf_stats.outbound_handler));
33181+ (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
33182 seq_printf(m, "\n");
33183 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
33184- (long)atomic_long_read(&perf_stats.fast_requeue));
33185+ (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
33186 seq_printf(m, "Number of outbound target full condition\t: %li\n",
33187- (long)atomic_long_read(&perf_stats.outbound_target_full));
33188+ (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
33189 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
33190- (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
33191+ (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
33192 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
33193- (long)atomic_long_read(&perf_stats.debug_stop_polling));
33194+ (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
33195 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
33196- (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
33197+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
33198 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
33199- (long)atomic_long_read(&perf_stats.debug_eqbs_all),
33200- (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
33201+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
33202+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
33203 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
33204- (long)atomic_long_read(&perf_stats.debug_sqbs_all),
33205- (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
33206+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
33207+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
33208 seq_printf(m, "\n");
33209 return 0;
33210 }
33211diff -urNp linux-2.6.32.41/drivers/s390/cio/qdio_perf.h linux-2.6.32.41/drivers/s390/cio/qdio_perf.h
33212--- linux-2.6.32.41/drivers/s390/cio/qdio_perf.h 2011-03-27 14:31:47.000000000 -0400
33213+++ linux-2.6.32.41/drivers/s390/cio/qdio_perf.h 2011-04-17 15:56:46.000000000 -0400
33214@@ -13,46 +13,46 @@
33215
33216 struct qdio_perf_stats {
33217 /* interrupt handler calls */
33218- atomic_long_t qdio_int;
33219- atomic_long_t pci_int;
33220- atomic_long_t thin_int;
33221+ atomic_long_unchecked_t qdio_int;
33222+ atomic_long_unchecked_t pci_int;
33223+ atomic_long_unchecked_t thin_int;
33224
33225 /* tasklet runs */
33226- atomic_long_t tasklet_inbound;
33227- atomic_long_t tasklet_outbound;
33228- atomic_long_t tasklet_thinint;
33229- atomic_long_t tasklet_thinint_loop;
33230- atomic_long_t thinint_inbound;
33231- atomic_long_t thinint_inbound_loop;
33232- atomic_long_t thinint_inbound_loop2;
33233+ atomic_long_unchecked_t tasklet_inbound;
33234+ atomic_long_unchecked_t tasklet_outbound;
33235+ atomic_long_unchecked_t tasklet_thinint;
33236+ atomic_long_unchecked_t tasklet_thinint_loop;
33237+ atomic_long_unchecked_t thinint_inbound;
33238+ atomic_long_unchecked_t thinint_inbound_loop;
33239+ atomic_long_unchecked_t thinint_inbound_loop2;
33240
33241 /* signal adapter calls */
33242- atomic_long_t siga_out;
33243- atomic_long_t siga_in;
33244- atomic_long_t siga_sync;
33245+ atomic_long_unchecked_t siga_out;
33246+ atomic_long_unchecked_t siga_in;
33247+ atomic_long_unchecked_t siga_sync;
33248
33249 /* misc */
33250- atomic_long_t inbound_handler;
33251- atomic_long_t outbound_handler;
33252- atomic_long_t fast_requeue;
33253- atomic_long_t outbound_target_full;
33254+ atomic_long_unchecked_t inbound_handler;
33255+ atomic_long_unchecked_t outbound_handler;
33256+ atomic_long_unchecked_t fast_requeue;
33257+ atomic_long_unchecked_t outbound_target_full;
33258
33259 /* for debugging */
33260- atomic_long_t debug_tl_out_timer;
33261- atomic_long_t debug_stop_polling;
33262- atomic_long_t debug_eqbs_all;
33263- atomic_long_t debug_eqbs_incomplete;
33264- atomic_long_t debug_sqbs_all;
33265- atomic_long_t debug_sqbs_incomplete;
33266+ atomic_long_unchecked_t debug_tl_out_timer;
33267+ atomic_long_unchecked_t debug_stop_polling;
33268+ atomic_long_unchecked_t debug_eqbs_all;
33269+ atomic_long_unchecked_t debug_eqbs_incomplete;
33270+ atomic_long_unchecked_t debug_sqbs_all;
33271+ atomic_long_unchecked_t debug_sqbs_incomplete;
33272 };
33273
33274 extern struct qdio_perf_stats perf_stats;
33275 extern int qdio_performance_stats;
33276
33277-static inline void qdio_perf_stat_inc(atomic_long_t *count)
33278+static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
33279 {
33280 if (qdio_performance_stats)
33281- atomic_long_inc(count);
33282+ atomic_long_inc_unchecked(count);
33283 }
33284
33285 int qdio_setup_perf_stats(void);
33286diff -urNp linux-2.6.32.41/drivers/scsi/aacraid/commctrl.c linux-2.6.32.41/drivers/scsi/aacraid/commctrl.c
33287--- linux-2.6.32.41/drivers/scsi/aacraid/commctrl.c 2011-03-27 14:31:47.000000000 -0400
33288+++ linux-2.6.32.41/drivers/scsi/aacraid/commctrl.c 2011-05-16 21:46:57.000000000 -0400
33289@@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_d
33290 u32 actual_fibsize64, actual_fibsize = 0;
33291 int i;
33292
33293+ pax_track_stack();
33294
33295 if (dev->in_reset) {
33296 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
33297diff -urNp linux-2.6.32.41/drivers/scsi/aic94xx/aic94xx_init.c linux-2.6.32.41/drivers/scsi/aic94xx/aic94xx_init.c
33298--- linux-2.6.32.41/drivers/scsi/aic94xx/aic94xx_init.c 2011-03-27 14:31:47.000000000 -0400
33299+++ linux-2.6.32.41/drivers/scsi/aic94xx/aic94xx_init.c 2011-04-17 15:56:46.000000000 -0400
33300@@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(stru
33301 flash_error_table[i].reason);
33302 }
33303
33304-static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
33305+static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
33306 asd_show_update_bios, asd_store_update_bios);
33307
33308 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
33309diff -urNp linux-2.6.32.41/drivers/scsi/BusLogic.c linux-2.6.32.41/drivers/scsi/BusLogic.c
33310--- linux-2.6.32.41/drivers/scsi/BusLogic.c 2011-03-27 14:31:47.000000000 -0400
33311+++ linux-2.6.32.41/drivers/scsi/BusLogic.c 2011-05-16 21:46:57.000000000 -0400
33312@@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFla
33313 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
33314 *PrototypeHostAdapter)
33315 {
33316+ pax_track_stack();
33317+
33318 /*
33319 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
33320 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
33321diff -urNp linux-2.6.32.41/drivers/scsi/dpt_i2o.c linux-2.6.32.41/drivers/scsi/dpt_i2o.c
33322--- linux-2.6.32.41/drivers/scsi/dpt_i2o.c 2011-03-27 14:31:47.000000000 -0400
33323+++ linux-2.6.32.41/drivers/scsi/dpt_i2o.c 2011-05-16 21:46:57.000000000 -0400
33324@@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* p
33325 dma_addr_t addr;
33326 ulong flags = 0;
33327
33328+ pax_track_stack();
33329+
33330 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
33331 // get user msg size in u32s
33332 if(get_user(size, &user_msg[0])){
33333@@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
33334 s32 rcode;
33335 dma_addr_t addr;
33336
33337+ pax_track_stack();
33338+
33339 memset(msg, 0 , sizeof(msg));
33340 len = scsi_bufflen(cmd);
33341 direction = 0x00000000;
33342diff -urNp linux-2.6.32.41/drivers/scsi/eata.c linux-2.6.32.41/drivers/scsi/eata.c
33343--- linux-2.6.32.41/drivers/scsi/eata.c 2011-03-27 14:31:47.000000000 -0400
33344+++ linux-2.6.32.41/drivers/scsi/eata.c 2011-05-16 21:46:57.000000000 -0400
33345@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
33346 struct hostdata *ha;
33347 char name[16];
33348
33349+ pax_track_stack();
33350+
33351 sprintf(name, "%s%d", driver_name, j);
33352
33353 if (!request_region(port_base, REGION_SIZE, driver_name)) {
33354diff -urNp linux-2.6.32.41/drivers/scsi/fcoe/libfcoe.c linux-2.6.32.41/drivers/scsi/fcoe/libfcoe.c
33355--- linux-2.6.32.41/drivers/scsi/fcoe/libfcoe.c 2011-03-27 14:31:47.000000000 -0400
33356+++ linux-2.6.32.41/drivers/scsi/fcoe/libfcoe.c 2011-05-16 21:46:57.000000000 -0400
33357@@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fc
33358 size_t rlen;
33359 size_t dlen;
33360
33361+ pax_track_stack();
33362+
33363 fiph = (struct fip_header *)skb->data;
33364 sub = fiph->fip_subcode;
33365 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
33366diff -urNp linux-2.6.32.41/drivers/scsi/gdth.c linux-2.6.32.41/drivers/scsi/gdth.c
33367--- linux-2.6.32.41/drivers/scsi/gdth.c 2011-03-27 14:31:47.000000000 -0400
33368+++ linux-2.6.32.41/drivers/scsi/gdth.c 2011-05-16 21:46:57.000000000 -0400
33369@@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
33370 ulong flags;
33371 gdth_ha_str *ha;
33372
33373+ pax_track_stack();
33374+
33375 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
33376 return -EFAULT;
33377 ha = gdth_find_ha(ldrv.ionode);
33378@@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg
33379 gdth_ha_str *ha;
33380 int rval;
33381
33382+ pax_track_stack();
33383+
33384 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
33385 res.number >= MAX_HDRIVES)
33386 return -EFAULT;
33387@@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg,
33388 gdth_ha_str *ha;
33389 int rval;
33390
33391+ pax_track_stack();
33392+
33393 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
33394 return -EFAULT;
33395 ha = gdth_find_ha(gen.ionode);
33396@@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
33397 int i;
33398 gdth_cmd_str gdtcmd;
33399 char cmnd[MAX_COMMAND_SIZE];
33400+
33401+ pax_track_stack();
33402+
33403 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
33404
33405 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
33406diff -urNp linux-2.6.32.41/drivers/scsi/gdth_proc.c linux-2.6.32.41/drivers/scsi/gdth_proc.c
33407--- linux-2.6.32.41/drivers/scsi/gdth_proc.c 2011-03-27 14:31:47.000000000 -0400
33408+++ linux-2.6.32.41/drivers/scsi/gdth_proc.c 2011-05-16 21:46:57.000000000 -0400
33409@@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi
33410 ulong64 paddr;
33411
33412 char cmnd[MAX_COMMAND_SIZE];
33413+
33414+ pax_track_stack();
33415+
33416 memset(cmnd, 0xff, 12);
33417 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
33418
33419@@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,ch
33420 gdth_hget_str *phg;
33421 char cmnd[MAX_COMMAND_SIZE];
33422
33423+ pax_track_stack();
33424+
33425 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
33426 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
33427 if (!gdtcmd || !estr)
33428diff -urNp linux-2.6.32.41/drivers/scsi/hosts.c linux-2.6.32.41/drivers/scsi/hosts.c
33429--- linux-2.6.32.41/drivers/scsi/hosts.c 2011-03-27 14:31:47.000000000 -0400
33430+++ linux-2.6.32.41/drivers/scsi/hosts.c 2011-05-04 17:56:28.000000000 -0400
33431@@ -40,7 +40,7 @@
33432 #include "scsi_logging.h"
33433
33434
33435-static atomic_t scsi_host_next_hn; /* host_no for next new host */
33436+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
33437
33438
33439 static void scsi_host_cls_release(struct device *dev)
33440@@ -344,7 +344,7 @@ struct Scsi_Host *scsi_host_alloc(struct
33441 * subtract one because we increment first then return, but we need to
33442 * know what the next host number was before increment
33443 */
33444- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
33445+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
33446 shost->dma_channel = 0xff;
33447
33448 /* These three are default values which can be overridden */
33449diff -urNp linux-2.6.32.41/drivers/scsi/ipr.c linux-2.6.32.41/drivers/scsi/ipr.c
33450--- linux-2.6.32.41/drivers/scsi/ipr.c 2011-03-27 14:31:47.000000000 -0400
33451+++ linux-2.6.32.41/drivers/scsi/ipr.c 2011-04-17 15:56:46.000000000 -0400
33452@@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_q
33453 return true;
33454 }
33455
33456-static struct ata_port_operations ipr_sata_ops = {
33457+static const struct ata_port_operations ipr_sata_ops = {
33458 .phy_reset = ipr_ata_phy_reset,
33459 .hardreset = ipr_sata_reset,
33460 .post_internal_cmd = ipr_ata_post_internal,
33461diff -urNp linux-2.6.32.41/drivers/scsi/libfc/fc_exch.c linux-2.6.32.41/drivers/scsi/libfc/fc_exch.c
33462--- linux-2.6.32.41/drivers/scsi/libfc/fc_exch.c 2011-03-27 14:31:47.000000000 -0400
33463+++ linux-2.6.32.41/drivers/scsi/libfc/fc_exch.c 2011-04-17 15:56:46.000000000 -0400
33464@@ -86,12 +86,12 @@ struct fc_exch_mgr {
33465 * all together if not used XXX
33466 */
33467 struct {
33468- atomic_t no_free_exch;
33469- atomic_t no_free_exch_xid;
33470- atomic_t xid_not_found;
33471- atomic_t xid_busy;
33472- atomic_t seq_not_found;
33473- atomic_t non_bls_resp;
33474+ atomic_unchecked_t no_free_exch;
33475+ atomic_unchecked_t no_free_exch_xid;
33476+ atomic_unchecked_t xid_not_found;
33477+ atomic_unchecked_t xid_busy;
33478+ atomic_unchecked_t seq_not_found;
33479+ atomic_unchecked_t non_bls_resp;
33480 } stats;
33481 };
33482 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
33483@@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(
33484 /* allocate memory for exchange */
33485 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
33486 if (!ep) {
33487- atomic_inc(&mp->stats.no_free_exch);
33488+ atomic_inc_unchecked(&mp->stats.no_free_exch);
33489 goto out;
33490 }
33491 memset(ep, 0, sizeof(*ep));
33492@@ -557,7 +557,7 @@ out:
33493 return ep;
33494 err:
33495 spin_unlock_bh(&pool->lock);
33496- atomic_inc(&mp->stats.no_free_exch_xid);
33497+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
33498 mempool_free(ep, mp->ep_pool);
33499 return NULL;
33500 }
33501@@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33502 xid = ntohs(fh->fh_ox_id); /* we originated exch */
33503 ep = fc_exch_find(mp, xid);
33504 if (!ep) {
33505- atomic_inc(&mp->stats.xid_not_found);
33506+ atomic_inc_unchecked(&mp->stats.xid_not_found);
33507 reject = FC_RJT_OX_ID;
33508 goto out;
33509 }
33510@@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33511 ep = fc_exch_find(mp, xid);
33512 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
33513 if (ep) {
33514- atomic_inc(&mp->stats.xid_busy);
33515+ atomic_inc_unchecked(&mp->stats.xid_busy);
33516 reject = FC_RJT_RX_ID;
33517 goto rel;
33518 }
33519@@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33520 }
33521 xid = ep->xid; /* get our XID */
33522 } else if (!ep) {
33523- atomic_inc(&mp->stats.xid_not_found);
33524+ atomic_inc_unchecked(&mp->stats.xid_not_found);
33525 reject = FC_RJT_RX_ID; /* XID not found */
33526 goto out;
33527 }
33528@@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33529 } else {
33530 sp = &ep->seq;
33531 if (sp->id != fh->fh_seq_id) {
33532- atomic_inc(&mp->stats.seq_not_found);
33533+ atomic_inc_unchecked(&mp->stats.seq_not_found);
33534 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
33535 goto rel;
33536 }
33537@@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct
33538
33539 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
33540 if (!ep) {
33541- atomic_inc(&mp->stats.xid_not_found);
33542+ atomic_inc_unchecked(&mp->stats.xid_not_found);
33543 goto out;
33544 }
33545 if (ep->esb_stat & ESB_ST_COMPLETE) {
33546- atomic_inc(&mp->stats.xid_not_found);
33547+ atomic_inc_unchecked(&mp->stats.xid_not_found);
33548 goto out;
33549 }
33550 if (ep->rxid == FC_XID_UNKNOWN)
33551 ep->rxid = ntohs(fh->fh_rx_id);
33552 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
33553- atomic_inc(&mp->stats.xid_not_found);
33554+ atomic_inc_unchecked(&mp->stats.xid_not_found);
33555 goto rel;
33556 }
33557 if (ep->did != ntoh24(fh->fh_s_id) &&
33558 ep->did != FC_FID_FLOGI) {
33559- atomic_inc(&mp->stats.xid_not_found);
33560+ atomic_inc_unchecked(&mp->stats.xid_not_found);
33561 goto rel;
33562 }
33563 sof = fr_sof(fp);
33564@@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct
33565 } else {
33566 sp = &ep->seq;
33567 if (sp->id != fh->fh_seq_id) {
33568- atomic_inc(&mp->stats.seq_not_found);
33569+ atomic_inc_unchecked(&mp->stats.seq_not_found);
33570 goto rel;
33571 }
33572 }
33573@@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_
33574 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
33575
33576 if (!sp)
33577- atomic_inc(&mp->stats.xid_not_found);
33578+ atomic_inc_unchecked(&mp->stats.xid_not_found);
33579 else
33580- atomic_inc(&mp->stats.non_bls_resp);
33581+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
33582
33583 fc_frame_free(fp);
33584 }
33585diff -urNp linux-2.6.32.41/drivers/scsi/libsas/sas_ata.c linux-2.6.32.41/drivers/scsi/libsas/sas_ata.c
33586--- linux-2.6.32.41/drivers/scsi/libsas/sas_ata.c 2011-03-27 14:31:47.000000000 -0400
33587+++ linux-2.6.32.41/drivers/scsi/libsas/sas_ata.c 2011-04-23 12:56:11.000000000 -0400
33588@@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_l
33589 }
33590 }
33591
33592-static struct ata_port_operations sas_sata_ops = {
33593+static const struct ata_port_operations sas_sata_ops = {
33594 .phy_reset = sas_ata_phy_reset,
33595 .post_internal_cmd = sas_ata_post_internal,
33596 .qc_defer = ata_std_qc_defer,
33597diff -urNp linux-2.6.32.41/drivers/scsi/lpfc/lpfc_debugfs.c linux-2.6.32.41/drivers/scsi/lpfc/lpfc_debugfs.c
33598--- linux-2.6.32.41/drivers/scsi/lpfc/lpfc_debugfs.c 2011-03-27 14:31:47.000000000 -0400
33599+++ linux-2.6.32.41/drivers/scsi/lpfc/lpfc_debugfs.c 2011-05-16 21:46:57.000000000 -0400
33600@@ -124,7 +124,7 @@ struct lpfc_debug {
33601 int len;
33602 };
33603
33604-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
33605+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
33606 static unsigned long lpfc_debugfs_start_time = 0L;
33607
33608 /**
33609@@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
33610 lpfc_debugfs_enable = 0;
33611
33612 len = 0;
33613- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
33614+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
33615 (lpfc_debugfs_max_disc_trc - 1);
33616 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
33617 dtp = vport->disc_trc + i;
33618@@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
33619 lpfc_debugfs_enable = 0;
33620
33621 len = 0;
33622- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
33623+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
33624 (lpfc_debugfs_max_slow_ring_trc - 1);
33625 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
33626 dtp = phba->slow_ring_trc + i;
33627@@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
33628 uint32_t *ptr;
33629 char buffer[1024];
33630
33631+ pax_track_stack();
33632+
33633 off = 0;
33634 spin_lock_irq(&phba->hbalock);
33635
33636@@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
33637 !vport || !vport->disc_trc)
33638 return;
33639
33640- index = atomic_inc_return(&vport->disc_trc_cnt) &
33641+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
33642 (lpfc_debugfs_max_disc_trc - 1);
33643 dtp = vport->disc_trc + index;
33644 dtp->fmt = fmt;
33645 dtp->data1 = data1;
33646 dtp->data2 = data2;
33647 dtp->data3 = data3;
33648- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
33649+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
33650 dtp->jif = jiffies;
33651 #endif
33652 return;
33653@@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
33654 !phba || !phba->slow_ring_trc)
33655 return;
33656
33657- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
33658+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
33659 (lpfc_debugfs_max_slow_ring_trc - 1);
33660 dtp = phba->slow_ring_trc + index;
33661 dtp->fmt = fmt;
33662 dtp->data1 = data1;
33663 dtp->data2 = data2;
33664 dtp->data3 = data3;
33665- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
33666+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
33667 dtp->jif = jiffies;
33668 #endif
33669 return;
33670@@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
33671 "slow_ring buffer\n");
33672 goto debug_failed;
33673 }
33674- atomic_set(&phba->slow_ring_trc_cnt, 0);
33675+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
33676 memset(phba->slow_ring_trc, 0,
33677 (sizeof(struct lpfc_debugfs_trc) *
33678 lpfc_debugfs_max_slow_ring_trc));
33679@@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
33680 "buffer\n");
33681 goto debug_failed;
33682 }
33683- atomic_set(&vport->disc_trc_cnt, 0);
33684+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
33685
33686 snprintf(name, sizeof(name), "discovery_trace");
33687 vport->debug_disc_trc =
33688diff -urNp linux-2.6.32.41/drivers/scsi/lpfc/lpfc.h linux-2.6.32.41/drivers/scsi/lpfc/lpfc.h
33689--- linux-2.6.32.41/drivers/scsi/lpfc/lpfc.h 2011-03-27 14:31:47.000000000 -0400
33690+++ linux-2.6.32.41/drivers/scsi/lpfc/lpfc.h 2011-05-04 17:56:28.000000000 -0400
33691@@ -400,7 +400,7 @@ struct lpfc_vport {
33692 struct dentry *debug_nodelist;
33693 struct dentry *vport_debugfs_root;
33694 struct lpfc_debugfs_trc *disc_trc;
33695- atomic_t disc_trc_cnt;
33696+ atomic_unchecked_t disc_trc_cnt;
33697 #endif
33698 uint8_t stat_data_enabled;
33699 uint8_t stat_data_blocked;
33700@@ -725,8 +725,8 @@ struct lpfc_hba {
33701 struct timer_list fabric_block_timer;
33702 unsigned long bit_flags;
33703 #define FABRIC_COMANDS_BLOCKED 0
33704- atomic_t num_rsrc_err;
33705- atomic_t num_cmd_success;
33706+ atomic_unchecked_t num_rsrc_err;
33707+ atomic_unchecked_t num_cmd_success;
33708 unsigned long last_rsrc_error_time;
33709 unsigned long last_ramp_down_time;
33710 unsigned long last_ramp_up_time;
33711@@ -740,7 +740,7 @@ struct lpfc_hba {
33712 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
33713 struct dentry *debug_slow_ring_trc;
33714 struct lpfc_debugfs_trc *slow_ring_trc;
33715- atomic_t slow_ring_trc_cnt;
33716+ atomic_unchecked_t slow_ring_trc_cnt;
33717 #endif
33718
33719 /* Used for deferred freeing of ELS data buffers */
33720diff -urNp linux-2.6.32.41/drivers/scsi/lpfc/lpfc_scsi.c linux-2.6.32.41/drivers/scsi/lpfc/lpfc_scsi.c
33721--- linux-2.6.32.41/drivers/scsi/lpfc/lpfc_scsi.c 2011-03-27 14:31:47.000000000 -0400
33722+++ linux-2.6.32.41/drivers/scsi/lpfc/lpfc_scsi.c 2011-05-04 17:56:28.000000000 -0400
33723@@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
33724 uint32_t evt_posted;
33725
33726 spin_lock_irqsave(&phba->hbalock, flags);
33727- atomic_inc(&phba->num_rsrc_err);
33728+ atomic_inc_unchecked(&phba->num_rsrc_err);
33729 phba->last_rsrc_error_time = jiffies;
33730
33731 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
33732@@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
33733 unsigned long flags;
33734 struct lpfc_hba *phba = vport->phba;
33735 uint32_t evt_posted;
33736- atomic_inc(&phba->num_cmd_success);
33737+ atomic_inc_unchecked(&phba->num_cmd_success);
33738
33739 if (vport->cfg_lun_queue_depth <= queue_depth)
33740 return;
33741@@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
33742 int i;
33743 struct lpfc_rport_data *rdata;
33744
33745- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
33746- num_cmd_success = atomic_read(&phba->num_cmd_success);
33747+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
33748+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
33749
33750 vports = lpfc_create_vport_work_array(phba);
33751 if (vports != NULL)
33752@@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
33753 }
33754 }
33755 lpfc_destroy_vport_work_array(phba, vports);
33756- atomic_set(&phba->num_rsrc_err, 0);
33757- atomic_set(&phba->num_cmd_success, 0);
33758+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
33759+ atomic_set_unchecked(&phba->num_cmd_success, 0);
33760 }
33761
33762 /**
33763@@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
33764 }
33765 }
33766 lpfc_destroy_vport_work_array(phba, vports);
33767- atomic_set(&phba->num_rsrc_err, 0);
33768- atomic_set(&phba->num_cmd_success, 0);
33769+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
33770+ atomic_set_unchecked(&phba->num_cmd_success, 0);
33771 }
33772
33773 /**
33774diff -urNp linux-2.6.32.41/drivers/scsi/megaraid/megaraid_mbox.c linux-2.6.32.41/drivers/scsi/megaraid/megaraid_mbox.c
33775--- linux-2.6.32.41/drivers/scsi/megaraid/megaraid_mbox.c 2011-03-27 14:31:47.000000000 -0400
33776+++ linux-2.6.32.41/drivers/scsi/megaraid/megaraid_mbox.c 2011-05-16 21:46:57.000000000 -0400
33777@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
33778 int rval;
33779 int i;
33780
33781+ pax_track_stack();
33782+
33783 // Allocate memory for the base list of scb for management module.
33784 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
33785
33786diff -urNp linux-2.6.32.41/drivers/scsi/osd/osd_initiator.c linux-2.6.32.41/drivers/scsi/osd/osd_initiator.c
33787--- linux-2.6.32.41/drivers/scsi/osd/osd_initiator.c 2011-03-27 14:31:47.000000000 -0400
33788+++ linux-2.6.32.41/drivers/scsi/osd/osd_initiator.c 2011-05-16 21:46:57.000000000 -0400
33789@@ -94,6 +94,8 @@ static int _osd_print_system_info(struct
33790 int nelem = ARRAY_SIZE(get_attrs), a = 0;
33791 int ret;
33792
33793+ pax_track_stack();
33794+
33795 or = osd_start_request(od, GFP_KERNEL);
33796 if (!or)
33797 return -ENOMEM;
33798diff -urNp linux-2.6.32.41/drivers/scsi/pmcraid.c linux-2.6.32.41/drivers/scsi/pmcraid.c
33799--- linux-2.6.32.41/drivers/scsi/pmcraid.c 2011-05-10 22:12:01.000000000 -0400
33800+++ linux-2.6.32.41/drivers/scsi/pmcraid.c 2011-05-10 22:12:33.000000000 -0400
33801@@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct sc
33802 res->scsi_dev = scsi_dev;
33803 scsi_dev->hostdata = res;
33804 res->change_detected = 0;
33805- atomic_set(&res->read_failures, 0);
33806- atomic_set(&res->write_failures, 0);
33807+ atomic_set_unchecked(&res->read_failures, 0);
33808+ atomic_set_unchecked(&res->write_failures, 0);
33809 rc = 0;
33810 }
33811 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
33812@@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct
33813
33814 /* If this was a SCSI read/write command keep count of errors */
33815 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
33816- atomic_inc(&res->read_failures);
33817+ atomic_inc_unchecked(&res->read_failures);
33818 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
33819- atomic_inc(&res->write_failures);
33820+ atomic_inc_unchecked(&res->write_failures);
33821
33822 if (!RES_IS_GSCSI(res->cfg_entry) &&
33823 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
33824@@ -4113,7 +4113,7 @@ static void pmcraid_worker_function(stru
33825
33826 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
33827 /* add resources only after host is added into system */
33828- if (!atomic_read(&pinstance->expose_resources))
33829+ if (!atomic_read_unchecked(&pinstance->expose_resources))
33830 return;
33831
33832 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
33833@@ -4847,7 +4847,7 @@ static int __devinit pmcraid_init_instan
33834 init_waitqueue_head(&pinstance->reset_wait_q);
33835
33836 atomic_set(&pinstance->outstanding_cmds, 0);
33837- atomic_set(&pinstance->expose_resources, 0);
33838+ atomic_set_unchecked(&pinstance->expose_resources, 0);
33839
33840 INIT_LIST_HEAD(&pinstance->free_res_q);
33841 INIT_LIST_HEAD(&pinstance->used_res_q);
33842@@ -5499,7 +5499,7 @@ static int __devinit pmcraid_probe(
33843 /* Schedule worker thread to handle CCN and take care of adding and
33844 * removing devices to OS
33845 */
33846- atomic_set(&pinstance->expose_resources, 1);
33847+ atomic_set_unchecked(&pinstance->expose_resources, 1);
33848 schedule_work(&pinstance->worker_q);
33849 return rc;
33850
33851diff -urNp linux-2.6.32.41/drivers/scsi/pmcraid.h linux-2.6.32.41/drivers/scsi/pmcraid.h
33852--- linux-2.6.32.41/drivers/scsi/pmcraid.h 2011-03-27 14:31:47.000000000 -0400
33853+++ linux-2.6.32.41/drivers/scsi/pmcraid.h 2011-05-04 17:56:28.000000000 -0400
33854@@ -690,7 +690,7 @@ struct pmcraid_instance {
33855 atomic_t outstanding_cmds;
33856
33857 /* should add/delete resources to mid-layer now ?*/
33858- atomic_t expose_resources;
33859+ atomic_unchecked_t expose_resources;
33860
33861 /* Tasklet to handle deferred processing */
33862 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
33863@@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
33864 struct list_head queue; /* link to "to be exposed" resources */
33865 struct pmcraid_config_table_entry cfg_entry;
33866 struct scsi_device *scsi_dev; /* Link scsi_device structure */
33867- atomic_t read_failures; /* count of failed READ commands */
33868- atomic_t write_failures; /* count of failed WRITE commands */
33869+ atomic_unchecked_t read_failures; /* count of failed READ commands */
33870+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
33871
33872 /* To indicate add/delete/modify during CCN */
33873 u8 change_detected;
33874diff -urNp linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_def.h linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_def.h
33875--- linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_def.h 2011-03-27 14:31:47.000000000 -0400
33876+++ linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_def.h 2011-05-04 17:56:28.000000000 -0400
33877@@ -240,7 +240,7 @@ struct ddb_entry {
33878 atomic_t retry_relogin_timer; /* Min Time between relogins
33879 * (4000 only) */
33880 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
33881- atomic_t relogin_retry_count; /* Num of times relogin has been
33882+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
33883 * retried */
33884
33885 uint16_t port;
33886diff -urNp linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_init.c linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_init.c
33887--- linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_init.c 2011-03-27 14:31:47.000000000 -0400
33888+++ linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_init.c 2011-05-04 17:56:28.000000000 -0400
33889@@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_
33890 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
33891 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
33892 atomic_set(&ddb_entry->relogin_timer, 0);
33893- atomic_set(&ddb_entry->relogin_retry_count, 0);
33894+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
33895 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
33896 list_add_tail(&ddb_entry->list, &ha->ddb_list);
33897 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
33898@@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct s
33899 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
33900 atomic_set(&ddb_entry->port_down_timer,
33901 ha->port_down_retry_count);
33902- atomic_set(&ddb_entry->relogin_retry_count, 0);
33903+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
33904 atomic_set(&ddb_entry->relogin_timer, 0);
33905 clear_bit(DF_RELOGIN, &ddb_entry->flags);
33906 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
33907diff -urNp linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_os.c linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_os.c
33908--- linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_os.c 2011-03-27 14:31:47.000000000 -0400
33909+++ linux-2.6.32.41/drivers/scsi/qla4xxx/ql4_os.c 2011-05-04 17:56:28.000000000 -0400
33910@@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_ql
33911 ddb_entry->fw_ddb_device_state ==
33912 DDB_DS_SESSION_FAILED) {
33913 /* Reset retry relogin timer */
33914- atomic_inc(&ddb_entry->relogin_retry_count);
33915+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
33916 DEBUG2(printk("scsi%ld: index[%d] relogin"
33917 " timed out-retrying"
33918 " relogin (%d)\n",
33919 ha->host_no,
33920 ddb_entry->fw_ddb_index,
33921- atomic_read(&ddb_entry->
33922+ atomic_read_unchecked(&ddb_entry->
33923 relogin_retry_count))
33924 );
33925 start_dpc++;
33926diff -urNp linux-2.6.32.41/drivers/scsi/scsi.c linux-2.6.32.41/drivers/scsi/scsi.c
33927--- linux-2.6.32.41/drivers/scsi/scsi.c 2011-03-27 14:31:47.000000000 -0400
33928+++ linux-2.6.32.41/drivers/scsi/scsi.c 2011-05-04 17:56:28.000000000 -0400
33929@@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
33930 unsigned long timeout;
33931 int rtn = 0;
33932
33933- atomic_inc(&cmd->device->iorequest_cnt);
33934+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
33935
33936 /* check if the device is still usable */
33937 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
33938diff -urNp linux-2.6.32.41/drivers/scsi/scsi_debug.c linux-2.6.32.41/drivers/scsi/scsi_debug.c
33939--- linux-2.6.32.41/drivers/scsi/scsi_debug.c 2011-03-27 14:31:47.000000000 -0400
33940+++ linux-2.6.32.41/drivers/scsi/scsi_debug.c 2011-05-16 21:46:57.000000000 -0400
33941@@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_
33942 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
33943 unsigned char *cmd = (unsigned char *)scp->cmnd;
33944
33945+ pax_track_stack();
33946+
33947 if ((errsts = check_readiness(scp, 1, devip)))
33948 return errsts;
33949 memset(arr, 0, sizeof(arr));
33950@@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cm
33951 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
33952 unsigned char *cmd = (unsigned char *)scp->cmnd;
33953
33954+ pax_track_stack();
33955+
33956 if ((errsts = check_readiness(scp, 1, devip)))
33957 return errsts;
33958 memset(arr, 0, sizeof(arr));
33959diff -urNp linux-2.6.32.41/drivers/scsi/scsi_lib.c linux-2.6.32.41/drivers/scsi/scsi_lib.c
33960--- linux-2.6.32.41/drivers/scsi/scsi_lib.c 2011-05-10 22:12:01.000000000 -0400
33961+++ linux-2.6.32.41/drivers/scsi/scsi_lib.c 2011-05-10 22:12:33.000000000 -0400
33962@@ -1384,7 +1384,7 @@ static void scsi_kill_request(struct req
33963
33964 scsi_init_cmd_errh(cmd);
33965 cmd->result = DID_NO_CONNECT << 16;
33966- atomic_inc(&cmd->device->iorequest_cnt);
33967+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
33968
33969 /*
33970 * SCSI request completion path will do scsi_device_unbusy(),
33971@@ -1415,9 +1415,9 @@ static void scsi_softirq_done(struct req
33972 */
33973 cmd->serial_number = 0;
33974
33975- atomic_inc(&cmd->device->iodone_cnt);
33976+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
33977 if (cmd->result)
33978- atomic_inc(&cmd->device->ioerr_cnt);
33979+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
33980
33981 disposition = scsi_decide_disposition(cmd);
33982 if (disposition != SUCCESS &&
33983diff -urNp linux-2.6.32.41/drivers/scsi/scsi_sysfs.c linux-2.6.32.41/drivers/scsi/scsi_sysfs.c
33984--- linux-2.6.32.41/drivers/scsi/scsi_sysfs.c 2011-05-10 22:12:01.000000000 -0400
33985+++ linux-2.6.32.41/drivers/scsi/scsi_sysfs.c 2011-05-10 22:12:33.000000000 -0400
33986@@ -661,7 +661,7 @@ show_iostat_##field(struct device *dev,
33987 char *buf) \
33988 { \
33989 struct scsi_device *sdev = to_scsi_device(dev); \
33990- unsigned long long count = atomic_read(&sdev->field); \
33991+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
33992 return snprintf(buf, 20, "0x%llx\n", count); \
33993 } \
33994 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
33995diff -urNp linux-2.6.32.41/drivers/scsi/scsi_transport_fc.c linux-2.6.32.41/drivers/scsi/scsi_transport_fc.c
33996--- linux-2.6.32.41/drivers/scsi/scsi_transport_fc.c 2011-03-27 14:31:47.000000000 -0400
33997+++ linux-2.6.32.41/drivers/scsi/scsi_transport_fc.c 2011-05-04 17:56:28.000000000 -0400
33998@@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
33999 * Netlink Infrastructure
34000 */
34001
34002-static atomic_t fc_event_seq;
34003+static atomic_unchecked_t fc_event_seq;
34004
34005 /**
34006 * fc_get_event_number - Obtain the next sequential FC event number
34007@@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
34008 u32
34009 fc_get_event_number(void)
34010 {
34011- return atomic_add_return(1, &fc_event_seq);
34012+ return atomic_add_return_unchecked(1, &fc_event_seq);
34013 }
34014 EXPORT_SYMBOL(fc_get_event_number);
34015
34016@@ -641,7 +641,7 @@ static __init int fc_transport_init(void
34017 {
34018 int error;
34019
34020- atomic_set(&fc_event_seq, 0);
34021+ atomic_set_unchecked(&fc_event_seq, 0);
34022
34023 error = transport_class_register(&fc_host_class);
34024 if (error)
34025diff -urNp linux-2.6.32.41/drivers/scsi/scsi_transport_iscsi.c linux-2.6.32.41/drivers/scsi/scsi_transport_iscsi.c
34026--- linux-2.6.32.41/drivers/scsi/scsi_transport_iscsi.c 2011-03-27 14:31:47.000000000 -0400
34027+++ linux-2.6.32.41/drivers/scsi/scsi_transport_iscsi.c 2011-05-04 17:56:28.000000000 -0400
34028@@ -81,7 +81,7 @@ struct iscsi_internal {
34029 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
34030 };
34031
34032-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
34033+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
34034 static struct workqueue_struct *iscsi_eh_timer_workq;
34035
34036 /*
34037@@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_s
34038 int err;
34039
34040 ihost = shost->shost_data;
34041- session->sid = atomic_add_return(1, &iscsi_session_nr);
34042+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
34043
34044 if (id == ISCSI_MAX_TARGET) {
34045 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
34046@@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(v
34047 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
34048 ISCSI_TRANSPORT_VERSION);
34049
34050- atomic_set(&iscsi_session_nr, 0);
34051+ atomic_set_unchecked(&iscsi_session_nr, 0);
34052
34053 err = class_register(&iscsi_transport_class);
34054 if (err)
34055diff -urNp linux-2.6.32.41/drivers/scsi/scsi_transport_srp.c linux-2.6.32.41/drivers/scsi/scsi_transport_srp.c
34056--- linux-2.6.32.41/drivers/scsi/scsi_transport_srp.c 2011-03-27 14:31:47.000000000 -0400
34057+++ linux-2.6.32.41/drivers/scsi/scsi_transport_srp.c 2011-05-04 17:56:28.000000000 -0400
34058@@ -33,7 +33,7 @@
34059 #include "scsi_transport_srp_internal.h"
34060
34061 struct srp_host_attrs {
34062- atomic_t next_port_id;
34063+ atomic_unchecked_t next_port_id;
34064 };
34065 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
34066
34067@@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
34068 struct Scsi_Host *shost = dev_to_shost(dev);
34069 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
34070
34071- atomic_set(&srp_host->next_port_id, 0);
34072+ atomic_set_unchecked(&srp_host->next_port_id, 0);
34073 return 0;
34074 }
34075
34076@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
34077 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
34078 rport->roles = ids->roles;
34079
34080- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
34081+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
34082 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
34083
34084 transport_setup_device(&rport->dev);
34085diff -urNp linux-2.6.32.41/drivers/scsi/sg.c linux-2.6.32.41/drivers/scsi/sg.c
34086--- linux-2.6.32.41/drivers/scsi/sg.c 2011-03-27 14:31:47.000000000 -0400
34087+++ linux-2.6.32.41/drivers/scsi/sg.c 2011-04-17 15:56:46.000000000 -0400
34088@@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
34089 const struct file_operations * fops;
34090 };
34091
34092-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
34093+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
34094 {"allow_dio", &adio_fops},
34095 {"debug", &debug_fops},
34096 {"def_reserved_size", &dressz_fops},
34097@@ -2307,7 +2307,7 @@ sg_proc_init(void)
34098 {
34099 int k, mask;
34100 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
34101- struct sg_proc_leaf * leaf;
34102+ const struct sg_proc_leaf * leaf;
34103
34104 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
34105 if (!sg_proc_sgp)
34106diff -urNp linux-2.6.32.41/drivers/scsi/sym53c8xx_2/sym_glue.c linux-2.6.32.41/drivers/scsi/sym53c8xx_2/sym_glue.c
34107--- linux-2.6.32.41/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-03-27 14:31:47.000000000 -0400
34108+++ linux-2.6.32.41/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-05-16 21:46:57.000000000 -0400
34109@@ -1754,6 +1754,8 @@ static int __devinit sym2_probe(struct p
34110 int do_iounmap = 0;
34111 int do_disable_device = 1;
34112
34113+ pax_track_stack();
34114+
34115 memset(&sym_dev, 0, sizeof(sym_dev));
34116 memset(&nvram, 0, sizeof(nvram));
34117 sym_dev.pdev = pdev;
34118diff -urNp linux-2.6.32.41/drivers/serial/kgdboc.c linux-2.6.32.41/drivers/serial/kgdboc.c
34119--- linux-2.6.32.41/drivers/serial/kgdboc.c 2011-03-27 14:31:47.000000000 -0400
34120+++ linux-2.6.32.41/drivers/serial/kgdboc.c 2011-04-17 15:56:46.000000000 -0400
34121@@ -18,7 +18,7 @@
34122
34123 #define MAX_CONFIG_LEN 40
34124
34125-static struct kgdb_io kgdboc_io_ops;
34126+static const struct kgdb_io kgdboc_io_ops;
34127
34128 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
34129 static int configured = -1;
34130@@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void
34131 module_put(THIS_MODULE);
34132 }
34133
34134-static struct kgdb_io kgdboc_io_ops = {
34135+static const struct kgdb_io kgdboc_io_ops = {
34136 .name = "kgdboc",
34137 .read_char = kgdboc_get_char,
34138 .write_char = kgdboc_put_char,
34139diff -urNp linux-2.6.32.41/drivers/spi/spi.c linux-2.6.32.41/drivers/spi/spi.c
34140--- linux-2.6.32.41/drivers/spi/spi.c 2011-03-27 14:31:47.000000000 -0400
34141+++ linux-2.6.32.41/drivers/spi/spi.c 2011-05-04 17:56:28.000000000 -0400
34142@@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, str
34143 EXPORT_SYMBOL_GPL(spi_sync);
34144
34145 /* portable code must never pass more than 32 bytes */
34146-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
34147+#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
34148
34149 static u8 *buf;
34150
34151diff -urNp linux-2.6.32.41/drivers/staging/android/binder.c linux-2.6.32.41/drivers/staging/android/binder.c
34152--- linux-2.6.32.41/drivers/staging/android/binder.c 2011-03-27 14:31:47.000000000 -0400
34153+++ linux-2.6.32.41/drivers/staging/android/binder.c 2011-04-17 15:56:46.000000000 -0400
34154@@ -2756,7 +2756,7 @@ static void binder_vma_close(struct vm_a
34155 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
34156 }
34157
34158-static struct vm_operations_struct binder_vm_ops = {
34159+static const struct vm_operations_struct binder_vm_ops = {
34160 .open = binder_vma_open,
34161 .close = binder_vma_close,
34162 };
34163diff -urNp linux-2.6.32.41/drivers/staging/b3dfg/b3dfg.c linux-2.6.32.41/drivers/staging/b3dfg/b3dfg.c
34164--- linux-2.6.32.41/drivers/staging/b3dfg/b3dfg.c 2011-03-27 14:31:47.000000000 -0400
34165+++ linux-2.6.32.41/drivers/staging/b3dfg/b3dfg.c 2011-04-17 15:56:46.000000000 -0400
34166@@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_are
34167 return VM_FAULT_NOPAGE;
34168 }
34169
34170-static struct vm_operations_struct b3dfg_vm_ops = {
34171+static const struct vm_operations_struct b3dfg_vm_ops = {
34172 .fault = b3dfg_vma_fault,
34173 };
34174
34175@@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp,
34176 return r;
34177 }
34178
34179-static struct file_operations b3dfg_fops = {
34180+static const struct file_operations b3dfg_fops = {
34181 .owner = THIS_MODULE,
34182 .open = b3dfg_open,
34183 .release = b3dfg_release,
34184diff -urNp linux-2.6.32.41/drivers/staging/comedi/comedi_fops.c linux-2.6.32.41/drivers/staging/comedi/comedi_fops.c
34185--- linux-2.6.32.41/drivers/staging/comedi/comedi_fops.c 2011-03-27 14:31:47.000000000 -0400
34186+++ linux-2.6.32.41/drivers/staging/comedi/comedi_fops.c 2011-04-17 15:56:46.000000000 -0400
34187@@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct
34188 mutex_unlock(&dev->mutex);
34189 }
34190
34191-static struct vm_operations_struct comedi_vm_ops = {
34192+static const struct vm_operations_struct comedi_vm_ops = {
34193 .close = comedi_unmap,
34194 };
34195
34196diff -urNp linux-2.6.32.41/drivers/staging/dream/qdsp5/adsp_driver.c linux-2.6.32.41/drivers/staging/dream/qdsp5/adsp_driver.c
34197--- linux-2.6.32.41/drivers/staging/dream/qdsp5/adsp_driver.c 2011-03-27 14:31:47.000000000 -0400
34198+++ linux-2.6.32.41/drivers/staging/dream/qdsp5/adsp_driver.c 2011-04-17 15:56:46.000000000 -0400
34199@@ -576,7 +576,7 @@ static struct adsp_device *inode_to_devi
34200 static dev_t adsp_devno;
34201 static struct class *adsp_class;
34202
34203-static struct file_operations adsp_fops = {
34204+static const struct file_operations adsp_fops = {
34205 .owner = THIS_MODULE,
34206 .open = adsp_open,
34207 .unlocked_ioctl = adsp_ioctl,
34208diff -urNp linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_aac.c linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_aac.c
34209--- linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_aac.c 2011-03-27 14:31:47.000000000 -0400
34210+++ linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_aac.c 2011-04-17 15:56:46.000000000 -0400
34211@@ -1022,7 +1022,7 @@ done:
34212 return rc;
34213 }
34214
34215-static struct file_operations audio_aac_fops = {
34216+static const struct file_operations audio_aac_fops = {
34217 .owner = THIS_MODULE,
34218 .open = audio_open,
34219 .release = audio_release,
34220diff -urNp linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_amrnb.c linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_amrnb.c
34221--- linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-03-27 14:31:47.000000000 -0400
34222+++ linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-04-17 15:56:46.000000000 -0400
34223@@ -833,7 +833,7 @@ done:
34224 return rc;
34225 }
34226
34227-static struct file_operations audio_amrnb_fops = {
34228+static const struct file_operations audio_amrnb_fops = {
34229 .owner = THIS_MODULE,
34230 .open = audamrnb_open,
34231 .release = audamrnb_release,
34232diff -urNp linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_evrc.c linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_evrc.c
34233--- linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_evrc.c 2011-03-27 14:31:47.000000000 -0400
34234+++ linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_evrc.c 2011-04-17 15:56:46.000000000 -0400
34235@@ -805,7 +805,7 @@ dma_fail:
34236 return rc;
34237 }
34238
34239-static struct file_operations audio_evrc_fops = {
34240+static const struct file_operations audio_evrc_fops = {
34241 .owner = THIS_MODULE,
34242 .open = audevrc_open,
34243 .release = audevrc_release,
34244diff -urNp linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_in.c linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_in.c
34245--- linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_in.c 2011-03-27 14:31:47.000000000 -0400
34246+++ linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_in.c 2011-04-17 15:56:46.000000000 -0400
34247@@ -913,7 +913,7 @@ static int audpre_open(struct inode *ino
34248 return 0;
34249 }
34250
34251-static struct file_operations audio_fops = {
34252+static const struct file_operations audio_fops = {
34253 .owner = THIS_MODULE,
34254 .open = audio_in_open,
34255 .release = audio_in_release,
34256@@ -922,7 +922,7 @@ static struct file_operations audio_fops
34257 .unlocked_ioctl = audio_in_ioctl,
34258 };
34259
34260-static struct file_operations audpre_fops = {
34261+static const struct file_operations audpre_fops = {
34262 .owner = THIS_MODULE,
34263 .open = audpre_open,
34264 .unlocked_ioctl = audpre_ioctl,
34265diff -urNp linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_mp3.c linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_mp3.c
34266--- linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_mp3.c 2011-03-27 14:31:47.000000000 -0400
34267+++ linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_mp3.c 2011-04-17 15:56:46.000000000 -0400
34268@@ -941,7 +941,7 @@ done:
34269 return rc;
34270 }
34271
34272-static struct file_operations audio_mp3_fops = {
34273+static const struct file_operations audio_mp3_fops = {
34274 .owner = THIS_MODULE,
34275 .open = audio_open,
34276 .release = audio_release,
34277diff -urNp linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_out.c linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_out.c
34278--- linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_out.c 2011-03-27 14:31:47.000000000 -0400
34279+++ linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_out.c 2011-04-17 15:56:46.000000000 -0400
34280@@ -810,7 +810,7 @@ static int audpp_open(struct inode *inod
34281 return 0;
34282 }
34283
34284-static struct file_operations audio_fops = {
34285+static const struct file_operations audio_fops = {
34286 .owner = THIS_MODULE,
34287 .open = audio_open,
34288 .release = audio_release,
34289@@ -819,7 +819,7 @@ static struct file_operations audio_fops
34290 .unlocked_ioctl = audio_ioctl,
34291 };
34292
34293-static struct file_operations audpp_fops = {
34294+static const struct file_operations audpp_fops = {
34295 .owner = THIS_MODULE,
34296 .open = audpp_open,
34297 .unlocked_ioctl = audpp_ioctl,
34298diff -urNp linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_qcelp.c linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_qcelp.c
34299--- linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-03-27 14:31:47.000000000 -0400
34300+++ linux-2.6.32.41/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-04-17 15:56:46.000000000 -0400
34301@@ -816,7 +816,7 @@ err:
34302 return rc;
34303 }
34304
34305-static struct file_operations audio_qcelp_fops = {
34306+static const struct file_operations audio_qcelp_fops = {
34307 .owner = THIS_MODULE,
34308 .open = audqcelp_open,
34309 .release = audqcelp_release,
34310diff -urNp linux-2.6.32.41/drivers/staging/dream/qdsp5/snd.c linux-2.6.32.41/drivers/staging/dream/qdsp5/snd.c
34311--- linux-2.6.32.41/drivers/staging/dream/qdsp5/snd.c 2011-03-27 14:31:47.000000000 -0400
34312+++ linux-2.6.32.41/drivers/staging/dream/qdsp5/snd.c 2011-04-17 15:56:46.000000000 -0400
34313@@ -242,7 +242,7 @@ err:
34314 return rc;
34315 }
34316
34317-static struct file_operations snd_fops = {
34318+static const struct file_operations snd_fops = {
34319 .owner = THIS_MODULE,
34320 .open = snd_open,
34321 .release = snd_release,
34322diff -urNp linux-2.6.32.41/drivers/staging/dream/smd/smd_qmi.c linux-2.6.32.41/drivers/staging/dream/smd/smd_qmi.c
34323--- linux-2.6.32.41/drivers/staging/dream/smd/smd_qmi.c 2011-03-27 14:31:47.000000000 -0400
34324+++ linux-2.6.32.41/drivers/staging/dream/smd/smd_qmi.c 2011-04-17 15:56:46.000000000 -0400
34325@@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip,
34326 return 0;
34327 }
34328
34329-static struct file_operations qmi_fops = {
34330+static const struct file_operations qmi_fops = {
34331 .owner = THIS_MODULE,
34332 .read = qmi_read,
34333 .write = qmi_write,
34334diff -urNp linux-2.6.32.41/drivers/staging/dream/smd/smd_rpcrouter_device.c linux-2.6.32.41/drivers/staging/dream/smd/smd_rpcrouter_device.c
34335--- linux-2.6.32.41/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-03-27 14:31:47.000000000 -0400
34336+++ linux-2.6.32.41/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-04-17 15:56:46.000000000 -0400
34337@@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file
34338 return rc;
34339 }
34340
34341-static struct file_operations rpcrouter_server_fops = {
34342+static const struct file_operations rpcrouter_server_fops = {
34343 .owner = THIS_MODULE,
34344 .open = rpcrouter_open,
34345 .release = rpcrouter_release,
34346@@ -224,7 +224,7 @@ static struct file_operations rpcrouter_
34347 .unlocked_ioctl = rpcrouter_ioctl,
34348 };
34349
34350-static struct file_operations rpcrouter_router_fops = {
34351+static const struct file_operations rpcrouter_router_fops = {
34352 .owner = THIS_MODULE,
34353 .open = rpcrouter_open,
34354 .release = rpcrouter_release,
34355diff -urNp linux-2.6.32.41/drivers/staging/dst/dcore.c linux-2.6.32.41/drivers/staging/dst/dcore.c
34356--- linux-2.6.32.41/drivers/staging/dst/dcore.c 2011-03-27 14:31:47.000000000 -0400
34357+++ linux-2.6.32.41/drivers/staging/dst/dcore.c 2011-04-17 15:56:46.000000000 -0400
34358@@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendi
34359 return 0;
34360 }
34361
34362-static struct block_device_operations dst_blk_ops = {
34363+static const struct block_device_operations dst_blk_ops = {
34364 .open = dst_bdev_open,
34365 .release = dst_bdev_release,
34366 .owner = THIS_MODULE,
34367@@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(s
34368 n->size = ctl->size;
34369
34370 atomic_set(&n->refcnt, 1);
34371- atomic_long_set(&n->gen, 0);
34372+ atomic_long_set_unchecked(&n->gen, 0);
34373 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
34374
34375 err = dst_node_sysfs_init(n);
34376diff -urNp linux-2.6.32.41/drivers/staging/dst/trans.c linux-2.6.32.41/drivers/staging/dst/trans.c
34377--- linux-2.6.32.41/drivers/staging/dst/trans.c 2011-03-27 14:31:47.000000000 -0400
34378+++ linux-2.6.32.41/drivers/staging/dst/trans.c 2011-04-17 15:56:46.000000000 -0400
34379@@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n,
34380 t->error = 0;
34381 t->retries = 0;
34382 atomic_set(&t->refcnt, 1);
34383- t->gen = atomic_long_inc_return(&n->gen);
34384+ t->gen = atomic_long_inc_return_unchecked(&n->gen);
34385
34386 t->enc = bio_data_dir(bio);
34387 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
34388diff -urNp linux-2.6.32.41/drivers/staging/et131x/et1310_tx.c linux-2.6.32.41/drivers/staging/et131x/et1310_tx.c
34389--- linux-2.6.32.41/drivers/staging/et131x/et1310_tx.c 2011-03-27 14:31:47.000000000 -0400
34390+++ linux-2.6.32.41/drivers/staging/et131x/et1310_tx.c 2011-05-04 17:56:28.000000000 -0400
34391@@ -710,11 +710,11 @@ inline void et131x_free_send_packet(stru
34392 struct net_device_stats *stats = &etdev->net_stats;
34393
34394 if (pMpTcb->Flags & fMP_DEST_BROAD)
34395- atomic_inc(&etdev->Stats.brdcstxmt);
34396+ atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
34397 else if (pMpTcb->Flags & fMP_DEST_MULTI)
34398- atomic_inc(&etdev->Stats.multixmt);
34399+ atomic_inc_unchecked(&etdev->Stats.multixmt);
34400 else
34401- atomic_inc(&etdev->Stats.unixmt);
34402+ atomic_inc_unchecked(&etdev->Stats.unixmt);
34403
34404 if (pMpTcb->Packet) {
34405 stats->tx_bytes += pMpTcb->Packet->len;
34406diff -urNp linux-2.6.32.41/drivers/staging/et131x/et131x_adapter.h linux-2.6.32.41/drivers/staging/et131x/et131x_adapter.h
34407--- linux-2.6.32.41/drivers/staging/et131x/et131x_adapter.h 2011-03-27 14:31:47.000000000 -0400
34408+++ linux-2.6.32.41/drivers/staging/et131x/et131x_adapter.h 2011-05-04 17:56:28.000000000 -0400
34409@@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
34410 * operations
34411 */
34412 u32 unircv; /* # multicast packets received */
34413- atomic_t unixmt; /* # multicast packets for Tx */
34414+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
34415 u32 multircv; /* # multicast packets received */
34416- atomic_t multixmt; /* # multicast packets for Tx */
34417+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
34418 u32 brdcstrcv; /* # broadcast packets received */
34419- atomic_t brdcstxmt; /* # broadcast packets for Tx */
34420+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
34421 u32 norcvbuf; /* # Rx packets discarded */
34422 u32 noxmtbuf; /* # Tx packets discarded */
34423
34424diff -urNp linux-2.6.32.41/drivers/staging/go7007/go7007-v4l2.c linux-2.6.32.41/drivers/staging/go7007/go7007-v4l2.c
34425--- linux-2.6.32.41/drivers/staging/go7007/go7007-v4l2.c 2011-03-27 14:31:47.000000000 -0400
34426+++ linux-2.6.32.41/drivers/staging/go7007/go7007-v4l2.c 2011-04-17 15:56:46.000000000 -0400
34427@@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_are
34428 return 0;
34429 }
34430
34431-static struct vm_operations_struct go7007_vm_ops = {
34432+static const struct vm_operations_struct go7007_vm_ops = {
34433 .open = go7007_vm_open,
34434 .close = go7007_vm_close,
34435 .fault = go7007_vm_fault,
34436diff -urNp linux-2.6.32.41/drivers/staging/hv/blkvsc_drv.c linux-2.6.32.41/drivers/staging/hv/blkvsc_drv.c
34437--- linux-2.6.32.41/drivers/staging/hv/blkvsc_drv.c 2011-03-27 14:31:47.000000000 -0400
34438+++ linux-2.6.32.41/drivers/staging/hv/blkvsc_drv.c 2011-04-17 15:56:46.000000000 -0400
34439@@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKV
34440 /* The one and only one */
34441 static struct blkvsc_driver_context g_blkvsc_drv;
34442
34443-static struct block_device_operations block_ops = {
34444+static const struct block_device_operations block_ops = {
34445 .owner = THIS_MODULE,
34446 .open = blkvsc_open,
34447 .release = blkvsc_release,
34448diff -urNp linux-2.6.32.41/drivers/staging/hv/Channel.c linux-2.6.32.41/drivers/staging/hv/Channel.c
34449--- linux-2.6.32.41/drivers/staging/hv/Channel.c 2011-04-17 17:00:52.000000000 -0400
34450+++ linux-2.6.32.41/drivers/staging/hv/Channel.c 2011-05-04 17:56:28.000000000 -0400
34451@@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vm
34452
34453 DPRINT_ENTER(VMBUS);
34454
34455- nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
34456- atomic_inc(&gVmbusConnection.NextGpadlHandle);
34457+ nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
34458+ atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
34459
34460 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
34461 ASSERT(msgInfo != NULL);
34462diff -urNp linux-2.6.32.41/drivers/staging/hv/Hv.c linux-2.6.32.41/drivers/staging/hv/Hv.c
34463--- linux-2.6.32.41/drivers/staging/hv/Hv.c 2011-03-27 14:31:47.000000000 -0400
34464+++ linux-2.6.32.41/drivers/staging/hv/Hv.c 2011-04-17 15:56:46.000000000 -0400
34465@@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, vo
34466 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
34467 u32 outputAddressHi = outputAddress >> 32;
34468 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
34469- volatile void *hypercallPage = gHvContext.HypercallPage;
34470+ volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
34471
34472 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
34473 Control, Input, Output);
34474diff -urNp linux-2.6.32.41/drivers/staging/hv/vmbus_drv.c linux-2.6.32.41/drivers/staging/hv/vmbus_drv.c
34475--- linux-2.6.32.41/drivers/staging/hv/vmbus_drv.c 2011-03-27 14:31:47.000000000 -0400
34476+++ linux-2.6.32.41/drivers/staging/hv/vmbus_drv.c 2011-05-04 17:56:28.000000000 -0400
34477@@ -532,7 +532,7 @@ static int vmbus_child_device_register(s
34478 to_device_context(root_device_obj);
34479 struct device_context *child_device_ctx =
34480 to_device_context(child_device_obj);
34481- static atomic_t device_num = ATOMIC_INIT(0);
34482+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
34483
34484 DPRINT_ENTER(VMBUS_DRV);
34485
34486@@ -541,7 +541,7 @@ static int vmbus_child_device_register(s
34487
34488 /* Set the device name. Otherwise, device_register() will fail. */
34489 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
34490- atomic_inc_return(&device_num));
34491+ atomic_inc_return_unchecked(&device_num));
34492
34493 /* The new device belongs to this bus */
34494 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
34495diff -urNp linux-2.6.32.41/drivers/staging/hv/VmbusPrivate.h linux-2.6.32.41/drivers/staging/hv/VmbusPrivate.h
34496--- linux-2.6.32.41/drivers/staging/hv/VmbusPrivate.h 2011-04-17 17:00:52.000000000 -0400
34497+++ linux-2.6.32.41/drivers/staging/hv/VmbusPrivate.h 2011-05-04 17:56:28.000000000 -0400
34498@@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
34499 struct VMBUS_CONNECTION {
34500 enum VMBUS_CONNECT_STATE ConnectState;
34501
34502- atomic_t NextGpadlHandle;
34503+ atomic_unchecked_t NextGpadlHandle;
34504
34505 /*
34506 * Represents channel interrupts. Each bit position represents a
34507diff -urNp linux-2.6.32.41/drivers/staging/octeon/ethernet.c linux-2.6.32.41/drivers/staging/octeon/ethernet.c
34508--- linux-2.6.32.41/drivers/staging/octeon/ethernet.c 2011-03-27 14:31:47.000000000 -0400
34509+++ linux-2.6.32.41/drivers/staging/octeon/ethernet.c 2011-05-04 17:56:28.000000000 -0400
34510@@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_
34511 * since the RX tasklet also increments it.
34512 */
34513 #ifdef CONFIG_64BIT
34514- atomic64_add(rx_status.dropped_packets,
34515- (atomic64_t *)&priv->stats.rx_dropped);
34516+ atomic64_add_unchecked(rx_status.dropped_packets,
34517+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
34518 #else
34519- atomic_add(rx_status.dropped_packets,
34520- (atomic_t *)&priv->stats.rx_dropped);
34521+ atomic_add_unchecked(rx_status.dropped_packets,
34522+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
34523 #endif
34524 }
34525
34526diff -urNp linux-2.6.32.41/drivers/staging/octeon/ethernet-rx.c linux-2.6.32.41/drivers/staging/octeon/ethernet-rx.c
34527--- linux-2.6.32.41/drivers/staging/octeon/ethernet-rx.c 2011-03-27 14:31:47.000000000 -0400
34528+++ linux-2.6.32.41/drivers/staging/octeon/ethernet-rx.c 2011-05-04 17:56:28.000000000 -0400
34529@@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long un
34530 /* Increment RX stats for virtual ports */
34531 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
34532 #ifdef CONFIG_64BIT
34533- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
34534- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
34535+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
34536+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
34537 #else
34538- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
34539- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
34540+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
34541+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
34542 #endif
34543 }
34544 netif_receive_skb(skb);
34545@@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long un
34546 dev->name);
34547 */
34548 #ifdef CONFIG_64BIT
34549- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
34550+ atomic64_add_unchecked(1, (atomic64_t *)&priv->stats.rx_dropped);
34551 #else
34552- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
34553+ atomic_add_unchecked(1, (atomic_t *)&priv->stats.rx_dropped);
34554 #endif
34555 dev_kfree_skb_irq(skb);
34556 }
34557diff -urNp linux-2.6.32.41/drivers/staging/panel/panel.c linux-2.6.32.41/drivers/staging/panel/panel.c
34558--- linux-2.6.32.41/drivers/staging/panel/panel.c 2011-03-27 14:31:47.000000000 -0400
34559+++ linux-2.6.32.41/drivers/staging/panel/panel.c 2011-04-17 15:56:46.000000000 -0400
34560@@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *ino
34561 return 0;
34562 }
34563
34564-static struct file_operations lcd_fops = {
34565+static const struct file_operations lcd_fops = {
34566 .write = lcd_write,
34567 .open = lcd_open,
34568 .release = lcd_release,
34569@@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *
34570 return 0;
34571 }
34572
34573-static struct file_operations keypad_fops = {
34574+static const struct file_operations keypad_fops = {
34575 .read = keypad_read, /* read */
34576 .open = keypad_open, /* open */
34577 .release = keypad_release, /* close */
34578diff -urNp linux-2.6.32.41/drivers/staging/phison/phison.c linux-2.6.32.41/drivers/staging/phison/phison.c
34579--- linux-2.6.32.41/drivers/staging/phison/phison.c 2011-03-27 14:31:47.000000000 -0400
34580+++ linux-2.6.32.41/drivers/staging/phison/phison.c 2011-04-17 15:56:46.000000000 -0400
34581@@ -43,7 +43,7 @@ static struct scsi_host_template phison_
34582 ATA_BMDMA_SHT(DRV_NAME),
34583 };
34584
34585-static struct ata_port_operations phison_ops = {
34586+static const struct ata_port_operations phison_ops = {
34587 .inherits = &ata_bmdma_port_ops,
34588 .prereset = phison_pre_reset,
34589 };
34590diff -urNp linux-2.6.32.41/drivers/staging/poch/poch.c linux-2.6.32.41/drivers/staging/poch/poch.c
34591--- linux-2.6.32.41/drivers/staging/poch/poch.c 2011-03-27 14:31:47.000000000 -0400
34592+++ linux-2.6.32.41/drivers/staging/poch/poch.c 2011-04-17 15:56:46.000000000 -0400
34593@@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inod
34594 return 0;
34595 }
34596
34597-static struct file_operations poch_fops = {
34598+static const struct file_operations poch_fops = {
34599 .owner = THIS_MODULE,
34600 .open = poch_open,
34601 .release = poch_release,
34602diff -urNp linux-2.6.32.41/drivers/staging/pohmelfs/inode.c linux-2.6.32.41/drivers/staging/pohmelfs/inode.c
34603--- linux-2.6.32.41/drivers/staging/pohmelfs/inode.c 2011-03-27 14:31:47.000000000 -0400
34604+++ linux-2.6.32.41/drivers/staging/pohmelfs/inode.c 2011-05-04 17:56:20.000000000 -0400
34605@@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct su
34606 mutex_init(&psb->mcache_lock);
34607 psb->mcache_root = RB_ROOT;
34608 psb->mcache_timeout = msecs_to_jiffies(5000);
34609- atomic_long_set(&psb->mcache_gen, 0);
34610+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
34611
34612 psb->trans_max_pages = 100;
34613
34614@@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct su
34615 INIT_LIST_HEAD(&psb->crypto_ready_list);
34616 INIT_LIST_HEAD(&psb->crypto_active_list);
34617
34618- atomic_set(&psb->trans_gen, 1);
34619+ atomic_set_unchecked(&psb->trans_gen, 1);
34620 atomic_long_set(&psb->total_inodes, 0);
34621
34622 mutex_init(&psb->state_lock);
34623diff -urNp linux-2.6.32.41/drivers/staging/pohmelfs/mcache.c linux-2.6.32.41/drivers/staging/pohmelfs/mcache.c
34624--- linux-2.6.32.41/drivers/staging/pohmelfs/mcache.c 2011-03-27 14:31:47.000000000 -0400
34625+++ linux-2.6.32.41/drivers/staging/pohmelfs/mcache.c 2011-04-17 15:56:46.000000000 -0400
34626@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
34627 m->data = data;
34628 m->start = start;
34629 m->size = size;
34630- m->gen = atomic_long_inc_return(&psb->mcache_gen);
34631+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
34632
34633 mutex_lock(&psb->mcache_lock);
34634 err = pohmelfs_mcache_insert(psb, m);
34635diff -urNp linux-2.6.32.41/drivers/staging/pohmelfs/netfs.h linux-2.6.32.41/drivers/staging/pohmelfs/netfs.h
34636--- linux-2.6.32.41/drivers/staging/pohmelfs/netfs.h 2011-03-27 14:31:47.000000000 -0400
34637+++ linux-2.6.32.41/drivers/staging/pohmelfs/netfs.h 2011-05-04 17:56:20.000000000 -0400
34638@@ -570,14 +570,14 @@ struct pohmelfs_config;
34639 struct pohmelfs_sb {
34640 struct rb_root mcache_root;
34641 struct mutex mcache_lock;
34642- atomic_long_t mcache_gen;
34643+ atomic_long_unchecked_t mcache_gen;
34644 unsigned long mcache_timeout;
34645
34646 unsigned int idx;
34647
34648 unsigned int trans_retries;
34649
34650- atomic_t trans_gen;
34651+ atomic_unchecked_t trans_gen;
34652
34653 unsigned int crypto_attached_size;
34654 unsigned int crypto_align_size;
34655diff -urNp linux-2.6.32.41/drivers/staging/pohmelfs/trans.c linux-2.6.32.41/drivers/staging/pohmelfs/trans.c
34656--- linux-2.6.32.41/drivers/staging/pohmelfs/trans.c 2011-03-27 14:31:47.000000000 -0400
34657+++ linux-2.6.32.41/drivers/staging/pohmelfs/trans.c 2011-05-04 17:56:28.000000000 -0400
34658@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
34659 int err;
34660 struct netfs_cmd *cmd = t->iovec.iov_base;
34661
34662- t->gen = atomic_inc_return(&psb->trans_gen);
34663+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
34664
34665 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
34666 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
34667diff -urNp linux-2.6.32.41/drivers/staging/sep/sep_driver.c linux-2.6.32.41/drivers/staging/sep/sep_driver.c
34668--- linux-2.6.32.41/drivers/staging/sep/sep_driver.c 2011-03-27 14:31:47.000000000 -0400
34669+++ linux-2.6.32.41/drivers/staging/sep/sep_driver.c 2011-04-17 15:56:46.000000000 -0400
34670@@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver
34671 static dev_t sep_devno;
34672
34673 /* the files operations structure of the driver */
34674-static struct file_operations sep_file_operations = {
34675+static const struct file_operations sep_file_operations = {
34676 .owner = THIS_MODULE,
34677 .ioctl = sep_ioctl,
34678 .poll = sep_poll,
34679diff -urNp linux-2.6.32.41/drivers/staging/usbip/vhci.h linux-2.6.32.41/drivers/staging/usbip/vhci.h
34680--- linux-2.6.32.41/drivers/staging/usbip/vhci.h 2011-03-27 14:31:47.000000000 -0400
34681+++ linux-2.6.32.41/drivers/staging/usbip/vhci.h 2011-05-04 17:56:28.000000000 -0400
34682@@ -92,7 +92,7 @@ struct vhci_hcd {
34683 unsigned resuming:1;
34684 unsigned long re_timeout;
34685
34686- atomic_t seqnum;
34687+ atomic_unchecked_t seqnum;
34688
34689 /*
34690 * NOTE:
34691diff -urNp linux-2.6.32.41/drivers/staging/usbip/vhci_hcd.c linux-2.6.32.41/drivers/staging/usbip/vhci_hcd.c
34692--- linux-2.6.32.41/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:01.000000000 -0400
34693+++ linux-2.6.32.41/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:33.000000000 -0400
34694@@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
34695 return;
34696 }
34697
34698- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
34699+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
34700 if (priv->seqnum == 0xffff)
34701 usbip_uinfo("seqnum max\n");
34702
34703@@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_h
34704 return -ENOMEM;
34705 }
34706
34707- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
34708+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
34709 if (unlink->seqnum == 0xffff)
34710 usbip_uinfo("seqnum max\n");
34711
34712@@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hc
34713 vdev->rhport = rhport;
34714 }
34715
34716- atomic_set(&vhci->seqnum, 0);
34717+ atomic_set_unchecked(&vhci->seqnum, 0);
34718 spin_lock_init(&vhci->lock);
34719
34720
34721diff -urNp linux-2.6.32.41/drivers/staging/usbip/vhci_rx.c linux-2.6.32.41/drivers/staging/usbip/vhci_rx.c
34722--- linux-2.6.32.41/drivers/staging/usbip/vhci_rx.c 2011-04-17 17:00:52.000000000 -0400
34723+++ linux-2.6.32.41/drivers/staging/usbip/vhci_rx.c 2011-05-04 17:56:28.000000000 -0400
34724@@ -78,7 +78,7 @@ static void vhci_recv_ret_submit(struct
34725 usbip_uerr("cannot find a urb of seqnum %u\n",
34726 pdu->base.seqnum);
34727 usbip_uinfo("max seqnum %d\n",
34728- atomic_read(&the_controller->seqnum));
34729+ atomic_read_unchecked(&the_controller->seqnum));
34730 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
34731 return;
34732 }
34733diff -urNp linux-2.6.32.41/drivers/staging/vme/devices/vme_user.c linux-2.6.32.41/drivers/staging/vme/devices/vme_user.c
34734--- linux-2.6.32.41/drivers/staging/vme/devices/vme_user.c 2011-03-27 14:31:47.000000000 -0400
34735+++ linux-2.6.32.41/drivers/staging/vme/devices/vme_user.c 2011-04-17 15:56:46.000000000 -0400
34736@@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *
34737 static int __init vme_user_probe(struct device *, int, int);
34738 static int __exit vme_user_remove(struct device *, int, int);
34739
34740-static struct file_operations vme_user_fops = {
34741+static const struct file_operations vme_user_fops = {
34742 .open = vme_user_open,
34743 .release = vme_user_release,
34744 .read = vme_user_read,
34745diff -urNp linux-2.6.32.41/drivers/telephony/ixj.c linux-2.6.32.41/drivers/telephony/ixj.c
34746--- linux-2.6.32.41/drivers/telephony/ixj.c 2011-03-27 14:31:47.000000000 -0400
34747+++ linux-2.6.32.41/drivers/telephony/ixj.c 2011-05-16 21:46:57.000000000 -0400
34748@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
34749 bool mContinue;
34750 char *pIn, *pOut;
34751
34752+ pax_track_stack();
34753+
34754 if (!SCI_Prepare(j))
34755 return 0;
34756
34757diff -urNp linux-2.6.32.41/drivers/uio/uio.c linux-2.6.32.41/drivers/uio/uio.c
34758--- linux-2.6.32.41/drivers/uio/uio.c 2011-03-27 14:31:47.000000000 -0400
34759+++ linux-2.6.32.41/drivers/uio/uio.c 2011-05-04 17:56:20.000000000 -0400
34760@@ -23,6 +23,7 @@
34761 #include <linux/string.h>
34762 #include <linux/kobject.h>
34763 #include <linux/uio_driver.h>
34764+#include <asm/local.h>
34765
34766 #define UIO_MAX_DEVICES 255
34767
34768@@ -30,10 +31,10 @@ struct uio_device {
34769 struct module *owner;
34770 struct device *dev;
34771 int minor;
34772- atomic_t event;
34773+ atomic_unchecked_t event;
34774 struct fasync_struct *async_queue;
34775 wait_queue_head_t wait;
34776- int vma_count;
34777+ local_t vma_count;
34778 struct uio_info *info;
34779 struct kobject *map_dir;
34780 struct kobject *portio_dir;
34781@@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobj
34782 return entry->show(mem, buf);
34783 }
34784
34785-static struct sysfs_ops map_sysfs_ops = {
34786+static const struct sysfs_ops map_sysfs_ops = {
34787 .show = map_type_show,
34788 };
34789
34790@@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct k
34791 return entry->show(port, buf);
34792 }
34793
34794-static struct sysfs_ops portio_sysfs_ops = {
34795+static const struct sysfs_ops portio_sysfs_ops = {
34796 .show = portio_type_show,
34797 };
34798
34799@@ -255,7 +256,7 @@ static ssize_t show_event(struct device
34800 struct uio_device *idev = dev_get_drvdata(dev);
34801 if (idev)
34802 return sprintf(buf, "%u\n",
34803- (unsigned int)atomic_read(&idev->event));
34804+ (unsigned int)atomic_read_unchecked(&idev->event));
34805 else
34806 return -ENODEV;
34807 }
34808@@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *i
34809 {
34810 struct uio_device *idev = info->uio_dev;
34811
34812- atomic_inc(&idev->event);
34813+ atomic_inc_unchecked(&idev->event);
34814 wake_up_interruptible(&idev->wait);
34815 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
34816 }
34817@@ -477,7 +478,7 @@ static int uio_open(struct inode *inode,
34818 }
34819
34820 listener->dev = idev;
34821- listener->event_count = atomic_read(&idev->event);
34822+ listener->event_count = atomic_read_unchecked(&idev->event);
34823 filep->private_data = listener;
34824
34825 if (idev->info->open) {
34826@@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file
34827 return -EIO;
34828
34829 poll_wait(filep, &idev->wait, wait);
34830- if (listener->event_count != atomic_read(&idev->event))
34831+ if (listener->event_count != atomic_read_unchecked(&idev->event))
34832 return POLLIN | POLLRDNORM;
34833 return 0;
34834 }
34835@@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *fil
34836 do {
34837 set_current_state(TASK_INTERRUPTIBLE);
34838
34839- event_count = atomic_read(&idev->event);
34840+ event_count = atomic_read_unchecked(&idev->event);
34841 if (event_count != listener->event_count) {
34842 if (copy_to_user(buf, &event_count, count))
34843 retval = -EFAULT;
34844@@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_
34845 static void uio_vma_open(struct vm_area_struct *vma)
34846 {
34847 struct uio_device *idev = vma->vm_private_data;
34848- idev->vma_count++;
34849+ local_inc(&idev->vma_count);
34850 }
34851
34852 static void uio_vma_close(struct vm_area_struct *vma)
34853 {
34854 struct uio_device *idev = vma->vm_private_data;
34855- idev->vma_count--;
34856+ local_dec(&idev->vma_count);
34857 }
34858
34859 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
34860@@ -840,7 +841,7 @@ int __uio_register_device(struct module
34861 idev->owner = owner;
34862 idev->info = info;
34863 init_waitqueue_head(&idev->wait);
34864- atomic_set(&idev->event, 0);
34865+ atomic_set_unchecked(&idev->event, 0);
34866
34867 ret = uio_get_minor(idev);
34868 if (ret)
34869diff -urNp linux-2.6.32.41/drivers/usb/atm/usbatm.c linux-2.6.32.41/drivers/usb/atm/usbatm.c
34870--- linux-2.6.32.41/drivers/usb/atm/usbatm.c 2011-03-27 14:31:47.000000000 -0400
34871+++ linux-2.6.32.41/drivers/usb/atm/usbatm.c 2011-04-17 15:56:46.000000000 -0400
34872@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(stru
34873 if (printk_ratelimit())
34874 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
34875 __func__, vpi, vci);
34876- atomic_inc(&vcc->stats->rx_err);
34877+ atomic_inc_unchecked(&vcc->stats->rx_err);
34878 return;
34879 }
34880
34881@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(stru
34882 if (length > ATM_MAX_AAL5_PDU) {
34883 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
34884 __func__, length, vcc);
34885- atomic_inc(&vcc->stats->rx_err);
34886+ atomic_inc_unchecked(&vcc->stats->rx_err);
34887 goto out;
34888 }
34889
34890@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(stru
34891 if (sarb->len < pdu_length) {
34892 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
34893 __func__, pdu_length, sarb->len, vcc);
34894- atomic_inc(&vcc->stats->rx_err);
34895+ atomic_inc_unchecked(&vcc->stats->rx_err);
34896 goto out;
34897 }
34898
34899 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
34900 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
34901 __func__, vcc);
34902- atomic_inc(&vcc->stats->rx_err);
34903+ atomic_inc_unchecked(&vcc->stats->rx_err);
34904 goto out;
34905 }
34906
34907@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(stru
34908 if (printk_ratelimit())
34909 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
34910 __func__, length);
34911- atomic_inc(&vcc->stats->rx_drop);
34912+ atomic_inc_unchecked(&vcc->stats->rx_drop);
34913 goto out;
34914 }
34915
34916@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(stru
34917
34918 vcc->push(vcc, skb);
34919
34920- atomic_inc(&vcc->stats->rx);
34921+ atomic_inc_unchecked(&vcc->stats->rx);
34922 out:
34923 skb_trim(sarb, 0);
34924 }
34925@@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned l
34926 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
34927
34928 usbatm_pop(vcc, skb);
34929- atomic_inc(&vcc->stats->tx);
34930+ atomic_inc_unchecked(&vcc->stats->tx);
34931
34932 skb = skb_dequeue(&instance->sndqueue);
34933 }
34934@@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct a
34935 if (!left--)
34936 return sprintf(page,
34937 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
34938- atomic_read(&atm_dev->stats.aal5.tx),
34939- atomic_read(&atm_dev->stats.aal5.tx_err),
34940- atomic_read(&atm_dev->stats.aal5.rx),
34941- atomic_read(&atm_dev->stats.aal5.rx_err),
34942- atomic_read(&atm_dev->stats.aal5.rx_drop));
34943+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
34944+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
34945+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
34946+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
34947+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
34948
34949 if (!left--) {
34950 if (instance->disconnected)
34951diff -urNp linux-2.6.32.41/drivers/usb/class/cdc-wdm.c linux-2.6.32.41/drivers/usb/class/cdc-wdm.c
34952--- linux-2.6.32.41/drivers/usb/class/cdc-wdm.c 2011-03-27 14:31:47.000000000 -0400
34953+++ linux-2.6.32.41/drivers/usb/class/cdc-wdm.c 2011-04-17 15:56:46.000000000 -0400
34954@@ -314,7 +314,7 @@ static ssize_t wdm_write
34955 if (r < 0)
34956 goto outnp;
34957
34958- if (!file->f_flags && O_NONBLOCK)
34959+ if (!(file->f_flags & O_NONBLOCK))
34960 r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
34961 &desc->flags));
34962 else
34963diff -urNp linux-2.6.32.41/drivers/usb/core/hcd.c linux-2.6.32.41/drivers/usb/core/hcd.c
34964--- linux-2.6.32.41/drivers/usb/core/hcd.c 2011-03-27 14:31:47.000000000 -0400
34965+++ linux-2.6.32.41/drivers/usb/core/hcd.c 2011-04-17 15:56:46.000000000 -0400
34966@@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutd
34967
34968 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
34969
34970-struct usb_mon_operations *mon_ops;
34971+const struct usb_mon_operations *mon_ops;
34972
34973 /*
34974 * The registration is unlocked.
34975@@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
34976 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
34977 */
34978
34979-int usb_mon_register (struct usb_mon_operations *ops)
34980+int usb_mon_register (const struct usb_mon_operations *ops)
34981 {
34982
34983 if (mon_ops)
34984diff -urNp linux-2.6.32.41/drivers/usb/core/hcd.h linux-2.6.32.41/drivers/usb/core/hcd.h
34985--- linux-2.6.32.41/drivers/usb/core/hcd.h 2011-03-27 14:31:47.000000000 -0400
34986+++ linux-2.6.32.41/drivers/usb/core/hcd.h 2011-04-17 15:56:46.000000000 -0400
34987@@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) {
34988 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
34989
34990 struct usb_mon_operations {
34991- void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
34992- void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
34993- void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
34994+ void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
34995+ void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
34996+ void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
34997 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
34998 };
34999
35000-extern struct usb_mon_operations *mon_ops;
35001+extern const struct usb_mon_operations *mon_ops;
35002
35003 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
35004 {
35005@@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(s
35006 (*mon_ops->urb_complete)(bus, urb, status);
35007 }
35008
35009-int usb_mon_register(struct usb_mon_operations *ops);
35010+int usb_mon_register(const struct usb_mon_operations *ops);
35011 void usb_mon_deregister(void);
35012
35013 #else
35014diff -urNp linux-2.6.32.41/drivers/usb/core/message.c linux-2.6.32.41/drivers/usb/core/message.c
35015--- linux-2.6.32.41/drivers/usb/core/message.c 2011-03-27 14:31:47.000000000 -0400
35016+++ linux-2.6.32.41/drivers/usb/core/message.c 2011-04-17 15:56:46.000000000 -0400
35017@@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device
35018 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
35019 if (buf) {
35020 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
35021- if (len > 0) {
35022- smallbuf = kmalloc(++len, GFP_NOIO);
35023+ if (len++ > 0) {
35024+ smallbuf = kmalloc(len, GFP_NOIO);
35025 if (!smallbuf)
35026 return buf;
35027 memcpy(smallbuf, buf, len);
35028diff -urNp linux-2.6.32.41/drivers/usb/misc/appledisplay.c linux-2.6.32.41/drivers/usb/misc/appledisplay.c
35029--- linux-2.6.32.41/drivers/usb/misc/appledisplay.c 2011-03-27 14:31:47.000000000 -0400
35030+++ linux-2.6.32.41/drivers/usb/misc/appledisplay.c 2011-04-17 15:56:46.000000000 -0400
35031@@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightnes
35032 return pdata->msgdata[1];
35033 }
35034
35035-static struct backlight_ops appledisplay_bl_data = {
35036+static const struct backlight_ops appledisplay_bl_data = {
35037 .get_brightness = appledisplay_bl_get_brightness,
35038 .update_status = appledisplay_bl_update_status,
35039 };
35040diff -urNp linux-2.6.32.41/drivers/usb/mon/mon_main.c linux-2.6.32.41/drivers/usb/mon/mon_main.c
35041--- linux-2.6.32.41/drivers/usb/mon/mon_main.c 2011-03-27 14:31:47.000000000 -0400
35042+++ linux-2.6.32.41/drivers/usb/mon/mon_main.c 2011-04-17 15:56:46.000000000 -0400
35043@@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
35044 /*
35045 * Ops
35046 */
35047-static struct usb_mon_operations mon_ops_0 = {
35048+static const struct usb_mon_operations mon_ops_0 = {
35049 .urb_submit = mon_submit,
35050 .urb_submit_error = mon_submit_error,
35051 .urb_complete = mon_complete,
35052diff -urNp linux-2.6.32.41/drivers/usb/wusbcore/wa-hc.h linux-2.6.32.41/drivers/usb/wusbcore/wa-hc.h
35053--- linux-2.6.32.41/drivers/usb/wusbcore/wa-hc.h 2011-03-27 14:31:47.000000000 -0400
35054+++ linux-2.6.32.41/drivers/usb/wusbcore/wa-hc.h 2011-05-04 17:56:28.000000000 -0400
35055@@ -192,7 +192,7 @@ struct wahc {
35056 struct list_head xfer_delayed_list;
35057 spinlock_t xfer_list_lock;
35058 struct work_struct xfer_work;
35059- atomic_t xfer_id_count;
35060+ atomic_unchecked_t xfer_id_count;
35061 };
35062
35063
35064@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
35065 INIT_LIST_HEAD(&wa->xfer_delayed_list);
35066 spin_lock_init(&wa->xfer_list_lock);
35067 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
35068- atomic_set(&wa->xfer_id_count, 1);
35069+ atomic_set_unchecked(&wa->xfer_id_count, 1);
35070 }
35071
35072 /**
35073diff -urNp linux-2.6.32.41/drivers/usb/wusbcore/wa-xfer.c linux-2.6.32.41/drivers/usb/wusbcore/wa-xfer.c
35074--- linux-2.6.32.41/drivers/usb/wusbcore/wa-xfer.c 2011-03-27 14:31:47.000000000 -0400
35075+++ linux-2.6.32.41/drivers/usb/wusbcore/wa-xfer.c 2011-05-04 17:56:28.000000000 -0400
35076@@ -293,7 +293,7 @@ out:
35077 */
35078 static void wa_xfer_id_init(struct wa_xfer *xfer)
35079 {
35080- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
35081+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
35082 }
35083
35084 /*
35085diff -urNp linux-2.6.32.41/drivers/uwb/wlp/messages.c linux-2.6.32.41/drivers/uwb/wlp/messages.c
35086--- linux-2.6.32.41/drivers/uwb/wlp/messages.c 2011-03-27 14:31:47.000000000 -0400
35087+++ linux-2.6.32.41/drivers/uwb/wlp/messages.c 2011-04-17 15:56:46.000000000 -0400
35088@@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct
35089 size_t len = skb->len;
35090 size_t used;
35091 ssize_t result;
35092- struct wlp_nonce enonce, rnonce;
35093+ struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
35094 enum wlp_assc_error assc_err;
35095 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
35096 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
35097diff -urNp linux-2.6.32.41/drivers/uwb/wlp/sysfs.c linux-2.6.32.41/drivers/uwb/wlp/sysfs.c
35098--- linux-2.6.32.41/drivers/uwb/wlp/sysfs.c 2011-03-27 14:31:47.000000000 -0400
35099+++ linux-2.6.32.41/drivers/uwb/wlp/sysfs.c 2011-04-17 15:56:46.000000000 -0400
35100@@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobjec
35101 return ret;
35102 }
35103
35104-static
35105-struct sysfs_ops wss_sysfs_ops = {
35106+static const struct sysfs_ops wss_sysfs_ops = {
35107 .show = wlp_wss_attr_show,
35108 .store = wlp_wss_attr_store,
35109 };
35110diff -urNp linux-2.6.32.41/drivers/video/atmel_lcdfb.c linux-2.6.32.41/drivers/video/atmel_lcdfb.c
35111--- linux-2.6.32.41/drivers/video/atmel_lcdfb.c 2011-03-27 14:31:47.000000000 -0400
35112+++ linux-2.6.32.41/drivers/video/atmel_lcdfb.c 2011-04-17 15:56:46.000000000 -0400
35113@@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struc
35114 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
35115 }
35116
35117-static struct backlight_ops atmel_lcdc_bl_ops = {
35118+static const struct backlight_ops atmel_lcdc_bl_ops = {
35119 .update_status = atmel_bl_update_status,
35120 .get_brightness = atmel_bl_get_brightness,
35121 };
35122diff -urNp linux-2.6.32.41/drivers/video/aty/aty128fb.c linux-2.6.32.41/drivers/video/aty/aty128fb.c
35123--- linux-2.6.32.41/drivers/video/aty/aty128fb.c 2011-03-27 14:31:47.000000000 -0400
35124+++ linux-2.6.32.41/drivers/video/aty/aty128fb.c 2011-04-17 15:56:46.000000000 -0400
35125@@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(stru
35126 return bd->props.brightness;
35127 }
35128
35129-static struct backlight_ops aty128_bl_data = {
35130+static const struct backlight_ops aty128_bl_data = {
35131 .get_brightness = aty128_bl_get_brightness,
35132 .update_status = aty128_bl_update_status,
35133 };
35134diff -urNp linux-2.6.32.41/drivers/video/aty/atyfb_base.c linux-2.6.32.41/drivers/video/aty/atyfb_base.c
35135--- linux-2.6.32.41/drivers/video/aty/atyfb_base.c 2011-03-27 14:31:47.000000000 -0400
35136+++ linux-2.6.32.41/drivers/video/aty/atyfb_base.c 2011-04-17 15:56:46.000000000 -0400
35137@@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct
35138 return bd->props.brightness;
35139 }
35140
35141-static struct backlight_ops aty_bl_data = {
35142+static const struct backlight_ops aty_bl_data = {
35143 .get_brightness = aty_bl_get_brightness,
35144 .update_status = aty_bl_update_status,
35145 };
35146diff -urNp linux-2.6.32.41/drivers/video/aty/radeon_backlight.c linux-2.6.32.41/drivers/video/aty/radeon_backlight.c
35147--- linux-2.6.32.41/drivers/video/aty/radeon_backlight.c 2011-03-27 14:31:47.000000000 -0400
35148+++ linux-2.6.32.41/drivers/video/aty/radeon_backlight.c 2011-04-17 15:56:46.000000000 -0400
35149@@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(stru
35150 return bd->props.brightness;
35151 }
35152
35153-static struct backlight_ops radeon_bl_data = {
35154+static const struct backlight_ops radeon_bl_data = {
35155 .get_brightness = radeon_bl_get_brightness,
35156 .update_status = radeon_bl_update_status,
35157 };
35158diff -urNp linux-2.6.32.41/drivers/video/backlight/adp5520_bl.c linux-2.6.32.41/drivers/video/backlight/adp5520_bl.c
35159--- linux-2.6.32.41/drivers/video/backlight/adp5520_bl.c 2011-03-27 14:31:47.000000000 -0400
35160+++ linux-2.6.32.41/drivers/video/backlight/adp5520_bl.c 2011-04-17 15:56:46.000000000 -0400
35161@@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(str
35162 return error ? data->current_brightness : reg_val;
35163 }
35164
35165-static struct backlight_ops adp5520_bl_ops = {
35166+static const struct backlight_ops adp5520_bl_ops = {
35167 .update_status = adp5520_bl_update_status,
35168 .get_brightness = adp5520_bl_get_brightness,
35169 };
35170diff -urNp linux-2.6.32.41/drivers/video/backlight/adx_bl.c linux-2.6.32.41/drivers/video/backlight/adx_bl.c
35171--- linux-2.6.32.41/drivers/video/backlight/adx_bl.c 2011-03-27 14:31:47.000000000 -0400
35172+++ linux-2.6.32.41/drivers/video/backlight/adx_bl.c 2011-04-17 15:56:46.000000000 -0400
35173@@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct
35174 return 1;
35175 }
35176
35177-static struct backlight_ops adx_backlight_ops = {
35178+static const struct backlight_ops adx_backlight_ops = {
35179 .options = 0,
35180 .update_status = adx_backlight_update_status,
35181 .get_brightness = adx_backlight_get_brightness,
35182diff -urNp linux-2.6.32.41/drivers/video/backlight/atmel-pwm-bl.c linux-2.6.32.41/drivers/video/backlight/atmel-pwm-bl.c
35183--- linux-2.6.32.41/drivers/video/backlight/atmel-pwm-bl.c 2011-03-27 14:31:47.000000000 -0400
35184+++ linux-2.6.32.41/drivers/video/backlight/atmel-pwm-bl.c 2011-04-17 15:56:46.000000000 -0400
35185@@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct
35186 return pwm_channel_enable(&pwmbl->pwmc);
35187 }
35188
35189-static struct backlight_ops atmel_pwm_bl_ops = {
35190+static const struct backlight_ops atmel_pwm_bl_ops = {
35191 .get_brightness = atmel_pwm_bl_get_intensity,
35192 .update_status = atmel_pwm_bl_set_intensity,
35193 };
35194diff -urNp linux-2.6.32.41/drivers/video/backlight/backlight.c linux-2.6.32.41/drivers/video/backlight/backlight.c
35195--- linux-2.6.32.41/drivers/video/backlight/backlight.c 2011-03-27 14:31:47.000000000 -0400
35196+++ linux-2.6.32.41/drivers/video/backlight/backlight.c 2011-04-17 15:56:46.000000000 -0400
35197@@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
35198 * ERR_PTR() or a pointer to the newly allocated device.
35199 */
35200 struct backlight_device *backlight_device_register(const char *name,
35201- struct device *parent, void *devdata, struct backlight_ops *ops)
35202+ struct device *parent, void *devdata, const struct backlight_ops *ops)
35203 {
35204 struct backlight_device *new_bd;
35205 int rc;
35206diff -urNp linux-2.6.32.41/drivers/video/backlight/corgi_lcd.c linux-2.6.32.41/drivers/video/backlight/corgi_lcd.c
35207--- linux-2.6.32.41/drivers/video/backlight/corgi_lcd.c 2011-03-27 14:31:47.000000000 -0400
35208+++ linux-2.6.32.41/drivers/video/backlight/corgi_lcd.c 2011-04-17 15:56:46.000000000 -0400
35209@@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit
35210 }
35211 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
35212
35213-static struct backlight_ops corgi_bl_ops = {
35214+static const struct backlight_ops corgi_bl_ops = {
35215 .get_brightness = corgi_bl_get_intensity,
35216 .update_status = corgi_bl_update_status,
35217 };
35218diff -urNp linux-2.6.32.41/drivers/video/backlight/cr_bllcd.c linux-2.6.32.41/drivers/video/backlight/cr_bllcd.c
35219--- linux-2.6.32.41/drivers/video/backlight/cr_bllcd.c 2011-03-27 14:31:47.000000000 -0400
35220+++ linux-2.6.32.41/drivers/video/backlight/cr_bllcd.c 2011-04-17 15:56:46.000000000 -0400
35221@@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(st
35222 return intensity;
35223 }
35224
35225-static struct backlight_ops cr_backlight_ops = {
35226+static const struct backlight_ops cr_backlight_ops = {
35227 .get_brightness = cr_backlight_get_intensity,
35228 .update_status = cr_backlight_set_intensity,
35229 };
35230diff -urNp linux-2.6.32.41/drivers/video/backlight/da903x_bl.c linux-2.6.32.41/drivers/video/backlight/da903x_bl.c
35231--- linux-2.6.32.41/drivers/video/backlight/da903x_bl.c 2011-03-27 14:31:47.000000000 -0400
35232+++ linux-2.6.32.41/drivers/video/backlight/da903x_bl.c 2011-04-17 15:56:46.000000000 -0400
35233@@ -94,7 +94,7 @@ static int da903x_backlight_get_brightne
35234 return data->current_brightness;
35235 }
35236
35237-static struct backlight_ops da903x_backlight_ops = {
35238+static const struct backlight_ops da903x_backlight_ops = {
35239 .update_status = da903x_backlight_update_status,
35240 .get_brightness = da903x_backlight_get_brightness,
35241 };
35242diff -urNp linux-2.6.32.41/drivers/video/backlight/generic_bl.c linux-2.6.32.41/drivers/video/backlight/generic_bl.c
35243--- linux-2.6.32.41/drivers/video/backlight/generic_bl.c 2011-03-27 14:31:47.000000000 -0400
35244+++ linux-2.6.32.41/drivers/video/backlight/generic_bl.c 2011-04-17 15:56:46.000000000 -0400
35245@@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
35246 }
35247 EXPORT_SYMBOL(corgibl_limit_intensity);
35248
35249-static struct backlight_ops genericbl_ops = {
35250+static const struct backlight_ops genericbl_ops = {
35251 .options = BL_CORE_SUSPENDRESUME,
35252 .get_brightness = genericbl_get_intensity,
35253 .update_status = genericbl_send_intensity,
35254diff -urNp linux-2.6.32.41/drivers/video/backlight/hp680_bl.c linux-2.6.32.41/drivers/video/backlight/hp680_bl.c
35255--- linux-2.6.32.41/drivers/video/backlight/hp680_bl.c 2011-03-27 14:31:47.000000000 -0400
35256+++ linux-2.6.32.41/drivers/video/backlight/hp680_bl.c 2011-04-17 15:56:46.000000000 -0400
35257@@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct
35258 return current_intensity;
35259 }
35260
35261-static struct backlight_ops hp680bl_ops = {
35262+static const struct backlight_ops hp680bl_ops = {
35263 .get_brightness = hp680bl_get_intensity,
35264 .update_status = hp680bl_set_intensity,
35265 };
35266diff -urNp linux-2.6.32.41/drivers/video/backlight/jornada720_bl.c linux-2.6.32.41/drivers/video/backlight/jornada720_bl.c
35267--- linux-2.6.32.41/drivers/video/backlight/jornada720_bl.c 2011-03-27 14:31:47.000000000 -0400
35268+++ linux-2.6.32.41/drivers/video/backlight/jornada720_bl.c 2011-04-17 15:56:46.000000000 -0400
35269@@ -93,7 +93,7 @@ out:
35270 return ret;
35271 }
35272
35273-static struct backlight_ops jornada_bl_ops = {
35274+static const struct backlight_ops jornada_bl_ops = {
35275 .get_brightness = jornada_bl_get_brightness,
35276 .update_status = jornada_bl_update_status,
35277 .options = BL_CORE_SUSPENDRESUME,
35278diff -urNp linux-2.6.32.41/drivers/video/backlight/kb3886_bl.c linux-2.6.32.41/drivers/video/backlight/kb3886_bl.c
35279--- linux-2.6.32.41/drivers/video/backlight/kb3886_bl.c 2011-03-27 14:31:47.000000000 -0400
35280+++ linux-2.6.32.41/drivers/video/backlight/kb3886_bl.c 2011-04-17 15:56:46.000000000 -0400
35281@@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct
35282 return kb3886bl_intensity;
35283 }
35284
35285-static struct backlight_ops kb3886bl_ops = {
35286+static const struct backlight_ops kb3886bl_ops = {
35287 .get_brightness = kb3886bl_get_intensity,
35288 .update_status = kb3886bl_send_intensity,
35289 };
35290diff -urNp linux-2.6.32.41/drivers/video/backlight/locomolcd.c linux-2.6.32.41/drivers/video/backlight/locomolcd.c
35291--- linux-2.6.32.41/drivers/video/backlight/locomolcd.c 2011-03-27 14:31:47.000000000 -0400
35292+++ linux-2.6.32.41/drivers/video/backlight/locomolcd.c 2011-04-17 15:56:46.000000000 -0400
35293@@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struc
35294 return current_intensity;
35295 }
35296
35297-static struct backlight_ops locomobl_data = {
35298+static const struct backlight_ops locomobl_data = {
35299 .get_brightness = locomolcd_get_intensity,
35300 .update_status = locomolcd_set_intensity,
35301 };
35302diff -urNp linux-2.6.32.41/drivers/video/backlight/mbp_nvidia_bl.c linux-2.6.32.41/drivers/video/backlight/mbp_nvidia_bl.c
35303--- linux-2.6.32.41/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:01.000000000 -0400
35304+++ linux-2.6.32.41/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:33.000000000 -0400
35305@@ -33,7 +33,7 @@ struct dmi_match_data {
35306 unsigned long iostart;
35307 unsigned long iolen;
35308 /* Backlight operations structure. */
35309- struct backlight_ops backlight_ops;
35310+ const struct backlight_ops backlight_ops;
35311 };
35312
35313 /* Module parameters. */
35314diff -urNp linux-2.6.32.41/drivers/video/backlight/omap1_bl.c linux-2.6.32.41/drivers/video/backlight/omap1_bl.c
35315--- linux-2.6.32.41/drivers/video/backlight/omap1_bl.c 2011-03-27 14:31:47.000000000 -0400
35316+++ linux-2.6.32.41/drivers/video/backlight/omap1_bl.c 2011-04-17 15:56:46.000000000 -0400
35317@@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct b
35318 return bl->current_intensity;
35319 }
35320
35321-static struct backlight_ops omapbl_ops = {
35322+static const struct backlight_ops omapbl_ops = {
35323 .get_brightness = omapbl_get_intensity,
35324 .update_status = omapbl_update_status,
35325 };
35326diff -urNp linux-2.6.32.41/drivers/video/backlight/progear_bl.c linux-2.6.32.41/drivers/video/backlight/progear_bl.c
35327--- linux-2.6.32.41/drivers/video/backlight/progear_bl.c 2011-03-27 14:31:47.000000000 -0400
35328+++ linux-2.6.32.41/drivers/video/backlight/progear_bl.c 2011-04-17 15:56:46.000000000 -0400
35329@@ -54,7 +54,7 @@ static int progearbl_get_intensity(struc
35330 return intensity - HW_LEVEL_MIN;
35331 }
35332
35333-static struct backlight_ops progearbl_ops = {
35334+static const struct backlight_ops progearbl_ops = {
35335 .get_brightness = progearbl_get_intensity,
35336 .update_status = progearbl_set_intensity,
35337 };
35338diff -urNp linux-2.6.32.41/drivers/video/backlight/pwm_bl.c linux-2.6.32.41/drivers/video/backlight/pwm_bl.c
35339--- linux-2.6.32.41/drivers/video/backlight/pwm_bl.c 2011-03-27 14:31:47.000000000 -0400
35340+++ linux-2.6.32.41/drivers/video/backlight/pwm_bl.c 2011-04-17 15:56:46.000000000 -0400
35341@@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(
35342 return bl->props.brightness;
35343 }
35344
35345-static struct backlight_ops pwm_backlight_ops = {
35346+static const struct backlight_ops pwm_backlight_ops = {
35347 .update_status = pwm_backlight_update_status,
35348 .get_brightness = pwm_backlight_get_brightness,
35349 };
35350diff -urNp linux-2.6.32.41/drivers/video/backlight/tosa_bl.c linux-2.6.32.41/drivers/video/backlight/tosa_bl.c
35351--- linux-2.6.32.41/drivers/video/backlight/tosa_bl.c 2011-03-27 14:31:47.000000000 -0400
35352+++ linux-2.6.32.41/drivers/video/backlight/tosa_bl.c 2011-04-17 15:56:46.000000000 -0400
35353@@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct
35354 return props->brightness;
35355 }
35356
35357-static struct backlight_ops bl_ops = {
35358+static const struct backlight_ops bl_ops = {
35359 .get_brightness = tosa_bl_get_brightness,
35360 .update_status = tosa_bl_update_status,
35361 };
35362diff -urNp linux-2.6.32.41/drivers/video/backlight/wm831x_bl.c linux-2.6.32.41/drivers/video/backlight/wm831x_bl.c
35363--- linux-2.6.32.41/drivers/video/backlight/wm831x_bl.c 2011-03-27 14:31:47.000000000 -0400
35364+++ linux-2.6.32.41/drivers/video/backlight/wm831x_bl.c 2011-04-17 15:56:46.000000000 -0400
35365@@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightne
35366 return data->current_brightness;
35367 }
35368
35369-static struct backlight_ops wm831x_backlight_ops = {
35370+static const struct backlight_ops wm831x_backlight_ops = {
35371 .options = BL_CORE_SUSPENDRESUME,
35372 .update_status = wm831x_backlight_update_status,
35373 .get_brightness = wm831x_backlight_get_brightness,
35374diff -urNp linux-2.6.32.41/drivers/video/bf54x-lq043fb.c linux-2.6.32.41/drivers/video/bf54x-lq043fb.c
35375--- linux-2.6.32.41/drivers/video/bf54x-lq043fb.c 2011-03-27 14:31:47.000000000 -0400
35376+++ linux-2.6.32.41/drivers/video/bf54x-lq043fb.c 2011-04-17 15:56:46.000000000 -0400
35377@@ -463,7 +463,7 @@ static int bl_get_brightness(struct back
35378 return 0;
35379 }
35380
35381-static struct backlight_ops bfin_lq043fb_bl_ops = {
35382+static const struct backlight_ops bfin_lq043fb_bl_ops = {
35383 .get_brightness = bl_get_brightness,
35384 };
35385
35386diff -urNp linux-2.6.32.41/drivers/video/bfin-t350mcqb-fb.c linux-2.6.32.41/drivers/video/bfin-t350mcqb-fb.c
35387--- linux-2.6.32.41/drivers/video/bfin-t350mcqb-fb.c 2011-03-27 14:31:47.000000000 -0400
35388+++ linux-2.6.32.41/drivers/video/bfin-t350mcqb-fb.c 2011-04-17 15:56:46.000000000 -0400
35389@@ -381,7 +381,7 @@ static int bl_get_brightness(struct back
35390 return 0;
35391 }
35392
35393-static struct backlight_ops bfin_lq043fb_bl_ops = {
35394+static const struct backlight_ops bfin_lq043fb_bl_ops = {
35395 .get_brightness = bl_get_brightness,
35396 };
35397
35398diff -urNp linux-2.6.32.41/drivers/video/fbcmap.c linux-2.6.32.41/drivers/video/fbcmap.c
35399--- linux-2.6.32.41/drivers/video/fbcmap.c 2011-03-27 14:31:47.000000000 -0400
35400+++ linux-2.6.32.41/drivers/video/fbcmap.c 2011-04-17 15:56:46.000000000 -0400
35401@@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user
35402 rc = -ENODEV;
35403 goto out;
35404 }
35405- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
35406- !info->fbops->fb_setcmap)) {
35407+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
35408 rc = -EINVAL;
35409 goto out1;
35410 }
35411diff -urNp linux-2.6.32.41/drivers/video/fbmem.c linux-2.6.32.41/drivers/video/fbmem.c
35412--- linux-2.6.32.41/drivers/video/fbmem.c 2011-03-27 14:31:47.000000000 -0400
35413+++ linux-2.6.32.41/drivers/video/fbmem.c 2011-05-16 21:46:57.000000000 -0400
35414@@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_in
35415 image->dx += image->width + 8;
35416 }
35417 } else if (rotate == FB_ROTATE_UD) {
35418- for (x = 0; x < num && image->dx >= 0; x++) {
35419+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
35420 info->fbops->fb_imageblit(info, image);
35421 image->dx -= image->width + 8;
35422 }
35423@@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_in
35424 image->dy += image->height + 8;
35425 }
35426 } else if (rotate == FB_ROTATE_CCW) {
35427- for (x = 0; x < num && image->dy >= 0; x++) {
35428+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
35429 info->fbops->fb_imageblit(info, image);
35430 image->dy -= image->height + 8;
35431 }
35432@@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct
35433 int flags = info->flags;
35434 int ret = 0;
35435
35436+ pax_track_stack();
35437+
35438 if (var->activate & FB_ACTIVATE_INV_MODE) {
35439 struct fb_videomode mode1, mode2;
35440
35441@@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *
35442 void __user *argp = (void __user *)arg;
35443 long ret = 0;
35444
35445+ pax_track_stack();
35446+
35447 switch (cmd) {
35448 case FBIOGET_VSCREENINFO:
35449 if (!lock_fb_info(info))
35450@@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *
35451 return -EFAULT;
35452 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
35453 return -EINVAL;
35454- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
35455+ if (con2fb.framebuffer >= FB_MAX)
35456 return -EINVAL;
35457 if (!registered_fb[con2fb.framebuffer])
35458 request_module("fb%d", con2fb.framebuffer);
35459diff -urNp linux-2.6.32.41/drivers/video/i810/i810_accel.c linux-2.6.32.41/drivers/video/i810/i810_accel.c
35460--- linux-2.6.32.41/drivers/video/i810/i810_accel.c 2011-03-27 14:31:47.000000000 -0400
35461+++ linux-2.6.32.41/drivers/video/i810/i810_accel.c 2011-04-17 15:56:46.000000000 -0400
35462@@ -73,6 +73,7 @@ static inline int wait_for_space(struct
35463 }
35464 }
35465 printk("ringbuffer lockup!!!\n");
35466+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
35467 i810_report_error(mmio);
35468 par->dev_flags |= LOCKUP;
35469 info->pixmap.scan_align = 1;
35470diff -urNp linux-2.6.32.41/drivers/video/nvidia/nv_backlight.c linux-2.6.32.41/drivers/video/nvidia/nv_backlight.c
35471--- linux-2.6.32.41/drivers/video/nvidia/nv_backlight.c 2011-03-27 14:31:47.000000000 -0400
35472+++ linux-2.6.32.41/drivers/video/nvidia/nv_backlight.c 2011-04-17 15:56:46.000000000 -0400
35473@@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(stru
35474 return bd->props.brightness;
35475 }
35476
35477-static struct backlight_ops nvidia_bl_ops = {
35478+static const struct backlight_ops nvidia_bl_ops = {
35479 .get_brightness = nvidia_bl_get_brightness,
35480 .update_status = nvidia_bl_update_status,
35481 };
35482diff -urNp linux-2.6.32.41/drivers/video/riva/fbdev.c linux-2.6.32.41/drivers/video/riva/fbdev.c
35483--- linux-2.6.32.41/drivers/video/riva/fbdev.c 2011-03-27 14:31:47.000000000 -0400
35484+++ linux-2.6.32.41/drivers/video/riva/fbdev.c 2011-04-17 15:56:46.000000000 -0400
35485@@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct
35486 return bd->props.brightness;
35487 }
35488
35489-static struct backlight_ops riva_bl_ops = {
35490+static const struct backlight_ops riva_bl_ops = {
35491 .get_brightness = riva_bl_get_brightness,
35492 .update_status = riva_bl_update_status,
35493 };
35494diff -urNp linux-2.6.32.41/drivers/video/uvesafb.c linux-2.6.32.41/drivers/video/uvesafb.c
35495--- linux-2.6.32.41/drivers/video/uvesafb.c 2011-03-27 14:31:47.000000000 -0400
35496+++ linux-2.6.32.41/drivers/video/uvesafb.c 2011-04-17 15:56:46.000000000 -0400
35497@@ -18,6 +18,7 @@
35498 #include <linux/fb.h>
35499 #include <linux/io.h>
35500 #include <linux/mutex.h>
35501+#include <linux/moduleloader.h>
35502 #include <video/edid.h>
35503 #include <video/uvesafb.h>
35504 #ifdef CONFIG_X86
35505@@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
35506 NULL,
35507 };
35508
35509- return call_usermodehelper(v86d_path, argv, envp, 1);
35510+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
35511 }
35512
35513 /*
35514@@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(
35515 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
35516 par->pmi_setpal = par->ypan = 0;
35517 } else {
35518+
35519+#ifdef CONFIG_PAX_KERNEXEC
35520+#ifdef CONFIG_MODULES
35521+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
35522+#endif
35523+ if (!par->pmi_code) {
35524+ par->pmi_setpal = par->ypan = 0;
35525+ return 0;
35526+ }
35527+#endif
35528+
35529 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
35530 + task->t.regs.edi);
35531+
35532+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35533+ pax_open_kernel();
35534+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
35535+ pax_close_kernel();
35536+
35537+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
35538+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
35539+#else
35540 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
35541 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
35542+#endif
35543+
35544 printk(KERN_INFO "uvesafb: protected mode interface info at "
35545 "%04x:%04x\n",
35546 (u16)task->t.regs.es, (u16)task->t.regs.edi);
35547@@ -1799,6 +1822,11 @@ out:
35548 if (par->vbe_modes)
35549 kfree(par->vbe_modes);
35550
35551+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35552+ if (par->pmi_code)
35553+ module_free_exec(NULL, par->pmi_code);
35554+#endif
35555+
35556 framebuffer_release(info);
35557 return err;
35558 }
35559@@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platfor
35560 kfree(par->vbe_state_orig);
35561 if (par->vbe_state_saved)
35562 kfree(par->vbe_state_saved);
35563+
35564+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35565+ if (par->pmi_code)
35566+ module_free_exec(NULL, par->pmi_code);
35567+#endif
35568+
35569 }
35570
35571 framebuffer_release(info);
35572diff -urNp linux-2.6.32.41/drivers/video/vesafb.c linux-2.6.32.41/drivers/video/vesafb.c
35573--- linux-2.6.32.41/drivers/video/vesafb.c 2011-03-27 14:31:47.000000000 -0400
35574+++ linux-2.6.32.41/drivers/video/vesafb.c 2011-04-17 15:56:46.000000000 -0400
35575@@ -9,6 +9,7 @@
35576 */
35577
35578 #include <linux/module.h>
35579+#include <linux/moduleloader.h>
35580 #include <linux/kernel.h>
35581 #include <linux/errno.h>
35582 #include <linux/string.h>
35583@@ -53,8 +54,8 @@ static int vram_remap __initdata; /*
35584 static int vram_total __initdata; /* Set total amount of memory */
35585 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
35586 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
35587-static void (*pmi_start)(void) __read_mostly;
35588-static void (*pmi_pal) (void) __read_mostly;
35589+static void (*pmi_start)(void) __read_only;
35590+static void (*pmi_pal) (void) __read_only;
35591 static int depth __read_mostly;
35592 static int vga_compat __read_mostly;
35593 /* --------------------------------------------------------------------- */
35594@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
35595 unsigned int size_vmode;
35596 unsigned int size_remap;
35597 unsigned int size_total;
35598+ void *pmi_code = NULL;
35599
35600 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
35601 return -ENODEV;
35602@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
35603 size_remap = size_total;
35604 vesafb_fix.smem_len = size_remap;
35605
35606-#ifndef __i386__
35607- screen_info.vesapm_seg = 0;
35608-#endif
35609-
35610 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
35611 printk(KERN_WARNING
35612 "vesafb: cannot reserve video memory at 0x%lx\n",
35613@@ -315,9 +313,21 @@ static int __init vesafb_probe(struct pl
35614 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
35615 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
35616
35617+#ifdef __i386__
35618+
35619+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35620+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
35621+ if (!pmi_code)
35622+#elif !defined(CONFIG_PAX_KERNEXEC)
35623+ if (0)
35624+#endif
35625+
35626+#endif
35627+ screen_info.vesapm_seg = 0;
35628+
35629 if (screen_info.vesapm_seg) {
35630- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
35631- screen_info.vesapm_seg,screen_info.vesapm_off);
35632+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
35633+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
35634 }
35635
35636 if (screen_info.vesapm_seg < 0xc000)
35637@@ -325,9 +335,25 @@ static int __init vesafb_probe(struct pl
35638
35639 if (ypan || pmi_setpal) {
35640 unsigned short *pmi_base;
35641- pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
35642- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
35643- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
35644+
35645+ pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
35646+
35647+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35648+ pax_open_kernel();
35649+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
35650+#else
35651+ pmi_code = pmi_base;
35652+#endif
35653+
35654+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
35655+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
35656+
35657+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35658+ pmi_start = ktva_ktla(pmi_start);
35659+ pmi_pal = ktva_ktla(pmi_pal);
35660+ pax_close_kernel();
35661+#endif
35662+
35663 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
35664 if (pmi_base[3]) {
35665 printk(KERN_INFO "vesafb: pmi: ports = ");
35666@@ -469,6 +495,11 @@ static int __init vesafb_probe(struct pl
35667 info->node, info->fix.id);
35668 return 0;
35669 err:
35670+
35671+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35672+ module_free_exec(NULL, pmi_code);
35673+#endif
35674+
35675 if (info->screen_base)
35676 iounmap(info->screen_base);
35677 framebuffer_release(info);
35678diff -urNp linux-2.6.32.41/drivers/xen/sys-hypervisor.c linux-2.6.32.41/drivers/xen/sys-hypervisor.c
35679--- linux-2.6.32.41/drivers/xen/sys-hypervisor.c 2011-03-27 14:31:47.000000000 -0400
35680+++ linux-2.6.32.41/drivers/xen/sys-hypervisor.c 2011-04-17 15:56:46.000000000 -0400
35681@@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct ko
35682 return 0;
35683 }
35684
35685-static struct sysfs_ops hyp_sysfs_ops = {
35686+static const struct sysfs_ops hyp_sysfs_ops = {
35687 .show = hyp_sysfs_show,
35688 .store = hyp_sysfs_store,
35689 };
35690diff -urNp linux-2.6.32.41/fs/9p/vfs_inode.c linux-2.6.32.41/fs/9p/vfs_inode.c
35691--- linux-2.6.32.41/fs/9p/vfs_inode.c 2011-03-27 14:31:47.000000000 -0400
35692+++ linux-2.6.32.41/fs/9p/vfs_inode.c 2011-04-17 15:56:46.000000000 -0400
35693@@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct
35694 static void
35695 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
35696 {
35697- char *s = nd_get_link(nd);
35698+ const char *s = nd_get_link(nd);
35699
35700 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
35701 IS_ERR(s) ? "<error>" : s);
35702diff -urNp linux-2.6.32.41/fs/aio.c linux-2.6.32.41/fs/aio.c
35703--- linux-2.6.32.41/fs/aio.c 2011-03-27 14:31:47.000000000 -0400
35704+++ linux-2.6.32.41/fs/aio.c 2011-05-16 21:46:57.000000000 -0400
35705@@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx
35706 size += sizeof(struct io_event) * nr_events;
35707 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
35708
35709- if (nr_pages < 0)
35710+ if (nr_pages <= 0)
35711 return -EINVAL;
35712
35713 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
35714@@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ct
35715 struct aio_timeout to;
35716 int retry = 0;
35717
35718+ pax_track_stack();
35719+
35720 /* needed to zero any padding within an entry (there shouldn't be
35721 * any, but C is fun!
35722 */
35723diff -urNp linux-2.6.32.41/fs/attr.c linux-2.6.32.41/fs/attr.c
35724--- linux-2.6.32.41/fs/attr.c 2011-03-27 14:31:47.000000000 -0400
35725+++ linux-2.6.32.41/fs/attr.c 2011-04-17 15:56:46.000000000 -0400
35726@@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode
35727 unsigned long limit;
35728
35729 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
35730+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
35731 if (limit != RLIM_INFINITY && offset > limit)
35732 goto out_sig;
35733 if (offset > inode->i_sb->s_maxbytes)
35734diff -urNp linux-2.6.32.41/fs/autofs/root.c linux-2.6.32.41/fs/autofs/root.c
35735--- linux-2.6.32.41/fs/autofs/root.c 2011-03-27 14:31:47.000000000 -0400
35736+++ linux-2.6.32.41/fs/autofs/root.c 2011-04-17 15:56:46.000000000 -0400
35737@@ -299,7 +299,8 @@ static int autofs_root_symlink(struct in
35738 set_bit(n,sbi->symlink_bitmap);
35739 sl = &sbi->symlink[n];
35740 sl->len = strlen(symname);
35741- sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
35742+ slsize = sl->len+1;
35743+ sl->data = kmalloc(slsize, GFP_KERNEL);
35744 if (!sl->data) {
35745 clear_bit(n,sbi->symlink_bitmap);
35746 unlock_kernel();
35747diff -urNp linux-2.6.32.41/fs/autofs4/symlink.c linux-2.6.32.41/fs/autofs4/symlink.c
35748--- linux-2.6.32.41/fs/autofs4/symlink.c 2011-03-27 14:31:47.000000000 -0400
35749+++ linux-2.6.32.41/fs/autofs4/symlink.c 2011-04-17 15:56:46.000000000 -0400
35750@@ -15,7 +15,7 @@
35751 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
35752 {
35753 struct autofs_info *ino = autofs4_dentry_ino(dentry);
35754- nd_set_link(nd, (char *)ino->u.symlink);
35755+ nd_set_link(nd, ino->u.symlink);
35756 return NULL;
35757 }
35758
35759diff -urNp linux-2.6.32.41/fs/befs/linuxvfs.c linux-2.6.32.41/fs/befs/linuxvfs.c
35760--- linux-2.6.32.41/fs/befs/linuxvfs.c 2011-03-27 14:31:47.000000000 -0400
35761+++ linux-2.6.32.41/fs/befs/linuxvfs.c 2011-04-17 15:56:46.000000000 -0400
35762@@ -493,7 +493,7 @@ static void befs_put_link(struct dentry
35763 {
35764 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
35765 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
35766- char *link = nd_get_link(nd);
35767+ const char *link = nd_get_link(nd);
35768 if (!IS_ERR(link))
35769 kfree(link);
35770 }
35771diff -urNp linux-2.6.32.41/fs/binfmt_aout.c linux-2.6.32.41/fs/binfmt_aout.c
35772--- linux-2.6.32.41/fs/binfmt_aout.c 2011-03-27 14:31:47.000000000 -0400
35773+++ linux-2.6.32.41/fs/binfmt_aout.c 2011-04-17 15:56:46.000000000 -0400
35774@@ -16,6 +16,7 @@
35775 #include <linux/string.h>
35776 #include <linux/fs.h>
35777 #include <linux/file.h>
35778+#include <linux/security.h>
35779 #include <linux/stat.h>
35780 #include <linux/fcntl.h>
35781 #include <linux/ptrace.h>
35782@@ -102,6 +103,8 @@ static int aout_core_dump(long signr, st
35783 #endif
35784 # define START_STACK(u) (u.start_stack)
35785
35786+ memset(&dump, 0, sizeof(dump));
35787+
35788 fs = get_fs();
35789 set_fs(KERNEL_DS);
35790 has_dumped = 1;
35791@@ -113,10 +116,12 @@ static int aout_core_dump(long signr, st
35792
35793 /* If the size of the dump file exceeds the rlimit, then see what would happen
35794 if we wrote the stack, but not the data area. */
35795+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
35796 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
35797 dump.u_dsize = 0;
35798
35799 /* Make sure we have enough room to write the stack and data areas. */
35800+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
35801 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
35802 dump.u_ssize = 0;
35803
35804@@ -146,9 +151,7 @@ static int aout_core_dump(long signr, st
35805 dump_size = dump.u_ssize << PAGE_SHIFT;
35806 DUMP_WRITE(dump_start,dump_size);
35807 }
35808-/* Finally dump the task struct. Not be used by gdb, but could be useful */
35809- set_fs(KERNEL_DS);
35810- DUMP_WRITE(current,sizeof(*current));
35811+/* Finally, let's not dump the task struct. It's not used by gdb, but could be useful to an attacker */
35812 end_coredump:
35813 set_fs(fs);
35814 return has_dumped;
35815@@ -249,6 +252,8 @@ static int load_aout_binary(struct linux
35816 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
35817 if (rlim >= RLIM_INFINITY)
35818 rlim = ~0;
35819+
35820+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
35821 if (ex.a_data + ex.a_bss > rlim)
35822 return -ENOMEM;
35823
35824@@ -277,6 +282,27 @@ static int load_aout_binary(struct linux
35825 install_exec_creds(bprm);
35826 current->flags &= ~PF_FORKNOEXEC;
35827
35828+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
35829+ current->mm->pax_flags = 0UL;
35830+#endif
35831+
35832+#ifdef CONFIG_PAX_PAGEEXEC
35833+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
35834+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
35835+
35836+#ifdef CONFIG_PAX_EMUTRAMP
35837+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
35838+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
35839+#endif
35840+
35841+#ifdef CONFIG_PAX_MPROTECT
35842+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
35843+ current->mm->pax_flags |= MF_PAX_MPROTECT;
35844+#endif
35845+
35846+ }
35847+#endif
35848+
35849 if (N_MAGIC(ex) == OMAGIC) {
35850 unsigned long text_addr, map_size;
35851 loff_t pos;
35852@@ -349,7 +375,7 @@ static int load_aout_binary(struct linux
35853
35854 down_write(&current->mm->mmap_sem);
35855 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
35856- PROT_READ | PROT_WRITE | PROT_EXEC,
35857+ PROT_READ | PROT_WRITE,
35858 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
35859 fd_offset + ex.a_text);
35860 up_write(&current->mm->mmap_sem);
35861diff -urNp linux-2.6.32.41/fs/binfmt_elf.c linux-2.6.32.41/fs/binfmt_elf.c
35862--- linux-2.6.32.41/fs/binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
35863+++ linux-2.6.32.41/fs/binfmt_elf.c 2011-05-16 21:46:57.000000000 -0400
35864@@ -50,6 +50,10 @@ static int elf_core_dump(long signr, str
35865 #define elf_core_dump NULL
35866 #endif
35867
35868+#ifdef CONFIG_PAX_MPROTECT
35869+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
35870+#endif
35871+
35872 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
35873 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
35874 #else
35875@@ -69,6 +73,11 @@ static struct linux_binfmt elf_format =
35876 .load_binary = load_elf_binary,
35877 .load_shlib = load_elf_library,
35878 .core_dump = elf_core_dump,
35879+
35880+#ifdef CONFIG_PAX_MPROTECT
35881+ .handle_mprotect= elf_handle_mprotect,
35882+#endif
35883+
35884 .min_coredump = ELF_EXEC_PAGESIZE,
35885 .hasvdso = 1
35886 };
35887@@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
35888
35889 static int set_brk(unsigned long start, unsigned long end)
35890 {
35891+ unsigned long e = end;
35892+
35893 start = ELF_PAGEALIGN(start);
35894 end = ELF_PAGEALIGN(end);
35895 if (end > start) {
35896@@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
35897 if (BAD_ADDR(addr))
35898 return addr;
35899 }
35900- current->mm->start_brk = current->mm->brk = end;
35901+ current->mm->start_brk = current->mm->brk = e;
35902 return 0;
35903 }
35904
35905@@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
35906 elf_addr_t __user *u_rand_bytes;
35907 const char *k_platform = ELF_PLATFORM;
35908 const char *k_base_platform = ELF_BASE_PLATFORM;
35909- unsigned char k_rand_bytes[16];
35910+ u32 k_rand_bytes[4];
35911 int items;
35912 elf_addr_t *elf_info;
35913 int ei_index = 0;
35914 const struct cred *cred = current_cred();
35915 struct vm_area_struct *vma;
35916+ unsigned long saved_auxv[AT_VECTOR_SIZE];
35917+
35918+ pax_track_stack();
35919
35920 /*
35921 * In some cases (e.g. Hyper-Threading), we want to avoid L1
35922@@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
35923 * Generate 16 random bytes for userspace PRNG seeding.
35924 */
35925 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
35926- u_rand_bytes = (elf_addr_t __user *)
35927- STACK_ALLOC(p, sizeof(k_rand_bytes));
35928+ srandom32(k_rand_bytes[0] ^ random32());
35929+ srandom32(k_rand_bytes[1] ^ random32());
35930+ srandom32(k_rand_bytes[2] ^ random32());
35931+ srandom32(k_rand_bytes[3] ^ random32());
35932+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
35933+ u_rand_bytes = (elf_addr_t __user *) p;
35934 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
35935 return -EFAULT;
35936
35937@@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
35938 return -EFAULT;
35939 current->mm->env_end = p;
35940
35941+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
35942+
35943 /* Put the elf_info on the stack in the right place. */
35944 sp = (elf_addr_t __user *)envp + 1;
35945- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
35946+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
35947 return -EFAULT;
35948 return 0;
35949 }
35950@@ -385,10 +405,10 @@ static unsigned long load_elf_interp(str
35951 {
35952 struct elf_phdr *elf_phdata;
35953 struct elf_phdr *eppnt;
35954- unsigned long load_addr = 0;
35955+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
35956 int load_addr_set = 0;
35957 unsigned long last_bss = 0, elf_bss = 0;
35958- unsigned long error = ~0UL;
35959+ unsigned long error = -EINVAL;
35960 unsigned long total_size;
35961 int retval, i, size;
35962
35963@@ -434,6 +454,11 @@ static unsigned long load_elf_interp(str
35964 goto out_close;
35965 }
35966
35967+#ifdef CONFIG_PAX_SEGMEXEC
35968+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
35969+ pax_task_size = SEGMEXEC_TASK_SIZE;
35970+#endif
35971+
35972 eppnt = elf_phdata;
35973 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
35974 if (eppnt->p_type == PT_LOAD) {
35975@@ -477,8 +502,8 @@ static unsigned long load_elf_interp(str
35976 k = load_addr + eppnt->p_vaddr;
35977 if (BAD_ADDR(k) ||
35978 eppnt->p_filesz > eppnt->p_memsz ||
35979- eppnt->p_memsz > TASK_SIZE ||
35980- TASK_SIZE - eppnt->p_memsz < k) {
35981+ eppnt->p_memsz > pax_task_size ||
35982+ pax_task_size - eppnt->p_memsz < k) {
35983 error = -ENOMEM;
35984 goto out_close;
35985 }
35986@@ -532,6 +557,194 @@ out:
35987 return error;
35988 }
35989
35990+#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
35991+static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
35992+{
35993+ unsigned long pax_flags = 0UL;
35994+
35995+#ifdef CONFIG_PAX_PAGEEXEC
35996+ if (elf_phdata->p_flags & PF_PAGEEXEC)
35997+ pax_flags |= MF_PAX_PAGEEXEC;
35998+#endif
35999+
36000+#ifdef CONFIG_PAX_SEGMEXEC
36001+ if (elf_phdata->p_flags & PF_SEGMEXEC)
36002+ pax_flags |= MF_PAX_SEGMEXEC;
36003+#endif
36004+
36005+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36006+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36007+ if (nx_enabled)
36008+ pax_flags &= ~MF_PAX_SEGMEXEC;
36009+ else
36010+ pax_flags &= ~MF_PAX_PAGEEXEC;
36011+ }
36012+#endif
36013+
36014+#ifdef CONFIG_PAX_EMUTRAMP
36015+ if (elf_phdata->p_flags & PF_EMUTRAMP)
36016+ pax_flags |= MF_PAX_EMUTRAMP;
36017+#endif
36018+
36019+#ifdef CONFIG_PAX_MPROTECT
36020+ if (elf_phdata->p_flags & PF_MPROTECT)
36021+ pax_flags |= MF_PAX_MPROTECT;
36022+#endif
36023+
36024+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
36025+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
36026+ pax_flags |= MF_PAX_RANDMMAP;
36027+#endif
36028+
36029+ return pax_flags;
36030+}
36031+#endif
36032+
36033+#ifdef CONFIG_PAX_PT_PAX_FLAGS
36034+static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
36035+{
36036+ unsigned long pax_flags = 0UL;
36037+
36038+#ifdef CONFIG_PAX_PAGEEXEC
36039+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
36040+ pax_flags |= MF_PAX_PAGEEXEC;
36041+#endif
36042+
36043+#ifdef CONFIG_PAX_SEGMEXEC
36044+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
36045+ pax_flags |= MF_PAX_SEGMEXEC;
36046+#endif
36047+
36048+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36049+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36050+ if (nx_enabled)
36051+ pax_flags &= ~MF_PAX_SEGMEXEC;
36052+ else
36053+ pax_flags &= ~MF_PAX_PAGEEXEC;
36054+ }
36055+#endif
36056+
36057+#ifdef CONFIG_PAX_EMUTRAMP
36058+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
36059+ pax_flags |= MF_PAX_EMUTRAMP;
36060+#endif
36061+
36062+#ifdef CONFIG_PAX_MPROTECT
36063+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
36064+ pax_flags |= MF_PAX_MPROTECT;
36065+#endif
36066+
36067+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
36068+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
36069+ pax_flags |= MF_PAX_RANDMMAP;
36070+#endif
36071+
36072+ return pax_flags;
36073+}
36074+#endif
36075+
36076+#ifdef CONFIG_PAX_EI_PAX
36077+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
36078+{
36079+ unsigned long pax_flags = 0UL;
36080+
36081+#ifdef CONFIG_PAX_PAGEEXEC
36082+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
36083+ pax_flags |= MF_PAX_PAGEEXEC;
36084+#endif
36085+
36086+#ifdef CONFIG_PAX_SEGMEXEC
36087+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
36088+ pax_flags |= MF_PAX_SEGMEXEC;
36089+#endif
36090+
36091+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36092+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36093+ if (nx_enabled)
36094+ pax_flags &= ~MF_PAX_SEGMEXEC;
36095+ else
36096+ pax_flags &= ~MF_PAX_PAGEEXEC;
36097+ }
36098+#endif
36099+
36100+#ifdef CONFIG_PAX_EMUTRAMP
36101+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
36102+ pax_flags |= MF_PAX_EMUTRAMP;
36103+#endif
36104+
36105+#ifdef CONFIG_PAX_MPROTECT
36106+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
36107+ pax_flags |= MF_PAX_MPROTECT;
36108+#endif
36109+
36110+#ifdef CONFIG_PAX_ASLR
36111+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
36112+ pax_flags |= MF_PAX_RANDMMAP;
36113+#endif
36114+
36115+ return pax_flags;
36116+}
36117+#endif
36118+
36119+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
36120+static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
36121+{
36122+ unsigned long pax_flags = 0UL;
36123+
36124+#ifdef CONFIG_PAX_PT_PAX_FLAGS
36125+ unsigned long i;
36126+ int found_flags = 0;
36127+#endif
36128+
36129+#ifdef CONFIG_PAX_EI_PAX
36130+ pax_flags = pax_parse_ei_pax(elf_ex);
36131+#endif
36132+
36133+#ifdef CONFIG_PAX_PT_PAX_FLAGS
36134+ for (i = 0UL; i < elf_ex->e_phnum; i++)
36135+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
36136+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
36137+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
36138+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
36139+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
36140+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
36141+ return -EINVAL;
36142+
36143+#ifdef CONFIG_PAX_SOFTMODE
36144+ if (pax_softmode)
36145+ pax_flags = pax_parse_softmode(&elf_phdata[i]);
36146+ else
36147+#endif
36148+
36149+ pax_flags = pax_parse_hardmode(&elf_phdata[i]);
36150+ found_flags = 1;
36151+ break;
36152+ }
36153+#endif
36154+
36155+#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
36156+ if (found_flags == 0) {
36157+ struct elf_phdr phdr;
36158+ memset(&phdr, 0, sizeof(phdr));
36159+ phdr.p_flags = PF_NOEMUTRAMP;
36160+#ifdef CONFIG_PAX_SOFTMODE
36161+ if (pax_softmode)
36162+ pax_flags = pax_parse_softmode(&phdr);
36163+ else
36164+#endif
36165+ pax_flags = pax_parse_hardmode(&phdr);
36166+ }
36167+#endif
36168+
36169+
36170+ if (0 > pax_check_flags(&pax_flags))
36171+ return -EINVAL;
36172+
36173+ current->mm->pax_flags = pax_flags;
36174+ return 0;
36175+}
36176+#endif
36177+
36178 /*
36179 * These are the functions used to load ELF style executables and shared
36180 * libraries. There is no binary dependent code anywhere else.
36181@@ -548,6 +761,11 @@ static unsigned long randomize_stack_top
36182 {
36183 unsigned int random_variable = 0;
36184
36185+#ifdef CONFIG_PAX_RANDUSTACK
36186+ if (randomize_va_space)
36187+ return stack_top - current->mm->delta_stack;
36188+#endif
36189+
36190 if ((current->flags & PF_RANDOMIZE) &&
36191 !(current->personality & ADDR_NO_RANDOMIZE)) {
36192 random_variable = get_random_int() & STACK_RND_MASK;
36193@@ -566,7 +784,7 @@ static int load_elf_binary(struct linux_
36194 unsigned long load_addr = 0, load_bias = 0;
36195 int load_addr_set = 0;
36196 char * elf_interpreter = NULL;
36197- unsigned long error;
36198+ unsigned long error = 0;
36199 struct elf_phdr *elf_ppnt, *elf_phdata;
36200 unsigned long elf_bss, elf_brk;
36201 int retval, i;
36202@@ -576,11 +794,11 @@ static int load_elf_binary(struct linux_
36203 unsigned long start_code, end_code, start_data, end_data;
36204 unsigned long reloc_func_desc = 0;
36205 int executable_stack = EXSTACK_DEFAULT;
36206- unsigned long def_flags = 0;
36207 struct {
36208 struct elfhdr elf_ex;
36209 struct elfhdr interp_elf_ex;
36210 } *loc;
36211+ unsigned long pax_task_size = TASK_SIZE;
36212
36213 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
36214 if (!loc) {
36215@@ -718,11 +936,80 @@ static int load_elf_binary(struct linux_
36216
36217 /* OK, This is the point of no return */
36218 current->flags &= ~PF_FORKNOEXEC;
36219- current->mm->def_flags = def_flags;
36220+
36221+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
36222+ current->mm->pax_flags = 0UL;
36223+#endif
36224+
36225+#ifdef CONFIG_PAX_DLRESOLVE
36226+ current->mm->call_dl_resolve = 0UL;
36227+#endif
36228+
36229+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
36230+ current->mm->call_syscall = 0UL;
36231+#endif
36232+
36233+#ifdef CONFIG_PAX_ASLR
36234+ current->mm->delta_mmap = 0UL;
36235+ current->mm->delta_stack = 0UL;
36236+#endif
36237+
36238+ current->mm->def_flags = 0;
36239+
36240+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
36241+ if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
36242+ send_sig(SIGKILL, current, 0);
36243+ goto out_free_dentry;
36244+ }
36245+#endif
36246+
36247+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
36248+ pax_set_initial_flags(bprm);
36249+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
36250+ if (pax_set_initial_flags_func)
36251+ (pax_set_initial_flags_func)(bprm);
36252+#endif
36253+
36254+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
36255+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
36256+ current->mm->context.user_cs_limit = PAGE_SIZE;
36257+ current->mm->def_flags |= VM_PAGEEXEC;
36258+ }
36259+#endif
36260+
36261+#ifdef CONFIG_PAX_SEGMEXEC
36262+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
36263+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
36264+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
36265+ pax_task_size = SEGMEXEC_TASK_SIZE;
36266+ }
36267+#endif
36268+
36269+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
36270+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36271+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
36272+ put_cpu();
36273+ }
36274+#endif
36275
36276 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
36277 may depend on the personality. */
36278 SET_PERSONALITY(loc->elf_ex);
36279+
36280+#ifdef CONFIG_PAX_ASLR
36281+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
36282+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
36283+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
36284+ }
36285+#endif
36286+
36287+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
36288+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36289+ executable_stack = EXSTACK_DISABLE_X;
36290+ current->personality &= ~READ_IMPLIES_EXEC;
36291+ } else
36292+#endif
36293+
36294 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
36295 current->personality |= READ_IMPLIES_EXEC;
36296
36297@@ -804,6 +1091,20 @@ static int load_elf_binary(struct linux_
36298 #else
36299 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
36300 #endif
36301+
36302+#ifdef CONFIG_PAX_RANDMMAP
36303+ /* PaX: randomize base address at the default exe base if requested */
36304+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
36305+#ifdef CONFIG_SPARC64
36306+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
36307+#else
36308+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
36309+#endif
36310+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
36311+ elf_flags |= MAP_FIXED;
36312+ }
36313+#endif
36314+
36315 }
36316
36317 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
36318@@ -836,9 +1137,9 @@ static int load_elf_binary(struct linux_
36319 * allowed task size. Note that p_filesz must always be
36320 * <= p_memsz so it is only necessary to check p_memsz.
36321 */
36322- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
36323- elf_ppnt->p_memsz > TASK_SIZE ||
36324- TASK_SIZE - elf_ppnt->p_memsz < k) {
36325+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
36326+ elf_ppnt->p_memsz > pax_task_size ||
36327+ pax_task_size - elf_ppnt->p_memsz < k) {
36328 /* set_brk can never work. Avoid overflows. */
36329 send_sig(SIGKILL, current, 0);
36330 retval = -EINVAL;
36331@@ -866,6 +1167,11 @@ static int load_elf_binary(struct linux_
36332 start_data += load_bias;
36333 end_data += load_bias;
36334
36335+#ifdef CONFIG_PAX_RANDMMAP
36336+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
36337+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
36338+#endif
36339+
36340 /* Calling set_brk effectively mmaps the pages that we need
36341 * for the bss and break sections. We must do this before
36342 * mapping in the interpreter, to make sure it doesn't wind
36343@@ -877,9 +1183,11 @@ static int load_elf_binary(struct linux_
36344 goto out_free_dentry;
36345 }
36346 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
36347- send_sig(SIGSEGV, current, 0);
36348- retval = -EFAULT; /* Nobody gets to see this, but.. */
36349- goto out_free_dentry;
36350+ /*
36351+ * This bss-zeroing can fail if the ELF
36352+ * file specifies odd protections. So
36353+ * we don't check the return value
36354+ */
36355 }
36356
36357 if (elf_interpreter) {
36358@@ -1112,8 +1420,10 @@ static int dump_seek(struct file *file,
36359 unsigned long n = off;
36360 if (n > PAGE_SIZE)
36361 n = PAGE_SIZE;
36362- if (!dump_write(file, buf, n))
36363+ if (!dump_write(file, buf, n)) {
36364+ free_page((unsigned long)buf);
36365 return 0;
36366+ }
36367 off -= n;
36368 }
36369 free_page((unsigned long)buf);
36370@@ -1125,7 +1435,7 @@ static int dump_seek(struct file *file,
36371 * Decide what to dump of a segment, part, all or none.
36372 */
36373 static unsigned long vma_dump_size(struct vm_area_struct *vma,
36374- unsigned long mm_flags)
36375+ unsigned long mm_flags, long signr)
36376 {
36377 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
36378
36379@@ -1159,7 +1469,7 @@ static unsigned long vma_dump_size(struc
36380 if (vma->vm_file == NULL)
36381 return 0;
36382
36383- if (FILTER(MAPPED_PRIVATE))
36384+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
36385 goto whole;
36386
36387 /*
36388@@ -1255,8 +1565,11 @@ static int writenote(struct memelfnote *
36389 #undef DUMP_WRITE
36390
36391 #define DUMP_WRITE(addr, nr) \
36392+ do { \
36393+ gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
36394 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
36395- goto end_coredump;
36396+ goto end_coredump; \
36397+ } while (0);
36398
36399 static void fill_elf_header(struct elfhdr *elf, int segs,
36400 u16 machine, u32 flags, u8 osabi)
36401@@ -1385,9 +1698,9 @@ static void fill_auxv_note(struct memelf
36402 {
36403 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
36404 int i = 0;
36405- do
36406+ do {
36407 i += 2;
36408- while (auxv[i - 2] != AT_NULL);
36409+ } while (auxv[i - 2] != AT_NULL);
36410 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
36411 }
36412
36413@@ -1973,7 +2286,7 @@ static int elf_core_dump(long signr, str
36414 phdr.p_offset = offset;
36415 phdr.p_vaddr = vma->vm_start;
36416 phdr.p_paddr = 0;
36417- phdr.p_filesz = vma_dump_size(vma, mm_flags);
36418+ phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
36419 phdr.p_memsz = vma->vm_end - vma->vm_start;
36420 offset += phdr.p_filesz;
36421 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
36422@@ -2006,7 +2319,7 @@ static int elf_core_dump(long signr, str
36423 unsigned long addr;
36424 unsigned long end;
36425
36426- end = vma->vm_start + vma_dump_size(vma, mm_flags);
36427+ end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
36428
36429 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
36430 struct page *page;
36431@@ -2015,6 +2328,7 @@ static int elf_core_dump(long signr, str
36432 page = get_dump_page(addr);
36433 if (page) {
36434 void *kaddr = kmap(page);
36435+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
36436 stop = ((size += PAGE_SIZE) > limit) ||
36437 !dump_write(file, kaddr, PAGE_SIZE);
36438 kunmap(page);
36439@@ -2042,6 +2356,97 @@ out:
36440
36441 #endif /* USE_ELF_CORE_DUMP */
36442
36443+#ifdef CONFIG_PAX_MPROTECT
36444+/* PaX: non-PIC ELF libraries need relocations on their executable segments
36445+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
36446+ * we'll remove VM_MAYWRITE for good on RELRO segments.
36447+ *
36448+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
36449+ * basis because we want to allow the common case and not the special ones.
36450+ */
36451+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
36452+{
36453+ struct elfhdr elf_h;
36454+ struct elf_phdr elf_p;
36455+ unsigned long i;
36456+ unsigned long oldflags;
36457+ bool is_textrel_rw, is_textrel_rx, is_relro;
36458+
36459+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
36460+ return;
36461+
36462+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
36463+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
36464+
36465+#ifdef CONFIG_PAX_ELFRELOCS
36466+ /* possible TEXTREL */
36467+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
36468+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
36469+#else
36470+ is_textrel_rw = false;
36471+ is_textrel_rx = false;
36472+#endif
36473+
36474+ /* possible RELRO */
36475+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
36476+
36477+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
36478+ return;
36479+
36480+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
36481+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
36482+
36483+#ifdef CONFIG_PAX_ETEXECRELOCS
36484+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
36485+#else
36486+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
36487+#endif
36488+
36489+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
36490+ !elf_check_arch(&elf_h) ||
36491+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
36492+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
36493+ return;
36494+
36495+ for (i = 0UL; i < elf_h.e_phnum; i++) {
36496+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
36497+ return;
36498+ switch (elf_p.p_type) {
36499+ case PT_DYNAMIC:
36500+ if (!is_textrel_rw && !is_textrel_rx)
36501+ continue;
36502+ i = 0UL;
36503+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
36504+ elf_dyn dyn;
36505+
36506+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
36507+ return;
36508+ if (dyn.d_tag == DT_NULL)
36509+ return;
36510+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
36511+ gr_log_textrel(vma);
36512+ if (is_textrel_rw)
36513+ vma->vm_flags |= VM_MAYWRITE;
36514+ else
36515+				/* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
36516+ vma->vm_flags &= ~VM_MAYWRITE;
36517+ return;
36518+ }
36519+ i++;
36520+ }
36521+ return;
36522+
36523+ case PT_GNU_RELRO:
36524+ if (!is_relro)
36525+ continue;
36526+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
36527+ vma->vm_flags &= ~VM_MAYWRITE;
36528+ return;
36529+ }
36530+ }
36531+}
36532+#endif
36533+
36534 static int __init init_elf_binfmt(void)
36535 {
36536 return register_binfmt(&elf_format);
36537diff -urNp linux-2.6.32.41/fs/binfmt_flat.c linux-2.6.32.41/fs/binfmt_flat.c
36538--- linux-2.6.32.41/fs/binfmt_flat.c 2011-03-27 14:31:47.000000000 -0400
36539+++ linux-2.6.32.41/fs/binfmt_flat.c 2011-04-17 15:56:46.000000000 -0400
36540@@ -564,7 +564,9 @@ static int load_flat_file(struct linux_b
36541 realdatastart = (unsigned long) -ENOMEM;
36542 printk("Unable to allocate RAM for process data, errno %d\n",
36543 (int)-realdatastart);
36544+ down_write(&current->mm->mmap_sem);
36545 do_munmap(current->mm, textpos, text_len);
36546+ up_write(&current->mm->mmap_sem);
36547 ret = realdatastart;
36548 goto err;
36549 }
36550@@ -588,8 +590,10 @@ static int load_flat_file(struct linux_b
36551 }
36552 if (IS_ERR_VALUE(result)) {
36553 printk("Unable to read data+bss, errno %d\n", (int)-result);
36554+ down_write(&current->mm->mmap_sem);
36555 do_munmap(current->mm, textpos, text_len);
36556 do_munmap(current->mm, realdatastart, data_len + extra);
36557+ up_write(&current->mm->mmap_sem);
36558 ret = result;
36559 goto err;
36560 }
36561@@ -658,8 +662,10 @@ static int load_flat_file(struct linux_b
36562 }
36563 if (IS_ERR_VALUE(result)) {
36564 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
36565+ down_write(&current->mm->mmap_sem);
36566 do_munmap(current->mm, textpos, text_len + data_len + extra +
36567 MAX_SHARED_LIBS * sizeof(unsigned long));
36568+ up_write(&current->mm->mmap_sem);
36569 ret = result;
36570 goto err;
36571 }
36572diff -urNp linux-2.6.32.41/fs/bio.c linux-2.6.32.41/fs/bio.c
36573--- linux-2.6.32.41/fs/bio.c 2011-03-27 14:31:47.000000000 -0400
36574+++ linux-2.6.32.41/fs/bio.c 2011-04-17 15:56:46.000000000 -0400
36575@@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_cr
36576
36577 i = 0;
36578 while (i < bio_slab_nr) {
36579- struct bio_slab *bslab = &bio_slabs[i];
36580+ bslab = &bio_slabs[i];
36581
36582 if (!bslab->slab && entry == -1)
36583 entry = i;
36584@@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct b
36585 const int read = bio_data_dir(bio) == READ;
36586 struct bio_map_data *bmd = bio->bi_private;
36587 int i;
36588- char *p = bmd->sgvecs[0].iov_base;
36589+ char *p = (__force char *)bmd->sgvecs[0].iov_base;
36590
36591 __bio_for_each_segment(bvec, bio, i, 0) {
36592 char *addr = page_address(bvec->bv_page);
36593diff -urNp linux-2.6.32.41/fs/block_dev.c linux-2.6.32.41/fs/block_dev.c
36594--- linux-2.6.32.41/fs/block_dev.c 2011-03-27 14:31:47.000000000 -0400
36595+++ linux-2.6.32.41/fs/block_dev.c 2011-04-17 15:56:46.000000000 -0400
36596@@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev,
36597 else if (bdev->bd_contains == bdev)
36598 res = 0; /* is a whole device which isn't held */
36599
36600- else if (bdev->bd_contains->bd_holder == bd_claim)
36601+ else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
36602 res = 0; /* is a partition of a device that is being partitioned */
36603 else if (bdev->bd_contains->bd_holder != NULL)
36604 res = -EBUSY; /* is a partition of a held device */
36605diff -urNp linux-2.6.32.41/fs/btrfs/ctree.c linux-2.6.32.41/fs/btrfs/ctree.c
36606--- linux-2.6.32.41/fs/btrfs/ctree.c 2011-03-27 14:31:47.000000000 -0400
36607+++ linux-2.6.32.41/fs/btrfs/ctree.c 2011-04-17 15:56:46.000000000 -0400
36608@@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(st
36609 free_extent_buffer(buf);
36610 add_root_to_dirty_list(root);
36611 } else {
36612- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
36613- parent_start = parent->start;
36614- else
36615+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
36616+ if (parent)
36617+ parent_start = parent->start;
36618+ else
36619+ parent_start = 0;
36620+ } else
36621 parent_start = 0;
36622
36623 WARN_ON(trans->transid != btrfs_header_generation(parent));
36624@@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_tran
36625
36626 ret = 0;
36627 if (slot == 0) {
36628- struct btrfs_disk_key disk_key;
36629 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
36630 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
36631 }
36632diff -urNp linux-2.6.32.41/fs/btrfs/disk-io.c linux-2.6.32.41/fs/btrfs/disk-io.c
36633--- linux-2.6.32.41/fs/btrfs/disk-io.c 2011-04-17 17:00:52.000000000 -0400
36634+++ linux-2.6.32.41/fs/btrfs/disk-io.c 2011-04-17 17:03:11.000000000 -0400
36635@@ -39,7 +39,7 @@
36636 #include "tree-log.h"
36637 #include "free-space-cache.h"
36638
36639-static struct extent_io_ops btree_extent_io_ops;
36640+static const struct extent_io_ops btree_extent_io_ops;
36641 static void end_workqueue_fn(struct btrfs_work *work);
36642 static void free_fs_root(struct btrfs_root *root);
36643
36644@@ -2607,7 +2607,7 @@ out:
36645 return 0;
36646 }
36647
36648-static struct extent_io_ops btree_extent_io_ops = {
36649+static const struct extent_io_ops btree_extent_io_ops = {
36650 .write_cache_pages_lock_hook = btree_lock_page_hook,
36651 .readpage_end_io_hook = btree_readpage_end_io_hook,
36652 .submit_bio_hook = btree_submit_bio_hook,
36653diff -urNp linux-2.6.32.41/fs/btrfs/extent_io.h linux-2.6.32.41/fs/btrfs/extent_io.h
36654--- linux-2.6.32.41/fs/btrfs/extent_io.h 2011-03-27 14:31:47.000000000 -0400
36655+++ linux-2.6.32.41/fs/btrfs/extent_io.h 2011-04-17 15:56:46.000000000 -0400
36656@@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(s
36657 struct bio *bio, int mirror_num,
36658 unsigned long bio_flags);
36659 struct extent_io_ops {
36660- int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
36661+ int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
36662 u64 start, u64 end, int *page_started,
36663 unsigned long *nr_written);
36664- int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
36665- int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
36666+ int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
36667+ int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
36668 extent_submit_bio_hook_t *submit_bio_hook;
36669- int (*merge_bio_hook)(struct page *page, unsigned long offset,
36670+ int (* const merge_bio_hook)(struct page *page, unsigned long offset,
36671 size_t size, struct bio *bio,
36672 unsigned long bio_flags);
36673- int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
36674- int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
36675+ int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
36676+ int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
36677 u64 start, u64 end,
36678 struct extent_state *state);
36679- int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
36680+ int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
36681 u64 start, u64 end,
36682 struct extent_state *state);
36683- int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
36684+ int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
36685 struct extent_state *state);
36686- int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
36687+ int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
36688 struct extent_state *state, int uptodate);
36689- int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
36690+ int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
36691 unsigned long old, unsigned long bits);
36692- int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
36693+ int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
36694 unsigned long bits);
36695- int (*merge_extent_hook)(struct inode *inode,
36696+ int (* const merge_extent_hook)(struct inode *inode,
36697 struct extent_state *new,
36698 struct extent_state *other);
36699- int (*split_extent_hook)(struct inode *inode,
36700+ int (* const split_extent_hook)(struct inode *inode,
36701 struct extent_state *orig, u64 split);
36702- int (*write_cache_pages_lock_hook)(struct page *page);
36703+ int (* const write_cache_pages_lock_hook)(struct page *page);
36704 };
36705
36706 struct extent_io_tree {
36707@@ -88,7 +88,7 @@ struct extent_io_tree {
36708 u64 dirty_bytes;
36709 spinlock_t lock;
36710 spinlock_t buffer_lock;
36711- struct extent_io_ops *ops;
36712+ const struct extent_io_ops *ops;
36713 };
36714
36715 struct extent_state {
36716diff -urNp linux-2.6.32.41/fs/btrfs/free-space-cache.c linux-2.6.32.41/fs/btrfs/free-space-cache.c
36717--- linux-2.6.32.41/fs/btrfs/free-space-cache.c 2011-03-27 14:31:47.000000000 -0400
36718+++ linux-2.6.32.41/fs/btrfs/free-space-cache.c 2011-04-17 15:56:46.000000000 -0400
36719@@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
36720
36721 while(1) {
36722 if (entry->bytes < bytes || entry->offset < min_start) {
36723- struct rb_node *node;
36724-
36725 node = rb_next(&entry->offset_index);
36726 if (!node)
36727 break;
36728@@ -1226,7 +1224,7 @@ again:
36729 */
36730 while (entry->bitmap || found_bitmap ||
36731 (!entry->bitmap && entry->bytes < min_bytes)) {
36732- struct rb_node *node = rb_next(&entry->offset_index);
36733+ node = rb_next(&entry->offset_index);
36734
36735 if (entry->bitmap && entry->bytes > bytes + empty_size) {
36736 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
36737diff -urNp linux-2.6.32.41/fs/btrfs/inode.c linux-2.6.32.41/fs/btrfs/inode.c
36738--- linux-2.6.32.41/fs/btrfs/inode.c 2011-03-27 14:31:47.000000000 -0400
36739+++ linux-2.6.32.41/fs/btrfs/inode.c 2011-04-17 15:56:46.000000000 -0400
36740@@ -63,7 +63,7 @@ static const struct inode_operations btr
36741 static const struct address_space_operations btrfs_aops;
36742 static const struct address_space_operations btrfs_symlink_aops;
36743 static const struct file_operations btrfs_dir_file_operations;
36744-static struct extent_io_ops btrfs_extent_io_ops;
36745+static const struct extent_io_ops btrfs_extent_io_ops;
36746
36747 static struct kmem_cache *btrfs_inode_cachep;
36748 struct kmem_cache *btrfs_trans_handle_cachep;
36749@@ -5410,7 +5410,7 @@ fail:
36750 return -ENOMEM;
36751 }
36752
36753-static int btrfs_getattr(struct vfsmount *mnt,
36754+int btrfs_getattr(struct vfsmount *mnt,
36755 struct dentry *dentry, struct kstat *stat)
36756 {
36757 struct inode *inode = dentry->d_inode;
36758@@ -5422,6 +5422,14 @@ static int btrfs_getattr(struct vfsmount
36759 return 0;
36760 }
36761
36762+EXPORT_SYMBOL(btrfs_getattr);
36763+
36764+dev_t get_btrfs_dev_from_inode(struct inode *inode)
36765+{
36766+ return BTRFS_I(inode)->root->anon_super.s_dev;
36767+}
36768+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
36769+
36770 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
36771 struct inode *new_dir, struct dentry *new_dentry)
36772 {
36773@@ -5972,7 +5980,7 @@ static const struct file_operations btrf
36774 .fsync = btrfs_sync_file,
36775 };
36776
36777-static struct extent_io_ops btrfs_extent_io_ops = {
36778+static const struct extent_io_ops btrfs_extent_io_ops = {
36779 .fill_delalloc = run_delalloc_range,
36780 .submit_bio_hook = btrfs_submit_bio_hook,
36781 .merge_bio_hook = btrfs_merge_bio_hook,
36782diff -urNp linux-2.6.32.41/fs/btrfs/relocation.c linux-2.6.32.41/fs/btrfs/relocation.c
36783--- linux-2.6.32.41/fs/btrfs/relocation.c 2011-03-27 14:31:47.000000000 -0400
36784+++ linux-2.6.32.41/fs/btrfs/relocation.c 2011-04-17 15:56:46.000000000 -0400
36785@@ -884,7 +884,7 @@ static int __update_reloc_root(struct bt
36786 }
36787 spin_unlock(&rc->reloc_root_tree.lock);
36788
36789- BUG_ON((struct btrfs_root *)node->data != root);
36790+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
36791
36792 if (!del) {
36793 spin_lock(&rc->reloc_root_tree.lock);
36794diff -urNp linux-2.6.32.41/fs/btrfs/sysfs.c linux-2.6.32.41/fs/btrfs/sysfs.c
36795--- linux-2.6.32.41/fs/btrfs/sysfs.c 2011-03-27 14:31:47.000000000 -0400
36796+++ linux-2.6.32.41/fs/btrfs/sysfs.c 2011-04-17 15:56:46.000000000 -0400
36797@@ -164,12 +164,12 @@ static void btrfs_root_release(struct ko
36798 complete(&root->kobj_unregister);
36799 }
36800
36801-static struct sysfs_ops btrfs_super_attr_ops = {
36802+static const struct sysfs_ops btrfs_super_attr_ops = {
36803 .show = btrfs_super_attr_show,
36804 .store = btrfs_super_attr_store,
36805 };
36806
36807-static struct sysfs_ops btrfs_root_attr_ops = {
36808+static const struct sysfs_ops btrfs_root_attr_ops = {
36809 .show = btrfs_root_attr_show,
36810 .store = btrfs_root_attr_store,
36811 };
36812diff -urNp linux-2.6.32.41/fs/buffer.c linux-2.6.32.41/fs/buffer.c
36813--- linux-2.6.32.41/fs/buffer.c 2011-03-27 14:31:47.000000000 -0400
36814+++ linux-2.6.32.41/fs/buffer.c 2011-04-17 15:56:46.000000000 -0400
36815@@ -25,6 +25,7 @@
36816 #include <linux/percpu.h>
36817 #include <linux/slab.h>
36818 #include <linux/capability.h>
36819+#include <linux/security.h>
36820 #include <linux/blkdev.h>
36821 #include <linux/file.h>
36822 #include <linux/quotaops.h>
36823diff -urNp linux-2.6.32.41/fs/cachefiles/bind.c linux-2.6.32.41/fs/cachefiles/bind.c
36824--- linux-2.6.32.41/fs/cachefiles/bind.c 2011-03-27 14:31:47.000000000 -0400
36825+++ linux-2.6.32.41/fs/cachefiles/bind.c 2011-04-17 15:56:46.000000000 -0400
36826@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
36827 args);
36828
36829 /* start by checking things over */
36830- ASSERT(cache->fstop_percent >= 0 &&
36831- cache->fstop_percent < cache->fcull_percent &&
36832+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
36833 cache->fcull_percent < cache->frun_percent &&
36834 cache->frun_percent < 100);
36835
36836- ASSERT(cache->bstop_percent >= 0 &&
36837- cache->bstop_percent < cache->bcull_percent &&
36838+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
36839 cache->bcull_percent < cache->brun_percent &&
36840 cache->brun_percent < 100);
36841
36842diff -urNp linux-2.6.32.41/fs/cachefiles/daemon.c linux-2.6.32.41/fs/cachefiles/daemon.c
36843--- linux-2.6.32.41/fs/cachefiles/daemon.c 2011-03-27 14:31:47.000000000 -0400
36844+++ linux-2.6.32.41/fs/cachefiles/daemon.c 2011-04-17 15:56:46.000000000 -0400
36845@@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(s
36846 if (test_bit(CACHEFILES_DEAD, &cache->flags))
36847 return -EIO;
36848
36849- if (datalen < 0 || datalen > PAGE_SIZE - 1)
36850+ if (datalen > PAGE_SIZE - 1)
36851 return -EOPNOTSUPP;
36852
36853 /* drag the command string into the kernel so we can parse it */
36854@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struc
36855 if (args[0] != '%' || args[1] != '\0')
36856 return -EINVAL;
36857
36858- if (fstop < 0 || fstop >= cache->fcull_percent)
36859+ if (fstop >= cache->fcull_percent)
36860 return cachefiles_daemon_range_error(cache, args);
36861
36862 cache->fstop_percent = fstop;
36863@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struc
36864 if (args[0] != '%' || args[1] != '\0')
36865 return -EINVAL;
36866
36867- if (bstop < 0 || bstop >= cache->bcull_percent)
36868+ if (bstop >= cache->bcull_percent)
36869 return cachefiles_daemon_range_error(cache, args);
36870
36871 cache->bstop_percent = bstop;
36872diff -urNp linux-2.6.32.41/fs/cachefiles/internal.h linux-2.6.32.41/fs/cachefiles/internal.h
36873--- linux-2.6.32.41/fs/cachefiles/internal.h 2011-03-27 14:31:47.000000000 -0400
36874+++ linux-2.6.32.41/fs/cachefiles/internal.h 2011-05-04 17:56:28.000000000 -0400
36875@@ -56,7 +56,7 @@ struct cachefiles_cache {
36876 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
36877 struct rb_root active_nodes; /* active nodes (can't be culled) */
36878 rwlock_t active_lock; /* lock for active_nodes */
36879- atomic_t gravecounter; /* graveyard uniquifier */
36880+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
36881 unsigned frun_percent; /* when to stop culling (% files) */
36882 unsigned fcull_percent; /* when to start culling (% files) */
36883 unsigned fstop_percent; /* when to stop allocating (% files) */
36884@@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struc
36885 * proc.c
36886 */
36887 #ifdef CONFIG_CACHEFILES_HISTOGRAM
36888-extern atomic_t cachefiles_lookup_histogram[HZ];
36889-extern atomic_t cachefiles_mkdir_histogram[HZ];
36890-extern atomic_t cachefiles_create_histogram[HZ];
36891+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
36892+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
36893+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
36894
36895 extern int __init cachefiles_proc_init(void);
36896 extern void cachefiles_proc_cleanup(void);
36897 static inline
36898-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
36899+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
36900 {
36901 unsigned long jif = jiffies - start_jif;
36902 if (jif >= HZ)
36903 jif = HZ - 1;
36904- atomic_inc(&histogram[jif]);
36905+ atomic_inc_unchecked(&histogram[jif]);
36906 }
36907
36908 #else
36909diff -urNp linux-2.6.32.41/fs/cachefiles/namei.c linux-2.6.32.41/fs/cachefiles/namei.c
36910--- linux-2.6.32.41/fs/cachefiles/namei.c 2011-03-27 14:31:47.000000000 -0400
36911+++ linux-2.6.32.41/fs/cachefiles/namei.c 2011-05-04 17:56:28.000000000 -0400
36912@@ -250,7 +250,7 @@ try_again:
36913 /* first step is to make up a grave dentry in the graveyard */
36914 sprintf(nbuffer, "%08x%08x",
36915 (uint32_t) get_seconds(),
36916- (uint32_t) atomic_inc_return(&cache->gravecounter));
36917+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
36918
36919 /* do the multiway lock magic */
36920 trap = lock_rename(cache->graveyard, dir);
36921diff -urNp linux-2.6.32.41/fs/cachefiles/proc.c linux-2.6.32.41/fs/cachefiles/proc.c
36922--- linux-2.6.32.41/fs/cachefiles/proc.c 2011-03-27 14:31:47.000000000 -0400
36923+++ linux-2.6.32.41/fs/cachefiles/proc.c 2011-05-04 17:56:28.000000000 -0400
36924@@ -14,9 +14,9 @@
36925 #include <linux/seq_file.h>
36926 #include "internal.h"
36927
36928-atomic_t cachefiles_lookup_histogram[HZ];
36929-atomic_t cachefiles_mkdir_histogram[HZ];
36930-atomic_t cachefiles_create_histogram[HZ];
36931+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
36932+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
36933+atomic_unchecked_t cachefiles_create_histogram[HZ];
36934
36935 /*
36936 * display the latency histogram
36937@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
36938 return 0;
36939 default:
36940 index = (unsigned long) v - 3;
36941- x = atomic_read(&cachefiles_lookup_histogram[index]);
36942- y = atomic_read(&cachefiles_mkdir_histogram[index]);
36943- z = atomic_read(&cachefiles_create_histogram[index]);
36944+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
36945+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
36946+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
36947 if (x == 0 && y == 0 && z == 0)
36948 return 0;
36949
36950diff -urNp linux-2.6.32.41/fs/cachefiles/rdwr.c linux-2.6.32.41/fs/cachefiles/rdwr.c
36951--- linux-2.6.32.41/fs/cachefiles/rdwr.c 2011-03-27 14:31:47.000000000 -0400
36952+++ linux-2.6.32.41/fs/cachefiles/rdwr.c 2011-04-17 15:56:46.000000000 -0400
36953@@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache
36954 old_fs = get_fs();
36955 set_fs(KERNEL_DS);
36956 ret = file->f_op->write(
36957- file, (const void __user *) data, len, &pos);
36958+ file, (__force const void __user *) data, len, &pos);
36959 set_fs(old_fs);
36960 kunmap(page);
36961 if (ret != len)
36962diff -urNp linux-2.6.32.41/fs/cifs/cifs_debug.c linux-2.6.32.41/fs/cifs/cifs_debug.c
36963--- linux-2.6.32.41/fs/cifs/cifs_debug.c 2011-03-27 14:31:47.000000000 -0400
36964+++ linux-2.6.32.41/fs/cifs/cifs_debug.c 2011-05-04 17:56:28.000000000 -0400
36965@@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(str
36966 tcon = list_entry(tmp3,
36967 struct cifsTconInfo,
36968 tcon_list);
36969- atomic_set(&tcon->num_smbs_sent, 0);
36970- atomic_set(&tcon->num_writes, 0);
36971- atomic_set(&tcon->num_reads, 0);
36972- atomic_set(&tcon->num_oplock_brks, 0);
36973- atomic_set(&tcon->num_opens, 0);
36974- atomic_set(&tcon->num_posixopens, 0);
36975- atomic_set(&tcon->num_posixmkdirs, 0);
36976- atomic_set(&tcon->num_closes, 0);
36977- atomic_set(&tcon->num_deletes, 0);
36978- atomic_set(&tcon->num_mkdirs, 0);
36979- atomic_set(&tcon->num_rmdirs, 0);
36980- atomic_set(&tcon->num_renames, 0);
36981- atomic_set(&tcon->num_t2renames, 0);
36982- atomic_set(&tcon->num_ffirst, 0);
36983- atomic_set(&tcon->num_fnext, 0);
36984- atomic_set(&tcon->num_fclose, 0);
36985- atomic_set(&tcon->num_hardlinks, 0);
36986- atomic_set(&tcon->num_symlinks, 0);
36987- atomic_set(&tcon->num_locks, 0);
36988+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
36989+ atomic_set_unchecked(&tcon->num_writes, 0);
36990+ atomic_set_unchecked(&tcon->num_reads, 0);
36991+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
36992+ atomic_set_unchecked(&tcon->num_opens, 0);
36993+ atomic_set_unchecked(&tcon->num_posixopens, 0);
36994+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
36995+ atomic_set_unchecked(&tcon->num_closes, 0);
36996+ atomic_set_unchecked(&tcon->num_deletes, 0);
36997+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
36998+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
36999+ atomic_set_unchecked(&tcon->num_renames, 0);
37000+ atomic_set_unchecked(&tcon->num_t2renames, 0);
37001+ atomic_set_unchecked(&tcon->num_ffirst, 0);
37002+ atomic_set_unchecked(&tcon->num_fnext, 0);
37003+ atomic_set_unchecked(&tcon->num_fclose, 0);
37004+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
37005+ atomic_set_unchecked(&tcon->num_symlinks, 0);
37006+ atomic_set_unchecked(&tcon->num_locks, 0);
37007 }
37008 }
37009 }
37010@@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct s
37011 if (tcon->need_reconnect)
37012 seq_puts(m, "\tDISCONNECTED ");
37013 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
37014- atomic_read(&tcon->num_smbs_sent),
37015- atomic_read(&tcon->num_oplock_brks));
37016+ atomic_read_unchecked(&tcon->num_smbs_sent),
37017+ atomic_read_unchecked(&tcon->num_oplock_brks));
37018 seq_printf(m, "\nReads: %d Bytes: %lld",
37019- atomic_read(&tcon->num_reads),
37020+ atomic_read_unchecked(&tcon->num_reads),
37021 (long long)(tcon->bytes_read));
37022 seq_printf(m, "\nWrites: %d Bytes: %lld",
37023- atomic_read(&tcon->num_writes),
37024+ atomic_read_unchecked(&tcon->num_writes),
37025 (long long)(tcon->bytes_written));
37026 seq_printf(m, "\nFlushes: %d",
37027- atomic_read(&tcon->num_flushes));
37028+ atomic_read_unchecked(&tcon->num_flushes));
37029 seq_printf(m, "\nLocks: %d HardLinks: %d "
37030 "Symlinks: %d",
37031- atomic_read(&tcon->num_locks),
37032- atomic_read(&tcon->num_hardlinks),
37033- atomic_read(&tcon->num_symlinks));
37034+ atomic_read_unchecked(&tcon->num_locks),
37035+ atomic_read_unchecked(&tcon->num_hardlinks),
37036+ atomic_read_unchecked(&tcon->num_symlinks));
37037 seq_printf(m, "\nOpens: %d Closes: %d "
37038 "Deletes: %d",
37039- atomic_read(&tcon->num_opens),
37040- atomic_read(&tcon->num_closes),
37041- atomic_read(&tcon->num_deletes));
37042+ atomic_read_unchecked(&tcon->num_opens),
37043+ atomic_read_unchecked(&tcon->num_closes),
37044+ atomic_read_unchecked(&tcon->num_deletes));
37045 seq_printf(m, "\nPosix Opens: %d "
37046 "Posix Mkdirs: %d",
37047- atomic_read(&tcon->num_posixopens),
37048- atomic_read(&tcon->num_posixmkdirs));
37049+ atomic_read_unchecked(&tcon->num_posixopens),
37050+ atomic_read_unchecked(&tcon->num_posixmkdirs));
37051 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
37052- atomic_read(&tcon->num_mkdirs),
37053- atomic_read(&tcon->num_rmdirs));
37054+ atomic_read_unchecked(&tcon->num_mkdirs),
37055+ atomic_read_unchecked(&tcon->num_rmdirs));
37056 seq_printf(m, "\nRenames: %d T2 Renames %d",
37057- atomic_read(&tcon->num_renames),
37058- atomic_read(&tcon->num_t2renames));
37059+ atomic_read_unchecked(&tcon->num_renames),
37060+ atomic_read_unchecked(&tcon->num_t2renames));
37061 seq_printf(m, "\nFindFirst: %d FNext %d "
37062 "FClose %d",
37063- atomic_read(&tcon->num_ffirst),
37064- atomic_read(&tcon->num_fnext),
37065- atomic_read(&tcon->num_fclose));
37066+ atomic_read_unchecked(&tcon->num_ffirst),
37067+ atomic_read_unchecked(&tcon->num_fnext),
37068+ atomic_read_unchecked(&tcon->num_fclose));
37069 }
37070 }
37071 }
37072diff -urNp linux-2.6.32.41/fs/cifs/cifsglob.h linux-2.6.32.41/fs/cifs/cifsglob.h
37073--- linux-2.6.32.41/fs/cifs/cifsglob.h 2011-03-27 14:31:47.000000000 -0400
37074+++ linux-2.6.32.41/fs/cifs/cifsglob.h 2011-05-04 17:56:28.000000000 -0400
37075@@ -252,28 +252,28 @@ struct cifsTconInfo {
37076 __u16 Flags; /* optional support bits */
37077 enum statusEnum tidStatus;
37078 #ifdef CONFIG_CIFS_STATS
37079- atomic_t num_smbs_sent;
37080- atomic_t num_writes;
37081- atomic_t num_reads;
37082- atomic_t num_flushes;
37083- atomic_t num_oplock_brks;
37084- atomic_t num_opens;
37085- atomic_t num_closes;
37086- atomic_t num_deletes;
37087- atomic_t num_mkdirs;
37088- atomic_t num_posixopens;
37089- atomic_t num_posixmkdirs;
37090- atomic_t num_rmdirs;
37091- atomic_t num_renames;
37092- atomic_t num_t2renames;
37093- atomic_t num_ffirst;
37094- atomic_t num_fnext;
37095- atomic_t num_fclose;
37096- atomic_t num_hardlinks;
37097- atomic_t num_symlinks;
37098- atomic_t num_locks;
37099- atomic_t num_acl_get;
37100- atomic_t num_acl_set;
37101+ atomic_unchecked_t num_smbs_sent;
37102+ atomic_unchecked_t num_writes;
37103+ atomic_unchecked_t num_reads;
37104+ atomic_unchecked_t num_flushes;
37105+ atomic_unchecked_t num_oplock_brks;
37106+ atomic_unchecked_t num_opens;
37107+ atomic_unchecked_t num_closes;
37108+ atomic_unchecked_t num_deletes;
37109+ atomic_unchecked_t num_mkdirs;
37110+ atomic_unchecked_t num_posixopens;
37111+ atomic_unchecked_t num_posixmkdirs;
37112+ atomic_unchecked_t num_rmdirs;
37113+ atomic_unchecked_t num_renames;
37114+ atomic_unchecked_t num_t2renames;
37115+ atomic_unchecked_t num_ffirst;
37116+ atomic_unchecked_t num_fnext;
37117+ atomic_unchecked_t num_fclose;
37118+ atomic_unchecked_t num_hardlinks;
37119+ atomic_unchecked_t num_symlinks;
37120+ atomic_unchecked_t num_locks;
37121+ atomic_unchecked_t num_acl_get;
37122+ atomic_unchecked_t num_acl_set;
37123 #ifdef CONFIG_CIFS_STATS2
37124 unsigned long long time_writes;
37125 unsigned long long time_reads;
37126@@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const st
37127 }
37128
37129 #ifdef CONFIG_CIFS_STATS
37130-#define cifs_stats_inc atomic_inc
37131+#define cifs_stats_inc atomic_inc_unchecked
37132
37133 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
37134 unsigned int bytes)
37135diff -urNp linux-2.6.32.41/fs/cifs/link.c linux-2.6.32.41/fs/cifs/link.c
37136--- linux-2.6.32.41/fs/cifs/link.c 2011-03-27 14:31:47.000000000 -0400
37137+++ linux-2.6.32.41/fs/cifs/link.c 2011-04-17 15:56:46.000000000 -0400
37138@@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct
37139
37140 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
37141 {
37142- char *p = nd_get_link(nd);
37143+ const char *p = nd_get_link(nd);
37144 if (!IS_ERR(p))
37145 kfree(p);
37146 }
37147diff -urNp linux-2.6.32.41/fs/coda/cache.c linux-2.6.32.41/fs/coda/cache.c
37148--- linux-2.6.32.41/fs/coda/cache.c 2011-03-27 14:31:47.000000000 -0400
37149+++ linux-2.6.32.41/fs/coda/cache.c 2011-05-04 17:56:28.000000000 -0400
37150@@ -24,14 +24,14 @@
37151 #include <linux/coda_fs_i.h>
37152 #include <linux/coda_cache.h>
37153
37154-static atomic_t permission_epoch = ATOMIC_INIT(0);
37155+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
37156
37157 /* replace or extend an acl cache hit */
37158 void coda_cache_enter(struct inode *inode, int mask)
37159 {
37160 struct coda_inode_info *cii = ITOC(inode);
37161
37162- cii->c_cached_epoch = atomic_read(&permission_epoch);
37163+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
37164 if (cii->c_uid != current_fsuid()) {
37165 cii->c_uid = current_fsuid();
37166 cii->c_cached_perm = mask;
37167@@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inod
37168 void coda_cache_clear_inode(struct inode *inode)
37169 {
37170 struct coda_inode_info *cii = ITOC(inode);
37171- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
37172+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
37173 }
37174
37175 /* remove all acl caches */
37176 void coda_cache_clear_all(struct super_block *sb)
37177 {
37178- atomic_inc(&permission_epoch);
37179+ atomic_inc_unchecked(&permission_epoch);
37180 }
37181
37182
37183@@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode
37184
37185 hit = (mask & cii->c_cached_perm) == mask &&
37186 cii->c_uid == current_fsuid() &&
37187- cii->c_cached_epoch == atomic_read(&permission_epoch);
37188+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
37189
37190 return hit;
37191 }
37192diff -urNp linux-2.6.32.41/fs/compat_binfmt_elf.c linux-2.6.32.41/fs/compat_binfmt_elf.c
37193--- linux-2.6.32.41/fs/compat_binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
37194+++ linux-2.6.32.41/fs/compat_binfmt_elf.c 2011-04-17 15:56:46.000000000 -0400
37195@@ -29,10 +29,12 @@
37196 #undef elfhdr
37197 #undef elf_phdr
37198 #undef elf_note
37199+#undef elf_dyn
37200 #undef elf_addr_t
37201 #define elfhdr elf32_hdr
37202 #define elf_phdr elf32_phdr
37203 #define elf_note elf32_note
37204+#define elf_dyn Elf32_Dyn
37205 #define elf_addr_t Elf32_Addr
37206
37207 /*
37208diff -urNp linux-2.6.32.41/fs/compat.c linux-2.6.32.41/fs/compat.c
37209--- linux-2.6.32.41/fs/compat.c 2011-04-17 17:00:52.000000000 -0400
37210+++ linux-2.6.32.41/fs/compat.c 2011-05-16 21:46:57.000000000 -0400
37211@@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
37212
37213 struct compat_readdir_callback {
37214 struct compat_old_linux_dirent __user *dirent;
37215+ struct file * file;
37216 int result;
37217 };
37218
37219@@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf
37220 buf->result = -EOVERFLOW;
37221 return -EOVERFLOW;
37222 }
37223+
37224+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
37225+ return 0;
37226+
37227 buf->result++;
37228 dirent = buf->dirent;
37229 if (!access_ok(VERIFY_WRITE, dirent,
37230@@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(u
37231
37232 buf.result = 0;
37233 buf.dirent = dirent;
37234+ buf.file = file;
37235
37236 error = vfs_readdir(file, compat_fillonedir, &buf);
37237 if (buf.result)
37238@@ -899,6 +905,7 @@ struct compat_linux_dirent {
37239 struct compat_getdents_callback {
37240 struct compat_linux_dirent __user *current_dir;
37241 struct compat_linux_dirent __user *previous;
37242+ struct file * file;
37243 int count;
37244 int error;
37245 };
37246@@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, c
37247 buf->error = -EOVERFLOW;
37248 return -EOVERFLOW;
37249 }
37250+
37251+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
37252+ return 0;
37253+
37254 dirent = buf->previous;
37255 if (dirent) {
37256 if (__put_user(offset, &dirent->d_off))
37257@@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsi
37258 buf.previous = NULL;
37259 buf.count = count;
37260 buf.error = 0;
37261+ buf.file = file;
37262
37263 error = vfs_readdir(file, compat_filldir, &buf);
37264 if (error >= 0)
37265@@ -987,6 +999,7 @@ out:
37266 struct compat_getdents_callback64 {
37267 struct linux_dirent64 __user *current_dir;
37268 struct linux_dirent64 __user *previous;
37269+ struct file * file;
37270 int count;
37271 int error;
37272 };
37273@@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf
37274 buf->error = -EINVAL; /* only used if we fail.. */
37275 if (reclen > buf->count)
37276 return -EINVAL;
37277+
37278+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
37279+ return 0;
37280+
37281 dirent = buf->previous;
37282
37283 if (dirent) {
37284@@ -1054,6 +1071,7 @@ asmlinkage long compat_sys_getdents64(un
37285 buf.previous = NULL;
37286 buf.count = count;
37287 buf.error = 0;
37288+ buf.file = file;
37289
37290 error = vfs_readdir(file, compat_filldir64, &buf);
37291 if (error >= 0)
37292@@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(in
37293 * verify all the pointers
37294 */
37295 ret = -EINVAL;
37296- if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
37297+ if (nr_segs > UIO_MAXIOV)
37298 goto out;
37299 if (!file->f_op)
37300 goto out;
37301@@ -1463,6 +1481,11 @@ int compat_do_execve(char * filename,
37302 compat_uptr_t __user *envp,
37303 struct pt_regs * regs)
37304 {
37305+#ifdef CONFIG_GRKERNSEC
37306+ struct file *old_exec_file;
37307+ struct acl_subject_label *old_acl;
37308+ struct rlimit old_rlim[RLIM_NLIMITS];
37309+#endif
37310 struct linux_binprm *bprm;
37311 struct file *file;
37312 struct files_struct *displaced;
37313@@ -1499,6 +1522,19 @@ int compat_do_execve(char * filename,
37314 bprm->filename = filename;
37315 bprm->interp = filename;
37316
37317+ if (gr_process_user_ban()) {
37318+ retval = -EPERM;
37319+ goto out_file;
37320+ }
37321+
37322+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
37323+ retval = -EAGAIN;
37324+ if (gr_handle_nproc())
37325+ goto out_file;
37326+ retval = -EACCES;
37327+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
37328+ goto out_file;
37329+
37330 retval = bprm_mm_init(bprm);
37331 if (retval)
37332 goto out_file;
37333@@ -1528,9 +1564,40 @@ int compat_do_execve(char * filename,
37334 if (retval < 0)
37335 goto out;
37336
37337+ if (!gr_tpe_allow(file)) {
37338+ retval = -EACCES;
37339+ goto out;
37340+ }
37341+
37342+ if (gr_check_crash_exec(file)) {
37343+ retval = -EACCES;
37344+ goto out;
37345+ }
37346+
37347+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
37348+
37349+ gr_handle_exec_args_compat(bprm, argv);
37350+
37351+#ifdef CONFIG_GRKERNSEC
37352+ old_acl = current->acl;
37353+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
37354+ old_exec_file = current->exec_file;
37355+ get_file(file);
37356+ current->exec_file = file;
37357+#endif
37358+
37359+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
37360+ bprm->unsafe & LSM_UNSAFE_SHARE);
37361+ if (retval < 0)
37362+ goto out_fail;
37363+
37364 retval = search_binary_handler(bprm, regs);
37365 if (retval < 0)
37366- goto out;
37367+ goto out_fail;
37368+#ifdef CONFIG_GRKERNSEC
37369+ if (old_exec_file)
37370+ fput(old_exec_file);
37371+#endif
37372
37373 /* execve succeeded */
37374 current->fs->in_exec = 0;
37375@@ -1541,6 +1608,14 @@ int compat_do_execve(char * filename,
37376 put_files_struct(displaced);
37377 return retval;
37378
37379+out_fail:
37380+#ifdef CONFIG_GRKERNSEC
37381+ current->acl = old_acl;
37382+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
37383+ fput(current->exec_file);
37384+ current->exec_file = old_exec_file;
37385+#endif
37386+
37387 out:
37388 if (bprm->mm) {
37389 acct_arg_size(bprm, 0);
37390@@ -1711,6 +1786,8 @@ int compat_core_sys_select(int n, compat
37391 struct fdtable *fdt;
37392 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
37393
37394+ pax_track_stack();
37395+
37396 if (n < 0)
37397 goto out_nofds;
37398
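The fs/compat.c hunks all apply one pattern: each readdir callback structure gains a struct file * so the filldir callbacks can hand the open directory to gr_acl_handle_filldir() and, when the hook says no, return 0 to skip the entry without aborting the walk. Below is a rough user-space sketch of carrying that context into a per-entry callback; entry_allowed() is a made-up stand-in for the grsecurity hook, not a real API.

/* Sketch of the "carry context into the per-entry callback" pattern used in
 * the compat readdir paths above; entry_allowed() stands in for
 * gr_acl_handle_filldir() and the policy it applies is invented. */
#include <stdbool.h>
#include <stdio.h>

struct dir_ctx {
    const char *dir_name;   /* plays the role of the struct file * stashed in buf */
    int emitted;
};

static bool entry_allowed(const struct dir_ctx *ctx, const char *name)
{
    (void)ctx;
    return name[0] != '.';  /* hypothetical policy: hide dot-files */
}

static int filldir(void *opaque, const char *name)
{
    struct dir_ctx *ctx = opaque;

    if (!entry_allowed(ctx, name))
        return 0;           /* skip the entry but keep iterating, like the hook */
    ctx->emitted++;
    printf("%s/%s\n", ctx->dir_name, name);
    return 0;
}

int main(void)
{
    struct dir_ctx ctx = { "/tmp", 0 };
    const char *names[] = { ".hidden", "visible", "also-visible" };

    for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++)
        filldir(&ctx, names[i]);
    return ctx.emitted == 2 ? 0 : 1;
}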
37399diff -urNp linux-2.6.32.41/fs/compat_ioctl.c linux-2.6.32.41/fs/compat_ioctl.c
37400--- linux-2.6.32.41/fs/compat_ioctl.c 2011-03-27 14:31:47.000000000 -0400
37401+++ linux-2.6.32.41/fs/compat_ioctl.c 2011-04-23 12:56:11.000000000 -0400
37402@@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsi
37403 up = (struct compat_video_spu_palette __user *) arg;
37404 err = get_user(palp, &up->palette);
37405 err |= get_user(length, &up->length);
37406+ if (err)
37407+ return -EFAULT;
37408
37409 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
37410 err = put_user(compat_ptr(palp), &up_native->palette);
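The compat_ioctl change above returns -EFAULT as soon as either get_user() fails, rather than OR-ing the error codes together and then using values that were never copied in. A minimal sketch of that check-before-use pattern, with fetch_user() as a stand-in for get_user() (it is not a kernel API):

/* Sketch of "accumulate copy-in errors but check them before using the
 * data"; fetch_user() is a stand-in for get_user(), not a kernel API. */
#include <stdio.h>

static int fetch_user(int *dst, const int *src)
{
    if (!src)
        return -1;          /* simulates a faulting user pointer */
    *dst = *src;
    return 0;
}

static int set_palette(const int *user_palp, const int *user_len)
{
    int palp, length, err;

    err  = fetch_user(&palp, user_palp);
    err |= fetch_user(&length, user_len);
    if (err)
        return -14;         /* -EFAULT: palp/length are never touched on failure */

    printf("palette=%d len=%d\n", palp, length);
    return 0;
}

int main(void)
{
    int p = 3, l = 16;

    printf("%d\n", set_palette(&p, &l));    /* prints the values, returns 0 */
    printf("%d\n", set_palette(NULL, &l));  /* returns -14 without using palp */
    return 0;
}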
37411diff -urNp linux-2.6.32.41/fs/configfs/dir.c linux-2.6.32.41/fs/configfs/dir.c
37412--- linux-2.6.32.41/fs/configfs/dir.c 2011-03-27 14:31:47.000000000 -0400
37413+++ linux-2.6.32.41/fs/configfs/dir.c 2011-05-11 18:25:15.000000000 -0400
37414@@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file
37415 }
37416 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
37417 struct configfs_dirent *next;
37418- const char * name;
37419+ const unsigned char * name;
37420+ char d_name[sizeof(next->s_dentry->d_iname)];
37421 int len;
37422
37423 next = list_entry(p, struct configfs_dirent,
37424@@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file
37425 continue;
37426
37427 name = configfs_get_name(next);
37428- len = strlen(name);
37429+ if (next->s_dentry && name == next->s_dentry->d_iname) {
37430+ len = next->s_dentry->d_name.len;
37431+ memcpy(d_name, name, len);
37432+ name = d_name;
37433+ } else
37434+ len = strlen(name);
37435 if (next->s_dentry)
37436 ino = next->s_dentry->d_inode->i_ino;
37437 else
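configfs_readdir() now uses the recorded d_name.len and copies the dentry's inline name into a stack buffer before handing it on, instead of running strlen() on storage that a concurrent rename could be rewriting. A simplified sketch of snapshotting a name with an explicit length rather than trusting NUL termination on shared storage (the buffer sizes below are illustrative):

/* Sketch: snapshot a possibly-changing name into a local buffer using a
 * length captured once, instead of calling strlen() on shared storage.
 * INLINE_NAME_MAX and struct entry are illustrative. */
#include <stdio.h>
#include <string.h>

#define INLINE_NAME_MAX 32

struct entry {
    char   inline_name[INLINE_NAME_MAX];
    size_t name_len;                /* kept in sync with the name, like d_name.len */
};

static void emit_entry(const struct entry *e)
{
    char snapshot[INLINE_NAME_MAX];
    size_t len = e->name_len;       /* read the length exactly once */

    if (len >= sizeof(snapshot))
        len = sizeof(snapshot) - 1;
    memcpy(snapshot, e->inline_name, len);
    snapshot[len] = '\0';           /* the local copy is stable from here on */
    printf("%s (%zu bytes)\n", snapshot, len);
}

int main(void)
{
    struct entry e = { "target", 6 };

    emit_entry(&e);
    return 0;
}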
37438diff -urNp linux-2.6.32.41/fs/dcache.c linux-2.6.32.41/fs/dcache.c
37439--- linux-2.6.32.41/fs/dcache.c 2011-03-27 14:31:47.000000000 -0400
37440+++ linux-2.6.32.41/fs/dcache.c 2011-04-23 13:32:21.000000000 -0400
37441@@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
37442
37443 static struct kmem_cache *dentry_cache __read_mostly;
37444
37445-#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
37446-
37447 /*
37448 * This is the single most critical data structure when it comes
37449 * to the dcache: the hashtable for lookups. Somebody should try
37450@@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned lon
37451 mempages -= reserve;
37452
37453 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
37454- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
37455+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
37456
37457 dcache_init();
37458 inode_init();
37459diff -urNp linux-2.6.32.41/fs/dlm/lockspace.c linux-2.6.32.41/fs/dlm/lockspace.c
37460--- linux-2.6.32.41/fs/dlm/lockspace.c 2011-03-27 14:31:47.000000000 -0400
37461+++ linux-2.6.32.41/fs/dlm/lockspace.c 2011-04-17 15:56:46.000000000 -0400
37462@@ -148,7 +148,7 @@ static void lockspace_kobj_release(struc
37463 kfree(ls);
37464 }
37465
37466-static struct sysfs_ops dlm_attr_ops = {
37467+static const struct sysfs_ops dlm_attr_ops = {
37468 .show = dlm_attr_show,
37469 .store = dlm_attr_store,
37470 };
37471diff -urNp linux-2.6.32.41/fs/ecryptfs/inode.c linux-2.6.32.41/fs/ecryptfs/inode.c
37472--- linux-2.6.32.41/fs/ecryptfs/inode.c 2011-03-27 14:31:47.000000000 -0400
37473+++ linux-2.6.32.41/fs/ecryptfs/inode.c 2011-04-17 15:56:46.000000000 -0400
37474@@ -660,7 +660,7 @@ static int ecryptfs_readlink_lower(struc
37475 old_fs = get_fs();
37476 set_fs(get_ds());
37477 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
37478- (char __user *)lower_buf,
37479+ (__force char __user *)lower_buf,
37480 lower_bufsiz);
37481 set_fs(old_fs);
37482 if (rc < 0)
37483@@ -706,7 +706,7 @@ static void *ecryptfs_follow_link(struct
37484 }
37485 old_fs = get_fs();
37486 set_fs(get_ds());
37487- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
37488+ rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
37489 set_fs(old_fs);
37490 if (rc < 0)
37491 goto out_free;
37492diff -urNp linux-2.6.32.41/fs/exec.c linux-2.6.32.41/fs/exec.c
37493--- linux-2.6.32.41/fs/exec.c 2011-04-17 17:00:52.000000000 -0400
37494+++ linux-2.6.32.41/fs/exec.c 2011-05-04 17:56:20.000000000 -0400
37495@@ -56,12 +56,24 @@
37496 #include <linux/fsnotify.h>
37497 #include <linux/fs_struct.h>
37498 #include <linux/pipe_fs_i.h>
37499+#include <linux/random.h>
37500+#include <linux/seq_file.h>
37501+
37502+#ifdef CONFIG_PAX_REFCOUNT
37503+#include <linux/kallsyms.h>
37504+#include <linux/kdebug.h>
37505+#endif
37506
37507 #include <asm/uaccess.h>
37508 #include <asm/mmu_context.h>
37509 #include <asm/tlb.h>
37510 #include "internal.h"
37511
37512+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
37513+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
37514+EXPORT_SYMBOL(pax_set_initial_flags_func);
37515+#endif
37516+
37517 int core_uses_pid;
37518 char core_pattern[CORENAME_MAX_SIZE] = "core";
37519 unsigned int core_pipe_limit;
37520@@ -115,7 +127,7 @@ SYSCALL_DEFINE1(uselib, const char __use
37521 goto out;
37522
37523 file = do_filp_open(AT_FDCWD, tmp,
37524- O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
37525+ O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
37526 MAY_READ | MAY_EXEC | MAY_OPEN);
37527 putname(tmp);
37528 error = PTR_ERR(file);
37529@@ -178,18 +190,10 @@ struct page *get_arg_page(struct linux_b
37530 int write)
37531 {
37532 struct page *page;
37533- int ret;
37534
37535-#ifdef CONFIG_STACK_GROWSUP
37536- if (write) {
37537- ret = expand_stack_downwards(bprm->vma, pos);
37538- if (ret < 0)
37539- return NULL;
37540- }
37541-#endif
37542- ret = get_user_pages(current, bprm->mm, pos,
37543- 1, write, 1, &page, NULL);
37544- if (ret <= 0)
37545+ if (0 > expand_stack_downwards(bprm->vma, pos))
37546+ return NULL;
37547+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
37548 return NULL;
37549
37550 if (write) {
37551@@ -263,6 +267,11 @@ static int __bprm_mm_init(struct linux_b
37552 vma->vm_end = STACK_TOP_MAX;
37553 vma->vm_start = vma->vm_end - PAGE_SIZE;
37554 vma->vm_flags = VM_STACK_FLAGS;
37555+
37556+#ifdef CONFIG_PAX_SEGMEXEC
37557+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
37558+#endif
37559+
37560 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
37561
37562 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
37563@@ -276,6 +285,12 @@ static int __bprm_mm_init(struct linux_b
37564 mm->stack_vm = mm->total_vm = 1;
37565 up_write(&mm->mmap_sem);
37566 bprm->p = vma->vm_end - sizeof(void *);
37567+
37568+#ifdef CONFIG_PAX_RANDUSTACK
37569+ if (randomize_va_space)
37570+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
37571+#endif
37572+
37573 return 0;
37574 err:
37575 up_write(&mm->mmap_sem);
37576@@ -510,7 +525,7 @@ int copy_strings_kernel(int argc,char **
37577 int r;
37578 mm_segment_t oldfs = get_fs();
37579 set_fs(KERNEL_DS);
37580- r = copy_strings(argc, (char __user * __user *)argv, bprm);
37581+ r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
37582 set_fs(oldfs);
37583 return r;
37584 }
37585@@ -540,7 +555,8 @@ static int shift_arg_pages(struct vm_are
37586 unsigned long new_end = old_end - shift;
37587 struct mmu_gather *tlb;
37588
37589- BUG_ON(new_start > new_end);
37590+ if (new_start >= new_end || new_start < mmap_min_addr)
37591+ return -ENOMEM;
37592
37593 /*
37594 * ensure there are no vmas between where we want to go
37595@@ -549,6 +565,10 @@ static int shift_arg_pages(struct vm_are
37596 if (vma != find_vma(mm, new_start))
37597 return -EFAULT;
37598
37599+#ifdef CONFIG_PAX_SEGMEXEC
37600+ BUG_ON(pax_find_mirror_vma(vma));
37601+#endif
37602+
37603 /*
37604 * cover the whole range: [new_start, old_end)
37605 */
37606@@ -630,10 +650,6 @@ int setup_arg_pages(struct linux_binprm
37607 stack_top = arch_align_stack(stack_top);
37608 stack_top = PAGE_ALIGN(stack_top);
37609
37610- if (unlikely(stack_top < mmap_min_addr) ||
37611- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
37612- return -ENOMEM;
37613-
37614 stack_shift = vma->vm_end - stack_top;
37615
37616 bprm->p -= stack_shift;
37617@@ -645,6 +661,14 @@ int setup_arg_pages(struct linux_binprm
37618 bprm->exec -= stack_shift;
37619
37620 down_write(&mm->mmap_sem);
37621+
37622+ /* Move stack pages down in memory. */
37623+ if (stack_shift) {
37624+ ret = shift_arg_pages(vma, stack_shift);
37625+ if (ret)
37626+ goto out_unlock;
37627+ }
37628+
37629 vm_flags = VM_STACK_FLAGS;
37630
37631 /*
37632@@ -658,19 +682,24 @@ int setup_arg_pages(struct linux_binprm
37633 vm_flags &= ~VM_EXEC;
37634 vm_flags |= mm->def_flags;
37635
37636+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
37637+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
37638+ vm_flags &= ~VM_EXEC;
37639+
37640+#ifdef CONFIG_PAX_MPROTECT
37641+ if (mm->pax_flags & MF_PAX_MPROTECT)
37642+ vm_flags &= ~VM_MAYEXEC;
37643+#endif
37644+
37645+ }
37646+#endif
37647+
37648 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
37649 vm_flags);
37650 if (ret)
37651 goto out_unlock;
37652 BUG_ON(prev != vma);
37653
37654- /* Move stack pages down in memory. */
37655- if (stack_shift) {
37656- ret = shift_arg_pages(vma, stack_shift);
37657- if (ret)
37658- goto out_unlock;
37659- }
37660-
37661 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
37662 stack_size = vma->vm_end - vma->vm_start;
37663 /*
37664@@ -707,7 +736,7 @@ struct file *open_exec(const char *name)
37665 int err;
37666
37667 file = do_filp_open(AT_FDCWD, name,
37668- O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
37669+ O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
37670 MAY_EXEC | MAY_OPEN);
37671 if (IS_ERR(file))
37672 goto out;
37673@@ -744,7 +773,7 @@ int kernel_read(struct file *file, loff_
37674 old_fs = get_fs();
37675 set_fs(get_ds());
37676 /* The cast to a user pointer is valid due to the set_fs() */
37677- result = vfs_read(file, (void __user *)addr, count, &pos);
37678+ result = vfs_read(file, (__force void __user *)addr, count, &pos);
37679 set_fs(old_fs);
37680 return result;
37681 }
37682@@ -1151,7 +1180,7 @@ int check_unsafe_exec(struct linux_binpr
37683 }
37684 rcu_read_unlock();
37685
37686- if (p->fs->users > n_fs) {
37687+ if (atomic_read(&p->fs->users) > n_fs) {
37688 bprm->unsafe |= LSM_UNSAFE_SHARE;
37689 } else {
37690 res = -EAGAIN;
37691@@ -1350,6 +1379,11 @@ int do_execve(char * filename,
37692 char __user *__user *envp,
37693 struct pt_regs * regs)
37694 {
37695+#ifdef CONFIG_GRKERNSEC
37696+ struct file *old_exec_file;
37697+ struct acl_subject_label *old_acl;
37698+ struct rlimit old_rlim[RLIM_NLIMITS];
37699+#endif
37700 struct linux_binprm *bprm;
37701 struct file *file;
37702 struct files_struct *displaced;
37703@@ -1386,6 +1420,23 @@ int do_execve(char * filename,
37704 bprm->filename = filename;
37705 bprm->interp = filename;
37706
37707+ if (gr_process_user_ban()) {
37708+ retval = -EPERM;
37709+ goto out_file;
37710+ }
37711+
37712+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
37713+
37714+ if (gr_handle_nproc()) {
37715+ retval = -EAGAIN;
37716+ goto out_file;
37717+ }
37718+
37719+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
37720+ retval = -EACCES;
37721+ goto out_file;
37722+ }
37723+
37724 retval = bprm_mm_init(bprm);
37725 if (retval)
37726 goto out_file;
37727@@ -1415,10 +1466,41 @@ int do_execve(char * filename,
37728 if (retval < 0)
37729 goto out;
37730
37731+ if (!gr_tpe_allow(file)) {
37732+ retval = -EACCES;
37733+ goto out;
37734+ }
37735+
37736+ if (gr_check_crash_exec(file)) {
37737+ retval = -EACCES;
37738+ goto out;
37739+ }
37740+
37741+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
37742+
37743+ gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
37744+
37745+#ifdef CONFIG_GRKERNSEC
37746+ old_acl = current->acl;
37747+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
37748+ old_exec_file = current->exec_file;
37749+ get_file(file);
37750+ current->exec_file = file;
37751+#endif
37752+
37753+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
37754+ bprm->unsafe & LSM_UNSAFE_SHARE);
37755+ if (retval < 0)
37756+ goto out_fail;
37757+
37758 current->flags &= ~PF_KTHREAD;
37759 retval = search_binary_handler(bprm,regs);
37760 if (retval < 0)
37761- goto out;
37762+ goto out_fail;
37763+#ifdef CONFIG_GRKERNSEC
37764+ if (old_exec_file)
37765+ fput(old_exec_file);
37766+#endif
37767
37768 /* execve succeeded */
37769 current->fs->in_exec = 0;
37770@@ -1429,6 +1511,14 @@ int do_execve(char * filename,
37771 put_files_struct(displaced);
37772 return retval;
37773
37774+out_fail:
37775+#ifdef CONFIG_GRKERNSEC
37776+ current->acl = old_acl;
37777+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
37778+ fput(current->exec_file);
37779+ current->exec_file = old_exec_file;
37780+#endif
37781+
37782 out:
37783 if (bprm->mm) {
37784 acct_arg_size(bprm, 0);
37785@@ -1594,6 +1684,209 @@ out:
37786 return ispipe;
37787 }
37788
37789+int pax_check_flags(unsigned long *flags)
37790+{
37791+ int retval = 0;
37792+
37793+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
37794+ if (*flags & MF_PAX_SEGMEXEC)
37795+ {
37796+ *flags &= ~MF_PAX_SEGMEXEC;
37797+ retval = -EINVAL;
37798+ }
37799+#endif
37800+
37801+ if ((*flags & MF_PAX_PAGEEXEC)
37802+
37803+#ifdef CONFIG_PAX_PAGEEXEC
37804+ && (*flags & MF_PAX_SEGMEXEC)
37805+#endif
37806+
37807+ )
37808+ {
37809+ *flags &= ~MF_PAX_PAGEEXEC;
37810+ retval = -EINVAL;
37811+ }
37812+
37813+ if ((*flags & MF_PAX_MPROTECT)
37814+
37815+#ifdef CONFIG_PAX_MPROTECT
37816+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
37817+#endif
37818+
37819+ )
37820+ {
37821+ *flags &= ~MF_PAX_MPROTECT;
37822+ retval = -EINVAL;
37823+ }
37824+
37825+ if ((*flags & MF_PAX_EMUTRAMP)
37826+
37827+#ifdef CONFIG_PAX_EMUTRAMP
37828+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
37829+#endif
37830+
37831+ )
37832+ {
37833+ *flags &= ~MF_PAX_EMUTRAMP;
37834+ retval = -EINVAL;
37835+ }
37836+
37837+ return retval;
37838+}
37839+
37840+EXPORT_SYMBOL(pax_check_flags);
37841+
37842+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
37843+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
37844+{
37845+ struct task_struct *tsk = current;
37846+ struct mm_struct *mm = current->mm;
37847+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
37848+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
37849+ char *path_exec = NULL;
37850+ char *path_fault = NULL;
37851+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
37852+
37853+ if (buffer_exec && buffer_fault) {
37854+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
37855+
37856+ down_read(&mm->mmap_sem);
37857+ vma = mm->mmap;
37858+ while (vma && (!vma_exec || !vma_fault)) {
37859+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
37860+ vma_exec = vma;
37861+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
37862+ vma_fault = vma;
37863+ vma = vma->vm_next;
37864+ }
37865+ if (vma_exec) {
37866+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
37867+ if (IS_ERR(path_exec))
37868+ path_exec = "<path too long>";
37869+ else {
37870+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
37871+ if (path_exec) {
37872+ *path_exec = 0;
37873+ path_exec = buffer_exec;
37874+ } else
37875+ path_exec = "<path too long>";
37876+ }
37877+ }
37878+ if (vma_fault) {
37879+ start = vma_fault->vm_start;
37880+ end = vma_fault->vm_end;
37881+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
37882+ if (vma_fault->vm_file) {
37883+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
37884+ if (IS_ERR(path_fault))
37885+ path_fault = "<path too long>";
37886+ else {
37887+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
37888+ if (path_fault) {
37889+ *path_fault = 0;
37890+ path_fault = buffer_fault;
37891+ } else
37892+ path_fault = "<path too long>";
37893+ }
37894+ } else
37895+ path_fault = "<anonymous mapping>";
37896+ }
37897+ up_read(&mm->mmap_sem);
37898+ }
37899+ if (tsk->signal->curr_ip)
37900+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
37901+ else
37902+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
37903+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
37904+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
37905+ task_uid(tsk), task_euid(tsk), pc, sp);
37906+ free_page((unsigned long)buffer_exec);
37907+ free_page((unsigned long)buffer_fault);
37908+ pax_report_insns(pc, sp);
37909+ do_coredump(SIGKILL, SIGKILL, regs);
37910+}
37911+#endif
37912+
37913+#ifdef CONFIG_PAX_REFCOUNT
37914+void pax_report_refcount_overflow(struct pt_regs *regs)
37915+{
37916+ if (current->signal->curr_ip)
37917+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
37918+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
37919+ else
37920+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
37921+ current->comm, task_pid_nr(current), current_uid(), current_euid());
37922+	print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
37923+ show_regs(regs);
37924+ force_sig_specific(SIGKILL, current);
37925+}
37926+#endif
37927+
37928+#ifdef CONFIG_PAX_USERCOPY
37929+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
37930+int object_is_on_stack(const void *obj, unsigned long len)
37931+{
37932+ const void * const stack = task_stack_page(current);
37933+ const void * const stackend = stack + THREAD_SIZE;
37934+
37935+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
37936+ const void *frame = NULL;
37937+ const void *oldframe;
37938+#endif
37939+
37940+ if (obj + len < obj)
37941+ return -1;
37942+
37943+ if (obj + len <= stack || stackend <= obj)
37944+ return 0;
37945+
37946+ if (obj < stack || stackend < obj + len)
37947+ return -1;
37948+
37949+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
37950+ oldframe = __builtin_frame_address(1);
37951+ if (oldframe)
37952+ frame = __builtin_frame_address(2);
37953+ /*
37954+ low ----------------------------------------------> high
37955+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
37956+ ^----------------^
37957+ allow copies only within here
37958+ */
37959+ while (stack <= frame && frame < stackend) {
37960+ /* if obj + len extends past the last frame, this
37961+ check won't pass and the next frame will be 0,
37962+ causing us to bail out and correctly report
37963+ the copy as invalid
37964+ */
37965+ if (obj + len <= frame)
37966+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
37967+ oldframe = frame;
37968+ frame = *(const void * const *)frame;
37969+ }
37970+ return -1;
37971+#else
37972+ return 1;
37973+#endif
37974+}
37975+
37976+
37977+void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
37978+{
37979+ if (current->signal->curr_ip)
37980+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
37981+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
37982+ else
37983+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
37984+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
37985+
37986+ dump_stack();
37987+ gr_handle_kernel_exploit();
37988+ do_group_exit(SIGKILL);
37989+}
37990+#endif
37991+
37992 static int zap_process(struct task_struct *start)
37993 {
37994 struct task_struct *t;
37995@@ -1796,17 +2089,17 @@ static void wait_for_dump_helpers(struct
37996 pipe = file->f_path.dentry->d_inode->i_pipe;
37997
37998 pipe_lock(pipe);
37999- pipe->readers++;
38000- pipe->writers--;
38001+ atomic_inc(&pipe->readers);
38002+ atomic_dec(&pipe->writers);
38003
38004- while ((pipe->readers > 1) && (!signal_pending(current))) {
38005+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
38006 wake_up_interruptible_sync(&pipe->wait);
38007 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
38008 pipe_wait(pipe);
38009 }
38010
38011- pipe->readers--;
38012- pipe->writers++;
38013+ atomic_dec(&pipe->readers);
38014+ atomic_inc(&pipe->writers);
38015 pipe_unlock(pipe);
38016
38017 }
38018@@ -1829,10 +2122,13 @@ void do_coredump(long signr, int exit_co
38019 char **helper_argv = NULL;
38020 int helper_argc = 0;
38021 int dump_count = 0;
38022- static atomic_t core_dump_count = ATOMIC_INIT(0);
38023+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
38024
38025 audit_core_dumps(signr);
38026
38027+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
38028+ gr_handle_brute_attach(current, mm->flags);
38029+
38030 binfmt = mm->binfmt;
38031 if (!binfmt || !binfmt->core_dump)
38032 goto fail;
38033@@ -1877,6 +2173,8 @@ void do_coredump(long signr, int exit_co
38034 */
38035 clear_thread_flag(TIF_SIGPENDING);
38036
38037+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
38038+
38039 /*
38040 * lock_kernel() because format_corename() is controlled by sysctl, which
38041 * uses lock_kernel()
38042@@ -1911,7 +2209,7 @@ void do_coredump(long signr, int exit_co
38043 goto fail_unlock;
38044 }
38045
38046- dump_count = atomic_inc_return(&core_dump_count);
38047+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
38048 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
38049 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
38050 task_tgid_vnr(current), current->comm);
38051@@ -1975,7 +2273,7 @@ close_fail:
38052 filp_close(file, NULL);
38053 fail_dropcount:
38054 if (dump_count)
38055- atomic_dec(&core_dump_count);
38056+ atomic_dec_unchecked(&core_dump_count);
38057 fail_unlock:
38058 if (helper_argv)
38059 argv_free(helper_argv);
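Among the fs/exec.c changes, CONFIG_PAX_RANDUSTACK XORs bprm->p with (pax_get_random_long() & ~15) & ~PAGE_MASK when randomize_va_space is set: the delta stays inside one page and has its low four bits cleared, so the page offset of the argument pointer is randomized while its 16-byte phase is preserved. A small arithmetic sketch of that masking; the PAGE_SIZE value and the fixed stand-in for the random number are assumptions for illustration only.

/* Sketch of the RANDUSTACK masking: keep the random delta inside a single
 * page and clear its low four bits.  PAGE_SIZE and the fixed "random"
 * value are assumptions; pax_get_random_long() is not reproduced here. */
#include <stdio.h>

#define PAGE_SIZE 4096ULL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
    unsigned long long p = 0x7ffffffff000ULL - sizeof(void *); /* like vma->vm_end - sizeof(void *) */
    unsigned long long rnd = 0x123456789abcdef0ULL;            /* stand-in random value */
    unsigned long long delta = (rnd & ~15ULL) & ~PAGE_MASK;

    p ^= delta;
    printf("delta=%#llx (inside page: %d, low four bits clear: %d)\n",
           delta, delta < PAGE_SIZE, (delta & 15ULL) == 0);
    printf("randomized p=%#llx\n", p);
    return 0;
}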
38060diff -urNp linux-2.6.32.41/fs/ext2/balloc.c linux-2.6.32.41/fs/ext2/balloc.c
38061--- linux-2.6.32.41/fs/ext2/balloc.c 2011-03-27 14:31:47.000000000 -0400
38062+++ linux-2.6.32.41/fs/ext2/balloc.c 2011-04-17 15:56:46.000000000 -0400
38063@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
38064
38065 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
38066 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
38067- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
38068+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
38069 sbi->s_resuid != current_fsuid() &&
38070 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
38071 return 0;
38072diff -urNp linux-2.6.32.41/fs/ext3/balloc.c linux-2.6.32.41/fs/ext3/balloc.c
38073--- linux-2.6.32.41/fs/ext3/balloc.c 2011-03-27 14:31:47.000000000 -0400
38074+++ linux-2.6.32.41/fs/ext3/balloc.c 2011-04-17 15:56:46.000000000 -0400
38075@@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct e
38076
38077 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
38078 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
38079- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
38080+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
38081 sbi->s_resuid != current_fsuid() &&
38082 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
38083 return 0;
38084diff -urNp linux-2.6.32.41/fs/ext4/balloc.c linux-2.6.32.41/fs/ext4/balloc.c
38085--- linux-2.6.32.41/fs/ext4/balloc.c 2011-03-27 14:31:47.000000000 -0400
38086+++ linux-2.6.32.41/fs/ext4/balloc.c 2011-04-17 15:56:46.000000000 -0400
38087@@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_
38088 /* Hm, nope. Are (enough) root reserved blocks available? */
38089 if (sbi->s_resuid == current_fsuid() ||
38090 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
38091- capable(CAP_SYS_RESOURCE)) {
38092+ capable_nolog(CAP_SYS_RESOURCE)) {
38093 if (free_blocks >= (nblocks + dirty_blocks))
38094 return 1;
38095 }
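The ext2/ext3/ext4 reserved-block checks switch from capable(CAP_SYS_RESOURCE) to capable_nolog(), a grsecurity variant that makes the same capability decision without emitting a log entry, because this test fires constantly on nearly full filesystems. The helper's implementation is not part of these hunks; the sketch below only illustrates the "same check, no log traffic" split, with has_capability() and audit_log() as made-up stand-ins.

/* Conceptual sketch of a capability check with and without log traffic.
 * has_capability() and audit_log() are made-up stand-ins, not kernel APIs,
 * and the real capable_nolog() is not shown in these hunks. */
#include <stdbool.h>
#include <stdio.h>

static bool has_capability(int cap) { return cap == 24; /* pretend only CAP_SYS_RESOURCE (24) is held */ }
static void audit_log(const char *msg) { fprintf(stderr, "audit: %s\n", msg); }

static bool capable_logged(int cap)
{
    bool ok = has_capability(cap);

    audit_log(ok ? "capability used" : "capability denied"); /* noisy on hot paths */
    return ok;
}

static bool capable_quiet(int cap)
{
    return has_capability(cap);     /* same decision, nothing written to the log */
}

int main(void)
{
    /* the reserved-block test runs on every allocation attempt once a
     * filesystem is nearly full, so the quiet variant avoids log floods */
    printf("logged: %d  quiet: %d\n", capable_logged(24), capable_quiet(24));
    return 0;
}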
38096diff -urNp linux-2.6.32.41/fs/ext4/ext4.h linux-2.6.32.41/fs/ext4/ext4.h
38097--- linux-2.6.32.41/fs/ext4/ext4.h 2011-03-27 14:31:47.000000000 -0400
38098+++ linux-2.6.32.41/fs/ext4/ext4.h 2011-04-17 15:56:46.000000000 -0400
38099@@ -1078,19 +1078,19 @@ struct ext4_sb_info {
38100
38101 /* stats for buddy allocator */
38102 spinlock_t s_mb_pa_lock;
38103- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
38104- atomic_t s_bal_success; /* we found long enough chunks */
38105- atomic_t s_bal_allocated; /* in blocks */
38106- atomic_t s_bal_ex_scanned; /* total extents scanned */
38107- atomic_t s_bal_goals; /* goal hits */
38108- atomic_t s_bal_breaks; /* too long searches */
38109- atomic_t s_bal_2orders; /* 2^order hits */
38110+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
38111+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
38112+ atomic_unchecked_t s_bal_allocated; /* in blocks */
38113+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
38114+ atomic_unchecked_t s_bal_goals; /* goal hits */
38115+ atomic_unchecked_t s_bal_breaks; /* too long searches */
38116+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
38117 spinlock_t s_bal_lock;
38118 unsigned long s_mb_buddies_generated;
38119 unsigned long long s_mb_generation_time;
38120- atomic_t s_mb_lost_chunks;
38121- atomic_t s_mb_preallocated;
38122- atomic_t s_mb_discarded;
38123+ atomic_unchecked_t s_mb_lost_chunks;
38124+ atomic_unchecked_t s_mb_preallocated;
38125+ atomic_unchecked_t s_mb_discarded;
38126 atomic_t s_lock_busy;
38127
38128 /* locality groups */
38129diff -urNp linux-2.6.32.41/fs/ext4/mballoc.c linux-2.6.32.41/fs/ext4/mballoc.c
38130--- linux-2.6.32.41/fs/ext4/mballoc.c 2011-03-27 14:31:47.000000000 -0400
38131+++ linux-2.6.32.41/fs/ext4/mballoc.c 2011-05-16 21:46:57.000000000 -0400
38132@@ -1753,7 +1753,7 @@ void ext4_mb_simple_scan_group(struct ex
38133 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
38134
38135 if (EXT4_SB(sb)->s_mb_stats)
38136- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
38137+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
38138
38139 break;
38140 }
38141@@ -2129,7 +2129,7 @@ repeat:
38142 ac->ac_status = AC_STATUS_CONTINUE;
38143 ac->ac_flags |= EXT4_MB_HINT_FIRST;
38144 cr = 3;
38145- atomic_inc(&sbi->s_mb_lost_chunks);
38146+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
38147 goto repeat;
38148 }
38149 }
38150@@ -2172,6 +2172,8 @@ static int ext4_mb_seq_groups_show(struc
38151 ext4_grpblk_t counters[16];
38152 } sg;
38153
38154+ pax_track_stack();
38155+
38156 group--;
38157 if (group == 0)
38158 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
38159@@ -2532,25 +2534,25 @@ int ext4_mb_release(struct super_block *
38160 if (sbi->s_mb_stats) {
38161 printk(KERN_INFO
38162 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
38163- atomic_read(&sbi->s_bal_allocated),
38164- atomic_read(&sbi->s_bal_reqs),
38165- atomic_read(&sbi->s_bal_success));
38166+ atomic_read_unchecked(&sbi->s_bal_allocated),
38167+ atomic_read_unchecked(&sbi->s_bal_reqs),
38168+ atomic_read_unchecked(&sbi->s_bal_success));
38169 printk(KERN_INFO
38170 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
38171 "%u 2^N hits, %u breaks, %u lost\n",
38172- atomic_read(&sbi->s_bal_ex_scanned),
38173- atomic_read(&sbi->s_bal_goals),
38174- atomic_read(&sbi->s_bal_2orders),
38175- atomic_read(&sbi->s_bal_breaks),
38176- atomic_read(&sbi->s_mb_lost_chunks));
38177+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
38178+ atomic_read_unchecked(&sbi->s_bal_goals),
38179+ atomic_read_unchecked(&sbi->s_bal_2orders),
38180+ atomic_read_unchecked(&sbi->s_bal_breaks),
38181+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
38182 printk(KERN_INFO
38183 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
38184 sbi->s_mb_buddies_generated++,
38185 sbi->s_mb_generation_time);
38186 printk(KERN_INFO
38187 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
38188- atomic_read(&sbi->s_mb_preallocated),
38189- atomic_read(&sbi->s_mb_discarded));
38190+ atomic_read_unchecked(&sbi->s_mb_preallocated),
38191+ atomic_read_unchecked(&sbi->s_mb_discarded));
38192 }
38193
38194 free_percpu(sbi->s_locality_groups);
38195@@ -3032,16 +3034,16 @@ static void ext4_mb_collect_stats(struct
38196 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
38197
38198 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
38199- atomic_inc(&sbi->s_bal_reqs);
38200- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
38201+ atomic_inc_unchecked(&sbi->s_bal_reqs);
38202+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
38203 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
38204- atomic_inc(&sbi->s_bal_success);
38205- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
38206+ atomic_inc_unchecked(&sbi->s_bal_success);
38207+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
38208 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
38209 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
38210- atomic_inc(&sbi->s_bal_goals);
38211+ atomic_inc_unchecked(&sbi->s_bal_goals);
38212 if (ac->ac_found > sbi->s_mb_max_to_scan)
38213- atomic_inc(&sbi->s_bal_breaks);
38214+ atomic_inc_unchecked(&sbi->s_bal_breaks);
38215 }
38216
38217 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
38218@@ -3441,7 +3443,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
38219 trace_ext4_mb_new_inode_pa(ac, pa);
38220
38221 ext4_mb_use_inode_pa(ac, pa);
38222- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
38223+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
38224
38225 ei = EXT4_I(ac->ac_inode);
38226 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
38227@@ -3501,7 +3503,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
38228 trace_ext4_mb_new_group_pa(ac, pa);
38229
38230 ext4_mb_use_group_pa(ac, pa);
38231- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
38232+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
38233
38234 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
38235 lg = ac->ac_lg;
38236@@ -3605,7 +3607,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
38237 * from the bitmap and continue.
38238 */
38239 }
38240- atomic_add(free, &sbi->s_mb_discarded);
38241+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
38242
38243 return err;
38244 }
38245@@ -3624,7 +3626,7 @@ ext4_mb_release_group_pa(struct ext4_bud
38246 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
38247 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
38248 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
38249- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
38250+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
38251
38252 if (ac) {
38253 ac->ac_sb = sb;
38254diff -urNp linux-2.6.32.41/fs/ext4/super.c linux-2.6.32.41/fs/ext4/super.c
38255--- linux-2.6.32.41/fs/ext4/super.c 2011-03-27 14:31:47.000000000 -0400
38256+++ linux-2.6.32.41/fs/ext4/super.c 2011-04-17 15:56:46.000000000 -0400
38257@@ -2287,7 +2287,7 @@ static void ext4_sb_release(struct kobje
38258 }
38259
38260
38261-static struct sysfs_ops ext4_attr_ops = {
38262+static const struct sysfs_ops ext4_attr_ops = {
38263 .show = ext4_attr_show,
38264 .store = ext4_attr_store,
38265 };
38266diff -urNp linux-2.6.32.41/fs/fcntl.c linux-2.6.32.41/fs/fcntl.c
38267--- linux-2.6.32.41/fs/fcntl.c 2011-03-27 14:31:47.000000000 -0400
38268+++ linux-2.6.32.41/fs/fcntl.c 2011-04-17 15:56:46.000000000 -0400
38269@@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct
38270 if (err)
38271 return err;
38272
38273+ if (gr_handle_chroot_fowner(pid, type))
38274+ return -ENOENT;
38275+ if (gr_check_protected_task_fowner(pid, type))
38276+ return -EACCES;
38277+
38278 f_modown(filp, pid, type, force);
38279 return 0;
38280 }
38281@@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned in
38282 switch (cmd) {
38283 case F_DUPFD:
38284 case F_DUPFD_CLOEXEC:
38285+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
38286 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
38287 break;
38288 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
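gr_learn_resource() is dropped in just ahead of the existing RLIMIT_NOFILE tests here and again in the fs/file.c hunk further down, so that in learning mode the requested descriptor number is recorded whether or not the limit check then refuses it. The helper itself is not shown in this excerpt; the sketch below only illustrates the record-then-enforce ordering, with invented names throughout.

/* Sketch of the record-then-enforce ordering behind gr_learn_resource():
 * note what the task asked for, then apply the usual rlimit test, so a
 * learning run can later propose adequate limits.  All names invented. */
#include <stdio.h>

static unsigned long observed_max;      /* what a learning pass would report */

static void learn_resource(unsigned long wanted)
{
    if (wanted > observed_max)
        observed_max = wanted;
}

static int check_nofile(unsigned long wanted, unsigned long rlim_cur)
{
    learn_resource(wanted);              /* recorded even when the check fails */
    return wanted >= rlim_cur ? -24 : 0; /* -EMFILE-style refusal */
}

int main(void)
{
    printf("%d\n", check_nofile(100, 1024));    /* allowed            */
    printf("%d\n", check_nofile(4096, 1024));   /* refused, but noted */
    printf("suggested limit: >= %lu\n", observed_max);
    return 0;
}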
38289diff -urNp linux-2.6.32.41/fs/fifo.c linux-2.6.32.41/fs/fifo.c
38290--- linux-2.6.32.41/fs/fifo.c 2011-03-27 14:31:47.000000000 -0400
38291+++ linux-2.6.32.41/fs/fifo.c 2011-04-17 15:56:46.000000000 -0400
38292@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode
38293 */
38294 filp->f_op = &read_pipefifo_fops;
38295 pipe->r_counter++;
38296- if (pipe->readers++ == 0)
38297+ if (atomic_inc_return(&pipe->readers) == 1)
38298 wake_up_partner(inode);
38299
38300- if (!pipe->writers) {
38301+ if (!atomic_read(&pipe->writers)) {
38302 if ((filp->f_flags & O_NONBLOCK)) {
38303 /* suppress POLLHUP until we have
38304 * seen a writer */
38305@@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode
38306 * errno=ENXIO when there is no process reading the FIFO.
38307 */
38308 ret = -ENXIO;
38309- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
38310+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
38311 goto err;
38312
38313 filp->f_op = &write_pipefifo_fops;
38314 pipe->w_counter++;
38315- if (!pipe->writers++)
38316+ if (atomic_inc_return(&pipe->writers) == 1)
38317 wake_up_partner(inode);
38318
38319- if (!pipe->readers) {
38320+ if (!atomic_read(&pipe->readers)) {
38321 wait_for_partner(inode, &pipe->r_counter);
38322 if (signal_pending(current))
38323 goto err_wr;
38324@@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode
38325 */
38326 filp->f_op = &rdwr_pipefifo_fops;
38327
38328- pipe->readers++;
38329- pipe->writers++;
38330+ atomic_inc(&pipe->readers);
38331+ atomic_inc(&pipe->writers);
38332 pipe->r_counter++;
38333 pipe->w_counter++;
38334- if (pipe->readers == 1 || pipe->writers == 1)
38335+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
38336 wake_up_partner(inode);
38337 break;
38338
38339@@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode
38340 return 0;
38341
38342 err_rd:
38343- if (!--pipe->readers)
38344+ if (atomic_dec_and_test(&pipe->readers))
38345 wake_up_interruptible(&pipe->wait);
38346 ret = -ERESTARTSYS;
38347 goto err;
38348
38349 err_wr:
38350- if (!--pipe->writers)
38351+ if (atomic_dec_and_test(&pipe->writers))
38352 wake_up_interruptible(&pipe->wait);
38353 ret = -ERESTARTSYS;
38354 goto err;
38355
38356 err:
38357- if (!pipe->readers && !pipe->writers)
38358+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
38359 free_pipe_info(inode);
38360
38361 err_nocleanup:
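The fifo hunks convert the pipe readers/writers counts to atomics and restate the old first/last tests: pipe->readers++ == 0 becomes atomic_inc_return(&pipe->readers) == 1, and !--pipe->writers becomes atomic_dec_and_test(&pipe->writers). A C11 stdatomic sketch of those equivalences follows; the kernel's own atomic primitives are arch code and are only assumed here.

/* Sketch of the "first increment / last decrement" idioms from the
 * fifo_open() conversion, expressed with C11 atomics; the kernel's own
 * atomic_inc_return()/atomic_dec_and_test() are only assumed here. */
#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
    atomic_int readers = 0, writers = 2;

    /* old: if (pipe->readers++ == 0) ...
     * new: if (atomic_inc_return(&pipe->readers) == 1) ...            */
    int first_reader = (atomic_fetch_add(&readers, 1) + 1 == 1);

    /* old: if (!--pipe->writers) ...
     * new: if (atomic_dec_and_test(&pipe->writers)) ...               */
    int last_writer;
    last_writer = (atomic_fetch_sub(&writers, 1) - 1 == 0);    /* 2 -> 1: no  */
    last_writer = (atomic_fetch_sub(&writers, 1) - 1 == 0);    /* 1 -> 0: yes */

    printf("first_reader=%d last_writer=%d\n", first_reader, last_writer);
    return 0;
}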
38362diff -urNp linux-2.6.32.41/fs/file.c linux-2.6.32.41/fs/file.c
38363--- linux-2.6.32.41/fs/file.c 2011-03-27 14:31:47.000000000 -0400
38364+++ linux-2.6.32.41/fs/file.c 2011-04-17 15:56:46.000000000 -0400
38365@@ -14,6 +14,7 @@
38366 #include <linux/slab.h>
38367 #include <linux/vmalloc.h>
38368 #include <linux/file.h>
38369+#include <linux/security.h>
38370 #include <linux/fdtable.h>
38371 #include <linux/bitops.h>
38372 #include <linux/interrupt.h>
38373@@ -257,6 +258,8 @@ int expand_files(struct files_struct *fi
38374 * N.B. For clone tasks sharing a files structure, this test
38375 * will limit the total number of files that can be opened.
38376 */
38377+
38378+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
38379 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
38380 return -EMFILE;
38381
38382diff -urNp linux-2.6.32.41/fs/filesystems.c linux-2.6.32.41/fs/filesystems.c
38383--- linux-2.6.32.41/fs/filesystems.c 2011-03-27 14:31:47.000000000 -0400
38384+++ linux-2.6.32.41/fs/filesystems.c 2011-04-17 15:56:46.000000000 -0400
38385@@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(con
38386 int len = dot ? dot - name : strlen(name);
38387
38388 fs = __get_fs_type(name, len);
38389+
38390+#ifdef CONFIG_GRKERNSEC_MODHARDEN
38391+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
38392+#else
38393 if (!fs && (request_module("%.*s", len, name) == 0))
38394+#endif
38395 fs = __get_fs_type(name, len);
38396
38397 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
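With CONFIG_GRKERNSEC_MODHARDEN, the automatic module load triggered by mounting an unknown filesystem type goes through ___request_module() with a "grsec_modharden_fs" marker rather than a plain request_module(), so the module-loading restrictions can tell user-triggered auto-loads apart from kernel-internal ones. A rough sketch of that idea; request_module_sketch(), the prefix format, and load_fs_module() are illustrative only, not the kernel interface.

/* Sketch of the MODHARDEN idea: auto-load requests that originate from an
 * unprivileged mount are issued under a distinct marker so module-loading
 * policy can treat them separately.  request_module_sketch(), the prefix
 * format and load_fs_module() are illustrative, not the kernel interface
 * (the real call is ___request_module(true, "grsec_modharden_fs", ...)). */
#include <stdbool.h>
#include <stdio.h>

static int request_module_sketch(const char *name)
{
    printf("would modprobe: %s\n", name);
    return 0;
}

static int load_fs_module(const char *fstype, bool harden)
{
    char buf[64];

    if (harden)
        snprintf(buf, sizeof(buf), "grsec_modharden_fs:%s", fstype);
    else
        snprintf(buf, sizeof(buf), "%s", fstype);
    return request_module_sketch(buf);
}

int main(void)
{
    load_fs_module("xfs", false);   /* plain request_module() path      */
    load_fs_module("xfs", true);    /* CONFIG_GRKERNSEC_MODHARDEN path  */
    return 0;
}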
38398diff -urNp linux-2.6.32.41/fs/fscache/cookie.c linux-2.6.32.41/fs/fscache/cookie.c
38399--- linux-2.6.32.41/fs/fscache/cookie.c 2011-03-27 14:31:47.000000000 -0400
38400+++ linux-2.6.32.41/fs/fscache/cookie.c 2011-05-04 17:56:28.000000000 -0400
38401@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
38402 parent ? (char *) parent->def->name : "<no-parent>",
38403 def->name, netfs_data);
38404
38405- fscache_stat(&fscache_n_acquires);
38406+ fscache_stat_unchecked(&fscache_n_acquires);
38407
38408 /* if there's no parent cookie, then we don't create one here either */
38409 if (!parent) {
38410- fscache_stat(&fscache_n_acquires_null);
38411+ fscache_stat_unchecked(&fscache_n_acquires_null);
38412 _leave(" [no parent]");
38413 return NULL;
38414 }
38415@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
38416 /* allocate and initialise a cookie */
38417 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
38418 if (!cookie) {
38419- fscache_stat(&fscache_n_acquires_oom);
38420+ fscache_stat_unchecked(&fscache_n_acquires_oom);
38421 _leave(" [ENOMEM]");
38422 return NULL;
38423 }
38424@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
38425
38426 switch (cookie->def->type) {
38427 case FSCACHE_COOKIE_TYPE_INDEX:
38428- fscache_stat(&fscache_n_cookie_index);
38429+ fscache_stat_unchecked(&fscache_n_cookie_index);
38430 break;
38431 case FSCACHE_COOKIE_TYPE_DATAFILE:
38432- fscache_stat(&fscache_n_cookie_data);
38433+ fscache_stat_unchecked(&fscache_n_cookie_data);
38434 break;
38435 default:
38436- fscache_stat(&fscache_n_cookie_special);
38437+ fscache_stat_unchecked(&fscache_n_cookie_special);
38438 break;
38439 }
38440
38441@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
38442 if (fscache_acquire_non_index_cookie(cookie) < 0) {
38443 atomic_dec(&parent->n_children);
38444 __fscache_cookie_put(cookie);
38445- fscache_stat(&fscache_n_acquires_nobufs);
38446+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
38447 _leave(" = NULL");
38448 return NULL;
38449 }
38450 }
38451
38452- fscache_stat(&fscache_n_acquires_ok);
38453+ fscache_stat_unchecked(&fscache_n_acquires_ok);
38454 _leave(" = %p", cookie);
38455 return cookie;
38456 }
38457@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
38458 cache = fscache_select_cache_for_object(cookie->parent);
38459 if (!cache) {
38460 up_read(&fscache_addremove_sem);
38461- fscache_stat(&fscache_n_acquires_no_cache);
38462+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
38463 _leave(" = -ENOMEDIUM [no cache]");
38464 return -ENOMEDIUM;
38465 }
38466@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
38467 object = cache->ops->alloc_object(cache, cookie);
38468 fscache_stat_d(&fscache_n_cop_alloc_object);
38469 if (IS_ERR(object)) {
38470- fscache_stat(&fscache_n_object_no_alloc);
38471+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
38472 ret = PTR_ERR(object);
38473 goto error;
38474 }
38475
38476- fscache_stat(&fscache_n_object_alloc);
38477+ fscache_stat_unchecked(&fscache_n_object_alloc);
38478
38479 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
38480
38481@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
38482 struct fscache_object *object;
38483 struct hlist_node *_p;
38484
38485- fscache_stat(&fscache_n_updates);
38486+ fscache_stat_unchecked(&fscache_n_updates);
38487
38488 if (!cookie) {
38489- fscache_stat(&fscache_n_updates_null);
38490+ fscache_stat_unchecked(&fscache_n_updates_null);
38491 _leave(" [no cookie]");
38492 return;
38493 }
38494@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
38495 struct fscache_object *object;
38496 unsigned long event;
38497
38498- fscache_stat(&fscache_n_relinquishes);
38499+ fscache_stat_unchecked(&fscache_n_relinquishes);
38500 if (retire)
38501- fscache_stat(&fscache_n_relinquishes_retire);
38502+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
38503
38504 if (!cookie) {
38505- fscache_stat(&fscache_n_relinquishes_null);
38506+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
38507 _leave(" [no cookie]");
38508 return;
38509 }
38510@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
38511
38512 /* wait for the cookie to finish being instantiated (or to fail) */
38513 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
38514- fscache_stat(&fscache_n_relinquishes_waitcrt);
38515+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
38516 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
38517 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
38518 }
38519diff -urNp linux-2.6.32.41/fs/fscache/internal.h linux-2.6.32.41/fs/fscache/internal.h
38520--- linux-2.6.32.41/fs/fscache/internal.h 2011-03-27 14:31:47.000000000 -0400
38521+++ linux-2.6.32.41/fs/fscache/internal.h 2011-05-04 17:56:28.000000000 -0400
38522@@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
38523 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
38524 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
38525
38526-extern atomic_t fscache_n_op_pend;
38527-extern atomic_t fscache_n_op_run;
38528-extern atomic_t fscache_n_op_enqueue;
38529-extern atomic_t fscache_n_op_deferred_release;
38530-extern atomic_t fscache_n_op_release;
38531-extern atomic_t fscache_n_op_gc;
38532-extern atomic_t fscache_n_op_cancelled;
38533-extern atomic_t fscache_n_op_rejected;
38534-
38535-extern atomic_t fscache_n_attr_changed;
38536-extern atomic_t fscache_n_attr_changed_ok;
38537-extern atomic_t fscache_n_attr_changed_nobufs;
38538-extern atomic_t fscache_n_attr_changed_nomem;
38539-extern atomic_t fscache_n_attr_changed_calls;
38540-
38541-extern atomic_t fscache_n_allocs;
38542-extern atomic_t fscache_n_allocs_ok;
38543-extern atomic_t fscache_n_allocs_wait;
38544-extern atomic_t fscache_n_allocs_nobufs;
38545-extern atomic_t fscache_n_allocs_intr;
38546-extern atomic_t fscache_n_allocs_object_dead;
38547-extern atomic_t fscache_n_alloc_ops;
38548-extern atomic_t fscache_n_alloc_op_waits;
38549-
38550-extern atomic_t fscache_n_retrievals;
38551-extern atomic_t fscache_n_retrievals_ok;
38552-extern atomic_t fscache_n_retrievals_wait;
38553-extern atomic_t fscache_n_retrievals_nodata;
38554-extern atomic_t fscache_n_retrievals_nobufs;
38555-extern atomic_t fscache_n_retrievals_intr;
38556-extern atomic_t fscache_n_retrievals_nomem;
38557-extern atomic_t fscache_n_retrievals_object_dead;
38558-extern atomic_t fscache_n_retrieval_ops;
38559-extern atomic_t fscache_n_retrieval_op_waits;
38560-
38561-extern atomic_t fscache_n_stores;
38562-extern atomic_t fscache_n_stores_ok;
38563-extern atomic_t fscache_n_stores_again;
38564-extern atomic_t fscache_n_stores_nobufs;
38565-extern atomic_t fscache_n_stores_oom;
38566-extern atomic_t fscache_n_store_ops;
38567-extern atomic_t fscache_n_store_calls;
38568-extern atomic_t fscache_n_store_pages;
38569-extern atomic_t fscache_n_store_radix_deletes;
38570-extern atomic_t fscache_n_store_pages_over_limit;
38571-
38572-extern atomic_t fscache_n_store_vmscan_not_storing;
38573-extern atomic_t fscache_n_store_vmscan_gone;
38574-extern atomic_t fscache_n_store_vmscan_busy;
38575-extern atomic_t fscache_n_store_vmscan_cancelled;
38576-
38577-extern atomic_t fscache_n_marks;
38578-extern atomic_t fscache_n_uncaches;
38579-
38580-extern atomic_t fscache_n_acquires;
38581-extern atomic_t fscache_n_acquires_null;
38582-extern atomic_t fscache_n_acquires_no_cache;
38583-extern atomic_t fscache_n_acquires_ok;
38584-extern atomic_t fscache_n_acquires_nobufs;
38585-extern atomic_t fscache_n_acquires_oom;
38586-
38587-extern atomic_t fscache_n_updates;
38588-extern atomic_t fscache_n_updates_null;
38589-extern atomic_t fscache_n_updates_run;
38590-
38591-extern atomic_t fscache_n_relinquishes;
38592-extern atomic_t fscache_n_relinquishes_null;
38593-extern atomic_t fscache_n_relinquishes_waitcrt;
38594-extern atomic_t fscache_n_relinquishes_retire;
38595-
38596-extern atomic_t fscache_n_cookie_index;
38597-extern atomic_t fscache_n_cookie_data;
38598-extern atomic_t fscache_n_cookie_special;
38599-
38600-extern atomic_t fscache_n_object_alloc;
38601-extern atomic_t fscache_n_object_no_alloc;
38602-extern atomic_t fscache_n_object_lookups;
38603-extern atomic_t fscache_n_object_lookups_negative;
38604-extern atomic_t fscache_n_object_lookups_positive;
38605-extern atomic_t fscache_n_object_lookups_timed_out;
38606-extern atomic_t fscache_n_object_created;
38607-extern atomic_t fscache_n_object_avail;
38608-extern atomic_t fscache_n_object_dead;
38609-
38610-extern atomic_t fscache_n_checkaux_none;
38611-extern atomic_t fscache_n_checkaux_okay;
38612-extern atomic_t fscache_n_checkaux_update;
38613-extern atomic_t fscache_n_checkaux_obsolete;
38614+extern atomic_unchecked_t fscache_n_op_pend;
38615+extern atomic_unchecked_t fscache_n_op_run;
38616+extern atomic_unchecked_t fscache_n_op_enqueue;
38617+extern atomic_unchecked_t fscache_n_op_deferred_release;
38618+extern atomic_unchecked_t fscache_n_op_release;
38619+extern atomic_unchecked_t fscache_n_op_gc;
38620+extern atomic_unchecked_t fscache_n_op_cancelled;
38621+extern atomic_unchecked_t fscache_n_op_rejected;
38622+
38623+extern atomic_unchecked_t fscache_n_attr_changed;
38624+extern atomic_unchecked_t fscache_n_attr_changed_ok;
38625+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
38626+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
38627+extern atomic_unchecked_t fscache_n_attr_changed_calls;
38628+
38629+extern atomic_unchecked_t fscache_n_allocs;
38630+extern atomic_unchecked_t fscache_n_allocs_ok;
38631+extern atomic_unchecked_t fscache_n_allocs_wait;
38632+extern atomic_unchecked_t fscache_n_allocs_nobufs;
38633+extern atomic_unchecked_t fscache_n_allocs_intr;
38634+extern atomic_unchecked_t fscache_n_allocs_object_dead;
38635+extern atomic_unchecked_t fscache_n_alloc_ops;
38636+extern atomic_unchecked_t fscache_n_alloc_op_waits;
38637+
38638+extern atomic_unchecked_t fscache_n_retrievals;
38639+extern atomic_unchecked_t fscache_n_retrievals_ok;
38640+extern atomic_unchecked_t fscache_n_retrievals_wait;
38641+extern atomic_unchecked_t fscache_n_retrievals_nodata;
38642+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
38643+extern atomic_unchecked_t fscache_n_retrievals_intr;
38644+extern atomic_unchecked_t fscache_n_retrievals_nomem;
38645+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
38646+extern atomic_unchecked_t fscache_n_retrieval_ops;
38647+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
38648+
38649+extern atomic_unchecked_t fscache_n_stores;
38650+extern atomic_unchecked_t fscache_n_stores_ok;
38651+extern atomic_unchecked_t fscache_n_stores_again;
38652+extern atomic_unchecked_t fscache_n_stores_nobufs;
38653+extern atomic_unchecked_t fscache_n_stores_oom;
38654+extern atomic_unchecked_t fscache_n_store_ops;
38655+extern atomic_unchecked_t fscache_n_store_calls;
38656+extern atomic_unchecked_t fscache_n_store_pages;
38657+extern atomic_unchecked_t fscache_n_store_radix_deletes;
38658+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
38659+
38660+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
38661+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
38662+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
38663+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
38664+
38665+extern atomic_unchecked_t fscache_n_marks;
38666+extern atomic_unchecked_t fscache_n_uncaches;
38667+
38668+extern atomic_unchecked_t fscache_n_acquires;
38669+extern atomic_unchecked_t fscache_n_acquires_null;
38670+extern atomic_unchecked_t fscache_n_acquires_no_cache;
38671+extern atomic_unchecked_t fscache_n_acquires_ok;
38672+extern atomic_unchecked_t fscache_n_acquires_nobufs;
38673+extern atomic_unchecked_t fscache_n_acquires_oom;
38674+
38675+extern atomic_unchecked_t fscache_n_updates;
38676+extern atomic_unchecked_t fscache_n_updates_null;
38677+extern atomic_unchecked_t fscache_n_updates_run;
38678+
38679+extern atomic_unchecked_t fscache_n_relinquishes;
38680+extern atomic_unchecked_t fscache_n_relinquishes_null;
38681+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
38682+extern atomic_unchecked_t fscache_n_relinquishes_retire;
38683+
38684+extern atomic_unchecked_t fscache_n_cookie_index;
38685+extern atomic_unchecked_t fscache_n_cookie_data;
38686+extern atomic_unchecked_t fscache_n_cookie_special;
38687+
38688+extern atomic_unchecked_t fscache_n_object_alloc;
38689+extern atomic_unchecked_t fscache_n_object_no_alloc;
38690+extern atomic_unchecked_t fscache_n_object_lookups;
38691+extern atomic_unchecked_t fscache_n_object_lookups_negative;
38692+extern atomic_unchecked_t fscache_n_object_lookups_positive;
38693+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
38694+extern atomic_unchecked_t fscache_n_object_created;
38695+extern atomic_unchecked_t fscache_n_object_avail;
38696+extern atomic_unchecked_t fscache_n_object_dead;
38697+
38698+extern atomic_unchecked_t fscache_n_checkaux_none;
38699+extern atomic_unchecked_t fscache_n_checkaux_okay;
38700+extern atomic_unchecked_t fscache_n_checkaux_update;
38701+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
38702
38703 extern atomic_t fscache_n_cop_alloc_object;
38704 extern atomic_t fscache_n_cop_lookup_object;
38705@@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t
38706 atomic_inc(stat);
38707 }
38708
38709+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
38710+{
38711+ atomic_inc_unchecked(stat);
38712+}
38713+
38714 static inline void fscache_stat_d(atomic_t *stat)
38715 {
38716 atomic_dec(stat);
38717@@ -259,6 +264,7 @@ extern const struct file_operations fsca
38718
38719 #define __fscache_stat(stat) (NULL)
38720 #define fscache_stat(stat) do {} while (0)
38721+#define fscache_stat_unchecked(stat) do {} while (0)
38722 #define fscache_stat_d(stat) do {} while (0)
38723 #endif
38724
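fscache's statistics counters are moved wholesale to atomic_unchecked_t, and fscache_stat_unchecked() is added alongside the existing fscache_stat()/fscache_stat_d() helpers, with a matching do-nothing macro for builds without statistics. A compact sketch of that "real inline when enabled, empty macro when disabled" pattern; the type and the STATS_ENABLED toggle below are illustrative, not the kernel's.

/* Sketch of the stats-helper pattern: a real inline when statistics are
 * compiled in, a do-nothing macro otherwise.  The counter type and the
 * STATS_ENABLED toggle are illustrative only. */
#include <stdio.h>

#define STATS_ENABLED 1

#if STATS_ENABLED
typedef unsigned int stat_t;
static inline void stat_inc(stat_t *s) { (*s)++; }
static inline void stat_dec(stat_t *s) { (*s)--; }
#else
typedef unsigned int stat_t;            /* storage kept so &counter still compiles */
#define stat_inc(s) do { } while (0)
#define stat_dec(s) do { } while (0)
#endif

static stat_t n_acquires;

int main(void)
{
    stat_inc(&n_acquires);
    stat_inc(&n_acquires);
    stat_dec(&n_acquires);
    printf("acquires outstanding: %u\n", n_acquires);
    return 0;
}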
38725diff -urNp linux-2.6.32.41/fs/fscache/object.c linux-2.6.32.41/fs/fscache/object.c
38726--- linux-2.6.32.41/fs/fscache/object.c 2011-03-27 14:31:47.000000000 -0400
38727+++ linux-2.6.32.41/fs/fscache/object.c 2011-05-04 17:56:28.000000000 -0400
38728@@ -144,7 +144,7 @@ static void fscache_object_state_machine
38729 /* update the object metadata on disk */
38730 case FSCACHE_OBJECT_UPDATING:
38731 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
38732- fscache_stat(&fscache_n_updates_run);
38733+ fscache_stat_unchecked(&fscache_n_updates_run);
38734 fscache_stat(&fscache_n_cop_update_object);
38735 object->cache->ops->update_object(object);
38736 fscache_stat_d(&fscache_n_cop_update_object);
38737@@ -233,7 +233,7 @@ static void fscache_object_state_machine
38738 spin_lock(&object->lock);
38739 object->state = FSCACHE_OBJECT_DEAD;
38740 spin_unlock(&object->lock);
38741- fscache_stat(&fscache_n_object_dead);
38742+ fscache_stat_unchecked(&fscache_n_object_dead);
38743 goto terminal_transit;
38744
38745 /* handle the parent cache of this object being withdrawn from
38746@@ -248,7 +248,7 @@ static void fscache_object_state_machine
38747 spin_lock(&object->lock);
38748 object->state = FSCACHE_OBJECT_DEAD;
38749 spin_unlock(&object->lock);
38750- fscache_stat(&fscache_n_object_dead);
38751+ fscache_stat_unchecked(&fscache_n_object_dead);
38752 goto terminal_transit;
38753
38754 /* complain about the object being woken up once it is
38755@@ -492,7 +492,7 @@ static void fscache_lookup_object(struct
38756 parent->cookie->def->name, cookie->def->name,
38757 object->cache->tag->name);
38758
38759- fscache_stat(&fscache_n_object_lookups);
38760+ fscache_stat_unchecked(&fscache_n_object_lookups);
38761 fscache_stat(&fscache_n_cop_lookup_object);
38762 ret = object->cache->ops->lookup_object(object);
38763 fscache_stat_d(&fscache_n_cop_lookup_object);
38764@@ -503,7 +503,7 @@ static void fscache_lookup_object(struct
38765 if (ret == -ETIMEDOUT) {
38766 /* probably stuck behind another object, so move this one to
38767 * the back of the queue */
38768- fscache_stat(&fscache_n_object_lookups_timed_out);
38769+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
38770 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
38771 }
38772
38773@@ -526,7 +526,7 @@ void fscache_object_lookup_negative(stru
38774
38775 spin_lock(&object->lock);
38776 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
38777- fscache_stat(&fscache_n_object_lookups_negative);
38778+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
38779
38780 /* transit here to allow write requests to begin stacking up
38781 * and read requests to begin returning ENODATA */
38782@@ -572,7 +572,7 @@ void fscache_obtained_object(struct fsca
38783 * result, in which case there may be data available */
38784 spin_lock(&object->lock);
38785 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
38786- fscache_stat(&fscache_n_object_lookups_positive);
38787+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
38788
38789 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
38790
38791@@ -586,7 +586,7 @@ void fscache_obtained_object(struct fsca
38792 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
38793 } else {
38794 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
38795- fscache_stat(&fscache_n_object_created);
38796+ fscache_stat_unchecked(&fscache_n_object_created);
38797
38798 object->state = FSCACHE_OBJECT_AVAILABLE;
38799 spin_unlock(&object->lock);
38800@@ -633,7 +633,7 @@ static void fscache_object_available(str
38801 fscache_enqueue_dependents(object);
38802
38803 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
38804- fscache_stat(&fscache_n_object_avail);
38805+ fscache_stat_unchecked(&fscache_n_object_avail);
38806
38807 _leave("");
38808 }
38809@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
38810 enum fscache_checkaux result;
38811
38812 if (!object->cookie->def->check_aux) {
38813- fscache_stat(&fscache_n_checkaux_none);
38814+ fscache_stat_unchecked(&fscache_n_checkaux_none);
38815 return FSCACHE_CHECKAUX_OKAY;
38816 }
38817
38818@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
38819 switch (result) {
38820 /* entry okay as is */
38821 case FSCACHE_CHECKAUX_OKAY:
38822- fscache_stat(&fscache_n_checkaux_okay);
38823+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
38824 break;
38825
38826 /* entry requires update */
38827 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
38828- fscache_stat(&fscache_n_checkaux_update);
38829+ fscache_stat_unchecked(&fscache_n_checkaux_update);
38830 break;
38831
38832 /* entry requires deletion */
38833 case FSCACHE_CHECKAUX_OBSOLETE:
38834- fscache_stat(&fscache_n_checkaux_obsolete);
38835+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
38836 break;
38837
38838 default:
38839diff -urNp linux-2.6.32.41/fs/fscache/operation.c linux-2.6.32.41/fs/fscache/operation.c
38840--- linux-2.6.32.41/fs/fscache/operation.c 2011-03-27 14:31:47.000000000 -0400
38841+++ linux-2.6.32.41/fs/fscache/operation.c 2011-05-04 17:56:28.000000000 -0400
38842@@ -16,7 +16,7 @@
38843 #include <linux/seq_file.h>
38844 #include "internal.h"
38845
38846-atomic_t fscache_op_debug_id;
38847+atomic_unchecked_t fscache_op_debug_id;
38848 EXPORT_SYMBOL(fscache_op_debug_id);
38849
38850 /**
38851@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fs
38852 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
38853 ASSERTCMP(atomic_read(&op->usage), >, 0);
38854
38855- fscache_stat(&fscache_n_op_enqueue);
38856+ fscache_stat_unchecked(&fscache_n_op_enqueue);
38857 switch (op->flags & FSCACHE_OP_TYPE) {
38858 case FSCACHE_OP_FAST:
38859 _debug("queue fast");
38860@@ -76,7 +76,7 @@ static void fscache_run_op(struct fscach
38861 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
38862 if (op->processor)
38863 fscache_enqueue_operation(op);
38864- fscache_stat(&fscache_n_op_run);
38865+ fscache_stat_unchecked(&fscache_n_op_run);
38866 }
38867
38868 /*
38869@@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct f
38870 if (object->n_ops > 0) {
38871 atomic_inc(&op->usage);
38872 list_add_tail(&op->pend_link, &object->pending_ops);
38873- fscache_stat(&fscache_n_op_pend);
38874+ fscache_stat_unchecked(&fscache_n_op_pend);
38875 } else if (!list_empty(&object->pending_ops)) {
38876 atomic_inc(&op->usage);
38877 list_add_tail(&op->pend_link, &object->pending_ops);
38878- fscache_stat(&fscache_n_op_pend);
38879+ fscache_stat_unchecked(&fscache_n_op_pend);
38880 fscache_start_operations(object);
38881 } else {
38882 ASSERTCMP(object->n_in_progress, ==, 0);
38883@@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct f
38884 object->n_exclusive++; /* reads and writes must wait */
38885 atomic_inc(&op->usage);
38886 list_add_tail(&op->pend_link, &object->pending_ops);
38887- fscache_stat(&fscache_n_op_pend);
38888+ fscache_stat_unchecked(&fscache_n_op_pend);
38889 ret = 0;
38890 } else {
38891 /* not allowed to submit ops in any other state */
38892@@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_obj
38893 if (object->n_exclusive > 0) {
38894 atomic_inc(&op->usage);
38895 list_add_tail(&op->pend_link, &object->pending_ops);
38896- fscache_stat(&fscache_n_op_pend);
38897+ fscache_stat_unchecked(&fscache_n_op_pend);
38898 } else if (!list_empty(&object->pending_ops)) {
38899 atomic_inc(&op->usage);
38900 list_add_tail(&op->pend_link, &object->pending_ops);
38901- fscache_stat(&fscache_n_op_pend);
38902+ fscache_stat_unchecked(&fscache_n_op_pend);
38903 fscache_start_operations(object);
38904 } else {
38905 ASSERTCMP(object->n_exclusive, ==, 0);
38906@@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_obj
38907 object->n_ops++;
38908 atomic_inc(&op->usage);
38909 list_add_tail(&op->pend_link, &object->pending_ops);
38910- fscache_stat(&fscache_n_op_pend);
38911+ fscache_stat_unchecked(&fscache_n_op_pend);
38912 ret = 0;
38913 } else if (object->state == FSCACHE_OBJECT_DYING ||
38914 object->state == FSCACHE_OBJECT_LC_DYING ||
38915 object->state == FSCACHE_OBJECT_WITHDRAWING) {
38916- fscache_stat(&fscache_n_op_rejected);
38917+ fscache_stat_unchecked(&fscache_n_op_rejected);
38918 ret = -ENOBUFS;
38919 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
38920 fscache_report_unexpected_submission(object, op, ostate);
38921@@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_ope
38922
38923 ret = -EBUSY;
38924 if (!list_empty(&op->pend_link)) {
38925- fscache_stat(&fscache_n_op_cancelled);
38926+ fscache_stat_unchecked(&fscache_n_op_cancelled);
38927 list_del_init(&op->pend_link);
38928 object->n_ops--;
38929 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
38930@@ -344,7 +344,7 @@ void fscache_put_operation(struct fscach
38931 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
38932 BUG();
38933
38934- fscache_stat(&fscache_n_op_release);
38935+ fscache_stat_unchecked(&fscache_n_op_release);
38936
38937 if (op->release) {
38938 op->release(op);
38939@@ -361,7 +361,7 @@ void fscache_put_operation(struct fscach
38940 * lock, and defer it otherwise */
38941 if (!spin_trylock(&object->lock)) {
38942 _debug("defer put");
38943- fscache_stat(&fscache_n_op_deferred_release);
38944+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
38945
38946 cache = object->cache;
38947 spin_lock(&cache->op_gc_list_lock);
38948@@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_st
38949
38950 _debug("GC DEFERRED REL OBJ%x OP%x",
38951 object->debug_id, op->debug_id);
38952- fscache_stat(&fscache_n_op_gc);
38953+ fscache_stat_unchecked(&fscache_n_op_gc);
38954
38955 ASSERTCMP(atomic_read(&op->usage), ==, 0);
38956
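
In fs/fscache/operation.c the global fscache_op_debug_id becomes atomic_unchecked_t, and the page.c hunk below takes new IDs with atomic_inc_return_unchecked(). The value is only a tag that shows up in _enter()/_debug() traces, so wrap-around is harmless and overflow detection would add nothing but noise. A userspace sketch of a wrap-tolerant ID allocator in that spirit, assuming GCC __atomic builtins (names are illustrative):

    #include <stdio.h>

    static int op_debug_id_model;       /* global tag counter; wrapping is fine */

    /* hand out the next debug id, roughly what atomic_inc_return() does */
    static unsigned int next_debug_id(void)
    {
        return (unsigned int)__atomic_add_fetch(&op_debug_id_model, 1,
                                                __ATOMIC_RELAXED);
    }

    int main(void)
    {
        printf("OP%x\n", next_debug_id());
        printf("OP%x\n", next_debug_id());
        return 0;
    }
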
38957diff -urNp linux-2.6.32.41/fs/fscache/page.c linux-2.6.32.41/fs/fscache/page.c
38958--- linux-2.6.32.41/fs/fscache/page.c 2011-03-27 14:31:47.000000000 -0400
38959+++ linux-2.6.32.41/fs/fscache/page.c 2011-05-04 17:56:28.000000000 -0400
38960@@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct
38961 val = radix_tree_lookup(&cookie->stores, page->index);
38962 if (!val) {
38963 rcu_read_unlock();
38964- fscache_stat(&fscache_n_store_vmscan_not_storing);
38965+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
38966 __fscache_uncache_page(cookie, page);
38967 return true;
38968 }
38969@@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct
38970 spin_unlock(&cookie->stores_lock);
38971
38972 if (xpage) {
38973- fscache_stat(&fscache_n_store_vmscan_cancelled);
38974- fscache_stat(&fscache_n_store_radix_deletes);
38975+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
38976+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
38977 ASSERTCMP(xpage, ==, page);
38978 } else {
38979- fscache_stat(&fscache_n_store_vmscan_gone);
38980+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
38981 }
38982
38983 wake_up_bit(&cookie->flags, 0);
38984@@ -106,7 +106,7 @@ page_busy:
38985 /* we might want to wait here, but that could deadlock the allocator as
38986 * the slow-work threads writing to the cache may all end up sleeping
38987 * on memory allocation */
38988- fscache_stat(&fscache_n_store_vmscan_busy);
38989+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
38990 return false;
38991 }
38992 EXPORT_SYMBOL(__fscache_maybe_release_page);
38993@@ -130,7 +130,7 @@ static void fscache_end_page_write(struc
38994 FSCACHE_COOKIE_STORING_TAG);
38995 if (!radix_tree_tag_get(&cookie->stores, page->index,
38996 FSCACHE_COOKIE_PENDING_TAG)) {
38997- fscache_stat(&fscache_n_store_radix_deletes);
38998+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
38999 xpage = radix_tree_delete(&cookie->stores, page->index);
39000 }
39001 spin_unlock(&cookie->stores_lock);
39002@@ -151,7 +151,7 @@ static void fscache_attr_changed_op(stru
39003
39004 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
39005
39006- fscache_stat(&fscache_n_attr_changed_calls);
39007+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
39008
39009 if (fscache_object_is_active(object)) {
39010 fscache_set_op_state(op, "CallFS");
39011@@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscach
39012
39013 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
39014
39015- fscache_stat(&fscache_n_attr_changed);
39016+ fscache_stat_unchecked(&fscache_n_attr_changed);
39017
39018 op = kzalloc(sizeof(*op), GFP_KERNEL);
39019 if (!op) {
39020- fscache_stat(&fscache_n_attr_changed_nomem);
39021+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
39022 _leave(" = -ENOMEM");
39023 return -ENOMEM;
39024 }
39025@@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscach
39026 if (fscache_submit_exclusive_op(object, op) < 0)
39027 goto nobufs;
39028 spin_unlock(&cookie->lock);
39029- fscache_stat(&fscache_n_attr_changed_ok);
39030+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
39031 fscache_put_operation(op);
39032 _leave(" = 0");
39033 return 0;
39034@@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscach
39035 nobufs:
39036 spin_unlock(&cookie->lock);
39037 kfree(op);
39038- fscache_stat(&fscache_n_attr_changed_nobufs);
39039+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
39040 _leave(" = %d", -ENOBUFS);
39041 return -ENOBUFS;
39042 }
39043@@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache
39044 /* allocate a retrieval operation and attempt to submit it */
39045 op = kzalloc(sizeof(*op), GFP_NOIO);
39046 if (!op) {
39047- fscache_stat(&fscache_n_retrievals_nomem);
39048+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
39049 return NULL;
39050 }
39051
39052@@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_loo
39053 return 0;
39054 }
39055
39056- fscache_stat(&fscache_n_retrievals_wait);
39057+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
39058
39059 jif = jiffies;
39060 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
39061 fscache_wait_bit_interruptible,
39062 TASK_INTERRUPTIBLE) != 0) {
39063- fscache_stat(&fscache_n_retrievals_intr);
39064+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
39065 _leave(" = -ERESTARTSYS");
39066 return -ERESTARTSYS;
39067 }
39068@@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_loo
39069 */
39070 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
39071 struct fscache_retrieval *op,
39072- atomic_t *stat_op_waits,
39073- atomic_t *stat_object_dead)
39074+ atomic_unchecked_t *stat_op_waits,
39075+ atomic_unchecked_t *stat_object_dead)
39076 {
39077 int ret;
39078
39079@@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_ac
39080 goto check_if_dead;
39081
39082 _debug(">>> WT");
39083- fscache_stat(stat_op_waits);
39084+ fscache_stat_unchecked(stat_op_waits);
39085 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
39086 fscache_wait_bit_interruptible,
39087 TASK_INTERRUPTIBLE) < 0) {
39088@@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_ac
39089
39090 check_if_dead:
39091 if (unlikely(fscache_object_is_dead(object))) {
39092- fscache_stat(stat_object_dead);
39093+ fscache_stat_unchecked(stat_object_dead);
39094 return -ENOBUFS;
39095 }
39096 return 0;
39097@@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct
39098
39099 _enter("%p,%p,,,", cookie, page);
39100
39101- fscache_stat(&fscache_n_retrievals);
39102+ fscache_stat_unchecked(&fscache_n_retrievals);
39103
39104 if (hlist_empty(&cookie->backing_objects))
39105 goto nobufs;
39106@@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct
39107 goto nobufs_unlock;
39108 spin_unlock(&cookie->lock);
39109
39110- fscache_stat(&fscache_n_retrieval_ops);
39111+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
39112
39113 /* pin the netfs read context in case we need to do the actual netfs
39114 * read because we've encountered a cache read failure */
39115@@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct
39116
39117 error:
39118 if (ret == -ENOMEM)
39119- fscache_stat(&fscache_n_retrievals_nomem);
39120+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
39121 else if (ret == -ERESTARTSYS)
39122- fscache_stat(&fscache_n_retrievals_intr);
39123+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
39124 else if (ret == -ENODATA)
39125- fscache_stat(&fscache_n_retrievals_nodata);
39126+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
39127 else if (ret < 0)
39128- fscache_stat(&fscache_n_retrievals_nobufs);
39129+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
39130 else
39131- fscache_stat(&fscache_n_retrievals_ok);
39132+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
39133
39134 fscache_put_retrieval(op);
39135 _leave(" = %d", ret);
39136@@ -453,7 +453,7 @@ nobufs_unlock:
39137 spin_unlock(&cookie->lock);
39138 kfree(op);
39139 nobufs:
39140- fscache_stat(&fscache_n_retrievals_nobufs);
39141+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
39142 _leave(" = -ENOBUFS");
39143 return -ENOBUFS;
39144 }
39145@@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct
39146
39147 _enter("%p,,%d,,,", cookie, *nr_pages);
39148
39149- fscache_stat(&fscache_n_retrievals);
39150+ fscache_stat_unchecked(&fscache_n_retrievals);
39151
39152 if (hlist_empty(&cookie->backing_objects))
39153 goto nobufs;
39154@@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct
39155 goto nobufs_unlock;
39156 spin_unlock(&cookie->lock);
39157
39158- fscache_stat(&fscache_n_retrieval_ops);
39159+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
39160
39161 /* pin the netfs read context in case we need to do the actual netfs
39162 * read because we've encountered a cache read failure */
39163@@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct
39164
39165 error:
39166 if (ret == -ENOMEM)
39167- fscache_stat(&fscache_n_retrievals_nomem);
39168+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
39169 else if (ret == -ERESTARTSYS)
39170- fscache_stat(&fscache_n_retrievals_intr);
39171+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
39172 else if (ret == -ENODATA)
39173- fscache_stat(&fscache_n_retrievals_nodata);
39174+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
39175 else if (ret < 0)
39176- fscache_stat(&fscache_n_retrievals_nobufs);
39177+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
39178 else
39179- fscache_stat(&fscache_n_retrievals_ok);
39180+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
39181
39182 fscache_put_retrieval(op);
39183 _leave(" = %d", ret);
39184@@ -570,7 +570,7 @@ nobufs_unlock:
39185 spin_unlock(&cookie->lock);
39186 kfree(op);
39187 nobufs:
39188- fscache_stat(&fscache_n_retrievals_nobufs);
39189+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
39190 _leave(" = -ENOBUFS");
39191 return -ENOBUFS;
39192 }
39193@@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_
39194
39195 _enter("%p,%p,,,", cookie, page);
39196
39197- fscache_stat(&fscache_n_allocs);
39198+ fscache_stat_unchecked(&fscache_n_allocs);
39199
39200 if (hlist_empty(&cookie->backing_objects))
39201 goto nobufs;
39202@@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_
39203 goto nobufs_unlock;
39204 spin_unlock(&cookie->lock);
39205
39206- fscache_stat(&fscache_n_alloc_ops);
39207+ fscache_stat_unchecked(&fscache_n_alloc_ops);
39208
39209 ret = fscache_wait_for_retrieval_activation(
39210 object, op,
39211@@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_
39212
39213 error:
39214 if (ret == -ERESTARTSYS)
39215- fscache_stat(&fscache_n_allocs_intr);
39216+ fscache_stat_unchecked(&fscache_n_allocs_intr);
39217 else if (ret < 0)
39218- fscache_stat(&fscache_n_allocs_nobufs);
39219+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
39220 else
39221- fscache_stat(&fscache_n_allocs_ok);
39222+ fscache_stat_unchecked(&fscache_n_allocs_ok);
39223
39224 fscache_put_retrieval(op);
39225 _leave(" = %d", ret);
39226@@ -651,7 +651,7 @@ nobufs_unlock:
39227 spin_unlock(&cookie->lock);
39228 kfree(op);
39229 nobufs:
39230- fscache_stat(&fscache_n_allocs_nobufs);
39231+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
39232 _leave(" = -ENOBUFS");
39233 return -ENOBUFS;
39234 }
39235@@ -694,7 +694,7 @@ static void fscache_write_op(struct fsca
39236
39237 spin_lock(&cookie->stores_lock);
39238
39239- fscache_stat(&fscache_n_store_calls);
39240+ fscache_stat_unchecked(&fscache_n_store_calls);
39241
39242 /* find a page to store */
39243 page = NULL;
39244@@ -705,7 +705,7 @@ static void fscache_write_op(struct fsca
39245 page = results[0];
39246 _debug("gang %d [%lx]", n, page->index);
39247 if (page->index > op->store_limit) {
39248- fscache_stat(&fscache_n_store_pages_over_limit);
39249+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
39250 goto superseded;
39251 }
39252
39253@@ -721,7 +721,7 @@ static void fscache_write_op(struct fsca
39254
39255 if (page) {
39256 fscache_set_op_state(&op->op, "Store");
39257- fscache_stat(&fscache_n_store_pages);
39258+ fscache_stat_unchecked(&fscache_n_store_pages);
39259 fscache_stat(&fscache_n_cop_write_page);
39260 ret = object->cache->ops->write_page(op, page);
39261 fscache_stat_d(&fscache_n_cop_write_page);
39262@@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_
39263 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
39264 ASSERT(PageFsCache(page));
39265
39266- fscache_stat(&fscache_n_stores);
39267+ fscache_stat_unchecked(&fscache_n_stores);
39268
39269 op = kzalloc(sizeof(*op), GFP_NOIO);
39270 if (!op)
39271@@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_
39272 spin_unlock(&cookie->stores_lock);
39273 spin_unlock(&object->lock);
39274
39275- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
39276+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
39277 op->store_limit = object->store_limit;
39278
39279 if (fscache_submit_op(object, &op->op) < 0)
39280@@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_
39281
39282 spin_unlock(&cookie->lock);
39283 radix_tree_preload_end();
39284- fscache_stat(&fscache_n_store_ops);
39285- fscache_stat(&fscache_n_stores_ok);
39286+ fscache_stat_unchecked(&fscache_n_store_ops);
39287+ fscache_stat_unchecked(&fscache_n_stores_ok);
39288
39289 /* the slow work queue now carries its own ref on the object */
39290 fscache_put_operation(&op->op);
39291@@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_
39292 return 0;
39293
39294 already_queued:
39295- fscache_stat(&fscache_n_stores_again);
39296+ fscache_stat_unchecked(&fscache_n_stores_again);
39297 already_pending:
39298 spin_unlock(&cookie->stores_lock);
39299 spin_unlock(&object->lock);
39300 spin_unlock(&cookie->lock);
39301 radix_tree_preload_end();
39302 kfree(op);
39303- fscache_stat(&fscache_n_stores_ok);
39304+ fscache_stat_unchecked(&fscache_n_stores_ok);
39305 _leave(" = 0");
39306 return 0;
39307
39308@@ -886,14 +886,14 @@ nobufs:
39309 spin_unlock(&cookie->lock);
39310 radix_tree_preload_end();
39311 kfree(op);
39312- fscache_stat(&fscache_n_stores_nobufs);
39313+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
39314 _leave(" = -ENOBUFS");
39315 return -ENOBUFS;
39316
39317 nomem_free:
39318 kfree(op);
39319 nomem:
39320- fscache_stat(&fscache_n_stores_oom);
39321+ fscache_stat_unchecked(&fscache_n_stores_oom);
39322 _leave(" = -ENOMEM");
39323 return -ENOMEM;
39324 }
39325@@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscac
39326 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
39327 ASSERTCMP(page, !=, NULL);
39328
39329- fscache_stat(&fscache_n_uncaches);
39330+ fscache_stat_unchecked(&fscache_n_uncaches);
39331
39332 /* cache withdrawal may beat us to it */
39333 if (!PageFsCache(page))
39334@@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fs
39335 unsigned long loop;
39336
39337 #ifdef CONFIG_FSCACHE_STATS
39338- atomic_add(pagevec->nr, &fscache_n_marks);
39339+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
39340 #endif
39341
39342 for (loop = 0; loop < pagevec->nr; loop++) {
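
The fscache_mark_pages_cached() hunk keeps the existing arrangement where the counter update only exists in CONFIG_FSCACHE_STATS builds, now spelled atomic_add_unchecked(). A small sketch of that compile-time switch, using a stand-in STATS_ENABLED macro (illustrative; the kernel gets the real symbol from Kconfig):

    #include <stdio.h>

    #define STATS_ENABLED 1          /* stand-in for a CONFIG_FSCACHE_STATS=y build */

    static long n_marks_model;

    static void mark_pages_cached_model(unsigned int nr)
    {
    #if STATS_ENABLED
        n_marks_model += nr;         /* bookkeeping only; compiled out when stats are off */
    #endif
        /* ... the real work of tagging the pages would follow here ... */
    }

    int main(void)
    {
        mark_pages_cached_model(16);
        printf("marks=%ld\n", n_marks_model);
        return 0;
    }
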
39343diff -urNp linux-2.6.32.41/fs/fscache/stats.c linux-2.6.32.41/fs/fscache/stats.c
39344--- linux-2.6.32.41/fs/fscache/stats.c 2011-03-27 14:31:47.000000000 -0400
39345+++ linux-2.6.32.41/fs/fscache/stats.c 2011-05-04 17:56:28.000000000 -0400
39346@@ -18,95 +18,95 @@
39347 /*
39348 * operation counters
39349 */
39350-atomic_t fscache_n_op_pend;
39351-atomic_t fscache_n_op_run;
39352-atomic_t fscache_n_op_enqueue;
39353-atomic_t fscache_n_op_requeue;
39354-atomic_t fscache_n_op_deferred_release;
39355-atomic_t fscache_n_op_release;
39356-atomic_t fscache_n_op_gc;
39357-atomic_t fscache_n_op_cancelled;
39358-atomic_t fscache_n_op_rejected;
39359-
39360-atomic_t fscache_n_attr_changed;
39361-atomic_t fscache_n_attr_changed_ok;
39362-atomic_t fscache_n_attr_changed_nobufs;
39363-atomic_t fscache_n_attr_changed_nomem;
39364-atomic_t fscache_n_attr_changed_calls;
39365-
39366-atomic_t fscache_n_allocs;
39367-atomic_t fscache_n_allocs_ok;
39368-atomic_t fscache_n_allocs_wait;
39369-atomic_t fscache_n_allocs_nobufs;
39370-atomic_t fscache_n_allocs_intr;
39371-atomic_t fscache_n_allocs_object_dead;
39372-atomic_t fscache_n_alloc_ops;
39373-atomic_t fscache_n_alloc_op_waits;
39374-
39375-atomic_t fscache_n_retrievals;
39376-atomic_t fscache_n_retrievals_ok;
39377-atomic_t fscache_n_retrievals_wait;
39378-atomic_t fscache_n_retrievals_nodata;
39379-atomic_t fscache_n_retrievals_nobufs;
39380-atomic_t fscache_n_retrievals_intr;
39381-atomic_t fscache_n_retrievals_nomem;
39382-atomic_t fscache_n_retrievals_object_dead;
39383-atomic_t fscache_n_retrieval_ops;
39384-atomic_t fscache_n_retrieval_op_waits;
39385-
39386-atomic_t fscache_n_stores;
39387-atomic_t fscache_n_stores_ok;
39388-atomic_t fscache_n_stores_again;
39389-atomic_t fscache_n_stores_nobufs;
39390-atomic_t fscache_n_stores_oom;
39391-atomic_t fscache_n_store_ops;
39392-atomic_t fscache_n_store_calls;
39393-atomic_t fscache_n_store_pages;
39394-atomic_t fscache_n_store_radix_deletes;
39395-atomic_t fscache_n_store_pages_over_limit;
39396-
39397-atomic_t fscache_n_store_vmscan_not_storing;
39398-atomic_t fscache_n_store_vmscan_gone;
39399-atomic_t fscache_n_store_vmscan_busy;
39400-atomic_t fscache_n_store_vmscan_cancelled;
39401-
39402-atomic_t fscache_n_marks;
39403-atomic_t fscache_n_uncaches;
39404-
39405-atomic_t fscache_n_acquires;
39406-atomic_t fscache_n_acquires_null;
39407-atomic_t fscache_n_acquires_no_cache;
39408-atomic_t fscache_n_acquires_ok;
39409-atomic_t fscache_n_acquires_nobufs;
39410-atomic_t fscache_n_acquires_oom;
39411-
39412-atomic_t fscache_n_updates;
39413-atomic_t fscache_n_updates_null;
39414-atomic_t fscache_n_updates_run;
39415-
39416-atomic_t fscache_n_relinquishes;
39417-atomic_t fscache_n_relinquishes_null;
39418-atomic_t fscache_n_relinquishes_waitcrt;
39419-atomic_t fscache_n_relinquishes_retire;
39420-
39421-atomic_t fscache_n_cookie_index;
39422-atomic_t fscache_n_cookie_data;
39423-atomic_t fscache_n_cookie_special;
39424-
39425-atomic_t fscache_n_object_alloc;
39426-atomic_t fscache_n_object_no_alloc;
39427-atomic_t fscache_n_object_lookups;
39428-atomic_t fscache_n_object_lookups_negative;
39429-atomic_t fscache_n_object_lookups_positive;
39430-atomic_t fscache_n_object_lookups_timed_out;
39431-atomic_t fscache_n_object_created;
39432-atomic_t fscache_n_object_avail;
39433-atomic_t fscache_n_object_dead;
39434-
39435-atomic_t fscache_n_checkaux_none;
39436-atomic_t fscache_n_checkaux_okay;
39437-atomic_t fscache_n_checkaux_update;
39438-atomic_t fscache_n_checkaux_obsolete;
39439+atomic_unchecked_t fscache_n_op_pend;
39440+atomic_unchecked_t fscache_n_op_run;
39441+atomic_unchecked_t fscache_n_op_enqueue;
39442+atomic_unchecked_t fscache_n_op_requeue;
39443+atomic_unchecked_t fscache_n_op_deferred_release;
39444+atomic_unchecked_t fscache_n_op_release;
39445+atomic_unchecked_t fscache_n_op_gc;
39446+atomic_unchecked_t fscache_n_op_cancelled;
39447+atomic_unchecked_t fscache_n_op_rejected;
39448+
39449+atomic_unchecked_t fscache_n_attr_changed;
39450+atomic_unchecked_t fscache_n_attr_changed_ok;
39451+atomic_unchecked_t fscache_n_attr_changed_nobufs;
39452+atomic_unchecked_t fscache_n_attr_changed_nomem;
39453+atomic_unchecked_t fscache_n_attr_changed_calls;
39454+
39455+atomic_unchecked_t fscache_n_allocs;
39456+atomic_unchecked_t fscache_n_allocs_ok;
39457+atomic_unchecked_t fscache_n_allocs_wait;
39458+atomic_unchecked_t fscache_n_allocs_nobufs;
39459+atomic_unchecked_t fscache_n_allocs_intr;
39460+atomic_unchecked_t fscache_n_allocs_object_dead;
39461+atomic_unchecked_t fscache_n_alloc_ops;
39462+atomic_unchecked_t fscache_n_alloc_op_waits;
39463+
39464+atomic_unchecked_t fscache_n_retrievals;
39465+atomic_unchecked_t fscache_n_retrievals_ok;
39466+atomic_unchecked_t fscache_n_retrievals_wait;
39467+atomic_unchecked_t fscache_n_retrievals_nodata;
39468+atomic_unchecked_t fscache_n_retrievals_nobufs;
39469+atomic_unchecked_t fscache_n_retrievals_intr;
39470+atomic_unchecked_t fscache_n_retrievals_nomem;
39471+atomic_unchecked_t fscache_n_retrievals_object_dead;
39472+atomic_unchecked_t fscache_n_retrieval_ops;
39473+atomic_unchecked_t fscache_n_retrieval_op_waits;
39474+
39475+atomic_unchecked_t fscache_n_stores;
39476+atomic_unchecked_t fscache_n_stores_ok;
39477+atomic_unchecked_t fscache_n_stores_again;
39478+atomic_unchecked_t fscache_n_stores_nobufs;
39479+atomic_unchecked_t fscache_n_stores_oom;
39480+atomic_unchecked_t fscache_n_store_ops;
39481+atomic_unchecked_t fscache_n_store_calls;
39482+atomic_unchecked_t fscache_n_store_pages;
39483+atomic_unchecked_t fscache_n_store_radix_deletes;
39484+atomic_unchecked_t fscache_n_store_pages_over_limit;
39485+
39486+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
39487+atomic_unchecked_t fscache_n_store_vmscan_gone;
39488+atomic_unchecked_t fscache_n_store_vmscan_busy;
39489+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
39490+
39491+atomic_unchecked_t fscache_n_marks;
39492+atomic_unchecked_t fscache_n_uncaches;
39493+
39494+atomic_unchecked_t fscache_n_acquires;
39495+atomic_unchecked_t fscache_n_acquires_null;
39496+atomic_unchecked_t fscache_n_acquires_no_cache;
39497+atomic_unchecked_t fscache_n_acquires_ok;
39498+atomic_unchecked_t fscache_n_acquires_nobufs;
39499+atomic_unchecked_t fscache_n_acquires_oom;
39500+
39501+atomic_unchecked_t fscache_n_updates;
39502+atomic_unchecked_t fscache_n_updates_null;
39503+atomic_unchecked_t fscache_n_updates_run;
39504+
39505+atomic_unchecked_t fscache_n_relinquishes;
39506+atomic_unchecked_t fscache_n_relinquishes_null;
39507+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
39508+atomic_unchecked_t fscache_n_relinquishes_retire;
39509+
39510+atomic_unchecked_t fscache_n_cookie_index;
39511+atomic_unchecked_t fscache_n_cookie_data;
39512+atomic_unchecked_t fscache_n_cookie_special;
39513+
39514+atomic_unchecked_t fscache_n_object_alloc;
39515+atomic_unchecked_t fscache_n_object_no_alloc;
39516+atomic_unchecked_t fscache_n_object_lookups;
39517+atomic_unchecked_t fscache_n_object_lookups_negative;
39518+atomic_unchecked_t fscache_n_object_lookups_positive;
39519+atomic_unchecked_t fscache_n_object_lookups_timed_out;
39520+atomic_unchecked_t fscache_n_object_created;
39521+atomic_unchecked_t fscache_n_object_avail;
39522+atomic_unchecked_t fscache_n_object_dead;
39523+
39524+atomic_unchecked_t fscache_n_checkaux_none;
39525+atomic_unchecked_t fscache_n_checkaux_okay;
39526+atomic_unchecked_t fscache_n_checkaux_update;
39527+atomic_unchecked_t fscache_n_checkaux_obsolete;
39528
39529 atomic_t fscache_n_cop_alloc_object;
39530 atomic_t fscache_n_cop_lookup_object;
39531@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
39532 seq_puts(m, "FS-Cache statistics\n");
39533
39534 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
39535- atomic_read(&fscache_n_cookie_index),
39536- atomic_read(&fscache_n_cookie_data),
39537- atomic_read(&fscache_n_cookie_special));
39538+ atomic_read_unchecked(&fscache_n_cookie_index),
39539+ atomic_read_unchecked(&fscache_n_cookie_data),
39540+ atomic_read_unchecked(&fscache_n_cookie_special));
39541
39542 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
39543- atomic_read(&fscache_n_object_alloc),
39544- atomic_read(&fscache_n_object_no_alloc),
39545- atomic_read(&fscache_n_object_avail),
39546- atomic_read(&fscache_n_object_dead));
39547+ atomic_read_unchecked(&fscache_n_object_alloc),
39548+ atomic_read_unchecked(&fscache_n_object_no_alloc),
39549+ atomic_read_unchecked(&fscache_n_object_avail),
39550+ atomic_read_unchecked(&fscache_n_object_dead));
39551 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
39552- atomic_read(&fscache_n_checkaux_none),
39553- atomic_read(&fscache_n_checkaux_okay),
39554- atomic_read(&fscache_n_checkaux_update),
39555- atomic_read(&fscache_n_checkaux_obsolete));
39556+ atomic_read_unchecked(&fscache_n_checkaux_none),
39557+ atomic_read_unchecked(&fscache_n_checkaux_okay),
39558+ atomic_read_unchecked(&fscache_n_checkaux_update),
39559+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
39560
39561 seq_printf(m, "Pages : mrk=%u unc=%u\n",
39562- atomic_read(&fscache_n_marks),
39563- atomic_read(&fscache_n_uncaches));
39564+ atomic_read_unchecked(&fscache_n_marks),
39565+ atomic_read_unchecked(&fscache_n_uncaches));
39566
39567 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
39568 " oom=%u\n",
39569- atomic_read(&fscache_n_acquires),
39570- atomic_read(&fscache_n_acquires_null),
39571- atomic_read(&fscache_n_acquires_no_cache),
39572- atomic_read(&fscache_n_acquires_ok),
39573- atomic_read(&fscache_n_acquires_nobufs),
39574- atomic_read(&fscache_n_acquires_oom));
39575+ atomic_read_unchecked(&fscache_n_acquires),
39576+ atomic_read_unchecked(&fscache_n_acquires_null),
39577+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
39578+ atomic_read_unchecked(&fscache_n_acquires_ok),
39579+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
39580+ atomic_read_unchecked(&fscache_n_acquires_oom));
39581
39582 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
39583- atomic_read(&fscache_n_object_lookups),
39584- atomic_read(&fscache_n_object_lookups_negative),
39585- atomic_read(&fscache_n_object_lookups_positive),
39586- atomic_read(&fscache_n_object_lookups_timed_out),
39587- atomic_read(&fscache_n_object_created));
39588+ atomic_read_unchecked(&fscache_n_object_lookups),
39589+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
39590+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
39591+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
39592+ atomic_read_unchecked(&fscache_n_object_created));
39593
39594 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
39595- atomic_read(&fscache_n_updates),
39596- atomic_read(&fscache_n_updates_null),
39597- atomic_read(&fscache_n_updates_run));
39598+ atomic_read_unchecked(&fscache_n_updates),
39599+ atomic_read_unchecked(&fscache_n_updates_null),
39600+ atomic_read_unchecked(&fscache_n_updates_run));
39601
39602 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
39603- atomic_read(&fscache_n_relinquishes),
39604- atomic_read(&fscache_n_relinquishes_null),
39605- atomic_read(&fscache_n_relinquishes_waitcrt),
39606- atomic_read(&fscache_n_relinquishes_retire));
39607+ atomic_read_unchecked(&fscache_n_relinquishes),
39608+ atomic_read_unchecked(&fscache_n_relinquishes_null),
39609+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
39610+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
39611
39612 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
39613- atomic_read(&fscache_n_attr_changed),
39614- atomic_read(&fscache_n_attr_changed_ok),
39615- atomic_read(&fscache_n_attr_changed_nobufs),
39616- atomic_read(&fscache_n_attr_changed_nomem),
39617- atomic_read(&fscache_n_attr_changed_calls));
39618+ atomic_read_unchecked(&fscache_n_attr_changed),
39619+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
39620+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
39621+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
39622+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
39623
39624 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
39625- atomic_read(&fscache_n_allocs),
39626- atomic_read(&fscache_n_allocs_ok),
39627- atomic_read(&fscache_n_allocs_wait),
39628- atomic_read(&fscache_n_allocs_nobufs),
39629- atomic_read(&fscache_n_allocs_intr));
39630+ atomic_read_unchecked(&fscache_n_allocs),
39631+ atomic_read_unchecked(&fscache_n_allocs_ok),
39632+ atomic_read_unchecked(&fscache_n_allocs_wait),
39633+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
39634+ atomic_read_unchecked(&fscache_n_allocs_intr));
39635 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
39636- atomic_read(&fscache_n_alloc_ops),
39637- atomic_read(&fscache_n_alloc_op_waits),
39638- atomic_read(&fscache_n_allocs_object_dead));
39639+ atomic_read_unchecked(&fscache_n_alloc_ops),
39640+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
39641+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
39642
39643 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
39644 " int=%u oom=%u\n",
39645- atomic_read(&fscache_n_retrievals),
39646- atomic_read(&fscache_n_retrievals_ok),
39647- atomic_read(&fscache_n_retrievals_wait),
39648- atomic_read(&fscache_n_retrievals_nodata),
39649- atomic_read(&fscache_n_retrievals_nobufs),
39650- atomic_read(&fscache_n_retrievals_intr),
39651- atomic_read(&fscache_n_retrievals_nomem));
39652+ atomic_read_unchecked(&fscache_n_retrievals),
39653+ atomic_read_unchecked(&fscache_n_retrievals_ok),
39654+ atomic_read_unchecked(&fscache_n_retrievals_wait),
39655+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
39656+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
39657+ atomic_read_unchecked(&fscache_n_retrievals_intr),
39658+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
39659 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
39660- atomic_read(&fscache_n_retrieval_ops),
39661- atomic_read(&fscache_n_retrieval_op_waits),
39662- atomic_read(&fscache_n_retrievals_object_dead));
39663+ atomic_read_unchecked(&fscache_n_retrieval_ops),
39664+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
39665+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
39666
39667 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
39668- atomic_read(&fscache_n_stores),
39669- atomic_read(&fscache_n_stores_ok),
39670- atomic_read(&fscache_n_stores_again),
39671- atomic_read(&fscache_n_stores_nobufs),
39672- atomic_read(&fscache_n_stores_oom));
39673+ atomic_read_unchecked(&fscache_n_stores),
39674+ atomic_read_unchecked(&fscache_n_stores_ok),
39675+ atomic_read_unchecked(&fscache_n_stores_again),
39676+ atomic_read_unchecked(&fscache_n_stores_nobufs),
39677+ atomic_read_unchecked(&fscache_n_stores_oom));
39678 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
39679- atomic_read(&fscache_n_store_ops),
39680- atomic_read(&fscache_n_store_calls),
39681- atomic_read(&fscache_n_store_pages),
39682- atomic_read(&fscache_n_store_radix_deletes),
39683- atomic_read(&fscache_n_store_pages_over_limit));
39684+ atomic_read_unchecked(&fscache_n_store_ops),
39685+ atomic_read_unchecked(&fscache_n_store_calls),
39686+ atomic_read_unchecked(&fscache_n_store_pages),
39687+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
39688+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
39689
39690 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
39691- atomic_read(&fscache_n_store_vmscan_not_storing),
39692- atomic_read(&fscache_n_store_vmscan_gone),
39693- atomic_read(&fscache_n_store_vmscan_busy),
39694- atomic_read(&fscache_n_store_vmscan_cancelled));
39695+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
39696+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
39697+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
39698+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
39699
39700 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
39701- atomic_read(&fscache_n_op_pend),
39702- atomic_read(&fscache_n_op_run),
39703- atomic_read(&fscache_n_op_enqueue),
39704- atomic_read(&fscache_n_op_cancelled),
39705- atomic_read(&fscache_n_op_rejected));
39706+ atomic_read_unchecked(&fscache_n_op_pend),
39707+ atomic_read_unchecked(&fscache_n_op_run),
39708+ atomic_read_unchecked(&fscache_n_op_enqueue),
39709+ atomic_read_unchecked(&fscache_n_op_cancelled),
39710+ atomic_read_unchecked(&fscache_n_op_rejected));
39711 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
39712- atomic_read(&fscache_n_op_deferred_release),
39713- atomic_read(&fscache_n_op_release),
39714- atomic_read(&fscache_n_op_gc));
39715+ atomic_read_unchecked(&fscache_n_op_deferred_release),
39716+ atomic_read_unchecked(&fscache_n_op_release),
39717+ atomic_read_unchecked(&fscache_n_op_gc));
39718
39719 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
39720 atomic_read(&fscache_n_cop_alloc_object),
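
The stats.c hunk is the bulk of the conversion: every statistics counter definition changes type, and every atomic_read() in fscache_stats_show() becomes atomic_read_unchecked(), while the fscache_n_cop_* operation counters deliberately stay plain atomic_t. The reporting side only needs a momentary snapshot of each counter, which is why a relaxed read is all it takes. A compact userspace sketch of that snapshot-and-print step, assuming GCC __atomic builtins (the layout and names are illustrative, not the kernel's seq_file interface):

    #include <stdio.h>

    static int n_stores_model, n_stores_ok_model;

    /* print one statistics line from relaxed snapshots of the counters */
    static void stats_show_model(FILE *m)
    {
        fprintf(m, "Stores : n=%u ok=%u\n",
                (unsigned int)__atomic_load_n(&n_stores_model, __ATOMIC_RELAXED),
                (unsigned int)__atomic_load_n(&n_stores_ok_model, __ATOMIC_RELAXED));
    }

    int main(void)
    {
        __atomic_fetch_add(&n_stores_model, 3, __ATOMIC_RELAXED);
        __atomic_fetch_add(&n_stores_ok_model, 2, __ATOMIC_RELAXED);
        stats_show_model(stdout);
        return 0;
    }
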
39721diff -urNp linux-2.6.32.41/fs/fs_struct.c linux-2.6.32.41/fs/fs_struct.c
39722--- linux-2.6.32.41/fs/fs_struct.c 2011-03-27 14:31:47.000000000 -0400
39723+++ linux-2.6.32.41/fs/fs_struct.c 2011-04-17 15:56:46.000000000 -0400
39724@@ -4,6 +4,7 @@
39725 #include <linux/path.h>
39726 #include <linux/slab.h>
39727 #include <linux/fs_struct.h>
39728+#include <linux/grsecurity.h>
39729
39730 /*
39731 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
39732@@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, s
39733 old_root = fs->root;
39734 fs->root = *path;
39735 path_get(path);
39736+ gr_set_chroot_entries(current, path);
39737 write_unlock(&fs->lock);
39738 if (old_root.dentry)
39739 path_put(&old_root);
39740@@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_roo
39741 && fs->root.mnt == old_root->mnt) {
39742 path_get(new_root);
39743 fs->root = *new_root;
39744+ gr_set_chroot_entries(p, new_root);
39745 count++;
39746 }
39747 if (fs->pwd.dentry == old_root->dentry
39748@@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
39749 task_lock(tsk);
39750 write_lock(&fs->lock);
39751 tsk->fs = NULL;
39752- kill = !--fs->users;
39753+ gr_clear_chroot_entries(tsk);
39754+ kill = !atomic_dec_return(&fs->users);
39755 write_unlock(&fs->lock);
39756 task_unlock(tsk);
39757 if (kill)
39758@@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct
39759 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
39760 /* We don't need to lock fs - think why ;-) */
39761 if (fs) {
39762- fs->users = 1;
39763+ atomic_set(&fs->users, 1);
39764 fs->in_exec = 0;
39765 rwlock_init(&fs->lock);
39766 fs->umask = old->umask;
39767@@ -127,8 +131,9 @@ int unshare_fs_struct(void)
39768
39769 task_lock(current);
39770 write_lock(&fs->lock);
39771- kill = !--fs->users;
39772+ kill = !atomic_dec_return(&fs->users);
39773 current->fs = new_fs;
39774+ gr_set_chroot_entries(current, &new_fs->root);
39775 write_unlock(&fs->lock);
39776 task_unlock(current);
39777
39778@@ -147,7 +152,7 @@ EXPORT_SYMBOL(current_umask);
39779
39780 /* to be mentioned only in INIT_TASK */
39781 struct fs_struct init_fs = {
39782- .users = 1,
39783+ .users = ATOMIC_INIT(1),
39784 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
39785 .umask = 0022,
39786 };
39787@@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
39788 task_lock(current);
39789
39790 write_lock(&init_fs.lock);
39791- init_fs.users++;
39792+ atomic_inc(&init_fs.users);
39793 write_unlock(&init_fs.lock);
39794
39795 write_lock(&fs->lock);
39796 current->fs = &init_fs;
39797- kill = !--fs->users;
39798+ gr_set_chroot_entries(current, &current->fs->root);
39799+ kill = !atomic_dec_return(&fs->users);
39800 write_unlock(&fs->lock);
39801
39802 task_unlock(current);
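
The fs/fs_struct.c changes turn fs->users from a plain integer guarded by fs->lock into an atomic_t (ATOMIC_INIT(1), atomic_inc(), atomic_dec_return()), and hook gr_set_chroot_entries()/gr_clear_chroot_entries() into the points where a task gains or drops an fs_struct. The "kill = !atomic_dec_return(&fs->users)" idiom means whichever task drops the last reference is the one that frees the structure. A minimal userspace sketch of that last-reference-frees pattern, assuming GCC __atomic builtins (the struct and function names are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct fs_struct_model {
        int users;                  /* reference count; starts at 1 for the creator */
        int umask;
    };

    static struct fs_struct_model *fs_alloc(void)
    {
        struct fs_struct_model *fs = calloc(1, sizeof(*fs));
        if (fs)
            fs->users = 1;
        return fs;
    }

    static void fs_get(struct fs_struct_model *fs)
    {
        __atomic_fetch_add(&fs->users, 1, __ATOMIC_RELAXED);
    }

    static void fs_put(struct fs_struct_model *fs)
    {
        /* whoever takes the count to zero is responsible for freeing */
        if (__atomic_sub_fetch(&fs->users, 1, __ATOMIC_ACQ_REL) == 0)
            free(fs);
    }

    int main(void)
    {
        struct fs_struct_model *fs = fs_alloc();
        if (!fs)
            return 1;
        fs_get(fs);                 /* a second task shares it */
        fs_put(fs);                 /* first drop: still alive */
        fs_put(fs);                 /* last drop: freed here */
        puts("done");
        return 0;
    }
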
39803diff -urNp linux-2.6.32.41/fs/fuse/cuse.c linux-2.6.32.41/fs/fuse/cuse.c
39804--- linux-2.6.32.41/fs/fuse/cuse.c 2011-03-27 14:31:47.000000000 -0400
39805+++ linux-2.6.32.41/fs/fuse/cuse.c 2011-04-17 15:56:46.000000000 -0400
39806@@ -528,8 +528,18 @@ static int cuse_channel_release(struct i
39807 return rc;
39808 }
39809
39810-static struct file_operations cuse_channel_fops; /* initialized during init */
39811-
39812+static const struct file_operations cuse_channel_fops = { /* initialized during init */
39813+ .owner = THIS_MODULE,
39814+ .llseek = no_llseek,
39815+ .read = do_sync_read,
39816+ .aio_read = fuse_dev_read,
39817+ .write = do_sync_write,
39818+ .aio_write = fuse_dev_write,
39819+ .poll = fuse_dev_poll,
39820+ .open = cuse_channel_open,
39821+ .release = cuse_channel_release,
39822+ .fasync = fuse_dev_fasync,
39823+};
39824
39825 /**************************************************************************
39826 * Misc stuff and module initializatiion
39827@@ -575,12 +585,6 @@ static int __init cuse_init(void)
39828 for (i = 0; i < CUSE_CONNTBL_LEN; i++)
39829 INIT_LIST_HEAD(&cuse_conntbl[i]);
39830
39831- /* inherit and extend fuse_dev_operations */
39832- cuse_channel_fops = fuse_dev_operations;
39833- cuse_channel_fops.owner = THIS_MODULE;
39834- cuse_channel_fops.open = cuse_channel_open;
39835- cuse_channel_fops.release = cuse_channel_release;
39836-
39837 cuse_class = class_create(THIS_MODULE, "cuse");
39838 if (IS_ERR(cuse_class))
39839 return PTR_ERR(cuse_class);
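
The cuse.c hunk replaces the old runtime construction of cuse_channel_fops (copy fuse_dev_operations, then patch .owner, .open and .release during init) with a fully written-out static const struct file_operations; the dev.c and fuse_i.h hunks below export fuse_dev_read/write/poll/fasync so the initializer can name them directly. Making the ops table const is what lets it be placed in read-only memory, which is the point of the constification work throughout this patch; note that the "/* initialized during init */" comment carried over onto the new table is now stale, since it is initialized at compile time. A small sketch of the same move on an invented ops table (everything below is illustrative, not the kernel's struct file_operations):

    #include <stdio.h>

    struct ops_model {
        int  (*open)(const char *name);
        void (*release)(void);
    };

    static int  chan_open(const char *name) { printf("open %s\n", name); return 0; }
    static void chan_release(void)          { puts("release"); }

    /* fully initialized and const: it can live in a read-only section instead of
     * being copied from a template and patched up at module init time */
    static const struct ops_model channel_ops = {
        .open    = chan_open,
        .release = chan_release,
    };

    int main(void)
    {
        channel_ops.open("cuse-demo");
        channel_ops.release();
        return 0;
    }
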
39840diff -urNp linux-2.6.32.41/fs/fuse/dev.c linux-2.6.32.41/fs/fuse/dev.c
39841--- linux-2.6.32.41/fs/fuse/dev.c 2011-03-27 14:31:47.000000000 -0400
39842+++ linux-2.6.32.41/fs/fuse/dev.c 2011-04-17 15:56:46.000000000 -0400
39843@@ -745,7 +745,7 @@ __releases(&fc->lock)
39844 * request_end(). Otherwise add it to the processing list, and set
39845 * the 'sent' flag.
39846 */
39847-static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
39848+ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
39849 unsigned long nr_segs, loff_t pos)
39850 {
39851 int err;
39852@@ -827,6 +827,7 @@ static ssize_t fuse_dev_read(struct kioc
39853 spin_unlock(&fc->lock);
39854 return err;
39855 }
39856+EXPORT_SYMBOL_GPL(fuse_dev_read);
39857
39858 static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
39859 struct fuse_copy_state *cs)
39860@@ -885,7 +886,7 @@ static int fuse_notify_inval_entry(struc
39861 {
39862 struct fuse_notify_inval_entry_out outarg;
39863 int err = -EINVAL;
39864- char buf[FUSE_NAME_MAX+1];
39865+ char *buf = NULL;
39866 struct qstr name;
39867
39868 if (size < sizeof(outarg))
39869@@ -899,6 +900,11 @@ static int fuse_notify_inval_entry(struc
39870 if (outarg.namelen > FUSE_NAME_MAX)
39871 goto err;
39872
39873+ err = -ENOMEM;
39874+ buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
39875+ if (!buf)
39876+ goto err;
39877+
39878 name.name = buf;
39879 name.len = outarg.namelen;
39880 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
39881@@ -910,17 +916,15 @@ static int fuse_notify_inval_entry(struc
39882
39883 down_read(&fc->killsb);
39884 err = -ENOENT;
39885- if (!fc->sb)
39886- goto err_unlock;
39887-
39888- err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
39889-
39890-err_unlock:
39891+ if (fc->sb)
39892+ err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
39893 up_read(&fc->killsb);
39894+ kfree(buf);
39895 return err;
39896
39897 err:
39898 fuse_copy_finish(cs);
39899+ kfree(buf);
39900 return err;
39901 }
39902
39903@@ -987,7 +991,7 @@ static int copy_out_args(struct fuse_cop
39904 * it from the list and copy the rest of the buffer to the request.
39905 * The request is finished by calling request_end()
39906 */
39907-static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
39908+ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
39909 unsigned long nr_segs, loff_t pos)
39910 {
39911 int err;
39912@@ -1083,8 +1087,9 @@ static ssize_t fuse_dev_write(struct kio
39913 fuse_copy_finish(&cs);
39914 return err;
39915 }
39916+EXPORT_SYMBOL_GPL(fuse_dev_write);
39917
39918-static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
39919+unsigned fuse_dev_poll(struct file *file, poll_table *wait)
39920 {
39921 unsigned mask = POLLOUT | POLLWRNORM;
39922 struct fuse_conn *fc = fuse_get_conn(file);
39923@@ -1102,6 +1107,7 @@ static unsigned fuse_dev_poll(struct fil
39924
39925 return mask;
39926 }
39927+EXPORT_SYMBOL_GPL(fuse_dev_poll);
39928
39929 /*
39930 * Abort all requests on the given list (pending or processing)
39931@@ -1218,7 +1224,7 @@ int fuse_dev_release(struct inode *inode
39932 }
39933 EXPORT_SYMBOL_GPL(fuse_dev_release);
39934
39935-static int fuse_dev_fasync(int fd, struct file *file, int on)
39936+int fuse_dev_fasync(int fd, struct file *file, int on)
39937 {
39938 struct fuse_conn *fc = fuse_get_conn(file);
39939 if (!fc)
39940@@ -1227,6 +1233,7 @@ static int fuse_dev_fasync(int fd, struc
39941 /* No locking - fasync_helper does its own locking */
39942 return fasync_helper(fd, file, on, &fc->fasync);
39943 }
39944+EXPORT_SYMBOL_GPL(fuse_dev_fasync);
39945
39946 const struct file_operations fuse_dev_operations = {
39947 .owner = THIS_MODULE,
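
In fuse_notify_inval_entry() the patch moves the name buffer off the kernel stack: the char[FUSE_NAME_MAX+1] array becomes a kmalloc() allocation, and the unlock path is folded so that both the success and the error exits kfree() it. Together with the pax_track_stack() annotations elsewhere in this patch, the aim is to keep kernel stack frames small. A userspace sketch of the allocate/copy/free shape with a single cleanup label, assuming ordinary libc calls (names and error values are illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define NAME_MAX_MODEL 1024             /* stand-in for FUSE_NAME_MAX */

    static int handle_name(const char *src, size_t len)
    {
        char *buf = NULL;
        int err = -22;                      /* stands in for -EINVAL */

        if (len > NAME_MAX_MODEL)
            goto out;

        err = -12;                          /* stands in for -ENOMEM */
        buf = malloc(NAME_MAX_MODEL + 1);   /* heap instead of a large stack array */
        if (!buf)
            goto out;

        memcpy(buf, src, len);
        buf[len] = '\0';
        printf("invalidate \"%s\"\n", buf);
        err = 0;
    out:
        free(buf);                          /* free(NULL) is a no-op, so one exit suffices */
        return err;
    }

    int main(void)
    {
        return handle_name("demo-entry", 10) ? 1 : 0;
    }
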
39948diff -urNp linux-2.6.32.41/fs/fuse/dir.c linux-2.6.32.41/fs/fuse/dir.c
39949--- linux-2.6.32.41/fs/fuse/dir.c 2011-03-27 14:31:47.000000000 -0400
39950+++ linux-2.6.32.41/fs/fuse/dir.c 2011-04-17 15:56:46.000000000 -0400
39951@@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *de
39952 return link;
39953 }
39954
39955-static void free_link(char *link)
39956+static void free_link(const char *link)
39957 {
39958 if (!IS_ERR(link))
39959 free_page((unsigned long) link);
39960diff -urNp linux-2.6.32.41/fs/fuse/fuse_i.h linux-2.6.32.41/fs/fuse/fuse_i.h
39961--- linux-2.6.32.41/fs/fuse/fuse_i.h 2011-03-27 14:31:47.000000000 -0400
39962+++ linux-2.6.32.41/fs/fuse/fuse_i.h 2011-04-17 15:56:46.000000000 -0400
39963@@ -525,6 +525,16 @@ extern const struct file_operations fuse
39964
39965 extern const struct dentry_operations fuse_dentry_operations;
39966
39967+extern ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
39968+ unsigned long nr_segs, loff_t pos);
39969+
39970+extern ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
39971+ unsigned long nr_segs, loff_t pos);
39972+
39973+extern unsigned fuse_dev_poll(struct file *file, poll_table *wait);
39974+
39975+extern int fuse_dev_fasync(int fd, struct file *file, int on);
39976+
39977 /**
39978 * Inode to nodeid comparison.
39979 */
39980diff -urNp linux-2.6.32.41/fs/gfs2/ops_inode.c linux-2.6.32.41/fs/gfs2/ops_inode.c
39981--- linux-2.6.32.41/fs/gfs2/ops_inode.c 2011-03-27 14:31:47.000000000 -0400
39982+++ linux-2.6.32.41/fs/gfs2/ops_inode.c 2011-05-16 21:46:57.000000000 -0400
39983@@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odi
39984 unsigned int x;
39985 int error;
39986
39987+ pax_track_stack();
39988+
39989 if (ndentry->d_inode) {
39990 nip = GFS2_I(ndentry->d_inode);
39991 if (ip == nip)
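
The pax_track_stack() call added here, and the matching calls in the hfsplus, jbd, jffs2 and lockd hunks that follow, mark functions whose local variables produce unusually large stack frames (struct hfs_find_data, on-stack request structures, and so on). The helper itself comes from the PAX_MEMORY_STACKLEAK support elsewhere in the patch and, roughly, records how deep the current call chain reaches into the kernel stack. A loose userspace model of "note how deep this frame sits", assuming GCC's __builtin_frame_address() and a downward-growing stack (the macro and frame size are illustrative; this is not the PaX implementation):

    #include <stdint.h>
    #include <stdio.h>

    static uintptr_t stack_base_model;

    #define track_stack_model() do {                                        \
            uintptr_t here = (uintptr_t)__builtin_frame_address(0);         \
            fprintf(stderr, "frame at depth %lu bytes\n",                   \
                    (unsigned long)(stack_base_model - here));              \
        } while (0)

    static void big_frame_function(void)
    {
        char scratch[8192];                 /* the kind of frame being flagged */

        track_stack_model();
        scratch[0] = 0;
        (void)scratch;
    }

    int main(void)
    {
        stack_base_model = (uintptr_t)__builtin_frame_address(0);
        big_frame_function();
        return 0;
    }
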
39992diff -urNp linux-2.6.32.41/fs/gfs2/sys.c linux-2.6.32.41/fs/gfs2/sys.c
39993--- linux-2.6.32.41/fs/gfs2/sys.c 2011-03-27 14:31:47.000000000 -0400
39994+++ linux-2.6.32.41/fs/gfs2/sys.c 2011-04-17 15:56:46.000000000 -0400
39995@@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct ko
39996 return a->store ? a->store(sdp, buf, len) : len;
39997 }
39998
39999-static struct sysfs_ops gfs2_attr_ops = {
40000+static const struct sysfs_ops gfs2_attr_ops = {
40001 .show = gfs2_attr_show,
40002 .store = gfs2_attr_store,
40003 };
40004@@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset
40005 return 0;
40006 }
40007
40008-static struct kset_uevent_ops gfs2_uevent_ops = {
40009+static const struct kset_uevent_ops gfs2_uevent_ops = {
40010 .uevent = gfs2_uevent,
40011 };
40012
40013diff -urNp linux-2.6.32.41/fs/hfsplus/catalog.c linux-2.6.32.41/fs/hfsplus/catalog.c
40014--- linux-2.6.32.41/fs/hfsplus/catalog.c 2011-03-27 14:31:47.000000000 -0400
40015+++ linux-2.6.32.41/fs/hfsplus/catalog.c 2011-05-16 21:46:57.000000000 -0400
40016@@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block
40017 int err;
40018 u16 type;
40019
40020+ pax_track_stack();
40021+
40022 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
40023 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
40024 if (err)
40025@@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct
40026 int entry_size;
40027 int err;
40028
40029+ pax_track_stack();
40030+
40031 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
40032 sb = dir->i_sb;
40033 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
40034@@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
40035 int entry_size, type;
40036 int err = 0;
40037
40038+ pax_track_stack();
40039+
40040 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
40041 dst_dir->i_ino, dst_name->name);
40042 sb = src_dir->i_sb;
40043diff -urNp linux-2.6.32.41/fs/hfsplus/dir.c linux-2.6.32.41/fs/hfsplus/dir.c
40044--- linux-2.6.32.41/fs/hfsplus/dir.c 2011-03-27 14:31:47.000000000 -0400
40045+++ linux-2.6.32.41/fs/hfsplus/dir.c 2011-05-16 21:46:57.000000000 -0400
40046@@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *
40047 struct hfsplus_readdir_data *rd;
40048 u16 type;
40049
40050+ pax_track_stack();
40051+
40052 if (filp->f_pos >= inode->i_size)
40053 return 0;
40054
40055diff -urNp linux-2.6.32.41/fs/hfsplus/inode.c linux-2.6.32.41/fs/hfsplus/inode.c
40056--- linux-2.6.32.41/fs/hfsplus/inode.c 2011-03-27 14:31:47.000000000 -0400
40057+++ linux-2.6.32.41/fs/hfsplus/inode.c 2011-05-16 21:46:57.000000000 -0400
40058@@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode
40059 int res = 0;
40060 u16 type;
40061
40062+ pax_track_stack();
40063+
40064 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
40065
40066 HFSPLUS_I(inode).dev = 0;
40067@@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode
40068 struct hfs_find_data fd;
40069 hfsplus_cat_entry entry;
40070
40071+ pax_track_stack();
40072+
40073 if (HFSPLUS_IS_RSRC(inode))
40074 main_inode = HFSPLUS_I(inode).rsrc_inode;
40075
40076diff -urNp linux-2.6.32.41/fs/hfsplus/ioctl.c linux-2.6.32.41/fs/hfsplus/ioctl.c
40077--- linux-2.6.32.41/fs/hfsplus/ioctl.c 2011-03-27 14:31:47.000000000 -0400
40078+++ linux-2.6.32.41/fs/hfsplus/ioctl.c 2011-05-16 21:46:57.000000000 -0400
40079@@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dent
40080 struct hfsplus_cat_file *file;
40081 int res;
40082
40083+ pax_track_stack();
40084+
40085 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
40086 return -EOPNOTSUPP;
40087
40088@@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *
40089 struct hfsplus_cat_file *file;
40090 ssize_t res = 0;
40091
40092+ pax_track_stack();
40093+
40094 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
40095 return -EOPNOTSUPP;
40096
40097diff -urNp linux-2.6.32.41/fs/hfsplus/super.c linux-2.6.32.41/fs/hfsplus/super.c
40098--- linux-2.6.32.41/fs/hfsplus/super.c 2011-03-27 14:31:47.000000000 -0400
40099+++ linux-2.6.32.41/fs/hfsplus/super.c 2011-05-16 21:46:57.000000000 -0400
40100@@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct sup
40101 struct nls_table *nls = NULL;
40102 int err = -EINVAL;
40103
40104+ pax_track_stack();
40105+
40106 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
40107 if (!sbi)
40108 return -ENOMEM;
40109diff -urNp linux-2.6.32.41/fs/hugetlbfs/inode.c linux-2.6.32.41/fs/hugetlbfs/inode.c
40110--- linux-2.6.32.41/fs/hugetlbfs/inode.c 2011-03-27 14:31:47.000000000 -0400
40111+++ linux-2.6.32.41/fs/hugetlbfs/inode.c 2011-04-17 15:56:46.000000000 -0400
40112@@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs
40113 .kill_sb = kill_litter_super,
40114 };
40115
40116-static struct vfsmount *hugetlbfs_vfsmount;
40117+struct vfsmount *hugetlbfs_vfsmount;
40118
40119 static int can_do_hugetlb_shm(void)
40120 {
40121diff -urNp linux-2.6.32.41/fs/ioctl.c linux-2.6.32.41/fs/ioctl.c
40122--- linux-2.6.32.41/fs/ioctl.c 2011-03-27 14:31:47.000000000 -0400
40123+++ linux-2.6.32.41/fs/ioctl.c 2011-04-17 15:56:46.000000000 -0400
40124@@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiema
40125 u64 phys, u64 len, u32 flags)
40126 {
40127 struct fiemap_extent extent;
40128- struct fiemap_extent *dest = fieinfo->fi_extents_start;
40129+ struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
40130
40131 /* only count the extents */
40132 if (fieinfo->fi_extents_max == 0) {
40133@@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *fil
40134
40135 fieinfo.fi_flags = fiemap.fm_flags;
40136 fieinfo.fi_extents_max = fiemap.fm_extent_count;
40137- fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
40138+ fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
40139
40140 if (fiemap.fm_extent_count != 0 &&
40141 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
40142@@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *fil
40143 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
40144 fiemap.fm_flags = fieinfo.fi_flags;
40145 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
40146- if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
40147+ if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
40148 error = -EFAULT;
40149
40150 return error;
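
The fs/ioctl.c hunk does not change behaviour: it adds the __user (and, on the cast, __force) address-space annotations so that sparse can tell kernel pointers apart from the userland pointers carried in the ioctl argument. When the checker is not running, those macros expand to nothing. A compact illustration of how such annotations are typically wired up, assuming a sparse-style address_space attribute behind a checker guard (simplified from the kernel's compiler headers; the copy helper below is a stand-in, not copy_to_user()):

    #include <stdio.h>
    #include <string.h>

    #ifdef __CHECKER__
    # define __user __attribute__((noderef, address_space(1)))
    #else
    # define __user                     /* ordinary compilers see nothing */
    #endif

    /* stand-in for copy_to_user(): dst is tagged as a userland pointer */
    static unsigned long copy_to_user_model(void __user *dst, const void *src,
                                            unsigned long n)
    {
        memcpy((void *)dst, src, n);    /* demo only; real kernel code never
                                         * dereferences a __user pointer directly */
        return 0;
    }

    int main(void)
    {
        char dst[16];
        unsigned long left = copy_to_user_model((void __user *)dst, "fiemap", 7);

        printf("uncopied=%lu name=%s\n", left, dst);
        return 0;
    }
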
40151diff -urNp linux-2.6.32.41/fs/jbd/checkpoint.c linux-2.6.32.41/fs/jbd/checkpoint.c
40152--- linux-2.6.32.41/fs/jbd/checkpoint.c 2011-03-27 14:31:47.000000000 -0400
40153+++ linux-2.6.32.41/fs/jbd/checkpoint.c 2011-05-16 21:46:57.000000000 -0400
40154@@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal
40155 tid_t this_tid;
40156 int result;
40157
40158+ pax_track_stack();
40159+
40160 jbd_debug(1, "Start checkpoint\n");
40161
40162 /*
40163diff -urNp linux-2.6.32.41/fs/jffs2/compr_rtime.c linux-2.6.32.41/fs/jffs2/compr_rtime.c
40164--- linux-2.6.32.41/fs/jffs2/compr_rtime.c 2011-03-27 14:31:47.000000000 -0400
40165+++ linux-2.6.32.41/fs/jffs2/compr_rtime.c 2011-05-16 21:46:57.000000000 -0400
40166@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
40167 int outpos = 0;
40168 int pos=0;
40169
40170+ pax_track_stack();
40171+
40172 memset(positions,0,sizeof(positions));
40173
40174 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
40175@@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsign
40176 int outpos = 0;
40177 int pos=0;
40178
40179+ pax_track_stack();
40180+
40181 memset(positions,0,sizeof(positions));
40182
40183 while (outpos<destlen) {
40184diff -urNp linux-2.6.32.41/fs/jffs2/compr_rubin.c linux-2.6.32.41/fs/jffs2/compr_rubin.c
40185--- linux-2.6.32.41/fs/jffs2/compr_rubin.c 2011-03-27 14:31:47.000000000 -0400
40186+++ linux-2.6.32.41/fs/jffs2/compr_rubin.c 2011-05-16 21:46:57.000000000 -0400
40187@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
40188 int ret;
40189 uint32_t mysrclen, mydstlen;
40190
40191+ pax_track_stack();
40192+
40193 mysrclen = *sourcelen;
40194 mydstlen = *dstlen - 8;
40195
40196diff -urNp linux-2.6.32.41/fs/jffs2/erase.c linux-2.6.32.41/fs/jffs2/erase.c
40197--- linux-2.6.32.41/fs/jffs2/erase.c 2011-03-27 14:31:47.000000000 -0400
40198+++ linux-2.6.32.41/fs/jffs2/erase.c 2011-04-17 15:56:46.000000000 -0400
40199@@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(stru
40200 struct jffs2_unknown_node marker = {
40201 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
40202 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
40203- .totlen = cpu_to_je32(c->cleanmarker_size)
40204+ .totlen = cpu_to_je32(c->cleanmarker_size),
40205+ .hdr_crc = cpu_to_je32(0)
40206 };
40207
40208 jffs2_prealloc_raw_node_refs(c, jeb, 1);
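
The jffs2 hunks here and in wbuf.c below add an explicit .hdr_crc = cpu_to_je32(0) member to the cleanmarker initializers. With C designated initializers the omitted member already reads as zero, so the written-out field documents the layout (and sidesteps missing-field-initializer style warnings on some compilers) rather than changing the node that gets written. A short demonstration of that zero-fill rule (the struct, field names and values are illustrative):

    #include <stdio.h>

    struct node_model {
        unsigned short magic;
        unsigned short nodetype;
        unsigned int   totlen;
        unsigned int   hdr_crc;
    };

    int main(void)
    {
        /* hdr_crc is not named, yet it is guaranteed to be zero-initialized */
        struct node_model marker = {
            .magic    = 0x1985,
            .nodetype = 0x2003,
            .totlen   = 8,
        };

        printf("hdr_crc=%u\n", marker.hdr_crc);
        return 0;
    }
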
40209diff -urNp linux-2.6.32.41/fs/jffs2/wbuf.c linux-2.6.32.41/fs/jffs2/wbuf.c
40210--- linux-2.6.32.41/fs/jffs2/wbuf.c 2011-03-27 14:31:47.000000000 -0400
40211+++ linux-2.6.32.41/fs/jffs2/wbuf.c 2011-04-17 15:56:46.000000000 -0400
40212@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
40213 {
40214 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
40215 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
40216- .totlen = constant_cpu_to_je32(8)
40217+ .totlen = constant_cpu_to_je32(8),
40218+ .hdr_crc = constant_cpu_to_je32(0)
40219 };
40220
40221 /*
40222diff -urNp linux-2.6.32.41/fs/jffs2/xattr.c linux-2.6.32.41/fs/jffs2/xattr.c
40223--- linux-2.6.32.41/fs/jffs2/xattr.c 2011-03-27 14:31:47.000000000 -0400
40224+++ linux-2.6.32.41/fs/jffs2/xattr.c 2011-05-16 21:46:57.000000000 -0400
40225@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
40226
40227 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
40228
40229+ pax_track_stack();
40230+
40231 /* Phase.1 : Merge same xref */
40232 for (i=0; i < XREF_TMPHASH_SIZE; i++)
40233 xref_tmphash[i] = NULL;
40234diff -urNp linux-2.6.32.41/fs/Kconfig.binfmt linux-2.6.32.41/fs/Kconfig.binfmt
40235--- linux-2.6.32.41/fs/Kconfig.binfmt 2011-03-27 14:31:47.000000000 -0400
40236+++ linux-2.6.32.41/fs/Kconfig.binfmt 2011-04-17 15:56:46.000000000 -0400
40237@@ -86,7 +86,7 @@ config HAVE_AOUT
40238
40239 config BINFMT_AOUT
40240 tristate "Kernel support for a.out and ECOFF binaries"
40241- depends on HAVE_AOUT
40242+ depends on HAVE_AOUT && BROKEN
40243 ---help---
40244 A.out (Assembler.OUTput) is a set of formats for libraries and
40245 executables used in the earliest versions of UNIX. Linux used
40246diff -urNp linux-2.6.32.41/fs/libfs.c linux-2.6.32.41/fs/libfs.c
40247--- linux-2.6.32.41/fs/libfs.c 2011-03-27 14:31:47.000000000 -0400
40248+++ linux-2.6.32.41/fs/libfs.c 2011-05-11 18:25:15.000000000 -0400
40249@@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, v
40250
40251 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
40252 struct dentry *next;
40253+ char d_name[sizeof(next->d_iname)];
40254+ const unsigned char *name;
40255+
40256 next = list_entry(p, struct dentry, d_u.d_child);
40257 if (d_unhashed(next) || !next->d_inode)
40258 continue;
40259
40260 spin_unlock(&dcache_lock);
40261- if (filldir(dirent, next->d_name.name,
40262+ name = next->d_name.name;
40263+ if (name == next->d_iname) {
40264+ memcpy(d_name, name, next->d_name.len);
40265+ name = d_name;
40266+ }
40267+ if (filldir(dirent, name,
40268 next->d_name.len, filp->f_pos,
40269 next->d_inode->i_ino,
40270 dt_type(next->d_inode)) < 0)
40271diff -urNp linux-2.6.32.41/fs/lockd/clntproc.c linux-2.6.32.41/fs/lockd/clntproc.c
40272--- linux-2.6.32.41/fs/lockd/clntproc.c 2011-03-27 14:31:47.000000000 -0400
40273+++ linux-2.6.32.41/fs/lockd/clntproc.c 2011-05-16 21:46:57.000000000 -0400
40274@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
40275 /*
40276 * Cookie counter for NLM requests
40277 */
40278-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
40279+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
40280
40281 void nlmclnt_next_cookie(struct nlm_cookie *c)
40282 {
40283- u32 cookie = atomic_inc_return(&nlm_cookie);
40284+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
40285
40286 memcpy(c->data, &cookie, 4);
40287 c->len=4;
40288@@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
40289 struct nlm_rqst reqst, *req;
40290 int status;
40291
40292+ pax_track_stack();
40293+
40294 req = &reqst;
40295 memset(req, 0, sizeof(*req));
40296 locks_init_lock(&req->a_args.lock.fl);
40297diff -urNp linux-2.6.32.41/fs/lockd/svc.c linux-2.6.32.41/fs/lockd/svc.c
40298--- linux-2.6.32.41/fs/lockd/svc.c 2011-03-27 14:31:47.000000000 -0400
40299+++ linux-2.6.32.41/fs/lockd/svc.c 2011-04-17 15:56:46.000000000 -0400
40300@@ -43,7 +43,7 @@
40301
40302 static struct svc_program nlmsvc_program;
40303
40304-struct nlmsvc_binding * nlmsvc_ops;
40305+const struct nlmsvc_binding * nlmsvc_ops;
40306 EXPORT_SYMBOL_GPL(nlmsvc_ops);
40307
40308 static DEFINE_MUTEX(nlmsvc_mutex);
40309diff -urNp linux-2.6.32.41/fs/locks.c linux-2.6.32.41/fs/locks.c
40310--- linux-2.6.32.41/fs/locks.c 2011-03-27 14:31:47.000000000 -0400
40311+++ linux-2.6.32.41/fs/locks.c 2011-04-17 15:56:46.000000000 -0400
40312@@ -2007,16 +2007,16 @@ void locks_remove_flock(struct file *fil
40313 return;
40314
40315 if (filp->f_op && filp->f_op->flock) {
40316- struct file_lock fl = {
40317+ struct file_lock flock = {
40318 .fl_pid = current->tgid,
40319 .fl_file = filp,
40320 .fl_flags = FL_FLOCK,
40321 .fl_type = F_UNLCK,
40322 .fl_end = OFFSET_MAX,
40323 };
40324- filp->f_op->flock(filp, F_SETLKW, &fl);
40325- if (fl.fl_ops && fl.fl_ops->fl_release_private)
40326- fl.fl_ops->fl_release_private(&fl);
40327+ filp->f_op->flock(filp, F_SETLKW, &flock);
40328+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
40329+ flock.fl_ops->fl_release_private(&flock);
40330 }
40331
40332 lock_kernel();
40333diff -urNp linux-2.6.32.41/fs/namei.c linux-2.6.32.41/fs/namei.c
40334--- linux-2.6.32.41/fs/namei.c 2011-03-27 14:31:47.000000000 -0400
40335+++ linux-2.6.32.41/fs/namei.c 2011-05-16 21:46:57.000000000 -0400
40336@@ -224,14 +224,6 @@ int generic_permission(struct inode *ino
40337 return ret;
40338
40339 /*
40340- * Read/write DACs are always overridable.
40341- * Executable DACs are overridable if at least one exec bit is set.
40342- */
40343- if (!(mask & MAY_EXEC) || execute_ok(inode))
40344- if (capable(CAP_DAC_OVERRIDE))
40345- return 0;
40346-
40347- /*
40348 * Searching includes executable on directories, else just read.
40349 */
40350 mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
40351@@ -239,6 +231,14 @@ int generic_permission(struct inode *ino
40352 if (capable(CAP_DAC_READ_SEARCH))
40353 return 0;
40354
40355+ /*
40356+ * Read/write DACs are always overridable.
40357+ * Executable DACs are overridable if at least one exec bit is set.
40358+ */
40359+ if (!(mask & MAY_EXEC) || execute_ok(inode))
40360+ if (capable(CAP_DAC_OVERRIDE))
40361+ return 0;
40362+
40363 return -EACCES;
40364 }
40365
40366@@ -458,7 +458,8 @@ static int exec_permission_lite(struct i
40367 if (!ret)
40368 goto ok;
40369
40370- if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
40371+ if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
40372+ capable(CAP_DAC_OVERRIDE))
40373 goto ok;
40374
40375 return ret;
40376@@ -638,7 +639,7 @@ static __always_inline int __do_follow_l
40377 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
40378 error = PTR_ERR(cookie);
40379 if (!IS_ERR(cookie)) {
40380- char *s = nd_get_link(nd);
40381+ const char *s = nd_get_link(nd);
40382 error = 0;
40383 if (s)
40384 error = __vfs_follow_link(nd, s);
40385@@ -669,6 +670,13 @@ static inline int do_follow_link(struct
40386 err = security_inode_follow_link(path->dentry, nd);
40387 if (err)
40388 goto loop;
40389+
40390+ if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
40391+ path->dentry->d_inode, path->dentry, nd->path.mnt)) {
40392+ err = -EACCES;
40393+ goto loop;
40394+ }
40395+
40396 current->link_count++;
40397 current->total_link_count++;
40398 nd->depth++;
40399@@ -1016,11 +1024,18 @@ return_reval:
40400 break;
40401 }
40402 return_base:
40403+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
40404+ path_put(&nd->path);
40405+ return -ENOENT;
40406+ }
40407 return 0;
40408 out_dput:
40409 path_put_conditional(&next, nd);
40410 break;
40411 }
40412+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
40413+ err = -ENOENT;
40414+
40415 path_put(&nd->path);
40416 return_err:
40417 return err;
40418@@ -1091,13 +1106,20 @@ static int do_path_lookup(int dfd, const
40419 int retval = path_init(dfd, name, flags, nd);
40420 if (!retval)
40421 retval = path_walk(name, nd);
40422- if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
40423- nd->path.dentry->d_inode))
40424- audit_inode(name, nd->path.dentry);
40425+
40426+ if (likely(!retval)) {
40427+ if (nd->path.dentry && nd->path.dentry->d_inode) {
40428+ if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
40429+ retval = -ENOENT;
40430+ if (!audit_dummy_context())
40431+ audit_inode(name, nd->path.dentry);
40432+ }
40433+ }
40434 if (nd->root.mnt) {
40435 path_put(&nd->root);
40436 nd->root.mnt = NULL;
40437 }
40438+
40439 return retval;
40440 }
40441
40442@@ -1576,6 +1598,20 @@ int may_open(struct path *path, int acc_
40443 if (error)
40444 goto err_out;
40445
40446+
40447+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
40448+ error = -EPERM;
40449+ goto err_out;
40450+ }
40451+ if (gr_handle_rawio(inode)) {
40452+ error = -EPERM;
40453+ goto err_out;
40454+ }
40455+ if (!gr_acl_handle_open(dentry, path->mnt, flag)) {
40456+ error = -EACCES;
40457+ goto err_out;
40458+ }
40459+
40460 if (flag & O_TRUNC) {
40461 error = get_write_access(inode);
40462 if (error)
40463@@ -1621,12 +1657,19 @@ static int __open_namei_create(struct na
40464 int error;
40465 struct dentry *dir = nd->path.dentry;
40466
40467+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, nd->path.mnt, flag, mode)) {
40468+ error = -EACCES;
40469+ goto out_unlock;
40470+ }
40471+
40472 if (!IS_POSIXACL(dir->d_inode))
40473 mode &= ~current_umask();
40474 error = security_path_mknod(&nd->path, path->dentry, mode, 0);
40475 if (error)
40476 goto out_unlock;
40477 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
40478+ if (!error)
40479+ gr_handle_create(path->dentry, nd->path.mnt);
40480 out_unlock:
40481 mutex_unlock(&dir->d_inode->i_mutex);
40482 dput(nd->path.dentry);
40483@@ -1709,6 +1752,22 @@ struct file *do_filp_open(int dfd, const
40484 &nd, flag);
40485 if (error)
40486 return ERR_PTR(error);
40487+
40488+ if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
40489+ error = -EPERM;
40490+ goto exit;
40491+ }
40492+
40493+ if (gr_handle_rawio(nd.path.dentry->d_inode)) {
40494+ error = -EPERM;
40495+ goto exit;
40496+ }
40497+
40498+ if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, flag)) {
40499+ error = -EACCES;
40500+ goto exit;
40501+ }
40502+
40503 goto ok;
40504 }
40505
40506@@ -1795,6 +1854,14 @@ do_last:
40507 /*
40508 * It already exists.
40509 */
40510+
40511+ /* only check if O_CREAT is specified, all other checks need
40512+ to go into may_open */
40513+ if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
40514+ error = -EACCES;
40515+ goto exit_mutex_unlock;
40516+ }
40517+
40518 mutex_unlock(&dir->d_inode->i_mutex);
40519 audit_inode(pathname, path.dentry);
40520
40521@@ -1887,6 +1954,13 @@ do_link:
40522 error = security_inode_follow_link(path.dentry, &nd);
40523 if (error)
40524 goto exit_dput;
40525+
40526+ if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
40527+ path.dentry, nd.path.mnt)) {
40528+ error = -EACCES;
40529+ goto exit_dput;
40530+ }
40531+
40532 error = __do_follow_link(&path, &nd);
40533 if (error) {
40534 /* Does someone understand code flow here? Or it is only
40535@@ -2061,6 +2135,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
40536 error = may_mknod(mode);
40537 if (error)
40538 goto out_dput;
40539+
40540+ if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
40541+ error = -EPERM;
40542+ goto out_dput;
40543+ }
40544+
40545+ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
40546+ error = -EACCES;
40547+ goto out_dput;
40548+ }
40549+
40550 error = mnt_want_write(nd.path.mnt);
40551 if (error)
40552 goto out_dput;
40553@@ -2081,6 +2166,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
40554 }
40555 out_drop_write:
40556 mnt_drop_write(nd.path.mnt);
40557+
40558+ if (!error)
40559+ gr_handle_create(dentry, nd.path.mnt);
40560 out_dput:
40561 dput(dentry);
40562 out_unlock:
40563@@ -2134,6 +2222,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
40564 if (IS_ERR(dentry))
40565 goto out_unlock;
40566
40567+ if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
40568+ error = -EACCES;
40569+ goto out_dput;
40570+ }
40571+
40572 if (!IS_POSIXACL(nd.path.dentry->d_inode))
40573 mode &= ~current_umask();
40574 error = mnt_want_write(nd.path.mnt);
40575@@ -2145,6 +2238,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
40576 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
40577 out_drop_write:
40578 mnt_drop_write(nd.path.mnt);
40579+
40580+ if (!error)
40581+ gr_handle_create(dentry, nd.path.mnt);
40582+
40583 out_dput:
40584 dput(dentry);
40585 out_unlock:
40586@@ -2226,6 +2323,8 @@ static long do_rmdir(int dfd, const char
40587 char * name;
40588 struct dentry *dentry;
40589 struct nameidata nd;
40590+ ino_t saved_ino = 0;
40591+ dev_t saved_dev = 0;
40592
40593 error = user_path_parent(dfd, pathname, &nd, &name);
40594 if (error)
40595@@ -2250,6 +2349,19 @@ static long do_rmdir(int dfd, const char
40596 error = PTR_ERR(dentry);
40597 if (IS_ERR(dentry))
40598 goto exit2;
40599+
40600+ if (dentry->d_inode != NULL) {
40601+ if (dentry->d_inode->i_nlink <= 1) {
40602+ saved_ino = dentry->d_inode->i_ino;
40603+ saved_dev = gr_get_dev_from_dentry(dentry);
40604+ }
40605+
40606+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
40607+ error = -EACCES;
40608+ goto exit3;
40609+ }
40610+ }
40611+
40612 error = mnt_want_write(nd.path.mnt);
40613 if (error)
40614 goto exit3;
40615@@ -2257,6 +2369,8 @@ static long do_rmdir(int dfd, const char
40616 if (error)
40617 goto exit4;
40618 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
40619+ if (!error && (saved_dev || saved_ino))
40620+ gr_handle_delete(saved_ino, saved_dev);
40621 exit4:
40622 mnt_drop_write(nd.path.mnt);
40623 exit3:
40624@@ -2318,6 +2432,8 @@ static long do_unlinkat(int dfd, const c
40625 struct dentry *dentry;
40626 struct nameidata nd;
40627 struct inode *inode = NULL;
40628+ ino_t saved_ino = 0;
40629+ dev_t saved_dev = 0;
40630
40631 error = user_path_parent(dfd, pathname, &nd, &name);
40632 if (error)
40633@@ -2337,8 +2453,19 @@ static long do_unlinkat(int dfd, const c
40634 if (nd.last.name[nd.last.len])
40635 goto slashes;
40636 inode = dentry->d_inode;
40637- if (inode)
40638+ if (inode) {
40639+ if (inode->i_nlink <= 1) {
40640+ saved_ino = inode->i_ino;
40641+ saved_dev = gr_get_dev_from_dentry(dentry);
40642+ }
40643+
40644 atomic_inc(&inode->i_count);
40645+
40646+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
40647+ error = -EACCES;
40648+ goto exit2;
40649+ }
40650+ }
40651 error = mnt_want_write(nd.path.mnt);
40652 if (error)
40653 goto exit2;
40654@@ -2346,6 +2473,8 @@ static long do_unlinkat(int dfd, const c
40655 if (error)
40656 goto exit3;
40657 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
40658+ if (!error && (saved_ino || saved_dev))
40659+ gr_handle_delete(saved_ino, saved_dev);
40660 exit3:
40661 mnt_drop_write(nd.path.mnt);
40662 exit2:
40663@@ -2424,6 +2553,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
40664 if (IS_ERR(dentry))
40665 goto out_unlock;
40666
40667+ if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
40668+ error = -EACCES;
40669+ goto out_dput;
40670+ }
40671+
40672 error = mnt_want_write(nd.path.mnt);
40673 if (error)
40674 goto out_dput;
40675@@ -2431,6 +2565,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
40676 if (error)
40677 goto out_drop_write;
40678 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
40679+ if (!error)
40680+ gr_handle_create(dentry, nd.path.mnt);
40681 out_drop_write:
40682 mnt_drop_write(nd.path.mnt);
40683 out_dput:
40684@@ -2524,6 +2660,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
40685 error = PTR_ERR(new_dentry);
40686 if (IS_ERR(new_dentry))
40687 goto out_unlock;
40688+
40689+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
40690+ old_path.dentry->d_inode,
40691+ old_path.dentry->d_inode->i_mode, to)) {
40692+ error = -EACCES;
40693+ goto out_dput;
40694+ }
40695+
40696+ if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
40697+ old_path.dentry, old_path.mnt, to)) {
40698+ error = -EACCES;
40699+ goto out_dput;
40700+ }
40701+
40702 error = mnt_want_write(nd.path.mnt);
40703 if (error)
40704 goto out_dput;
40705@@ -2531,6 +2681,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
40706 if (error)
40707 goto out_drop_write;
40708 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
40709+ if (!error)
40710+ gr_handle_create(new_dentry, nd.path.mnt);
40711 out_drop_write:
40712 mnt_drop_write(nd.path.mnt);
40713 out_dput:
40714@@ -2708,6 +2860,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
40715 char *to;
40716 int error;
40717
40718+ pax_track_stack();
40719+
40720 error = user_path_parent(olddfd, oldname, &oldnd, &from);
40721 if (error)
40722 goto exit;
40723@@ -2764,6 +2918,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
40724 if (new_dentry == trap)
40725 goto exit5;
40726
40727+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
40728+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
40729+ to);
40730+ if (error)
40731+ goto exit5;
40732+
40733 error = mnt_want_write(oldnd.path.mnt);
40734 if (error)
40735 goto exit5;
40736@@ -2773,6 +2933,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
40737 goto exit6;
40738 error = vfs_rename(old_dir->d_inode, old_dentry,
40739 new_dir->d_inode, new_dentry);
40740+ if (!error)
40741+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
40742+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
40743 exit6:
40744 mnt_drop_write(oldnd.path.mnt);
40745 exit5:
40746@@ -2798,6 +2961,8 @@ SYSCALL_DEFINE2(rename, const char __use
40747
40748 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
40749 {
40750+ char tmpbuf[64];
40751+ const char *newlink;
40752 int len;
40753
40754 len = PTR_ERR(link);
40755@@ -2807,7 +2972,14 @@ int vfs_readlink(struct dentry *dentry,
40756 len = strlen(link);
40757 if (len > (unsigned) buflen)
40758 len = buflen;
40759- if (copy_to_user(buffer, link, len))
40760+
40761+ if (len < sizeof(tmpbuf)) {
40762+ memcpy(tmpbuf, link, len);
40763+ newlink = tmpbuf;
40764+ } else
40765+ newlink = link;
40766+
40767+ if (copy_to_user(buffer, newlink, len))
40768 len = -EFAULT;
40769 out:
40770 return len;
40771diff -urNp linux-2.6.32.41/fs/namespace.c linux-2.6.32.41/fs/namespace.c
40772--- linux-2.6.32.41/fs/namespace.c 2011-03-27 14:31:47.000000000 -0400
40773+++ linux-2.6.32.41/fs/namespace.c 2011-04-17 15:56:46.000000000 -0400
40774@@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mn
40775 if (!(sb->s_flags & MS_RDONLY))
40776 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
40777 up_write(&sb->s_umount);
40778+
40779+ gr_log_remount(mnt->mnt_devname, retval);
40780+
40781 return retval;
40782 }
40783
40784@@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mn
40785 security_sb_umount_busy(mnt);
40786 up_write(&namespace_sem);
40787 release_mounts(&umount_list);
40788+
40789+ gr_log_unmount(mnt->mnt_devname, retval);
40790+
40791 return retval;
40792 }
40793
40794@@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_
40795 if (retval)
40796 goto dput_out;
40797
40798+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
40799+ retval = -EPERM;
40800+ goto dput_out;
40801+ }
40802+
40803+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
40804+ retval = -EPERM;
40805+ goto dput_out;
40806+ }
40807+
40808 if (flags & MS_REMOUNT)
40809 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
40810 data_page);
40811@@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_
40812 dev_name, data_page);
40813 dput_out:
40814 path_put(&path);
40815+
40816+ gr_log_mount(dev_name, dir_name, retval);
40817+
40818 return retval;
40819 }
40820
40821@@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char _
40822 goto out1;
40823 }
40824
40825+ if (gr_handle_chroot_pivot()) {
40826+ error = -EPERM;
40827+ path_put(&old);
40828+ goto out1;
40829+ }
40830+
40831 read_lock(&current->fs->lock);
40832 root = current->fs->root;
40833 path_get(&current->fs->root);
40834diff -urNp linux-2.6.32.41/fs/ncpfs/dir.c linux-2.6.32.41/fs/ncpfs/dir.c
40835--- linux-2.6.32.41/fs/ncpfs/dir.c 2011-03-27 14:31:47.000000000 -0400
40836+++ linux-2.6.32.41/fs/ncpfs/dir.c 2011-05-16 21:46:57.000000000 -0400
40837@@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *den
40838 int res, val = 0, len;
40839 __u8 __name[NCP_MAXPATHLEN + 1];
40840
40841+ pax_track_stack();
40842+
40843 parent = dget_parent(dentry);
40844 dir = parent->d_inode;
40845
40846@@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct
40847 int error, res, len;
40848 __u8 __name[NCP_MAXPATHLEN + 1];
40849
40850+ pax_track_stack();
40851+
40852 lock_kernel();
40853 error = -EIO;
40854 if (!ncp_conn_valid(server))
40855@@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, st
40856 int error, result, len;
40857 int opmode;
40858 __u8 __name[NCP_MAXPATHLEN + 1];
40859-
40860+
40861 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
40862 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
40863
40864+ pax_track_stack();
40865+
40866 error = -EIO;
40867 lock_kernel();
40868 if (!ncp_conn_valid(server))
40869@@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir,
40870 int error, len;
40871 __u8 __name[NCP_MAXPATHLEN + 1];
40872
40873+ pax_track_stack();
40874+
40875 DPRINTK("ncp_mkdir: making %s/%s\n",
40876 dentry->d_parent->d_name.name, dentry->d_name.name);
40877
40878@@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir,
40879 if (!ncp_conn_valid(server))
40880 goto out;
40881
40882+ pax_track_stack();
40883+
40884 ncp_age_dentry(server, dentry);
40885 len = sizeof(__name);
40886 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
40887@@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_
40888 int old_len, new_len;
40889 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
40890
40891+ pax_track_stack();
40892+
40893 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
40894 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
40895 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
40896diff -urNp linux-2.6.32.41/fs/ncpfs/inode.c linux-2.6.32.41/fs/ncpfs/inode.c
40897--- linux-2.6.32.41/fs/ncpfs/inode.c 2011-03-27 14:31:47.000000000 -0400
40898+++ linux-2.6.32.41/fs/ncpfs/inode.c 2011-05-16 21:46:57.000000000 -0400
40899@@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_b
40900 #endif
40901 struct ncp_entry_info finfo;
40902
40903+ pax_track_stack();
40904+
40905 data.wdog_pid = NULL;
40906 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
40907 if (!server)
40908diff -urNp linux-2.6.32.41/fs/nfs/inode.c linux-2.6.32.41/fs/nfs/inode.c
40909--- linux-2.6.32.41/fs/nfs/inode.c 2011-05-10 22:12:01.000000000 -0400
40910+++ linux-2.6.32.41/fs/nfs/inode.c 2011-05-10 22:12:33.000000000 -0400
40911@@ -973,16 +973,16 @@ static int nfs_size_need_update(const st
40912 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
40913 }
40914
40915-static atomic_long_t nfs_attr_generation_counter;
40916+static atomic_long_unchecked_t nfs_attr_generation_counter;
40917
40918 static unsigned long nfs_read_attr_generation_counter(void)
40919 {
40920- return atomic_long_read(&nfs_attr_generation_counter);
40921+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
40922 }
40923
40924 unsigned long nfs_inc_attr_generation_counter(void)
40925 {
40926- return atomic_long_inc_return(&nfs_attr_generation_counter);
40927+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
40928 }
40929
40930 void nfs_fattr_init(struct nfs_fattr *fattr)
40931diff -urNp linux-2.6.32.41/fs/nfsd/lockd.c linux-2.6.32.41/fs/nfsd/lockd.c
40932--- linux-2.6.32.41/fs/nfsd/lockd.c 2011-04-17 17:00:52.000000000 -0400
40933+++ linux-2.6.32.41/fs/nfsd/lockd.c 2011-04-17 17:03:15.000000000 -0400
40934@@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
40935 fput(filp);
40936 }
40937
40938-static struct nlmsvc_binding nfsd_nlm_ops = {
40939+static const struct nlmsvc_binding nfsd_nlm_ops = {
40940 .fopen = nlm_fopen, /* open file for locking */
40941 .fclose = nlm_fclose, /* close file */
40942 };
40943diff -urNp linux-2.6.32.41/fs/nfsd/nfs4state.c linux-2.6.32.41/fs/nfsd/nfs4state.c
40944--- linux-2.6.32.41/fs/nfsd/nfs4state.c 2011-03-27 14:31:47.000000000 -0400
40945+++ linux-2.6.32.41/fs/nfsd/nfs4state.c 2011-05-16 21:46:57.000000000 -0400
40946@@ -3457,6 +3457,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
40947 unsigned int cmd;
40948 int err;
40949
40950+ pax_track_stack();
40951+
40952 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
40953 (long long) lock->lk_offset,
40954 (long long) lock->lk_length);
40955diff -urNp linux-2.6.32.41/fs/nfsd/nfs4xdr.c linux-2.6.32.41/fs/nfsd/nfs4xdr.c
40956--- linux-2.6.32.41/fs/nfsd/nfs4xdr.c 2011-03-27 14:31:47.000000000 -0400
40957+++ linux-2.6.32.41/fs/nfsd/nfs4xdr.c 2011-05-16 21:46:57.000000000 -0400
40958@@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
40959 struct nfsd4_compoundres *resp = rqstp->rq_resp;
40960 u32 minorversion = resp->cstate.minorversion;
40961
40962+ pax_track_stack();
40963+
40964 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
40965 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
40966 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
40967diff -urNp linux-2.6.32.41/fs/nfsd/vfs.c linux-2.6.32.41/fs/nfsd/vfs.c
40968--- linux-2.6.32.41/fs/nfsd/vfs.c 2011-05-10 22:12:01.000000000 -0400
40969+++ linux-2.6.32.41/fs/nfsd/vfs.c 2011-05-10 22:12:33.000000000 -0400
40970@@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
40971 } else {
40972 oldfs = get_fs();
40973 set_fs(KERNEL_DS);
40974- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
40975+ host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
40976 set_fs(oldfs);
40977 }
40978
40979@@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
40980
40981 /* Write the data. */
40982 oldfs = get_fs(); set_fs(KERNEL_DS);
40983- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
40984+ host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
40985 set_fs(oldfs);
40986 if (host_err < 0)
40987 goto out_nfserr;
40988@@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
40989 */
40990
40991 oldfs = get_fs(); set_fs(KERNEL_DS);
40992- host_err = inode->i_op->readlink(dentry, buf, *lenp);
40993+ host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
40994 set_fs(oldfs);
40995
40996 if (host_err < 0)
40997diff -urNp linux-2.6.32.41/fs/nilfs2/ioctl.c linux-2.6.32.41/fs/nilfs2/ioctl.c
40998--- linux-2.6.32.41/fs/nilfs2/ioctl.c 2011-03-27 14:31:47.000000000 -0400
40999+++ linux-2.6.32.41/fs/nilfs2/ioctl.c 2011-05-04 17:56:28.000000000 -0400
41000@@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(st
41001 unsigned int cmd, void __user *argp)
41002 {
41003 struct nilfs_argv argv[5];
41004- const static size_t argsz[5] = {
41005+ static const size_t argsz[5] = {
41006 sizeof(struct nilfs_vdesc),
41007 sizeof(struct nilfs_period),
41008 sizeof(__u64),
41009diff -urNp linux-2.6.32.41/fs/notify/dnotify/dnotify.c linux-2.6.32.41/fs/notify/dnotify/dnotify.c
41010--- linux-2.6.32.41/fs/notify/dnotify/dnotify.c 2011-03-27 14:31:47.000000000 -0400
41011+++ linux-2.6.32.41/fs/notify/dnotify/dnotify.c 2011-04-17 15:56:46.000000000 -0400
41012@@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsn
41013 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
41014 }
41015
41016-static struct fsnotify_ops dnotify_fsnotify_ops = {
41017+static const struct fsnotify_ops dnotify_fsnotify_ops = {
41018 .handle_event = dnotify_handle_event,
41019 .should_send_event = dnotify_should_send_event,
41020 .free_group_priv = NULL,
41021diff -urNp linux-2.6.32.41/fs/notify/notification.c linux-2.6.32.41/fs/notify/notification.c
41022--- linux-2.6.32.41/fs/notify/notification.c 2011-03-27 14:31:47.000000000 -0400
41023+++ linux-2.6.32.41/fs/notify/notification.c 2011-05-04 17:56:28.000000000 -0400
41024@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
41025 * get set to 0 so it will never get 'freed'
41026 */
41027 static struct fsnotify_event q_overflow_event;
41028-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
41029+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
41030
41031 /**
41032 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
41033@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
41034 */
41035 u32 fsnotify_get_cookie(void)
41036 {
41037- return atomic_inc_return(&fsnotify_sync_cookie);
41038+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
41039 }
41040 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
41041
41042diff -urNp linux-2.6.32.41/fs/ntfs/dir.c linux-2.6.32.41/fs/ntfs/dir.c
41043--- linux-2.6.32.41/fs/ntfs/dir.c 2011-03-27 14:31:47.000000000 -0400
41044+++ linux-2.6.32.41/fs/ntfs/dir.c 2011-04-17 15:56:46.000000000 -0400
41045@@ -1328,7 +1328,7 @@ find_next_index_buffer:
41046 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
41047 ~(s64)(ndir->itype.index.block_size - 1)));
41048 /* Bounds checks. */
41049- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
41050+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
41051 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
41052 "inode 0x%lx or driver bug.", vdir->i_ino);
41053 goto err_out;
41054diff -urNp linux-2.6.32.41/fs/ntfs/file.c linux-2.6.32.41/fs/ntfs/file.c
41055--- linux-2.6.32.41/fs/ntfs/file.c 2011-03-27 14:31:47.000000000 -0400
41056+++ linux-2.6.32.41/fs/ntfs/file.c 2011-04-17 15:56:46.000000000 -0400
41057@@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_
41058 #endif /* NTFS_RW */
41059 };
41060
41061-const struct file_operations ntfs_empty_file_ops = {};
41062+const struct file_operations ntfs_empty_file_ops __read_only;
41063
41064-const struct inode_operations ntfs_empty_inode_ops = {};
41065+const struct inode_operations ntfs_empty_inode_ops __read_only;
41066diff -urNp linux-2.6.32.41/fs/ocfs2/cluster/masklog.c linux-2.6.32.41/fs/ocfs2/cluster/masklog.c
41067--- linux-2.6.32.41/fs/ocfs2/cluster/masklog.c 2011-03-27 14:31:47.000000000 -0400
41068+++ linux-2.6.32.41/fs/ocfs2/cluster/masklog.c 2011-04-17 15:56:46.000000000 -0400
41069@@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject
41070 return mlog_mask_store(mlog_attr->mask, buf, count);
41071 }
41072
41073-static struct sysfs_ops mlog_attr_ops = {
41074+static const struct sysfs_ops mlog_attr_ops = {
41075 .show = mlog_show,
41076 .store = mlog_store,
41077 };
41078diff -urNp linux-2.6.32.41/fs/ocfs2/localalloc.c linux-2.6.32.41/fs/ocfs2/localalloc.c
41079--- linux-2.6.32.41/fs/ocfs2/localalloc.c 2011-03-27 14:31:47.000000000 -0400
41080+++ linux-2.6.32.41/fs/ocfs2/localalloc.c 2011-04-17 15:56:46.000000000 -0400
41081@@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_windo
41082 goto bail;
41083 }
41084
41085- atomic_inc(&osb->alloc_stats.moves);
41086+ atomic_inc_unchecked(&osb->alloc_stats.moves);
41087
41088 status = 0;
41089 bail:
41090diff -urNp linux-2.6.32.41/fs/ocfs2/namei.c linux-2.6.32.41/fs/ocfs2/namei.c
41091--- linux-2.6.32.41/fs/ocfs2/namei.c 2011-03-27 14:31:47.000000000 -0400
41092+++ linux-2.6.32.41/fs/ocfs2/namei.c 2011-05-16 21:46:57.000000000 -0400
41093@@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *ol
41094 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
41095 struct ocfs2_dir_lookup_result target_insert = { NULL, };
41096
41097+ pax_track_stack();
41098+
41099 /* At some point it might be nice to break this function up a
41100 * bit. */
41101
41102diff -urNp linux-2.6.32.41/fs/ocfs2/ocfs2.h linux-2.6.32.41/fs/ocfs2/ocfs2.h
41103--- linux-2.6.32.41/fs/ocfs2/ocfs2.h 2011-03-27 14:31:47.000000000 -0400
41104+++ linux-2.6.32.41/fs/ocfs2/ocfs2.h 2011-04-17 15:56:46.000000000 -0400
41105@@ -217,11 +217,11 @@ enum ocfs2_vol_state
41106
41107 struct ocfs2_alloc_stats
41108 {
41109- atomic_t moves;
41110- atomic_t local_data;
41111- atomic_t bitmap_data;
41112- atomic_t bg_allocs;
41113- atomic_t bg_extends;
41114+ atomic_unchecked_t moves;
41115+ atomic_unchecked_t local_data;
41116+ atomic_unchecked_t bitmap_data;
41117+ atomic_unchecked_t bg_allocs;
41118+ atomic_unchecked_t bg_extends;
41119 };
41120
41121 enum ocfs2_local_alloc_state
41122diff -urNp linux-2.6.32.41/fs/ocfs2/suballoc.c linux-2.6.32.41/fs/ocfs2/suballoc.c
41123--- linux-2.6.32.41/fs/ocfs2/suballoc.c 2011-03-27 14:31:47.000000000 -0400
41124+++ linux-2.6.32.41/fs/ocfs2/suballoc.c 2011-04-17 15:56:46.000000000 -0400
41125@@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(s
41126 mlog_errno(status);
41127 goto bail;
41128 }
41129- atomic_inc(&osb->alloc_stats.bg_extends);
41130+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
41131
41132 /* You should never ask for this much metadata */
41133 BUG_ON(bits_wanted >
41134@@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_su
41135 mlog_errno(status);
41136 goto bail;
41137 }
41138- atomic_inc(&osb->alloc_stats.bg_allocs);
41139+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
41140
41141 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
41142 ac->ac_bits_given += (*num_bits);
41143@@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_s
41144 mlog_errno(status);
41145 goto bail;
41146 }
41147- atomic_inc(&osb->alloc_stats.bg_allocs);
41148+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
41149
41150 BUG_ON(num_bits != 1);
41151
41152@@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
41153 cluster_start,
41154 num_clusters);
41155 if (!status)
41156- atomic_inc(&osb->alloc_stats.local_data);
41157+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
41158 } else {
41159 if (min_clusters > (osb->bitmap_cpg - 1)) {
41160 /* The only paths asking for contiguousness
41161@@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
41162 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
41163 bg_blkno,
41164 bg_bit_off);
41165- atomic_inc(&osb->alloc_stats.bitmap_data);
41166+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
41167 }
41168 }
41169 if (status < 0) {
41170diff -urNp linux-2.6.32.41/fs/ocfs2/super.c linux-2.6.32.41/fs/ocfs2/super.c
41171--- linux-2.6.32.41/fs/ocfs2/super.c 2011-03-27 14:31:47.000000000 -0400
41172+++ linux-2.6.32.41/fs/ocfs2/super.c 2011-04-17 15:56:46.000000000 -0400
41173@@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
41174 "%10s => GlobalAllocs: %d LocalAllocs: %d "
41175 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
41176 "Stats",
41177- atomic_read(&osb->alloc_stats.bitmap_data),
41178- atomic_read(&osb->alloc_stats.local_data),
41179- atomic_read(&osb->alloc_stats.bg_allocs),
41180- atomic_read(&osb->alloc_stats.moves),
41181- atomic_read(&osb->alloc_stats.bg_extends));
41182+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
41183+ atomic_read_unchecked(&osb->alloc_stats.local_data),
41184+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
41185+ atomic_read_unchecked(&osb->alloc_stats.moves),
41186+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
41187
41188 out += snprintf(buf + out, len - out,
41189 "%10s => State: %u Descriptor: %llu Size: %u bits "
41190@@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct
41191 spin_lock_init(&osb->osb_xattr_lock);
41192 ocfs2_init_inode_steal_slot(osb);
41193
41194- atomic_set(&osb->alloc_stats.moves, 0);
41195- atomic_set(&osb->alloc_stats.local_data, 0);
41196- atomic_set(&osb->alloc_stats.bitmap_data, 0);
41197- atomic_set(&osb->alloc_stats.bg_allocs, 0);
41198- atomic_set(&osb->alloc_stats.bg_extends, 0);
41199+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
41200+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
41201+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
41202+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
41203+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
41204
41205 /* Copy the blockcheck stats from the superblock probe */
41206 osb->osb_ecc_stats = *stats;
41207diff -urNp linux-2.6.32.41/fs/open.c linux-2.6.32.41/fs/open.c
41208--- linux-2.6.32.41/fs/open.c 2011-03-27 14:31:47.000000000 -0400
41209+++ linux-2.6.32.41/fs/open.c 2011-04-17 15:56:46.000000000 -0400
41210@@ -275,6 +275,10 @@ static long do_sys_truncate(const char _
41211 error = locks_verify_truncate(inode, NULL, length);
41212 if (!error)
41213 error = security_path_truncate(&path, length, 0);
41214+
41215+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
41216+ error = -EACCES;
41217+
41218 if (!error) {
41219 vfs_dq_init(inode);
41220 error = do_truncate(path.dentry, length, 0, NULL);
41221@@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
41222 if (__mnt_is_readonly(path.mnt))
41223 res = -EROFS;
41224
41225+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
41226+ res = -EACCES;
41227+
41228 out_path_release:
41229 path_put(&path);
41230 out:
41231@@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user
41232 if (error)
41233 goto dput_and_out;
41234
41235+ gr_log_chdir(path.dentry, path.mnt);
41236+
41237 set_fs_pwd(current->fs, &path);
41238
41239 dput_and_out:
41240@@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
41241 goto out_putf;
41242
41243 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
41244+
41245+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
41246+ error = -EPERM;
41247+
41248+ if (!error)
41249+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
41250+
41251 if (!error)
41252 set_fs_pwd(current->fs, &file->f_path);
41253 out_putf:
41254@@ -588,7 +604,18 @@ SYSCALL_DEFINE1(chroot, const char __use
41255 if (!capable(CAP_SYS_CHROOT))
41256 goto dput_and_out;
41257
41258+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
41259+ goto dput_and_out;
41260+
41261+ if (gr_handle_chroot_caps(&path)) {
41262+ error = -ENOMEM;
41263+ goto dput_and_out;
41264+ }
41265+
41266 set_fs_root(current->fs, &path);
41267+
41268+ gr_handle_chroot_chdir(&path);
41269+
41270 error = 0;
41271 dput_and_out:
41272 path_put(&path);
41273@@ -616,12 +643,27 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
41274 err = mnt_want_write_file(file);
41275 if (err)
41276 goto out_putf;
41277+
41278 mutex_lock(&inode->i_mutex);
41279+
41280+ if (!gr_acl_handle_fchmod(dentry, file->f_path.mnt, mode)) {
41281+ err = -EACCES;
41282+ goto out_unlock;
41283+ }
41284+
41285 if (mode == (mode_t) -1)
41286 mode = inode->i_mode;
41287+
41288+ if (gr_handle_chroot_chmod(dentry, file->f_path.mnt, mode)) {
41289+ err = -EPERM;
41290+ goto out_unlock;
41291+ }
41292+
41293 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
41294 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
41295 err = notify_change(dentry, &newattrs);
41296+
41297+out_unlock:
41298 mutex_unlock(&inode->i_mutex);
41299 mnt_drop_write(file->f_path.mnt);
41300 out_putf:
41301@@ -645,12 +687,27 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
41302 error = mnt_want_write(path.mnt);
41303 if (error)
41304 goto dput_and_out;
41305+
41306 mutex_lock(&inode->i_mutex);
41307+
41308+ if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
41309+ error = -EACCES;
41310+ goto out_unlock;
41311+ }
41312+
41313 if (mode == (mode_t) -1)
41314 mode = inode->i_mode;
41315+
41316+ if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
41317+ error = -EACCES;
41318+ goto out_unlock;
41319+ }
41320+
41321 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
41322 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
41323 error = notify_change(path.dentry, &newattrs);
41324+
41325+out_unlock:
41326 mutex_unlock(&inode->i_mutex);
41327 mnt_drop_write(path.mnt);
41328 dput_and_out:
41329@@ -664,12 +721,15 @@ SYSCALL_DEFINE2(chmod, const char __user
41330 return sys_fchmodat(AT_FDCWD, filename, mode);
41331 }
41332
41333-static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
41334+static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
41335 {
41336 struct inode *inode = dentry->d_inode;
41337 int error;
41338 struct iattr newattrs;
41339
41340+ if (!gr_acl_handle_chown(dentry, mnt))
41341+ return -EACCES;
41342+
41343 newattrs.ia_valid = ATTR_CTIME;
41344 if (user != (uid_t) -1) {
41345 newattrs.ia_valid |= ATTR_UID;
41346@@ -700,7 +760,7 @@ SYSCALL_DEFINE3(chown, const char __user
41347 error = mnt_want_write(path.mnt);
41348 if (error)
41349 goto out_release;
41350- error = chown_common(path.dentry, user, group);
41351+ error = chown_common(path.dentry, user, group, path.mnt);
41352 mnt_drop_write(path.mnt);
41353 out_release:
41354 path_put(&path);
41355@@ -725,7 +785,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, cons
41356 error = mnt_want_write(path.mnt);
41357 if (error)
41358 goto out_release;
41359- error = chown_common(path.dentry, user, group);
41360+ error = chown_common(path.dentry, user, group, path.mnt);
41361 mnt_drop_write(path.mnt);
41362 out_release:
41363 path_put(&path);
41364@@ -744,7 +804,7 @@ SYSCALL_DEFINE3(lchown, const char __use
41365 error = mnt_want_write(path.mnt);
41366 if (error)
41367 goto out_release;
41368- error = chown_common(path.dentry, user, group);
41369+ error = chown_common(path.dentry, user, group, path.mnt);
41370 mnt_drop_write(path.mnt);
41371 out_release:
41372 path_put(&path);
41373@@ -767,7 +827,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd
41374 goto out_fput;
41375 dentry = file->f_path.dentry;
41376 audit_inode(NULL, dentry);
41377- error = chown_common(dentry, user, group);
41378+ error = chown_common(dentry, user, group, file->f_path.mnt);
41379 mnt_drop_write(file->f_path.mnt);
41380 out_fput:
41381 fput(file);
41382@@ -1036,7 +1096,10 @@ long do_sys_open(int dfd, const char __u
41383 if (!IS_ERR(tmp)) {
41384 fd = get_unused_fd_flags(flags);
41385 if (fd >= 0) {
41386- struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
41387+ struct file *f;
41388+ /* don't allow to be set by userland */
41389+ flags &= ~FMODE_GREXEC;
41390+ f = do_filp_open(dfd, tmp, flags, mode, 0);
41391 if (IS_ERR(f)) {
41392 put_unused_fd(fd);
41393 fd = PTR_ERR(f);
41394diff -urNp linux-2.6.32.41/fs/partitions/ldm.c linux-2.6.32.41/fs/partitions/ldm.c
41395--- linux-2.6.32.41/fs/partitions/ldm.c 2011-05-10 22:12:01.000000000 -0400
41396+++ linux-2.6.32.41/fs/partitions/ldm.c 2011-04-18 19:31:12.000000000 -0400
41397@@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
41398 ldm_error ("A VBLK claims to have %d parts.", num);
41399 return false;
41400 }
41401+
41402 if (rec >= num) {
41403 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
41404 return false;
41405@@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
41406 goto found;
41407 }
41408
41409- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
41410+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
41411 if (!f) {
41412 ldm_crit ("Out of memory.");
41413 return false;
41414diff -urNp linux-2.6.32.41/fs/partitions/mac.c linux-2.6.32.41/fs/partitions/mac.c
41415--- linux-2.6.32.41/fs/partitions/mac.c 2011-03-27 14:31:47.000000000 -0400
41416+++ linux-2.6.32.41/fs/partitions/mac.c 2011-04-17 15:56:46.000000000 -0400
41417@@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitio
41418 return 0; /* not a MacOS disk */
41419 }
41420 blocks_in_map = be32_to_cpu(part->map_count);
41421+ printk(" [mac]");
41422 if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
41423 put_dev_sector(sect);
41424 return 0;
41425 }
41426- printk(" [mac]");
41427 for (slot = 1; slot <= blocks_in_map; ++slot) {
41428 int pos = slot * secsize;
41429 put_dev_sector(sect);
41430diff -urNp linux-2.6.32.41/fs/pipe.c linux-2.6.32.41/fs/pipe.c
41431--- linux-2.6.32.41/fs/pipe.c 2011-03-27 14:31:47.000000000 -0400
41432+++ linux-2.6.32.41/fs/pipe.c 2011-04-23 13:37:17.000000000 -0400
41433@@ -401,9 +401,9 @@ redo:
41434 }
41435 if (bufs) /* More to do? */
41436 continue;
41437- if (!pipe->writers)
41438+ if (!atomic_read(&pipe->writers))
41439 break;
41440- if (!pipe->waiting_writers) {
41441+ if (!atomic_read(&pipe->waiting_writers)) {
41442 /* syscall merging: Usually we must not sleep
41443 * if O_NONBLOCK is set, or if we got some data.
41444 * But if a writer sleeps in kernel space, then
41445@@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const str
41446 mutex_lock(&inode->i_mutex);
41447 pipe = inode->i_pipe;
41448
41449- if (!pipe->readers) {
41450+ if (!atomic_read(&pipe->readers)) {
41451 send_sig(SIGPIPE, current, 0);
41452 ret = -EPIPE;
41453 goto out;
41454@@ -511,7 +511,7 @@ redo1:
41455 for (;;) {
41456 int bufs;
41457
41458- if (!pipe->readers) {
41459+ if (!atomic_read(&pipe->readers)) {
41460 send_sig(SIGPIPE, current, 0);
41461 if (!ret)
41462 ret = -EPIPE;
41463@@ -597,9 +597,9 @@ redo2:
41464 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
41465 do_wakeup = 0;
41466 }
41467- pipe->waiting_writers++;
41468+ atomic_inc(&pipe->waiting_writers);
41469 pipe_wait(pipe);
41470- pipe->waiting_writers--;
41471+ atomic_dec(&pipe->waiting_writers);
41472 }
41473 out:
41474 mutex_unlock(&inode->i_mutex);
41475@@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table
41476 mask = 0;
41477 if (filp->f_mode & FMODE_READ) {
41478 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
41479- if (!pipe->writers && filp->f_version != pipe->w_counter)
41480+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
41481 mask |= POLLHUP;
41482 }
41483
41484@@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table
41485 * Most Unices do not set POLLERR for FIFOs but on Linux they
41486 * behave exactly like pipes for poll().
41487 */
41488- if (!pipe->readers)
41489+ if (!atomic_read(&pipe->readers))
41490 mask |= POLLERR;
41491 }
41492
41493@@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int de
41494
41495 mutex_lock(&inode->i_mutex);
41496 pipe = inode->i_pipe;
41497- pipe->readers -= decr;
41498- pipe->writers -= decw;
41499+ atomic_sub(decr, &pipe->readers);
41500+ atomic_sub(decw, &pipe->writers);
41501
41502- if (!pipe->readers && !pipe->writers) {
41503+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
41504 free_pipe_info(inode);
41505 } else {
41506 wake_up_interruptible_sync(&pipe->wait);
41507@@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, stru
41508
41509 if (inode->i_pipe) {
41510 ret = 0;
41511- inode->i_pipe->readers++;
41512+ atomic_inc(&inode->i_pipe->readers);
41513 }
41514
41515 mutex_unlock(&inode->i_mutex);
41516@@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, str
41517
41518 if (inode->i_pipe) {
41519 ret = 0;
41520- inode->i_pipe->writers++;
41521+ atomic_inc(&inode->i_pipe->writers);
41522 }
41523
41524 mutex_unlock(&inode->i_mutex);
41525@@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, stru
41526 if (inode->i_pipe) {
41527 ret = 0;
41528 if (filp->f_mode & FMODE_READ)
41529- inode->i_pipe->readers++;
41530+ atomic_inc(&inode->i_pipe->readers);
41531 if (filp->f_mode & FMODE_WRITE)
41532- inode->i_pipe->writers++;
41533+ atomic_inc(&inode->i_pipe->writers);
41534 }
41535
41536 mutex_unlock(&inode->i_mutex);
41537@@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
41538 inode->i_pipe = NULL;
41539 }
41540
41541-static struct vfsmount *pipe_mnt __read_mostly;
41542+struct vfsmount *pipe_mnt __read_mostly;
41543 static int pipefs_delete_dentry(struct dentry *dentry)
41544 {
41545 /*
41546@@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(voi
41547 goto fail_iput;
41548 inode->i_pipe = pipe;
41549
41550- pipe->readers = pipe->writers = 1;
41551+ atomic_set(&pipe->readers, 1);
41552+ atomic_set(&pipe->writers, 1);
41553 inode->i_fop = &rdwr_pipefifo_fops;
41554
41555 /*
41556diff -urNp linux-2.6.32.41/fs/proc/array.c linux-2.6.32.41/fs/proc/array.c
41557--- linux-2.6.32.41/fs/proc/array.c 2011-03-27 14:31:47.000000000 -0400
41558+++ linux-2.6.32.41/fs/proc/array.c 2011-05-16 21:46:57.000000000 -0400
41559@@ -60,6 +60,7 @@
41560 #include <linux/tty.h>
41561 #include <linux/string.h>
41562 #include <linux/mman.h>
41563+#include <linux/grsecurity.h>
41564 #include <linux/proc_fs.h>
41565 #include <linux/ioport.h>
41566 #include <linux/uaccess.h>
41567@@ -321,6 +322,21 @@ static inline void task_context_switch_c
41568 p->nivcsw);
41569 }
41570
41571+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
41572+static inline void task_pax(struct seq_file *m, struct task_struct *p)
41573+{
41574+ if (p->mm)
41575+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
41576+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
41577+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
41578+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
41579+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
41580+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
41581+ else
41582+ seq_printf(m, "PaX:\t-----\n");
41583+}
41584+#endif
41585+
41586 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
41587 struct pid *pid, struct task_struct *task)
41588 {
41589@@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m,
41590 task_cap(m, task);
41591 cpuset_task_status_allowed(m, task);
41592 task_context_switch_counts(m, task);
41593+
41594+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
41595+ task_pax(m, task);
41596+#endif
41597+
41598+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
41599+ task_grsec_rbac(m, task);
41600+#endif
41601+
41602 return 0;
41603 }
41604
41605+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41606+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
41607+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
41608+ _mm->pax_flags & MF_PAX_SEGMEXEC))
41609+#endif
41610+
41611 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
41612 struct pid *pid, struct task_struct *task, int whole)
41613 {
41614@@ -358,9 +389,11 @@ static int do_task_stat(struct seq_file
41615 cputime_t cutime, cstime, utime, stime;
41616 cputime_t cgtime, gtime;
41617 unsigned long rsslim = 0;
41618- char tcomm[sizeof(task->comm)];
41619+ char tcomm[sizeof(task->comm)] = { 0 };
41620 unsigned long flags;
41621
41622+ pax_track_stack();
41623+
41624 state = *get_task_state(task);
41625 vsize = eip = esp = 0;
41626 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
41627@@ -433,6 +466,19 @@ static int do_task_stat(struct seq_file
41628 gtime = task_gtime(task);
41629 }
41630
41631+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41632+ if (PAX_RAND_FLAGS(mm)) {
41633+ eip = 0;
41634+ esp = 0;
41635+ wchan = 0;
41636+ }
41637+#endif
41638+#ifdef CONFIG_GRKERNSEC_HIDESYM
41639+ wchan = 0;
41640+ eip =0;
41641+ esp =0;
41642+#endif
41643+
41644 /* scale priority and nice values from timeslices to -20..20 */
41645 /* to make it look like a "normal" Unix priority/nice value */
41646 priority = task_prio(task);
41647@@ -473,9 +519,15 @@ static int do_task_stat(struct seq_file
41648 vsize,
41649 mm ? get_mm_rss(mm) : 0,
41650 rsslim,
41651+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41652+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
41653+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
41654+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
41655+#else
41656 mm ? (permitted ? mm->start_code : 1) : 0,
41657 mm ? (permitted ? mm->end_code : 1) : 0,
41658 (permitted && mm) ? mm->start_stack : 0,
41659+#endif
41660 esp,
41661 eip,
41662 /* The signal information here is obsolete.
41663@@ -528,3 +580,18 @@ int proc_pid_statm(struct seq_file *m, s
41664
41665 return 0;
41666 }
41667+
41668+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
41669+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
41670+{
41671+ u32 curr_ip = 0;
41672+ unsigned long flags;
41673+
41674+ if (lock_task_sighand(task, &flags)) {
41675+ curr_ip = task->signal->curr_ip;
41676+ unlock_task_sighand(task, &flags);
41677+ }
41678+
41679+ return sprintf(buffer, "%pI4\n", &curr_ip);
41680+}
41681+#endif
41682diff -urNp linux-2.6.32.41/fs/proc/base.c linux-2.6.32.41/fs/proc/base.c
41683--- linux-2.6.32.41/fs/proc/base.c 2011-04-22 19:16:29.000000000 -0400
41684+++ linux-2.6.32.41/fs/proc/base.c 2011-04-22 19:16:44.000000000 -0400
41685@@ -102,6 +102,22 @@ struct pid_entry {
41686 union proc_op op;
41687 };
41688
41689+struct getdents_callback {
41690+ struct linux_dirent __user * current_dir;
41691+ struct linux_dirent __user * previous;
41692+ struct file * file;
41693+ int count;
41694+ int error;
41695+};
41696+
41697+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
41698+ loff_t offset, u64 ino, unsigned int d_type)
41699+{
41700+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
41701+ buf->error = -EINVAL;
41702+ return 0;
41703+}
41704+
41705 #define NOD(NAME, MODE, IOP, FOP, OP) { \
41706 .name = (NAME), \
41707 .len = sizeof(NAME) - 1, \
41708@@ -213,6 +229,9 @@ static int check_mem_permission(struct t
41709 if (task == current)
41710 return 0;
41711
41712+ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
41713+ return -EPERM;
41714+
41715 /*
41716 * If current is actively ptrace'ing, and would also be
41717 * permitted to freshly attach with ptrace now, permit it.
41718@@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_
41719 if (!mm->arg_end)
41720 goto out_mm; /* Shh! No looking before we're done */
41721
41722+ if (gr_acl_handle_procpidmem(task))
41723+ goto out_mm;
41724+
41725 len = mm->arg_end - mm->arg_start;
41726
41727 if (len > PAGE_SIZE)
41728@@ -287,12 +309,28 @@ out:
41729 return res;
41730 }
41731
41732+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41733+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
41734+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
41735+ _mm->pax_flags & MF_PAX_SEGMEXEC))
41736+#endif
41737+
41738 static int proc_pid_auxv(struct task_struct *task, char *buffer)
41739 {
41740 int res = 0;
41741 struct mm_struct *mm = get_task_mm(task);
41742 if (mm) {
41743 unsigned int nwords = 0;
41744+
41745+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41746+ /* allow if we're currently ptracing this task */
41747+ if (PAX_RAND_FLAGS(mm) &&
41748+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
41749+ mmput(mm);
41750+ return res;
41751+ }
41752+#endif
41753+
41754 do {
41755 nwords += 2;
41756 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
41757@@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_str
41758 }
41759
41760
41761-#ifdef CONFIG_KALLSYMS
41762+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
41763 /*
41764 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
41765 * Returns the resolved symbol. If that fails, simply return the address.
41766@@ -328,7 +366,7 @@ static int proc_pid_wchan(struct task_st
41767 }
41768 #endif /* CONFIG_KALLSYMS */
41769
41770-#ifdef CONFIG_STACKTRACE
41771+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
41772
41773 #define MAX_STACK_TRACE_DEPTH 64
41774
41775@@ -522,7 +560,7 @@ static int proc_pid_limits(struct task_s
41776 return count;
41777 }
41778
41779-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
41780+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
41781 static int proc_pid_syscall(struct task_struct *task, char *buffer)
41782 {
41783 long nr;
41784@@ -547,7 +585,7 @@ static int proc_pid_syscall(struct task_
41785 /************************************************************************/
41786
41787 /* permission checks */
41788-static int proc_fd_access_allowed(struct inode *inode)
41789+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
41790 {
41791 struct task_struct *task;
41792 int allowed = 0;
41793@@ -557,7 +595,10 @@ static int proc_fd_access_allowed(struct
41794 */
41795 task = get_proc_task(inode);
41796 if (task) {
41797- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
41798+ if (log)
41799+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
41800+ else
41801+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
41802 put_task_struct(task);
41803 }
41804 return allowed;
41805@@ -936,6 +977,9 @@ static ssize_t environ_read(struct file
41806 if (!task)
41807 goto out_no_task;
41808
41809+ if (gr_acl_handle_procpidmem(task))
41810+ goto out;
41811+
41812 if (!ptrace_may_access(task, PTRACE_MODE_READ))
41813 goto out;
41814
41815@@ -1350,7 +1394,7 @@ static void *proc_pid_follow_link(struct
41816 path_put(&nd->path);
41817
41818 /* Are we allowed to snoop on the tasks file descriptors? */
41819- if (!proc_fd_access_allowed(inode))
41820+ if (!proc_fd_access_allowed(inode,0))
41821 goto out;
41822
41823 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
41824@@ -1390,8 +1434,18 @@ static int proc_pid_readlink(struct dent
41825 struct path path;
41826
41827 /* Are we allowed to snoop on the tasks file descriptors? */
41828- if (!proc_fd_access_allowed(inode))
41829- goto out;
41830+ /* logging this is needed for learning on chromium to work properly,
41831+ but we don't want to flood the logs from 'ps' which does a readlink
41832+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
41833+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
41834+ */
41835+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
41836+ if (!proc_fd_access_allowed(inode,0))
41837+ goto out;
41838+ } else {
41839+ if (!proc_fd_access_allowed(inode,1))
41840+ goto out;
41841+ }
41842
41843 error = PROC_I(inode)->op.proc_get_link(inode, &path);
41844 if (error)
41845@@ -1456,7 +1510,11 @@ static struct inode *proc_pid_make_inode
41846 rcu_read_lock();
41847 cred = __task_cred(task);
41848 inode->i_uid = cred->euid;
41849+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
41850+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
41851+#else
41852 inode->i_gid = cred->egid;
41853+#endif
41854 rcu_read_unlock();
41855 }
41856 security_task_to_inode(task, inode);
41857@@ -1474,6 +1532,9 @@ static int pid_getattr(struct vfsmount *
41858 struct inode *inode = dentry->d_inode;
41859 struct task_struct *task;
41860 const struct cred *cred;
41861+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
41862+ const struct cred *tmpcred = current_cred();
41863+#endif
41864
41865 generic_fillattr(inode, stat);
41866
41867@@ -1481,12 +1542,34 @@ static int pid_getattr(struct vfsmount *
41868 stat->uid = 0;
41869 stat->gid = 0;
41870 task = pid_task(proc_pid(inode), PIDTYPE_PID);
41871+
41872+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
41873+ rcu_read_unlock();
41874+ return -ENOENT;
41875+ }
41876+
41877 if (task) {
41878+ cred = __task_cred(task);
41879+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
41880+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
41881+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
41882+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
41883+#endif
41884+ )
41885+#endif
41886 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
41887+#ifdef CONFIG_GRKERNSEC_PROC_USER
41888+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
41889+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
41890+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
41891+#endif
41892 task_dumpable(task)) {
41893- cred = __task_cred(task);
41894 stat->uid = cred->euid;
41895+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
41896+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
41897+#else
41898 stat->gid = cred->egid;
41899+#endif
41900 }
41901 }
41902 rcu_read_unlock();
41903@@ -1518,11 +1601,20 @@ static int pid_revalidate(struct dentry
41904
41905 if (task) {
41906 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
41907+#ifdef CONFIG_GRKERNSEC_PROC_USER
41908+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
41909+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
41910+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
41911+#endif
41912 task_dumpable(task)) {
41913 rcu_read_lock();
41914 cred = __task_cred(task);
41915 inode->i_uid = cred->euid;
41916+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
41917+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
41918+#else
41919 inode->i_gid = cred->egid;
41920+#endif
41921 rcu_read_unlock();
41922 } else {
41923 inode->i_uid = 0;
41924@@ -1643,7 +1735,8 @@ static int proc_fd_info(struct inode *in
41925 int fd = proc_fd(inode);
41926
41927 if (task) {
41928- files = get_files_struct(task);
41929+ if (!gr_acl_handle_procpidmem(task))
41930+ files = get_files_struct(task);
41931 put_task_struct(task);
41932 }
41933 if (files) {
41934@@ -1895,12 +1988,22 @@ static const struct file_operations proc
41935 static int proc_fd_permission(struct inode *inode, int mask)
41936 {
41937 int rv;
41938+ struct task_struct *task;
41939
41940 rv = generic_permission(inode, mask, NULL);
41941- if (rv == 0)
41942- return 0;
41943+
41944 if (task_pid(current) == proc_pid(inode))
41945 rv = 0;
41946+
41947+ task = get_proc_task(inode);
41948+ if (task == NULL)
41949+ return rv;
41950+
41951+ if (gr_acl_handle_procpidmem(task))
41952+ rv = -EACCES;
41953+
41954+ put_task_struct(task);
41955+
41956 return rv;
41957 }
41958
41959@@ -2009,6 +2112,9 @@ static struct dentry *proc_pident_lookup
41960 if (!task)
41961 goto out_no_task;
41962
41963+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
41964+ goto out;
41965+
41966 /*
41967 * Yes, it does not scale. And it should not. Don't add
41968 * new entries into /proc/<tgid>/ without very good reasons.
41969@@ -2053,6 +2159,9 @@ static int proc_pident_readdir(struct fi
41970 if (!task)
41971 goto out_no_task;
41972
41973+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
41974+ goto out;
41975+
41976 ret = 0;
41977 i = filp->f_pos;
41978 switch (i) {
41979@@ -2320,7 +2429,7 @@ static void *proc_self_follow_link(struc
41980 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
41981 void *cookie)
41982 {
41983- char *s = nd_get_link(nd);
41984+ const char *s = nd_get_link(nd);
41985 if (!IS_ERR(s))
41986 __putname(s);
41987 }
41988@@ -2519,7 +2628,7 @@ static const struct pid_entry tgid_base_
41989 #ifdef CONFIG_SCHED_DEBUG
41990 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
41991 #endif
41992-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
41993+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
41994 INF("syscall", S_IRUSR, proc_pid_syscall),
41995 #endif
41996 INF("cmdline", S_IRUGO, proc_pid_cmdline),
41997@@ -2544,10 +2653,10 @@ static const struct pid_entry tgid_base_
41998 #ifdef CONFIG_SECURITY
41999 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
42000 #endif
42001-#ifdef CONFIG_KALLSYMS
42002+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42003 INF("wchan", S_IRUGO, proc_pid_wchan),
42004 #endif
42005-#ifdef CONFIG_STACKTRACE
42006+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42007 ONE("stack", S_IRUSR, proc_pid_stack),
42008 #endif
42009 #ifdef CONFIG_SCHEDSTATS
42010@@ -2577,6 +2686,9 @@ static const struct pid_entry tgid_base_
42011 #ifdef CONFIG_TASK_IO_ACCOUNTING
42012 INF("io", S_IRUGO, proc_tgid_io_accounting),
42013 #endif
42014+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
42015+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
42016+#endif
42017 };
42018
42019 static int proc_tgid_base_readdir(struct file * filp,
42020@@ -2701,7 +2813,14 @@ static struct dentry *proc_pid_instantia
42021 if (!inode)
42022 goto out;
42023
42024+#ifdef CONFIG_GRKERNSEC_PROC_USER
42025+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
42026+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42027+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42028+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
42029+#else
42030 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
42031+#endif
42032 inode->i_op = &proc_tgid_base_inode_operations;
42033 inode->i_fop = &proc_tgid_base_operations;
42034 inode->i_flags|=S_IMMUTABLE;
42035@@ -2743,7 +2862,11 @@ struct dentry *proc_pid_lookup(struct in
42036 if (!task)
42037 goto out;
42038
42039+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42040+ goto out_put_task;
42041+
42042 result = proc_pid_instantiate(dir, dentry, task, NULL);
42043+out_put_task:
42044 put_task_struct(task);
42045 out:
42046 return result;
42047@@ -2808,6 +2931,11 @@ int proc_pid_readdir(struct file * filp,
42048 {
42049 unsigned int nr;
42050 struct task_struct *reaper;
42051+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42052+ const struct cred *tmpcred = current_cred();
42053+ const struct cred *itercred;
42054+#endif
42055+ filldir_t __filldir = filldir;
42056 struct tgid_iter iter;
42057 struct pid_namespace *ns;
42058
42059@@ -2831,8 +2959,27 @@ int proc_pid_readdir(struct file * filp,
42060 for (iter = next_tgid(ns, iter);
42061 iter.task;
42062 iter.tgid += 1, iter = next_tgid(ns, iter)) {
42063+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42064+ rcu_read_lock();
42065+ itercred = __task_cred(iter.task);
42066+#endif
42067+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
42068+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42069+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
42070+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42071+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
42072+#endif
42073+ )
42074+#endif
42075+ )
42076+ __filldir = &gr_fake_filldir;
42077+ else
42078+ __filldir = filldir;
42079+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42080+ rcu_read_unlock();
42081+#endif
42082 filp->f_pos = iter.tgid + TGID_OFFSET;
42083- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
42084+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
42085 put_task_struct(iter.task);
42086 goto out;
42087 }
42088@@ -2858,7 +3005,7 @@ static const struct pid_entry tid_base_s
42089 #ifdef CONFIG_SCHED_DEBUG
42090 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
42091 #endif
42092-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
42093+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
42094 INF("syscall", S_IRUSR, proc_pid_syscall),
42095 #endif
42096 INF("cmdline", S_IRUGO, proc_pid_cmdline),
42097@@ -2882,10 +3029,10 @@ static const struct pid_entry tid_base_s
42098 #ifdef CONFIG_SECURITY
42099 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
42100 #endif
42101-#ifdef CONFIG_KALLSYMS
42102+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42103 INF("wchan", S_IRUGO, proc_pid_wchan),
42104 #endif
42105-#ifdef CONFIG_STACKTRACE
42106+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42107 ONE("stack", S_IRUSR, proc_pid_stack),
42108 #endif
42109 #ifdef CONFIG_SCHEDSTATS
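
The fs/proc/base.c hunks above thread an extra `log` argument through proc_fd_access_allowed() so the ptrace-style check can either audit a denial (ptrace_may_access_log) or stay silent (ptrace_may_access); the in-patch comment explains why a readlink on an fd named "2" is checked quietly, so that `ps` neither floods the logs nor teaches the learning mode CAP_SYS_PTRACE. Below is a minimal userspace sketch of that caller-selects-auditing pattern, not kernel code; the names may_access(), fd_access_allowed() and the uid-based rule are illustrative assumptions.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the real permission check (ptrace_may_access in the patch). */
static bool may_access(int requester_uid, int target_uid)
{
        return requester_uid == 0 || requester_uid == target_uid;
}

/* Stand-in for the logging variant (ptrace_may_access_log). */
static bool may_access_logged(int requester_uid, int target_uid)
{
        bool ok = may_access(requester_uid, target_uid);
        if (!ok)
                fprintf(stderr, "audit: uid %d denied access to uid %d\n",
                        requester_uid, target_uid);
        return ok;
}

/* One entry point, with the caller deciding whether a denial is worth
 * logging, mirroring proc_fd_access_allowed(inode, log). */
static bool fd_access_allowed(int requester_uid, int target_uid, bool log)
{
        return log ? may_access_logged(requester_uid, target_uid)
                   : may_access(requester_uid, target_uid);
}

int main(void)
{
        /* A routine probe, like ps reading /proc/<pid>/fd/2: checked quietly. */
        const char *name = "2";
        bool noisy = !(name[0] == '2' && name[1] == '\0');
        fd_access_allowed(1000, 0, noisy);

        /* Anything else is checked with auditing enabled. */
        fd_access_allowed(1000, 0, true);
        return 0;
}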
42110diff -urNp linux-2.6.32.41/fs/proc/cmdline.c linux-2.6.32.41/fs/proc/cmdline.c
42111--- linux-2.6.32.41/fs/proc/cmdline.c 2011-03-27 14:31:47.000000000 -0400
42112+++ linux-2.6.32.41/fs/proc/cmdline.c 2011-04-17 15:56:46.000000000 -0400
42113@@ -23,7 +23,11 @@ static const struct file_operations cmdl
42114
42115 static int __init proc_cmdline_init(void)
42116 {
42117+#ifdef CONFIG_GRKERNSEC_PROC_ADD
42118+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
42119+#else
42120 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
42121+#endif
42122 return 0;
42123 }
42124 module_init(proc_cmdline_init);
42125diff -urNp linux-2.6.32.41/fs/proc/devices.c linux-2.6.32.41/fs/proc/devices.c
42126--- linux-2.6.32.41/fs/proc/devices.c 2011-03-27 14:31:47.000000000 -0400
42127+++ linux-2.6.32.41/fs/proc/devices.c 2011-04-17 15:56:46.000000000 -0400
42128@@ -64,7 +64,11 @@ static const struct file_operations proc
42129
42130 static int __init proc_devices_init(void)
42131 {
42132+#ifdef CONFIG_GRKERNSEC_PROC_ADD
42133+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
42134+#else
42135 proc_create("devices", 0, NULL, &proc_devinfo_operations);
42136+#endif
42137 return 0;
42138 }
42139 module_init(proc_devices_init);
42140diff -urNp linux-2.6.32.41/fs/proc/inode.c linux-2.6.32.41/fs/proc/inode.c
42141--- linux-2.6.32.41/fs/proc/inode.c 2011-03-27 14:31:47.000000000 -0400
42142+++ linux-2.6.32.41/fs/proc/inode.c 2011-04-17 15:56:46.000000000 -0400
42143@@ -457,7 +457,11 @@ struct inode *proc_get_inode(struct supe
42144 if (de->mode) {
42145 inode->i_mode = de->mode;
42146 inode->i_uid = de->uid;
42147+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42148+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42149+#else
42150 inode->i_gid = de->gid;
42151+#endif
42152 }
42153 if (de->size)
42154 inode->i_size = de->size;
42155diff -urNp linux-2.6.32.41/fs/proc/internal.h linux-2.6.32.41/fs/proc/internal.h
42156--- linux-2.6.32.41/fs/proc/internal.h 2011-03-27 14:31:47.000000000 -0400
42157+++ linux-2.6.32.41/fs/proc/internal.h 2011-04-17 15:56:46.000000000 -0400
42158@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
42159 struct pid *pid, struct task_struct *task);
42160 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
42161 struct pid *pid, struct task_struct *task);
42162+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
42163+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
42164+#endif
42165 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
42166
42167 extern const struct file_operations proc_maps_operations;
42168diff -urNp linux-2.6.32.41/fs/proc/Kconfig linux-2.6.32.41/fs/proc/Kconfig
42169--- linux-2.6.32.41/fs/proc/Kconfig 2011-03-27 14:31:47.000000000 -0400
42170+++ linux-2.6.32.41/fs/proc/Kconfig 2011-04-17 15:56:46.000000000 -0400
42171@@ -30,12 +30,12 @@ config PROC_FS
42172
42173 config PROC_KCORE
42174 bool "/proc/kcore support" if !ARM
42175- depends on PROC_FS && MMU
42176+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
42177
42178 config PROC_VMCORE
42179 bool "/proc/vmcore support (EXPERIMENTAL)"
42180- depends on PROC_FS && CRASH_DUMP
42181- default y
42182+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
42183+ default n
42184 help
42185 Exports the dump image of crashed kernel in ELF format.
42186
42187@@ -59,8 +59,8 @@ config PROC_SYSCTL
42188 limited in memory.
42189
42190 config PROC_PAGE_MONITOR
42191- default y
42192- depends on PROC_FS && MMU
42193+ default n
42194+ depends on PROC_FS && MMU && !GRKERNSEC
42195 bool "Enable /proc page monitoring" if EMBEDDED
42196 help
42197 Various /proc files exist to monitor process memory utilization:
42198diff -urNp linux-2.6.32.41/fs/proc/kcore.c linux-2.6.32.41/fs/proc/kcore.c
42199--- linux-2.6.32.41/fs/proc/kcore.c 2011-03-27 14:31:47.000000000 -0400
42200+++ linux-2.6.32.41/fs/proc/kcore.c 2011-05-16 21:46:57.000000000 -0400
42201@@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bu
42202 off_t offset = 0;
42203 struct kcore_list *m;
42204
42205+ pax_track_stack();
42206+
42207 /* setup ELF header */
42208 elf = (struct elfhdr *) bufp;
42209 bufp += sizeof(struct elfhdr);
42210@@ -477,9 +479,10 @@ read_kcore(struct file *file, char __use
42211 * the addresses in the elf_phdr on our list.
42212 */
42213 start = kc_offset_to_vaddr(*fpos - elf_buflen);
42214- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
42215+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
42216+ if (tsz > buflen)
42217 tsz = buflen;
42218-
42219+
42220 while (buflen) {
42221 struct kcore_list *m;
42222
42223@@ -508,20 +511,23 @@ read_kcore(struct file *file, char __use
42224 kfree(elf_buf);
42225 } else {
42226 if (kern_addr_valid(start)) {
42227- unsigned long n;
42228+ char *elf_buf;
42229+ mm_segment_t oldfs;
42230
42231- n = copy_to_user(buffer, (char *)start, tsz);
42232- /*
42233- * We cannot distingush between fault on source
42234- * and fault on destination. When this happens
42235- * we clear too and hope it will trigger the
42236- * EFAULT again.
42237- */
42238- if (n) {
42239- if (clear_user(buffer + tsz - n,
42240- n))
42241+ elf_buf = kmalloc(tsz, GFP_KERNEL);
42242+ if (!elf_buf)
42243+ return -ENOMEM;
42244+ oldfs = get_fs();
42245+ set_fs(KERNEL_DS);
42246+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
42247+ set_fs(oldfs);
42248+ if (copy_to_user(buffer, elf_buf, tsz)) {
42249+ kfree(elf_buf);
42250 return -EFAULT;
42251+ }
42252 }
42253+ set_fs(oldfs);
42254+ kfree(elf_buf);
42255 } else {
42256 if (clear_user(buffer, tsz))
42257 return -EFAULT;
42258@@ -541,6 +547,9 @@ read_kcore(struct file *file, char __use
42259
42260 static int open_kcore(struct inode *inode, struct file *filp)
42261 {
42262+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
42263+ return -EPERM;
42264+#endif
42265 if (!capable(CAP_SYS_RAWIO))
42266 return -EPERM;
42267 if (kcore_need_update)
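
The read_kcore() hunk replaces a direct copy_to_user() from an arbitrary kernel virtual address with a two-step copy: the data is first read into a freshly allocated kernel buffer under set_fs(KERNEL_DS) and __copy_from_user(), then drained to the caller with copy_to_user(), so a fault on the source and a fault on the destination can be told apart (the removed comment notes the old code could not distinguish them); open_kcore() additionally refuses access outright under GRKERNSEC_PROC_ADD or HIDESYM. A hedged userspace model of the bounce-buffer step follows; plain memcpy stands in for both kernel copy primitives and bounce_copy() is an illustrative name.

#include <errno.h>
#include <stdlib.h>
#include <string.h>

/*
 * Copy `len` bytes from `src` to `dst` via a temporary buffer, so the
 * source and the destination are never touched by the same primitive.
 * In the patch, __copy_from_user() under KERNEL_DS fills the bounce
 * buffer and copy_to_user() drains it.
 */
static int bounce_copy(void *dst, const void *src, size_t len)
{
        void *tmp = malloc(len);

        if (!tmp)
                return -ENOMEM;

        memcpy(tmp, src, len);        /* "read kernel memory" side */
        memcpy(dst, tmp, len);        /* "write to user" side */

        free(tmp);
        return 0;
}

int main(void)
{
        char src[64] = "kernel data", dst[64];
        return bounce_copy(dst, src, sizeof(src));
}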
42268diff -urNp linux-2.6.32.41/fs/proc/meminfo.c linux-2.6.32.41/fs/proc/meminfo.c
42269--- linux-2.6.32.41/fs/proc/meminfo.c 2011-03-27 14:31:47.000000000 -0400
42270+++ linux-2.6.32.41/fs/proc/meminfo.c 2011-05-16 21:46:57.000000000 -0400
42271@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
42272 unsigned long pages[NR_LRU_LISTS];
42273 int lru;
42274
42275+ pax_track_stack();
42276+
42277 /*
42278 * display in kilobytes.
42279 */
42280@@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_
42281 vmi.used >> 10,
42282 vmi.largest_chunk >> 10
42283 #ifdef CONFIG_MEMORY_FAILURE
42284- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
42285+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
42286 #endif
42287 );
42288
42289diff -urNp linux-2.6.32.41/fs/proc/nommu.c linux-2.6.32.41/fs/proc/nommu.c
42290--- linux-2.6.32.41/fs/proc/nommu.c 2011-03-27 14:31:47.000000000 -0400
42291+++ linux-2.6.32.41/fs/proc/nommu.c 2011-04-17 15:56:46.000000000 -0400
42292@@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_
42293 if (len < 1)
42294 len = 1;
42295 seq_printf(m, "%*c", len, ' ');
42296- seq_path(m, &file->f_path, "");
42297+ seq_path(m, &file->f_path, "\n\\");
42298 }
42299
42300 seq_putc(m, '\n');
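
Here, and again in the task_mmu.c and task_nommu.c hunks further down, the third argument of seq_path() changes from "" or "\n" to "\n\\", i.e. the set of characters mangled before a path is printed; escaping the newline (and the backslash used for escaping) keeps a crafted file name from injecting an extra line into output that is parsed one record per line. A minimal userspace sketch of that kind of escaping is below; print_escaped() is illustrative, though the \ooo octal form matches what seq_file's path mangling produces.

#include <stdio.h>
#include <string.h>

/*
 * Print `path`, replacing any character found in `esc` with a \ooo octal
 * escape so the result always stays on one line.
 */
static void print_escaped(const char *path, const char *esc)
{
        for (; *path; path++) {
                if (strchr(esc, *path))
                        printf("\\%03o", (unsigned char)*path);
                else
                        putchar(*path);
        }
        putchar('\n');
}

int main(void)
{
        /* A file name containing a newline cannot fake a second record. */
        print_escaped("/tmp/evil\nname", "\n\\");
        return 0;
}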
42301diff -urNp linux-2.6.32.41/fs/proc/proc_net.c linux-2.6.32.41/fs/proc/proc_net.c
42302--- linux-2.6.32.41/fs/proc/proc_net.c 2011-03-27 14:31:47.000000000 -0400
42303+++ linux-2.6.32.41/fs/proc/proc_net.c 2011-04-17 15:56:46.000000000 -0400
42304@@ -104,6 +104,17 @@ static struct net *get_proc_task_net(str
42305 struct task_struct *task;
42306 struct nsproxy *ns;
42307 struct net *net = NULL;
42308+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42309+ const struct cred *cred = current_cred();
42310+#endif
42311+
42312+#ifdef CONFIG_GRKERNSEC_PROC_USER
42313+ if (cred->fsuid)
42314+ return net;
42315+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42316+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
42317+ return net;
42318+#endif
42319
42320 rcu_read_lock();
42321 task = pid_task(proc_pid(dir), PIDTYPE_PID);
42322diff -urNp linux-2.6.32.41/fs/proc/proc_sysctl.c linux-2.6.32.41/fs/proc/proc_sysctl.c
42323--- linux-2.6.32.41/fs/proc/proc_sysctl.c 2011-03-27 14:31:47.000000000 -0400
42324+++ linux-2.6.32.41/fs/proc/proc_sysctl.c 2011-04-17 15:56:46.000000000 -0400
42325@@ -7,6 +7,8 @@
42326 #include <linux/security.h>
42327 #include "internal.h"
42328
42329+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
42330+
42331 static const struct dentry_operations proc_sys_dentry_operations;
42332 static const struct file_operations proc_sys_file_operations;
42333 static const struct inode_operations proc_sys_inode_operations;
42334@@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(st
42335 if (!p)
42336 goto out;
42337
42338+ if (gr_handle_sysctl(p, MAY_EXEC))
42339+ goto out;
42340+
42341 err = ERR_PTR(-ENOMEM);
42342 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
42343 if (h)
42344@@ -228,6 +233,9 @@ static int scan(struct ctl_table_header
42345 if (*pos < file->f_pos)
42346 continue;
42347
42348+ if (gr_handle_sysctl(table, 0))
42349+ continue;
42350+
42351 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
42352 if (res)
42353 return res;
42354@@ -344,6 +352,9 @@ static int proc_sys_getattr(struct vfsmo
42355 if (IS_ERR(head))
42356 return PTR_ERR(head);
42357
42358+ if (table && gr_handle_sysctl(table, MAY_EXEC))
42359+ return -ENOENT;
42360+
42361 generic_fillattr(inode, stat);
42362 if (table)
42363 stat->mode = (stat->mode & S_IFMT) | table->mode;
42364diff -urNp linux-2.6.32.41/fs/proc/root.c linux-2.6.32.41/fs/proc/root.c
42365--- linux-2.6.32.41/fs/proc/root.c 2011-03-27 14:31:47.000000000 -0400
42366+++ linux-2.6.32.41/fs/proc/root.c 2011-04-17 15:56:46.000000000 -0400
42367@@ -134,7 +134,15 @@ void __init proc_root_init(void)
42368 #ifdef CONFIG_PROC_DEVICETREE
42369 proc_device_tree_init();
42370 #endif
42371+#ifdef CONFIG_GRKERNSEC_PROC_ADD
42372+#ifdef CONFIG_GRKERNSEC_PROC_USER
42373+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
42374+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42375+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
42376+#endif
42377+#else
42378 proc_mkdir("bus", NULL);
42379+#endif
42380 proc_sys_init();
42381 }
42382
42383diff -urNp linux-2.6.32.41/fs/proc/task_mmu.c linux-2.6.32.41/fs/proc/task_mmu.c
42384--- linux-2.6.32.41/fs/proc/task_mmu.c 2011-03-27 14:31:47.000000000 -0400
42385+++ linux-2.6.32.41/fs/proc/task_mmu.c 2011-04-23 13:38:09.000000000 -0400
42386@@ -46,15 +46,26 @@ void task_mem(struct seq_file *m, struct
42387 "VmStk:\t%8lu kB\n"
42388 "VmExe:\t%8lu kB\n"
42389 "VmLib:\t%8lu kB\n"
42390- "VmPTE:\t%8lu kB\n",
42391- hiwater_vm << (PAGE_SHIFT-10),
42392+ "VmPTE:\t%8lu kB\n"
42393+
42394+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
42395+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
42396+#endif
42397+
42398+ ,hiwater_vm << (PAGE_SHIFT-10),
42399 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
42400 mm->locked_vm << (PAGE_SHIFT-10),
42401 hiwater_rss << (PAGE_SHIFT-10),
42402 total_rss << (PAGE_SHIFT-10),
42403 data << (PAGE_SHIFT-10),
42404 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
42405- (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
42406+ (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
42407+
42408+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
42409+ , mm->context.user_cs_base, mm->context.user_cs_limit
42410+#endif
42411+
42412+ );
42413 }
42414
42415 unsigned long task_vsize(struct mm_struct *mm)
42416@@ -175,7 +186,8 @@ static void m_stop(struct seq_file *m, v
42417 struct proc_maps_private *priv = m->private;
42418 struct vm_area_struct *vma = v;
42419
42420- vma_stop(priv, vma);
42421+ if (!IS_ERR(vma))
42422+ vma_stop(priv, vma);
42423 if (priv->task)
42424 put_task_struct(priv->task);
42425 }
42426@@ -199,6 +211,12 @@ static int do_maps_open(struct inode *in
42427 return ret;
42428 }
42429
42430+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42431+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
42432+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
42433+ _mm->pax_flags & MF_PAX_SEGMEXEC))
42434+#endif
42435+
42436 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
42437 {
42438 struct mm_struct *mm = vma->vm_mm;
42439@@ -206,7 +224,6 @@ static void show_map_vma(struct seq_file
42440 int flags = vma->vm_flags;
42441 unsigned long ino = 0;
42442 unsigned long long pgoff = 0;
42443- unsigned long start;
42444 dev_t dev = 0;
42445 int len;
42446
42447@@ -217,20 +234,23 @@ static void show_map_vma(struct seq_file
42448 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
42449 }
42450
42451- /* We don't show the stack guard page in /proc/maps */
42452- start = vma->vm_start;
42453- if (vma->vm_flags & VM_GROWSDOWN)
42454- if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
42455- start += PAGE_SIZE;
42456-
42457 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
42458- start,
42459+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42460+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
42461+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
42462+#else
42463+ vma->vm_start,
42464 vma->vm_end,
42465+#endif
42466 flags & VM_READ ? 'r' : '-',
42467 flags & VM_WRITE ? 'w' : '-',
42468 flags & VM_EXEC ? 'x' : '-',
42469 flags & VM_MAYSHARE ? 's' : 'p',
42470+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42471+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
42472+#else
42473 pgoff,
42474+#endif
42475 MAJOR(dev), MINOR(dev), ino, &len);
42476
42477 /*
42478@@ -239,7 +259,7 @@ static void show_map_vma(struct seq_file
42479 */
42480 if (file) {
42481 pad_len_spaces(m, len);
42482- seq_path(m, &file->f_path, "\n");
42483+ seq_path(m, &file->f_path, "\n\\");
42484 } else {
42485 const char *name = arch_vma_name(vma);
42486 if (!name) {
42487@@ -247,8 +267,9 @@ static void show_map_vma(struct seq_file
42488 if (vma->vm_start <= mm->brk &&
42489 vma->vm_end >= mm->start_brk) {
42490 name = "[heap]";
42491- } else if (vma->vm_start <= mm->start_stack &&
42492- vma->vm_end >= mm->start_stack) {
42493+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
42494+ (vma->vm_start <= mm->start_stack &&
42495+ vma->vm_end >= mm->start_stack)) {
42496 name = "[stack]";
42497 }
42498 } else {
42499@@ -391,9 +412,16 @@ static int show_smap(struct seq_file *m,
42500 };
42501
42502 memset(&mss, 0, sizeof mss);
42503- mss.vma = vma;
42504- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
42505- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
42506+
42507+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42508+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
42509+#endif
42510+ mss.vma = vma;
42511+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
42512+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
42513+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42514+ }
42515+#endif
42516
42517 show_map_vma(m, vma);
42518
42519@@ -409,7 +437,11 @@ static int show_smap(struct seq_file *m,
42520 "Swap: %8lu kB\n"
42521 "KernelPageSize: %8lu kB\n"
42522 "MMUPageSize: %8lu kB\n",
42523+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42524+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
42525+#else
42526 (vma->vm_end - vma->vm_start) >> 10,
42527+#endif
42528 mss.resident >> 10,
42529 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
42530 mss.shared_clean >> 10,
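
With GRKERNSEC_PROC_MEMMAP enabled, show_map_vma() and show_smap() report 0 for the start, end, offset and size of mappings that belong to another task whose mm carries MF_PAX_RANDMMAP or MF_PAX_SEGMEXEC, so /proc/<pid>/maps cannot be used to read back another process's randomized layout; a task still sees real addresses for its own mm. A small userspace model of the conditional zeroing follows; struct fake_mm, the flag values and hide_layout() are simplified stand-ins, only the shape of the PAX_RAND_FLAGS() test is taken from the patch.

#include <stdbool.h>
#include <stdio.h>

struct fake_mm {
        unsigned long pax_flags;
};

#define MF_PAX_RANDMMAP  0x01UL
#define MF_PAX_SEGMEXEC  0x02UL

/* Mirror of the PAX_RAND_FLAGS() test: hide addresses of *other*
 * randomized address spaces, never our own. */
static bool hide_layout(const struct fake_mm *mm, const struct fake_mm *current_mm)
{
        return mm && mm != current_mm &&
               (mm->pax_flags & (MF_PAX_RANDMMAP | MF_PAX_SEGMEXEC));
}

static void show_vma(const struct fake_mm *mm, const struct fake_mm *current_mm,
                     unsigned long start, unsigned long end)
{
        bool hide = hide_layout(mm, current_mm);

        printf("%08lx-%08lx\n", hide ? 0UL : start, hide ? 0UL : end);
}

int main(void)
{
        struct fake_mm me = { 0 }, victim = { MF_PAX_RANDMMAP };

        show_vma(&me, &me, 0xb7700000UL, 0xb7701000UL);     /* own map: real */
        show_vma(&victim, &me, 0xb7700000UL, 0xb7701000UL); /* other: zeroed */
        return 0;
}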
42531diff -urNp linux-2.6.32.41/fs/proc/task_nommu.c linux-2.6.32.41/fs/proc/task_nommu.c
42532--- linux-2.6.32.41/fs/proc/task_nommu.c 2011-03-27 14:31:47.000000000 -0400
42533+++ linux-2.6.32.41/fs/proc/task_nommu.c 2011-04-17 15:56:46.000000000 -0400
42534@@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct
42535 else
42536 bytes += kobjsize(mm);
42537
42538- if (current->fs && current->fs->users > 1)
42539+ if (current->fs && atomic_read(&current->fs->users) > 1)
42540 sbytes += kobjsize(current->fs);
42541 else
42542 bytes += kobjsize(current->fs);
42543@@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_fil
42544 if (len < 1)
42545 len = 1;
42546 seq_printf(m, "%*c", len, ' ');
42547- seq_path(m, &file->f_path, "");
42548+ seq_path(m, &file->f_path, "\n\\");
42549 }
42550
42551 seq_putc(m, '\n');
42552diff -urNp linux-2.6.32.41/fs/readdir.c linux-2.6.32.41/fs/readdir.c
42553--- linux-2.6.32.41/fs/readdir.c 2011-03-27 14:31:47.000000000 -0400
42554+++ linux-2.6.32.41/fs/readdir.c 2011-04-17 15:56:46.000000000 -0400
42555@@ -16,6 +16,7 @@
42556 #include <linux/security.h>
42557 #include <linux/syscalls.h>
42558 #include <linux/unistd.h>
42559+#include <linux/namei.h>
42560
42561 #include <asm/uaccess.h>
42562
42563@@ -67,6 +68,7 @@ struct old_linux_dirent {
42564
42565 struct readdir_callback {
42566 struct old_linux_dirent __user * dirent;
42567+ struct file * file;
42568 int result;
42569 };
42570
42571@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
42572 buf->result = -EOVERFLOW;
42573 return -EOVERFLOW;
42574 }
42575+
42576+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
42577+ return 0;
42578+
42579 buf->result++;
42580 dirent = buf->dirent;
42581 if (!access_ok(VERIFY_WRITE, dirent,
42582@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
42583
42584 buf.result = 0;
42585 buf.dirent = dirent;
42586+ buf.file = file;
42587
42588 error = vfs_readdir(file, fillonedir, &buf);
42589 if (buf.result)
42590@@ -142,6 +149,7 @@ struct linux_dirent {
42591 struct getdents_callback {
42592 struct linux_dirent __user * current_dir;
42593 struct linux_dirent __user * previous;
42594+ struct file * file;
42595 int count;
42596 int error;
42597 };
42598@@ -162,6 +170,10 @@ static int filldir(void * __buf, const c
42599 buf->error = -EOVERFLOW;
42600 return -EOVERFLOW;
42601 }
42602+
42603+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
42604+ return 0;
42605+
42606 dirent = buf->previous;
42607 if (dirent) {
42608 if (__put_user(offset, &dirent->d_off))
42609@@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
42610 buf.previous = NULL;
42611 buf.count = count;
42612 buf.error = 0;
42613+ buf.file = file;
42614
42615 error = vfs_readdir(file, filldir, &buf);
42616 if (error >= 0)
42617@@ -228,6 +241,7 @@ out:
42618 struct getdents_callback64 {
42619 struct linux_dirent64 __user * current_dir;
42620 struct linux_dirent64 __user * previous;
42621+ struct file *file;
42622 int count;
42623 int error;
42624 };
42625@@ -242,6 +256,10 @@ static int filldir64(void * __buf, const
42626 buf->error = -EINVAL; /* only used if we fail.. */
42627 if (reclen > buf->count)
42628 return -EINVAL;
42629+
42630+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
42631+ return 0;
42632+
42633 dirent = buf->previous;
42634 if (dirent) {
42635 if (__put_user(offset, &dirent->d_off))
42636@@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
42637
42638 buf.current_dir = dirent;
42639 buf.previous = NULL;
42640+ buf.file = file;
42641 buf.count = count;
42642 buf.error = 0;
42643
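
Each of the three readdir callback structures in fs/readdir.c gains a `struct file *`, so fillonedir(), filldir() and filldir64() can hand the open directory to gr_acl_handle_filldir() and silently drop entries the policy hides: returning 0 from the callback skips the entry and keeps the iteration going, it is not treated as an error. A simplified userspace model of that shape follows; the entry list, struct fill_ctx and allow_entry() are illustrative assumptions.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct fill_ctx {
        const char *dir;   /* stands in for the stashed struct file * */
        int emitted;
};

/* Policy hook stand-in for gr_acl_handle_filldir(). */
static bool allow_entry(const struct fill_ctx *ctx, const char *name)
{
        (void)ctx;
        return strcmp(name, "hidden") != 0;
}

/* Returning 0 means "skip and keep iterating", like a filldir callback. */
static int fill_one(struct fill_ctx *ctx, const char *name)
{
        if (!allow_entry(ctx, name))
                return 0;

        printf("%s\n", name);
        ctx->emitted++;
        return 0;
}

int main(void)
{
        const char *entries[] = { ".", "..", "hidden", "visible" };
        struct fill_ctx ctx = { .dir = "/some/dir" };

        for (size_t i = 0; i < sizeof(entries) / sizeof(entries[0]); i++)
                if (fill_one(&ctx, entries[i]))
                        break;
        return 0;
}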
42644diff -urNp linux-2.6.32.41/fs/reiserfs/dir.c linux-2.6.32.41/fs/reiserfs/dir.c
42645--- linux-2.6.32.41/fs/reiserfs/dir.c 2011-03-27 14:31:47.000000000 -0400
42646+++ linux-2.6.32.41/fs/reiserfs/dir.c 2011-05-16 21:46:57.000000000 -0400
42647@@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
42648 struct reiserfs_dir_entry de;
42649 int ret = 0;
42650
42651+ pax_track_stack();
42652+
42653 reiserfs_write_lock(inode->i_sb);
42654
42655 reiserfs_check_lock_depth(inode->i_sb, "readdir");
42656diff -urNp linux-2.6.32.41/fs/reiserfs/do_balan.c linux-2.6.32.41/fs/reiserfs/do_balan.c
42657--- linux-2.6.32.41/fs/reiserfs/do_balan.c 2011-03-27 14:31:47.000000000 -0400
42658+++ linux-2.6.32.41/fs/reiserfs/do_balan.c 2011-04-17 15:56:46.000000000 -0400
42659@@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb,
42660 return;
42661 }
42662
42663- atomic_inc(&(fs_generation(tb->tb_sb)));
42664+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
42665 do_balance_starts(tb);
42666
42667 /* balance leaf returns 0 except if combining L R and S into
42668diff -urNp linux-2.6.32.41/fs/reiserfs/item_ops.c linux-2.6.32.41/fs/reiserfs/item_ops.c
42669--- linux-2.6.32.41/fs/reiserfs/item_ops.c 2011-03-27 14:31:47.000000000 -0400
42670+++ linux-2.6.32.41/fs/reiserfs/item_ops.c 2011-04-17 15:56:46.000000000 -0400
42671@@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_i
42672 vi->vi_index, vi->vi_type, vi->vi_ih);
42673 }
42674
42675-static struct item_operations stat_data_ops = {
42676+static const struct item_operations stat_data_ops = {
42677 .bytes_number = sd_bytes_number,
42678 .decrement_key = sd_decrement_key,
42679 .is_left_mergeable = sd_is_left_mergeable,
42680@@ -196,7 +196,7 @@ static void direct_print_vi(struct virtu
42681 vi->vi_index, vi->vi_type, vi->vi_ih);
42682 }
42683
42684-static struct item_operations direct_ops = {
42685+static const struct item_operations direct_ops = {
42686 .bytes_number = direct_bytes_number,
42687 .decrement_key = direct_decrement_key,
42688 .is_left_mergeable = direct_is_left_mergeable,
42689@@ -341,7 +341,7 @@ static void indirect_print_vi(struct vir
42690 vi->vi_index, vi->vi_type, vi->vi_ih);
42691 }
42692
42693-static struct item_operations indirect_ops = {
42694+static const struct item_operations indirect_ops = {
42695 .bytes_number = indirect_bytes_number,
42696 .decrement_key = indirect_decrement_key,
42697 .is_left_mergeable = indirect_is_left_mergeable,
42698@@ -628,7 +628,7 @@ static void direntry_print_vi(struct vir
42699 printk("\n");
42700 }
42701
42702-static struct item_operations direntry_ops = {
42703+static const struct item_operations direntry_ops = {
42704 .bytes_number = direntry_bytes_number,
42705 .decrement_key = direntry_decrement_key,
42706 .is_left_mergeable = direntry_is_left_mergeable,
42707@@ -724,7 +724,7 @@ static void errcatch_print_vi(struct vir
42708 "Invalid item type observed, run fsck ASAP");
42709 }
42710
42711-static struct item_operations errcatch_ops = {
42712+static const struct item_operations errcatch_ops = {
42713 errcatch_bytes_number,
42714 errcatch_decrement_key,
42715 errcatch_is_left_mergeable,
42716@@ -746,7 +746,7 @@ static struct item_operations errcatch_o
42717 #error Item types must use disk-format assigned values.
42718 #endif
42719
42720-struct item_operations *item_ops[TYPE_ANY + 1] = {
42721+const struct item_operations * const item_ops[TYPE_ANY + 1] = {
42722 &stat_data_ops,
42723 &indirect_ops,
42724 &direct_ops,
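
The reiserfs item_operations tables are only ever read, so the patch marks both the individual ops structures and the item_ops[] array of pointers const; as part of the wider constification effort in this patch, that lets the whole dispatch path live in read-only memory, where a function pointer can no longer be overwritten in place. A short standalone example of the same pattern; the ops structure and its functions are illustrative, only the const placement mirrors the patch.

#include <stddef.h>
#include <stdio.h>

struct item_ops {
        int (*bytes)(int blocks);
        const char *name;
};

static int direct_bytes(int blocks)   { return blocks * 512; }
static int indirect_bytes(int blocks) { return blocks * 4096; }

/* Both the structures and the table of pointers to them are const, so the
 * entire dispatch table can be placed in .rodata by the compiler/linker. */
static const struct item_ops direct_ops   = { direct_bytes,   "direct"   };
static const struct item_ops indirect_ops = { indirect_bytes, "indirect" };

static const struct item_ops *const item_ops[] = { &direct_ops, &indirect_ops };

int main(void)
{
        for (size_t i = 0; i < sizeof(item_ops) / sizeof(item_ops[0]); i++)
                printf("%s: %d\n", item_ops[i]->name, item_ops[i]->bytes(2));
        return 0;
}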
42725diff -urNp linux-2.6.32.41/fs/reiserfs/journal.c linux-2.6.32.41/fs/reiserfs/journal.c
42726--- linux-2.6.32.41/fs/reiserfs/journal.c 2011-03-27 14:31:47.000000000 -0400
42727+++ linux-2.6.32.41/fs/reiserfs/journal.c 2011-05-16 21:46:57.000000000 -0400
42728@@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_brea
42729 struct buffer_head *bh;
42730 int i, j;
42731
42732+ pax_track_stack();
42733+
42734 bh = __getblk(dev, block, bufsize);
42735 if (buffer_uptodate(bh))
42736 return (bh);
42737diff -urNp linux-2.6.32.41/fs/reiserfs/namei.c linux-2.6.32.41/fs/reiserfs/namei.c
42738--- linux-2.6.32.41/fs/reiserfs/namei.c 2011-03-27 14:31:47.000000000 -0400
42739+++ linux-2.6.32.41/fs/reiserfs/namei.c 2011-05-16 21:46:57.000000000 -0400
42740@@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode
42741 unsigned long savelink = 1;
42742 struct timespec ctime;
42743
42744+ pax_track_stack();
42745+
42746 /* three balancings: (1) old name removal, (2) new name insertion
42747 and (3) maybe "save" link insertion
42748 stat data updates: (1) old directory,
42749diff -urNp linux-2.6.32.41/fs/reiserfs/procfs.c linux-2.6.32.41/fs/reiserfs/procfs.c
42750--- linux-2.6.32.41/fs/reiserfs/procfs.c 2011-03-27 14:31:47.000000000 -0400
42751+++ linux-2.6.32.41/fs/reiserfs/procfs.c 2011-05-16 21:46:57.000000000 -0400
42752@@ -123,7 +123,7 @@ static int show_super(struct seq_file *m
42753 "SMALL_TAILS " : "NO_TAILS ",
42754 replay_only(sb) ? "REPLAY_ONLY " : "",
42755 convert_reiserfs(sb) ? "CONV " : "",
42756- atomic_read(&r->s_generation_counter),
42757+ atomic_read_unchecked(&r->s_generation_counter),
42758 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
42759 SF(s_do_balance), SF(s_unneeded_left_neighbor),
42760 SF(s_good_search_by_key_reada), SF(s_bmaps),
42761@@ -309,6 +309,8 @@ static int show_journal(struct seq_file
42762 struct journal_params *jp = &rs->s_v1.s_journal;
42763 char b[BDEVNAME_SIZE];
42764
42765+ pax_track_stack();
42766+
42767 seq_printf(m, /* on-disk fields */
42768 "jp_journal_1st_block: \t%i\n"
42769 "jp_journal_dev: \t%s[%x]\n"
42770diff -urNp linux-2.6.32.41/fs/reiserfs/stree.c linux-2.6.32.41/fs/reiserfs/stree.c
42771--- linux-2.6.32.41/fs/reiserfs/stree.c 2011-03-27 14:31:47.000000000 -0400
42772+++ linux-2.6.32.41/fs/reiserfs/stree.c 2011-05-16 21:46:57.000000000 -0400
42773@@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs
42774 int iter = 0;
42775 #endif
42776
42777+ pax_track_stack();
42778+
42779 BUG_ON(!th->t_trans_id);
42780
42781 init_tb_struct(th, &s_del_balance, sb, path,
42782@@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct r
42783 int retval;
42784 int quota_cut_bytes = 0;
42785
42786+ pax_track_stack();
42787+
42788 BUG_ON(!th->t_trans_id);
42789
42790 le_key2cpu_key(&cpu_key, key);
42791@@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiser
42792 int quota_cut_bytes;
42793 loff_t tail_pos = 0;
42794
42795+ pax_track_stack();
42796+
42797 BUG_ON(!th->t_trans_id);
42798
42799 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
42800@@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reis
42801 int retval;
42802 int fs_gen;
42803
42804+ pax_track_stack();
42805+
42806 BUG_ON(!th->t_trans_id);
42807
42808 fs_gen = get_generation(inode->i_sb);
42809@@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs
42810 int fs_gen = 0;
42811 int quota_bytes = 0;
42812
42813+ pax_track_stack();
42814+
42815 BUG_ON(!th->t_trans_id);
42816
42817 if (inode) { /* Do we count quotas for item? */
42818diff -urNp linux-2.6.32.41/fs/reiserfs/super.c linux-2.6.32.41/fs/reiserfs/super.c
42819--- linux-2.6.32.41/fs/reiserfs/super.c 2011-03-27 14:31:47.000000000 -0400
42820+++ linux-2.6.32.41/fs/reiserfs/super.c 2011-05-16 21:46:57.000000000 -0400
42821@@ -912,6 +912,8 @@ static int reiserfs_parse_options(struct
42822 {.option_name = NULL}
42823 };
42824
42825+ pax_track_stack();
42826+
42827 *blocks = 0;
42828 if (!options || !*options)
42829 /* use default configuration: create tails, journaling on, no
42830diff -urNp linux-2.6.32.41/fs/select.c linux-2.6.32.41/fs/select.c
42831--- linux-2.6.32.41/fs/select.c 2011-03-27 14:31:47.000000000 -0400
42832+++ linux-2.6.32.41/fs/select.c 2011-05-16 21:46:57.000000000 -0400
42833@@ -20,6 +20,7 @@
42834 #include <linux/module.h>
42835 #include <linux/slab.h>
42836 #include <linux/poll.h>
42837+#include <linux/security.h>
42838 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
42839 #include <linux/file.h>
42840 #include <linux/fdtable.h>
42841@@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, s
42842 int retval, i, timed_out = 0;
42843 unsigned long slack = 0;
42844
42845+ pax_track_stack();
42846+
42847 rcu_read_lock();
42848 retval = max_select_fd(n, fds);
42849 rcu_read_unlock();
42850@@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user
42851 /* Allocate small arguments on the stack to save memory and be faster */
42852 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
42853
42854+ pax_track_stack();
42855+
42856 ret = -EINVAL;
42857 if (n < 0)
42858 goto out_nofds;
42859@@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *uf
42860 struct poll_list *walk = head;
42861 unsigned long todo = nfds;
42862
42863+ pax_track_stack();
42864+
42865+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
42866 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
42867 return -EINVAL;
42868
42869diff -urNp linux-2.6.32.41/fs/seq_file.c linux-2.6.32.41/fs/seq_file.c
42870--- linux-2.6.32.41/fs/seq_file.c 2011-03-27 14:31:47.000000000 -0400
42871+++ linux-2.6.32.41/fs/seq_file.c 2011-04-17 15:56:46.000000000 -0400
42872@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
42873 return 0;
42874 }
42875 if (!m->buf) {
42876- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
42877+ m->size = PAGE_SIZE;
42878+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
42879 if (!m->buf)
42880 return -ENOMEM;
42881 }
42882@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
42883 Eoverflow:
42884 m->op->stop(m, p);
42885 kfree(m->buf);
42886- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
42887+ m->size <<= 1;
42888+ m->buf = kmalloc(m->size, GFP_KERNEL);
42889 return !m->buf ? -ENOMEM : -EAGAIN;
42890 }
42891
42892@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
42893 m->version = file->f_version;
42894 /* grab buffer if we didn't have one */
42895 if (!m->buf) {
42896- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
42897+ m->size = PAGE_SIZE;
42898+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
42899 if (!m->buf)
42900 goto Enomem;
42901 }
42902@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
42903 goto Fill;
42904 m->op->stop(m, p);
42905 kfree(m->buf);
42906- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
42907+ m->size <<= 1;
42908+ m->buf = kmalloc(m->size, GFP_KERNEL);
42909 if (!m->buf)
42910 goto Enomem;
42911 m->count = 0;
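
In both traverse() and seq_read() the buffer size used to be updated inside the kmalloc() argument (`kmalloc(m->size <<= 1, ...)`); the patch splits the size update and the allocation into separate statements, which avoids relying on an assignment's side effect inside the call while leaving the retry logic unchanged: on overflow, free the buffer, double the size, allocate again, try again. A minimal userspace version of the same grow-and-retry idiom; format_record() and the starting size are illustrative stand-ins for the seq_file show path and PAGE_SIZE.

#include <stdio.h>
#include <stdlib.h>

/* Pretend "show" callback: returns the bytes it needs, like snprintf. */
static int format_record(char *buf, size_t size)
{
        return snprintf(buf, size, "%s", "a record that may not fit at first");
}

static char *format_grow(size_t *sizep)
{
        size_t size = 32;                 /* stands in for PAGE_SIZE */
        char *buf = malloc(size);

        while (buf) {
                int need = format_record(buf, size);

                if (need >= 0 && (size_t)need < size)
                        break;            /* it fit */

                /* Did not fit: free, double the size, allocate, retry. */
                free(buf);
                size <<= 1;
                buf = malloc(size);
        }

        *sizep = size;
        return buf;
}

int main(void)
{
        size_t size;
        char *buf = format_grow(&size);

        if (buf) {
                printf("%zu: %s\n", size, buf);
                free(buf);
        }
        return 0;
}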
42912diff -urNp linux-2.6.32.41/fs/smbfs/symlink.c linux-2.6.32.41/fs/smbfs/symlink.c
42913--- linux-2.6.32.41/fs/smbfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
42914+++ linux-2.6.32.41/fs/smbfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
42915@@ -55,7 +55,7 @@ static void *smb_follow_link(struct dent
42916
42917 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
42918 {
42919- char *s = nd_get_link(nd);
42920+ const char *s = nd_get_link(nd);
42921 if (!IS_ERR(s))
42922 __putname(s);
42923 }
42924diff -urNp linux-2.6.32.41/fs/splice.c linux-2.6.32.41/fs/splice.c
42925--- linux-2.6.32.41/fs/splice.c 2011-03-27 14:31:47.000000000 -0400
42926+++ linux-2.6.32.41/fs/splice.c 2011-05-16 21:46:57.000000000 -0400
42927@@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode
42928 pipe_lock(pipe);
42929
42930 for (;;) {
42931- if (!pipe->readers) {
42932+ if (!atomic_read(&pipe->readers)) {
42933 send_sig(SIGPIPE, current, 0);
42934 if (!ret)
42935 ret = -EPIPE;
42936@@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode
42937 do_wakeup = 0;
42938 }
42939
42940- pipe->waiting_writers++;
42941+ atomic_inc(&pipe->waiting_writers);
42942 pipe_wait(pipe);
42943- pipe->waiting_writers--;
42944+ atomic_dec(&pipe->waiting_writers);
42945 }
42946
42947 pipe_unlock(pipe);
42948@@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *
42949 .spd_release = spd_release_page,
42950 };
42951
42952+ pax_track_stack();
42953+
42954 index = *ppos >> PAGE_CACHE_SHIFT;
42955 loff = *ppos & ~PAGE_CACHE_MASK;
42956 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
42957@@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file
42958 old_fs = get_fs();
42959 set_fs(get_ds());
42960 /* The cast to a user pointer is valid due to the set_fs() */
42961- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
42962+ res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
42963 set_fs(old_fs);
42964
42965 return res;
42966@@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file
42967 old_fs = get_fs();
42968 set_fs(get_ds());
42969 /* The cast to a user pointer is valid due to the set_fs() */
42970- res = vfs_write(file, (const char __user *)buf, count, &pos);
42971+ res = vfs_write(file, (__force const char __user *)buf, count, &pos);
42972 set_fs(old_fs);
42973
42974 return res;
42975@@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct
42976 .spd_release = spd_release_page,
42977 };
42978
42979+ pax_track_stack();
42980+
42981 index = *ppos >> PAGE_CACHE_SHIFT;
42982 offset = *ppos & ~PAGE_CACHE_MASK;
42983 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
42984@@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct
42985 goto err;
42986
42987 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
42988- vec[i].iov_base = (void __user *) page_address(page);
42989+ vec[i].iov_base = (__force void __user *) page_address(page);
42990 vec[i].iov_len = this_len;
42991 pages[i] = page;
42992 spd.nr_pages++;
42993@@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
42994 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
42995 {
42996 while (!pipe->nrbufs) {
42997- if (!pipe->writers)
42998+ if (!atomic_read(&pipe->writers))
42999 return 0;
43000
43001- if (!pipe->waiting_writers && sd->num_spliced)
43002+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
43003 return 0;
43004
43005 if (sd->flags & SPLICE_F_NONBLOCK)
43006@@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct fi
43007 * out of the pipe right after the splice_to_pipe(). So set
43008 * PIPE_READERS appropriately.
43009 */
43010- pipe->readers = 1;
43011+ atomic_set(&pipe->readers, 1);
43012
43013 current->splice_pipe = pipe;
43014 }
43015@@ -1592,6 +1596,8 @@ static long vmsplice_to_pipe(struct file
43016 .spd_release = spd_release_page,
43017 };
43018
43019+ pax_track_stack();
43020+
43021 pipe = pipe_info(file->f_path.dentry->d_inode);
43022 if (!pipe)
43023 return -EBADF;
43024@@ -1700,9 +1706,9 @@ static int ipipe_prep(struct pipe_inode_
43025 ret = -ERESTARTSYS;
43026 break;
43027 }
43028- if (!pipe->writers)
43029+ if (!atomic_read(&pipe->writers))
43030 break;
43031- if (!pipe->waiting_writers) {
43032+ if (!atomic_read(&pipe->waiting_writers)) {
43033 if (flags & SPLICE_F_NONBLOCK) {
43034 ret = -EAGAIN;
43035 break;
43036@@ -1734,7 +1740,7 @@ static int opipe_prep(struct pipe_inode_
43037 pipe_lock(pipe);
43038
43039 while (pipe->nrbufs >= PIPE_BUFFERS) {
43040- if (!pipe->readers) {
43041+ if (!atomic_read(&pipe->readers)) {
43042 send_sig(SIGPIPE, current, 0);
43043 ret = -EPIPE;
43044 break;
43045@@ -1747,9 +1753,9 @@ static int opipe_prep(struct pipe_inode_
43046 ret = -ERESTARTSYS;
43047 break;
43048 }
43049- pipe->waiting_writers++;
43050+ atomic_inc(&pipe->waiting_writers);
43051 pipe_wait(pipe);
43052- pipe->waiting_writers--;
43053+ atomic_dec(&pipe->waiting_writers);
43054 }
43055
43056 pipe_unlock(pipe);
43057@@ -1785,14 +1791,14 @@ retry:
43058 pipe_double_lock(ipipe, opipe);
43059
43060 do {
43061- if (!opipe->readers) {
43062+ if (!atomic_read(&opipe->readers)) {
43063 send_sig(SIGPIPE, current, 0);
43064 if (!ret)
43065 ret = -EPIPE;
43066 break;
43067 }
43068
43069- if (!ipipe->nrbufs && !ipipe->writers)
43070+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
43071 break;
43072
43073 /*
43074@@ -1892,7 +1898,7 @@ static int link_pipe(struct pipe_inode_i
43075 pipe_double_lock(ipipe, opipe);
43076
43077 do {
43078- if (!opipe->readers) {
43079+ if (!atomic_read(&opipe->readers)) {
43080 send_sig(SIGPIPE, current, 0);
43081 if (!ret)
43082 ret = -EPIPE;
43083@@ -1937,7 +1943,7 @@ static int link_pipe(struct pipe_inode_i
43084 * return EAGAIN if we have the potential of some data in the
43085 * future, otherwise just return 0
43086 */
43087- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
43088+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
43089 ret = -EAGAIN;
43090
43091 pipe_unlock(ipipe);
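
Throughout fs/splice.c the plain integer fields pipe->readers, pipe->writers and pipe->waiting_writers become atomic_t, read and updated with atomic_read()/atomic_inc()/atomic_dec(); the __force casts in kernel_readv() and kernel_write() separately annotate the deliberate user-pointer casts performed under set_fs(). The tiny C11 sketch below models only the counter change; the waiter logic is heavily simplified and illustrative.

#include <stdatomic.h>
#include <stdio.h>

/* Counters touched from several contexts are kept atomic, mirroring
 * atomic_read()/atomic_inc()/atomic_dec() in the patch. */
static atomic_int waiting_writers;
static atomic_int readers = 1;

static void writer_waits(void)
{
        atomic_fetch_add(&waiting_writers, 1);
        /* ... would block here until a reader drains the pipe ... */
        atomic_fetch_sub(&waiting_writers, 1);
}

int main(void)
{
        if (atomic_load(&readers) == 0) {
                fprintf(stderr, "no readers: would raise SIGPIPE\n");
                return 1;
        }
        writer_waits();
        printf("waiting writers now: %d\n", atomic_load(&waiting_writers));
        return 0;
}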
43092diff -urNp linux-2.6.32.41/fs/sysfs/file.c linux-2.6.32.41/fs/sysfs/file.c
43093--- linux-2.6.32.41/fs/sysfs/file.c 2011-03-27 14:31:47.000000000 -0400
43094+++ linux-2.6.32.41/fs/sysfs/file.c 2011-05-04 17:56:20.000000000 -0400
43095@@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
43096
43097 struct sysfs_open_dirent {
43098 atomic_t refcnt;
43099- atomic_t event;
43100+ atomic_unchecked_t event;
43101 wait_queue_head_t poll;
43102 struct list_head buffers; /* goes through sysfs_buffer.list */
43103 };
43104@@ -53,7 +53,7 @@ struct sysfs_buffer {
43105 size_t count;
43106 loff_t pos;
43107 char * page;
43108- struct sysfs_ops * ops;
43109+ const struct sysfs_ops * ops;
43110 struct mutex mutex;
43111 int needs_read_fill;
43112 int event;
43113@@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentr
43114 {
43115 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
43116 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
43117- struct sysfs_ops * ops = buffer->ops;
43118+ const struct sysfs_ops * ops = buffer->ops;
43119 int ret = 0;
43120 ssize_t count;
43121
43122@@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentr
43123 if (!sysfs_get_active_two(attr_sd))
43124 return -ENODEV;
43125
43126- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
43127+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
43128 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
43129
43130 sysfs_put_active_two(attr_sd);
43131@@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentr
43132 {
43133 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
43134 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
43135- struct sysfs_ops * ops = buffer->ops;
43136+ const struct sysfs_ops * ops = buffer->ops;
43137 int rc;
43138
43139 /* need attr_sd for attr and ops, its parent for kobj */
43140@@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct
43141 return -ENOMEM;
43142
43143 atomic_set(&new_od->refcnt, 0);
43144- atomic_set(&new_od->event, 1);
43145+ atomic_set_unchecked(&new_od->event, 1);
43146 init_waitqueue_head(&new_od->poll);
43147 INIT_LIST_HEAD(&new_od->buffers);
43148 goto retry;
43149@@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode
43150 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
43151 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
43152 struct sysfs_buffer *buffer;
43153- struct sysfs_ops *ops;
43154+ const struct sysfs_ops *ops;
43155 int error = -EACCES;
43156 char *p;
43157
43158@@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct fi
43159
43160 sysfs_put_active_two(attr_sd);
43161
43162- if (buffer->event != atomic_read(&od->event))
43163+ if (buffer->event != atomic_read_unchecked(&od->event))
43164 goto trigger;
43165
43166 return DEFAULT_POLLMASK;
43167@@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_di
43168
43169 od = sd->s_attr.open;
43170 if (od) {
43171- atomic_inc(&od->event);
43172+ atomic_inc_unchecked(&od->event);
43173 wake_up_interruptible(&od->poll);
43174 }
43175
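
The sysfs open-dirent event counter switches from atomic_t to atomic_unchecked_t with the matching *_unchecked accessors. Under PaX's reference-counter overflow protection plain atomic_t is guarded against wrapping, so counters that are mere sequence numbers, like this poll-event counter, are explicitly marked as exempt. The C11 sketch below shows how such a wrapping sequence number is used to detect "changed since I last looked"; the structure and names are illustrative, and atomic_unchecked_t itself is a PaX-specific type with no userspace equivalent.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* A wrapping event sequence number: only equality matters, so overflow is
 * harmless and needs no refcount-style protection. */
static atomic_uint event = 1;

struct watcher {
        unsigned int seen;       /* snapshot taken when the file was read */
};

static void attribute_changed(void)
{
        atomic_fetch_add(&event, 1);            /* like sysfs_notify_dirent() */
}

static bool needs_refresh(const struct watcher *w)
{
        return w->seen != atomic_load(&event);  /* like the sysfs_poll() check */
}

int main(void)
{
        struct watcher w = { .seen = atomic_load(&event) };

        attribute_changed();
        printf("refresh needed: %s\n", needs_refresh(&w) ? "yes" : "no");
        return 0;
}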
43176diff -urNp linux-2.6.32.41/fs/sysfs/mount.c linux-2.6.32.41/fs/sysfs/mount.c
43177--- linux-2.6.32.41/fs/sysfs/mount.c 2011-03-27 14:31:47.000000000 -0400
43178+++ linux-2.6.32.41/fs/sysfs/mount.c 2011-04-17 15:56:46.000000000 -0400
43179@@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
43180 .s_name = "",
43181 .s_count = ATOMIC_INIT(1),
43182 .s_flags = SYSFS_DIR,
43183+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
43184+ .s_mode = S_IFDIR | S_IRWXU,
43185+#else
43186 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
43187+#endif
43188 .s_ino = 1,
43189 };
43190
43191diff -urNp linux-2.6.32.41/fs/sysfs/symlink.c linux-2.6.32.41/fs/sysfs/symlink.c
43192--- linux-2.6.32.41/fs/sysfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
43193+++ linux-2.6.32.41/fs/sysfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
43194@@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct de
43195
43196 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
43197 {
43198- char *page = nd_get_link(nd);
43199+ const char *page = nd_get_link(nd);
43200 if (!IS_ERR(page))
43201 free_page((unsigned long)page);
43202 }
43203diff -urNp linux-2.6.32.41/fs/udf/balloc.c linux-2.6.32.41/fs/udf/balloc.c
43204--- linux-2.6.32.41/fs/udf/balloc.c 2011-03-27 14:31:47.000000000 -0400
43205+++ linux-2.6.32.41/fs/udf/balloc.c 2011-04-17 15:56:46.000000000 -0400
43206@@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struc
43207
43208 mutex_lock(&sbi->s_alloc_mutex);
43209 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
43210- if (bloc->logicalBlockNum < 0 ||
43211- (bloc->logicalBlockNum + count) >
43212- partmap->s_partition_len) {
43213+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
43214 udf_debug("%d < %d || %d + %d > %d\n",
43215 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
43216 count, partmap->s_partition_len);
43217@@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct
43218
43219 mutex_lock(&sbi->s_alloc_mutex);
43220 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
43221- if (bloc->logicalBlockNum < 0 ||
43222- (bloc->logicalBlockNum + count) >
43223- partmap->s_partition_len) {
43224+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
43225 udf_debug("%d < %d || %d + %d > %d\n",
43226 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
43227 partmap->s_partition_len);
43228diff -urNp linux-2.6.32.41/fs/udf/inode.c linux-2.6.32.41/fs/udf/inode.c
43229--- linux-2.6.32.41/fs/udf/inode.c 2011-03-27 14:31:47.000000000 -0400
43230+++ linux-2.6.32.41/fs/udf/inode.c 2011-05-16 21:46:57.000000000 -0400
43231@@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(
43232 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
43233 int lastblock = 0;
43234
43235+ pax_track_stack();
43236+
43237 prev_epos.offset = udf_file_entry_alloc_offset(inode);
43238 prev_epos.block = iinfo->i_location;
43239 prev_epos.bh = NULL;
43240diff -urNp linux-2.6.32.41/fs/udf/misc.c linux-2.6.32.41/fs/udf/misc.c
43241--- linux-2.6.32.41/fs/udf/misc.c 2011-03-27 14:31:47.000000000 -0400
43242+++ linux-2.6.32.41/fs/udf/misc.c 2011-04-23 12:56:11.000000000 -0400
43243@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
43244
43245 u8 udf_tag_checksum(const struct tag *t)
43246 {
43247- u8 *data = (u8 *)t;
43248+ const u8 *data = (const u8 *)t;
43249 u8 checksum = 0;
43250 int i;
43251 for (i = 0; i < sizeof(struct tag); ++i)
43252diff -urNp linux-2.6.32.41/fs/utimes.c linux-2.6.32.41/fs/utimes.c
43253--- linux-2.6.32.41/fs/utimes.c 2011-03-27 14:31:47.000000000 -0400
43254+++ linux-2.6.32.41/fs/utimes.c 2011-04-17 15:56:46.000000000 -0400
43255@@ -1,6 +1,7 @@
43256 #include <linux/compiler.h>
43257 #include <linux/file.h>
43258 #include <linux/fs.h>
43259+#include <linux/security.h>
43260 #include <linux/linkage.h>
43261 #include <linux/mount.h>
43262 #include <linux/namei.h>
43263@@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
43264 goto mnt_drop_write_and_out;
43265 }
43266 }
43267+
43268+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
43269+ error = -EACCES;
43270+ goto mnt_drop_write_and_out;
43271+ }
43272+
43273 mutex_lock(&inode->i_mutex);
43274 error = notify_change(path->dentry, &newattrs);
43275 mutex_unlock(&inode->i_mutex);
43276diff -urNp linux-2.6.32.41/fs/xattr_acl.c linux-2.6.32.41/fs/xattr_acl.c
43277--- linux-2.6.32.41/fs/xattr_acl.c 2011-03-27 14:31:47.000000000 -0400
43278+++ linux-2.6.32.41/fs/xattr_acl.c 2011-04-17 15:56:46.000000000 -0400
43279@@ -17,8 +17,8 @@
43280 struct posix_acl *
43281 posix_acl_from_xattr(const void *value, size_t size)
43282 {
43283- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
43284- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
43285+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
43286+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
43287 int count;
43288 struct posix_acl *acl;
43289 struct posix_acl_entry *acl_e;
43290diff -urNp linux-2.6.32.41/fs/xattr.c linux-2.6.32.41/fs/xattr.c
43291--- linux-2.6.32.41/fs/xattr.c 2011-03-27 14:31:47.000000000 -0400
43292+++ linux-2.6.32.41/fs/xattr.c 2011-04-17 15:56:46.000000000 -0400
43293@@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
43294 * Extended attribute SET operations
43295 */
43296 static long
43297-setxattr(struct dentry *d, const char __user *name, const void __user *value,
43298+setxattr(struct path *path, const char __user *name, const void __user *value,
43299 size_t size, int flags)
43300 {
43301 int error;
43302@@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __
43303 return PTR_ERR(kvalue);
43304 }
43305
43306- error = vfs_setxattr(d, kname, kvalue, size, flags);
43307+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
43308+ error = -EACCES;
43309+ goto out;
43310+ }
43311+
43312+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
43313+out:
43314 kfree(kvalue);
43315 return error;
43316 }
43317@@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
43318 return error;
43319 error = mnt_want_write(path.mnt);
43320 if (!error) {
43321- error = setxattr(path.dentry, name, value, size, flags);
43322+ error = setxattr(&path, name, value, size, flags);
43323 mnt_drop_write(path.mnt);
43324 }
43325 path_put(&path);
43326@@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
43327 return error;
43328 error = mnt_want_write(path.mnt);
43329 if (!error) {
43330- error = setxattr(path.dentry, name, value, size, flags);
43331+ error = setxattr(&path, name, value, size, flags);
43332 mnt_drop_write(path.mnt);
43333 }
43334 path_put(&path);
43335@@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
43336 const void __user *,value, size_t, size, int, flags)
43337 {
43338 struct file *f;
43339- struct dentry *dentry;
43340 int error = -EBADF;
43341
43342 f = fget(fd);
43343 if (!f)
43344 return error;
43345- dentry = f->f_path.dentry;
43346- audit_inode(NULL, dentry);
43347+ audit_inode(NULL, f->f_path.dentry);
43348 error = mnt_want_write_file(f);
43349 if (!error) {
43350- error = setxattr(dentry, name, value, size, flags);
43351+ error = setxattr(&f->f_path, name, value, size, flags);
43352 mnt_drop_write(f->f_path.mnt);
43353 }
43354 fput(f);
43355diff -urNp linux-2.6.32.41/fs/xfs/linux-2.6/xfs_ioctl32.c linux-2.6.32.41/fs/xfs/linux-2.6/xfs_ioctl32.c
43356--- linux-2.6.32.41/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-03-27 14:31:47.000000000 -0400
43357+++ linux-2.6.32.41/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-04-17 15:56:46.000000000 -0400
43358@@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
43359 xfs_fsop_geom_t fsgeo;
43360 int error;
43361
43362+ memset(&fsgeo, 0, sizeof(fsgeo));
43363 error = xfs_fs_geometry(mp, &fsgeo, 3);
43364 if (error)
43365 return -error;
43366diff -urNp linux-2.6.32.41/fs/xfs/linux-2.6/xfs_ioctl.c linux-2.6.32.41/fs/xfs/linux-2.6/xfs_ioctl.c
43367--- linux-2.6.32.41/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 17:00:52.000000000 -0400
43368+++ linux-2.6.32.41/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 20:07:09.000000000 -0400
43369@@ -134,7 +134,7 @@ xfs_find_handle(
43370 }
43371
43372 error = -EFAULT;
43373- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
43374+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
43375 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
43376 goto out_put;
43377
43378@@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
43379 if (IS_ERR(dentry))
43380 return PTR_ERR(dentry);
43381
43382- kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
43383+ kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
43384 if (!kbuf)
43385 goto out_dput;
43386
43387@@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
43388 xfs_mount_t *mp,
43389 void __user *arg)
43390 {
43391- xfs_fsop_geom_t fsgeo;
43392+ xfs_fsop_geom_t fsgeo;
43393 int error;
43394
43395 error = xfs_fs_geometry(mp, &fsgeo, 3);
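The xfs_find_handle() hunk bounds the reported handle size against sizeof(handle) before the copy_to_user(), and the attrlist buffer switches from kmalloc() to kzalloc() so unwritten bytes cannot leak out. A small sketch of the length check; copy_handle_out() is a hypothetical stand-in for the copy-out call, not a kernel function.

#include <errno.h>
#include <stdio.h>
#include <string.h>

static int copy_handle_out(void *dst, size_t dst_len,
                           const void *src, size_t src_size)
{
	/* refuse to read past the end of src, mirroring the
	 * "hsize > sizeof handle" guard added in the hunk above */
	if (dst_len > src_size)
		return -EINVAL;
	memcpy(dst, src, dst_len);
	return 0;
}

int main(void)
{
	char handle[16] = "example-handle";
	char out[64];

	printf("%d\n", copy_handle_out(out, 8,  handle, sizeof(handle)));  /* 0 */
	printf("%d\n", copy_handle_out(out, 64, handle, sizeof(handle)));  /* -EINVAL */
	return 0;
}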
43396diff -urNp linux-2.6.32.41/fs/xfs/linux-2.6/xfs_iops.c linux-2.6.32.41/fs/xfs/linux-2.6/xfs_iops.c
43397--- linux-2.6.32.41/fs/xfs/linux-2.6/xfs_iops.c 2011-03-27 14:31:47.000000000 -0400
43398+++ linux-2.6.32.41/fs/xfs/linux-2.6/xfs_iops.c 2011-04-17 15:56:46.000000000 -0400
43399@@ -468,7 +468,7 @@ xfs_vn_put_link(
43400 struct nameidata *nd,
43401 void *p)
43402 {
43403- char *s = nd_get_link(nd);
43404+ const char *s = nd_get_link(nd);
43405
43406 if (!IS_ERR(s))
43407 kfree(s);
43408diff -urNp linux-2.6.32.41/fs/xfs/xfs_bmap.c linux-2.6.32.41/fs/xfs/xfs_bmap.c
43409--- linux-2.6.32.41/fs/xfs/xfs_bmap.c 2011-03-27 14:31:47.000000000 -0400
43410+++ linux-2.6.32.41/fs/xfs/xfs_bmap.c 2011-04-17 15:56:46.000000000 -0400
43411@@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
43412 int nmap,
43413 int ret_nmap);
43414 #else
43415-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
43416+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
43417 #endif /* DEBUG */
43418
43419 #if defined(XFS_RW_TRACE)
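With DEBUG off, xfs_bmap_validate_ret() now expands to do {} while (0) instead of nothing, so a call followed by a semicolon behaves as one ordinary statement in every context. A standalone illustration of the idiom: a truly empty expansion collapses the call to a bare ';' (tripping empty-body warnings) and even allows the macro to be used without a trailing semicolon, while the do/while form always demands and consumes exactly one.

#include <stdio.h>

#ifdef DEBUG
#define validate_ret(x)  printf("validate %d\n", (x))
#else
#define validate_ret(x)  do {} while (0)     /* the idiom the hunk adopts */
#endif

int main(void)
{
	int n = 3;

	if (n > 0)
		validate_ret(n);     /* reads and compiles the same in both builds */
	else
		printf("nothing to validate\n");

	return 0;
}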
43420diff -urNp linux-2.6.32.41/fs/xfs/xfs_dir2_sf.c linux-2.6.32.41/fs/xfs/xfs_dir2_sf.c
43421--- linux-2.6.32.41/fs/xfs/xfs_dir2_sf.c 2011-03-27 14:31:47.000000000 -0400
43422+++ linux-2.6.32.41/fs/xfs/xfs_dir2_sf.c 2011-04-18 22:07:30.000000000 -0400
43423@@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
43424 }
43425
43426 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
43427- if (filldir(dirent, sfep->name, sfep->namelen,
43428+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
43429+ char name[sfep->namelen];
43430+ memcpy(name, sfep->name, sfep->namelen);
43431+ if (filldir(dirent, name, sfep->namelen,
43432+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
43433+ *offset = off & 0x7fffffff;
43434+ return 0;
43435+ }
43436+ } else if (filldir(dirent, sfep->name, sfep->namelen,
43437 off & 0x7fffffff, ino, DT_UNKNOWN)) {
43438 *offset = off & 0x7fffffff;
43439 return 0;
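When the shortform directory data lives inline in the inode fork, the hunk above copies each entry name into a local buffer before passing it to filldir(), so the pointer handed to the copy-out path is a plain stack object of exactly namelen bytes rather than an interior pointer into the inode. A standalone sketch of that bounce copy; the structures below are invented for illustration only.

#include <stdio.h>
#include <string.h>

/* hypothetical stand-in for filldir(): emits `len` bytes of `name` */
static int emit_name(const char *name, size_t len)
{
	printf("%.*s\n", (int)len, name);
	return 0;
}

struct inline_dirent {          /* hypothetical shortform entry */
	unsigned char namelen;
	char name[8];               /* name is embedded, not NUL-terminated */
};

int main(void)
{
	struct inline_dirent e = { 5, {'h', 'e', 'l', 'l', 'o', 'x', 'x', 'x'} };
	char name[sizeof(e.name)];

	memcpy(name, e.name, e.namelen);   /* bounce copy, as in the hunk */
	return emit_name(name, e.namelen);
}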
43440diff -urNp linux-2.6.32.41/grsecurity/gracl_alloc.c linux-2.6.32.41/grsecurity/gracl_alloc.c
43441--- linux-2.6.32.41/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
43442+++ linux-2.6.32.41/grsecurity/gracl_alloc.c 2011-04-17 15:56:46.000000000 -0400
43443@@ -0,0 +1,105 @@
43444+#include <linux/kernel.h>
43445+#include <linux/mm.h>
43446+#include <linux/slab.h>
43447+#include <linux/vmalloc.h>
43448+#include <linux/gracl.h>
43449+#include <linux/grsecurity.h>
43450+
43451+static unsigned long alloc_stack_next = 1;
43452+static unsigned long alloc_stack_size = 1;
43453+static void **alloc_stack;
43454+
43455+static __inline__ int
43456+alloc_pop(void)
43457+{
43458+ if (alloc_stack_next == 1)
43459+ return 0;
43460+
43461+ kfree(alloc_stack[alloc_stack_next - 2]);
43462+
43463+ alloc_stack_next--;
43464+
43465+ return 1;
43466+}
43467+
43468+static __inline__ int
43469+alloc_push(void *buf)
43470+{
43471+ if (alloc_stack_next >= alloc_stack_size)
43472+ return 1;
43473+
43474+ alloc_stack[alloc_stack_next - 1] = buf;
43475+
43476+ alloc_stack_next++;
43477+
43478+ return 0;
43479+}
43480+
43481+void *
43482+acl_alloc(unsigned long len)
43483+{
43484+ void *ret = NULL;
43485+
43486+ if (!len || len > PAGE_SIZE)
43487+ goto out;
43488+
43489+ ret = kmalloc(len, GFP_KERNEL);
43490+
43491+ if (ret) {
43492+ if (alloc_push(ret)) {
43493+ kfree(ret);
43494+ ret = NULL;
43495+ }
43496+ }
43497+
43498+out:
43499+ return ret;
43500+}
43501+
43502+void *
43503+acl_alloc_num(unsigned long num, unsigned long len)
43504+{
43505+ if (!len || (num > (PAGE_SIZE / len)))
43506+ return NULL;
43507+
43508+ return acl_alloc(num * len);
43509+}
43510+
43511+void
43512+acl_free_all(void)
43513+{
43514+ if (gr_acl_is_enabled() || !alloc_stack)
43515+ return;
43516+
43517+ while (alloc_pop()) ;
43518+
43519+ if (alloc_stack) {
43520+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
43521+ kfree(alloc_stack);
43522+ else
43523+ vfree(alloc_stack);
43524+ }
43525+
43526+ alloc_stack = NULL;
43527+ alloc_stack_size = 1;
43528+ alloc_stack_next = 1;
43529+
43530+ return;
43531+}
43532+
43533+int
43534+acl_alloc_stack_init(unsigned long size)
43535+{
43536+ if ((size * sizeof (void *)) <= PAGE_SIZE)
43537+ alloc_stack =
43538+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
43539+ else
43540+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
43541+
43542+ alloc_stack_size = size;
43543+
43544+ if (!alloc_stack)
43545+ return 0;
43546+ else
43547+ return 1;
43548+}
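gracl_alloc.c above is a one-shot allocator: acl_alloc_stack_init() sizes a pointer stack, acl_alloc() records every kmalloc() it hands out, and acl_free_all() tears everything down in a single pass. A simplified userspace re-implementation of the same idea (not the kernel code itself) is sketched below; because nothing is freed individually, the copy_user_* routines later in gracl.c can simply return on error and let free_variables() release the lot.

#include <stdio.h>
#include <stdlib.h>

static void **stack;
static unsigned long next, cap;

static int stack_init(unsigned long size)
{
	stack = calloc(size, sizeof(void *));
	cap = size;
	next = 0;
	return stack != NULL;
}

static void *tracked_alloc(size_t len)
{
	void *p;

	if (!len || next >= cap)
		return NULL;
	p = malloc(len);
	if (p)
		stack[next++] = p;      /* remember it for bulk teardown */
	return p;
}

static void free_all(void)
{
	while (next)
		free(stack[--next]);
	free(stack);
	stack = NULL;
	cap = 0;
}

int main(void)
{
	if (!stack_init(16))
		return 1;
	tracked_alloc(32);
	tracked_alloc(64);
	free_all();                 /* one call releases every allocation */
	printf("all allocations released\n");
	return 0;
}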
43549diff -urNp linux-2.6.32.41/grsecurity/gracl.c linux-2.6.32.41/grsecurity/gracl.c
43550--- linux-2.6.32.41/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
43551+++ linux-2.6.32.41/grsecurity/gracl.c 2011-05-24 20:26:07.000000000 -0400
43552@@ -0,0 +1,4079 @@
43553+#include <linux/kernel.h>
43554+#include <linux/module.h>
43555+#include <linux/sched.h>
43556+#include <linux/mm.h>
43557+#include <linux/file.h>
43558+#include <linux/fs.h>
43559+#include <linux/namei.h>
43560+#include <linux/mount.h>
43561+#include <linux/tty.h>
43562+#include <linux/proc_fs.h>
43563+#include <linux/smp_lock.h>
43564+#include <linux/slab.h>
43565+#include <linux/vmalloc.h>
43566+#include <linux/types.h>
43567+#include <linux/sysctl.h>
43568+#include <linux/netdevice.h>
43569+#include <linux/ptrace.h>
43570+#include <linux/gracl.h>
43571+#include <linux/gralloc.h>
43572+#include <linux/grsecurity.h>
43573+#include <linux/grinternal.h>
43574+#include <linux/pid_namespace.h>
43575+#include <linux/fdtable.h>
43576+#include <linux/percpu.h>
43577+
43578+#include <asm/uaccess.h>
43579+#include <asm/errno.h>
43580+#include <asm/mman.h>
43581+
43582+static struct acl_role_db acl_role_set;
43583+static struct name_db name_set;
43584+static struct inodev_db inodev_set;
43585+
43586+/* for keeping track of userspace pointers used for subjects, so we
43587+ can share references in the kernel as well
43588+*/
43589+
43590+static struct dentry *real_root;
43591+static struct vfsmount *real_root_mnt;
43592+
43593+static struct acl_subj_map_db subj_map_set;
43594+
43595+static struct acl_role_label *default_role;
43596+
43597+static struct acl_role_label *role_list;
43598+
43599+static u16 acl_sp_role_value;
43600+
43601+extern char *gr_shared_page[4];
43602+static DEFINE_MUTEX(gr_dev_mutex);
43603+DEFINE_RWLOCK(gr_inode_lock);
43604+
43605+struct gr_arg *gr_usermode;
43606+
43607+static unsigned int gr_status __read_only = GR_STATUS_INIT;
43608+
43609+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
43610+extern void gr_clear_learn_entries(void);
43611+
43612+#ifdef CONFIG_GRKERNSEC_RESLOG
43613+extern void gr_log_resource(const struct task_struct *task,
43614+ const int res, const unsigned long wanted, const int gt);
43615+#endif
43616+
43617+unsigned char *gr_system_salt;
43618+unsigned char *gr_system_sum;
43619+
43620+static struct sprole_pw **acl_special_roles = NULL;
43621+static __u16 num_sprole_pws = 0;
43622+
43623+static struct acl_role_label *kernel_role = NULL;
43624+
43625+static unsigned int gr_auth_attempts = 0;
43626+static unsigned long gr_auth_expires = 0UL;
43627+
43628+#ifdef CONFIG_NET
43629+extern struct vfsmount *sock_mnt;
43630+#endif
43631+extern struct vfsmount *pipe_mnt;
43632+extern struct vfsmount *shm_mnt;
43633+#ifdef CONFIG_HUGETLBFS
43634+extern struct vfsmount *hugetlbfs_vfsmount;
43635+#endif
43636+
43637+static struct acl_object_label *fakefs_obj;
43638+
43639+extern int gr_init_uidset(void);
43640+extern void gr_free_uidset(void);
43641+extern void gr_remove_uid(uid_t uid);
43642+extern int gr_find_uid(uid_t uid);
43643+
43644+__inline__ int
43645+gr_acl_is_enabled(void)
43646+{
43647+ return (gr_status & GR_READY);
43648+}
43649+
43650+#ifdef CONFIG_BTRFS_FS
43651+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
43652+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
43653+#endif
43654+
43655+static inline dev_t __get_dev(const struct dentry *dentry)
43656+{
43657+#ifdef CONFIG_BTRFS_FS
43658+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
43659+ return get_btrfs_dev_from_inode(dentry->d_inode);
43660+ else
43661+#endif
43662+ return dentry->d_inode->i_sb->s_dev;
43663+}
43664+
43665+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
43666+{
43667+ return __get_dev(dentry);
43668+}
43669+
43670+static char gr_task_roletype_to_char(struct task_struct *task)
43671+{
43672+ switch (task->role->roletype &
43673+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
43674+ GR_ROLE_SPECIAL)) {
43675+ case GR_ROLE_DEFAULT:
43676+ return 'D';
43677+ case GR_ROLE_USER:
43678+ return 'U';
43679+ case GR_ROLE_GROUP:
43680+ return 'G';
43681+ case GR_ROLE_SPECIAL:
43682+ return 'S';
43683+ }
43684+
43685+ return 'X';
43686+}
43687+
43688+char gr_roletype_to_char(void)
43689+{
43690+ return gr_task_roletype_to_char(current);
43691+}
43692+
43693+__inline__ int
43694+gr_acl_tpe_check(void)
43695+{
43696+ if (unlikely(!(gr_status & GR_READY)))
43697+ return 0;
43698+ if (current->role->roletype & GR_ROLE_TPE)
43699+ return 1;
43700+ else
43701+ return 0;
43702+}
43703+
43704+int
43705+gr_handle_rawio(const struct inode *inode)
43706+{
43707+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
43708+ if (inode && S_ISBLK(inode->i_mode) &&
43709+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
43710+ !capable(CAP_SYS_RAWIO))
43711+ return 1;
43712+#endif
43713+ return 0;
43714+}
43715+
43716+static int
43717+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
43718+{
43719+ if (likely(lena != lenb))
43720+ return 0;
43721+
43722+ return !memcmp(a, b, lena);
43723+}
43724+
43725+/* this must be called with vfsmount_lock and dcache_lock held */
43726+
43727+static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
43728+ struct dentry *root, struct vfsmount *rootmnt,
43729+ char *buffer, int buflen)
43730+{
43731+ char * end = buffer+buflen;
43732+ char * retval;
43733+ int namelen;
43734+
43735+ *--end = '\0';
43736+ buflen--;
43737+
43738+ if (buflen < 1)
43739+ goto Elong;
43740+ /* Get '/' right */
43741+ retval = end-1;
43742+ *retval = '/';
43743+
43744+ for (;;) {
43745+ struct dentry * parent;
43746+
43747+ if (dentry == root && vfsmnt == rootmnt)
43748+ break;
43749+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
43750+ /* Global root? */
43751+ if (vfsmnt->mnt_parent == vfsmnt)
43752+ goto global_root;
43753+ dentry = vfsmnt->mnt_mountpoint;
43754+ vfsmnt = vfsmnt->mnt_parent;
43755+ continue;
43756+ }
43757+ parent = dentry->d_parent;
43758+ prefetch(parent);
43759+ namelen = dentry->d_name.len;
43760+ buflen -= namelen + 1;
43761+ if (buflen < 0)
43762+ goto Elong;
43763+ end -= namelen;
43764+ memcpy(end, dentry->d_name.name, namelen);
43765+ *--end = '/';
43766+ retval = end;
43767+ dentry = parent;
43768+ }
43769+
43770+out:
43771+ return retval;
43772+
43773+global_root:
43774+ namelen = dentry->d_name.len;
43775+ buflen -= namelen;
43776+ if (buflen < 0)
43777+ goto Elong;
43778+ retval -= namelen-1; /* hit the slash */
43779+ memcpy(retval, dentry->d_name.name, namelen);
43780+ goto out;
43781+Elong:
43782+ retval = ERR_PTR(-ENAMETOOLONG);
43783+ goto out;
43784+}
43785+
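/* Illustrative sketch, not part of the patch: __our_d_path() above builds
 * the pathname right-to-left, writing each component in front of the
 * previous one at the tail of the caller's buffer.  The toy program below
 * repeats the technique on a parent-linked list; the node type is invented
 * for illustration and the Elong-style bounds checks are omitted. */
#include <stdio.h>
#include <string.h>

struct node { const char *name; struct node *parent; };

static char *build_path(struct node *n, char *buf, int buflen)
{
	char *end = buf + buflen;
	char *retval;

	*--end = '\0';
	retval = end - 1;
	*retval = '/';                       /* lone "/" if n is already the root */

	while (n->parent) {                  /* walk leaf -> root */
		int len = strlen(n->name);
		end -= len;
		memcpy(end, n->name, len);       /* component lands in front of the rest */
		*--end = '/';
		retval = end;
		n = n->parent;
	}
	return retval;
}

int main(void)
{
	struct node root = { "", NULL };
	struct node usr  = { "usr", &root };
	struct node bin  = { "bin", &usr };
	char buf[64];

	puts(build_path(&bin, buf, sizeof(buf)));   /* prints /usr/bin */
	return 0;
}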
43786+static char *
43787+gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
43788+ struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
43789+{
43790+ char *retval;
43791+
43792+ retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
43793+ if (unlikely(IS_ERR(retval)))
43794+ retval = strcpy(buf, "<path too long>");
43795+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
43796+ retval[1] = '\0';
43797+
43798+ return retval;
43799+}
43800+
43801+static char *
43802+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
43803+ char *buf, int buflen)
43804+{
43805+ char *res;
43806+
43807+ /* we can use real_root, real_root_mnt, because this is only called
43808+ by the RBAC system */
43809+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
43810+
43811+ return res;
43812+}
43813+
43814+static char *
43815+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
43816+ char *buf, int buflen)
43817+{
43818+ char *res;
43819+ struct dentry *root;
43820+ struct vfsmount *rootmnt;
43821+ struct task_struct *reaper = &init_task;
43822+
43823+ /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
43824+ read_lock(&reaper->fs->lock);
43825+ root = dget(reaper->fs->root.dentry);
43826+ rootmnt = mntget(reaper->fs->root.mnt);
43827+ read_unlock(&reaper->fs->lock);
43828+
43829+ spin_lock(&dcache_lock);
43830+ spin_lock(&vfsmount_lock);
43831+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
43832+ spin_unlock(&vfsmount_lock);
43833+ spin_unlock(&dcache_lock);
43834+
43835+ dput(root);
43836+ mntput(rootmnt);
43837+ return res;
43838+}
43839+
43840+static char *
43841+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
43842+{
43843+ char *ret;
43844+ spin_lock(&dcache_lock);
43845+ spin_lock(&vfsmount_lock);
43846+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
43847+ PAGE_SIZE);
43848+ spin_unlock(&vfsmount_lock);
43849+ spin_unlock(&dcache_lock);
43850+ return ret;
43851+}
43852+
43853+char *
43854+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
43855+{
43856+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
43857+ PAGE_SIZE);
43858+}
43859+
43860+char *
43861+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
43862+{
43863+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
43864+ PAGE_SIZE);
43865+}
43866+
43867+char *
43868+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
43869+{
43870+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
43871+ PAGE_SIZE);
43872+}
43873+
43874+char *
43875+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
43876+{
43877+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
43878+ PAGE_SIZE);
43879+}
43880+
43881+char *
43882+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
43883+{
43884+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
43885+ PAGE_SIZE);
43886+}
43887+
43888+__inline__ __u32
43889+to_gr_audit(const __u32 reqmode)
43890+{
43891+ /* masks off auditable permission flags, then shifts them to create
43892+ auditing flags, and adds the special case of append auditing if
43893+ we're requesting write */
43894+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
43895+}
43896+
43897+struct acl_subject_label *
43898+lookup_subject_map(const struct acl_subject_label *userp)
43899+{
43900+ unsigned int index = shash(userp, subj_map_set.s_size);
43901+ struct subject_map *match;
43902+
43903+ match = subj_map_set.s_hash[index];
43904+
43905+ while (match && match->user != userp)
43906+ match = match->next;
43907+
43908+ if (match != NULL)
43909+ return match->kernel;
43910+ else
43911+ return NULL;
43912+}
43913+
43914+static void
43915+insert_subj_map_entry(struct subject_map *subjmap)
43916+{
43917+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
43918+ struct subject_map **curr;
43919+
43920+ subjmap->prev = NULL;
43921+
43922+ curr = &subj_map_set.s_hash[index];
43923+ if (*curr != NULL)
43924+ (*curr)->prev = subjmap;
43925+
43926+ subjmap->next = *curr;
43927+ *curr = subjmap;
43928+
43929+ return;
43930+}
43931+
43932+static struct acl_role_label *
43933+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
43934+ const gid_t gid)
43935+{
43936+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
43937+ struct acl_role_label *match;
43938+ struct role_allowed_ip *ipp;
43939+ unsigned int x;
43940+ u32 curr_ip = task->signal->curr_ip;
43941+
43942+ task->signal->saved_ip = curr_ip;
43943+
43944+ match = acl_role_set.r_hash[index];
43945+
43946+ while (match) {
43947+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
43948+ for (x = 0; x < match->domain_child_num; x++) {
43949+ if (match->domain_children[x] == uid)
43950+ goto found;
43951+ }
43952+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
43953+ break;
43954+ match = match->next;
43955+ }
43956+found:
43957+ if (match == NULL) {
43958+ try_group:
43959+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
43960+ match = acl_role_set.r_hash[index];
43961+
43962+ while (match) {
43963+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
43964+ for (x = 0; x < match->domain_child_num; x++) {
43965+ if (match->domain_children[x] == gid)
43966+ goto found2;
43967+ }
43968+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
43969+ break;
43970+ match = match->next;
43971+ }
43972+found2:
43973+ if (match == NULL)
43974+ match = default_role;
43975+ if (match->allowed_ips == NULL)
43976+ return match;
43977+ else {
43978+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
43979+ if (likely
43980+ ((ntohl(curr_ip) & ipp->netmask) ==
43981+ (ntohl(ipp->addr) & ipp->netmask)))
43982+ return match;
43983+ }
43984+ match = default_role;
43985+ }
43986+ } else if (match->allowed_ips == NULL) {
43987+ return match;
43988+ } else {
43989+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
43990+ if (likely
43991+ ((ntohl(curr_ip) & ipp->netmask) ==
43992+ (ntohl(ipp->addr) & ipp->netmask)))
43993+ return match;
43994+ }
43995+ goto try_group;
43996+ }
43997+
43998+ return match;
43999+}
44000+
44001+struct acl_subject_label *
44002+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
44003+ const struct acl_role_label *role)
44004+{
44005+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
44006+ struct acl_subject_label *match;
44007+
44008+ match = role->subj_hash[index];
44009+
44010+ while (match && (match->inode != ino || match->device != dev ||
44011+ (match->mode & GR_DELETED))) {
44012+ match = match->next;
44013+ }
44014+
44015+ if (match && !(match->mode & GR_DELETED))
44016+ return match;
44017+ else
44018+ return NULL;
44019+}
44020+
44021+struct acl_subject_label *
44022+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
44023+ const struct acl_role_label *role)
44024+{
44025+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
44026+ struct acl_subject_label *match;
44027+
44028+ match = role->subj_hash[index];
44029+
44030+ while (match && (match->inode != ino || match->device != dev ||
44031+ !(match->mode & GR_DELETED))) {
44032+ match = match->next;
44033+ }
44034+
44035+ if (match && (match->mode & GR_DELETED))
44036+ return match;
44037+ else
44038+ return NULL;
44039+}
44040+
44041+static struct acl_object_label *
44042+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
44043+ const struct acl_subject_label *subj)
44044+{
44045+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
44046+ struct acl_object_label *match;
44047+
44048+ match = subj->obj_hash[index];
44049+
44050+ while (match && (match->inode != ino || match->device != dev ||
44051+ (match->mode & GR_DELETED))) {
44052+ match = match->next;
44053+ }
44054+
44055+ if (match && !(match->mode & GR_DELETED))
44056+ return match;
44057+ else
44058+ return NULL;
44059+}
44060+
44061+static struct acl_object_label *
44062+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
44063+ const struct acl_subject_label *subj)
44064+{
44065+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
44066+ struct acl_object_label *match;
44067+
44068+ match = subj->obj_hash[index];
44069+
44070+ while (match && (match->inode != ino || match->device != dev ||
44071+ !(match->mode & GR_DELETED))) {
44072+ match = match->next;
44073+ }
44074+
44075+ if (match && (match->mode & GR_DELETED))
44076+ return match;
44077+
44078+ match = subj->obj_hash[index];
44079+
44080+ while (match && (match->inode != ino || match->device != dev ||
44081+ (match->mode & GR_DELETED))) {
44082+ match = match->next;
44083+ }
44084+
44085+ if (match && !(match->mode & GR_DELETED))
44086+ return match;
44087+ else
44088+ return NULL;
44089+}
44090+
44091+static struct name_entry *
44092+lookup_name_entry(const char *name)
44093+{
44094+ unsigned int len = strlen(name);
44095+ unsigned int key = full_name_hash(name, len);
44096+ unsigned int index = key % name_set.n_size;
44097+ struct name_entry *match;
44098+
44099+ match = name_set.n_hash[index];
44100+
44101+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
44102+ match = match->next;
44103+
44104+ return match;
44105+}
44106+
44107+static struct name_entry *
44108+lookup_name_entry_create(const char *name)
44109+{
44110+ unsigned int len = strlen(name);
44111+ unsigned int key = full_name_hash(name, len);
44112+ unsigned int index = key % name_set.n_size;
44113+ struct name_entry *match;
44114+
44115+ match = name_set.n_hash[index];
44116+
44117+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
44118+ !match->deleted))
44119+ match = match->next;
44120+
44121+ if (match && match->deleted)
44122+ return match;
44123+
44124+ match = name_set.n_hash[index];
44125+
44126+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
44127+ match->deleted))
44128+ match = match->next;
44129+
44130+ if (match && !match->deleted)
44131+ return match;
44132+ else
44133+ return NULL;
44134+}
44135+
44136+static struct inodev_entry *
44137+lookup_inodev_entry(const ino_t ino, const dev_t dev)
44138+{
44139+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
44140+ struct inodev_entry *match;
44141+
44142+ match = inodev_set.i_hash[index];
44143+
44144+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
44145+ match = match->next;
44146+
44147+ return match;
44148+}
44149+
44150+static void
44151+insert_inodev_entry(struct inodev_entry *entry)
44152+{
44153+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
44154+ inodev_set.i_size);
44155+ struct inodev_entry **curr;
44156+
44157+ entry->prev = NULL;
44158+
44159+ curr = &inodev_set.i_hash[index];
44160+ if (*curr != NULL)
44161+ (*curr)->prev = entry;
44162+
44163+ entry->next = *curr;
44164+ *curr = entry;
44165+
44166+ return;
44167+}
44168+
44169+static void
44170+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
44171+{
44172+ unsigned int index =
44173+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
44174+ struct acl_role_label **curr;
44175+ struct acl_role_label *tmp;
44176+
44177+ curr = &acl_role_set.r_hash[index];
44178+
44179+ /* if role was already inserted due to domains and already has
44180+ a role in the same bucket as it attached, then we need to
44181+ combine these two buckets
44182+ */
44183+ if (role->next) {
44184+ tmp = role->next;
44185+ while (tmp->next)
44186+ tmp = tmp->next;
44187+ tmp->next = *curr;
44188+ } else
44189+ role->next = *curr;
44190+ *curr = role;
44191+
44192+ return;
44193+}
44194+
44195+static void
44196+insert_acl_role_label(struct acl_role_label *role)
44197+{
44198+ int i;
44199+
44200+ if (role_list == NULL) {
44201+ role_list = role;
44202+ role->prev = NULL;
44203+ } else {
44204+ role->prev = role_list;
44205+ role_list = role;
44206+ }
44207+
44208+ /* used for hash chains */
44209+ role->next = NULL;
44210+
44211+ if (role->roletype & GR_ROLE_DOMAIN) {
44212+ for (i = 0; i < role->domain_child_num; i++)
44213+ __insert_acl_role_label(role, role->domain_children[i]);
44214+ } else
44215+ __insert_acl_role_label(role, role->uidgid);
44216+}
44217+
44218+static int
44219+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
44220+{
44221+ struct name_entry **curr, *nentry;
44222+ struct inodev_entry *ientry;
44223+ unsigned int len = strlen(name);
44224+ unsigned int key = full_name_hash(name, len);
44225+ unsigned int index = key % name_set.n_size;
44226+
44227+ curr = &name_set.n_hash[index];
44228+
44229+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
44230+ curr = &((*curr)->next);
44231+
44232+ if (*curr != NULL)
44233+ return 1;
44234+
44235+ nentry = acl_alloc(sizeof (struct name_entry));
44236+ if (nentry == NULL)
44237+ return 0;
44238+ ientry = acl_alloc(sizeof (struct inodev_entry));
44239+ if (ientry == NULL)
44240+ return 0;
44241+ ientry->nentry = nentry;
44242+
44243+ nentry->key = key;
44244+ nentry->name = name;
44245+ nentry->inode = inode;
44246+ nentry->device = device;
44247+ nentry->len = len;
44248+ nentry->deleted = deleted;
44249+
44250+ nentry->prev = NULL;
44251+ curr = &name_set.n_hash[index];
44252+ if (*curr != NULL)
44253+ (*curr)->prev = nentry;
44254+ nentry->next = *curr;
44255+ *curr = nentry;
44256+
44257+ /* insert us into the table searchable by inode/dev */
44258+ insert_inodev_entry(ientry);
44259+
44260+ return 1;
44261+}
44262+
44263+static void
44264+insert_acl_obj_label(struct acl_object_label *obj,
44265+ struct acl_subject_label *subj)
44266+{
44267+ unsigned int index =
44268+ fhash(obj->inode, obj->device, subj->obj_hash_size);
44269+ struct acl_object_label **curr;
44270+
44271+
44272+ obj->prev = NULL;
44273+
44274+ curr = &subj->obj_hash[index];
44275+ if (*curr != NULL)
44276+ (*curr)->prev = obj;
44277+
44278+ obj->next = *curr;
44279+ *curr = obj;
44280+
44281+ return;
44282+}
44283+
44284+static void
44285+insert_acl_subj_label(struct acl_subject_label *obj,
44286+ struct acl_role_label *role)
44287+{
44288+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
44289+ struct acl_subject_label **curr;
44290+
44291+ obj->prev = NULL;
44292+
44293+ curr = &role->subj_hash[index];
44294+ if (*curr != NULL)
44295+ (*curr)->prev = obj;
44296+
44297+ obj->next = *curr;
44298+ *curr = obj;
44299+
44300+ return;
44301+}
44302+
44303+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
44304+
44305+static void *
44306+create_table(__u32 * len, int elementsize)
44307+{
44308+ unsigned int table_sizes[] = {
44309+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
44310+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
44311+ 4194301, 8388593, 16777213, 33554393, 67108859
44312+ };
44313+ void *newtable = NULL;
44314+ unsigned int pwr = 0;
44315+
44316+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
44317+ table_sizes[pwr] <= *len)
44318+ pwr++;
44319+
44320+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
44321+ return newtable;
44322+
44323+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
44324+ newtable =
44325+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
44326+ else
44327+ newtable = vmalloc(table_sizes[pwr] * elementsize);
44328+
44329+ *len = table_sizes[pwr];
44330+
44331+ return newtable;
44332+}
44333+
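/* Illustrative sketch, not part of the patch: create_table() above picks the
 * smallest prime from its table that is strictly larger than the requested
 * element count, which keeps the chained-hash load factor
 * (lambda = elements / buckets) at or below roughly 1, as the comment before
 * it notes.  The standalone program below reproduces that selection. */
#include <stdio.h>

int main(void)
{
	static const unsigned int sizes[] = {
		7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381
	};
	unsigned int want = 3000;   /* e.g. number of ACL objects in the policy */
	unsigned int i = 0;

	while (i < sizeof(sizes) / sizeof(sizes[0]) - 1 && sizes[i] <= want)
		i++;                    /* same walk as create_table() */

	printf("requested %u, table size %u, lambda = %.2f\n",
	       want, sizes[i], (double)want / sizes[i]);   /* ~0.73 */
	return 0;
}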
44334+static int
44335+init_variables(const struct gr_arg *arg)
44336+{
44337+ struct task_struct *reaper = &init_task;
44338+ unsigned int stacksize;
44339+
44340+ subj_map_set.s_size = arg->role_db.num_subjects;
44341+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
44342+ name_set.n_size = arg->role_db.num_objects;
44343+ inodev_set.i_size = arg->role_db.num_objects;
44344+
44345+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
44346+ !name_set.n_size || !inodev_set.i_size)
44347+ return 1;
44348+
44349+ if (!gr_init_uidset())
44350+ return 1;
44351+
44352+ /* set up the stack that holds allocation info */
44353+
44354+ stacksize = arg->role_db.num_pointers + 5;
44355+
44356+ if (!acl_alloc_stack_init(stacksize))
44357+ return 1;
44358+
44359+ /* grab reference for the real root dentry and vfsmount */
44360+ read_lock(&reaper->fs->lock);
44361+ real_root = dget(reaper->fs->root.dentry);
44362+ real_root_mnt = mntget(reaper->fs->root.mnt);
44363+ read_unlock(&reaper->fs->lock);
44364+
44365+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44366+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
44367+#endif
44368+
44369+ fakefs_obj = acl_alloc(sizeof(struct acl_object_label));
44370+ if (fakefs_obj == NULL)
44371+ return 1;
44372+ fakefs_obj->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
44373+
44374+ subj_map_set.s_hash =
44375+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
44376+ acl_role_set.r_hash =
44377+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
44378+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
44379+ inodev_set.i_hash =
44380+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
44381+
44382+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
44383+ !name_set.n_hash || !inodev_set.i_hash)
44384+ return 1;
44385+
44386+ memset(subj_map_set.s_hash, 0,
44387+ sizeof(struct subject_map *) * subj_map_set.s_size);
44388+ memset(acl_role_set.r_hash, 0,
44389+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
44390+ memset(name_set.n_hash, 0,
44391+ sizeof (struct name_entry *) * name_set.n_size);
44392+ memset(inodev_set.i_hash, 0,
44393+ sizeof (struct inodev_entry *) * inodev_set.i_size);
44394+
44395+ return 0;
44396+}
44397+
44398+/* free information not needed after startup
44399+ currently contains user->kernel pointer mappings for subjects
44400+*/
44401+
44402+static void
44403+free_init_variables(void)
44404+{
44405+ __u32 i;
44406+
44407+ if (subj_map_set.s_hash) {
44408+ for (i = 0; i < subj_map_set.s_size; i++) {
44409+ if (subj_map_set.s_hash[i]) {
44410+ kfree(subj_map_set.s_hash[i]);
44411+ subj_map_set.s_hash[i] = NULL;
44412+ }
44413+ }
44414+
44415+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
44416+ PAGE_SIZE)
44417+ kfree(subj_map_set.s_hash);
44418+ else
44419+ vfree(subj_map_set.s_hash);
44420+ }
44421+
44422+ return;
44423+}
44424+
44425+static void
44426+free_variables(void)
44427+{
44428+ struct acl_subject_label *s;
44429+ struct acl_role_label *r;
44430+ struct task_struct *task, *task2;
44431+ unsigned int x;
44432+
44433+ gr_clear_learn_entries();
44434+
44435+ read_lock(&tasklist_lock);
44436+ do_each_thread(task2, task) {
44437+ task->acl_sp_role = 0;
44438+ task->acl_role_id = 0;
44439+ task->acl = NULL;
44440+ task->role = NULL;
44441+ } while_each_thread(task2, task);
44442+ read_unlock(&tasklist_lock);
44443+
44444+ /* release the reference to the real root dentry and vfsmount */
44445+ if (real_root)
44446+ dput(real_root);
44447+ real_root = NULL;
44448+ if (real_root_mnt)
44449+ mntput(real_root_mnt);
44450+ real_root_mnt = NULL;
44451+
44452+ /* free all object hash tables */
44453+
44454+ FOR_EACH_ROLE_START(r)
44455+ if (r->subj_hash == NULL)
44456+ goto next_role;
44457+ FOR_EACH_SUBJECT_START(r, s, x)
44458+ if (s->obj_hash == NULL)
44459+ break;
44460+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
44461+ kfree(s->obj_hash);
44462+ else
44463+ vfree(s->obj_hash);
44464+ FOR_EACH_SUBJECT_END(s, x)
44465+ FOR_EACH_NESTED_SUBJECT_START(r, s)
44466+ if (s->obj_hash == NULL)
44467+ break;
44468+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
44469+ kfree(s->obj_hash);
44470+ else
44471+ vfree(s->obj_hash);
44472+ FOR_EACH_NESTED_SUBJECT_END(s)
44473+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
44474+ kfree(r->subj_hash);
44475+ else
44476+ vfree(r->subj_hash);
44477+ r->subj_hash = NULL;
44478+next_role:
44479+ FOR_EACH_ROLE_END(r)
44480+
44481+ acl_free_all();
44482+
44483+ if (acl_role_set.r_hash) {
44484+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
44485+ PAGE_SIZE)
44486+ kfree(acl_role_set.r_hash);
44487+ else
44488+ vfree(acl_role_set.r_hash);
44489+ }
44490+ if (name_set.n_hash) {
44491+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
44492+ PAGE_SIZE)
44493+ kfree(name_set.n_hash);
44494+ else
44495+ vfree(name_set.n_hash);
44496+ }
44497+
44498+ if (inodev_set.i_hash) {
44499+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
44500+ PAGE_SIZE)
44501+ kfree(inodev_set.i_hash);
44502+ else
44503+ vfree(inodev_set.i_hash);
44504+ }
44505+
44506+ gr_free_uidset();
44507+
44508+ memset(&name_set, 0, sizeof (struct name_db));
44509+ memset(&inodev_set, 0, sizeof (struct inodev_db));
44510+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
44511+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
44512+
44513+ default_role = NULL;
44514+ role_list = NULL;
44515+
44516+ return;
44517+}
44518+
44519+static __u32
44520+count_user_objs(struct acl_object_label *userp)
44521+{
44522+ struct acl_object_label o_tmp;
44523+ __u32 num = 0;
44524+
44525+ while (userp) {
44526+ if (copy_from_user(&o_tmp, userp,
44527+ sizeof (struct acl_object_label)))
44528+ break;
44529+
44530+ userp = o_tmp.prev;
44531+ num++;
44532+ }
44533+
44534+ return num;
44535+}
44536+
44537+static struct acl_subject_label *
44538+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
44539+
44540+static int
44541+copy_user_glob(struct acl_object_label *obj)
44542+{
44543+ struct acl_object_label *g_tmp, **guser;
44544+ unsigned int len;
44545+ char *tmp;
44546+
44547+ if (obj->globbed == NULL)
44548+ return 0;
44549+
44550+ guser = &obj->globbed;
44551+ while (*guser) {
44552+ g_tmp = (struct acl_object_label *)
44553+ acl_alloc(sizeof (struct acl_object_label));
44554+ if (g_tmp == NULL)
44555+ return -ENOMEM;
44556+
44557+ if (copy_from_user(g_tmp, *guser,
44558+ sizeof (struct acl_object_label)))
44559+ return -EFAULT;
44560+
44561+ len = strnlen_user(g_tmp->filename, PATH_MAX);
44562+
44563+ if (!len || len >= PATH_MAX)
44564+ return -EINVAL;
44565+
44566+ if ((tmp = (char *) acl_alloc(len)) == NULL)
44567+ return -ENOMEM;
44568+
44569+ if (copy_from_user(tmp, g_tmp->filename, len))
44570+ return -EFAULT;
44571+ tmp[len-1] = '\0';
44572+ g_tmp->filename = tmp;
44573+
44574+ *guser = g_tmp;
44575+ guser = &(g_tmp->next);
44576+ }
44577+
44578+ return 0;
44579+}
44580+
44581+static int
44582+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
44583+ struct acl_role_label *role)
44584+{
44585+ struct acl_object_label *o_tmp;
44586+ unsigned int len;
44587+ int ret;
44588+ char *tmp;
44589+
44590+ while (userp) {
44591+ if ((o_tmp = (struct acl_object_label *)
44592+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
44593+ return -ENOMEM;
44594+
44595+ if (copy_from_user(o_tmp, userp,
44596+ sizeof (struct acl_object_label)))
44597+ return -EFAULT;
44598+
44599+ userp = o_tmp->prev;
44600+
44601+ len = strnlen_user(o_tmp->filename, PATH_MAX);
44602+
44603+ if (!len || len >= PATH_MAX)
44604+ return -EINVAL;
44605+
44606+ if ((tmp = (char *) acl_alloc(len)) == NULL)
44607+ return -ENOMEM;
44608+
44609+ if (copy_from_user(tmp, o_tmp->filename, len))
44610+ return -EFAULT;
44611+ tmp[len-1] = '\0';
44612+ o_tmp->filename = tmp;
44613+
44614+ insert_acl_obj_label(o_tmp, subj);
44615+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
44616+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
44617+ return -ENOMEM;
44618+
44619+ ret = copy_user_glob(o_tmp);
44620+ if (ret)
44621+ return ret;
44622+
44623+ if (o_tmp->nested) {
44624+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
44625+ if (IS_ERR(o_tmp->nested))
44626+ return PTR_ERR(o_tmp->nested);
44627+
44628+ /* insert into nested subject list */
44629+ o_tmp->nested->next = role->hash->first;
44630+ role->hash->first = o_tmp->nested;
44631+ }
44632+ }
44633+
44634+ return 0;
44635+}
44636+
44637+static __u32
44638+count_user_subjs(struct acl_subject_label *userp)
44639+{
44640+ struct acl_subject_label s_tmp;
44641+ __u32 num = 0;
44642+
44643+ while (userp) {
44644+ if (copy_from_user(&s_tmp, userp,
44645+ sizeof (struct acl_subject_label)))
44646+ break;
44647+
44648+ userp = s_tmp.prev;
44649+ /* do not count nested subjects against this count, since
44650+ they are not included in the hash table, but are
44651+ attached to objects. We have already counted
44652+ the subjects in userspace for the allocation
44653+ stack
44654+ */
44655+ if (!(s_tmp.mode & GR_NESTED))
44656+ num++;
44657+ }
44658+
44659+ return num;
44660+}
44661+
44662+static int
44663+copy_user_allowedips(struct acl_role_label *rolep)
44664+{
44665+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
44666+
44667+ ruserip = rolep->allowed_ips;
44668+
44669+ while (ruserip) {
44670+ rlast = rtmp;
44671+
44672+ if ((rtmp = (struct role_allowed_ip *)
44673+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
44674+ return -ENOMEM;
44675+
44676+ if (copy_from_user(rtmp, ruserip,
44677+ sizeof (struct role_allowed_ip)))
44678+ return -EFAULT;
44679+
44680+ ruserip = rtmp->prev;
44681+
44682+ if (!rlast) {
44683+ rtmp->prev = NULL;
44684+ rolep->allowed_ips = rtmp;
44685+ } else {
44686+ rlast->next = rtmp;
44687+ rtmp->prev = rlast;
44688+ }
44689+
44690+ if (!ruserip)
44691+ rtmp->next = NULL;
44692+ }
44693+
44694+ return 0;
44695+}
44696+
44697+static int
44698+copy_user_transitions(struct acl_role_label *rolep)
44699+{
44700+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
44701+
44702+ unsigned int len;
44703+ char *tmp;
44704+
44705+ rusertp = rolep->transitions;
44706+
44707+ while (rusertp) {
44708+ rlast = rtmp;
44709+
44710+ if ((rtmp = (struct role_transition *)
44711+ acl_alloc(sizeof (struct role_transition))) == NULL)
44712+ return -ENOMEM;
44713+
44714+ if (copy_from_user(rtmp, rusertp,
44715+ sizeof (struct role_transition)))
44716+ return -EFAULT;
44717+
44718+ rusertp = rtmp->prev;
44719+
44720+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
44721+
44722+ if (!len || len >= GR_SPROLE_LEN)
44723+ return -EINVAL;
44724+
44725+ if ((tmp = (char *) acl_alloc(len)) == NULL)
44726+ return -ENOMEM;
44727+
44728+ if (copy_from_user(tmp, rtmp->rolename, len))
44729+ return -EFAULT;
44730+ tmp[len-1] = '\0';
44731+ rtmp->rolename = tmp;
44732+
44733+ if (!rlast) {
44734+ rtmp->prev = NULL;
44735+ rolep->transitions = rtmp;
44736+ } else {
44737+ rlast->next = rtmp;
44738+ rtmp->prev = rlast;
44739+ }
44740+
44741+ if (!rusertp)
44742+ rtmp->next = NULL;
44743+ }
44744+
44745+ return 0;
44746+}
44747+
44748+static struct acl_subject_label *
44749+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
44750+{
44751+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
44752+ unsigned int len;
44753+ char *tmp;
44754+ __u32 num_objs;
44755+ struct acl_ip_label **i_tmp, *i_utmp2;
44756+ struct gr_hash_struct ghash;
44757+ struct subject_map *subjmap;
44758+ unsigned int i_num;
44759+ int err;
44760+
44761+ s_tmp = lookup_subject_map(userp);
44762+
44763+ /* we've already copied this subject into the kernel, just return
44764+ the reference to it, and don't copy it over again
44765+ */
44766+ if (s_tmp)
44767+ return(s_tmp);
44768+
44769+ if ((s_tmp = (struct acl_subject_label *)
44770+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
44771+ return ERR_PTR(-ENOMEM);
44772+
44773+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
44774+ if (subjmap == NULL)
44775+ return ERR_PTR(-ENOMEM);
44776+
44777+ subjmap->user = userp;
44778+ subjmap->kernel = s_tmp;
44779+ insert_subj_map_entry(subjmap);
44780+
44781+ if (copy_from_user(s_tmp, userp,
44782+ sizeof (struct acl_subject_label)))
44783+ return ERR_PTR(-EFAULT);
44784+
44785+ len = strnlen_user(s_tmp->filename, PATH_MAX);
44786+
44787+ if (!len || len >= PATH_MAX)
44788+ return ERR_PTR(-EINVAL);
44789+
44790+ if ((tmp = (char *) acl_alloc(len)) == NULL)
44791+ return ERR_PTR(-ENOMEM);
44792+
44793+ if (copy_from_user(tmp, s_tmp->filename, len))
44794+ return ERR_PTR(-EFAULT);
44795+ tmp[len-1] = '\0';
44796+ s_tmp->filename = tmp;
44797+
44798+ if (!strcmp(s_tmp->filename, "/"))
44799+ role->root_label = s_tmp;
44800+
44801+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
44802+ return ERR_PTR(-EFAULT);
44803+
44804+ /* copy user and group transition tables */
44805+
44806+ if (s_tmp->user_trans_num) {
44807+ uid_t *uidlist;
44808+
44809+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
44810+ if (uidlist == NULL)
44811+ return ERR_PTR(-ENOMEM);
44812+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
44813+ return ERR_PTR(-EFAULT);
44814+
44815+ s_tmp->user_transitions = uidlist;
44816+ }
44817+
44818+ if (s_tmp->group_trans_num) {
44819+ gid_t *gidlist;
44820+
44821+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
44822+ if (gidlist == NULL)
44823+ return ERR_PTR(-ENOMEM);
44824+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
44825+ return ERR_PTR(-EFAULT);
44826+
44827+ s_tmp->group_transitions = gidlist;
44828+ }
44829+
44830+ /* set up object hash table */
44831+ num_objs = count_user_objs(ghash.first);
44832+
44833+ s_tmp->obj_hash_size = num_objs;
44834+ s_tmp->obj_hash =
44835+ (struct acl_object_label **)
44836+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
44837+
44838+ if (!s_tmp->obj_hash)
44839+ return ERR_PTR(-ENOMEM);
44840+
44841+ memset(s_tmp->obj_hash, 0,
44842+ s_tmp->obj_hash_size *
44843+ sizeof (struct acl_object_label *));
44844+
44845+ /* add in objects */
44846+ err = copy_user_objs(ghash.first, s_tmp, role);
44847+
44848+ if (err)
44849+ return ERR_PTR(err);
44850+
44851+ /* set pointer for parent subject */
44852+ if (s_tmp->parent_subject) {
44853+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
44854+
44855+ if (IS_ERR(s_tmp2))
44856+ return s_tmp2;
44857+
44858+ s_tmp->parent_subject = s_tmp2;
44859+ }
44860+
44861+ /* add in ip acls */
44862+
44863+ if (!s_tmp->ip_num) {
44864+ s_tmp->ips = NULL;
44865+ goto insert;
44866+ }
44867+
44868+ i_tmp =
44869+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
44870+ sizeof (struct acl_ip_label *));
44871+
44872+ if (!i_tmp)
44873+ return ERR_PTR(-ENOMEM);
44874+
44875+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
44876+ *(i_tmp + i_num) =
44877+ (struct acl_ip_label *)
44878+ acl_alloc(sizeof (struct acl_ip_label));
44879+ if (!*(i_tmp + i_num))
44880+ return ERR_PTR(-ENOMEM);
44881+
44882+ if (copy_from_user
44883+ (&i_utmp2, s_tmp->ips + i_num,
44884+ sizeof (struct acl_ip_label *)))
44885+ return ERR_PTR(-EFAULT);
44886+
44887+ if (copy_from_user
44888+ (*(i_tmp + i_num), i_utmp2,
44889+ sizeof (struct acl_ip_label)))
44890+ return ERR_PTR(-EFAULT);
44891+
44892+ if ((*(i_tmp + i_num))->iface == NULL)
44893+ continue;
44894+
44895+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
44896+ if (!len || len >= IFNAMSIZ)
44897+ return ERR_PTR(-EINVAL);
44898+ tmp = acl_alloc(len);
44899+ if (tmp == NULL)
44900+ return ERR_PTR(-ENOMEM);
44901+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
44902+ return ERR_PTR(-EFAULT);
44903+ (*(i_tmp + i_num))->iface = tmp;
44904+ }
44905+
44906+ s_tmp->ips = i_tmp;
44907+
44908+insert:
44909+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
44910+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
44911+ return ERR_PTR(-ENOMEM);
44912+
44913+ return s_tmp;
44914+}
44915+
44916+static int
44917+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
44918+{
44919+ struct acl_subject_label s_pre;
44920+ struct acl_subject_label * ret;
44921+ int err;
44922+
44923+ while (userp) {
44924+ if (copy_from_user(&s_pre, userp,
44925+ sizeof (struct acl_subject_label)))
44926+ return -EFAULT;
44927+
44928+ /* do not add nested subjects here, add
44929+ while parsing objects
44930+ */
44931+
44932+ if (s_pre.mode & GR_NESTED) {
44933+ userp = s_pre.prev;
44934+ continue;
44935+ }
44936+
44937+ ret = do_copy_user_subj(userp, role);
44938+
44939+ err = PTR_ERR(ret);
44940+ if (IS_ERR(ret))
44941+ return err;
44942+
44943+ insert_acl_subj_label(ret, role);
44944+
44945+ userp = s_pre.prev;
44946+ }
44947+
44948+ return 0;
44949+}
44950+
44951+static int
44952+copy_user_acl(struct gr_arg *arg)
44953+{
44954+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
44955+ struct sprole_pw *sptmp;
44956+ struct gr_hash_struct *ghash;
44957+ uid_t *domainlist;
44958+ unsigned int r_num;
44959+ unsigned int len;
44960+ char *tmp;
44961+ int err = 0;
44962+ __u16 i;
44963+ __u32 num_subjs;
44964+
44965+ /* we need a default and kernel role */
44966+ if (arg->role_db.num_roles < 2)
44967+ return -EINVAL;
44968+
44969+ /* copy special role authentication info from userspace */
44970+
44971+ num_sprole_pws = arg->num_sprole_pws;
44972+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
44973+
44974+ if (!acl_special_roles) {
44975+ err = -ENOMEM;
44976+ goto cleanup;
44977+ }
44978+
44979+ for (i = 0; i < num_sprole_pws; i++) {
44980+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
44981+ if (!sptmp) {
44982+ err = -ENOMEM;
44983+ goto cleanup;
44984+ }
44985+ if (copy_from_user(sptmp, arg->sprole_pws + i,
44986+ sizeof (struct sprole_pw))) {
44987+ err = -EFAULT;
44988+ goto cleanup;
44989+ }
44990+
44991+ len =
44992+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
44993+
44994+ if (!len || len >= GR_SPROLE_LEN) {
44995+ err = -EINVAL;
44996+ goto cleanup;
44997+ }
44998+
44999+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
45000+ err = -ENOMEM;
45001+ goto cleanup;
45002+ }
45003+
45004+ if (copy_from_user(tmp, sptmp->rolename, len)) {
45005+ err = -EFAULT;
45006+ goto cleanup;
45007+ }
45008+ tmp[len-1] = '\0';
45009+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
45010+ printk(KERN_ALERT "Copying special role %s\n", tmp);
45011+#endif
45012+ sptmp->rolename = tmp;
45013+ acl_special_roles[i] = sptmp;
45014+ }
45015+
45016+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
45017+
45018+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
45019+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
45020+
45021+ if (!r_tmp) {
45022+ err = -ENOMEM;
45023+ goto cleanup;
45024+ }
45025+
45026+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
45027+ sizeof (struct acl_role_label *))) {
45028+ err = -EFAULT;
45029+ goto cleanup;
45030+ }
45031+
45032+ if (copy_from_user(r_tmp, r_utmp2,
45033+ sizeof (struct acl_role_label))) {
45034+ err = -EFAULT;
45035+ goto cleanup;
45036+ }
45037+
45038+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
45039+
45040+ if (!len || len >= PATH_MAX) {
45041+ err = -EINVAL;
45042+ goto cleanup;
45043+ }
45044+
45045+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
45046+ err = -ENOMEM;
45047+ goto cleanup;
45048+ }
45049+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
45050+ err = -EFAULT;
45051+ goto cleanup;
45052+ }
45053+ tmp[len-1] = '\0';
45054+ r_tmp->rolename = tmp;
45055+
45056+ if (!strcmp(r_tmp->rolename, "default")
45057+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
45058+ default_role = r_tmp;
45059+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
45060+ kernel_role = r_tmp;
45061+ }
45062+
45063+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
45064+ err = -ENOMEM;
45065+ goto cleanup;
45066+ }
45067+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
45068+ err = -EFAULT;
45069+ goto cleanup;
45070+ }
45071+
45072+ r_tmp->hash = ghash;
45073+
45074+ num_subjs = count_user_subjs(r_tmp->hash->first);
45075+
45076+ r_tmp->subj_hash_size = num_subjs;
45077+ r_tmp->subj_hash =
45078+ (struct acl_subject_label **)
45079+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
45080+
45081+ if (!r_tmp->subj_hash) {
45082+ err = -ENOMEM;
45083+ goto cleanup;
45084+ }
45085+
45086+ err = copy_user_allowedips(r_tmp);
45087+ if (err)
45088+ goto cleanup;
45089+
45090+ /* copy domain info */
45091+ if (r_tmp->domain_children != NULL) {
45092+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
45093+ if (domainlist == NULL) {
45094+ err = -ENOMEM;
45095+ goto cleanup;
45096+ }
45097+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
45098+ err = -EFAULT;
45099+ goto cleanup;
45100+ }
45101+ r_tmp->domain_children = domainlist;
45102+ }
45103+
45104+ err = copy_user_transitions(r_tmp);
45105+ if (err)
45106+ goto cleanup;
45107+
45108+ memset(r_tmp->subj_hash, 0,
45109+ r_tmp->subj_hash_size *
45110+ sizeof (struct acl_subject_label *));
45111+
45112+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
45113+
45114+ if (err)
45115+ goto cleanup;
45116+
45117+ /* set nested subject list to null */
45118+ r_tmp->hash->first = NULL;
45119+
45120+ insert_acl_role_label(r_tmp);
45121+ }
45122+
45123+ goto return_err;
45124+ cleanup:
45125+ free_variables();
45126+ return_err:
45127+ return err;
45128+
45129+}
45130+
45131+static int
45132+gracl_init(struct gr_arg *args)
45133+{
45134+ int error = 0;
45135+
45136+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
45137+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
45138+
45139+ if (init_variables(args)) {
45140+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
45141+ error = -ENOMEM;
45142+ free_variables();
45143+ goto out;
45144+ }
45145+
45146+ error = copy_user_acl(args);
45147+ free_init_variables();
45148+ if (error) {
45149+ free_variables();
45150+ goto out;
45151+ }
45152+
45153+ if ((error = gr_set_acls(0))) {
45154+ free_variables();
45155+ goto out;
45156+ }
45157+
45158+ pax_open_kernel();
45159+ gr_status |= GR_READY;
45160+ pax_close_kernel();
45161+
45162+ out:
45163+ return error;
45164+}
45165+
45166+/* derived from glibc fnmatch(); returns 0 on match, 1 on no match */
45167+
45168+static int
45169+glob_match(const char *p, const char *n)
45170+{
45171+ char c;
45172+
45173+ while ((c = *p++) != '\0') {
45174+ switch (c) {
45175+ case '?':
45176+ if (*n == '\0')
45177+ return 1;
45178+ else if (*n == '/')
45179+ return 1;
45180+ break;
45181+ case '\\':
45182+ if (*n != c)
45183+ return 1;
45184+ break;
45185+ case '*':
45186+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
45187+ if (*n == '/')
45188+ return 1;
45189+ else if (c == '?') {
45190+ if (*n == '\0')
45191+ return 1;
45192+ else
45193+ ++n;
45194+ }
45195+ }
45196+ if (c == '\0') {
45197+ return 0;
45198+ } else {
45199+ const char *endp;
45200+
45201+ if ((endp = strchr(n, '/')) == NULL)
45202+ endp = n + strlen(n);
45203+
45204+ if (c == '[') {
45205+ for (--p; n < endp; ++n)
45206+ if (!glob_match(p, n))
45207+ return 0;
45208+ } else if (c == '/') {
45209+ while (*n != '\0' && *n != '/')
45210+ ++n;
45211+ if (*n == '/' && !glob_match(p, n + 1))
45212+ return 0;
45213+ } else {
45214+ for (--p; n < endp; ++n)
45215+ if (*n == c && !glob_match(p, n))
45216+ return 0;
45217+ }
45218+
45219+ return 1;
45220+ }
45221+ case '[':
45222+ {
45223+ int not;
45224+ char cold;
45225+
45226+ if (*n == '\0' || *n == '/')
45227+ return 1;
45228+
45229+ not = (*p == '!' || *p == '^');
45230+ if (not)
45231+ ++p;
45232+
45233+ c = *p++;
45234+ for (;;) {
45235+ unsigned char fn = (unsigned char)*n;
45236+
45237+ if (c == '\0')
45238+ return 1;
45239+ else {
45240+ if (c == fn)
45241+ goto matched;
45242+ cold = c;
45243+ c = *p++;
45244+
45245+ if (c == '-' && *p != ']') {
45246+ unsigned char cend = *p++;
45247+
45248+ if (cend == '\0')
45249+ return 1;
45250+
45251+ if (cold <= fn && fn <= cend)
45252+ goto matched;
45253+
45254+ c = *p++;
45255+ }
45256+ }
45257+
45258+ if (c == ']')
45259+ break;
45260+ }
45261+ if (!not)
45262+ return 1;
45263+ break;
45264+ matched:
45265+ while (c != ']') {
45266+ if (c == '\0')
45267+ return 1;
45268+
45269+ c = *p++;
45270+ }
45271+ if (not)
45272+ return 1;
45273+ }
45274+ break;
45275+ default:
45276+ if (c != *n)
45277+ return 1;
45278+ }
45279+
45280+ ++n;
45281+ }
45282+
45283+ if (*n == '\0')
45284+ return 0;
45285+
45286+ if (*n == '/')
45287+ return 0;
45288+
45289+ return 1;
45290+}
45291+
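/* Illustrative sketch, not part of the patch: glob_match() above keeps the
 * fnmatch(3) return convention -- 0 for a match, nonzero for no match -- and,
 * like FNM_PATHNAME, never lets '?' or '*' cross a '/'.  The userspace
 * program below shows the same semantics with the libc matcher. */
#include <fnmatch.h>
#include <stdio.h>

int main(void)
{
	/* 0: '*' covers one component only */
	printf("%d\n", fnmatch("/home/*/.ssh", "/home/alice/.ssh", FNM_PATHNAME));
	/* FNM_NOMATCH: '*' may not span the '/' */
	printf("%d\n", fnmatch("/home/*",      "/home/alice/.ssh", FNM_PATHNAME));
	/* 0: '?' matches a single non-'/' character */
	printf("%d\n", fnmatch("/tmp/file?",   "/tmp/fileA",       FNM_PATHNAME));
	return 0;
}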
45292+static struct acl_object_label *
45293+chk_glob_label(struct acl_object_label *globbed,
45294+ struct dentry *dentry, struct vfsmount *mnt, char **path)
45295+{
45296+ struct acl_object_label *tmp;
45297+
45298+ if (*path == NULL)
45299+ *path = gr_to_filename_nolock(dentry, mnt);
45300+
45301+ tmp = globbed;
45302+
45303+ while (tmp) {
45304+ if (!glob_match(tmp->filename, *path))
45305+ return tmp;
45306+ tmp = tmp->next;
45307+ }
45308+
45309+ return NULL;
45310+}
45311+
45312+static struct acl_object_label *
45313+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
45314+ const ino_t curr_ino, const dev_t curr_dev,
45315+ const struct acl_subject_label *subj, char **path, const int checkglob)
45316+{
45317+ struct acl_subject_label *tmpsubj;
45318+ struct acl_object_label *retval;
45319+ struct acl_object_label *retval2;
45320+
45321+ tmpsubj = (struct acl_subject_label *) subj;
45322+ read_lock(&gr_inode_lock);
45323+ do {
45324+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
45325+ if (retval) {
45326+ if (checkglob && retval->globbed) {
45327+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
45328+ (struct vfsmount *)orig_mnt, path);
45329+ if (retval2)
45330+ retval = retval2;
45331+ }
45332+ break;
45333+ }
45334+ } while ((tmpsubj = tmpsubj->parent_subject));
45335+ read_unlock(&gr_inode_lock);
45336+
45337+ return retval;
45338+}
45339+
45340+static __inline__ struct acl_object_label *
45341+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
45342+ const struct dentry *curr_dentry,
45343+ const struct acl_subject_label *subj, char **path, const int checkglob)
45344+{
45345+ int newglob = checkglob;
45346+
45347+	/* if we aren't checking a subdirectory of the original path yet, don't do glob checking,
45348+	   as we don't want a / * rule to match instead of the / object;
45349+	   don't do this for create lookups that call this function, though, since they're looking up
45350+	   on the parent and thus need globbing checks on all paths
45351+ */
45352+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
45353+ newglob = GR_NO_GLOB;
45354+
45355+ return __full_lookup(orig_dentry, orig_mnt,
45356+ curr_dentry->d_inode->i_ino,
45357+ __get_dev(curr_dentry), subj, path, newglob);
45358+}
45359+
45360+static struct acl_object_label *
45361+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
45362+ const struct acl_subject_label *subj, char *path, const int checkglob)
45363+{
45364+ struct dentry *dentry = (struct dentry *) l_dentry;
45365+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
45366+ struct acl_object_label *retval;
45367+
45368+ spin_lock(&dcache_lock);
45369+ spin_lock(&vfsmount_lock);
45370+
45371+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
45372+#ifdef CONFIG_NET
45373+ mnt == sock_mnt ||
45374+#endif
45375+#ifdef CONFIG_HUGETLBFS
45376+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
45377+#endif
45378+ /* ignore Eric Biederman */
45379+ IS_PRIVATE(l_dentry->d_inode))) {
45380+ retval = fakefs_obj;
45381+ goto out;
45382+ }
45383+
45384+ for (;;) {
45385+ if (dentry == real_root && mnt == real_root_mnt)
45386+ break;
45387+
45388+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
45389+ if (mnt->mnt_parent == mnt)
45390+ break;
45391+
45392+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
45393+ if (retval != NULL)
45394+ goto out;
45395+
45396+ dentry = mnt->mnt_mountpoint;
45397+ mnt = mnt->mnt_parent;
45398+ continue;
45399+ }
45400+
45401+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
45402+ if (retval != NULL)
45403+ goto out;
45404+
45405+ dentry = dentry->d_parent;
45406+ }
45407+
45408+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
45409+
45410+ if (retval == NULL)
45411+ retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
45412+out:
45413+ spin_unlock(&vfsmount_lock);
45414+ spin_unlock(&dcache_lock);
45415+
45416+ BUG_ON(retval == NULL);
45417+
45418+ return retval;
45419+}
45420+
45421+static __inline__ struct acl_object_label *
45422+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
45423+ const struct acl_subject_label *subj)
45424+{
45425+ char *path = NULL;
45426+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
45427+}
45428+
45429+static __inline__ struct acl_object_label *
45430+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
45431+ const struct acl_subject_label *subj)
45432+{
45433+ char *path = NULL;
45434+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
45435+}
45436+
45437+static __inline__ struct acl_object_label *
45438+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
45439+ const struct acl_subject_label *subj, char *path)
45440+{
45441+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
45442+}
45443+
45444+static struct acl_subject_label *
45445+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
45446+ const struct acl_role_label *role)
45447+{
45448+ struct dentry *dentry = (struct dentry *) l_dentry;
45449+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
45450+ struct acl_subject_label *retval;
45451+
45452+ spin_lock(&dcache_lock);
45453+ spin_lock(&vfsmount_lock);
45454+
45455+ for (;;) {
45456+ if (dentry == real_root && mnt == real_root_mnt)
45457+ break;
45458+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
45459+ if (mnt->mnt_parent == mnt)
45460+ break;
45461+
45462+ read_lock(&gr_inode_lock);
45463+ retval =
45464+ lookup_acl_subj_label(dentry->d_inode->i_ino,
45465+ __get_dev(dentry), role);
45466+ read_unlock(&gr_inode_lock);
45467+ if (retval != NULL)
45468+ goto out;
45469+
45470+ dentry = mnt->mnt_mountpoint;
45471+ mnt = mnt->mnt_parent;
45472+ continue;
45473+ }
45474+
45475+ read_lock(&gr_inode_lock);
45476+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
45477+ __get_dev(dentry), role);
45478+ read_unlock(&gr_inode_lock);
45479+ if (retval != NULL)
45480+ goto out;
45481+
45482+ dentry = dentry->d_parent;
45483+ }
45484+
45485+ read_lock(&gr_inode_lock);
45486+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
45487+ __get_dev(dentry), role);
45488+ read_unlock(&gr_inode_lock);
45489+
45490+ if (unlikely(retval == NULL)) {
45491+ read_lock(&gr_inode_lock);
45492+ retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
45493+ __get_dev(real_root), role);
45494+ read_unlock(&gr_inode_lock);
45495+ }
45496+out:
45497+ spin_unlock(&vfsmount_lock);
45498+ spin_unlock(&dcache_lock);
45499+
45500+ BUG_ON(retval == NULL);
45501+
45502+ return retval;
45503+}
45504+
45505+static void
45506+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
45507+{
45508+ struct task_struct *task = current;
45509+ const struct cred *cred = current_cred();
45510+
45511+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
45512+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
45513+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
45514+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
45515+
45516+ return;
45517+}
45518+
45519+static void
45520+gr_log_learn_sysctl(const char *path, const __u32 mode)
45521+{
45522+ struct task_struct *task = current;
45523+ const struct cred *cred = current_cred();
45524+
45525+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
45526+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
45527+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
45528+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
45529+
45530+ return;
45531+}
45532+
45533+static void
45534+gr_log_learn_id_change(const char type, const unsigned int real,
45535+ const unsigned int effective, const unsigned int fs)
45536+{
45537+ struct task_struct *task = current;
45538+ const struct cred *cred = current_cred();
45539+
45540+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
45541+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
45542+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
45543+ type, real, effective, fs, &task->signal->saved_ip);
45544+
45545+ return;
45546+}
45547+
45548+__u32
45549+gr_check_link(const struct dentry * new_dentry,
45550+ const struct dentry * parent_dentry,
45551+ const struct vfsmount * parent_mnt,
45552+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
45553+{
45554+ struct acl_object_label *obj;
45555+ __u32 oldmode, newmode;
45556+ __u32 needmode;
45557+
45558+ if (unlikely(!(gr_status & GR_READY)))
45559+ return (GR_CREATE | GR_LINK);
45560+
45561+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
45562+ oldmode = obj->mode;
45563+
45564+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
45565+ oldmode |= (GR_CREATE | GR_LINK);
45566+
45567+ needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
45568+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
45569+ needmode |= GR_SETID | GR_AUDIT_SETID;
45570+
45571+ newmode =
45572+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
45573+ oldmode | needmode);
45574+
45575+ needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
45576+ GR_SETID | GR_READ | GR_FIND | GR_DELETE |
45577+ GR_INHERIT | GR_AUDIT_INHERIT);
45578+
45579+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
45580+ goto bad;
45581+
45582+ if ((oldmode & needmode) != needmode)
45583+ goto bad;
45584+
45585+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
45586+ if ((newmode & needmode) != needmode)
45587+ goto bad;
45588+
45589+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
45590+ return newmode;
45591+bad:
45592+ needmode = oldmode;
45593+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
45594+ needmode |= GR_SETID;
45595+
45596+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
45597+ gr_log_learn(old_dentry, old_mnt, needmode);
45598+ return (GR_CREATE | GR_LINK);
45599+ } else if (newmode & GR_SUPPRESS)
45600+ return GR_SUPPRESS;
45601+ else
45602+ return 0;
45603+}
45604+
45605+__u32
45606+gr_search_file(const struct dentry * dentry, const __u32 mode,
45607+ const struct vfsmount * mnt)
45608+{
45609+ __u32 retval = mode;
45610+ struct acl_subject_label *curracl;
45611+ struct acl_object_label *currobj;
45612+
45613+ if (unlikely(!(gr_status & GR_READY)))
45614+ return (mode & ~GR_AUDITS);
45615+
45616+ curracl = current->acl;
45617+
45618+ currobj = chk_obj_label(dentry, mnt, curracl);
45619+ retval = currobj->mode & mode;
45620+
45621+ /* if we're opening a specified transfer file for writing
45622+ (e.g. /dev/initctl), then transfer our role to init
45623+ */
45624+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
45625+ current->role->roletype & GR_ROLE_PERSIST)) {
45626+ struct task_struct *task = init_pid_ns.child_reaper;
45627+
45628+ if (task->role != current->role) {
45629+ task->acl_sp_role = 0;
45630+ task->acl_role_id = current->acl_role_id;
45631+ task->role = current->role;
45632+ rcu_read_lock();
45633+ read_lock(&grsec_exec_file_lock);
45634+ gr_apply_subject_to_task(task);
45635+ read_unlock(&grsec_exec_file_lock);
45636+ rcu_read_unlock();
45637+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
45638+ }
45639+ }
45640+
45641+ if (unlikely
45642+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
45643+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
45644+ __u32 new_mode = mode;
45645+
45646+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
45647+
45648+ retval = new_mode;
45649+
45650+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
45651+ new_mode |= GR_INHERIT;
45652+
45653+ if (!(mode & GR_NOLEARN))
45654+ gr_log_learn(dentry, mnt, new_mode);
45655+ }
45656+
45657+ return retval;
45658+}
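/*
 * Editor's note -- illustrative sketch, not part of the patch: at its core,
 * gr_search_file() above intersects the requested access bits with the bits
 * granted by the matching object, and a learning subject is instead granted
 * the request (minus audit/suppress bits) so the miss can be logged as a rule
 * to learn rather than enforced.  The bit values below are invented.
 */
#define DEMO_AUDITS	0x40u
#define DEMO_SUPPRESS	0x80u

/* granted: bits the object allows; requested: bits the caller asked for */
static unsigned int
demo_search(unsigned int granted, unsigned int requested, int learning)
{
	unsigned int retval = granted & requested;

	if (learning && retval != (requested & ~(DEMO_AUDITS | DEMO_SUPPRESS)))
		retval = requested & ~(DEMO_AUDITS | DEMO_SUPPRESS);

	return retval;
}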
45659+
45660+__u32
45661+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
45662+ const struct vfsmount * mnt, const __u32 mode)
45663+{
45664+ struct name_entry *match;
45665+ struct acl_object_label *matchpo;
45666+ struct acl_subject_label *curracl;
45667+ char *path;
45668+ __u32 retval;
45669+
45670+ if (unlikely(!(gr_status & GR_READY)))
45671+ return (mode & ~GR_AUDITS);
45672+
45673+ preempt_disable();
45674+ path = gr_to_filename_rbac(new_dentry, mnt);
45675+ match = lookup_name_entry_create(path);
45676+
45677+ if (!match)
45678+ goto check_parent;
45679+
45680+ curracl = current->acl;
45681+
45682+ read_lock(&gr_inode_lock);
45683+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
45684+ read_unlock(&gr_inode_lock);
45685+
45686+ if (matchpo) {
45687+ if ((matchpo->mode & mode) !=
45688+ (mode & ~(GR_AUDITS | GR_SUPPRESS))
45689+ && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
45690+ __u32 new_mode = mode;
45691+
45692+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
45693+
45694+ gr_log_learn(new_dentry, mnt, new_mode);
45695+
45696+ preempt_enable();
45697+ return new_mode;
45698+ }
45699+ preempt_enable();
45700+ return (matchpo->mode & mode);
45701+ }
45702+
45703+ check_parent:
45704+ curracl = current->acl;
45705+
45706+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
45707+ retval = matchpo->mode & mode;
45708+
45709+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
45710+ && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
45711+ __u32 new_mode = mode;
45712+
45713+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
45714+
45715+ gr_log_learn(new_dentry, mnt, new_mode);
45716+ preempt_enable();
45717+ return new_mode;
45718+ }
45719+
45720+ preempt_enable();
45721+ return retval;
45722+}
45723+
45724+int
45725+gr_check_hidden_task(const struct task_struct *task)
45726+{
45727+ if (unlikely(!(gr_status & GR_READY)))
45728+ return 0;
45729+
45730+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
45731+ return 1;
45732+
45733+ return 0;
45734+}
45735+
45736+int
45737+gr_check_protected_task(const struct task_struct *task)
45738+{
45739+ if (unlikely(!(gr_status & GR_READY) || !task))
45740+ return 0;
45741+
45742+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
45743+ task->acl != current->acl)
45744+ return 1;
45745+
45746+ return 0;
45747+}
45748+
45749+int
45750+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
45751+{
45752+ struct task_struct *p;
45753+ int ret = 0;
45754+
45755+ if (unlikely(!(gr_status & GR_READY) || !pid))
45756+ return ret;
45757+
45758+ read_lock(&tasklist_lock);
45759+ do_each_pid_task(pid, type, p) {
45760+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
45761+ p->acl != current->acl) {
45762+ ret = 1;
45763+ goto out;
45764+ }
45765+ } while_each_pid_task(pid, type, p);
45766+out:
45767+ read_unlock(&tasklist_lock);
45768+
45769+ return ret;
45770+}
45771+
45772+void
45773+gr_copy_label(struct task_struct *tsk)
45774+{
45775+ tsk->signal->used_accept = 0;
45776+ tsk->acl_sp_role = 0;
45777+ tsk->acl_role_id = current->acl_role_id;
45778+ tsk->acl = current->acl;
45779+ tsk->role = current->role;
45780+ tsk->signal->curr_ip = current->signal->curr_ip;
45781+ tsk->signal->saved_ip = current->signal->saved_ip;
45782+ if (current->exec_file)
45783+ get_file(current->exec_file);
45784+ tsk->exec_file = current->exec_file;
45785+ tsk->is_writable = current->is_writable;
45786+ if (unlikely(current->signal->used_accept)) {
45787+ current->signal->curr_ip = 0;
45788+ current->signal->saved_ip = 0;
45789+ }
45790+
45791+ return;
45792+}
45793+
45794+static void
45795+gr_set_proc_res(struct task_struct *task)
45796+{
45797+ struct acl_subject_label *proc;
45798+ unsigned short i;
45799+
45800+ proc = task->acl;
45801+
45802+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
45803+ return;
45804+
45805+ for (i = 0; i < RLIM_NLIMITS; i++) {
45806+ if (!(proc->resmask & (1 << i)))
45807+ continue;
45808+
45809+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
45810+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
45811+ }
45812+
45813+ return;
45814+}
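/*
 * Editor's note -- illustrative sketch, not part of the patch: gr_set_proc_res()
 * above copies only those rlimits whose bit is set in the subject's resmask, so
 * limits the policy never mentions keep their current values.  In miniature,
 * with invented names:
 */
struct demo_limit {
	unsigned long cur;
	unsigned long max;
};

static void
demo_apply_limits(struct demo_limit *dst, const struct demo_limit *src,
		  unsigned int resmask, unsigned int nlimits)
{
	unsigned int i;

	for (i = 0; i < nlimits; i++) {
		if (!(resmask & (1u << i)))
			continue;	/* this limit is not set by the policy */
		dst[i] = src[i];	/* copy both soft and hard limits */
	}
}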
45815+
45816+extern int __gr_process_user_ban(struct user_struct *user);
45817+
45818+int
45819+gr_check_user_change(int real, int effective, int fs)
45820+{
45821+ unsigned int i;
45822+ __u16 num;
45823+ uid_t *uidlist;
45824+ int curuid;
45825+ int realok = 0;
45826+ int effectiveok = 0;
45827+ int fsok = 0;
45828+
45829+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
45830+ struct user_struct *user;
45831+
45832+ if (real == -1)
45833+ goto skipit;
45834+
45835+ user = find_user(real);
45836+ if (user == NULL)
45837+ goto skipit;
45838+
45839+ if (__gr_process_user_ban(user)) {
45840+ /* for find_user */
45841+ free_uid(user);
45842+ return 1;
45843+ }
45844+
45845+ /* for find_user */
45846+ free_uid(user);
45847+
45848+skipit:
45849+#endif
45850+
45851+ if (unlikely(!(gr_status & GR_READY)))
45852+ return 0;
45853+
45854+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
45855+ gr_log_learn_id_change('u', real, effective, fs);
45856+
45857+ num = current->acl->user_trans_num;
45858+ uidlist = current->acl->user_transitions;
45859+
45860+ if (uidlist == NULL)
45861+ return 0;
45862+
45863+ if (real == -1)
45864+ realok = 1;
45865+ if (effective == -1)
45866+ effectiveok = 1;
45867+ if (fs == -1)
45868+ fsok = 1;
45869+
45870+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
45871+ for (i = 0; i < num; i++) {
45872+ curuid = (int)uidlist[i];
45873+ if (real == curuid)
45874+ realok = 1;
45875+ if (effective == curuid)
45876+ effectiveok = 1;
45877+ if (fs == curuid)
45878+ fsok = 1;
45879+ }
45880+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
45881+ for (i = 0; i < num; i++) {
45882+ curuid = (int)uidlist[i];
45883+ if (real == curuid)
45884+ break;
45885+ if (effective == curuid)
45886+ break;
45887+ if (fs == curuid)
45888+ break;
45889+ }
45890+ /* not in deny list */
45891+ if (i == num) {
45892+ realok = 1;
45893+ effectiveok = 1;
45894+ fsok = 1;
45895+ }
45896+ }
45897+
45898+ if (realok && effectiveok && fsok)
45899+ return 0;
45900+ else {
45901+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
45902+ return 1;
45903+ }
45904+}
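/*
 * Editor's note -- illustrative sketch, not part of the patch: the id-transition
 * check above works in one of two modes.  With an allow list, every requested id
 * (real/effective/fs, with -1 meaning "unchanged") must appear in the list; with
 * a deny list, the change is rejected if any requested id appears in it.  A
 * compressed user-space model, with invented names:
 */
#include <stdbool.h>
#include <stddef.h>

static bool demo_id_listed(const int *list, size_t n, int id)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (list[i] == id)
			return true;
	return false;
}

/* returns true if the transition to (real, effective, fs) is permitted */
static bool demo_id_transition_ok(const int *list, size_t n, bool allow_list,
				  int real, int effective, int fs)
{
	const int ids[3] = { real, effective, fs };
	size_t i;

	for (i = 0; i < 3; i++) {
		if (ids[i] == -1)	/* -1: this id is not being changed */
			continue;
		if (allow_list && !demo_id_listed(list, n, ids[i]))
			return false;
		if (!allow_list && demo_id_listed(list, n, ids[i]))
			return false;
	}
	return true;
}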
45905+
45906+int
45907+gr_check_group_change(int real, int effective, int fs)
45908+{
45909+ unsigned int i;
45910+ __u16 num;
45911+ gid_t *gidlist;
45912+ int curgid;
45913+ int realok = 0;
45914+ int effectiveok = 0;
45915+ int fsok = 0;
45916+
45917+ if (unlikely(!(gr_status & GR_READY)))
45918+ return 0;
45919+
45920+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
45921+ gr_log_learn_id_change('g', real, effective, fs);
45922+
45923+ num = current->acl->group_trans_num;
45924+ gidlist = current->acl->group_transitions;
45925+
45926+ if (gidlist == NULL)
45927+ return 0;
45928+
45929+ if (real == -1)
45930+ realok = 1;
45931+ if (effective == -1)
45932+ effectiveok = 1;
45933+ if (fs == -1)
45934+ fsok = 1;
45935+
45936+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
45937+ for (i = 0; i < num; i++) {
45938+ curgid = (int)gidlist[i];
45939+ if (real == curgid)
45940+ realok = 1;
45941+ if (effective == curgid)
45942+ effectiveok = 1;
45943+ if (fs == curgid)
45944+ fsok = 1;
45945+ }
45946+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
45947+ for (i = 0; i < num; i++) {
45948+ curgid = (int)gidlist[i];
45949+ if (real == curgid)
45950+ break;
45951+ if (effective == curgid)
45952+ break;
45953+ if (fs == curgid)
45954+ break;
45955+ }
45956+ /* not in deny list */
45957+ if (i == num) {
45958+ realok = 1;
45959+ effectiveok = 1;
45960+ fsok = 1;
45961+ }
45962+ }
45963+
45964+ if (realok && effectiveok && fsok)
45965+ return 0;
45966+ else {
45967+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
45968+ return 1;
45969+ }
45970+}
45971+
45972+void
45973+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
45974+{
45975+ struct acl_role_label *role = task->role;
45976+ struct acl_subject_label *subj = NULL;
45977+ struct acl_object_label *obj;
45978+ struct file *filp;
45979+
45980+ if (unlikely(!(gr_status & GR_READY)))
45981+ return;
45982+
45983+ filp = task->exec_file;
45984+
45985+ /* kernel process, we'll give them the kernel role */
45986+ if (unlikely(!filp)) {
45987+ task->role = kernel_role;
45988+ task->acl = kernel_role->root_label;
45989+ return;
45990+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
45991+ role = lookup_acl_role_label(task, uid, gid);
45992+
45993+ /* perform subject lookup in possibly new role
45994+ we can use this result below in the case where role == task->role
45995+ */
45996+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
45997+
45998+	/* if we changed uid/gid but ended up in the same role
45999+	   and are using inheritance, don't lose the inherited subject:
46000+	   if the current subject is other than what a normal lookup
46001+	   would produce, we arrived at it via inheritance, so don't
46002+	   lose that subject
46003+	*/
46004+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
46005+ (subj == task->acl)))
46006+ task->acl = subj;
46007+
46008+ task->role = role;
46009+
46010+ task->is_writable = 0;
46011+
46012+ /* ignore additional mmap checks for processes that are writable
46013+ by the default ACL */
46014+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
46015+ if (unlikely(obj->mode & GR_WRITE))
46016+ task->is_writable = 1;
46017+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
46018+ if (unlikely(obj->mode & GR_WRITE))
46019+ task->is_writable = 1;
46020+
46021+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46022+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
46023+#endif
46024+
46025+ gr_set_proc_res(task);
46026+
46027+ return;
46028+}
46029+
46030+int
46031+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
46032+ const int unsafe_share)
46033+{
46034+ struct task_struct *task = current;
46035+ struct acl_subject_label *newacl;
46036+ struct acl_object_label *obj;
46037+ __u32 retmode;
46038+
46039+ if (unlikely(!(gr_status & GR_READY)))
46040+ return 0;
46041+
46042+ newacl = chk_subj_label(dentry, mnt, task->role);
46043+
46044+ task_lock(task);
46045+ if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
46046+ !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
46047+ !(task->role->roletype & GR_ROLE_GOD) &&
46048+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
46049+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
46050+ task_unlock(task);
46051+ if (unsafe_share)
46052+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
46053+ else
46054+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
46055+ return -EACCES;
46056+ }
46057+ task_unlock(task);
46058+
46059+ obj = chk_obj_label(dentry, mnt, task->acl);
46060+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
46061+
46062+ if (!(task->acl->mode & GR_INHERITLEARN) &&
46063+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
46064+ if (obj->nested)
46065+ task->acl = obj->nested;
46066+ else
46067+ task->acl = newacl;
46068+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
46069+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
46070+
46071+ task->is_writable = 0;
46072+
46073+ /* ignore additional mmap checks for processes that are writable
46074+ by the default ACL */
46075+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
46076+ if (unlikely(obj->mode & GR_WRITE))
46077+ task->is_writable = 1;
46078+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
46079+ if (unlikely(obj->mode & GR_WRITE))
46080+ task->is_writable = 1;
46081+
46082+ gr_set_proc_res(task);
46083+
46084+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46085+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
46086+#endif
46087+ return 0;
46088+}
46089+
46090+/* always called with valid inodev ptr */
46091+static void
46092+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
46093+{
46094+ struct acl_object_label *matchpo;
46095+ struct acl_subject_label *matchps;
46096+ struct acl_subject_label *subj;
46097+ struct acl_role_label *role;
46098+ unsigned int x;
46099+
46100+ FOR_EACH_ROLE_START(role)
46101+ FOR_EACH_SUBJECT_START(role, subj, x)
46102+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
46103+ matchpo->mode |= GR_DELETED;
46104+ FOR_EACH_SUBJECT_END(subj,x)
46105+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
46106+ if (subj->inode == ino && subj->device == dev)
46107+ subj->mode |= GR_DELETED;
46108+ FOR_EACH_NESTED_SUBJECT_END(subj)
46109+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
46110+ matchps->mode |= GR_DELETED;
46111+ FOR_EACH_ROLE_END(role)
46112+
46113+ inodev->nentry->deleted = 1;
46114+
46115+ return;
46116+}
46117+
46118+void
46119+gr_handle_delete(const ino_t ino, const dev_t dev)
46120+{
46121+ struct inodev_entry *inodev;
46122+
46123+ if (unlikely(!(gr_status & GR_READY)))
46124+ return;
46125+
46126+ write_lock(&gr_inode_lock);
46127+ inodev = lookup_inodev_entry(ino, dev);
46128+ if (inodev != NULL)
46129+ do_handle_delete(inodev, ino, dev);
46130+ write_unlock(&gr_inode_lock);
46131+
46132+ return;
46133+}
46134+
46135+static void
46136+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
46137+ const ino_t newinode, const dev_t newdevice,
46138+ struct acl_subject_label *subj)
46139+{
46140+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
46141+ struct acl_object_label *match;
46142+
46143+ match = subj->obj_hash[index];
46144+
46145+ while (match && (match->inode != oldinode ||
46146+ match->device != olddevice ||
46147+ !(match->mode & GR_DELETED)))
46148+ match = match->next;
46149+
46150+ if (match && (match->inode == oldinode)
46151+ && (match->device == olddevice)
46152+ && (match->mode & GR_DELETED)) {
46153+ if (match->prev == NULL) {
46154+ subj->obj_hash[index] = match->next;
46155+ if (match->next != NULL)
46156+ match->next->prev = NULL;
46157+ } else {
46158+ match->prev->next = match->next;
46159+ if (match->next != NULL)
46160+ match->next->prev = match->prev;
46161+ }
46162+ match->prev = NULL;
46163+ match->next = NULL;
46164+ match->inode = newinode;
46165+ match->device = newdevice;
46166+ match->mode &= ~GR_DELETED;
46167+
46168+ insert_acl_obj_label(match, subj);
46169+ }
46170+
46171+ return;
46172+}
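/*
 * Editor's note -- illustrative sketch, not part of the patch: the update
 * helpers above "undelete" an entry by unlinking it from its doubly linked
 * hash chain, rewriting its inode/device key, and reinserting it (the real
 * code then rehashes via insert_acl_obj_label()/insert_acl_subj_label()).
 * The minimal unlink step, with invented types:
 */
#include <stddef.h>

struct demo_entry {
	unsigned long ino, dev;
	struct demo_entry *prev, *next;
};

/* unlink entry from the chain whose head pointer is *head */
static void demo_chain_unlink(struct demo_entry **head, struct demo_entry *entry)
{
	if (entry->prev == NULL) {
		*head = entry->next;		/* entry was the chain head */
		if (entry->next != NULL)
			entry->next->prev = NULL;
	} else {
		entry->prev->next = entry->next;
		if (entry->next != NULL)
			entry->next->prev = entry->prev;
	}
	entry->prev = NULL;
	entry->next = NULL;
}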
46173+
46174+static void
46175+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
46176+ const ino_t newinode, const dev_t newdevice,
46177+ struct acl_role_label *role)
46178+{
46179+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
46180+ struct acl_subject_label *match;
46181+
46182+ match = role->subj_hash[index];
46183+
46184+ while (match && (match->inode != oldinode ||
46185+ match->device != olddevice ||
46186+ !(match->mode & GR_DELETED)))
46187+ match = match->next;
46188+
46189+ if (match && (match->inode == oldinode)
46190+ && (match->device == olddevice)
46191+ && (match->mode & GR_DELETED)) {
46192+ if (match->prev == NULL) {
46193+ role->subj_hash[index] = match->next;
46194+ if (match->next != NULL)
46195+ match->next->prev = NULL;
46196+ } else {
46197+ match->prev->next = match->next;
46198+ if (match->next != NULL)
46199+ match->next->prev = match->prev;
46200+ }
46201+ match->prev = NULL;
46202+ match->next = NULL;
46203+ match->inode = newinode;
46204+ match->device = newdevice;
46205+ match->mode &= ~GR_DELETED;
46206+
46207+ insert_acl_subj_label(match, role);
46208+ }
46209+
46210+ return;
46211+}
46212+
46213+static void
46214+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
46215+ const ino_t newinode, const dev_t newdevice)
46216+{
46217+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
46218+ struct inodev_entry *match;
46219+
46220+ match = inodev_set.i_hash[index];
46221+
46222+ while (match && (match->nentry->inode != oldinode ||
46223+ match->nentry->device != olddevice || !match->nentry->deleted))
46224+ match = match->next;
46225+
46226+ if (match && (match->nentry->inode == oldinode)
46227+ && (match->nentry->device == olddevice) &&
46228+ match->nentry->deleted) {
46229+ if (match->prev == NULL) {
46230+ inodev_set.i_hash[index] = match->next;
46231+ if (match->next != NULL)
46232+ match->next->prev = NULL;
46233+ } else {
46234+ match->prev->next = match->next;
46235+ if (match->next != NULL)
46236+ match->next->prev = match->prev;
46237+ }
46238+ match->prev = NULL;
46239+ match->next = NULL;
46240+ match->nentry->inode = newinode;
46241+ match->nentry->device = newdevice;
46242+ match->nentry->deleted = 0;
46243+
46244+ insert_inodev_entry(match);
46245+ }
46246+
46247+ return;
46248+}
46249+
46250+static void
46251+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
46252+ const struct vfsmount *mnt)
46253+{
46254+ struct acl_subject_label *subj;
46255+ struct acl_role_label *role;
46256+ unsigned int x;
46257+ ino_t inode = dentry->d_inode->i_ino;
46258+ dev_t dev = __get_dev(dentry);
46259+
46260+ FOR_EACH_ROLE_START(role)
46261+ update_acl_subj_label(matchn->inode, matchn->device,
46262+ inode, dev, role);
46263+
46264+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
46265+			if ((subj->inode == matchn->inode) && (subj->device == matchn->device)) {
46266+				subj->inode = inode;
46267+				subj->device = dev;
46268+			}
46269+ FOR_EACH_NESTED_SUBJECT_END(subj)
46270+ FOR_EACH_SUBJECT_START(role, subj, x)
46271+ update_acl_obj_label(matchn->inode, matchn->device,
46272+ inode, dev, subj);
46273+ FOR_EACH_SUBJECT_END(subj,x)
46274+ FOR_EACH_ROLE_END(role)
46275+
46276+ update_inodev_entry(matchn->inode, matchn->device, inode, dev);
46277+
46278+ return;
46279+}
46280+
46281+void
46282+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
46283+{
46284+ struct name_entry *matchn;
46285+
46286+ if (unlikely(!(gr_status & GR_READY)))
46287+ return;
46288+
46289+ preempt_disable();
46290+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
46291+
46292+ if (unlikely((unsigned long)matchn)) {
46293+ write_lock(&gr_inode_lock);
46294+ do_handle_create(matchn, dentry, mnt);
46295+ write_unlock(&gr_inode_lock);
46296+ }
46297+ preempt_enable();
46298+
46299+ return;
46300+}
46301+
46302+void
46303+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
46304+ struct dentry *old_dentry,
46305+ struct dentry *new_dentry,
46306+ struct vfsmount *mnt, const __u8 replace)
46307+{
46308+ struct name_entry *matchn;
46309+ struct inodev_entry *inodev;
46310+ ino_t oldinode = old_dentry->d_inode->i_ino;
46311+ dev_t olddev = __get_dev(old_dentry);
46312+
46313+	/* vfs_rename swaps the name and parent link for old_dentry and
46314+	   new_dentry.
46315+	   At this point, old_dentry has the new name, parent link, and inode
46316+	   for the renamed file.
46317+	   If a file is being replaced by the rename, new_dentry has the inode
46318+	   and name for the replaced file.
46319+	*/
46320+
46321+ if (unlikely(!(gr_status & GR_READY)))
46322+ return;
46323+
46324+ preempt_disable();
46325+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
46326+
46327+ /* we wouldn't have to check d_inode if it weren't for
46328+ NFS silly-renaming
46329+ */
46330+
46331+ write_lock(&gr_inode_lock);
46332+ if (unlikely(replace && new_dentry->d_inode)) {
46333+ ino_t newinode = new_dentry->d_inode->i_ino;
46334+ dev_t newdev = __get_dev(new_dentry);
46335+ inodev = lookup_inodev_entry(newinode, newdev);
46336+ if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
46337+ do_handle_delete(inodev, newinode, newdev);
46338+ }
46339+
46340+ inodev = lookup_inodev_entry(oldinode, olddev);
46341+ if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
46342+ do_handle_delete(inodev, oldinode, olddev);
46343+
46344+ if (unlikely((unsigned long)matchn))
46345+ do_handle_create(matchn, old_dentry, mnt);
46346+
46347+ write_unlock(&gr_inode_lock);
46348+ preempt_enable();
46349+
46350+ return;
46351+}
46352+
46353+static int
46354+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
46355+ unsigned char **sum)
46356+{
46357+ struct acl_role_label *r;
46358+ struct role_allowed_ip *ipp;
46359+ struct role_transition *trans;
46360+ unsigned int i;
46361+ int found = 0;
46362+ u32 curr_ip = current->signal->curr_ip;
46363+
46364+ current->signal->saved_ip = curr_ip;
46365+
46366+ /* check transition table */
46367+
46368+ for (trans = current->role->transitions; trans; trans = trans->next) {
46369+ if (!strcmp(rolename, trans->rolename)) {
46370+ found = 1;
46371+ break;
46372+ }
46373+ }
46374+
46375+ if (!found)
46376+ return 0;
46377+
46378+ /* handle special roles that do not require authentication
46379+ and check ip */
46380+
46381+ FOR_EACH_ROLE_START(r)
46382+ if (!strcmp(rolename, r->rolename) &&
46383+ (r->roletype & GR_ROLE_SPECIAL)) {
46384+ found = 0;
46385+ if (r->allowed_ips != NULL) {
46386+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
46387+ if ((ntohl(curr_ip) & ipp->netmask) ==
46388+ (ntohl(ipp->addr) & ipp->netmask))
46389+ found = 1;
46390+ }
46391+ } else
46392+ found = 2;
46393+ if (!found)
46394+ return 0;
46395+
46396+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
46397+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
46398+ *salt = NULL;
46399+ *sum = NULL;
46400+ return 1;
46401+ }
46402+ }
46403+ FOR_EACH_ROLE_END(r)
46404+
46405+ for (i = 0; i < num_sprole_pws; i++) {
46406+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
46407+ *salt = acl_special_roles[i]->salt;
46408+ *sum = acl_special_roles[i]->sum;
46409+ return 1;
46410+ }
46411+ }
46412+
46413+ return 0;
46414+}
46415+
46416+static void
46417+assign_special_role(char *rolename)
46418+{
46419+ struct acl_object_label *obj;
46420+ struct acl_role_label *r;
46421+ struct acl_role_label *assigned = NULL;
46422+ struct task_struct *tsk;
46423+ struct file *filp;
46424+
46425+ FOR_EACH_ROLE_START(r)
46426+ if (!strcmp(rolename, r->rolename) &&
46427+ (r->roletype & GR_ROLE_SPECIAL)) {
46428+ assigned = r;
46429+ break;
46430+ }
46431+ FOR_EACH_ROLE_END(r)
46432+
46433+ if (!assigned)
46434+ return;
46435+
46436+ read_lock(&tasklist_lock);
46437+ read_lock(&grsec_exec_file_lock);
46438+
46439+ tsk = current->real_parent;
46440+ if (tsk == NULL)
46441+ goto out_unlock;
46442+
46443+ filp = tsk->exec_file;
46444+ if (filp == NULL)
46445+ goto out_unlock;
46446+
46447+ tsk->is_writable = 0;
46448+
46449+ tsk->acl_sp_role = 1;
46450+ tsk->acl_role_id = ++acl_sp_role_value;
46451+ tsk->role = assigned;
46452+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
46453+
46454+ /* ignore additional mmap checks for processes that are writable
46455+ by the default ACL */
46456+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
46457+ if (unlikely(obj->mode & GR_WRITE))
46458+ tsk->is_writable = 1;
46459+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
46460+ if (unlikely(obj->mode & GR_WRITE))
46461+ tsk->is_writable = 1;
46462+
46463+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46464+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
46465+#endif
46466+
46467+out_unlock:
46468+ read_unlock(&grsec_exec_file_lock);
46469+ read_unlock(&tasklist_lock);
46470+ return;
46471+}
46472+
46473+int gr_check_secure_terminal(struct task_struct *task)
46474+{
46475+ struct task_struct *p, *p2, *p3;
46476+ struct files_struct *files;
46477+ struct fdtable *fdt;
46478+ struct file *our_file = NULL, *file;
46479+ int i;
46480+
46481+ if (task->signal->tty == NULL)
46482+ return 1;
46483+
46484+ files = get_files_struct(task);
46485+ if (files != NULL) {
46486+ rcu_read_lock();
46487+ fdt = files_fdtable(files);
46488+ for (i=0; i < fdt->max_fds; i++) {
46489+ file = fcheck_files(files, i);
46490+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
46491+ get_file(file);
46492+ our_file = file;
46493+ }
46494+ }
46495+ rcu_read_unlock();
46496+ put_files_struct(files);
46497+ }
46498+
46499+ if (our_file == NULL)
46500+ return 1;
46501+
46502+ read_lock(&tasklist_lock);
46503+ do_each_thread(p2, p) {
46504+ files = get_files_struct(p);
46505+ if (files == NULL ||
46506+ (p->signal && p->signal->tty == task->signal->tty)) {
46507+ if (files != NULL)
46508+ put_files_struct(files);
46509+ continue;
46510+ }
46511+ rcu_read_lock();
46512+ fdt = files_fdtable(files);
46513+ for (i=0; i < fdt->max_fds; i++) {
46514+ file = fcheck_files(files, i);
46515+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
46516+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
46517+ p3 = task;
46518+ while (p3->pid > 0) {
46519+ if (p3 == p)
46520+ break;
46521+ p3 = p3->real_parent;
46522+ }
46523+ if (p3 == p)
46524+ break;
46525+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
46526+ gr_handle_alertkill(p);
46527+ rcu_read_unlock();
46528+ put_files_struct(files);
46529+ read_unlock(&tasklist_lock);
46530+ fput(our_file);
46531+ return 0;
46532+ }
46533+ }
46534+ rcu_read_unlock();
46535+ put_files_struct(files);
46536+ } while_each_thread(p2, p);
46537+ read_unlock(&tasklist_lock);
46538+
46539+ fput(our_file);
46540+ return 1;
46541+}
46542+
46543+ssize_t
46544+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
46545+{
46546+ struct gr_arg_wrapper uwrap;
46547+ unsigned char *sprole_salt = NULL;
46548+ unsigned char *sprole_sum = NULL;
46549+ int error = sizeof (struct gr_arg_wrapper);
46550+ int error2 = 0;
46551+
46552+ mutex_lock(&gr_dev_mutex);
46553+
46554+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
46555+ error = -EPERM;
46556+ goto out;
46557+ }
46558+
46559+ if (count != sizeof (struct gr_arg_wrapper)) {
46560+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
46561+ error = -EINVAL;
46562+ goto out;
46563+ }
46564+
46565+
46566+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
46567+ gr_auth_expires = 0;
46568+ gr_auth_attempts = 0;
46569+ }
46570+
46571+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
46572+ error = -EFAULT;
46573+ goto out;
46574+ }
46575+
46576+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
46577+ error = -EINVAL;
46578+ goto out;
46579+ }
46580+
46581+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
46582+ error = -EFAULT;
46583+ goto out;
46584+ }
46585+
46586+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
46587+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
46588+ time_after(gr_auth_expires, get_seconds())) {
46589+ error = -EBUSY;
46590+ goto out;
46591+ }
46592+
46593+	/* if a non-root user is trying to do anything other than use a special role,
46594+	   do not attempt authentication and do not count it towards authentication
46595+	   locking
46596+	*/
46597+
46598+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
46599+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
46600+ current_uid()) {
46601+ error = -EPERM;
46602+ goto out;
46603+ }
46604+
46605+ /* ensure pw and special role name are null terminated */
46606+
46607+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
46608+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
46609+
46610+	/* Okay.
46611+	 * We have enough of the argument structure (we have yet
46612+	 * to copy_from_user the tables themselves).  Copy the tables
46613+	 * only if we need them, i.e. for loading operations. */
46614+
46615+ switch (gr_usermode->mode) {
46616+ case GR_STATUS:
46617+ if (gr_status & GR_READY) {
46618+ error = 1;
46619+ if (!gr_check_secure_terminal(current))
46620+ error = 3;
46621+ } else
46622+ error = 2;
46623+ goto out;
46624+ case GR_SHUTDOWN:
46625+ if ((gr_status & GR_READY)
46626+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
46627+ pax_open_kernel();
46628+ gr_status &= ~GR_READY;
46629+ pax_close_kernel();
46630+
46631+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
46632+ free_variables();
46633+ memset(gr_usermode, 0, sizeof (struct gr_arg));
46634+ memset(gr_system_salt, 0, GR_SALT_LEN);
46635+ memset(gr_system_sum, 0, GR_SHA_LEN);
46636+ } else if (gr_status & GR_READY) {
46637+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
46638+ error = -EPERM;
46639+ } else {
46640+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
46641+ error = -EAGAIN;
46642+ }
46643+ break;
46644+ case GR_ENABLE:
46645+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
46646+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
46647+ else {
46648+ if (gr_status & GR_READY)
46649+ error = -EAGAIN;
46650+ else
46651+ error = error2;
46652+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
46653+ }
46654+ break;
46655+ case GR_RELOAD:
46656+ if (!(gr_status & GR_READY)) {
46657+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
46658+ error = -EAGAIN;
46659+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
46660+ lock_kernel();
46661+
46662+ pax_open_kernel();
46663+ gr_status &= ~GR_READY;
46664+ pax_close_kernel();
46665+
46666+ free_variables();
46667+ if (!(error2 = gracl_init(gr_usermode))) {
46668+ unlock_kernel();
46669+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
46670+ } else {
46671+ unlock_kernel();
46672+ error = error2;
46673+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
46674+ }
46675+ } else {
46676+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
46677+ error = -EPERM;
46678+ }
46679+ break;
46680+ case GR_SEGVMOD:
46681+ if (unlikely(!(gr_status & GR_READY))) {
46682+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
46683+ error = -EAGAIN;
46684+ break;
46685+ }
46686+
46687+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
46688+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
46689+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
46690+ struct acl_subject_label *segvacl;
46691+ segvacl =
46692+ lookup_acl_subj_label(gr_usermode->segv_inode,
46693+ gr_usermode->segv_device,
46694+ current->role);
46695+ if (segvacl) {
46696+ segvacl->crashes = 0;
46697+ segvacl->expires = 0;
46698+ }
46699+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
46700+ gr_remove_uid(gr_usermode->segv_uid);
46701+ }
46702+ } else {
46703+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
46704+ error = -EPERM;
46705+ }
46706+ break;
46707+ case GR_SPROLE:
46708+ case GR_SPROLEPAM:
46709+ if (unlikely(!(gr_status & GR_READY))) {
46710+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
46711+ error = -EAGAIN;
46712+ break;
46713+ }
46714+
46715+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
46716+ current->role->expires = 0;
46717+ current->role->auth_attempts = 0;
46718+ }
46719+
46720+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
46721+ time_after(current->role->expires, get_seconds())) {
46722+ error = -EBUSY;
46723+ goto out;
46724+ }
46725+
46726+ if (lookup_special_role_auth
46727+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
46728+ && ((!sprole_salt && !sprole_sum)
46729+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
46730+ char *p = "";
46731+ assign_special_role(gr_usermode->sp_role);
46732+ read_lock(&tasklist_lock);
46733+ if (current->real_parent)
46734+ p = current->real_parent->role->rolename;
46735+ read_unlock(&tasklist_lock);
46736+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
46737+ p, acl_sp_role_value);
46738+ } else {
46739+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
46740+ error = -EPERM;
46741+ if(!(current->role->auth_attempts++))
46742+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
46743+
46744+ goto out;
46745+ }
46746+ break;
46747+ case GR_UNSPROLE:
46748+ if (unlikely(!(gr_status & GR_READY))) {
46749+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
46750+ error = -EAGAIN;
46751+ break;
46752+ }
46753+
46754+ if (current->role->roletype & GR_ROLE_SPECIAL) {
46755+ char *p = "";
46756+ int i = 0;
46757+
46758+ read_lock(&tasklist_lock);
46759+ if (current->real_parent) {
46760+ p = current->real_parent->role->rolename;
46761+ i = current->real_parent->acl_role_id;
46762+ }
46763+ read_unlock(&tasklist_lock);
46764+
46765+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
46766+ gr_set_acls(1);
46767+ } else {
46768+ error = -EPERM;
46769+ goto out;
46770+ }
46771+ break;
46772+ default:
46773+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
46774+ error = -EINVAL;
46775+ break;
46776+ }
46777+
46778+ if (error != -EPERM)
46779+ goto out;
46780+
46781+ if(!(gr_auth_attempts++))
46782+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
46783+
46784+ out:
46785+ mutex_unlock(&gr_dev_mutex);
46786+ return error;
46787+}
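/*
 * Editor's note -- illustrative sketch, not part of the patch: write_grsec_handler()
 * above expects userland (normally gradm) to write exactly one struct
 * gr_arg_wrapper, whose version/size fields are validated before the pointed-to
 * struct gr_arg is copied in and dispatched on ->mode.  The outline below is a
 * guess at the calling convention; the device path, the wrapper field layout
 * shown here, and the placeholder constants are assumptions, not taken from
 * this patch.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

struct demo_gr_arg { unsigned char mode; /* ... password, role, segv fields ... */ };

struct demo_gr_arg_wrapper {
	struct demo_gr_arg *arg;	/* copied in separately by the kernel */
	unsigned int version;		/* must equal GRSECURITY_VERSION */
	unsigned int size;		/* must equal sizeof(struct gr_arg) */
};

int main(void)
{
	struct demo_gr_arg arg;
	struct demo_gr_arg_wrapper wrap;
	int fd = open("/dev/grsec", O_WRONLY);	/* assumed device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&arg, 0, sizeof(arg));
	arg.mode = 1;			/* placeholder for a GR_* request code */
	wrap.arg = &arg;
	wrap.version = 0;		/* placeholder for GRSECURITY_VERSION */
	wrap.size = sizeof(arg);
	/* the count must be exactly sizeof(struct gr_arg_wrapper) */
	if (write(fd, &wrap, sizeof(wrap)) < 0)
		perror("write");
	close(fd);
	return 0;
}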
46788+
46789+/* must be called with
46790+ rcu_read_lock();
46791+ read_lock(&tasklist_lock);
46792+ read_lock(&grsec_exec_file_lock);
46793+*/
46794+int gr_apply_subject_to_task(struct task_struct *task)
46795+{
46796+ struct acl_object_label *obj;
46797+ char *tmpname;
46798+ struct acl_subject_label *tmpsubj;
46799+ struct file *filp;
46800+ struct name_entry *nmatch;
46801+
46802+ filp = task->exec_file;
46803+ if (filp == NULL)
46804+ return 0;
46805+
46806+	/* the following is to apply the correct subject
46807+	   to binaries running when the RBAC system
46808+	   is enabled, when the binaries have been
46809+	   replaced or deleted since their execution
46810+	   -----
46811+	   when the RBAC system starts, the inode/dev
46812+	   from exec_file will be one that the RBAC system
46813+	   is unaware of.  It only knows the inode/dev
46814+	   of the file presently on disk, or the absence
46815+	   of it.
46816+	*/
46817+ preempt_disable();
46818+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
46819+
46820+ nmatch = lookup_name_entry(tmpname);
46821+ preempt_enable();
46822+ tmpsubj = NULL;
46823+ if (nmatch) {
46824+ if (nmatch->deleted)
46825+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
46826+ else
46827+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
46828+ if (tmpsubj != NULL)
46829+ task->acl = tmpsubj;
46830+ }
46831+ if (tmpsubj == NULL)
46832+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
46833+ task->role);
46834+ if (task->acl) {
46835+ struct acl_subject_label *curr;
46836+ curr = task->acl;
46837+
46838+ task->is_writable = 0;
46839+ /* ignore additional mmap checks for processes that are writable
46840+ by the default ACL */
46841+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
46842+ if (unlikely(obj->mode & GR_WRITE))
46843+ task->is_writable = 1;
46844+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
46845+ if (unlikely(obj->mode & GR_WRITE))
46846+ task->is_writable = 1;
46847+
46848+ gr_set_proc_res(task);
46849+
46850+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46851+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
46852+#endif
46853+ } else {
46854+ return 1;
46855+ }
46856+
46857+ return 0;
46858+}
46859+
46860+int
46861+gr_set_acls(const int type)
46862+{
46863+ struct task_struct *task, *task2;
46864+ struct acl_role_label *role = current->role;
46865+ __u16 acl_role_id = current->acl_role_id;
46866+ const struct cred *cred;
46867+ int ret;
46868+
46869+ rcu_read_lock();
46870+ read_lock(&tasklist_lock);
46871+ read_lock(&grsec_exec_file_lock);
46872+ do_each_thread(task2, task) {
46873+		/* check to see if we're called from the exit handler;
46874+		   if so, only replace ACLs that have inherited the admin
46875+		   ACL */
46876+
46877+ if (type && (task->role != role ||
46878+ task->acl_role_id != acl_role_id))
46879+ continue;
46880+
46881+ task->acl_role_id = 0;
46882+ task->acl_sp_role = 0;
46883+
46884+ if (task->exec_file) {
46885+ cred = __task_cred(task);
46886+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
46887+
46888+ ret = gr_apply_subject_to_task(task);
46889+ if (ret) {
46890+ read_unlock(&grsec_exec_file_lock);
46891+ read_unlock(&tasklist_lock);
46892+ rcu_read_unlock();
46893+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
46894+ return ret;
46895+ }
46896+ } else {
46897+ // it's a kernel process
46898+ task->role = kernel_role;
46899+ task->acl = kernel_role->root_label;
46900+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
46901+ task->acl->mode &= ~GR_PROCFIND;
46902+#endif
46903+ }
46904+ } while_each_thread(task2, task);
46905+ read_unlock(&grsec_exec_file_lock);
46906+ read_unlock(&tasklist_lock);
46907+ rcu_read_unlock();
46908+
46909+ return 0;
46910+}
46911+
46912+void
46913+gr_learn_resource(const struct task_struct *task,
46914+ const int res, const unsigned long wanted, const int gt)
46915+{
46916+ struct acl_subject_label *acl;
46917+ const struct cred *cred;
46918+
46919+ if (unlikely((gr_status & GR_READY) &&
46920+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
46921+ goto skip_reslog;
46922+
46923+#ifdef CONFIG_GRKERNSEC_RESLOG
46924+ gr_log_resource(task, res, wanted, gt);
46925+#endif
46926+ skip_reslog:
46927+
46928+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
46929+ return;
46930+
46931+ acl = task->acl;
46932+
46933+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
46934+ !(acl->resmask & (1 << (unsigned short) res))))
46935+ return;
46936+
46937+ if (wanted >= acl->res[res].rlim_cur) {
46938+ unsigned long res_add;
46939+
46940+ res_add = wanted;
46941+ switch (res) {
46942+ case RLIMIT_CPU:
46943+ res_add += GR_RLIM_CPU_BUMP;
46944+ break;
46945+ case RLIMIT_FSIZE:
46946+ res_add += GR_RLIM_FSIZE_BUMP;
46947+ break;
46948+ case RLIMIT_DATA:
46949+ res_add += GR_RLIM_DATA_BUMP;
46950+ break;
46951+ case RLIMIT_STACK:
46952+ res_add += GR_RLIM_STACK_BUMP;
46953+ break;
46954+ case RLIMIT_CORE:
46955+ res_add += GR_RLIM_CORE_BUMP;
46956+ break;
46957+ case RLIMIT_RSS:
46958+ res_add += GR_RLIM_RSS_BUMP;
46959+ break;
46960+ case RLIMIT_NPROC:
46961+ res_add += GR_RLIM_NPROC_BUMP;
46962+ break;
46963+ case RLIMIT_NOFILE:
46964+ res_add += GR_RLIM_NOFILE_BUMP;
46965+ break;
46966+ case RLIMIT_MEMLOCK:
46967+ res_add += GR_RLIM_MEMLOCK_BUMP;
46968+ break;
46969+ case RLIMIT_AS:
46970+ res_add += GR_RLIM_AS_BUMP;
46971+ break;
46972+ case RLIMIT_LOCKS:
46973+ res_add += GR_RLIM_LOCKS_BUMP;
46974+ break;
46975+ case RLIMIT_SIGPENDING:
46976+ res_add += GR_RLIM_SIGPENDING_BUMP;
46977+ break;
46978+ case RLIMIT_MSGQUEUE:
46979+ res_add += GR_RLIM_MSGQUEUE_BUMP;
46980+ break;
46981+ case RLIMIT_NICE:
46982+ res_add += GR_RLIM_NICE_BUMP;
46983+ break;
46984+ case RLIMIT_RTPRIO:
46985+ res_add += GR_RLIM_RTPRIO_BUMP;
46986+ break;
46987+ case RLIMIT_RTTIME:
46988+ res_add += GR_RLIM_RTTIME_BUMP;
46989+ break;
46990+ }
46991+
46992+ acl->res[res].rlim_cur = res_add;
46993+
46994+ if (wanted > acl->res[res].rlim_max)
46995+ acl->res[res].rlim_max = res_add;
46996+
46997+ /* only log the subject filename, since resource logging is supported for
46998+ single-subject learning only */
46999+ rcu_read_lock();
47000+ cred = __task_cred(task);
47001+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
47002+ task->role->roletype, cred->uid, cred->gid, acl->filename,
47003+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
47004+ "", (unsigned long) res, &task->signal->saved_ip);
47005+ rcu_read_unlock();
47006+ }
47007+
47008+ return;
47009+}
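/*
 * Editor's note -- illustrative sketch, not part of the patch: when a learning
 * subject exceeds one of its recorded rlimits, gr_learn_resource() above raises
 * the recorded limit to the wanted value plus a per-resource slack (the
 * GR_RLIM_*_BUMP constants) so the learned policy is not razor-thin.  In
 * miniature, with invented slack values:
 */
static unsigned long demo_bump_for(int resource)
{
	switch (resource) {
	case 0:  return 60;		/* e.g. seconds of CPU slack */
	case 1:  return 1024 * 1024;	/* e.g. bytes of file-size slack */
	default: return 4096;
	}
}

/* returns the new learned soft limit after a request for "wanted" */
static unsigned long demo_learn_limit(unsigned long current_limit,
				      unsigned long wanted, int resource)
{
	if (wanted < current_limit)
		return current_limit;	/* within limits: nothing to learn */
	return wanted + demo_bump_for(resource);
}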
47010+
47011+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
47012+void
47013+pax_set_initial_flags(struct linux_binprm *bprm)
47014+{
47015+ struct task_struct *task = current;
47016+ struct acl_subject_label *proc;
47017+ unsigned long flags;
47018+
47019+ if (unlikely(!(gr_status & GR_READY)))
47020+ return;
47021+
47022+ flags = pax_get_flags(task);
47023+
47024+ proc = task->acl;
47025+
47026+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
47027+ flags &= ~MF_PAX_PAGEEXEC;
47028+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
47029+ flags &= ~MF_PAX_SEGMEXEC;
47030+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
47031+ flags &= ~MF_PAX_RANDMMAP;
47032+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
47033+ flags &= ~MF_PAX_EMUTRAMP;
47034+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
47035+ flags &= ~MF_PAX_MPROTECT;
47036+
47037+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
47038+ flags |= MF_PAX_PAGEEXEC;
47039+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
47040+ flags |= MF_PAX_SEGMEXEC;
47041+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
47042+ flags |= MF_PAX_RANDMMAP;
47043+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
47044+ flags |= MF_PAX_EMUTRAMP;
47045+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
47046+ flags |= MF_PAX_MPROTECT;
47047+
47048+ pax_set_flags(task, flags);
47049+
47050+ return;
47051+}
47052+#endif
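/*
 * Editor's note -- illustrative sketch, not part of the patch: pax_set_initial_flags()
 * above turns a subject's explicit "disable" bits and "enable" bits into the
 * task's PaX flag word by clearing every disabled feature first and then setting
 * every enabled one, so features the policy does not mention keep whatever the
 * ELF-marking/arch defaults chose.  Reduced to mask operations:
 */
static unsigned long
demo_apply_pax_policy(unsigned long flags, unsigned long disable_mask,
		      unsigned long enable_mask)
{
	flags &= ~disable_mask;		/* features the subject turns off */
	flags |= enable_mask;		/* features the subject turns on */
	return flags;
}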
47053+
47054+#ifdef CONFIG_SYSCTL
47055+/* Eric Biederman likes breaking userland ABI and every inode-based security
47056+ system to save 35kb of memory */
47057+
47058+/* we modify the passed in filename, but adjust it back before returning */
47059+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
47060+{
47061+ struct name_entry *nmatch;
47062+ char *p, *lastp = NULL;
47063+ struct acl_object_label *obj = NULL, *tmp;
47064+ struct acl_subject_label *tmpsubj;
47065+ char c = '\0';
47066+
47067+ read_lock(&gr_inode_lock);
47068+
47069+ p = name + len - 1;
47070+ do {
47071+ nmatch = lookup_name_entry(name);
47072+ if (lastp != NULL)
47073+ *lastp = c;
47074+
47075+ if (nmatch == NULL)
47076+ goto next_component;
47077+ tmpsubj = current->acl;
47078+ do {
47079+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
47080+ if (obj != NULL) {
47081+ tmp = obj->globbed;
47082+ while (tmp) {
47083+ if (!glob_match(tmp->filename, name)) {
47084+ obj = tmp;
47085+ goto found_obj;
47086+ }
47087+ tmp = tmp->next;
47088+ }
47089+ goto found_obj;
47090+ }
47091+ } while ((tmpsubj = tmpsubj->parent_subject));
47092+next_component:
47093+ /* end case */
47094+ if (p == name)
47095+ break;
47096+
47097+ while (*p != '/')
47098+ p--;
47099+ if (p == name)
47100+ lastp = p + 1;
47101+ else {
47102+ lastp = p;
47103+ p--;
47104+ }
47105+ c = *lastp;
47106+ *lastp = '\0';
47107+ } while (1);
47108+found_obj:
47109+ read_unlock(&gr_inode_lock);
47110+ /* obj returned will always be non-null */
47111+ return obj;
47112+}
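/*
 * Editor's note -- illustrative sketch, not part of the patch: gr_lookup_by_name()
 * above matches the most specific policy entry for a path by trying the full
 * path first and then repeatedly chopping off the last component ("/a/b/c",
 * "/a/b", "/a", "/") until something matches, temporarily NUL-terminating the
 * buffer at each step (the real code also walks parent subjects and globbed
 * objects).  The same truncation order in user space, with an invented lookup:
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* invented stand-in for the per-prefix policy lookup */
static bool demo_has_entry(const char *path)
{
	return strcmp(path, "/proc/sys") == 0;
}

static void demo_longest_prefix(char *path)
{
	size_t len = strlen(path);

	for (;;) {
		if (demo_has_entry(path)) {
			printf("matched %s\n", path);
			return;
		}
		if (len <= 1)
			return;			/* nothing matched, even "/" */
		/* drop the last component; keep the leading "/" for the root */
		while (len > 1 && path[len - 1] != '/')
			len--;
		if (len > 1)
			len--;			/* also drop the separator */
		path[len] = '\0';
	}
}

int main(void)
{
	char buf[] = "/proc/sys/kernel/hostname";

	demo_longest_prefix(buf);	/* prints "matched /proc/sys" */
	return 0;
}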
47113+
47114+/* returns 0 when allowing, non-zero on error;
47115+   an op of 0 is used for readdir, so we don't log the names of hidden files
47116+*/
47117+__u32
47118+gr_handle_sysctl(const struct ctl_table *table, const int op)
47119+{
47120+ ctl_table *tmp;
47121+ const char *proc_sys = "/proc/sys";
47122+ char *path;
47123+ struct acl_object_label *obj;
47124+ unsigned short len = 0, pos = 0, depth = 0, i;
47125+ __u32 err = 0;
47126+ __u32 mode = 0;
47127+
47128+ if (unlikely(!(gr_status & GR_READY)))
47129+ return 0;
47130+
47131+	/* for now, ignore operations on non-sysctl entries if it's not a
47132+	   readdir */
47133+ if (table->child != NULL && op != 0)
47134+ return 0;
47135+
47136+ mode |= GR_FIND;
47137+ /* it's only a read if it's an entry, read on dirs is for readdir */
47138+ if (op & MAY_READ)
47139+ mode |= GR_READ;
47140+ if (op & MAY_WRITE)
47141+ mode |= GR_WRITE;
47142+
47143+ preempt_disable();
47144+
47145+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
47146+
47147+	/* it's only a read/write if it's an actual entry, not a dir
47148+	   (dirs are opened for readdir)
47149+	*/
47150+
47151+ /* convert the requested sysctl entry into a pathname */
47152+
47153+ for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
47154+ len += strlen(tmp->procname);
47155+ len++;
47156+ depth++;
47157+ }
47158+
47159+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
47160+ /* deny */
47161+ goto out;
47162+ }
47163+
47164+ memset(path, 0, PAGE_SIZE);
47165+
47166+ memcpy(path, proc_sys, strlen(proc_sys));
47167+
47168+ pos += strlen(proc_sys);
47169+
47170+ for (; depth > 0; depth--) {
47171+ path[pos] = '/';
47172+ pos++;
47173+ for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
47174+ if (depth == i) {
47175+ memcpy(path + pos, tmp->procname,
47176+ strlen(tmp->procname));
47177+ pos += strlen(tmp->procname);
47178+ }
47179+ i++;
47180+ }
47181+ }
47182+
47183+ obj = gr_lookup_by_name(path, pos);
47184+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
47185+
47186+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
47187+ ((err & mode) != mode))) {
47188+ __u32 new_mode = mode;
47189+
47190+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
47191+
47192+ err = 0;
47193+ gr_log_learn_sysctl(path, new_mode);
47194+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
47195+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
47196+ err = -ENOENT;
47197+ } else if (!(err & GR_FIND)) {
47198+ err = -ENOENT;
47199+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
47200+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
47201+ path, (mode & GR_READ) ? " reading" : "",
47202+ (mode & GR_WRITE) ? " writing" : "");
47203+ err = -EACCES;
47204+ } else if ((err & mode) != mode) {
47205+ err = -EACCES;
47206+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
47207+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
47208+ path, (mode & GR_READ) ? " reading" : "",
47209+ (mode & GR_WRITE) ? " writing" : "");
47210+ err = 0;
47211+ } else
47212+ err = 0;
47213+
47214+ out:
47215+ preempt_enable();
47216+
47217+ return err;
47218+}
47219+#endif
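/*
 * Editor's note -- illustrative sketch, not part of the patch: gr_handle_sysctl()
 * above rebuilds a "/proc/sys/..." pathname from a ctl_table whose ->parent
 * links point from the entry back toward the root, then runs the usual object
 * lookup on that pathname.  Because the links go child-to-parent, the kernel
 * code first measures the chain and then emits components outermost-first; the
 * same idea in user space, with invented types:
 */
#include <stdio.h>
#include <string.h>

struct demo_ctl {
	const char *name;
	const struct demo_ctl *parent;	/* NULL at the top level */
};

/* append the chain's components to buf, outermost component first */
static void demo_build_path(char *buf, size_t bufsz, const struct demo_ctl *entry)
{
	const struct demo_ctl *tmp;
	size_t depth = 0, i;

	snprintf(buf, bufsz, "/proc/sys");
	for (tmp = entry; tmp != NULL; tmp = tmp->parent)
		depth++;

	for (i = depth; i > 0; i--) {
		size_t level = 1;

		/* find the entry that sits i links up from the leaf */
		for (tmp = entry; level != i; tmp = tmp->parent)
			level++;
		strncat(buf, "/", bufsz - strlen(buf) - 1);
		strncat(buf, tmp->name, bufsz - strlen(buf) - 1);
	}
}

int main(void)
{
	const struct demo_ctl kern = { "kernel", NULL };
	const struct demo_ctl host = { "hostname", &kern };
	char path[256];

	demo_build_path(path, sizeof(path), &host);
	printf("%s\n", path);	/* prints "/proc/sys/kernel/hostname" */
	return 0;
}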
47220+
47221+int
47222+gr_handle_proc_ptrace(struct task_struct *task)
47223+{
47224+ struct file *filp;
47225+ struct task_struct *tmp = task;
47226+ struct task_struct *curtemp = current;
47227+ __u32 retmode;
47228+
47229+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
47230+ if (unlikely(!(gr_status & GR_READY)))
47231+ return 0;
47232+#endif
47233+
47234+ read_lock(&tasklist_lock);
47235+ read_lock(&grsec_exec_file_lock);
47236+ filp = task->exec_file;
47237+
47238+ while (tmp->pid > 0) {
47239+ if (tmp == curtemp)
47240+ break;
47241+ tmp = tmp->real_parent;
47242+ }
47243+
47244+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
47245+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
47246+ read_unlock(&grsec_exec_file_lock);
47247+ read_unlock(&tasklist_lock);
47248+ return 1;
47249+ }
47250+
47251+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
47252+ if (!(gr_status & GR_READY)) {
47253+ read_unlock(&grsec_exec_file_lock);
47254+ read_unlock(&tasklist_lock);
47255+ return 0;
47256+ }
47257+#endif
47258+
47259+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
47260+ read_unlock(&grsec_exec_file_lock);
47261+ read_unlock(&tasklist_lock);
47262+
47263+ if (retmode & GR_NOPTRACE)
47264+ return 1;
47265+
47266+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
47267+ && (current->acl != task->acl || (current->acl != current->role->root_label
47268+ && current->pid != task->pid)))
47269+ return 1;
47270+
47271+ return 0;
47272+}
47273+
47274+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
47275+{
47276+ if (unlikely(!(gr_status & GR_READY)))
47277+ return;
47278+
47279+ if (!(current->role->roletype & GR_ROLE_GOD))
47280+ return;
47281+
47282+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
47283+ p->role->rolename, gr_task_roletype_to_char(p),
47284+ p->acl->filename);
47285+}
47286+
47287+int
47288+gr_handle_ptrace(struct task_struct *task, const long request)
47289+{
47290+ struct task_struct *tmp = task;
47291+ struct task_struct *curtemp = current;
47292+ __u32 retmode;
47293+
47294+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
47295+ if (unlikely(!(gr_status & GR_READY)))
47296+ return 0;
47297+#endif
47298+
47299+ read_lock(&tasklist_lock);
47300+ while (tmp->pid > 0) {
47301+ if (tmp == curtemp)
47302+ break;
47303+ tmp = tmp->real_parent;
47304+ }
47305+
47306+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
47307+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
47308+ read_unlock(&tasklist_lock);
47309+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
47310+ return 1;
47311+ }
47312+ read_unlock(&tasklist_lock);
47313+
47314+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
47315+ if (!(gr_status & GR_READY))
47316+ return 0;
47317+#endif
47318+
47319+ read_lock(&grsec_exec_file_lock);
47320+ if (unlikely(!task->exec_file)) {
47321+ read_unlock(&grsec_exec_file_lock);
47322+ return 0;
47323+ }
47324+
47325+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
47326+ read_unlock(&grsec_exec_file_lock);
47327+
47328+ if (retmode & GR_NOPTRACE) {
47329+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
47330+ return 1;
47331+ }
47332+
47333+ if (retmode & GR_PTRACERD) {
47334+ switch (request) {
47335+ case PTRACE_POKETEXT:
47336+ case PTRACE_POKEDATA:
47337+ case PTRACE_POKEUSR:
47338+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
47339+ case PTRACE_SETREGS:
47340+ case PTRACE_SETFPREGS:
47341+#endif
47342+#ifdef CONFIG_X86
47343+ case PTRACE_SETFPXREGS:
47344+#endif
47345+#ifdef CONFIG_ALTIVEC
47346+ case PTRACE_SETVRREGS:
47347+#endif
47348+ return 1;
47349+ default:
47350+ return 0;
47351+ }
47352+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
47353+ !(current->role->roletype & GR_ROLE_GOD) &&
47354+ (current->acl != task->acl)) {
47355+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
47356+ return 1;
47357+ }
47358+
47359+ return 0;
47360+}
47361+
47362+static int is_writable_mmap(const struct file *filp)
47363+{
47364+ struct task_struct *task = current;
47365+ struct acl_object_label *obj, *obj2;
47366+
47367+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
47368+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
47369+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
47370+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
47371+ task->role->root_label);
47372+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
47373+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
47374+ return 1;
47375+ }
47376+ }
47377+ return 0;
47378+}
47379+
47380+int
47381+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
47382+{
47383+ __u32 mode;
47384+
47385+ if (unlikely(!file || !(prot & PROT_EXEC)))
47386+ return 1;
47387+
47388+ if (is_writable_mmap(file))
47389+ return 0;
47390+
47391+ mode =
47392+ gr_search_file(file->f_path.dentry,
47393+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
47394+ file->f_path.mnt);
47395+
47396+ if (!gr_tpe_allow(file))
47397+ return 0;
47398+
47399+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
47400+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
47401+ return 0;
47402+ } else if (unlikely(!(mode & GR_EXEC))) {
47403+ return 0;
47404+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
47405+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
47406+ return 1;
47407+ }
47408+
47409+ return 1;
47410+}
47411+
47412+int
47413+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
47414+{
47415+ __u32 mode;
47416+
47417+ if (unlikely(!file || !(prot & PROT_EXEC)))
47418+ return 1;
47419+
47420+ if (is_writable_mmap(file))
47421+ return 0;
47422+
47423+ mode =
47424+ gr_search_file(file->f_path.dentry,
47425+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
47426+ file->f_path.mnt);
47427+
47428+ if (!gr_tpe_allow(file))
47429+ return 0;
47430+
47431+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
47432+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
47433+ return 0;
47434+ } else if (unlikely(!(mode & GR_EXEC))) {
47435+ return 0;
47436+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
47437+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
47438+ return 1;
47439+ }
47440+
47441+ return 1;
47442+}
47443+
47444+void
47445+gr_acl_handle_psacct(struct task_struct *task, const long code)
47446+{
47447+ unsigned long runtime;
47448+ unsigned long cputime;
47449+ unsigned int wday, cday;
47450+ __u8 whr, chr;
47451+ __u8 wmin, cmin;
47452+ __u8 wsec, csec;
47453+ struct timespec timeval;
47454+
47455+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
47456+ !(task->acl->mode & GR_PROCACCT)))
47457+ return;
47458+
47459+ do_posix_clock_monotonic_gettime(&timeval);
47460+ runtime = timeval.tv_sec - task->start_time.tv_sec;
47461+ wday = runtime / (3600 * 24);
47462+ runtime -= wday * (3600 * 24);
47463+ whr = runtime / 3600;
47464+ runtime -= whr * 3600;
47465+ wmin = runtime / 60;
47466+ runtime -= wmin * 60;
47467+ wsec = runtime;
47468+
47469+ cputime = (task->utime + task->stime) / HZ;
47470+ cday = cputime / (3600 * 24);
47471+ cputime -= cday * (3600 * 24);
47472+ chr = cputime / 3600;
47473+ cputime -= chr * 3600;
47474+ cmin = cputime / 60;
47475+ cputime -= cmin * 60;
47476+ csec = cputime;
47477+
47478+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
47479+
47480+ return;
47481+}
47482+
47483+void gr_set_kernel_label(struct task_struct *task)
47484+{
47485+ if (gr_status & GR_READY) {
47486+ task->role = kernel_role;
47487+ task->acl = kernel_role->root_label;
47488+ }
47489+ return;
47490+}
47491+
47492+#ifdef CONFIG_TASKSTATS
47493+int gr_is_taskstats_denied(int pid)
47494+{
47495+ struct task_struct *task;
47496+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47497+ const struct cred *cred;
47498+#endif
47499+ int ret = 0;
47500+
47501+ /* restrict taskstats viewing to un-chrooted root users
47502+ who have the 'view' subject flag if the RBAC system is enabled
47503+ */
47504+
47505+ rcu_read_lock();
47506+ read_lock(&tasklist_lock);
47507+ task = find_task_by_vpid(pid);
47508+ if (task) {
47509+#ifdef CONFIG_GRKERNSEC_CHROOT
47510+ if (proc_is_chrooted(task))
47511+ ret = -EACCES;
47512+#endif
47513+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47514+ cred = __task_cred(task);
47515+#ifdef CONFIG_GRKERNSEC_PROC_USER
47516+ if (cred->uid != 0)
47517+ ret = -EACCES;
47518+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47519+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
47520+ ret = -EACCES;
47521+#endif
47522+#endif
47523+ if (gr_status & GR_READY) {
47524+ if (!(task->acl->mode & GR_VIEW))
47525+ ret = -EACCES;
47526+ }
47527+ } else
47528+ ret = -ENOENT;
47529+
47530+ read_unlock(&tasklist_lock);
47531+ rcu_read_unlock();
47532+
47533+ return ret;
47534+}
47535+#endif
47536+
47537+/* AUXV entries are filled via a descendant of search_binary_handler
47538+ after we've already applied the subject for the target
47539+*/
47540+int gr_acl_enable_at_secure(void)
47541+{
47542+ if (unlikely(!(gr_status & GR_READY)))
47543+ return 0;
47544+
47545+ if (current->acl->mode & GR_ATSECURE)
47546+ return 1;
47547+
47548+ return 0;
47549+}
47550+
47551+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
47552+{
47553+ struct task_struct *task = current;
47554+ struct dentry *dentry = file->f_path.dentry;
47555+ struct vfsmount *mnt = file->f_path.mnt;
47556+ struct acl_object_label *obj, *tmp;
47557+ struct acl_subject_label *subj;
47558+ unsigned int bufsize;
47559+ int is_not_root;
47560+ char *path;
47561+ dev_t dev = __get_dev(dentry);
47562+
47563+ if (unlikely(!(gr_status & GR_READY)))
47564+ return 1;
47565+
47566+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
47567+ return 1;
47568+
47569+ /* ignore Eric Biederman */
47570+ if (IS_PRIVATE(dentry->d_inode))
47571+ return 1;
47572+
47573+ subj = task->acl;
47574+ do {
47575+ obj = lookup_acl_obj_label(ino, dev, subj);
47576+ if (obj != NULL)
47577+ return (obj->mode & GR_FIND) ? 1 : 0;
47578+ } while ((subj = subj->parent_subject));
47579+
47580+ /* this is purely an optimization since we're looking for an object
47581+ for the directory we're doing a readdir on
47582+ if it's possible for any globbed object to match the entry we're
47583+ filling into the directory, then the object we find here will be
47584+ an anchor point with attached globbed objects
47585+ */
47586+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
47587+ if (obj->globbed == NULL)
47588+ return (obj->mode & GR_FIND) ? 1 : 0;
47589+
47590+ is_not_root = ((obj->filename[0] == '/') &&
47591+ (obj->filename[1] == '\0')) ? 0 : 1;
47592+ bufsize = PAGE_SIZE - namelen - is_not_root;
47593+
47594+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
47595+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
47596+ return 1;
47597+
47598+ preempt_disable();
47599+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
47600+ bufsize);
47601+
47602+ bufsize = strlen(path);
47603+
47604+ /* if base is "/", don't append an additional slash */
47605+ if (is_not_root)
47606+ *(path + bufsize) = '/';
47607+ memcpy(path + bufsize + is_not_root, name, namelen);
47608+ *(path + bufsize + namelen + is_not_root) = '\0';
47609+
47610+ tmp = obj->globbed;
47611+ while (tmp) {
47612+ if (!glob_match(tmp->filename, path)) {
47613+ preempt_enable();
47614+ return (tmp->mode & GR_FIND) ? 1 : 0;
47615+ }
47616+ tmp = tmp->next;
47617+ }
47618+ preempt_enable();
47619+ return (obj->mode & GR_FIND) ? 1 : 0;
47620+}
47621+
47622+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
47623+EXPORT_SYMBOL(gr_acl_is_enabled);
47624+#endif
47625+EXPORT_SYMBOL(gr_learn_resource);
47626+EXPORT_SYMBOL(gr_set_kernel_label);
47627+#ifdef CONFIG_SECURITY
47628+EXPORT_SYMBOL(gr_check_user_change);
47629+EXPORT_SYMBOL(gr_check_group_change);
47630+#endif
47631+
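The filldir handler above builds the candidate path by appending the directory entry name to the anchor object's path (skipping the extra slash when the anchor is "/") and returns the mode of the first globbed object that matches. A minimal userspace sketch of that lookup, using fnmatch(3) in place of the kernel's glob_match() and a hypothetical glob_entry type; these are illustrative assumptions, not the in-kernel structures.

/* Simplified userspace model of the filldir glob walk above.
 * glob_entry and GR_FIND are stand-ins; fnmatch(3) replaces the
 * kernel's glob_match(). */
#include <stdio.h>
#include <fnmatch.h>

#define GR_FIND 0x01u

struct glob_entry {
	const char *pattern;      /* globbed object path, e.g. "/home/*/.ssh" */
	unsigned int mode;        /* subset of object mode bits */
	struct glob_entry *next;
};

/* Build "<anchor>/<name>" (no extra slash when anchor is "/") and return
 * the mode of the first matching globbed entry, else the anchor's mode. */
static unsigned int filldir_mode(const char *anchor, unsigned int anchor_mode,
				 const struct glob_entry *globbed,
				 const char *name)
{
	char path[4096];
	int is_not_root = !(anchor[0] == '/' && anchor[1] == '\0');

	snprintf(path, sizeof(path), "%s%s%s",
		 anchor, is_not_root ? "/" : "", name);

	for (; globbed; globbed = globbed->next)
		if (fnmatch(globbed->pattern, path, 0) == 0)
			return globbed->mode;
	return anchor_mode;
}

int main(void)
{
	struct glob_entry hide = { "/home/*/.ssh", 0, NULL };

	printf("visible: %u\n", filldir_mode("/home/alice", GR_FIND, &hide, "work") & GR_FIND);
	printf("hidden:  %u\n", filldir_mode("/home/alice", GR_FIND, &hide, ".ssh") & GR_FIND);
	return 0;
}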
47632diff -urNp linux-2.6.32.41/grsecurity/gracl_cap.c linux-2.6.32.41/grsecurity/gracl_cap.c
47633--- linux-2.6.32.41/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
47634+++ linux-2.6.32.41/grsecurity/gracl_cap.c 2011-04-17 15:56:46.000000000 -0400
47635@@ -0,0 +1,138 @@
47636+#include <linux/kernel.h>
47637+#include <linux/module.h>
47638+#include <linux/sched.h>
47639+#include <linux/gracl.h>
47640+#include <linux/grsecurity.h>
47641+#include <linux/grinternal.h>
47642+
47643+static const char *captab_log[] = {
47644+ "CAP_CHOWN",
47645+ "CAP_DAC_OVERRIDE",
47646+ "CAP_DAC_READ_SEARCH",
47647+ "CAP_FOWNER",
47648+ "CAP_FSETID",
47649+ "CAP_KILL",
47650+ "CAP_SETGID",
47651+ "CAP_SETUID",
47652+ "CAP_SETPCAP",
47653+ "CAP_LINUX_IMMUTABLE",
47654+ "CAP_NET_BIND_SERVICE",
47655+ "CAP_NET_BROADCAST",
47656+ "CAP_NET_ADMIN",
47657+ "CAP_NET_RAW",
47658+ "CAP_IPC_LOCK",
47659+ "CAP_IPC_OWNER",
47660+ "CAP_SYS_MODULE",
47661+ "CAP_SYS_RAWIO",
47662+ "CAP_SYS_CHROOT",
47663+ "CAP_SYS_PTRACE",
47664+ "CAP_SYS_PACCT",
47665+ "CAP_SYS_ADMIN",
47666+ "CAP_SYS_BOOT",
47667+ "CAP_SYS_NICE",
47668+ "CAP_SYS_RESOURCE",
47669+ "CAP_SYS_TIME",
47670+ "CAP_SYS_TTY_CONFIG",
47671+ "CAP_MKNOD",
47672+ "CAP_LEASE",
47673+ "CAP_AUDIT_WRITE",
47674+ "CAP_AUDIT_CONTROL",
47675+ "CAP_SETFCAP",
47676+ "CAP_MAC_OVERRIDE",
47677+ "CAP_MAC_ADMIN"
47678+};
47679+
47680+EXPORT_SYMBOL(gr_is_capable);
47681+EXPORT_SYMBOL(gr_is_capable_nolog);
47682+
47683+int
47684+gr_is_capable(const int cap)
47685+{
47686+ struct task_struct *task = current;
47687+ const struct cred *cred = current_cred();
47688+ struct acl_subject_label *curracl;
47689+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
47690+ kernel_cap_t cap_audit = __cap_empty_set;
47691+
47692+ if (!gr_acl_is_enabled())
47693+ return 1;
47694+
47695+ curracl = task->acl;
47696+
47697+ cap_drop = curracl->cap_lower;
47698+ cap_mask = curracl->cap_mask;
47699+ cap_audit = curracl->cap_invert_audit;
47700+
47701+ while ((curracl = curracl->parent_subject)) {
47702+ /* if the cap isn't specified in the current computed mask but is specified in the
47703+ current level subject, and is lowered in the current level subject, then add
47704+ it to the set of dropped capabilities
47705+ otherwise, add the current level subject's mask to the current computed mask
47706+ */
47707+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
47708+ cap_raise(cap_mask, cap);
47709+ if (cap_raised(curracl->cap_lower, cap))
47710+ cap_raise(cap_drop, cap);
47711+ if (cap_raised(curracl->cap_invert_audit, cap))
47712+ cap_raise(cap_audit, cap);
47713+ }
47714+ }
47715+
47716+ if (!cap_raised(cap_drop, cap)) {
47717+ if (cap_raised(cap_audit, cap))
47718+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
47719+ return 1;
47720+ }
47721+
47722+ curracl = task->acl;
47723+
47724+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
47725+ && cap_raised(cred->cap_effective, cap)) {
47726+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
47727+ task->role->roletype, cred->uid,
47728+ cred->gid, task->exec_file ?
47729+ gr_to_filename(task->exec_file->f_path.dentry,
47730+ task->exec_file->f_path.mnt) : curracl->filename,
47731+ curracl->filename, 0UL,
47732+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
47733+ return 1;
47734+ }
47735+
47736+ if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
47737+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
47738+ return 0;
47739+}
47740+
47741+int
47742+gr_is_capable_nolog(const int cap)
47743+{
47744+ struct acl_subject_label *curracl;
47745+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
47746+
47747+ if (!gr_acl_is_enabled())
47748+ return 1;
47749+
47750+ curracl = current->acl;
47751+
47752+ cap_drop = curracl->cap_lower;
47753+ cap_mask = curracl->cap_mask;
47754+
47755+ while ((curracl = curracl->parent_subject)) {
47756+ /* if the cap isn't specified in the current computed mask but is specified in the
47757+ current level subject, and is lowered in the current level subject, then add
47758+ it to the set of dropped capabilities
47759+ otherwise, add the current level subject's mask to the current computed mask
47760+ */
47761+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
47762+ cap_raise(cap_mask, cap);
47763+ if (cap_raised(curracl->cap_lower, cap))
47764+ cap_raise(cap_drop, cap);
47765+ }
47766+ }
47767+
47768+ if (!cap_raised(cap_drop, cap))
47769+ return 1;
47770+
47771+ return 0;
47772+}
47773+
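The two gr_is_capable() variants above resolve a capability by walking parent_subject links: a parent only contributes bits the computed mask does not yet cover, and a capability is denied only if the level that first covered it also lowers it. A compact userspace model of that walk follows; subject_model and cap_bit() are illustrative stand-ins for the kernel's acl_subject_label and kernel_cap_t helpers.

/* Userspace model of the parent-subject capability walk above. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct subject_model {
	uint64_t cap_mask;               /* caps this level says anything about */
	uint64_t cap_lower;              /* caps this level drops */
	struct subject_model *parent;    /* parent_subject link */
};

static inline uint64_t cap_bit(int cap) { return 1ULL << cap; }

static bool subject_allows_cap(const struct subject_model *s, int cap)
{
	uint64_t mask = s->cap_mask, drop = s->cap_lower;

	for (s = s->parent; s; s = s->parent) {
		/* only the first level to cover the cap gets to decide it */
		if (!(mask & cap_bit(cap)) && (s->cap_mask & cap_bit(cap))) {
			mask |= cap_bit(cap);
			if (s->cap_lower & cap_bit(cap))
				drop |= cap_bit(cap);
		}
	}
	return !(drop & cap_bit(cap));
}

int main(void)
{
	/* parent drops cap 21, child is silent about it: the drop is inherited */
	struct subject_model parent = { cap_bit(21), cap_bit(21), NULL };
	struct subject_model child  = { 0, 0, &parent };

	printf("cap 21 allowed: %d\n", subject_allows_cap(&child, 21));
	printf("cap 0  allowed: %d\n", subject_allows_cap(&child, 0));
	return 0;
}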
47774diff -urNp linux-2.6.32.41/grsecurity/gracl_fs.c linux-2.6.32.41/grsecurity/gracl_fs.c
47775--- linux-2.6.32.41/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
47776+++ linux-2.6.32.41/grsecurity/gracl_fs.c 2011-04-17 15:56:46.000000000 -0400
47777@@ -0,0 +1,431 @@
47778+#include <linux/kernel.h>
47779+#include <linux/sched.h>
47780+#include <linux/types.h>
47781+#include <linux/fs.h>
47782+#include <linux/file.h>
47783+#include <linux/stat.h>
47784+#include <linux/grsecurity.h>
47785+#include <linux/grinternal.h>
47786+#include <linux/gracl.h>
47787+
47788+__u32
47789+gr_acl_handle_hidden_file(const struct dentry * dentry,
47790+ const struct vfsmount * mnt)
47791+{
47792+ __u32 mode;
47793+
47794+ if (unlikely(!dentry->d_inode))
47795+ return GR_FIND;
47796+
47797+ mode =
47798+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
47799+
47800+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
47801+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
47802+ return mode;
47803+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
47804+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
47805+ return 0;
47806+ } else if (unlikely(!(mode & GR_FIND)))
47807+ return 0;
47808+
47809+ return GR_FIND;
47810+}
47811+
47812+__u32
47813+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
47814+ const int fmode)
47815+{
47816+ __u32 reqmode = GR_FIND;
47817+ __u32 mode;
47818+
47819+ if (unlikely(!dentry->d_inode))
47820+ return reqmode;
47821+
47822+ if (unlikely(fmode & O_APPEND))
47823+ reqmode |= GR_APPEND;
47824+ else if (unlikely(fmode & FMODE_WRITE))
47825+ reqmode |= GR_WRITE;
47826+ if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
47827+ reqmode |= GR_READ;
47828+ if ((fmode & FMODE_GREXEC) && (fmode & FMODE_EXEC))
47829+ reqmode &= ~GR_READ;
47830+ mode =
47831+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
47832+ mnt);
47833+
47834+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
47835+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
47836+ reqmode & GR_READ ? " reading" : "",
47837+ reqmode & GR_WRITE ? " writing" : reqmode &
47838+ GR_APPEND ? " appending" : "");
47839+ return reqmode;
47840+ } else
47841+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
47842+ {
47843+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
47844+ reqmode & GR_READ ? " reading" : "",
47845+ reqmode & GR_WRITE ? " writing" : reqmode &
47846+ GR_APPEND ? " appending" : "");
47847+ return 0;
47848+ } else if (unlikely((mode & reqmode) != reqmode))
47849+ return 0;
47850+
47851+ return reqmode;
47852+}
47853+
47854+__u32
47855+gr_acl_handle_creat(const struct dentry * dentry,
47856+ const struct dentry * p_dentry,
47857+ const struct vfsmount * p_mnt, const int fmode,
47858+ const int imode)
47859+{
47860+ __u32 reqmode = GR_WRITE | GR_CREATE;
47861+ __u32 mode;
47862+
47863+ if (unlikely(fmode & O_APPEND))
47864+ reqmode |= GR_APPEND;
47865+ if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
47866+ reqmode |= GR_READ;
47867+ if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
47868+ reqmode |= GR_SETID;
47869+
47870+ mode =
47871+ gr_check_create(dentry, p_dentry, p_mnt,
47872+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
47873+
47874+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
47875+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
47876+ reqmode & GR_READ ? " reading" : "",
47877+ reqmode & GR_WRITE ? " writing" : reqmode &
47878+ GR_APPEND ? " appending" : "");
47879+ return reqmode;
47880+ } else
47881+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
47882+ {
47883+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
47884+ reqmode & GR_READ ? " reading" : "",
47885+ reqmode & GR_WRITE ? " writing" : reqmode &
47886+ GR_APPEND ? " appending" : "");
47887+ return 0;
47888+ } else if (unlikely((mode & reqmode) != reqmode))
47889+ return 0;
47890+
47891+ return reqmode;
47892+}
47893+
47894+__u32
47895+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
47896+ const int fmode)
47897+{
47898+ __u32 mode, reqmode = GR_FIND;
47899+
47900+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
47901+ reqmode |= GR_EXEC;
47902+ if (fmode & S_IWOTH)
47903+ reqmode |= GR_WRITE;
47904+ if (fmode & S_IROTH)
47905+ reqmode |= GR_READ;
47906+
47907+ mode =
47908+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
47909+ mnt);
47910+
47911+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
47912+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
47913+ reqmode & GR_READ ? " reading" : "",
47914+ reqmode & GR_WRITE ? " writing" : "",
47915+ reqmode & GR_EXEC ? " executing" : "");
47916+ return reqmode;
47917+ } else
47918+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
47919+ {
47920+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
47921+ reqmode & GR_READ ? " reading" : "",
47922+ reqmode & GR_WRITE ? " writing" : "",
47923+ reqmode & GR_EXEC ? " executing" : "");
47924+ return 0;
47925+ } else if (unlikely((mode & reqmode) != reqmode))
47926+ return 0;
47927+
47928+ return reqmode;
47929+}
47930+
47931+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
47932+{
47933+ __u32 mode;
47934+
47935+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
47936+
47937+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
47938+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
47939+ return mode;
47940+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
47941+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
47942+ return 0;
47943+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
47944+ return 0;
47945+
47946+ return (reqmode);
47947+}
47948+
47949+__u32
47950+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
47951+{
47952+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
47953+}
47954+
47955+__u32
47956+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
47957+{
47958+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
47959+}
47960+
47961+__u32
47962+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
47963+{
47964+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
47965+}
47966+
47967+__u32
47968+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
47969+{
47970+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
47971+}
47972+
47973+__u32
47974+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
47975+ mode_t mode)
47976+{
47977+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
47978+ return 1;
47979+
47980+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
47981+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
47982+ GR_FCHMOD_ACL_MSG);
47983+ } else {
47984+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
47985+ }
47986+}
47987+
47988+__u32
47989+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
47990+ mode_t mode)
47991+{
47992+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
47993+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
47994+ GR_CHMOD_ACL_MSG);
47995+ } else {
47996+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
47997+ }
47998+}
47999+
48000+__u32
48001+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
48002+{
48003+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
48004+}
48005+
48006+__u32
48007+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
48008+{
48009+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
48010+}
48011+
48012+__u32
48013+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
48014+{
48015+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
48016+}
48017+
48018+__u32
48019+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
48020+{
48021+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
48022+ GR_UNIXCONNECT_ACL_MSG);
48023+}
48024+
48025+/* hardlinks require at minimum create permission,
48026+ any additional privilege required is based on the
48027+ privilege of the file being linked to
48028+*/
48029+__u32
48030+gr_acl_handle_link(const struct dentry * new_dentry,
48031+ const struct dentry * parent_dentry,
48032+ const struct vfsmount * parent_mnt,
48033+ const struct dentry * old_dentry,
48034+ const struct vfsmount * old_mnt, const char *to)
48035+{
48036+ __u32 mode;
48037+ __u32 needmode = GR_CREATE | GR_LINK;
48038+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
48039+
48040+ mode =
48041+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
48042+ old_mnt);
48043+
48044+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
48045+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
48046+ return mode;
48047+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
48048+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
48049+ return 0;
48050+ } else if (unlikely((mode & needmode) != needmode))
48051+ return 0;
48052+
48053+ return 1;
48054+}
48055+
48056+__u32
48057+gr_acl_handle_symlink(const struct dentry * new_dentry,
48058+ const struct dentry * parent_dentry,
48059+ const struct vfsmount * parent_mnt, const char *from)
48060+{
48061+ __u32 needmode = GR_WRITE | GR_CREATE;
48062+ __u32 mode;
48063+
48064+ mode =
48065+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
48066+ GR_CREATE | GR_AUDIT_CREATE |
48067+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
48068+
48069+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
48070+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
48071+ return mode;
48072+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
48073+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
48074+ return 0;
48075+ } else if (unlikely((mode & needmode) != needmode))
48076+ return 0;
48077+
48078+ return (GR_WRITE | GR_CREATE);
48079+}
48080+
48081+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
48082+{
48083+ __u32 mode;
48084+
48085+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
48086+
48087+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
48088+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
48089+ return mode;
48090+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
48091+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
48092+ return 0;
48093+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
48094+ return 0;
48095+
48096+ return (reqmode);
48097+}
48098+
48099+__u32
48100+gr_acl_handle_mknod(const struct dentry * new_dentry,
48101+ const struct dentry * parent_dentry,
48102+ const struct vfsmount * parent_mnt,
48103+ const int mode)
48104+{
48105+ __u32 reqmode = GR_WRITE | GR_CREATE;
48106+ if (unlikely(mode & (S_ISUID | S_ISGID)))
48107+ reqmode |= GR_SETID;
48108+
48109+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
48110+ reqmode, GR_MKNOD_ACL_MSG);
48111+}
48112+
48113+__u32
48114+gr_acl_handle_mkdir(const struct dentry *new_dentry,
48115+ const struct dentry *parent_dentry,
48116+ const struct vfsmount *parent_mnt)
48117+{
48118+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
48119+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
48120+}
48121+
48122+#define RENAME_CHECK_SUCCESS(old, new) \
48123+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
48124+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
48125+
48126+int
48127+gr_acl_handle_rename(struct dentry *new_dentry,
48128+ struct dentry *parent_dentry,
48129+ const struct vfsmount *parent_mnt,
48130+ struct dentry *old_dentry,
48131+ struct inode *old_parent_inode,
48132+ struct vfsmount *old_mnt, const char *newname)
48133+{
48134+ __u32 comp1, comp2;
48135+ int error = 0;
48136+
48137+ if (unlikely(!gr_acl_is_enabled()))
48138+ return 0;
48139+
48140+ if (!new_dentry->d_inode) {
48141+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
48142+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
48143+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
48144+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
48145+ GR_DELETE | GR_AUDIT_DELETE |
48146+ GR_AUDIT_READ | GR_AUDIT_WRITE |
48147+ GR_SUPPRESS, old_mnt);
48148+ } else {
48149+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
48150+ GR_CREATE | GR_DELETE |
48151+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
48152+ GR_AUDIT_READ | GR_AUDIT_WRITE |
48153+ GR_SUPPRESS, parent_mnt);
48154+ comp2 =
48155+ gr_search_file(old_dentry,
48156+ GR_READ | GR_WRITE | GR_AUDIT_READ |
48157+ GR_DELETE | GR_AUDIT_DELETE |
48158+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
48159+ }
48160+
48161+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
48162+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
48163+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
48164+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
48165+ && !(comp2 & GR_SUPPRESS)) {
48166+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
48167+ error = -EACCES;
48168+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
48169+ error = -EACCES;
48170+
48171+ return error;
48172+}
48173+
48174+void
48175+gr_acl_handle_exit(void)
48176+{
48177+ u16 id;
48178+ char *rolename;
48179+ struct file *exec_file;
48180+
48181+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
48182+ !(current->role->roletype & GR_ROLE_PERSIST))) {
48183+ id = current->acl_role_id;
48184+ rolename = current->role->rolename;
48185+ gr_set_acls(1);
48186+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
48187+ }
48188+
48189+ write_lock(&grsec_exec_file_lock);
48190+ exec_file = current->exec_file;
48191+ current->exec_file = NULL;
48192+ write_unlock(&grsec_exec_file_lock);
48193+
48194+ if (exec_file)
48195+ fput(exec_file);
48196+}
48197+
48198+int
48199+gr_acl_handle_procpidmem(const struct task_struct *task)
48200+{
48201+ if (unlikely(!gr_acl_is_enabled()))
48202+ return 0;
48203+
48204+ if (task != current && task->acl->mode & GR_PROTPROCFD)
48205+ return -EACCES;
48206+
48207+ return 0;
48208+}
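gr_acl_handle_open() above derives the requested-mode bitmask from the open flags before consulting gr_search_file(): O_APPEND takes priority over a plain write, a read is only requested for non-directory opens, and FMODE_GREXEC together with FMODE_EXEC drops the read requirement again. Below is a hedged sketch of just that mapping; the flag values are invented for the model and only the decision order mirrors the patch.

/* Sketch of the reqmode derivation in gr_acl_handle_open() above.
 * Flag values are illustrative; only the decision order matters. */
#include <stdio.h>

#define GR_FIND    0x01u
#define GR_READ    0x02u
#define GR_APPEND  0x04u
#define GR_WRITE   0x08u

#define FMODE_READ    0x01u
#define FMODE_WRITE   0x02u
#define O_APPEND      0x04u
#define O_DIRECTORY   0x08u
#define FMODE_EXEC    0x10u
#define FMODE_GREXEC  0x20u

static unsigned int open_reqmode(unsigned int fmode)
{
	unsigned int reqmode = GR_FIND;

	if (fmode & O_APPEND)                 /* append wins over plain write */
		reqmode |= GR_APPEND;
	else if (fmode & FMODE_WRITE)
		reqmode |= GR_WRITE;
	if ((fmode & FMODE_READ) && !(fmode & O_DIRECTORY))
		reqmode |= GR_READ;
	if ((fmode & FMODE_GREXEC) && (fmode & FMODE_EXEC))
		reqmode &= ~GR_READ;          /* exec-only opens need no read */
	return reqmode;
}

int main(void)
{
	printf("O_APPEND|read : %#x\n", open_reqmode(O_APPEND | FMODE_READ));
	printf("write-only    : %#x\n", open_reqmode(FMODE_WRITE));
	printf("exec-only read: %#x\n", open_reqmode(FMODE_READ | FMODE_EXEC | FMODE_GREXEC));
	return 0;
}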
48209diff -urNp linux-2.6.32.41/grsecurity/gracl_ip.c linux-2.6.32.41/grsecurity/gracl_ip.c
48210--- linux-2.6.32.41/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
48211+++ linux-2.6.32.41/grsecurity/gracl_ip.c 2011-04-17 15:56:46.000000000 -0400
48212@@ -0,0 +1,382 @@
48213+#include <linux/kernel.h>
48214+#include <asm/uaccess.h>
48215+#include <asm/errno.h>
48216+#include <net/sock.h>
48217+#include <linux/file.h>
48218+#include <linux/fs.h>
48219+#include <linux/net.h>
48220+#include <linux/in.h>
48221+#include <linux/skbuff.h>
48222+#include <linux/ip.h>
48223+#include <linux/udp.h>
48224+#include <linux/smp_lock.h>
48225+#include <linux/types.h>
48226+#include <linux/sched.h>
48227+#include <linux/netdevice.h>
48228+#include <linux/inetdevice.h>
48229+#include <linux/gracl.h>
48230+#include <linux/grsecurity.h>
48231+#include <linux/grinternal.h>
48232+
48233+#define GR_BIND 0x01
48234+#define GR_CONNECT 0x02
48235+#define GR_INVERT 0x04
48236+#define GR_BINDOVERRIDE 0x08
48237+#define GR_CONNECTOVERRIDE 0x10
48238+#define GR_SOCK_FAMILY 0x20
48239+
48240+static const char * gr_protocols[IPPROTO_MAX] = {
48241+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
48242+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
48243+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
48244+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
48245+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
48246+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
48247+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
48248+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
48249+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
48250+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
48251+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
48252+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
48253+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
48254+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
48255+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
48256+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
48257+	    "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
48258+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
48259+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
48260+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
48261+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
48262+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
48263+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
48264+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
48265+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
48266+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
48267+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
48268+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
48269+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
48270+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
48271+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
48272+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
48273+ };
48274+
48275+static const char * gr_socktypes[SOCK_MAX] = {
48276+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
48277+ "unknown:7", "unknown:8", "unknown:9", "packet"
48278+ };
48279+
48280+static const char * gr_sockfamilies[AF_MAX+1] = {
48281+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
48282+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
48283+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
48284+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
48285+ };
48286+
48287+const char *
48288+gr_proto_to_name(unsigned char proto)
48289+{
48290+ return gr_protocols[proto];
48291+}
48292+
48293+const char *
48294+gr_socktype_to_name(unsigned char type)
48295+{
48296+ return gr_socktypes[type];
48297+}
48298+
48299+const char *
48300+gr_sockfamily_to_name(unsigned char family)
48301+{
48302+ return gr_sockfamilies[family];
48303+}
48304+
48305+int
48306+gr_search_socket(const int domain, const int type, const int protocol)
48307+{
48308+ struct acl_subject_label *curr;
48309+ const struct cred *cred = current_cred();
48310+
48311+ if (unlikely(!gr_acl_is_enabled()))
48312+ goto exit;
48313+
48314+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
48315+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
48316+ goto exit; // let the kernel handle it
48317+
48318+ curr = current->acl;
48319+
48320+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
48321+ /* the family is allowed, if this is PF_INET allow it only if
48322+ the extra sock type/protocol checks pass */
48323+ if (domain == PF_INET)
48324+ goto inet_check;
48325+ goto exit;
48326+ } else {
48327+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
48328+ __u32 fakeip = 0;
48329+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
48330+ current->role->roletype, cred->uid,
48331+ cred->gid, current->exec_file ?
48332+ gr_to_filename(current->exec_file->f_path.dentry,
48333+ current->exec_file->f_path.mnt) :
48334+ curr->filename, curr->filename,
48335+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
48336+ &current->signal->saved_ip);
48337+ goto exit;
48338+ }
48339+ goto exit_fail;
48340+ }
48341+
48342+inet_check:
48343+ /* the rest of this checking is for IPv4 only */
48344+ if (!curr->ips)
48345+ goto exit;
48346+
48347+ if ((curr->ip_type & (1 << type)) &&
48348+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
48349+ goto exit;
48350+
48351+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
48352+		/* we don't place acls on raw sockets, and sometimes
48353+ dgram/ip sockets are opened for ioctl and not
48354+ bind/connect, so we'll fake a bind learn log */
48355+ if (type == SOCK_RAW || type == SOCK_PACKET) {
48356+ __u32 fakeip = 0;
48357+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
48358+ current->role->roletype, cred->uid,
48359+ cred->gid, current->exec_file ?
48360+ gr_to_filename(current->exec_file->f_path.dentry,
48361+ current->exec_file->f_path.mnt) :
48362+ curr->filename, curr->filename,
48363+ &fakeip, 0, type,
48364+ protocol, GR_CONNECT, &current->signal->saved_ip);
48365+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
48366+ __u32 fakeip = 0;
48367+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
48368+ current->role->roletype, cred->uid,
48369+ cred->gid, current->exec_file ?
48370+ gr_to_filename(current->exec_file->f_path.dentry,
48371+ current->exec_file->f_path.mnt) :
48372+ curr->filename, curr->filename,
48373+ &fakeip, 0, type,
48374+ protocol, GR_BIND, &current->signal->saved_ip);
48375+ }
48376+ /* we'll log when they use connect or bind */
48377+ goto exit;
48378+ }
48379+
48380+exit_fail:
48381+ if (domain == PF_INET)
48382+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
48383+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
48384+ else
48385+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
48386+ gr_socktype_to_name(type), protocol);
48387+
48388+ return 0;
48389+exit:
48390+ return 1;
48391+}
48392+
48393+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
48394+{
48395+ if ((ip->mode & mode) &&
48396+ (ip_port >= ip->low) &&
48397+ (ip_port <= ip->high) &&
48398+ ((ntohl(ip_addr) & our_netmask) ==
48399+ (ntohl(our_addr) & our_netmask))
48400+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
48401+ && (ip->type & (1 << type))) {
48402+ if (ip->mode & GR_INVERT)
48403+ return 2; // specifically denied
48404+ else
48405+ return 1; // allowed
48406+ }
48407+
48408+ return 0; // not specifically allowed, may continue parsing
48409+}
48410+
48411+static int
48412+gr_search_connectbind(const int full_mode, struct sock *sk,
48413+ struct sockaddr_in *addr, const int type)
48414+{
48415+ char iface[IFNAMSIZ] = {0};
48416+ struct acl_subject_label *curr;
48417+ struct acl_ip_label *ip;
48418+ struct inet_sock *isk;
48419+ struct net_device *dev;
48420+ struct in_device *idev;
48421+ unsigned long i;
48422+ int ret;
48423+ int mode = full_mode & (GR_BIND | GR_CONNECT);
48424+ __u32 ip_addr = 0;
48425+ __u32 our_addr;
48426+ __u32 our_netmask;
48427+ char *p;
48428+ __u16 ip_port = 0;
48429+ const struct cred *cred = current_cred();
48430+
48431+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
48432+ return 0;
48433+
48434+ curr = current->acl;
48435+ isk = inet_sk(sk);
48436+
48437+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
48438+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
48439+ addr->sin_addr.s_addr = curr->inaddr_any_override;
48440+ if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
48441+ struct sockaddr_in saddr;
48442+ int err;
48443+
48444+ saddr.sin_family = AF_INET;
48445+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
48446+ saddr.sin_port = isk->sport;
48447+
48448+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
48449+ if (err)
48450+ return err;
48451+
48452+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
48453+ if (err)
48454+ return err;
48455+ }
48456+
48457+ if (!curr->ips)
48458+ return 0;
48459+
48460+ ip_addr = addr->sin_addr.s_addr;
48461+ ip_port = ntohs(addr->sin_port);
48462+
48463+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
48464+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
48465+ current->role->roletype, cred->uid,
48466+ cred->gid, current->exec_file ?
48467+ gr_to_filename(current->exec_file->f_path.dentry,
48468+ current->exec_file->f_path.mnt) :
48469+ curr->filename, curr->filename,
48470+ &ip_addr, ip_port, type,
48471+ sk->sk_protocol, mode, &current->signal->saved_ip);
48472+ return 0;
48473+ }
48474+
48475+ for (i = 0; i < curr->ip_num; i++) {
48476+ ip = *(curr->ips + i);
48477+ if (ip->iface != NULL) {
48478+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
48479+ p = strchr(iface, ':');
48480+ if (p != NULL)
48481+ *p = '\0';
48482+ dev = dev_get_by_name(sock_net(sk), iface);
48483+ if (dev == NULL)
48484+ continue;
48485+ idev = in_dev_get(dev);
48486+ if (idev == NULL) {
48487+ dev_put(dev);
48488+ continue;
48489+ }
48490+ rcu_read_lock();
48491+ for_ifa(idev) {
48492+ if (!strcmp(ip->iface, ifa->ifa_label)) {
48493+ our_addr = ifa->ifa_address;
48494+ our_netmask = 0xffffffff;
48495+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
48496+ if (ret == 1) {
48497+ rcu_read_unlock();
48498+ in_dev_put(idev);
48499+ dev_put(dev);
48500+ return 0;
48501+ } else if (ret == 2) {
48502+ rcu_read_unlock();
48503+ in_dev_put(idev);
48504+ dev_put(dev);
48505+ goto denied;
48506+ }
48507+ }
48508+ } endfor_ifa(idev);
48509+ rcu_read_unlock();
48510+ in_dev_put(idev);
48511+ dev_put(dev);
48512+ } else {
48513+ our_addr = ip->addr;
48514+ our_netmask = ip->netmask;
48515+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
48516+ if (ret == 1)
48517+ return 0;
48518+ else if (ret == 2)
48519+ goto denied;
48520+ }
48521+ }
48522+
48523+denied:
48524+ if (mode == GR_BIND)
48525+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
48526+ else if (mode == GR_CONNECT)
48527+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
48528+
48529+ return -EACCES;
48530+}
48531+
48532+int
48533+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
48534+{
48535+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
48536+}
48537+
48538+int
48539+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
48540+{
48541+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
48542+}
48543+
48544+int gr_search_listen(struct socket *sock)
48545+{
48546+ struct sock *sk = sock->sk;
48547+ struct sockaddr_in addr;
48548+
48549+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
48550+ addr.sin_port = inet_sk(sk)->sport;
48551+
48552+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
48553+}
48554+
48555+int gr_search_accept(struct socket *sock)
48556+{
48557+ struct sock *sk = sock->sk;
48558+ struct sockaddr_in addr;
48559+
48560+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
48561+ addr.sin_port = inet_sk(sk)->sport;
48562+
48563+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
48564+}
48565+
48566+int
48567+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
48568+{
48569+ if (addr)
48570+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
48571+ else {
48572+ struct sockaddr_in sin;
48573+ const struct inet_sock *inet = inet_sk(sk);
48574+
48575+ sin.sin_addr.s_addr = inet->daddr;
48576+ sin.sin_port = inet->dport;
48577+
48578+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
48579+ }
48580+}
48581+
48582+int
48583+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
48584+{
48585+ struct sockaddr_in sin;
48586+
48587+ if (unlikely(skb->len < sizeof (struct udphdr)))
48588+ return 0; // skip this packet
48589+
48590+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
48591+ sin.sin_port = udp_hdr(skb)->source;
48592+
48593+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
48594+}
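check_ip_policy() above is the heart of the per-subject IP ACL match: a rule applies only when the mode, port range, protocol bit, socket-type bit and masked address all agree, and GR_INVERT turns that match into an explicit deny. A standalone model of the three-way result (keep scanning / allow / deny); ip_rule is a hypothetical stand-in for struct acl_ip_label, and addresses are kept in host byte order to keep the sketch short.

/* Userspace model of check_ip_policy() above.
 * Returns 0 = keep scanning, 1 = allow, 2 = deny. */
#include <stdio.h>
#include <stdint.h>

#define GR_BIND    0x01
#define GR_CONNECT 0x02
#define GR_INVERT  0x04

struct ip_rule {
	uint32_t addr, netmask;      /* host byte order in this model */
	uint16_t low, high;          /* allowed port range */
	uint32_t proto_mask;         /* one bit per protocol (model: 32 protos) */
	uint32_t type_mask;          /* one bit per socket type */
	int mode;                    /* GR_BIND / GR_CONNECT / GR_INVERT */
};

static int ip_policy(const struct ip_rule *r, uint32_t addr, uint16_t port,
		     int proto, int type, int mode)
{
	if ((r->mode & mode) &&
	    port >= r->low && port <= r->high &&
	    ((addr & r->netmask) == (r->addr & r->netmask)) &&
	    (r->proto_mask & (1u << proto)) &&
	    (r->type_mask & (1u << type)))
		return (r->mode & GR_INVERT) ? 2 : 1;
	return 0;
}

int main(void)
{
	/* allow TCP (proto 6) stream (type 1) connects to 10.0.0.0/8, ports 1-1023 */
	struct ip_rule r = { 0x0a000000u, 0xff000000u, 1, 1023,
			     1u << 6, 1u << 1, GR_CONNECT };

	printf("10.0.0.5:80 -> %d\n", ip_policy(&r, 0x0a000005u, 80, 6, 1, GR_CONNECT));
	printf("192.168.0.1 -> %d\n", ip_policy(&r, 0xc0a80001u, 80, 6, 1, GR_CONNECT));
	return 0;
}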
48595diff -urNp linux-2.6.32.41/grsecurity/gracl_learn.c linux-2.6.32.41/grsecurity/gracl_learn.c
48596--- linux-2.6.32.41/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
48597+++ linux-2.6.32.41/grsecurity/gracl_learn.c 2011-04-17 15:56:46.000000000 -0400
48598@@ -0,0 +1,211 @@
48599+#include <linux/kernel.h>
48600+#include <linux/mm.h>
48601+#include <linux/sched.h>
48602+#include <linux/poll.h>
48603+#include <linux/smp_lock.h>
48604+#include <linux/string.h>
48605+#include <linux/file.h>
48606+#include <linux/types.h>
48607+#include <linux/vmalloc.h>
48608+#include <linux/grinternal.h>
48609+
48610+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
48611+ size_t count, loff_t *ppos);
48612+extern int gr_acl_is_enabled(void);
48613+
48614+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
48615+static int gr_learn_attached;
48616+
48617+/* use a 512k buffer */
48618+#define LEARN_BUFFER_SIZE (512 * 1024)
48619+
48620+static DEFINE_SPINLOCK(gr_learn_lock);
48621+static DEFINE_MUTEX(gr_learn_user_mutex);
48622+
48623+/* we need to maintain two buffers, so that the kernel context of grlearn
48624+ uses a semaphore around the userspace copying, and the other kernel contexts
48625+ use a spinlock when copying into the buffer, since they cannot sleep
48626+*/
48627+static char *learn_buffer;
48628+static char *learn_buffer_user;
48629+static int learn_buffer_len;
48630+static int learn_buffer_user_len;
48631+
48632+static ssize_t
48633+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
48634+{
48635+ DECLARE_WAITQUEUE(wait, current);
48636+ ssize_t retval = 0;
48637+
48638+ add_wait_queue(&learn_wait, &wait);
48639+ set_current_state(TASK_INTERRUPTIBLE);
48640+ do {
48641+ mutex_lock(&gr_learn_user_mutex);
48642+ spin_lock(&gr_learn_lock);
48643+ if (learn_buffer_len)
48644+ break;
48645+ spin_unlock(&gr_learn_lock);
48646+ mutex_unlock(&gr_learn_user_mutex);
48647+ if (file->f_flags & O_NONBLOCK) {
48648+ retval = -EAGAIN;
48649+ goto out;
48650+ }
48651+ if (signal_pending(current)) {
48652+ retval = -ERESTARTSYS;
48653+ goto out;
48654+ }
48655+
48656+ schedule();
48657+ } while (1);
48658+
48659+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
48660+ learn_buffer_user_len = learn_buffer_len;
48661+ retval = learn_buffer_len;
48662+ learn_buffer_len = 0;
48663+
48664+ spin_unlock(&gr_learn_lock);
48665+
48666+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
48667+ retval = -EFAULT;
48668+
48669+ mutex_unlock(&gr_learn_user_mutex);
48670+out:
48671+ set_current_state(TASK_RUNNING);
48672+ remove_wait_queue(&learn_wait, &wait);
48673+ return retval;
48674+}
48675+
48676+static unsigned int
48677+poll_learn(struct file * file, poll_table * wait)
48678+{
48679+ poll_wait(file, &learn_wait, wait);
48680+
48681+ if (learn_buffer_len)
48682+ return (POLLIN | POLLRDNORM);
48683+
48684+ return 0;
48685+}
48686+
48687+void
48688+gr_clear_learn_entries(void)
48689+{
48690+ char *tmp;
48691+
48692+ mutex_lock(&gr_learn_user_mutex);
48693+ if (learn_buffer != NULL) {
48694+ spin_lock(&gr_learn_lock);
48695+ tmp = learn_buffer;
48696+ learn_buffer = NULL;
48697+ spin_unlock(&gr_learn_lock);
48698+		vfree(tmp);	/* free the detached buffer; learn_buffer is already NULL */
48699+ }
48700+ if (learn_buffer_user != NULL) {
48701+ vfree(learn_buffer_user);
48702+ learn_buffer_user = NULL;
48703+ }
48704+ learn_buffer_len = 0;
48705+ mutex_unlock(&gr_learn_user_mutex);
48706+
48707+ return;
48708+}
48709+
48710+void
48711+gr_add_learn_entry(const char *fmt, ...)
48712+{
48713+ va_list args;
48714+ unsigned int len;
48715+
48716+ if (!gr_learn_attached)
48717+ return;
48718+
48719+ spin_lock(&gr_learn_lock);
48720+
48721+ /* leave a gap at the end so we know when it's "full" but don't have to
48722+ compute the exact length of the string we're trying to append
48723+ */
48724+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
48725+ spin_unlock(&gr_learn_lock);
48726+ wake_up_interruptible(&learn_wait);
48727+ return;
48728+ }
48729+ if (learn_buffer == NULL) {
48730+ spin_unlock(&gr_learn_lock);
48731+ return;
48732+ }
48733+
48734+ va_start(args, fmt);
48735+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
48736+ va_end(args);
48737+
48738+ learn_buffer_len += len + 1;
48739+
48740+ spin_unlock(&gr_learn_lock);
48741+ wake_up_interruptible(&learn_wait);
48742+
48743+ return;
48744+}
48745+
48746+static int
48747+open_learn(struct inode *inode, struct file *file)
48748+{
48749+ if (file->f_mode & FMODE_READ && gr_learn_attached)
48750+ return -EBUSY;
48751+ if (file->f_mode & FMODE_READ) {
48752+ int retval = 0;
48753+ mutex_lock(&gr_learn_user_mutex);
48754+ if (learn_buffer == NULL)
48755+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
48756+ if (learn_buffer_user == NULL)
48757+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
48758+ if (learn_buffer == NULL) {
48759+ retval = -ENOMEM;
48760+ goto out_error;
48761+ }
48762+ if (learn_buffer_user == NULL) {
48763+ retval = -ENOMEM;
48764+ goto out_error;
48765+ }
48766+ learn_buffer_len = 0;
48767+ learn_buffer_user_len = 0;
48768+ gr_learn_attached = 1;
48769+out_error:
48770+ mutex_unlock(&gr_learn_user_mutex);
48771+ return retval;
48772+ }
48773+ return 0;
48774+}
48775+
48776+static int
48777+close_learn(struct inode *inode, struct file *file)
48778+{
48779+ char *tmp;
48780+
48781+ if (file->f_mode & FMODE_READ) {
48782+ mutex_lock(&gr_learn_user_mutex);
48783+ if (learn_buffer != NULL) {
48784+ spin_lock(&gr_learn_lock);
48785+ tmp = learn_buffer;
48786+ learn_buffer = NULL;
48787+ spin_unlock(&gr_learn_lock);
48788+ vfree(tmp);
48789+ }
48790+ if (learn_buffer_user != NULL) {
48791+ vfree(learn_buffer_user);
48792+ learn_buffer_user = NULL;
48793+ }
48794+ learn_buffer_len = 0;
48795+ learn_buffer_user_len = 0;
48796+ gr_learn_attached = 0;
48797+ mutex_unlock(&gr_learn_user_mutex);
48798+ }
48799+
48800+ return 0;
48801+}
48802+
48803+const struct file_operations grsec_fops = {
48804+ .read = read_learn,
48805+ .write = write_grsec_handler,
48806+ .open = open_learn,
48807+ .release = close_learn,
48808+ .poll = poll_learn,
48809+};
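The learn device above uses a classic double buffer: producers append under a spinlock because they may run in atomic context, while the single reader snapshots into learn_buffer_user under a mutex and performs the slow copy_to_user() only after the spinlock is dropped. A minimal pthread model of that hand-off follows; the names and sizes are illustrative, not the kernel interface.

/* Userspace model of the two-buffer hand-off used by the learn device:
 * producers append under a spinlock, the single reader snapshots under a
 * mutex and does the expensive copy only after dropping the spinlock. */
#include <stdio.h>
#include <string.h>
#include <pthread.h>

#define BUF_SIZE 4096

static char buf[BUF_SIZE], snapshot[BUF_SIZE];
static size_t buf_len;
static pthread_spinlock_t buf_lock;          /* producers, possibly atomic context */
static pthread_mutex_t reader_lock = PTHREAD_MUTEX_INITIALIZER;

static void add_entry(const char *msg)       /* cf. gr_add_learn_entry() */
{
	pthread_spin_lock(&buf_lock);
	if (buf_len + strlen(msg) + 1 < BUF_SIZE) {
		strcpy(buf + buf_len, msg);
		buf_len += strlen(msg) + 1;
	}
	pthread_spin_unlock(&buf_lock);
}

static size_t read_entries(void)             /* cf. read_learn() */
{
	size_t len;

	pthread_mutex_lock(&reader_lock);
	pthread_spin_lock(&buf_lock);
	memcpy(snapshot, buf, buf_len);          /* snapshot while locked */
	len = buf_len;
	buf_len = 0;
	pthread_spin_unlock(&buf_lock);
	/* the slow "copy_to_user" step would happen here, under the mutex only */
	pthread_mutex_unlock(&reader_lock);
	return len;
}

int main(void)
{
	pthread_spin_init(&buf_lock, PTHREAD_PROCESS_PRIVATE);
	add_entry("subject /bin/foo o /etc/passwd r");
	printf("drained %zu bytes\n", read_entries());
	return 0;
}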
48810diff -urNp linux-2.6.32.41/grsecurity/gracl_res.c linux-2.6.32.41/grsecurity/gracl_res.c
48811--- linux-2.6.32.41/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
48812+++ linux-2.6.32.41/grsecurity/gracl_res.c 2011-04-17 15:56:46.000000000 -0400
48813@@ -0,0 +1,67 @@
48814+#include <linux/kernel.h>
48815+#include <linux/sched.h>
48816+#include <linux/gracl.h>
48817+#include <linux/grinternal.h>
48818+
48819+static const char *restab_log[] = {
48820+ [RLIMIT_CPU] = "RLIMIT_CPU",
48821+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
48822+ [RLIMIT_DATA] = "RLIMIT_DATA",
48823+ [RLIMIT_STACK] = "RLIMIT_STACK",
48824+ [RLIMIT_CORE] = "RLIMIT_CORE",
48825+ [RLIMIT_RSS] = "RLIMIT_RSS",
48826+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
48827+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
48828+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
48829+ [RLIMIT_AS] = "RLIMIT_AS",
48830+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
48831+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
48832+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
48833+ [RLIMIT_NICE] = "RLIMIT_NICE",
48834+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
48835+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
48836+ [GR_CRASH_RES] = "RLIMIT_CRASH"
48837+};
48838+
48839+void
48840+gr_log_resource(const struct task_struct *task,
48841+ const int res, const unsigned long wanted, const int gt)
48842+{
48843+ const struct cred *cred;
48844+ unsigned long rlim;
48845+
48846+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
48847+ return;
48848+
48849+ // not yet supported resource
48850+ if (unlikely(!restab_log[res]))
48851+ return;
48852+
48853+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
48854+ rlim = task->signal->rlim[res].rlim_max;
48855+ else
48856+ rlim = task->signal->rlim[res].rlim_cur;
48857+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
48858+ return;
48859+
48860+ rcu_read_lock();
48861+ cred = __task_cred(task);
48862+
48863+ if (res == RLIMIT_NPROC &&
48864+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
48865+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
48866+ goto out_rcu_unlock;
48867+ else if (res == RLIMIT_MEMLOCK &&
48868+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
48869+ goto out_rcu_unlock;
48870+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
48871+ goto out_rcu_unlock;
48872+ rcu_read_unlock();
48873+
48874+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
48875+
48876+ return;
48877+out_rcu_unlock:
48878+ rcu_read_unlock();
48879+ return;
48880+}
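gr_log_resource() above only logs once a request actually crosses the applicable limit: RLIMIT_CPU and RLIMIT_RTTIME are compared against rlim_max, everything else against rlim_cur, and the gt flag selects whether the request must exceed or merely reach the limit. A tiny model of that threshold test, with an illustrative struct in place of struct rlimit.

/* Model of the limit test in gr_log_resource() above; RLIM_INF and
 * struct lim are illustrative stand-ins. */
#include <stdio.h>
#include <stdbool.h>

#define RLIM_INF (~0UL)

struct lim { unsigned long cur, max; };

static bool over_limit(struct lim l, bool use_max, unsigned long wanted, bool gt)
{
	unsigned long rlim = use_max ? l.max : l.cur;

	if (rlim == RLIM_INF)
		return false;
	return gt ? wanted > rlim : wanted >= rlim;
}

int main(void)
{
	struct lim nofile = { 1024, 4096 };

	printf("%d\n", over_limit(nofile, false, 1024, false)); /* 1: at the soft cap */
	printf("%d\n", over_limit(nofile, false, 1023, false)); /* 0: still below it  */
	return 0;
}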
48881diff -urNp linux-2.6.32.41/grsecurity/gracl_segv.c linux-2.6.32.41/grsecurity/gracl_segv.c
48882--- linux-2.6.32.41/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
48883+++ linux-2.6.32.41/grsecurity/gracl_segv.c 2011-04-17 15:56:46.000000000 -0400
48884@@ -0,0 +1,284 @@
48885+#include <linux/kernel.h>
48886+#include <linux/mm.h>
48887+#include <asm/uaccess.h>
48888+#include <asm/errno.h>
48889+#include <asm/mman.h>
48890+#include <net/sock.h>
48891+#include <linux/file.h>
48892+#include <linux/fs.h>
48893+#include <linux/net.h>
48894+#include <linux/in.h>
48895+#include <linux/smp_lock.h>
48896+#include <linux/slab.h>
48897+#include <linux/types.h>
48898+#include <linux/sched.h>
48899+#include <linux/timer.h>
48900+#include <linux/gracl.h>
48901+#include <linux/grsecurity.h>
48902+#include <linux/grinternal.h>
48903+
48904+static struct crash_uid *uid_set;
48905+static unsigned short uid_used;
48906+static DEFINE_SPINLOCK(gr_uid_lock);
48907+extern rwlock_t gr_inode_lock;
48908+extern struct acl_subject_label *
48909+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
48910+ struct acl_role_label *role);
48911+extern int gr_fake_force_sig(int sig, struct task_struct *t);
48912+
48913+int
48914+gr_init_uidset(void)
48915+{
48916+ uid_set =
48917+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
48918+ uid_used = 0;
48919+
48920+ return uid_set ? 1 : 0;
48921+}
48922+
48923+void
48924+gr_free_uidset(void)
48925+{
48926+ if (uid_set)
48927+ kfree(uid_set);
48928+
48929+ return;
48930+}
48931+
48932+int
48933+gr_find_uid(const uid_t uid)
48934+{
48935+ struct crash_uid *tmp = uid_set;
48936+ uid_t buid;
48937+ int low = 0, high = uid_used - 1, mid;
48938+
48939+ while (high >= low) {
48940+ mid = (low + high) >> 1;
48941+ buid = tmp[mid].uid;
48942+ if (buid == uid)
48943+ return mid;
48944+ if (buid > uid)
48945+ high = mid - 1;
48946+ if (buid < uid)
48947+ low = mid + 1;
48948+ }
48949+
48950+ return -1;
48951+}
48952+
48953+static __inline__ void
48954+gr_insertsort(void)
48955+{
48956+ unsigned short i, j;
48957+ struct crash_uid index;
48958+
48959+ for (i = 1; i < uid_used; i++) {
48960+ index = uid_set[i];
48961+ j = i;
48962+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
48963+ uid_set[j] = uid_set[j - 1];
48964+ j--;
48965+ }
48966+ uid_set[j] = index;
48967+ }
48968+
48969+ return;
48970+}
48971+
48972+static __inline__ void
48973+gr_insert_uid(const uid_t uid, const unsigned long expires)
48974+{
48975+ int loc;
48976+
48977+ if (uid_used == GR_UIDTABLE_MAX)
48978+ return;
48979+
48980+ loc = gr_find_uid(uid);
48981+
48982+ if (loc >= 0) {
48983+ uid_set[loc].expires = expires;
48984+ return;
48985+ }
48986+
48987+ uid_set[uid_used].uid = uid;
48988+ uid_set[uid_used].expires = expires;
48989+ uid_used++;
48990+
48991+ gr_insertsort();
48992+
48993+ return;
48994+}
48995+
48996+void
48997+gr_remove_uid(const unsigned short loc)
48998+{
48999+ unsigned short i;
49000+
49001+ for (i = loc + 1; i < uid_used; i++)
49002+ uid_set[i - 1] = uid_set[i];
49003+
49004+ uid_used--;
49005+
49006+ return;
49007+}
49008+
49009+int
49010+gr_check_crash_uid(const uid_t uid)
49011+{
49012+ int loc;
49013+ int ret = 0;
49014+
49015+ if (unlikely(!gr_acl_is_enabled()))
49016+ return 0;
49017+
49018+ spin_lock(&gr_uid_lock);
49019+ loc = gr_find_uid(uid);
49020+
49021+ if (loc < 0)
49022+ goto out_unlock;
49023+
49024+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
49025+ gr_remove_uid(loc);
49026+ else
49027+ ret = 1;
49028+
49029+out_unlock:
49030+ spin_unlock(&gr_uid_lock);
49031+ return ret;
49032+}
49033+
49034+static __inline__ int
49035+proc_is_setxid(const struct cred *cred)
49036+{
49037+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
49038+ cred->uid != cred->fsuid)
49039+ return 1;
49040+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
49041+ cred->gid != cred->fsgid)
49042+ return 1;
49043+
49044+ return 0;
49045+}
49046+
49047+void
49048+gr_handle_crash(struct task_struct *task, const int sig)
49049+{
49050+ struct acl_subject_label *curr;
49051+ struct acl_subject_label *curr2;
49052+ struct task_struct *tsk, *tsk2;
49053+ const struct cred *cred;
49054+ const struct cred *cred2;
49055+
49056+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
49057+ return;
49058+
49059+ if (unlikely(!gr_acl_is_enabled()))
49060+ return;
49061+
49062+ curr = task->acl;
49063+
49064+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
49065+ return;
49066+
49067+ if (time_before_eq(curr->expires, get_seconds())) {
49068+ curr->expires = 0;
49069+ curr->crashes = 0;
49070+ }
49071+
49072+ curr->crashes++;
49073+
49074+ if (!curr->expires)
49075+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
49076+
49077+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
49078+ time_after(curr->expires, get_seconds())) {
49079+ rcu_read_lock();
49080+ cred = __task_cred(task);
49081+ if (cred->uid && proc_is_setxid(cred)) {
49082+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
49083+ spin_lock(&gr_uid_lock);
49084+ gr_insert_uid(cred->uid, curr->expires);
49085+ spin_unlock(&gr_uid_lock);
49086+ curr->expires = 0;
49087+ curr->crashes = 0;
49088+ read_lock(&tasklist_lock);
49089+ do_each_thread(tsk2, tsk) {
49090+ cred2 = __task_cred(tsk);
49091+ if (tsk != task && cred2->uid == cred->uid)
49092+ gr_fake_force_sig(SIGKILL, tsk);
49093+ } while_each_thread(tsk2, tsk);
49094+ read_unlock(&tasklist_lock);
49095+ } else {
49096+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
49097+ read_lock(&tasklist_lock);
49098+ do_each_thread(tsk2, tsk) {
49099+ if (likely(tsk != task)) {
49100+ curr2 = tsk->acl;
49101+
49102+ if (curr2->device == curr->device &&
49103+ curr2->inode == curr->inode)
49104+ gr_fake_force_sig(SIGKILL, tsk);
49105+ }
49106+ } while_each_thread(tsk2, tsk);
49107+ read_unlock(&tasklist_lock);
49108+ }
49109+ rcu_read_unlock();
49110+ }
49111+
49112+ return;
49113+}
49114+
49115+int
49116+gr_check_crash_exec(const struct file *filp)
49117+{
49118+ struct acl_subject_label *curr;
49119+
49120+ if (unlikely(!gr_acl_is_enabled()))
49121+ return 0;
49122+
49123+ read_lock(&gr_inode_lock);
49124+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
49125+ filp->f_path.dentry->d_inode->i_sb->s_dev,
49126+ current->role);
49127+ read_unlock(&gr_inode_lock);
49128+
49129+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
49130+ (!curr->crashes && !curr->expires))
49131+ return 0;
49132+
49133+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
49134+ time_after(curr->expires, get_seconds()))
49135+ return 1;
49136+ else if (time_before_eq(curr->expires, get_seconds())) {
49137+ curr->crashes = 0;
49138+ curr->expires = 0;
49139+ }
49140+
49141+ return 0;
49142+}
49143+
49144+void
49145+gr_handle_alertkill(struct task_struct *task)
49146+{
49147+ struct acl_subject_label *curracl;
49148+ __u32 curr_ip;
49149+ struct task_struct *p, *p2;
49150+
49151+ if (unlikely(!gr_acl_is_enabled()))
49152+ return;
49153+
49154+ curracl = task->acl;
49155+ curr_ip = task->signal->curr_ip;
49156+
49157+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
49158+ read_lock(&tasklist_lock);
49159+ do_each_thread(p2, p) {
49160+ if (p->signal->curr_ip == curr_ip)
49161+ gr_fake_force_sig(SIGKILL, p);
49162+ } while_each_thread(p2, p);
49163+ read_unlock(&tasklist_lock);
49164+ } else if (curracl->mode & GR_KILLPROC)
49165+ gr_fake_force_sig(SIGKILL, task);
49166+
49167+ return;
49168+}
49169diff -urNp linux-2.6.32.41/grsecurity/gracl_shm.c linux-2.6.32.41/grsecurity/gracl_shm.c
49170--- linux-2.6.32.41/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
49171+++ linux-2.6.32.41/grsecurity/gracl_shm.c 2011-04-17 15:56:46.000000000 -0400
49172@@ -0,0 +1,40 @@
49173+#include <linux/kernel.h>
49174+#include <linux/mm.h>
49175+#include <linux/sched.h>
49176+#include <linux/file.h>
49177+#include <linux/ipc.h>
49178+#include <linux/gracl.h>
49179+#include <linux/grsecurity.h>
49180+#include <linux/grinternal.h>
49181+
49182+int
49183+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
49184+ const time_t shm_createtime, const uid_t cuid, const int shmid)
49185+{
49186+ struct task_struct *task;
49187+
49188+ if (!gr_acl_is_enabled())
49189+ return 1;
49190+
49191+ rcu_read_lock();
49192+ read_lock(&tasklist_lock);
49193+
49194+ task = find_task_by_vpid(shm_cprid);
49195+
49196+ if (unlikely(!task))
49197+ task = find_task_by_vpid(shm_lapid);
49198+
49199+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
49200+ (task->pid == shm_lapid)) &&
49201+ (task->acl->mode & GR_PROTSHM) &&
49202+ (task->acl != current->acl))) {
49203+ read_unlock(&tasklist_lock);
49204+ rcu_read_unlock();
49205+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
49206+ return 0;
49207+ }
49208+ read_unlock(&tasklist_lock);
49209+ rcu_read_unlock();
49210+
49211+ return 1;
49212+}
49213diff -urNp linux-2.6.32.41/grsecurity/grsec_chdir.c linux-2.6.32.41/grsecurity/grsec_chdir.c
49214--- linux-2.6.32.41/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
49215+++ linux-2.6.32.41/grsecurity/grsec_chdir.c 2011-04-17 15:56:46.000000000 -0400
49216@@ -0,0 +1,19 @@
49217+#include <linux/kernel.h>
49218+#include <linux/sched.h>
49219+#include <linux/fs.h>
49220+#include <linux/file.h>
49221+#include <linux/grsecurity.h>
49222+#include <linux/grinternal.h>
49223+
49224+void
49225+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
49226+{
49227+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
49228+ if ((grsec_enable_chdir && grsec_enable_group &&
49229+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
49230+ !grsec_enable_group)) {
49231+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
49232+ }
49233+#endif
49234+ return;
49235+}
49236diff -urNp linux-2.6.32.41/grsecurity/grsec_chroot.c linux-2.6.32.41/grsecurity/grsec_chroot.c
49237--- linux-2.6.32.41/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
49238+++ linux-2.6.32.41/grsecurity/grsec_chroot.c 2011-04-17 15:56:46.000000000 -0400
49239@@ -0,0 +1,395 @@
49240+#include <linux/kernel.h>
49241+#include <linux/module.h>
49242+#include <linux/sched.h>
49243+#include <linux/file.h>
49244+#include <linux/fs.h>
49245+#include <linux/mount.h>
49246+#include <linux/types.h>
49247+#include <linux/pid_namespace.h>
49248+#include <linux/grsecurity.h>
49249+#include <linux/grinternal.h>
49250+
49251+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
49252+{
49253+#ifdef CONFIG_GRKERNSEC
49254+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
49255+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
49256+ task->gr_is_chrooted = 1;
49257+ else
49258+ task->gr_is_chrooted = 0;
49259+
49260+ task->gr_chroot_dentry = path->dentry;
49261+#endif
49262+ return;
49263+}
49264+
49265+void gr_clear_chroot_entries(struct task_struct *task)
49266+{
49267+#ifdef CONFIG_GRKERNSEC
49268+ task->gr_is_chrooted = 0;
49269+ task->gr_chroot_dentry = NULL;
49270+#endif
49271+ return;
49272+}
49273+
49274+int
49275+gr_handle_chroot_unix(const pid_t pid)
49276+{
49277+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
49278+ struct pid *spid = NULL;
49279+
49280+ if (unlikely(!grsec_enable_chroot_unix))
49281+ return 1;
49282+
49283+ if (likely(!proc_is_chrooted(current)))
49284+ return 1;
49285+
49286+ rcu_read_lock();
49287+ read_lock(&tasklist_lock);
49288+
49289+ spid = find_vpid(pid);
49290+ if (spid) {
49291+ struct task_struct *p;
49292+ p = pid_task(spid, PIDTYPE_PID);
49293+ if (unlikely(p && !have_same_root(current, p))) {
49294+ read_unlock(&tasklist_lock);
49295+ rcu_read_unlock();
49296+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
49297+ return 0;
49298+ }
49299+ }
49300+ read_unlock(&tasklist_lock);
49301+ rcu_read_unlock();
49302+#endif
49303+ return 1;
49304+}
49305+
49306+int
49307+gr_handle_chroot_nice(void)
49308+{
49309+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
49310+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
49311+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
49312+ return -EPERM;
49313+ }
49314+#endif
49315+ return 0;
49316+}
49317+
49318+int
49319+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
49320+{
49321+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
49322+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
49323+ && proc_is_chrooted(current)) {
49324+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
49325+ return -EACCES;
49326+ }
49327+#endif
49328+ return 0;
49329+}
49330+
49331+int
49332+gr_handle_chroot_rawio(const struct inode *inode)
49333+{
49334+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
49335+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
49336+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
49337+ return 1;
49338+#endif
49339+ return 0;
49340+}
49341+
49342+int
49343+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
49344+{
49345+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
49346+ struct task_struct *p;
49347+ int ret = 0;
49348+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
49349+ return ret;
49350+
49351+ read_lock(&tasklist_lock);
49352+ do_each_pid_task(pid, type, p) {
49353+ if (!have_same_root(current, p)) {
49354+ ret = 1;
49355+ goto out;
49356+ }
49357+ } while_each_pid_task(pid, type, p);
49358+out:
49359+ read_unlock(&tasklist_lock);
49360+ return ret;
49361+#endif
49362+ return 0;
49363+}
49364+
49365+int
49366+gr_pid_is_chrooted(struct task_struct *p)
49367+{
49368+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
49369+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
49370+ return 0;
49371+
49372+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
49373+ !have_same_root(current, p)) {
49374+ return 1;
49375+ }
49376+#endif
49377+ return 0;
49378+}
49379+
49380+EXPORT_SYMBOL(gr_pid_is_chrooted);
49381+
49382+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
49383+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
49384+{
49385+ struct dentry *dentry = (struct dentry *)u_dentry;
49386+ struct vfsmount *mnt = (struct vfsmount *)u_mnt;
49387+ struct dentry *realroot;
49388+ struct vfsmount *realrootmnt;
49389+ struct dentry *currentroot;
49390+ struct vfsmount *currentmnt;
49391+ struct task_struct *reaper = &init_task;
49392+ int ret = 1;
49393+
49394+ read_lock(&reaper->fs->lock);
49395+ realrootmnt = mntget(reaper->fs->root.mnt);
49396+ realroot = dget(reaper->fs->root.dentry);
49397+ read_unlock(&reaper->fs->lock);
49398+
49399+ read_lock(&current->fs->lock);
49400+ currentmnt = mntget(current->fs->root.mnt);
49401+ currentroot = dget(current->fs->root.dentry);
49402+ read_unlock(&current->fs->lock);
49403+
49404+ spin_lock(&dcache_lock);
49405+ for (;;) {
49406+ if (unlikely((dentry == realroot && mnt == realrootmnt)
49407+ || (dentry == currentroot && mnt == currentmnt)))
49408+ break;
49409+ if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
49410+ if (mnt->mnt_parent == mnt)
49411+ break;
49412+ dentry = mnt->mnt_mountpoint;
49413+ mnt = mnt->mnt_parent;
49414+ continue;
49415+ }
49416+ dentry = dentry->d_parent;
49417+ }
49418+ spin_unlock(&dcache_lock);
49419+
49420+ dput(currentroot);
49421+ mntput(currentmnt);
49422+
49423+ /* access is outside of chroot */
49424+ if (dentry == realroot && mnt == realrootmnt)
49425+ ret = 0;
49426+
49427+ dput(realroot);
49428+ mntput(realrootmnt);
49429+ return ret;
49430+}
49431+#endif
49432+
49433+int
49434+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
49435+{
49436+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
49437+ if (!grsec_enable_chroot_fchdir)
49438+ return 1;
49439+
49440+ if (!proc_is_chrooted(current))
49441+ return 1;
49442+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
49443+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
49444+ return 0;
49445+ }
49446+#endif
49447+ return 1;
49448+}
49449+
49450+int
49451+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
49452+ const time_t shm_createtime)
49453+{
49454+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
49455+ struct pid *pid = NULL;
49456+ time_t starttime;
49457+
49458+ if (unlikely(!grsec_enable_chroot_shmat))
49459+ return 1;
49460+
49461+ if (likely(!proc_is_chrooted(current)))
49462+ return 1;
49463+
49464+ rcu_read_lock();
49465+ read_lock(&tasklist_lock);
49466+
49467+ pid = find_vpid(shm_cprid);
49468+ if (pid) {
49469+ struct task_struct *p;
49470+ p = pid_task(pid, PIDTYPE_PID);
49471+ if (p == NULL)
49472+ goto unlock;
49473+ starttime = p->start_time.tv_sec;
49474+ if (unlikely(!have_same_root(current, p) &&
49475+ time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime))) {
49476+ read_unlock(&tasklist_lock);
49477+ rcu_read_unlock();
49478+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
49479+ return 0;
49480+ }
49481+ } else {
49482+ pid = find_vpid(shm_lapid);
49483+ if (pid) {
49484+ struct task_struct *p;
49485+ p = pid_task(pid, PIDTYPE_PID);
49486+ if (p == NULL)
49487+ goto unlock;
49488+ if (unlikely(!have_same_root(current, p))) {
49489+ read_unlock(&tasklist_lock);
49490+ rcu_read_unlock();
49491+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
49492+ return 0;
49493+ }
49494+ }
49495+ }
49496+
49497+unlock:
49498+ read_unlock(&tasklist_lock);
49499+ rcu_read_unlock();
49500+#endif
49501+ return 1;
49502+}
49503+
49504+void
49505+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
49506+{
49507+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
49508+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
49509+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
49510+#endif
49511+ return;
49512+}
49513+
49514+int
49515+gr_handle_chroot_mknod(const struct dentry *dentry,
49516+ const struct vfsmount *mnt, const int mode)
49517+{
49518+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
49519+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
49520+ proc_is_chrooted(current)) {
49521+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
49522+ return -EPERM;
49523+ }
49524+#endif
49525+ return 0;
49526+}
49527+
49528+int
49529+gr_handle_chroot_mount(const struct dentry *dentry,
49530+ const struct vfsmount *mnt, const char *dev_name)
49531+{
49532+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
49533+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
49534+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name, dentry, mnt);
49535+ return -EPERM;
49536+ }
49537+#endif
49538+ return 0;
49539+}
49540+
49541+int
49542+gr_handle_chroot_pivot(void)
49543+{
49544+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
49545+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
49546+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
49547+ return -EPERM;
49548+ }
49549+#endif
49550+ return 0;
49551+}
49552+
49553+int
49554+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
49555+{
49556+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
49557+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
49558+ !gr_is_outside_chroot(dentry, mnt)) {
49559+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
49560+ return -EPERM;
49561+ }
49562+#endif
49563+ return 0;
49564+}
49565+
49566+int
49567+gr_handle_chroot_caps(struct path *path)
49568+{
49569+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
49570+ if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
49571+ (init_task.fs->root.dentry != path->dentry) &&
49572+ (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
49573+
49574+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
49575+ const struct cred *old = current_cred();
49576+ struct cred *new = prepare_creds();
49577+ if (new == NULL)
49578+ return 1;
49579+
49580+ new->cap_permitted = cap_drop(old->cap_permitted,
49581+ chroot_caps);
49582+ new->cap_inheritable = cap_drop(old->cap_inheritable,
49583+ chroot_caps);
49584+ new->cap_effective = cap_drop(old->cap_effective,
49585+ chroot_caps);
49586+
49587+ commit_creds(new);
49588+
49589+ return 0;
49590+ }
49591+#endif
49592+ return 0;
49593+}
49594+
49595+int
49596+gr_handle_chroot_sysctl(const int op)
49597+{
49598+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
49599+ if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
49600+ && (op & MAY_WRITE))
49601+ return -EACCES;
49602+#endif
49603+ return 0;
49604+}
49605+
49606+void
49607+gr_handle_chroot_chdir(struct path *path)
49608+{
49609+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
49610+ if (grsec_enable_chroot_chdir)
49611+ set_fs_pwd(current->fs, path);
49612+#endif
49613+ return;
49614+}
49615+
49616+int
49617+gr_handle_chroot_chmod(const struct dentry *dentry,
49618+ const struct vfsmount *mnt, const int mode)
49619+{
49620+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
49621+ /* allow chmod +s on directories, but not on files */
49622+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
49623+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
49624+ proc_is_chrooted(current)) {
49625+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
49626+ return -EPERM;
49627+ }
49628+#endif
49629+ return 0;
49630+}
49631+
49632+#ifdef CONFIG_SECURITY
49633+EXPORT_SYMBOL(gr_handle_chroot_caps);
49634+#endif
49635diff -urNp linux-2.6.32.41/grsecurity/grsec_disabled.c linux-2.6.32.41/grsecurity/grsec_disabled.c
49636--- linux-2.6.32.41/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
49637+++ linux-2.6.32.41/grsecurity/grsec_disabled.c 2011-04-17 15:56:46.000000000 -0400
49638@@ -0,0 +1,447 @@
49639+#include <linux/kernel.h>
49640+#include <linux/module.h>
49641+#include <linux/sched.h>
49642+#include <linux/file.h>
49643+#include <linux/fs.h>
49644+#include <linux/kdev_t.h>
49645+#include <linux/net.h>
49646+#include <linux/in.h>
49647+#include <linux/ip.h>
49648+#include <linux/skbuff.h>
49649+#include <linux/sysctl.h>
49650+
49651+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
49652+void
49653+pax_set_initial_flags(struct linux_binprm *bprm)
49654+{
49655+ return;
49656+}
49657+#endif
49658+
49659+#ifdef CONFIG_SYSCTL
49660+__u32
49661+gr_handle_sysctl(const struct ctl_table * table, const int op)
49662+{
49663+ return 0;
49664+}
49665+#endif
49666+
49667+#ifdef CONFIG_TASKSTATS
49668+int gr_is_taskstats_denied(int pid)
49669+{
49670+ return 0;
49671+}
49672+#endif
49673+
49674+int
49675+gr_acl_is_enabled(void)
49676+{
49677+ return 0;
49678+}
49679+
49680+int
49681+gr_handle_rawio(const struct inode *inode)
49682+{
49683+ return 0;
49684+}
49685+
49686+void
49687+gr_acl_handle_psacct(struct task_struct *task, const long code)
49688+{
49689+ return;
49690+}
49691+
49692+int
49693+gr_handle_ptrace(struct task_struct *task, const long request)
49694+{
49695+ return 0;
49696+}
49697+
49698+int
49699+gr_handle_proc_ptrace(struct task_struct *task)
49700+{
49701+ return 0;
49702+}
49703+
49704+void
49705+gr_learn_resource(const struct task_struct *task,
49706+ const int res, const unsigned long wanted, const int gt)
49707+{
49708+ return;
49709+}
49710+
49711+int
49712+gr_set_acls(const int type)
49713+{
49714+ return 0;
49715+}
49716+
49717+int
49718+gr_check_hidden_task(const struct task_struct *tsk)
49719+{
49720+ return 0;
49721+}
49722+
49723+int
49724+gr_check_protected_task(const struct task_struct *task)
49725+{
49726+ return 0;
49727+}
49728+
49729+int
49730+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
49731+{
49732+ return 0;
49733+}
49734+
49735+void
49736+gr_copy_label(struct task_struct *tsk)
49737+{
49738+ return;
49739+}
49740+
49741+void
49742+gr_set_pax_flags(struct task_struct *task)
49743+{
49744+ return;
49745+}
49746+
49747+int
49748+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
49749+ const int unsafe_share)
49750+{
49751+ return 0;
49752+}
49753+
49754+void
49755+gr_handle_delete(const ino_t ino, const dev_t dev)
49756+{
49757+ return;
49758+}
49759+
49760+void
49761+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
49762+{
49763+ return;
49764+}
49765+
49766+void
49767+gr_handle_crash(struct task_struct *task, const int sig)
49768+{
49769+ return;
49770+}
49771+
49772+int
49773+gr_check_crash_exec(const struct file *filp)
49774+{
49775+ return 0;
49776+}
49777+
49778+int
49779+gr_check_crash_uid(const uid_t uid)
49780+{
49781+ return 0;
49782+}
49783+
49784+void
49785+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
49786+ struct dentry *old_dentry,
49787+ struct dentry *new_dentry,
49788+ struct vfsmount *mnt, const __u8 replace)
49789+{
49790+ return;
49791+}
49792+
49793+int
49794+gr_search_socket(const int family, const int type, const int protocol)
49795+{
49796+ return 1;
49797+}
49798+
49799+int
49800+gr_search_connectbind(const int mode, const struct socket *sock,
49801+ const struct sockaddr_in *addr)
49802+{
49803+ return 0;
49804+}
49805+
49806+int
49807+gr_is_capable(const int cap)
49808+{
49809+ return 1;
49810+}
49811+
49812+int
49813+gr_is_capable_nolog(const int cap)
49814+{
49815+ return 1;
49816+}
49817+
49818+void
49819+gr_handle_alertkill(struct task_struct *task)
49820+{
49821+ return;
49822+}
49823+
49824+__u32
49825+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
49826+{
49827+ return 1;
49828+}
49829+
49830+__u32
49831+gr_acl_handle_hidden_file(const struct dentry * dentry,
49832+ const struct vfsmount * mnt)
49833+{
49834+ return 1;
49835+}
49836+
49837+__u32
49838+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
49839+ const int fmode)
49840+{
49841+ return 1;
49842+}
49843+
49844+__u32
49845+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
49846+{
49847+ return 1;
49848+}
49849+
49850+__u32
49851+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
49852+{
49853+ return 1;
49854+}
49855+
49856+int
49857+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
49858+ unsigned int *vm_flags)
49859+{
49860+ return 1;
49861+}
49862+
49863+__u32
49864+gr_acl_handle_truncate(const struct dentry * dentry,
49865+ const struct vfsmount * mnt)
49866+{
49867+ return 1;
49868+}
49869+
49870+__u32
49871+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
49872+{
49873+ return 1;
49874+}
49875+
49876+__u32
49877+gr_acl_handle_access(const struct dentry * dentry,
49878+ const struct vfsmount * mnt, const int fmode)
49879+{
49880+ return 1;
49881+}
49882+
49883+__u32
49884+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
49885+ mode_t mode)
49886+{
49887+ return 1;
49888+}
49889+
49890+__u32
49891+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
49892+ mode_t mode)
49893+{
49894+ return 1;
49895+}
49896+
49897+__u32
49898+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
49899+{
49900+ return 1;
49901+}
49902+
49903+__u32
49904+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
49905+{
49906+ return 1;
49907+}
49908+
49909+void
49910+grsecurity_init(void)
49911+{
49912+ return;
49913+}
49914+
49915+__u32
49916+gr_acl_handle_mknod(const struct dentry * new_dentry,
49917+ const struct dentry * parent_dentry,
49918+ const struct vfsmount * parent_mnt,
49919+ const int mode)
49920+{
49921+ return 1;
49922+}
49923+
49924+__u32
49925+gr_acl_handle_mkdir(const struct dentry * new_dentry,
49926+ const struct dentry * parent_dentry,
49927+ const struct vfsmount * parent_mnt)
49928+{
49929+ return 1;
49930+}
49931+
49932+__u32
49933+gr_acl_handle_symlink(const struct dentry * new_dentry,
49934+ const struct dentry * parent_dentry,
49935+ const struct vfsmount * parent_mnt, const char *from)
49936+{
49937+ return 1;
49938+}
49939+
49940+__u32
49941+gr_acl_handle_link(const struct dentry * new_dentry,
49942+ const struct dentry * parent_dentry,
49943+ const struct vfsmount * parent_mnt,
49944+ const struct dentry * old_dentry,
49945+ const struct vfsmount * old_mnt, const char *to)
49946+{
49947+ return 1;
49948+}
49949+
49950+int
49951+gr_acl_handle_rename(const struct dentry *new_dentry,
49952+ const struct dentry *parent_dentry,
49953+ const struct vfsmount *parent_mnt,
49954+ const struct dentry *old_dentry,
49955+ const struct inode *old_parent_inode,
49956+ const struct vfsmount *old_mnt, const char *newname)
49957+{
49958+ return 0;
49959+}
49960+
49961+int
49962+gr_acl_handle_filldir(const struct file *file, const char *name,
49963+ const int namelen, const ino_t ino)
49964+{
49965+ return 1;
49966+}
49967+
49968+int
49969+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
49970+ const time_t shm_createtime, const uid_t cuid, const int shmid)
49971+{
49972+ return 1;
49973+}
49974+
49975+int
49976+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
49977+{
49978+ return 0;
49979+}
49980+
49981+int
49982+gr_search_accept(const struct socket *sock)
49983+{
49984+ return 0;
49985+}
49986+
49987+int
49988+gr_search_listen(const struct socket *sock)
49989+{
49990+ return 0;
49991+}
49992+
49993+int
49994+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
49995+{
49996+ return 0;
49997+}
49998+
49999+__u32
50000+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
50001+{
50002+ return 1;
50003+}
50004+
50005+__u32
50006+gr_acl_handle_creat(const struct dentry * dentry,
50007+ const struct dentry * p_dentry,
50008+ const struct vfsmount * p_mnt, const int fmode,
50009+ const int imode)
50010+{
50011+ return 1;
50012+}
50013+
50014+void
50015+gr_acl_handle_exit(void)
50016+{
50017+ return;
50018+}
50019+
50020+int
50021+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
50022+{
50023+ return 1;
50024+}
50025+
50026+void
50027+gr_set_role_label(const uid_t uid, const gid_t gid)
50028+{
50029+ return;
50030+}
50031+
50032+int
50033+gr_acl_handle_procpidmem(const struct task_struct *task)
50034+{
50035+ return 0;
50036+}
50037+
50038+int
50039+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
50040+{
50041+ return 0;
50042+}
50043+
50044+int
50045+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
50046+{
50047+ return 0;
50048+}
50049+
50050+void
50051+gr_set_kernel_label(struct task_struct *task)
50052+{
50053+ return;
50054+}
50055+
50056+int
50057+gr_check_user_change(int real, int effective, int fs)
50058+{
50059+ return 0;
50060+}
50061+
50062+int
50063+gr_check_group_change(int real, int effective, int fs)
50064+{
50065+ return 0;
50066+}
50067+
50068+int gr_acl_enable_at_secure(void)
50069+{
50070+ return 0;
50071+}
50072+
50073+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
50074+{
50075+ return dentry->d_inode->i_sb->s_dev;
50076+}
50077+
50078+EXPORT_SYMBOL(gr_is_capable);
50079+EXPORT_SYMBOL(gr_is_capable_nolog);
50080+EXPORT_SYMBOL(gr_learn_resource);
50081+EXPORT_SYMBOL(gr_set_kernel_label);
50082+#ifdef CONFIG_SECURITY
50083+EXPORT_SYMBOL(gr_check_user_change);
50084+EXPORT_SYMBOL(gr_check_group_change);
50085+#endif
50086diff -urNp linux-2.6.32.41/grsecurity/grsec_exec.c linux-2.6.32.41/grsecurity/grsec_exec.c
50087--- linux-2.6.32.41/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
50088+++ linux-2.6.32.41/grsecurity/grsec_exec.c 2011-04-17 15:56:46.000000000 -0400
50089@@ -0,0 +1,148 @@
50090+#include <linux/kernel.h>
50091+#include <linux/sched.h>
50092+#include <linux/file.h>
50093+#include <linux/binfmts.h>
50094+#include <linux/smp_lock.h>
50095+#include <linux/fs.h>
50096+#include <linux/types.h>
50097+#include <linux/grdefs.h>
50098+#include <linux/grinternal.h>
50099+#include <linux/capability.h>
50100+#include <linux/compat.h>
50101+
50102+#include <asm/uaccess.h>
50103+
50104+#ifdef CONFIG_GRKERNSEC_EXECLOG
50105+static char gr_exec_arg_buf[132];
50106+static DEFINE_MUTEX(gr_exec_arg_mutex);
50107+#endif
50108+
50109+int
50110+gr_handle_nproc(void)
50111+{
50112+#ifdef CONFIG_GRKERNSEC_EXECVE
50113+ const struct cred *cred = current_cred();
50114+ if (grsec_enable_execve && cred->user &&
50115+ (atomic_read(&cred->user->processes) >
50116+ current->signal->rlim[RLIMIT_NPROC].rlim_cur) &&
50117+ !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE)) {
50118+ gr_log_noargs(GR_DONT_AUDIT, GR_NPROC_MSG);
50119+ return -EAGAIN;
50120+ }
50121+#endif
50122+ return 0;
50123+}
50124+
50125+void
50126+gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
50127+{
50128+#ifdef CONFIG_GRKERNSEC_EXECLOG
50129+ char *grarg = gr_exec_arg_buf;
50130+ unsigned int i, x, execlen = 0;
50131+ char c;
50132+
50133+ if (!((grsec_enable_execlog && grsec_enable_group &&
50134+ in_group_p(grsec_audit_gid))
50135+ || (grsec_enable_execlog && !grsec_enable_group)))
50136+ return;
50137+
50138+ mutex_lock(&gr_exec_arg_mutex);
50139+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
50140+
50141+ if (unlikely(argv == NULL))
50142+ goto log;
50143+
50144+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
50145+ const char __user *p;
50146+ unsigned int len;
50147+
50148+ if (copy_from_user(&p, argv + i, sizeof(p)))
50149+ goto log;
50150+ if (!p)
50151+ goto log;
50152+ len = strnlen_user(p, 128 - execlen);
50153+ if (len > 128 - execlen)
50154+ len = 128 - execlen;
50155+ else if (len > 0)
50156+ len--;
50157+ if (copy_from_user(grarg + execlen, p, len))
50158+ goto log;
50159+
50160+ /* rewrite unprintable characters */
50161+ for (x = 0; x < len; x++) {
50162+ c = *(grarg + execlen + x);
50163+ if (c < 32 || c > 126)
50164+ *(grarg + execlen + x) = ' ';
50165+ }
50166+
50167+ execlen += len;
50168+ *(grarg + execlen) = ' ';
50169+ *(grarg + execlen + 1) = '\0';
50170+ execlen++;
50171+ }
50172+
50173+ log:
50174+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
50175+ bprm->file->f_path.mnt, grarg);
50176+ mutex_unlock(&gr_exec_arg_mutex);
50177+#endif
50178+ return;
50179+}
50180+
50181+#ifdef CONFIG_COMPAT
50182+void
50183+gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
50184+{
50185+#ifdef CONFIG_GRKERNSEC_EXECLOG
50186+ char *grarg = gr_exec_arg_buf;
50187+ unsigned int i, x, execlen = 0;
50188+ char c;
50189+
50190+ if (!((grsec_enable_execlog && grsec_enable_group &&
50191+ in_group_p(grsec_audit_gid))
50192+ || (grsec_enable_execlog && !grsec_enable_group)))
50193+ return;
50194+
50195+ mutex_lock(&gr_exec_arg_mutex);
50196+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
50197+
50198+ if (unlikely(argv == NULL))
50199+ goto log;
50200+
50201+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
50202+ compat_uptr_t p;
50203+ unsigned int len;
50204+
50205+ if (get_user(p, argv + i))
50206+ goto log;
50207+ len = strnlen_user(compat_ptr(p), 128 - execlen);
50208+ if (len > 128 - execlen)
50209+ len = 128 - execlen;
50210+ else if (len > 0)
50211+ len--;
50212+ else
50213+ goto log;
50214+ if (copy_from_user(grarg + execlen, compat_ptr(p), len))
50215+ goto log;
50216+
50217+ /* rewrite unprintable characters */
50218+ for (x = 0; x < len; x++) {
50219+ c = *(grarg + execlen + x);
50220+ if (c < 32 || c > 126)
50221+ *(grarg + execlen + x) = ' ';
50222+ }
50223+
50224+ execlen += len;
50225+ *(grarg + execlen) = ' ';
50226+ *(grarg + execlen + 1) = '\0';
50227+ execlen++;
50228+ }
50229+
50230+ log:
50231+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
50232+ bprm->file->f_path.mnt, grarg);
50233+ mutex_unlock(&gr_exec_arg_mutex);
50234+#endif
50235+ return;
50236+}
50237+#endif
50238diff -urNp linux-2.6.32.41/grsecurity/grsec_fifo.c linux-2.6.32.41/grsecurity/grsec_fifo.c
50239--- linux-2.6.32.41/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
50240+++ linux-2.6.32.41/grsecurity/grsec_fifo.c 2011-04-17 15:56:46.000000000 -0400
50241@@ -0,0 +1,24 @@
50242+#include <linux/kernel.h>
50243+#include <linux/sched.h>
50244+#include <linux/fs.h>
50245+#include <linux/file.h>
50246+#include <linux/grinternal.h>
50247+
50248+int
50249+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
50250+ const struct dentry *dir, const int flag, const int acc_mode)
50251+{
50252+#ifdef CONFIG_GRKERNSEC_FIFO
50253+ const struct cred *cred = current_cred();
50254+
50255+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
50256+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
50257+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
50258+ (cred->fsuid != dentry->d_inode->i_uid)) {
50259+ if (!inode_permission(dentry->d_inode, acc_mode))
50260+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
50261+ return -EACCES;
50262+ }
50263+#endif
50264+ return 0;
50265+}
50266diff -urNp linux-2.6.32.41/grsecurity/grsec_fork.c linux-2.6.32.41/grsecurity/grsec_fork.c
50267--- linux-2.6.32.41/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
50268+++ linux-2.6.32.41/grsecurity/grsec_fork.c 2011-04-17 15:56:46.000000000 -0400
50269@@ -0,0 +1,23 @@
50270+#include <linux/kernel.h>
50271+#include <linux/sched.h>
50272+#include <linux/grsecurity.h>
50273+#include <linux/grinternal.h>
50274+#include <linux/errno.h>
50275+
50276+void
50277+gr_log_forkfail(const int retval)
50278+{
50279+#ifdef CONFIG_GRKERNSEC_FORKFAIL
50280+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
50281+ switch (retval) {
50282+ case -EAGAIN:
50283+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
50284+ break;
50285+ case -ENOMEM:
50286+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
50287+ break;
50288+ }
50289+ }
50290+#endif
50291+ return;
50292+}
50293diff -urNp linux-2.6.32.41/grsecurity/grsec_init.c linux-2.6.32.41/grsecurity/grsec_init.c
50294--- linux-2.6.32.41/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
50295+++ linux-2.6.32.41/grsecurity/grsec_init.c 2011-04-17 15:56:46.000000000 -0400
50296@@ -0,0 +1,270 @@
50297+#include <linux/kernel.h>
50298+#include <linux/sched.h>
50299+#include <linux/mm.h>
50300+#include <linux/smp_lock.h>
50301+#include <linux/gracl.h>
50302+#include <linux/slab.h>
50303+#include <linux/vmalloc.h>
50304+#include <linux/percpu.h>
50305+#include <linux/module.h>
50306+
50307+int grsec_enable_link;
50308+int grsec_enable_dmesg;
50309+int grsec_enable_harden_ptrace;
50310+int grsec_enable_fifo;
50311+int grsec_enable_execve;
50312+int grsec_enable_execlog;
50313+int grsec_enable_signal;
50314+int grsec_enable_forkfail;
50315+int grsec_enable_audit_ptrace;
50316+int grsec_enable_time;
50317+int grsec_enable_audit_textrel;
50318+int grsec_enable_group;
50319+int grsec_audit_gid;
50320+int grsec_enable_chdir;
50321+int grsec_enable_mount;
50322+int grsec_enable_rofs;
50323+int grsec_enable_chroot_findtask;
50324+int grsec_enable_chroot_mount;
50325+int grsec_enable_chroot_shmat;
50326+int grsec_enable_chroot_fchdir;
50327+int grsec_enable_chroot_double;
50328+int grsec_enable_chroot_pivot;
50329+int grsec_enable_chroot_chdir;
50330+int grsec_enable_chroot_chmod;
50331+int grsec_enable_chroot_mknod;
50332+int grsec_enable_chroot_nice;
50333+int grsec_enable_chroot_execlog;
50334+int grsec_enable_chroot_caps;
50335+int grsec_enable_chroot_sysctl;
50336+int grsec_enable_chroot_unix;
50337+int grsec_enable_tpe;
50338+int grsec_tpe_gid;
50339+int grsec_enable_blackhole;
50340+#ifdef CONFIG_IPV6_MODULE
50341+EXPORT_SYMBOL(grsec_enable_blackhole);
50342+#endif
50343+int grsec_lastack_retries;
50344+int grsec_enable_tpe_all;
50345+int grsec_enable_tpe_invert;
50346+int grsec_enable_socket_all;
50347+int grsec_socket_all_gid;
50348+int grsec_enable_socket_client;
50349+int grsec_socket_client_gid;
50350+int grsec_enable_socket_server;
50351+int grsec_socket_server_gid;
50352+int grsec_resource_logging;
50353+int grsec_disable_privio;
50354+int grsec_enable_log_rwxmaps;
50355+int grsec_lock;
50356+
50357+DEFINE_SPINLOCK(grsec_alert_lock);
50358+unsigned long grsec_alert_wtime = 0;
50359+unsigned long grsec_alert_fyet = 0;
50360+
50361+DEFINE_SPINLOCK(grsec_audit_lock);
50362+
50363+DEFINE_RWLOCK(grsec_exec_file_lock);
50364+
50365+char *gr_shared_page[4];
50366+
50367+char *gr_alert_log_fmt;
50368+char *gr_audit_log_fmt;
50369+char *gr_alert_log_buf;
50370+char *gr_audit_log_buf;
50371+
50372+extern struct gr_arg *gr_usermode;
50373+extern unsigned char *gr_system_salt;
50374+extern unsigned char *gr_system_sum;
50375+
50376+void __init
50377+grsecurity_init(void)
50378+{
50379+ int j;
50380+ /* create the per-cpu shared pages */
50381+
50382+#ifdef CONFIG_X86
50383+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
50384+#endif
50385+
50386+ for (j = 0; j < 4; j++) {
50387+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
50388+ if (gr_shared_page[j] == NULL) {
50389+ panic("Unable to allocate grsecurity shared page");
50390+ return;
50391+ }
50392+ }
50393+
50394+ /* allocate log buffers */
50395+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
50396+ if (!gr_alert_log_fmt) {
50397+ panic("Unable to allocate grsecurity alert log format buffer");
50398+ return;
50399+ }
50400+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
50401+ if (!gr_audit_log_fmt) {
50402+ panic("Unable to allocate grsecurity audit log format buffer");
50403+ return;
50404+ }
50405+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
50406+ if (!gr_alert_log_buf) {
50407+ panic("Unable to allocate grsecurity alert log buffer");
50408+ return;
50409+ }
50410+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
50411+ if (!gr_audit_log_buf) {
50412+ panic("Unable to allocate grsecurity audit log buffer");
50413+ return;
50414+ }
50415+
50416+ /* allocate memory for authentication structure */
50417+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
50418+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
50419+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
50420+
50421+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
50422+ panic("Unable to allocate grsecurity authentication structure");
50423+ return;
50424+ }
50425+
50426+
50427+#ifdef CONFIG_GRKERNSEC_IO
50428+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
50429+ grsec_disable_privio = 1;
50430+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
50431+ grsec_disable_privio = 1;
50432+#else
50433+ grsec_disable_privio = 0;
50434+#endif
50435+#endif
50436+
50437+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
50438+ /* for backward compatibility, tpe_invert always defaults to on if
50439+ enabled in the kernel
50440+ */
50441+ grsec_enable_tpe_invert = 1;
50442+#endif
50443+
50444+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
50445+#ifndef CONFIG_GRKERNSEC_SYSCTL
50446+ grsec_lock = 1;
50447+#endif
50448+
50449+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
50450+ grsec_enable_audit_textrel = 1;
50451+#endif
50452+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
50453+ grsec_enable_log_rwxmaps = 1;
50454+#endif
50455+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
50456+ grsec_enable_group = 1;
50457+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
50458+#endif
50459+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
50460+ grsec_enable_chdir = 1;
50461+#endif
50462+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
50463+ grsec_enable_harden_ptrace = 1;
50464+#endif
50465+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
50466+ grsec_enable_mount = 1;
50467+#endif
50468+#ifdef CONFIG_GRKERNSEC_LINK
50469+ grsec_enable_link = 1;
50470+#endif
50471+#ifdef CONFIG_GRKERNSEC_DMESG
50472+ grsec_enable_dmesg = 1;
50473+#endif
50474+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
50475+ grsec_enable_blackhole = 1;
50476+ grsec_lastack_retries = 4;
50477+#endif
50478+#ifdef CONFIG_GRKERNSEC_FIFO
50479+ grsec_enable_fifo = 1;
50480+#endif
50481+#ifdef CONFIG_GRKERNSEC_EXECVE
50482+ grsec_enable_execve = 1;
50483+#endif
50484+#ifdef CONFIG_GRKERNSEC_EXECLOG
50485+ grsec_enable_execlog = 1;
50486+#endif
50487+#ifdef CONFIG_GRKERNSEC_SIGNAL
50488+ grsec_enable_signal = 1;
50489+#endif
50490+#ifdef CONFIG_GRKERNSEC_FORKFAIL
50491+ grsec_enable_forkfail = 1;
50492+#endif
50493+#ifdef CONFIG_GRKERNSEC_TIME
50494+ grsec_enable_time = 1;
50495+#endif
50496+#ifdef CONFIG_GRKERNSEC_RESLOG
50497+ grsec_resource_logging = 1;
50498+#endif
50499+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
50500+ grsec_enable_chroot_findtask = 1;
50501+#endif
50502+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
50503+ grsec_enable_chroot_unix = 1;
50504+#endif
50505+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
50506+ grsec_enable_chroot_mount = 1;
50507+#endif
50508+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
50509+ grsec_enable_chroot_fchdir = 1;
50510+#endif
50511+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
50512+ grsec_enable_chroot_shmat = 1;
50513+#endif
50514+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
50515+ grsec_enable_audit_ptrace = 1;
50516+#endif
50517+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
50518+ grsec_enable_chroot_double = 1;
50519+#endif
50520+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
50521+ grsec_enable_chroot_pivot = 1;
50522+#endif
50523+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
50524+ grsec_enable_chroot_chdir = 1;
50525+#endif
50526+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
50527+ grsec_enable_chroot_chmod = 1;
50528+#endif
50529+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
50530+ grsec_enable_chroot_mknod = 1;
50531+#endif
50532+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
50533+ grsec_enable_chroot_nice = 1;
50534+#endif
50535+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
50536+ grsec_enable_chroot_execlog = 1;
50537+#endif
50538+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50539+ grsec_enable_chroot_caps = 1;
50540+#endif
50541+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
50542+ grsec_enable_chroot_sysctl = 1;
50543+#endif
50544+#ifdef CONFIG_GRKERNSEC_TPE
50545+ grsec_enable_tpe = 1;
50546+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
50547+#ifdef CONFIG_GRKERNSEC_TPE_ALL
50548+ grsec_enable_tpe_all = 1;
50549+#endif
50550+#endif
50551+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
50552+ grsec_enable_socket_all = 1;
50553+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
50554+#endif
50555+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
50556+ grsec_enable_socket_client = 1;
50557+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
50558+#endif
50559+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
50560+ grsec_enable_socket_server = 1;
50561+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
50562+#endif
50563+#endif
50564+
50565+ return;
50566+}
50567diff -urNp linux-2.6.32.41/grsecurity/grsec_link.c linux-2.6.32.41/grsecurity/grsec_link.c
50568--- linux-2.6.32.41/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
50569+++ linux-2.6.32.41/grsecurity/grsec_link.c 2011-04-17 15:56:46.000000000 -0400
50570@@ -0,0 +1,43 @@
50571+#include <linux/kernel.h>
50572+#include <linux/sched.h>
50573+#include <linux/fs.h>
50574+#include <linux/file.h>
50575+#include <linux/grinternal.h>
50576+
50577+int
50578+gr_handle_follow_link(const struct inode *parent,
50579+ const struct inode *inode,
50580+ const struct dentry *dentry, const struct vfsmount *mnt)
50581+{
50582+#ifdef CONFIG_GRKERNSEC_LINK
50583+ const struct cred *cred = current_cred();
50584+
50585+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
50586+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
50587+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
50588+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
50589+ return -EACCES;
50590+ }
50591+#endif
50592+ return 0;
50593+}
50594+
50595+int
50596+gr_handle_hardlink(const struct dentry *dentry,
50597+ const struct vfsmount *mnt,
50598+ struct inode *inode, const int mode, const char *to)
50599+{
50600+#ifdef CONFIG_GRKERNSEC_LINK
50601+ const struct cred *cred = current_cred();
50602+
50603+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
50604+ (!S_ISREG(mode) || (mode & S_ISUID) ||
50605+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
50606+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
50607+ !capable(CAP_FOWNER) && cred->uid) {
50608+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
50609+ return -EPERM;
50610+ }
50611+#endif
50612+ return 0;
50613+}
50614diff -urNp linux-2.6.32.41/grsecurity/grsec_log.c linux-2.6.32.41/grsecurity/grsec_log.c
50615--- linux-2.6.32.41/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
50616+++ linux-2.6.32.41/grsecurity/grsec_log.c 2011-05-10 21:58:49.000000000 -0400
50617@@ -0,0 +1,310 @@
50618+#include <linux/kernel.h>
50619+#include <linux/sched.h>
50620+#include <linux/file.h>
50621+#include <linux/tty.h>
50622+#include <linux/fs.h>
50623+#include <linux/grinternal.h>
50624+
50625+#ifdef CONFIG_TREE_PREEMPT_RCU
50626+#define DISABLE_PREEMPT() preempt_disable()
50627+#define ENABLE_PREEMPT() preempt_enable()
50628+#else
50629+#define DISABLE_PREEMPT()
50630+#define ENABLE_PREEMPT()
50631+#endif
50632+
50633+#define BEGIN_LOCKS(x) \
50634+ DISABLE_PREEMPT(); \
50635+ rcu_read_lock(); \
50636+ read_lock(&tasklist_lock); \
50637+ read_lock(&grsec_exec_file_lock); \
50638+ if (x != GR_DO_AUDIT) \
50639+ spin_lock(&grsec_alert_lock); \
50640+ else \
50641+ spin_lock(&grsec_audit_lock)
50642+
50643+#define END_LOCKS(x) \
50644+ if (x != GR_DO_AUDIT) \
50645+ spin_unlock(&grsec_alert_lock); \
50646+ else \
50647+ spin_unlock(&grsec_audit_lock); \
50648+ read_unlock(&grsec_exec_file_lock); \
50649+ read_unlock(&tasklist_lock); \
50650+ rcu_read_unlock(); \
50651+ ENABLE_PREEMPT(); \
50652+ if (x == GR_DONT_AUDIT) \
50653+ gr_handle_alertkill(current)
50654+
50655+enum {
50656+ FLOODING,
50657+ NO_FLOODING
50658+};
50659+
50660+extern char *gr_alert_log_fmt;
50661+extern char *gr_audit_log_fmt;
50662+extern char *gr_alert_log_buf;
50663+extern char *gr_audit_log_buf;
50664+
50665+static int gr_log_start(int audit)
50666+{
50667+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
50668+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
50669+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
50670+
50671+ if (audit == GR_DO_AUDIT)
50672+ goto set_fmt;
50673+
50674+ if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
50675+ grsec_alert_wtime = jiffies;
50676+ grsec_alert_fyet = 0;
50677+ } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
50678+ grsec_alert_fyet++;
50679+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
50680+ grsec_alert_wtime = jiffies;
50681+ grsec_alert_fyet++;
50682+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
50683+ return FLOODING;
50684+ } else return FLOODING;
50685+
50686+set_fmt:
50687+ memset(buf, 0, PAGE_SIZE);
50688+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
50689+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
50690+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
50691+ } else if (current->signal->curr_ip) {
50692+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
50693+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
50694+ } else if (gr_acl_is_enabled()) {
50695+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
50696+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
50697+ } else {
50698+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
50699+ strcpy(buf, fmt);
50700+ }
50701+
50702+ return NO_FLOODING;
50703+}
50704+
50705+static void gr_log_middle(int audit, const char *msg, va_list ap)
50706+ __attribute__ ((format (printf, 2, 0)));
50707+
50708+static void gr_log_middle(int audit, const char *msg, va_list ap)
50709+{
50710+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
50711+ unsigned int len = strlen(buf);
50712+
50713+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
50714+
50715+ return;
50716+}
50717+
50718+static void gr_log_middle_varargs(int audit, const char *msg, ...)
50719+ __attribute__ ((format (printf, 2, 3)));
50720+
50721+static void gr_log_middle_varargs(int audit, const char *msg, ...)
50722+{
50723+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
50724+ unsigned int len = strlen(buf);
50725+ va_list ap;
50726+
50727+ va_start(ap, msg);
50728+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
50729+ va_end(ap);
50730+
50731+ return;
50732+}
50733+
50734+static void gr_log_end(int audit)
50735+{
50736+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
50737+ unsigned int len = strlen(buf);
50738+
50739+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
50740+ printk("%s\n", buf);
50741+
50742+ return;
50743+}
50744+
50745+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
50746+{
50747+ int logtype;
50748+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
50749+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
50750+ void *voidptr = NULL;
50751+ int num1 = 0, num2 = 0;
50752+ unsigned long ulong1 = 0, ulong2 = 0;
50753+ struct dentry *dentry = NULL;
50754+ struct vfsmount *mnt = NULL;
50755+ struct file *file = NULL;
50756+ struct task_struct *task = NULL;
50757+ const struct cred *cred, *pcred;
50758+ va_list ap;
50759+
50760+ BEGIN_LOCKS(audit);
50761+ logtype = gr_log_start(audit);
50762+ if (logtype == FLOODING) {
50763+ END_LOCKS(audit);
50764+ return;
50765+ }
50766+ va_start(ap, argtypes);
50767+ switch (argtypes) {
50768+ case GR_TTYSNIFF:
50769+ task = va_arg(ap, struct task_struct *);
50770+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
50771+ break;
50772+ case GR_SYSCTL_HIDDEN:
50773+ str1 = va_arg(ap, char *);
50774+ gr_log_middle_varargs(audit, msg, result, str1);
50775+ break;
50776+ case GR_RBAC:
50777+ dentry = va_arg(ap, struct dentry *);
50778+ mnt = va_arg(ap, struct vfsmount *);
50779+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
50780+ break;
50781+ case GR_RBAC_STR:
50782+ dentry = va_arg(ap, struct dentry *);
50783+ mnt = va_arg(ap, struct vfsmount *);
50784+ str1 = va_arg(ap, char *);
50785+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
50786+ break;
50787+ case GR_STR_RBAC:
50788+ str1 = va_arg(ap, char *);
50789+ dentry = va_arg(ap, struct dentry *);
50790+ mnt = va_arg(ap, struct vfsmount *);
50791+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
50792+ break;
50793+ case GR_RBAC_MODE2:
50794+ dentry = va_arg(ap, struct dentry *);
50795+ mnt = va_arg(ap, struct vfsmount *);
50796+ str1 = va_arg(ap, char *);
50797+ str2 = va_arg(ap, char *);
50798+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
50799+ break;
50800+ case GR_RBAC_MODE3:
50801+ dentry = va_arg(ap, struct dentry *);
50802+ mnt = va_arg(ap, struct vfsmount *);
50803+ str1 = va_arg(ap, char *);
50804+ str2 = va_arg(ap, char *);
50805+ str3 = va_arg(ap, char *);
50806+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
50807+ break;
50808+ case GR_FILENAME:
50809+ dentry = va_arg(ap, struct dentry *);
50810+ mnt = va_arg(ap, struct vfsmount *);
50811+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
50812+ break;
50813+ case GR_STR_FILENAME:
50814+ str1 = va_arg(ap, char *);
50815+ dentry = va_arg(ap, struct dentry *);
50816+ mnt = va_arg(ap, struct vfsmount *);
50817+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
50818+ break;
50819+ case GR_FILENAME_STR:
50820+ dentry = va_arg(ap, struct dentry *);
50821+ mnt = va_arg(ap, struct vfsmount *);
50822+ str1 = va_arg(ap, char *);
50823+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
50824+ break;
50825+ case GR_FILENAME_TWO_INT:
50826+ dentry = va_arg(ap, struct dentry *);
50827+ mnt = va_arg(ap, struct vfsmount *);
50828+ num1 = va_arg(ap, int);
50829+ num2 = va_arg(ap, int);
50830+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
50831+ break;
50832+ case GR_FILENAME_TWO_INT_STR:
50833+ dentry = va_arg(ap, struct dentry *);
50834+ mnt = va_arg(ap, struct vfsmount *);
50835+ num1 = va_arg(ap, int);
50836+ num2 = va_arg(ap, int);
50837+ str1 = va_arg(ap, char *);
50838+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
50839+ break;
50840+ case GR_TEXTREL:
50841+ file = va_arg(ap, struct file *);
50842+ ulong1 = va_arg(ap, unsigned long);
50843+ ulong2 = va_arg(ap, unsigned long);
50844+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
50845+ break;
50846+ case GR_PTRACE:
50847+ task = va_arg(ap, struct task_struct *);
50848+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
50849+ break;
50850+ case GR_RESOURCE:
50851+ task = va_arg(ap, struct task_struct *);
50852+ cred = __task_cred(task);
50853+ pcred = __task_cred(task->real_parent);
50854+ ulong1 = va_arg(ap, unsigned long);
50855+ str1 = va_arg(ap, char *);
50856+ ulong2 = va_arg(ap, unsigned long);
50857+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
50858+ break;
50859+ case GR_CAP:
50860+ task = va_arg(ap, struct task_struct *);
50861+ cred = __task_cred(task);
50862+ pcred = __task_cred(task->real_parent);
50863+ str1 = va_arg(ap, char *);
50864+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
50865+ break;
50866+ case GR_SIG:
50867+ str1 = va_arg(ap, char *);
50868+ voidptr = va_arg(ap, void *);
50869+ gr_log_middle_varargs(audit, msg, str1, voidptr);
50870+ break;
50871+ case GR_SIG2:
50872+ task = va_arg(ap, struct task_struct *);
50873+ cred = __task_cred(task);
50874+ pcred = __task_cred(task->real_parent);
50875+ num1 = va_arg(ap, int);
50876+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
50877+ break;
50878+ case GR_CRASH1:
50879+ task = va_arg(ap, struct task_struct *);
50880+ cred = __task_cred(task);
50881+ pcred = __task_cred(task->real_parent);
50882+ ulong1 = va_arg(ap, unsigned long);
50883+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
50884+ break;
50885+ case GR_CRASH2:
50886+ task = va_arg(ap, struct task_struct *);
50887+ cred = __task_cred(task);
50888+ pcred = __task_cred(task->real_parent);
50889+ ulong1 = va_arg(ap, unsigned long);
50890+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
50891+ break;
50892+ case GR_RWXMAP:
50893+ file = va_arg(ap, struct file *);
50894+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
50895+ break;
50896+ case GR_PSACCT:
50897+ {
50898+ unsigned int wday, cday;
50899+ __u8 whr, chr;
50900+ __u8 wmin, cmin;
50901+ __u8 wsec, csec;
50902+ char cur_tty[64] = { 0 };
50903+ char parent_tty[64] = { 0 };
50904+
50905+ task = va_arg(ap, struct task_struct *);
50906+ wday = va_arg(ap, unsigned int);
50907+ cday = va_arg(ap, unsigned int);
50908+ whr = va_arg(ap, int);
50909+ chr = va_arg(ap, int);
50910+ wmin = va_arg(ap, int);
50911+ cmin = va_arg(ap, int);
50912+ wsec = va_arg(ap, int);
50913+ csec = va_arg(ap, int);
50914+ ulong1 = va_arg(ap, unsigned long);
50915+ cred = __task_cred(task);
50916+ pcred = __task_cred(task->real_parent);
50917+
50918+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
50919+ }
50920+ break;
50921+ default:
50922+ gr_log_middle(audit, msg, ap);
50923+ }
50924+ va_end(ap);
50925+ gr_log_end(audit);
50926+ END_LOCKS(audit);
50927+}
50928diff -urNp linux-2.6.32.41/grsecurity/grsec_mem.c linux-2.6.32.41/grsecurity/grsec_mem.c
50929--- linux-2.6.32.41/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
50930+++ linux-2.6.32.41/grsecurity/grsec_mem.c 2011-04-17 15:56:46.000000000 -0400
50931@@ -0,0 +1,33 @@
50932+#include <linux/kernel.h>
50933+#include <linux/sched.h>
50934+#include <linux/mm.h>
50935+#include <linux/mman.h>
50936+#include <linux/grinternal.h>
50937+
50938+void
50939+gr_handle_ioperm(void)
50940+{
50941+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
50942+ return;
50943+}
50944+
50945+void
50946+gr_handle_iopl(void)
50947+{
50948+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
50949+ return;
50950+}
50951+
50952+void
50953+gr_handle_mem_readwrite(u64 from, u64 to)
50954+{
50955+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
50956+ return;
50957+}
50958+
50959+void
50960+gr_handle_vm86(void)
50961+{
50962+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
50963+ return;
50964+}
50965diff -urNp linux-2.6.32.41/grsecurity/grsec_mount.c linux-2.6.32.41/grsecurity/grsec_mount.c
50966--- linux-2.6.32.41/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
50967+++ linux-2.6.32.41/grsecurity/grsec_mount.c 2011-04-17 15:56:46.000000000 -0400
50968@@ -0,0 +1,62 @@
50969+#include <linux/kernel.h>
50970+#include <linux/sched.h>
50971+#include <linux/mount.h>
50972+#include <linux/grsecurity.h>
50973+#include <linux/grinternal.h>
50974+
50975+void
50976+gr_log_remount(const char *devname, const int retval)
50977+{
50978+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
50979+ if (grsec_enable_mount && (retval >= 0))
50980+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
50981+#endif
50982+ return;
50983+}
50984+
50985+void
50986+gr_log_unmount(const char *devname, const int retval)
50987+{
50988+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
50989+ if (grsec_enable_mount && (retval >= 0))
50990+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
50991+#endif
50992+ return;
50993+}
50994+
50995+void
50996+gr_log_mount(const char *from, const char *to, const int retval)
50997+{
50998+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
50999+ if (grsec_enable_mount && (retval >= 0))
51000+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from, to);
51001+#endif
51002+ return;
51003+}
51004+
51005+int
51006+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
51007+{
51008+#ifdef CONFIG_GRKERNSEC_ROFS
51009+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
51010+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
51011+ return -EPERM;
51012+ } else
51013+ return 0;
51014+#endif
51015+ return 0;
51016+}
51017+
51018+int
51019+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
51020+{
51021+#ifdef CONFIG_GRKERNSEC_ROFS
51022+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
51023+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
51024+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
51025+ return -EPERM;
51026+ } else
51027+ return 0;
51028+#endif
51029+ return 0;
51030+}
51031diff -urNp linux-2.6.32.41/grsecurity/grsec_pax.c linux-2.6.32.41/grsecurity/grsec_pax.c
51032--- linux-2.6.32.41/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
51033+++ linux-2.6.32.41/grsecurity/grsec_pax.c 2011-04-17 15:56:46.000000000 -0400
51034@@ -0,0 +1,36 @@
51035+#include <linux/kernel.h>
51036+#include <linux/sched.h>
51037+#include <linux/mm.h>
51038+#include <linux/file.h>
51039+#include <linux/grinternal.h>
51040+#include <linux/grsecurity.h>
51041+
51042+void
51043+gr_log_textrel(struct vm_area_struct * vma)
51044+{
51045+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
51046+ if (grsec_enable_audit_textrel)
51047+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
51048+#endif
51049+ return;
51050+}
51051+
51052+void
51053+gr_log_rwxmmap(struct file *file)
51054+{
51055+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
51056+ if (grsec_enable_log_rwxmaps)
51057+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
51058+#endif
51059+ return;
51060+}
51061+
51062+void
51063+gr_log_rwxmprotect(struct file *file)
51064+{
51065+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
51066+ if (grsec_enable_log_rwxmaps)
51067+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
51068+#endif
51069+ return;
51070+}
51071diff -urNp linux-2.6.32.41/grsecurity/grsec_ptrace.c linux-2.6.32.41/grsecurity/grsec_ptrace.c
51072--- linux-2.6.32.41/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
51073+++ linux-2.6.32.41/grsecurity/grsec_ptrace.c 2011-04-17 15:56:46.000000000 -0400
51074@@ -0,0 +1,14 @@
51075+#include <linux/kernel.h>
51076+#include <linux/sched.h>
51077+#include <linux/grinternal.h>
51078+#include <linux/grsecurity.h>
51079+
51080+void
51081+gr_audit_ptrace(struct task_struct *task)
51082+{
51083+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
51084+ if (grsec_enable_audit_ptrace)
51085+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
51086+#endif
51087+ return;
51088+}
51089diff -urNp linux-2.6.32.41/grsecurity/grsec_sig.c linux-2.6.32.41/grsecurity/grsec_sig.c
51090--- linux-2.6.32.41/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
51091+++ linux-2.6.32.41/grsecurity/grsec_sig.c 2011-05-17 17:30:04.000000000 -0400
51092@@ -0,0 +1,202 @@
51093+#include <linux/kernel.h>
51094+#include <linux/sched.h>
51095+#include <linux/delay.h>
51096+#include <linux/grsecurity.h>
51097+#include <linux/grinternal.h>
51098+#include <linux/hardirq.h>
51099+
51100+char *signames[] = {
51101+ [SIGSEGV] = "Segmentation fault",
51102+ [SIGILL] = "Illegal instruction",
51103+ [SIGABRT] = "Abort",
51104+ [SIGBUS] = "Invalid alignment/Bus error"
51105+};
51106+
51107+void
51108+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
51109+{
51110+#ifdef CONFIG_GRKERNSEC_SIGNAL
51111+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
51112+ (sig == SIGABRT) || (sig == SIGBUS))) {
51113+ if (t->pid == current->pid) {
51114+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
51115+ } else {
51116+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
51117+ }
51118+ }
51119+#endif
51120+ return;
51121+}
51122+
51123+int
51124+gr_handle_signal(const struct task_struct *p, const int sig)
51125+{
51126+#ifdef CONFIG_GRKERNSEC
51127+ if (current->pid > 1 && gr_check_protected_task(p)) {
51128+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
51129+ return -EPERM;
51130+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
51131+ return -EPERM;
51132+ }
51133+#endif
51134+ return 0;
51135+}
51136+
51137+#ifdef CONFIG_GRKERNSEC
51138+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
51139+
51140+int gr_fake_force_sig(int sig, struct task_struct *t)
51141+{
51142+ unsigned long int flags;
51143+ int ret, blocked, ignored;
51144+ struct k_sigaction *action;
51145+
51146+ spin_lock_irqsave(&t->sighand->siglock, flags);
51147+ action = &t->sighand->action[sig-1];
51148+ ignored = action->sa.sa_handler == SIG_IGN;
51149+ blocked = sigismember(&t->blocked, sig);
51150+ if (blocked || ignored) {
51151+ action->sa.sa_handler = SIG_DFL;
51152+ if (blocked) {
51153+ sigdelset(&t->blocked, sig);
51154+ recalc_sigpending_and_wake(t);
51155+ }
51156+ }
51157+ if (action->sa.sa_handler == SIG_DFL)
51158+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
51159+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
51160+
51161+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
51162+
51163+ return ret;
51164+}
51165+#endif
51166+
51167+#ifdef CONFIG_GRKERNSEC_BRUTE
51168+#define GR_USER_BAN_TIME (15 * 60)
51169+
51170+static int __get_dumpable(unsigned long mm_flags)
51171+{
51172+ int ret;
51173+
51174+ ret = mm_flags & MMF_DUMPABLE_MASK;
51175+ return (ret >= 2) ? 2 : ret;
51176+}
51177+#endif
51178+
51179+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
51180+{
51181+#ifdef CONFIG_GRKERNSEC_BRUTE
51182+ uid_t uid = 0;
51183+
51184+ rcu_read_lock();
51185+ read_lock(&tasklist_lock);
51186+ read_lock(&grsec_exec_file_lock);
51187+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
51188+ p->real_parent->brute = 1;
51189+ else {
51190+ const struct cred *cred = __task_cred(p), *cred2;
51191+ struct task_struct *tsk, *tsk2;
51192+
51193+ if (!__get_dumpable(mm_flags) && cred->uid) {
51194+ struct user_struct *user;
51195+
51196+ uid = cred->uid;
51197+
51198+ /* this is put upon execution past expiration */
51199+ user = find_user(uid);
51200+ if (user == NULL)
51201+ goto unlock;
51202+ user->banned = 1;
51203+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
51204+ if (user->ban_expires == ~0UL)
51205+ user->ban_expires--;
51206+
51207+ do_each_thread(tsk2, tsk) {
51208+ cred2 = __task_cred(tsk);
51209+ if (tsk != p && cred2->uid == uid)
51210+ gr_fake_force_sig(SIGKILL, tsk);
51211+ } while_each_thread(tsk2, tsk);
51212+ }
51213+ }
51214+unlock:
51215+ read_unlock(&grsec_exec_file_lock);
51216+ read_unlock(&tasklist_lock);
51217+ rcu_read_unlock();
51218+
51219+ if (uid)
51220+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
51221+#endif
51222+ return;
51223+}
51224+
51225+void gr_handle_brute_check(void)
51226+{
51227+#ifdef CONFIG_GRKERNSEC_BRUTE
51228+ if (current->brute)
51229+ msleep(30 * 1000);
51230+#endif
51231+ return;
51232+}
51233+
51234+void gr_handle_kernel_exploit(void)
51235+{
51236+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
51237+ const struct cred *cred;
51238+ struct task_struct *tsk, *tsk2;
51239+ struct user_struct *user;
51240+ uid_t uid;
51241+
51242+ if (in_irq() || in_serving_softirq() || in_nmi())
51243+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
51244+
51245+ uid = current_uid();
51246+
51247+ if (uid == 0)
51248+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
51249+ else {
51250+ /* kill all the processes of this user, hold a reference
51251+ to their creds struct, and prevent them from creating
51252+ another process until system reset
51253+ */
51254+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
51255+ /* we intentionally leak this ref */
51256+ user = get_uid(current->cred->user);
51257+ if (user) {
51258+ user->banned = 1;
51259+ user->ban_expires = ~0UL;
51260+ }
51261+
51262+ read_lock(&tasklist_lock);
51263+ do_each_thread(tsk2, tsk) {
51264+ cred = __task_cred(tsk);
51265+ if (cred->uid == uid)
51266+ gr_fake_force_sig(SIGKILL, tsk);
51267+ } while_each_thread(tsk2, tsk);
51268+ read_unlock(&tasklist_lock);
51269+ }
51270+#endif
51271+}
51272+
51273+int __gr_process_user_ban(struct user_struct *user)
51274+{
51275+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
51276+ if (unlikely(user->banned)) {
51277+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
51278+ user->banned = 0;
51279+ user->ban_expires = 0;
51280+ free_uid(user);
51281+ } else
51282+ return -EPERM;
51283+ }
51284+#endif
51285+ return 0;
51286+}
51287+
51288+int gr_process_user_ban(void)
51289+{
51290+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
51291+ return __gr_process_user_ban(current->cred->user);
51292+#endif
51293+ return 0;
51294+}
51295diff -urNp linux-2.6.32.41/grsecurity/grsec_sock.c linux-2.6.32.41/grsecurity/grsec_sock.c
51296--- linux-2.6.32.41/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
51297+++ linux-2.6.32.41/grsecurity/grsec_sock.c 2011-04-17 15:56:46.000000000 -0400
51298@@ -0,0 +1,275 @@
51299+#include <linux/kernel.h>
51300+#include <linux/module.h>
51301+#include <linux/sched.h>
51302+#include <linux/file.h>
51303+#include <linux/net.h>
51304+#include <linux/in.h>
51305+#include <linux/ip.h>
51306+#include <net/sock.h>
51307+#include <net/inet_sock.h>
51308+#include <linux/grsecurity.h>
51309+#include <linux/grinternal.h>
51310+#include <linux/gracl.h>
51311+
51312+kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
51313+EXPORT_SYMBOL(gr_cap_rtnetlink);
51314+
51315+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
51316+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
51317+
51318+EXPORT_SYMBOL(gr_search_udp_recvmsg);
51319+EXPORT_SYMBOL(gr_search_udp_sendmsg);
51320+
51321+#ifdef CONFIG_UNIX_MODULE
51322+EXPORT_SYMBOL(gr_acl_handle_unix);
51323+EXPORT_SYMBOL(gr_acl_handle_mknod);
51324+EXPORT_SYMBOL(gr_handle_chroot_unix);
51325+EXPORT_SYMBOL(gr_handle_create);
51326+#endif
51327+
51328+#ifdef CONFIG_GRKERNSEC
51329+#define gr_conn_table_size 32749
51330+struct conn_table_entry {
51331+ struct conn_table_entry *next;
51332+ struct signal_struct *sig;
51333+};
51334+
51335+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
51336+DEFINE_SPINLOCK(gr_conn_table_lock);
51337+
51338+extern const char * gr_socktype_to_name(unsigned char type);
51339+extern const char * gr_proto_to_name(unsigned char proto);
51340+extern const char * gr_sockfamily_to_name(unsigned char family);
51341+
51342+static __inline__ int
51343+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
51344+{
51345+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
51346+}
51347+
51348+static __inline__ int
51349+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
51350+ __u16 sport, __u16 dport)
51351+{
51352+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
51353+ sig->gr_sport == sport && sig->gr_dport == dport))
51354+ return 1;
51355+ else
51356+ return 0;
51357+}
51358+
51359+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
51360+{
51361+ struct conn_table_entry **match;
51362+ unsigned int index;
51363+
51364+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
51365+ sig->gr_sport, sig->gr_dport,
51366+ gr_conn_table_size);
51367+
51368+ newent->sig = sig;
51369+
51370+ match = &gr_conn_table[index];
51371+ newent->next = *match;
51372+ *match = newent;
51373+
51374+ return;
51375+}
51376+
51377+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
51378+{
51379+ struct conn_table_entry *match, *last = NULL;
51380+ unsigned int index;
51381+
51382+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
51383+ sig->gr_sport, sig->gr_dport,
51384+ gr_conn_table_size);
51385+
51386+ match = gr_conn_table[index];
51387+ while (match && !conn_match(match->sig,
51388+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
51389+ sig->gr_dport)) {
51390+ last = match;
51391+ match = match->next;
51392+ }
51393+
51394+ if (match) {
51395+ if (last)
51396+ last->next = match->next;
51397+ else
51398+ gr_conn_table[index] = NULL;
51399+ kfree(match);
51400+ }
51401+
51402+ return;
51403+}
51404+
51405+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
51406+ __u16 sport, __u16 dport)
51407+{
51408+ struct conn_table_entry *match;
51409+ unsigned int index;
51410+
51411+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
51412+
51413+ match = gr_conn_table[index];
51414+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
51415+ match = match->next;
51416+
51417+ if (match)
51418+ return match->sig;
51419+ else
51420+ return NULL;
51421+}
51422+
51423+#endif
51424+
51425+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
51426+{
51427+#ifdef CONFIG_GRKERNSEC
51428+ struct signal_struct *sig = task->signal;
51429+ struct conn_table_entry *newent;
51430+
51431+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
51432+ if (newent == NULL)
51433+ return;
51434+ /* no bh lock needed since we are called with bh disabled */
51435+ spin_lock(&gr_conn_table_lock);
51436+ gr_del_task_from_ip_table_nolock(sig);
51437+ sig->gr_saddr = inet->rcv_saddr;
51438+ sig->gr_daddr = inet->daddr;
51439+ sig->gr_sport = inet->sport;
51440+ sig->gr_dport = inet->dport;
51441+ gr_add_to_task_ip_table_nolock(sig, newent);
51442+ spin_unlock(&gr_conn_table_lock);
51443+#endif
51444+ return;
51445+}
51446+
51447+void gr_del_task_from_ip_table(struct task_struct *task)
51448+{
51449+#ifdef CONFIG_GRKERNSEC
51450+ spin_lock_bh(&gr_conn_table_lock);
51451+ gr_del_task_from_ip_table_nolock(task->signal);
51452+ spin_unlock_bh(&gr_conn_table_lock);
51453+#endif
51454+ return;
51455+}
51456+
51457+void
51458+gr_attach_curr_ip(const struct sock *sk)
51459+{
51460+#ifdef CONFIG_GRKERNSEC
51461+ struct signal_struct *p, *set;
51462+ const struct inet_sock *inet = inet_sk(sk);
51463+
51464+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
51465+ return;
51466+
51467+ set = current->signal;
51468+
51469+ spin_lock_bh(&gr_conn_table_lock);
51470+ p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
51471+ inet->dport, inet->sport);
51472+ if (unlikely(p != NULL)) {
51473+ set->curr_ip = p->curr_ip;
51474+ set->used_accept = 1;
51475+ gr_del_task_from_ip_table_nolock(p);
51476+ spin_unlock_bh(&gr_conn_table_lock);
51477+ return;
51478+ }
51479+ spin_unlock_bh(&gr_conn_table_lock);
51480+
51481+ set->curr_ip = inet->daddr;
51482+ set->used_accept = 1;
51483+#endif
51484+ return;
51485+}
51486+
51487+int
51488+gr_handle_sock_all(const int family, const int type, const int protocol)
51489+{
51490+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
51491+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
51492+ (family != AF_UNIX)) {
51493+ if (family == AF_INET)
51494+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
51495+ else
51496+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
51497+ return -EACCES;
51498+ }
51499+#endif
51500+ return 0;
51501+}
51502+
51503+int
51504+gr_handle_sock_server(const struct sockaddr *sck)
51505+{
51506+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
51507+ if (grsec_enable_socket_server &&
51508+ in_group_p(grsec_socket_server_gid) &&
51509+ sck && (sck->sa_family != AF_UNIX) &&
51510+ (sck->sa_family != AF_LOCAL)) {
51511+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
51512+ return -EACCES;
51513+ }
51514+#endif
51515+ return 0;
51516+}
51517+
51518+int
51519+gr_handle_sock_server_other(const struct sock *sck)
51520+{
51521+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
51522+ if (grsec_enable_socket_server &&
51523+ in_group_p(grsec_socket_server_gid) &&
51524+ sck && (sck->sk_family != AF_UNIX) &&
51525+ (sck->sk_family != AF_LOCAL)) {
51526+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
51527+ return -EACCES;
51528+ }
51529+#endif
51530+ return 0;
51531+}
51532+
51533+int
51534+gr_handle_sock_client(const struct sockaddr *sck)
51535+{
51536+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
51537+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
51538+ sck && (sck->sa_family != AF_UNIX) &&
51539+ (sck->sa_family != AF_LOCAL)) {
51540+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
51541+ return -EACCES;
51542+ }
51543+#endif
51544+ return 0;
51545+}
51546+
51547+kernel_cap_t
51548+gr_cap_rtnetlink(struct sock *sock)
51549+{
51550+#ifdef CONFIG_GRKERNSEC
51551+ if (!gr_acl_is_enabled())
51552+ return current_cap();
51553+ else if (sock->sk_protocol == NETLINK_ISCSI &&
51554+ cap_raised(current_cap(), CAP_SYS_ADMIN) &&
51555+ gr_is_capable(CAP_SYS_ADMIN))
51556+ return current_cap();
51557+ else if (sock->sk_protocol == NETLINK_AUDIT &&
51558+ cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
51559+ gr_is_capable(CAP_AUDIT_WRITE) &&
51560+ cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
51561+ gr_is_capable(CAP_AUDIT_CONTROL))
51562+ return current_cap();
51563+ else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
51564+ ((sock->sk_protocol == NETLINK_ROUTE) ?
51565+ gr_is_capable_nolog(CAP_NET_ADMIN) :
51566+ gr_is_capable(CAP_NET_ADMIN)))
51567+ return current_cap();
51568+ else
51569+ return __cap_empty_set;
51570+#else
51571+ return current_cap();
51572+#endif
51573+}
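
For orientation only, and not part of the patch itself: a minimal standalone sketch of the bucket computation that grsec_sock.c above performs for gr_conn_table. The formula mirrors conn_hash() from the hunk above; the sample 4-tuple, the variable names, and the byte-order handling are assumptions made purely for the illustration.

/* Standalone illustration of the gr_conn_table bucket arithmetic
 * (not part of the patch). Compile with: cc conn_hash_demo.c */
#include <stdio.h>
#include <stdint.h>

#define GR_CONN_TABLE_SIZE 32749u   /* same prime as gr_conn_table_size above */

static unsigned int conn_hash(uint32_t saddr, uint32_t daddr,
                              uint16_t sport, uint16_t dport,
                              unsigned int size)
{
        /* identical arithmetic to the patch: sum the two addresses and the
         * shifted ports, then reduce modulo the table size */
        return (daddr + saddr + ((uint32_t)sport << 8) +
                ((uint32_t)dport << 16)) % size;
}

int main(void)
{
        /* hypothetical connection 10.0.0.1:43210 -> 10.0.0.2:22
         * (byte order is ignored for the purpose of the example) */
        uint32_t saddr = 0x0a000001, daddr = 0x0a000002;
        uint16_t sport = 43210, dport = 22;

        printf("bucket %u of %u\n",
               conn_hash(saddr, daddr, sport, dport, GR_CONN_TABLE_SIZE),
               GR_CONN_TABLE_SIZE);
        return 0;
}
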
51574diff -urNp linux-2.6.32.41/grsecurity/grsec_sysctl.c linux-2.6.32.41/grsecurity/grsec_sysctl.c
51575--- linux-2.6.32.41/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
51576+++ linux-2.6.32.41/grsecurity/grsec_sysctl.c 2011-04-17 15:56:46.000000000 -0400
51577@@ -0,0 +1,479 @@
51578+#include <linux/kernel.h>
51579+#include <linux/sched.h>
51580+#include <linux/sysctl.h>
51581+#include <linux/grsecurity.h>
51582+#include <linux/grinternal.h>
51583+
51584+int
51585+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
51586+{
51587+#ifdef CONFIG_GRKERNSEC_SYSCTL
51588+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
51589+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
51590+ return -EACCES;
51591+ }
51592+#endif
51593+ return 0;
51594+}
51595+
51596+#ifdef CONFIG_GRKERNSEC_ROFS
51597+static int __maybe_unused one = 1;
51598+#endif
51599+
51600+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
51601+ctl_table grsecurity_table[] = {
51602+#ifdef CONFIG_GRKERNSEC_SYSCTL
51603+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
51604+#ifdef CONFIG_GRKERNSEC_IO
51605+ {
51606+ .ctl_name = CTL_UNNUMBERED,
51607+ .procname = "disable_priv_io",
51608+ .data = &grsec_disable_privio,
51609+ .maxlen = sizeof(int),
51610+ .mode = 0600,
51611+ .proc_handler = &proc_dointvec,
51612+ },
51613+#endif
51614+#endif
51615+#ifdef CONFIG_GRKERNSEC_LINK
51616+ {
51617+ .ctl_name = CTL_UNNUMBERED,
51618+ .procname = "linking_restrictions",
51619+ .data = &grsec_enable_link,
51620+ .maxlen = sizeof(int),
51621+ .mode = 0600,
51622+ .proc_handler = &proc_dointvec,
51623+ },
51624+#endif
51625+#ifdef CONFIG_GRKERNSEC_FIFO
51626+ {
51627+ .ctl_name = CTL_UNNUMBERED,
51628+ .procname = "fifo_restrictions",
51629+ .data = &grsec_enable_fifo,
51630+ .maxlen = sizeof(int),
51631+ .mode = 0600,
51632+ .proc_handler = &proc_dointvec,
51633+ },
51634+#endif
51635+#ifdef CONFIG_GRKERNSEC_EXECVE
51636+ {
51637+ .ctl_name = CTL_UNNUMBERED,
51638+ .procname = "execve_limiting",
51639+ .data = &grsec_enable_execve,
51640+ .maxlen = sizeof(int),
51641+ .mode = 0600,
51642+ .proc_handler = &proc_dointvec,
51643+ },
51644+#endif
51645+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
51646+ {
51647+ .ctl_name = CTL_UNNUMBERED,
51648+ .procname = "ip_blackhole",
51649+ .data = &grsec_enable_blackhole,
51650+ .maxlen = sizeof(int),
51651+ .mode = 0600,
51652+ .proc_handler = &proc_dointvec,
51653+ },
51654+ {
51655+ .ctl_name = CTL_UNNUMBERED,
51656+ .procname = "lastack_retries",
51657+ .data = &grsec_lastack_retries,
51658+ .maxlen = sizeof(int),
51659+ .mode = 0600,
51660+ .proc_handler = &proc_dointvec,
51661+ },
51662+#endif
51663+#ifdef CONFIG_GRKERNSEC_EXECLOG
51664+ {
51665+ .ctl_name = CTL_UNNUMBERED,
51666+ .procname = "exec_logging",
51667+ .data = &grsec_enable_execlog,
51668+ .maxlen = sizeof(int),
51669+ .mode = 0600,
51670+ .proc_handler = &proc_dointvec,
51671+ },
51672+#endif
51673+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
51674+ {
51675+ .ctl_name = CTL_UNNUMBERED,
51676+ .procname = "rwxmap_logging",
51677+ .data = &grsec_enable_log_rwxmaps,
51678+ .maxlen = sizeof(int),
51679+ .mode = 0600,
51680+ .proc_handler = &proc_dointvec,
51681+ },
51682+#endif
51683+#ifdef CONFIG_GRKERNSEC_SIGNAL
51684+ {
51685+ .ctl_name = CTL_UNNUMBERED,
51686+ .procname = "signal_logging",
51687+ .data = &grsec_enable_signal,
51688+ .maxlen = sizeof(int),
51689+ .mode = 0600,
51690+ .proc_handler = &proc_dointvec,
51691+ },
51692+#endif
51693+#ifdef CONFIG_GRKERNSEC_FORKFAIL
51694+ {
51695+ .ctl_name = CTL_UNNUMBERED,
51696+ .procname = "forkfail_logging",
51697+ .data = &grsec_enable_forkfail,
51698+ .maxlen = sizeof(int),
51699+ .mode = 0600,
51700+ .proc_handler = &proc_dointvec,
51701+ },
51702+#endif
51703+#ifdef CONFIG_GRKERNSEC_TIME
51704+ {
51705+ .ctl_name = CTL_UNNUMBERED,
51706+ .procname = "timechange_logging",
51707+ .data = &grsec_enable_time,
51708+ .maxlen = sizeof(int),
51709+ .mode = 0600,
51710+ .proc_handler = &proc_dointvec,
51711+ },
51712+#endif
51713+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
51714+ {
51715+ .ctl_name = CTL_UNNUMBERED,
51716+ .procname = "chroot_deny_shmat",
51717+ .data = &grsec_enable_chroot_shmat,
51718+ .maxlen = sizeof(int),
51719+ .mode = 0600,
51720+ .proc_handler = &proc_dointvec,
51721+ },
51722+#endif
51723+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
51724+ {
51725+ .ctl_name = CTL_UNNUMBERED,
51726+ .procname = "chroot_deny_unix",
51727+ .data = &grsec_enable_chroot_unix,
51728+ .maxlen = sizeof(int),
51729+ .mode = 0600,
51730+ .proc_handler = &proc_dointvec,
51731+ },
51732+#endif
51733+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
51734+ {
51735+ .ctl_name = CTL_UNNUMBERED,
51736+ .procname = "chroot_deny_mount",
51737+ .data = &grsec_enable_chroot_mount,
51738+ .maxlen = sizeof(int),
51739+ .mode = 0600,
51740+ .proc_handler = &proc_dointvec,
51741+ },
51742+#endif
51743+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
51744+ {
51745+ .ctl_name = CTL_UNNUMBERED,
51746+ .procname = "chroot_deny_fchdir",
51747+ .data = &grsec_enable_chroot_fchdir,
51748+ .maxlen = sizeof(int),
51749+ .mode = 0600,
51750+ .proc_handler = &proc_dointvec,
51751+ },
51752+#endif
51753+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
51754+ {
51755+ .ctl_name = CTL_UNNUMBERED,
51756+ .procname = "chroot_deny_chroot",
51757+ .data = &grsec_enable_chroot_double,
51758+ .maxlen = sizeof(int),
51759+ .mode = 0600,
51760+ .proc_handler = &proc_dointvec,
51761+ },
51762+#endif
51763+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
51764+ {
51765+ .ctl_name = CTL_UNNUMBERED,
51766+ .procname = "chroot_deny_pivot",
51767+ .data = &grsec_enable_chroot_pivot,
51768+ .maxlen = sizeof(int),
51769+ .mode = 0600,
51770+ .proc_handler = &proc_dointvec,
51771+ },
51772+#endif
51773+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
51774+ {
51775+ .ctl_name = CTL_UNNUMBERED,
51776+ .procname = "chroot_enforce_chdir",
51777+ .data = &grsec_enable_chroot_chdir,
51778+ .maxlen = sizeof(int),
51779+ .mode = 0600,
51780+ .proc_handler = &proc_dointvec,
51781+ },
51782+#endif
51783+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
51784+ {
51785+ .ctl_name = CTL_UNNUMBERED,
51786+ .procname = "chroot_deny_chmod",
51787+ .data = &grsec_enable_chroot_chmod,
51788+ .maxlen = sizeof(int),
51789+ .mode = 0600,
51790+ .proc_handler = &proc_dointvec,
51791+ },
51792+#endif
51793+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
51794+ {
51795+ .ctl_name = CTL_UNNUMBERED,
51796+ .procname = "chroot_deny_mknod",
51797+ .data = &grsec_enable_chroot_mknod,
51798+ .maxlen = sizeof(int),
51799+ .mode = 0600,
51800+ .proc_handler = &proc_dointvec,
51801+ },
51802+#endif
51803+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
51804+ {
51805+ .ctl_name = CTL_UNNUMBERED,
51806+ .procname = "chroot_restrict_nice",
51807+ .data = &grsec_enable_chroot_nice,
51808+ .maxlen = sizeof(int),
51809+ .mode = 0600,
51810+ .proc_handler = &proc_dointvec,
51811+ },
51812+#endif
51813+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
51814+ {
51815+ .ctl_name = CTL_UNNUMBERED,
51816+ .procname = "chroot_execlog",
51817+ .data = &grsec_enable_chroot_execlog,
51818+ .maxlen = sizeof(int),
51819+ .mode = 0600,
51820+ .proc_handler = &proc_dointvec,
51821+ },
51822+#endif
51823+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
51824+ {
51825+ .ctl_name = CTL_UNNUMBERED,
51826+ .procname = "chroot_caps",
51827+ .data = &grsec_enable_chroot_caps,
51828+ .maxlen = sizeof(int),
51829+ .mode = 0600,
51830+ .proc_handler = &proc_dointvec,
51831+ },
51832+#endif
51833+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
51834+ {
51835+ .ctl_name = CTL_UNNUMBERED,
51836+ .procname = "chroot_deny_sysctl",
51837+ .data = &grsec_enable_chroot_sysctl,
51838+ .maxlen = sizeof(int),
51839+ .mode = 0600,
51840+ .proc_handler = &proc_dointvec,
51841+ },
51842+#endif
51843+#ifdef CONFIG_GRKERNSEC_TPE
51844+ {
51845+ .ctl_name = CTL_UNNUMBERED,
51846+ .procname = "tpe",
51847+ .data = &grsec_enable_tpe,
51848+ .maxlen = sizeof(int),
51849+ .mode = 0600,
51850+ .proc_handler = &proc_dointvec,
51851+ },
51852+ {
51853+ .ctl_name = CTL_UNNUMBERED,
51854+ .procname = "tpe_gid",
51855+ .data = &grsec_tpe_gid,
51856+ .maxlen = sizeof(int),
51857+ .mode = 0600,
51858+ .proc_handler = &proc_dointvec,
51859+ },
51860+#endif
51861+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
51862+ {
51863+ .ctl_name = CTL_UNNUMBERED,
51864+ .procname = "tpe_invert",
51865+ .data = &grsec_enable_tpe_invert,
51866+ .maxlen = sizeof(int),
51867+ .mode = 0600,
51868+ .proc_handler = &proc_dointvec,
51869+ },
51870+#endif
51871+#ifdef CONFIG_GRKERNSEC_TPE_ALL
51872+ {
51873+ .ctl_name = CTL_UNNUMBERED,
51874+ .procname = "tpe_restrict_all",
51875+ .data = &grsec_enable_tpe_all,
51876+ .maxlen = sizeof(int),
51877+ .mode = 0600,
51878+ .proc_handler = &proc_dointvec,
51879+ },
51880+#endif
51881+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
51882+ {
51883+ .ctl_name = CTL_UNNUMBERED,
51884+ .procname = "socket_all",
51885+ .data = &grsec_enable_socket_all,
51886+ .maxlen = sizeof(int),
51887+ .mode = 0600,
51888+ .proc_handler = &proc_dointvec,
51889+ },
51890+ {
51891+ .ctl_name = CTL_UNNUMBERED,
51892+ .procname = "socket_all_gid",
51893+ .data = &grsec_socket_all_gid,
51894+ .maxlen = sizeof(int),
51895+ .mode = 0600,
51896+ .proc_handler = &proc_dointvec,
51897+ },
51898+#endif
51899+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
51900+ {
51901+ .ctl_name = CTL_UNNUMBERED,
51902+ .procname = "socket_client",
51903+ .data = &grsec_enable_socket_client,
51904+ .maxlen = sizeof(int),
51905+ .mode = 0600,
51906+ .proc_handler = &proc_dointvec,
51907+ },
51908+ {
51909+ .ctl_name = CTL_UNNUMBERED,
51910+ .procname = "socket_client_gid",
51911+ .data = &grsec_socket_client_gid,
51912+ .maxlen = sizeof(int),
51913+ .mode = 0600,
51914+ .proc_handler = &proc_dointvec,
51915+ },
51916+#endif
51917+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
51918+ {
51919+ .ctl_name = CTL_UNNUMBERED,
51920+ .procname = "socket_server",
51921+ .data = &grsec_enable_socket_server,
51922+ .maxlen = sizeof(int),
51923+ .mode = 0600,
51924+ .proc_handler = &proc_dointvec,
51925+ },
51926+ {
51927+ .ctl_name = CTL_UNNUMBERED,
51928+ .procname = "socket_server_gid",
51929+ .data = &grsec_socket_server_gid,
51930+ .maxlen = sizeof(int),
51931+ .mode = 0600,
51932+ .proc_handler = &proc_dointvec,
51933+ },
51934+#endif
51935+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
51936+ {
51937+ .ctl_name = CTL_UNNUMBERED,
51938+ .procname = "audit_group",
51939+ .data = &grsec_enable_group,
51940+ .maxlen = sizeof(int),
51941+ .mode = 0600,
51942+ .proc_handler = &proc_dointvec,
51943+ },
51944+ {
51945+ .ctl_name = CTL_UNNUMBERED,
51946+ .procname = "audit_gid",
51947+ .data = &grsec_audit_gid,
51948+ .maxlen = sizeof(int),
51949+ .mode = 0600,
51950+ .proc_handler = &proc_dointvec,
51951+ },
51952+#endif
51953+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
51954+ {
51955+ .ctl_name = CTL_UNNUMBERED,
51956+ .procname = "audit_chdir",
51957+ .data = &grsec_enable_chdir,
51958+ .maxlen = sizeof(int),
51959+ .mode = 0600,
51960+ .proc_handler = &proc_dointvec,
51961+ },
51962+#endif
51963+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51964+ {
51965+ .ctl_name = CTL_UNNUMBERED,
51966+ .procname = "audit_mount",
51967+ .data = &grsec_enable_mount,
51968+ .maxlen = sizeof(int),
51969+ .mode = 0600,
51970+ .proc_handler = &proc_dointvec,
51971+ },
51972+#endif
51973+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
51974+ {
51975+ .ctl_name = CTL_UNNUMBERED,
51976+ .procname = "audit_textrel",
51977+ .data = &grsec_enable_audit_textrel,
51978+ .maxlen = sizeof(int),
51979+ .mode = 0600,
51980+ .proc_handler = &proc_dointvec,
51981+ },
51982+#endif
51983+#ifdef CONFIG_GRKERNSEC_DMESG
51984+ {
51985+ .ctl_name = CTL_UNNUMBERED,
51986+ .procname = "dmesg",
51987+ .data = &grsec_enable_dmesg,
51988+ .maxlen = sizeof(int),
51989+ .mode = 0600,
51990+ .proc_handler = &proc_dointvec,
51991+ },
51992+#endif
51993+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
51994+ {
51995+ .ctl_name = CTL_UNNUMBERED,
51996+ .procname = "chroot_findtask",
51997+ .data = &grsec_enable_chroot_findtask,
51998+ .maxlen = sizeof(int),
51999+ .mode = 0600,
52000+ .proc_handler = &proc_dointvec,
52001+ },
52002+#endif
52003+#ifdef CONFIG_GRKERNSEC_RESLOG
52004+ {
52005+ .ctl_name = CTL_UNNUMBERED,
52006+ .procname = "resource_logging",
52007+ .data = &grsec_resource_logging,
52008+ .maxlen = sizeof(int),
52009+ .mode = 0600,
52010+ .proc_handler = &proc_dointvec,
52011+ },
52012+#endif
52013+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
52014+ {
52015+ .ctl_name = CTL_UNNUMBERED,
52016+ .procname = "audit_ptrace",
52017+ .data = &grsec_enable_audit_ptrace,
52018+ .maxlen = sizeof(int),
52019+ .mode = 0600,
52020+ .proc_handler = &proc_dointvec,
52021+ },
52022+#endif
52023+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
52024+ {
52025+ .ctl_name = CTL_UNNUMBERED,
52026+ .procname = "harden_ptrace",
52027+ .data = &grsec_enable_harden_ptrace,
52028+ .maxlen = sizeof(int),
52029+ .mode = 0600,
52030+ .proc_handler = &proc_dointvec,
52031+ },
52032+#endif
52033+ {
52034+ .ctl_name = CTL_UNNUMBERED,
52035+ .procname = "grsec_lock",
52036+ .data = &grsec_lock,
52037+ .maxlen = sizeof(int),
52038+ .mode = 0600,
52039+ .proc_handler = &proc_dointvec,
52040+ },
52041+#endif
52042+#ifdef CONFIG_GRKERNSEC_ROFS
52043+ {
52044+ .ctl_name = CTL_UNNUMBERED,
52045+ .procname = "romount_protect",
52046+ .data = &grsec_enable_rofs,
52047+ .maxlen = sizeof(int),
52048+ .mode = 0600,
52049+ .proc_handler = &proc_dointvec_minmax,
52050+ .extra1 = &one,
52051+ .extra2 = &one,
52052+ },
52053+#endif
52054+ { .ctl_name = 0 }
52055+};
52056+#endif
52057diff -urNp linux-2.6.32.41/grsecurity/grsec_time.c linux-2.6.32.41/grsecurity/grsec_time.c
52058--- linux-2.6.32.41/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
52059+++ linux-2.6.32.41/grsecurity/grsec_time.c 2011-04-17 15:56:46.000000000 -0400
52060@@ -0,0 +1,16 @@
52061+#include <linux/kernel.h>
52062+#include <linux/sched.h>
52063+#include <linux/grinternal.h>
52064+#include <linux/module.h>
52065+
52066+void
52067+gr_log_timechange(void)
52068+{
52069+#ifdef CONFIG_GRKERNSEC_TIME
52070+ if (grsec_enable_time)
52071+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
52072+#endif
52073+ return;
52074+}
52075+
52076+EXPORT_SYMBOL(gr_log_timechange);
52077diff -urNp linux-2.6.32.41/grsecurity/grsec_tpe.c linux-2.6.32.41/grsecurity/grsec_tpe.c
52078--- linux-2.6.32.41/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
52079+++ linux-2.6.32.41/grsecurity/grsec_tpe.c 2011-04-17 15:56:46.000000000 -0400
52080@@ -0,0 +1,39 @@
52081+#include <linux/kernel.h>
52082+#include <linux/sched.h>
52083+#include <linux/file.h>
52084+#include <linux/fs.h>
52085+#include <linux/grinternal.h>
52086+
52087+extern int gr_acl_tpe_check(void);
52088+
52089+int
52090+gr_tpe_allow(const struct file *file)
52091+{
52092+#ifdef CONFIG_GRKERNSEC
52093+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
52094+ const struct cred *cred = current_cred();
52095+
52096+ if (cred->uid && ((grsec_enable_tpe &&
52097+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
52098+ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
52099+ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
52100+#else
52101+ in_group_p(grsec_tpe_gid)
52102+#endif
52103+ ) || gr_acl_tpe_check()) &&
52104+ (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
52105+ (inode->i_mode & S_IWOTH))))) {
52106+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
52107+ return 0;
52108+ }
52109+#ifdef CONFIG_GRKERNSEC_TPE_ALL
52110+ if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
52111+ ((inode->i_uid && (inode->i_uid != cred->uid)) ||
52112+ (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
52113+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
52114+ return 0;
52115+ }
52116+#endif
52117+#endif
52118+ return 1;
52119+}
52120diff -urNp linux-2.6.32.41/grsecurity/grsum.c linux-2.6.32.41/grsecurity/grsum.c
52121--- linux-2.6.32.41/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
52122+++ linux-2.6.32.41/grsecurity/grsum.c 2011-04-17 15:56:46.000000000 -0400
52123@@ -0,0 +1,61 @@
52124+#include <linux/err.h>
52125+#include <linux/kernel.h>
52126+#include <linux/sched.h>
52127+#include <linux/mm.h>
52128+#include <linux/scatterlist.h>
52129+#include <linux/crypto.h>
52130+#include <linux/gracl.h>
52131+
52132+
52133+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
52134+#error "crypto and sha256 must be built into the kernel"
52135+#endif
52136+
52137+int
52138+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
52139+{
52140+ char *p;
52141+ struct crypto_hash *tfm;
52142+ struct hash_desc desc;
52143+ struct scatterlist sg;
52144+ unsigned char temp_sum[GR_SHA_LEN];
52145+ volatile int retval = 0;
52146+ volatile int dummy = 0;
52147+ unsigned int i;
52148+
52149+ sg_init_table(&sg, 1);
52150+
52151+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
52152+ if (IS_ERR(tfm)) {
52153+ /* should never happen, since sha256 should be built in */
52154+ return 1;
52155+ }
52156+
52157+ desc.tfm = tfm;
52158+ desc.flags = 0;
52159+
52160+ crypto_hash_init(&desc);
52161+
52162+ p = salt;
52163+ sg_set_buf(&sg, p, GR_SALT_LEN);
52164+ crypto_hash_update(&desc, &sg, sg.length);
52165+
52166+ p = entry->pw;
52167+ sg_set_buf(&sg, p, strlen(p));
52168+
52169+ crypto_hash_update(&desc, &sg, sg.length);
52170+
52171+ crypto_hash_final(&desc, temp_sum);
52172+
52173+ memset(entry->pw, 0, GR_PW_LEN);
52174+
52175+ for (i = 0; i < GR_SHA_LEN; i++)
52176+ if (sum[i] != temp_sum[i])
52177+ retval = 1;
52178+ else
52179+ dummy = 1; // waste a cycle
52180+
52181+ crypto_free_hash(tfm);
52182+
52183+ return retval;
52184+}
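
As a side note on chkpw() above, and again not part of the patch: the final loop deliberately touches every byte of both digests instead of returning at the first mismatch, so the comparison does about the same amount of work whether the sums differ early, late, or not at all. A minimal standalone sketch of that comparison style follows; GR_SHA_LEN is assumed to be 32 (SHA-256) here, and the function and variable names are illustrative.

/* Standalone illustration of the full-length digest comparison used by
 * chkpw() above (not part of the patch). Compile with: cc cmp_demo.c */
#include <stdio.h>

#define GR_SHA_LEN 32

static int compare_sums(const unsigned char *a, const unsigned char *b)
{
        volatile int retval = 0;
        volatile int dummy = 0;
        unsigned int i;

        for (i = 0; i < GR_SHA_LEN; i++)
                if (a[i] != b[i])
                        retval = 1;   /* remember that a mismatch occurred ... */
                else
                        dummy = 1;    /* ... but do comparable work per byte */
        (void)dummy;
        return retval;                /* 0 = equal, 1 = different */
}

int main(void)
{
        unsigned char x[GR_SHA_LEN] = { 0 }, y[GR_SHA_LEN] = { 0 };

        y[GR_SHA_LEN - 1] = 1;        /* differ only in the last byte */
        printf("equal: %d, differing: %d\n",
               compare_sums(x, x), compare_sums(x, y));
        return 0;
}
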
52185diff -urNp linux-2.6.32.41/grsecurity/Kconfig linux-2.6.32.41/grsecurity/Kconfig
52186--- linux-2.6.32.41/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
52187+++ linux-2.6.32.41/grsecurity/Kconfig 2011-04-17 15:56:46.000000000 -0400
52188@@ -0,0 +1,1045 @@
52189+#
52190+# grsecurity configuration
52191+#
52192+
52193+menu "Grsecurity"
52194+
52195+config GRKERNSEC
52196+ bool "Grsecurity"
52197+ select CRYPTO
52198+ select CRYPTO_SHA256
52199+ help
52200+ If you say Y here, you will be able to configure many features
52201+ that will enhance the security of your system. It is highly
52202+ recommended that you say Y here and read through the help
52203+ for each option so that you fully understand the features and
52204+ can evaluate their usefulness for your machine.
52205+
52206+choice
52207+ prompt "Security Level"
52208+ depends on GRKERNSEC
52209+ default GRKERNSEC_CUSTOM
52210+
52211+config GRKERNSEC_LOW
52212+ bool "Low"
52213+ select GRKERNSEC_LINK
52214+ select GRKERNSEC_FIFO
52215+ select GRKERNSEC_EXECVE
52216+ select GRKERNSEC_RANDNET
52217+ select GRKERNSEC_DMESG
52218+ select GRKERNSEC_CHROOT
52219+ select GRKERNSEC_CHROOT_CHDIR
52220+
52221+ help
52222+ If you choose this option, several of the grsecurity options will
52223+ be enabled that will give you greater protection against a number
52224+ of attacks, while assuring that none of your software will have any
52225+ conflicts with the additional security measures. If you run a lot
52226+ of unusual software, or you are having problems with the higher
52227+ security levels, you should say Y here. With this option, the
52228+ following features are enabled:
52229+
52230+ - Linking restrictions
52231+ - FIFO restrictions
52232+ - Enforcing RLIMIT_NPROC on execve
52233+ - Restricted dmesg
52234+ - Enforced chdir("/") on chroot
52235+ - Runtime module disabling
52236+
52237+config GRKERNSEC_MEDIUM
52238+ bool "Medium"
52239+ select PAX
52240+ select PAX_EI_PAX
52241+ select PAX_PT_PAX_FLAGS
52242+ select PAX_HAVE_ACL_FLAGS
52243+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
52244+ select GRKERNSEC_CHROOT
52245+ select GRKERNSEC_CHROOT_SYSCTL
52246+ select GRKERNSEC_LINK
52247+ select GRKERNSEC_FIFO
52248+ select GRKERNSEC_EXECVE
52249+ select GRKERNSEC_DMESG
52250+ select GRKERNSEC_RANDNET
52251+ select GRKERNSEC_FORKFAIL
52252+ select GRKERNSEC_TIME
52253+ select GRKERNSEC_SIGNAL
52254+ select GRKERNSEC_CHROOT
52255+ select GRKERNSEC_CHROOT_UNIX
52256+ select GRKERNSEC_CHROOT_MOUNT
52257+ select GRKERNSEC_CHROOT_PIVOT
52258+ select GRKERNSEC_CHROOT_DOUBLE
52259+ select GRKERNSEC_CHROOT_CHDIR
52260+ select GRKERNSEC_CHROOT_MKNOD
52261+ select GRKERNSEC_PROC
52262+ select GRKERNSEC_PROC_USERGROUP
52263+ select PAX_RANDUSTACK
52264+ select PAX_ASLR
52265+ select PAX_RANDMMAP
52266+ select PAX_REFCOUNT if (X86 || SPARC64)
52267+ select PAX_USERCOPY if ((X86 || SPARC32 || SPARC64 || PPC) && (SLAB || SLUB || SLOB))
52268+
52269+ help
52270+ If you say Y here, several features in addition to those included
52271+ in the low additional security level will be enabled. These
52272+ features provide even more security to your system, though in rare
52273+ cases they may be incompatible with very old or poorly written
52274+ software. If you enable this option, make sure that your auth
52275+ service (identd) is running as gid 1001. With this option,
52276+ the following features (in addition to those provided in the
52277+ low additional security level) will be enabled:
52278+
52279+ - Failed fork logging
52280+ - Time change logging
52281+ - Signal logging
52282+ - Deny mounts in chroot
52283+ - Deny double chrooting
52284+ - Deny sysctl writes in chroot
52285+ - Deny mknod in chroot
52286+ - Deny access to abstract AF_UNIX sockets out of chroot
52287+ - Deny pivot_root in chroot
52288+ - Denied writes of /dev/kmem, /dev/mem, and /dev/port
52289+ - /proc restrictions with special GID set to 10 (usually wheel)
52290+ - Address Space Layout Randomization (ASLR)
52291+ - Prevent exploitation of most refcount overflows
52292+ - Bounds checking of copying between the kernel and userland
52293+
52294+config GRKERNSEC_HIGH
52295+ bool "High"
52296+ select GRKERNSEC_LINK
52297+ select GRKERNSEC_FIFO
52298+ select GRKERNSEC_EXECVE
52299+ select GRKERNSEC_DMESG
52300+ select GRKERNSEC_FORKFAIL
52301+ select GRKERNSEC_TIME
52302+ select GRKERNSEC_SIGNAL
52303+ select GRKERNSEC_CHROOT
52304+ select GRKERNSEC_CHROOT_SHMAT
52305+ select GRKERNSEC_CHROOT_UNIX
52306+ select GRKERNSEC_CHROOT_MOUNT
52307+ select GRKERNSEC_CHROOT_FCHDIR
52308+ select GRKERNSEC_CHROOT_PIVOT
52309+ select GRKERNSEC_CHROOT_DOUBLE
52310+ select GRKERNSEC_CHROOT_CHDIR
52311+ select GRKERNSEC_CHROOT_MKNOD
52312+ select GRKERNSEC_CHROOT_CAPS
52313+ select GRKERNSEC_CHROOT_SYSCTL
52314+ select GRKERNSEC_CHROOT_FINDTASK
52315+ select GRKERNSEC_SYSFS_RESTRICT
52316+ select GRKERNSEC_PROC
52317+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
52318+ select GRKERNSEC_HIDESYM
52319+ select GRKERNSEC_BRUTE
52320+ select GRKERNSEC_PROC_USERGROUP
52321+ select GRKERNSEC_KMEM
52322+ select GRKERNSEC_RESLOG
52323+ select GRKERNSEC_RANDNET
52324+ select GRKERNSEC_PROC_ADD
52325+ select GRKERNSEC_CHROOT_CHMOD
52326+ select GRKERNSEC_CHROOT_NICE
52327+ select GRKERNSEC_AUDIT_MOUNT
52328+ select GRKERNSEC_MODHARDEN if (MODULES)
52329+ select GRKERNSEC_HARDEN_PTRACE
52330+ select GRKERNSEC_VM86 if (X86_32)
52331+ select GRKERNSEC_KERN_LOCKOUT if (X86)
52332+ select PAX
52333+ select PAX_RANDUSTACK
52334+ select PAX_ASLR
52335+ select PAX_RANDMMAP
52336+ select PAX_NOEXEC
52337+ select PAX_MPROTECT
52338+ select PAX_EI_PAX
52339+ select PAX_PT_PAX_FLAGS
52340+ select PAX_HAVE_ACL_FLAGS
52341+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
52342+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
52343+ select PAX_RANDKSTACK if (X86_TSC && X86)
52344+ select PAX_SEGMEXEC if (X86_32)
52345+ select PAX_PAGEEXEC
52346+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC32 || SPARC64)
52347+ select PAX_EMUTRAMP if (PARISC)
52348+ select PAX_EMUSIGRT if (PARISC)
52349+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
52350+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
52351+ select PAX_REFCOUNT if (X86 || SPARC64)
52352+ select PAX_USERCOPY if ((X86 || PPC || SPARC32 || SPARC64) && (SLAB || SLUB || SLOB))
52353+ help
52354+ If you say Y here, many of the features of grsecurity will be
52355+ enabled, which will protect you against many kinds of attacks
52356+ against your system. The heightened security comes at a cost
52357+ of an increased chance of incompatibilities with rare software
52358+ on your machine. Since this security level enables PaX, you should
52359+ view <http://pax.grsecurity.net> and read about the PaX
52360+ project. While you are there, download chpax and run it on
52361+ binaries that cause problems with PaX. Also remember that
52362+ since the /proc restrictions are enabled, you must run your
52363+ identd as gid 1001. This security level enables the following
52364+ features in addition to those listed in the low and medium
52365+ security levels:
52366+
52367+ - Additional /proc restrictions
52368+ - Chmod restrictions in chroot
52369+ - No signals, ptrace, or viewing of processes outside of chroot
52370+ - Capability restrictions in chroot
52371+ - Deny fchdir out of chroot
52372+ - Priority restrictions in chroot
52373+ - Segmentation-based implementation of PaX
52374+ - Mprotect restrictions
52375+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
52376+ - Kernel stack randomization
52377+ - Mount/unmount/remount logging
52378+ - Kernel symbol hiding
52379+ - Prevention of memory exhaustion-based exploits
52380+ - Hardening of module auto-loading
52381+ - Ptrace restrictions
52382+ - Restricted vm86 mode
52383+ - Restricted sysfs/debugfs
52384+ - Active kernel exploit response
52385+
52386+config GRKERNSEC_CUSTOM
52387+ bool "Custom"
52388+ help
52389+ If you say Y here, you will be able to configure every grsecurity
52390+ option, which allows you to enable many more features that aren't
52391+ covered in the basic security levels. These additional features
52392+ include TPE, socket restrictions, and the sysctl system for
52393+ grsecurity. It is advised that you read through the help for
52394+ each option to determine its usefulness in your situation.
52395+
52396+endchoice
52397+
52398+menu "Address Space Protection"
52399+depends on GRKERNSEC
52400+
52401+config GRKERNSEC_KMEM
52402+ bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
52403+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
52404+ help
52405+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
52406+ be written to via mmap or otherwise to modify the running kernel.
52407+ /dev/port will also not be allowed to be opened. If you have module
52408+ support disabled, enabling this will close up four ways that are
52409+ currently used to insert malicious code into the running kernel.
52410+ Even with all these features enabled, we still highly recommend that
52411+ you use the RBAC system, as it is still possible for an attacker to
52412+ modify the running kernel through privileged I/O granted by ioperm/iopl.
52413+ If you are not using XFree86, you may be able to stop this additional
52414+ case by enabling the 'Disable privileged I/O' option. Though nothing
52415+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
52416+ but only to video memory, which is the only writing we allow in this
52417+ case. If /dev/kmem or /dev/mem is mmapped without PROT_WRITE, the mapping
52418+ cannot later be mprotected with PROT_WRITE.
52419+ It is highly recommended that you say Y here if you meet all the
52420+ conditions above.
52421+
52422+config GRKERNSEC_VM86
52423+ bool "Restrict VM86 mode"
52424+ depends on X86_32
52425+
52426+ help
52427+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
52428+ make use of a special execution mode on 32bit x86 processors called
52429+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
52430+ video cards and will still work with this option enabled. The purpose
52431+ of the option is to prevent exploitation of emulation errors in
52432+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
52433+ Nearly all users should be able to enable this option.
52434+
52435+config GRKERNSEC_IO
52436+ bool "Disable privileged I/O"
52437+ depends on X86
52438+ select RTC_CLASS
52439+ select RTC_INTF_DEV
52440+ select RTC_DRV_CMOS
52441+
52442+ help
52443+ If you say Y here, all ioperm and iopl calls will return an error.
52444+ Ioperm and iopl can be used to modify the running kernel.
52445+ Unfortunately, some programs need this access to operate properly,
52446+ the most notable of which are XFree86 and hwclock. hwclock can be
52447+ remedied by having RTC support in the kernel, so real-time
52448+ clock support is enabled if this option is enabled, to ensure
52449+ that hwclock operates correctly. XFree86 still will not
52450+ operate correctly with this option enabled, so DO NOT CHOOSE Y
52451+ IF YOU USE XFree86. If you use XFree86 and you still want to
52452+ protect your kernel against modification, use the RBAC system.
52453+
52454+config GRKERNSEC_PROC_MEMMAP
52455+ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
52456+ default y if (PAX_NOEXEC || PAX_ASLR)
52457+ depends on PAX_NOEXEC || PAX_ASLR
52458+ help
52459+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
52460+ give no information about the addresses of their mappings if
52461+ PaX features that rely on random addresses are enabled on the task.
52462+ If you use PaX it is greatly recommended that you say Y here as it
52463+ closes up a hole that makes the full ASLR useless for suid
52464+ binaries.
52465+
52466+config GRKERNSEC_BRUTE
52467+ bool "Deter exploit bruteforcing"
52468+ help
52469+ If you say Y here, attempts to bruteforce exploits against forking
52470+ daemons such as apache or sshd, as well as against suid/sgid binaries
52471+ will be deterred. When a child of a forking daemon is killed by PaX
52472+ or crashes due to an illegal instruction or other suspicious signal,
52473+ the parent process will be delayed 30 seconds upon every subsequent
52474+ fork until the administrator is able to assess the situation and
52475+ restart the daemon.
52476+ In the suid/sgid case, the attempt is logged, the user has all their
52477+ processes terminated, and they are prevented from executing any further
52478+ processes for 15 minutes.
52479+ It is recommended that you also enable signal logging in the auditing
52480+ section so that logs are generated when a process triggers a suspicious
52481+ signal.
52482+
52483+config GRKERNSEC_MODHARDEN
52484+ bool "Harden module auto-loading"
52485+ depends on MODULES
52486+ help
52487+ If you say Y here, module auto-loading in response to use of some
52488+ feature implemented by an unloaded module will be restricted to
52489+ root users. Enabling this option helps defend against attacks
52490+ by unprivileged users who abuse the auto-loading behavior to
52491+ cause a vulnerable module to load that is then exploited.
52492+
52493+ If this option prevents a legitimate use of auto-loading for a
52494+ non-root user, the administrator can execute modprobe manually
52495+ with the exact name of the module mentioned in the alert log.
52496+ Alternatively, the administrator can add the module to the list
52497+ of modules loaded at boot by modifying init scripts.
52498+
52499+ Modification of init scripts will most likely be needed on
52500+ Ubuntu servers with encrypted home directory support enabled,
52501+ as the first non-root user logging in will cause the ecb(aes),
52502+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
52503+
52504+config GRKERNSEC_HIDESYM
52505+ bool "Hide kernel symbols"
52506+ help
52507+ If you say Y here, getting information on loaded modules, and
52508+ displaying all kernel symbols through a syscall will be restricted
52509+ to users with CAP_SYS_MODULE. For software compatibility reasons,
52510+ /proc/kallsyms will be restricted to the root user. The RBAC
52511+ system can hide that entry even from root.
52512+
52513+ This option also prevents leaking of kernel addresses through
52514+ several /proc entries.
52515+
52516+ Note that this option is only effective provided the following
52517+ conditions are met:
52518+ 1) The kernel using grsecurity is not precompiled by some distribution
52519+ 2) You have also enabled GRKERNSEC_DMESG
52520+ 3) You are using the RBAC system and hiding other files such as your
52521+ kernel image and System.map. Alternatively, enabling this option
52522+ causes the permissions on /boot, /lib/modules, and the kernel
52523+ source directory to change at compile time to prevent
52524+ reading by non-root users.
52525+ If the above conditions are met, this option will aid in providing a
52526+ useful protection against local kernel exploitation of overflows
52527+ and arbitrary read/write vulnerabilities.
52528+
52529+config GRKERNSEC_KERN_LOCKOUT
52530+ bool "Active kernel exploit response"
52531+ depends on X86
52532+ help
52533+ If you say Y here, when a PaX alert is triggered due to suspicious
52534+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
52535+ or an OOPs occurs due to bad memory accesses, instead of just
52536+ terminating the offending process (and potentially allowing
52537+ a subsequent exploit from the same user), we will take one of two
52538+ actions:
52539+ If the user was root, we will panic the system
52540+ If the user was non-root, we will log the attempt, terminate
52541+ all processes owned by the user, then prevent them from creating
52542+ any new processes until the system is restarted
52543+ This deters repeated kernel exploitation/bruteforcing attempts
52544+ and is useful for later forensics.
52545+
52546+endmenu
52547+menu "Role Based Access Control Options"
52548+depends on GRKERNSEC
52549+
52550+config GRKERNSEC_RBAC_DEBUG
52551+ bool
52552+
52553+config GRKERNSEC_NO_RBAC
52554+ bool "Disable RBAC system"
52555+ help
52556+ If you say Y here, the /dev/grsec device will be removed from the kernel,
52557+ preventing the RBAC system from being enabled. You should only say Y
52558+ here if you have no intention of using the RBAC system, so as to prevent
52559+ an attacker with root access from misusing the RBAC system to hide files
52560+ and processes when loadable module support and /dev/[k]mem have been
52561+ locked down.
52562+
52563+config GRKERNSEC_ACL_HIDEKERN
52564+ bool "Hide kernel processes"
52565+ help
52566+ If you say Y here, all kernel threads will be hidden to all
52567+ processes but those whose subject has the "view hidden processes"
52568+ flag.
52569+
52570+config GRKERNSEC_ACL_MAXTRIES
52571+ int "Maximum tries before password lockout"
52572+ default 3
52573+ help
52574+ This option enforces the maximum number of times a user can attempt
52575+ to authorize themselves with the grsecurity RBAC system before being
52576+ denied the ability to attempt authorization again for a specified time.
52577+ The lower the number, the harder it will be to brute-force a password.
52578+
52579+config GRKERNSEC_ACL_TIMEOUT
52580+ int "Time to wait after max password tries, in seconds"
52581+ default 30
52582+ help
52583+ This option specifies the time the user must wait after attempting to
52584+ authorize to the RBAC system with the maximum number of invalid
52585+ passwords. The higher the number, the harder it will be to brute-force
52586+ a password.
52587+
52588+endmenu
52589+menu "Filesystem Protections"
52590+depends on GRKERNSEC
52591+
52592+config GRKERNSEC_PROC
52593+ bool "Proc restrictions"
52594+ help
52595+ If you say Y here, the permissions of the /proc filesystem
52596+ will be altered to enhance system security and privacy. You MUST
52597+ choose either a user only restriction or a user and group restriction.
52598+ Depending upon the option you choose, you can either restrict users to
52599+ see only the processes they themselves run ("restrict to user only"),
52600+ or choose a group whose members can view all processes and files
52601+ normally restricted to root. NOTE: If you're running identd as
52602+ a non-root user, you will have to run it as the group you specify here.
52603+
52604+config GRKERNSEC_PROC_USER
52605+ bool "Restrict /proc to user only"
52606+ depends on GRKERNSEC_PROC
52607+ help
52608+ If you say Y here, non-root users will only be able to view their own
52609+ processes, and will be restricted from viewing network-related
52610+ information and from viewing kernel symbol and module information.
52611+
52612+config GRKERNSEC_PROC_USERGROUP
52613+ bool "Allow special group"
52614+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
52615+ help
52616+ If you say Y here, you will be able to select a group that will be
52617+ able to view all processes and network-related information. If you've
52618+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
52619+ remain hidden. This option is useful if you want to run identd as
52620+ a non-root user.
52621+
52622+config GRKERNSEC_PROC_GID
52623+ int "GID for special group"
52624+ depends on GRKERNSEC_PROC_USERGROUP
52625+ default 1001
52626+
52627+config GRKERNSEC_PROC_ADD
52628+ bool "Additional restrictions"
52629+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
52630+ help
52631+ If you say Y here, additional restrictions will be placed on
52632+ /proc that keep normal users from viewing device information and
52633+ slabinfo information that could be useful for exploits.
52634+
52635+config GRKERNSEC_LINK
52636+ bool "Linking restrictions"
52637+ help
52638+ If you say Y here, /tmp race exploits will be prevented, since users
52639+ will no longer be able to follow symlinks owned by other users in
52640+ world-writable +t directories (e.g. /tmp), unless the owner of the
52641+ symlink is the owner of the directory. Users will also not be
52642+ able to hardlink to files they do not own. If the sysctl option is
52643+ enabled, a sysctl option with name "linking_restrictions" is created.
52644+
52645+config GRKERNSEC_FIFO
52646+ bool "FIFO restrictions"
52647+ help
52648+ If you say Y here, users will not be able to write to FIFOs they don't
52649+ own in world-writable +t directories (e.g. /tmp), unless the owner of
52650+ the FIFO is the same as the owner of the directory it's held in. If the sysctl
52651+ option is enabled, a sysctl option with name "fifo_restrictions" is
52652+ created.
52653+
52654+config GRKERNSEC_SYSFS_RESTRICT
52655+ bool "Sysfs/debugfs restriction"
52656+ depends on SYSFS
52657+ help
52658+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
52659+ any filesystem normally mounted under it (e.g. debugfs) will only
52660+ be accessible by root. These filesystems generally provide access
52661+ to hardware and debug information that isn't appropriate for unprivileged
52662+ users of the system. Sysfs and debugfs have also become a large source
52663+ of new vulnerabilities, ranging from infoleaks to local compromise.
52664+ There has been very little oversight with an eye toward security involved
52665+ in adding new exporters of information to these filesystems, so their
52666+ use is discouraged.
52667+ This option is equivalent to a chmod 0700 of the mount paths.
52668+
52669+config GRKERNSEC_ROFS
52670+ bool "Runtime read-only mount protection"
52671+ help
52672+ If you say Y here, a sysctl option with name "romount_protect" will
52673+ be created. By setting this option to 1 at runtime, filesystems
52674+ will be protected in the following ways:
52675+ * No new writable mounts will be allowed
52676+ * Existing read-only mounts won't be able to be remounted read/write
52677+ * Write operations will be denied on all block devices
52678+ This option acts independently of grsec_lock: once it is set to 1,
52679+ it cannot be turned off. Therefore, please be mindful of the resulting
52680+ behavior if this option is enabled in an init script on a read-only
52681+ filesystem. This feature is mainly intended for secure embedded systems.
52682+
52683+config GRKERNSEC_CHROOT
52684+ bool "Chroot jail restrictions"
52685+ help
52686+ If you say Y here, you will be able to choose several options that will
52687+ make breaking out of a chrooted jail much more difficult. If you
52688+ encounter no software incompatibilities with the following options, it
52689+ is recommended that you enable each one.
52690+
52691+config GRKERNSEC_CHROOT_MOUNT
52692+ bool "Deny mounts"
52693+ depends on GRKERNSEC_CHROOT
52694+ help
52695+ If you say Y here, processes inside a chroot will not be able to
52696+ mount or remount filesystems. If the sysctl option is enabled, a
52697+ sysctl option with name "chroot_deny_mount" is created.
52698+
52699+config GRKERNSEC_CHROOT_DOUBLE
52700+ bool "Deny double-chroots"
52701+ depends on GRKERNSEC_CHROOT
52702+ help
52703+ If you say Y here, processes inside a chroot will not be able to chroot
52704+ again outside the chroot. This is a widely used method of breaking
52705+ out of a chroot jail and should not be allowed. If the sysctl
52706+ option is enabled, a sysctl option with name
52707+ "chroot_deny_chroot" is created.
52708+
52709+config GRKERNSEC_CHROOT_PIVOT
52710+ bool "Deny pivot_root in chroot"
52711+ depends on GRKERNSEC_CHROOT
52712+ help
52713+ If you say Y here, processes inside a chroot will not be able to use
52714+ a function called pivot_root() that was introduced in Linux 2.3.41. It
52715+ works similarly to chroot in that it changes the root filesystem. This
52716+ function could be misused in a chrooted process to attempt to break out
52717+ of the chroot, and therefore should not be allowed. If the sysctl
52718+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
52719+ created.
52720+
52721+config GRKERNSEC_CHROOT_CHDIR
52722+ bool "Enforce chdir(\"/\") on all chroots"
52723+ depends on GRKERNSEC_CHROOT
52724+ help
52725+ If you say Y here, the current working directory of all newly-chrooted
52726+ applications will be set to the root directory of the chroot.
52727+ The man page on chroot(2) states:
52728+ Note that this call does not change the current working
52729+ directory, so that `.' can be outside the tree rooted at
52730+ `/'. In particular, the super-user can escape from a
52731+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
52732+
52733+ It is recommended that you say Y here, since it's not known to break
52734+ any software. If the sysctl option is enabled, a sysctl option with
52735+ name "chroot_enforce_chdir" is created.
52736+
52737+config GRKERNSEC_CHROOT_CHMOD
52738+ bool "Deny (f)chmod +s"
52739+ depends on GRKERNSEC_CHROOT
52740+ help
52741+ If you say Y here, processes inside a chroot will not be able to chmod
52742+ or fchmod files to make them have suid or sgid bits. This protects
52743+ against another published method of breaking a chroot. If the sysctl
52744+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
52745+ created.
52746+
52747+config GRKERNSEC_CHROOT_FCHDIR
52748+ bool "Deny fchdir out of chroot"
52749+ depends on GRKERNSEC_CHROOT
52750+ help
52751+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
52752+ to a file descriptor of the chrooting process that points to a directory
52753+ outside the chroot will be stopped. If the sysctl option
52754+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
52755+
52756+config GRKERNSEC_CHROOT_MKNOD
52757+ bool "Deny mknod"
52758+ depends on GRKERNSEC_CHROOT
52759+ help
52760+ If you say Y here, processes inside a chroot will not be allowed to
52761+ mknod. The problem with using mknod inside a chroot is that it
52762+ would allow an attacker to create a device entry that is the same
52763+ as one on the physical root of your system, which could be anything
52764+ from the console device to a device for your hard drive (which
52765+ they could then use to wipe the drive or steal data). It is recommended
52766+ that you say Y here, unless you run into software incompatibilities.
52767+ If the sysctl option is enabled, a sysctl option with name
52768+ "chroot_deny_mknod" is created.
52769+
52770+config GRKERNSEC_CHROOT_SHMAT
52771+ bool "Deny shmat() out of chroot"
52772+ depends on GRKERNSEC_CHROOT
52773+ help
52774+ If you say Y here, processes inside a chroot will not be able to attach
52775+ to shared memory segments that were created outside of the chroot jail.
52776+ It is recommended that you say Y here. If the sysctl option is enabled,
52777+ a sysctl option with name "chroot_deny_shmat" is created.
52778+
52779+config GRKERNSEC_CHROOT_UNIX
52780+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
52781+ depends on GRKERNSEC_CHROOT
52782+ help
52783+ If you say Y here, processes inside a chroot will not be able to
52784+ connect to abstract (meaning not belonging to a filesystem) Unix
52785+ domain sockets that were bound outside of a chroot. It is recommended
52786+ that you say Y here. If the sysctl option is enabled, a sysctl option
52787+ with name "chroot_deny_unix" is created.
52788+
52789+config GRKERNSEC_CHROOT_FINDTASK
52790+ bool "Protect outside processes"
52791+ depends on GRKERNSEC_CHROOT
52792+ help
52793+ If you say Y here, processes inside a chroot will not be able to
52794+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
52795+ getsid, or view any process outside of the chroot. If the sysctl
52796+ option is enabled, a sysctl option with name "chroot_findtask" is
52797+ created.
52798+
52799+config GRKERNSEC_CHROOT_NICE
52800+ bool "Restrict priority changes"
52801+ depends on GRKERNSEC_CHROOT
52802+ help
52803+ If you say Y here, processes inside a chroot will not be able to raise
52804+ the priority of processes in the chroot, or alter the priority of
52805+ processes outside the chroot. This provides more security than simply
52806+ removing CAP_SYS_NICE from the process' capability set. If the
52807+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
52808+ is created.
52809+
52810+config GRKERNSEC_CHROOT_SYSCTL
52811+ bool "Deny sysctl writes"
52812+ depends on GRKERNSEC_CHROOT
52813+ help
52814+ If you say Y here, an attacker in a chroot will not be able to
52815+ write to sysctl entries, either by sysctl(2) or through a /proc
52816+ interface. It is strongly recommended that you say Y here. If the
52817+ sysctl option is enabled, a sysctl option with name
52818+ "chroot_deny_sysctl" is created.
52819+
52820+config GRKERNSEC_CHROOT_CAPS
52821+ bool "Capability restrictions"
52822+ depends on GRKERNSEC_CHROOT
52823+ help
52824+ If you say Y here, the capabilities on all root processes within a
52825+ chroot jail will be lowered to stop module insertion, raw I/O,
52826+ system and net admin tasks, rebooting the system, modifying immutable
52827+ files, modifying IPC owned by another, and changing the system time.
52828+ This is left as an option because it can break some apps. Disable this
52829+ if your chrooted apps are having problems performing those kinds of
52830+ tasks. If the sysctl option is enabled, a sysctl option with
52831+ name "chroot_caps" is created.
52832+
52833+endmenu
52834+menu "Kernel Auditing"
52835+depends on GRKERNSEC
52836+
52837+config GRKERNSEC_AUDIT_GROUP
52838+ bool "Single group for auditing"
52839+ help
52840+ If you say Y here, the exec, chdir, and (un)mount logging features
52841+ will only operate on a group you specify. This option is recommended
52842+ if you only want to watch certain users instead of having a large
52843+ amount of logs from the entire system. If the sysctl option is enabled,
52844+ a sysctl option with name "audit_group" is created.
52845+
52846+config GRKERNSEC_AUDIT_GID
52847+ int "GID for auditing"
52848+ depends on GRKERNSEC_AUDIT_GROUP
52849+ default 1007
52850+
52851+config GRKERNSEC_EXECLOG
52852+ bool "Exec logging"
52853+ help
52854+ If you say Y here, all execve() calls will be logged (since the
52855+ other exec*() calls are frontends to execve(), all execution
52856+ will be logged). Useful for shell-servers that like to keep track
52857+ of their users. If the sysctl option is enabled, a sysctl option with
52858+ name "exec_logging" is created.
52859+ WARNING: When enabled, this option will produce a LOT of logs, especially
52860+ on an active system.
52861+
52862+config GRKERNSEC_RESLOG
52863+ bool "Resource logging"
52864+ help
52865+ If you say Y here, all attempts to overstep resource limits will
52866+ be logged with the resource name, the requested size, and the current
52867+ limit. It is highly recommended that you say Y here. If the sysctl
52868+ option is enabled, a sysctl option with name "resource_logging" is
52869+ created. If the RBAC system is enabled, the sysctl value is ignored.
52870+
52871+config GRKERNSEC_CHROOT_EXECLOG
52872+ bool "Log execs within chroot"
52873+ help
52874+ If you say Y here, all executions inside a chroot jail will be logged
52875+ to syslog. This can cause a large amount of logs if certain
52876+ applications (eg. djb's daemontools) are installed on the system, and
52877+ is therefore left as an option. If the sysctl option is enabled, a
52878+ sysctl option with name "chroot_execlog" is created.
52879+
52880+config GRKERNSEC_AUDIT_PTRACE
52881+ bool "Ptrace logging"
52882+ help
52883+ If you say Y here, all attempts to attach to a process via ptrace
52884+ will be logged. If the sysctl option is enabled, a sysctl option
52885+ with name "audit_ptrace" is created.
52886+
52887+config GRKERNSEC_AUDIT_CHDIR
52888+ bool "Chdir logging"
52889+ help
52890+ If you say Y here, all chdir() calls will be logged. If the sysctl
52891+ option is enabled, a sysctl option with name "audit_chdir" is created.
52892+
52893+config GRKERNSEC_AUDIT_MOUNT
52894+ bool "(Un)Mount logging"
52895+ help
52896+ If you say Y here, all mounts and unmounts will be logged. If the
52897+ sysctl option is enabled, a sysctl option with name "audit_mount" is
52898+ created.
52899+
52900+config GRKERNSEC_SIGNAL
52901+ bool "Signal logging"
52902+ help
52903+ If you say Y here, certain important signals will be logged, such as
52904+ SIGSEGV, which will, as a result, inform you when an error in a program
52905+ has occurred, which in some cases could indicate a possible exploit attempt.
52906+ If the sysctl option is enabled, a sysctl option with name
52907+ "signal_logging" is created.
52908+
52909+config GRKERNSEC_FORKFAIL
52910+ bool "Fork failure logging"
52911+ help
52912+ If you say Y here, all failed fork() attempts will be logged.
52913+ This could suggest a fork bomb, or someone attempting to overstep
52914+ their process limit. If the sysctl option is enabled, a sysctl option
52915+ with name "forkfail_logging" is created.
52916+
52917+config GRKERNSEC_TIME
52918+ bool "Time change logging"
52919+ help
52920+ If you say Y here, any changes of the system clock will be logged.
52921+ If the sysctl option is enabled, a sysctl option with name
52922+ "timechange_logging" is created.
52923+
52924+config GRKERNSEC_PROC_IPADDR
52925+ bool "/proc/<pid>/ipaddr support"
52926+ help
52927+ If you say Y here, a new entry will be added to each /proc/<pid>
52928+ directory that contains the IP address of the person using the task.
52929+ The IP is carried across local TCP and AF_UNIX stream sockets.
52930+ This information can be useful for IDS/IPSes to perform remote response
52931+ to a local attack. The entry is readable by only the owner of the
52932+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
52933+ the RBAC system), and thus does not create privacy concerns.
52934+
52935+config GRKERNSEC_RWXMAP_LOG
52936+ bool "Denied RWX mmap/mprotect logging"
52937+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
52938+ help
52939+ If you say Y here, calls to mmap() and mprotect() with explicit
52940+ usage of PROT_WRITE and PROT_EXEC together will be logged when
52941+ denied by the PAX_MPROTECT feature. If the sysctl option is
52942+ enabled, a sysctl option with name "rwxmap_logging" is created.
52943+
52944+config GRKERNSEC_AUDIT_TEXTREL
52945+ bool "ELF text relocations logging (READ HELP)"
52946+ depends on PAX_MPROTECT
52947+ help
52948+ If you say Y here, text relocations will be logged with the filename
52949+ of the offending library or binary. The purpose of the feature is
52950+ to help Linux distribution developers get rid of libraries and
52951+ binaries that need text relocations which hinder the future progress
52952+ of PaX. Only Linux distribution developers should say Y here, and
52953+ never on a production machine, as this option creates an information
52954+ leak that could aid an attacker in defeating the randomization of
52955+ a single memory region. If the sysctl option is enabled, a sysctl
52956+ option with name "audit_textrel" is created.
52957+
52958+endmenu
52959+
52960+menu "Executable Protections"
52961+depends on GRKERNSEC
52962+
52963+config GRKERNSEC_EXECVE
52964+ bool "Enforce RLIMIT_NPROC on execs"
52965+ help
52966+ If you say Y here, users with a resource limit on processes will
52967+ have the value checked during execve() calls. The current system
52968+ only checks the system limit during fork() calls. If the sysctl option
52969+ is enabled, a sysctl option with name "execve_limiting" is created.
52970+
52971+config GRKERNSEC_DMESG
52972+ bool "Dmesg(8) restriction"
52973+ help
52974+ If you say Y here, non-root users will not be able to use dmesg(8)
52975+ to view up to the last 4KB of messages in the kernel's log buffer.
52976+ The kernel's log buffer often contains kernel addresses and other
52977+ identifying information useful to an attacker in fingerprinting a
52978+ system for a targeted exploit.
52979+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
52980+ created.
52981+
52982+config GRKERNSEC_HARDEN_PTRACE
52983+ bool "Deter ptrace-based process snooping"
52984+ help
52985+ If you say Y here, TTY sniffers and other malicious monitoring
52986+ programs implemented through ptrace will be defeated. If you
52987+ have been using the RBAC system, this option has already been
52988+ enabled for several years for all users, with the ability to make
52989+ fine-grained exceptions.
52990+
52991+ This option only affects the ability of non-root users to ptrace
52992+ processes that are not a descendant of the ptracing process.
52993+ This means that strace ./binary and gdb ./binary will still work,
52994+ but attaching to arbitrary processes will not. If the sysctl
52995+ option is enabled, a sysctl option with name "harden_ptrace" is
52996+ created.
52997+
52998+config GRKERNSEC_TPE
52999+ bool "Trusted Path Execution (TPE)"
53000+ help
53001+ If you say Y here, you will be able to choose a gid to add to the
53002+ supplementary groups of users you want to mark as "untrusted."
53003+ These users will not be able to execute any files that are not in
53004+ root-owned directories writable only by root. If the sysctl option
53005+ is enabled, a sysctl option with name "tpe" is created.
53006+
53007+config GRKERNSEC_TPE_ALL
53008+ bool "Partially restrict all non-root users"
53009+ depends on GRKERNSEC_TPE
53010+ help
53011+ If you say Y here, all non-root users will be covered under
53012+ a weaker TPE restriction. This is separate from, and in addition to,
53013+ the main TPE options that you have selected elsewhere. Thus, if a
53014+ "trusted" GID is chosen, this restriction applies to even that GID.
53015+ Under this restriction, all non-root users will only be allowed to
53016+ execute files in directories they own that are not group or
53017+ world-writable, or in directories owned by root and writable only by
53018+ root. If the sysctl option is enabled, a sysctl option with name
53019+ "tpe_restrict_all" is created.
53020+
53021+config GRKERNSEC_TPE_INVERT
53022+ bool "Invert GID option"
53023+ depends on GRKERNSEC_TPE
53024+ help
53025+ If you say Y here, the group you specify in the TPE configuration will
53026+ decide what group TPE restrictions will be *disabled* for. This
53027+ option is useful if you want TPE restrictions to be applied to most
53028+ users on the system. If the sysctl option is enabled, a sysctl option
53029+ with name "tpe_invert" is created. Unlike other sysctl options, this
53030+ entry will default to on for backward-compatibility.
53031+
53032+config GRKERNSEC_TPE_GID
53033+ int "GID for untrusted users"
53034+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
53035+ default 1005
53036+ help
53037+ Setting this GID determines what group TPE restrictions will be
53038+ *enabled* for. If the sysctl option is enabled, a sysctl option
53039+ with name "tpe_gid" is created.
53040+
53041+config GRKERNSEC_TPE_GID
53042+ int "GID for trusted users"
53043+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
53044+ default 1005
53045+ help
53046+ Setting this GID determines what group TPE restrictions will be
53047+ *disabled* for. If the sysctl option is enabled, a sysctl option
53048+ with name "tpe_gid" is created.
53049+
53050+endmenu
53051+menu "Network Protections"
53052+depends on GRKERNSEC
53053+
53054+config GRKERNSEC_RANDNET
53055+ bool "Larger entropy pools"
53056+ help
53057+ If you say Y here, the entropy pools used for many features of Linux
53058+ and grsecurity will be doubled in size. Since several grsecurity
53059+ features use additional randomness, it is recommended that you say Y
53060+ here. Saying Y here has a similar effect to modifying
53061+ /proc/sys/kernel/random/poolsize.
53062+
53063+config GRKERNSEC_BLACKHOLE
53064+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
53065+ help
53066+ If you say Y here, neither TCP resets nor ICMP
53067+ destination-unreachable packets will be sent in response to packets
53068+ sent to ports for which no associated listening process exists.
53069+ This feature supports both IPv4 and IPv6 and exempts the
53070+ loopback interface from blackholing. Enabling this feature
53071+ makes a host more resilient to DoS attacks and reduces network
53072+ visibility against scanners.
53073+
53074+ The blackhole feature as-implemented is equivalent to the FreeBSD
53075+ blackhole feature, as it prevents RST responses to all packets, not
53076+ just SYNs. Under most application behavior this causes no
53077+ problems, but applications (like haproxy) may not close certain
53078+ connections in a way that cleanly terminates them on the remote
53079+ end, leaving the remote host in LAST_ACK state. Because of this
53080+ side-effect and to prevent intentional LAST_ACK DoSes, this
53081+ feature also adds automatic mitigation against such attacks.
53082+ The mitigation drastically reduces the amount of time a socket
53083+ can spend in LAST_ACK state. If you're using haproxy and not
53084+ all servers it connects to have this option enabled, consider
53085+ disabling this feature on the haproxy host.
53086+
53087+ If the sysctl option is enabled, two sysctl options with names
53088+ "ip_blackhole" and "lastack_retries" will be created.
53089+ While "ip_blackhole" takes the standard zero/non-zero on/off
53090+ toggle, "lastack_retries" uses the same kinds of values as
53091+ "tcp_retries1" and "tcp_retries2". The default value of 4
53092+ prevents a socket from lasting more than 45 seconds in LAST_ACK
53093+ state.
53094+
53095+config GRKERNSEC_SOCKET
53096+ bool "Socket restrictions"
53097+ help
53098+ If you say Y here, you will be able to choose from several options.
53099+ If you assign a GID on your system and add it to the supplementary
53100+ groups of users you want to restrict socket access to, this patch
53101+ will perform up to three things, based on the option(s) you choose.
53102+
53103+config GRKERNSEC_SOCKET_ALL
53104+ bool "Deny any sockets to group"
53105+ depends on GRKERNSEC_SOCKET
53106+ help
53107+ If you say Y here, you will be able to choose a GID whose users will
53108+ be unable to connect to other hosts from your machine or run server
53109+ applications from your machine. If the sysctl option is enabled, a
53110+ sysctl option with name "socket_all" is created.
53111+
53112+config GRKERNSEC_SOCKET_ALL_GID
53113+ int "GID to deny all sockets for"
53114+ depends on GRKERNSEC_SOCKET_ALL
53115+ default 1004
53116+ help
53117+ Here you can choose the GID to disable socket access for. Remember to
53118+ add the users you want socket access disabled for to the GID
53119+ specified here. If the sysctl option is enabled, a sysctl option
53120+ with name "socket_all_gid" is created.
53121+
53122+config GRKERNSEC_SOCKET_CLIENT
53123+ bool "Deny client sockets to group"
53124+ depends on GRKERNSEC_SOCKET
53125+ help
53126+ If you say Y here, you will be able to choose a GID whose users will
53127+ be unable to connect to other hosts from your machine, but will be
53128+ able to run servers. If this option is enabled, all users in the group
53129+ you specify will have to use passive mode when initiating ftp transfers
53130+ from the shell on your machine. If the sysctl option is enabled, a
53131+ sysctl option with name "socket_client" is created.
53132+
53133+config GRKERNSEC_SOCKET_CLIENT_GID
53134+ int "GID to deny client sockets for"
53135+ depends on GRKERNSEC_SOCKET_CLIENT
53136+ default 1003
53137+ help
53138+ Here you can choose the GID to disable client socket access for.
53139+ Remember to add the users you want client socket access disabled for to
53140+ the GID specified here. If the sysctl option is enabled, a sysctl
53141+ option with name "socket_client_gid" is created.
53142+
53143+config GRKERNSEC_SOCKET_SERVER
53144+ bool "Deny server sockets to group"
53145+ depends on GRKERNSEC_SOCKET
53146+ help
53147+ If you say Y here, you will be able to choose a GID whose users will
53148+ be unable to run server applications from your machine. If the sysctl
53149+ option is enabled, a sysctl option with name "socket_server" is created.
53150+
53151+config GRKERNSEC_SOCKET_SERVER_GID
53152+ int "GID to deny server sockets for"
53153+ depends on GRKERNSEC_SOCKET_SERVER
53154+ default 1002
53155+ help
53156+ Here you can choose the GID to disable server socket access for.
53157+ Remember to add the users you want server socket access disabled for to
53158+ the GID specified here. If the sysctl option is enabled, a sysctl
53159+ option with name "socket_server_gid" is created.
53160+
53161+endmenu
53162+menu "Sysctl support"
53163+depends on GRKERNSEC && SYSCTL
53164+
53165+config GRKERNSEC_SYSCTL
53166+ bool "Sysctl support"
53167+ help
53168+ If you say Y here, you will be able to change the options that
53169+ grsecurity runs with at bootup, without having to recompile your
53170+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
53171+ to enable (1) or disable (0) various features. All the sysctl entries
53172+ are mutable until the "grsec_lock" entry is set to a non-zero value.
53173+ All features enabled in the kernel configuration are disabled at boot
53174+ if you do not say Y to the "Turn on features by default" option.
53175+ All options should be set at startup, and the grsec_lock entry should
53176+ be set to a non-zero value after all the options are set.
53177+ *THIS IS EXTREMELY IMPORTANT*
53178+
53179+config GRKERNSEC_SYSCTL_DISTRO
53180+ bool "Extra sysctl support for distro makers (READ HELP)"
53181+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
53182+ help
53183+ If you say Y here, additional sysctl options will be created
53184+ for features that affect processes running as root. Therefore,
53185+ it is critical when using this option that the grsec_lock entry be
53186+ enabled after boot. Only distros shipping prebuilt kernel packages
53187+ with this option enabled, and that can ensure grsec_lock is enabled
53188+ after boot, should use this option.
53189+ *Failure to set grsec_lock after boot makes all grsec features
53190+ this option covers useless*
53191+
53192+ Currently this option creates the following sysctl entries:
53193+ "Disable Privileged I/O": "disable_priv_io"
53194+
53195+config GRKERNSEC_SYSCTL_ON
53196+ bool "Turn on features by default"
53197+ depends on GRKERNSEC_SYSCTL
53198+ help
53199+ If you say Y here, all features enabled in the kernel configuration
53200+ will be enabled at boot time, rather than starting out disabled.
53201+ It is recommended you say Y here unless
53202+ there is some reason you would want all sysctl-tunable features to
53203+ be disabled by default. As mentioned elsewhere, it is important
53204+ to enable the grsec_lock entry once you have finished modifying
53205+ the sysctl entries.
53206+
53207+endmenu
53208+menu "Logging Options"
53209+depends on GRKERNSEC
53210+
53211+config GRKERNSEC_FLOODTIME
53212+ int "Seconds in between log messages (minimum)"
53213+ default 10
53214+ help
53215+ This option allows you to enforce the number of seconds between
53216+ grsecurity log messages. The default should be suitable for most
53217+ people; however, if you choose to change it, choose a value small enough
53218+ to allow informative logs to be produced, but large enough to
53219+ prevent flooding.
53220+
53221+config GRKERNSEC_FLOODBURST
53222+ int "Number of messages in a burst (maximum)"
53223+ default 4
53224+ help
53225+ This option allows you to choose the maximum number of messages allowed
53226+ within the flood time interval you chose in a separate option. The
53227+ default should be suitable for most people; however, if you find that
53228+ many of your logs are being interpreted as flooding, you may want to
53229+ raise this value.
53230+
53231+endmenu
53232+
53233+endmenu
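The "Sysctl support" menu above describes a runtime workflow: enable the desired entries under /proc/sys/kernel/grsecurity, then set grsec_lock to a non-zero value so they can no longer be changed. Below is a minimal usage sketch, not part of the patch itself; it assumes GRKERNSEC_SYSCTL is enabled, and the entry names it uses ("dmesg", "chroot_deny_chroot", "grsec_lock") are the ones named in the help texts above.

/* Minimal sketch: set a few grsecurity sysctls from an init program,
 * then lock the interface, as the GRKERNSEC_SYSCTL help text insists. */
#include <stdio.h>

static int grsec_write(const char *entry, const char *value)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", entry);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%s\n", value);
	return fclose(f);
}

int main(void)
{
	grsec_write("dmesg", "1");              /* GRKERNSEC_DMESG */
	grsec_write("chroot_deny_chroot", "1"); /* GRKERNSEC_CHROOT_DOUBLE */
	/* last step: prevent further changes until reboot */
	return grsec_write("grsec_lock", "1");
}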
53234diff -urNp linux-2.6.32.41/grsecurity/Makefile linux-2.6.32.41/grsecurity/Makefile
53235--- linux-2.6.32.41/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
53236+++ linux-2.6.32.41/grsecurity/Makefile 2011-05-24 20:27:46.000000000 -0400
53237@@ -0,0 +1,33 @@
53238+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
53239+# between 2001 and 2009 it was completely redesigned by Brad Spengler
53240+# into an RBAC system
53241+#
53242+# All code in this directory and various hooks inserted throughout the kernel
53243+# are copyright Brad Spengler - Open Source Security, Inc., and released
53244+# under the GPL v2 or higher
53245+
53246+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
53247+ grsec_mount.o grsec_sig.o grsec_sock.o grsec_sysctl.o \
53248+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
53249+
53250+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
53251+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
53252+ gracl_learn.o grsec_log.o
53253+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
53254+
53255+ifdef CONFIG_NET
53256+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
53257+endif
53258+
53259+ifndef CONFIG_GRKERNSEC
53260+obj-y += grsec_disabled.o
53261+endif
53262+
53263+ifdef CONFIG_GRKERNSEC_HIDESYM
53264+extra-y := grsec_hidesym.o
53265+$(obj)/grsec_hidesym.o:
53266+ @-chmod -f 500 /boot
53267+ @-chmod -f 500 /lib/modules
53268+ @-chmod -f 700 .
53269+ @echo ' grsec: protected kernel image paths'
53270+endif
53271diff -urNp linux-2.6.32.41/include/acpi/acpi_drivers.h linux-2.6.32.41/include/acpi/acpi_drivers.h
53272--- linux-2.6.32.41/include/acpi/acpi_drivers.h 2011-03-27 14:31:47.000000000 -0400
53273+++ linux-2.6.32.41/include/acpi/acpi_drivers.h 2011-04-17 15:56:46.000000000 -0400
53274@@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acp
53275 Dock Station
53276 -------------------------------------------------------------------------- */
53277 struct acpi_dock_ops {
53278- acpi_notify_handler handler;
53279- acpi_notify_handler uevent;
53280+ const acpi_notify_handler handler;
53281+ const acpi_notify_handler uevent;
53282 };
53283
53284 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
53285@@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle ha
53286 extern int register_dock_notifier(struct notifier_block *nb);
53287 extern void unregister_dock_notifier(struct notifier_block *nb);
53288 extern int register_hotplug_dock_device(acpi_handle handle,
53289- struct acpi_dock_ops *ops,
53290+ const struct acpi_dock_ops *ops,
53291 void *context);
53292 extern void unregister_hotplug_dock_device(acpi_handle handle);
53293 #else
53294@@ -144,7 +144,7 @@ static inline void unregister_dock_notif
53295 {
53296 }
53297 static inline int register_hotplug_dock_device(acpi_handle handle,
53298- struct acpi_dock_ops *ops,
53299+ const struct acpi_dock_ops *ops,
53300 void *context)
53301 {
53302 return -ENODEV;
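A brief caller-side sketch of what the constification above enables: the dock ops table can now live in read-only memory. This is illustrative only and not part of the patch; the callback and table names are hypothetical, and it assumes the usual acpi_notify_handler signature (acpi_handle, u32 event, void *context).

/* Illustrative only: an ops table that can be const now that
 * register_hotplug_dock_device() takes a const pointer. */
static void example_dock_notify(acpi_handle handle, u32 event, void *context)
{
	/* handle dock/undock notification for this device */
}

static const struct acpi_dock_ops example_dock_ops = {
	.handler = example_dock_notify,
	.uevent  = example_dock_notify,
};

/* registration: register_hotplug_dock_device(handle, &example_dock_ops, NULL); */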
53303diff -urNp linux-2.6.32.41/include/asm-generic/atomic-long.h linux-2.6.32.41/include/asm-generic/atomic-long.h
53304--- linux-2.6.32.41/include/asm-generic/atomic-long.h 2011-03-27 14:31:47.000000000 -0400
53305+++ linux-2.6.32.41/include/asm-generic/atomic-long.h 2011-05-16 21:46:57.000000000 -0400
53306@@ -22,6 +22,12 @@
53307
53308 typedef atomic64_t atomic_long_t;
53309
53310+#ifdef CONFIG_PAX_REFCOUNT
53311+typedef atomic64_unchecked_t atomic_long_unchecked_t;
53312+#else
53313+typedef atomic64_t atomic_long_unchecked_t;
53314+#endif
53315+
53316 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
53317
53318 static inline long atomic_long_read(atomic_long_t *l)
53319@@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
53320 return (long)atomic64_read(v);
53321 }
53322
53323+#ifdef CONFIG_PAX_REFCOUNT
53324+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
53325+{
53326+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53327+
53328+ return (long)atomic64_read_unchecked(v);
53329+}
53330+#endif
53331+
53332 static inline void atomic_long_set(atomic_long_t *l, long i)
53333 {
53334 atomic64_t *v = (atomic64_t *)l;
53335@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
53336 atomic64_set(v, i);
53337 }
53338
53339+#ifdef CONFIG_PAX_REFCOUNT
53340+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
53341+{
53342+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53343+
53344+ atomic64_set_unchecked(v, i);
53345+}
53346+#endif
53347+
53348 static inline void atomic_long_inc(atomic_long_t *l)
53349 {
53350 atomic64_t *v = (atomic64_t *)l;
53351@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
53352 atomic64_inc(v);
53353 }
53354
53355+#ifdef CONFIG_PAX_REFCOUNT
53356+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
53357+{
53358+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53359+
53360+ atomic64_inc_unchecked(v);
53361+}
53362+#endif
53363+
53364 static inline void atomic_long_dec(atomic_long_t *l)
53365 {
53366 atomic64_t *v = (atomic64_t *)l;
53367@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
53368 atomic64_dec(v);
53369 }
53370
53371+#ifdef CONFIG_PAX_REFCOUNT
53372+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
53373+{
53374+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53375+
53376+ atomic64_dec_unchecked(v);
53377+}
53378+#endif
53379+
53380 static inline void atomic_long_add(long i, atomic_long_t *l)
53381 {
53382 atomic64_t *v = (atomic64_t *)l;
53383@@ -59,6 +101,15 @@ static inline void atomic_long_add(long
53384 atomic64_add(i, v);
53385 }
53386
53387+#ifdef CONFIG_PAX_REFCOUNT
53388+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
53389+{
53390+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53391+
53392+ atomic64_add_unchecked(i, v);
53393+}
53394+#endif
53395+
53396 static inline void atomic_long_sub(long i, atomic_long_t *l)
53397 {
53398 atomic64_t *v = (atomic64_t *)l;
53399@@ -115,6 +166,15 @@ static inline long atomic_long_inc_retur
53400 return (long)atomic64_inc_return(v);
53401 }
53402
53403+#ifdef CONFIG_PAX_REFCOUNT
53404+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
53405+{
53406+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53407+
53408+ return (long)atomic64_inc_return_unchecked(v);
53409+}
53410+#endif
53411+
53412 static inline long atomic_long_dec_return(atomic_long_t *l)
53413 {
53414 atomic64_t *v = (atomic64_t *)l;
53415@@ -140,6 +200,12 @@ static inline long atomic_long_add_unles
53416
53417 typedef atomic_t atomic_long_t;
53418
53419+#ifdef CONFIG_PAX_REFCOUNT
53420+typedef atomic_unchecked_t atomic_long_unchecked_t;
53421+#else
53422+typedef atomic_t atomic_long_unchecked_t;
53423+#endif
53424+
53425 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
53426 static inline long atomic_long_read(atomic_long_t *l)
53427 {
53428@@ -148,6 +214,15 @@ static inline long atomic_long_read(atom
53429 return (long)atomic_read(v);
53430 }
53431
53432+#ifdef CONFIG_PAX_REFCOUNT
53433+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
53434+{
53435+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53436+
53437+ return (long)atomic_read_unchecked(v);
53438+}
53439+#endif
53440+
53441 static inline void atomic_long_set(atomic_long_t *l, long i)
53442 {
53443 atomic_t *v = (atomic_t *)l;
53444@@ -155,6 +230,15 @@ static inline void atomic_long_set(atomi
53445 atomic_set(v, i);
53446 }
53447
53448+#ifdef CONFIG_PAX_REFCOUNT
53449+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
53450+{
53451+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53452+
53453+ atomic_set_unchecked(v, i);
53454+}
53455+#endif
53456+
53457 static inline void atomic_long_inc(atomic_long_t *l)
53458 {
53459 atomic_t *v = (atomic_t *)l;
53460@@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomi
53461 atomic_inc(v);
53462 }
53463
53464+#ifdef CONFIG_PAX_REFCOUNT
53465+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
53466+{
53467+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53468+
53469+ atomic_inc_unchecked(v);
53470+}
53471+#endif
53472+
53473 static inline void atomic_long_dec(atomic_long_t *l)
53474 {
53475 atomic_t *v = (atomic_t *)l;
53476@@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomi
53477 atomic_dec(v);
53478 }
53479
53480+#ifdef CONFIG_PAX_REFCOUNT
53481+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
53482+{
53483+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53484+
53485+ atomic_dec_unchecked(v);
53486+}
53487+#endif
53488+
53489 static inline void atomic_long_add(long i, atomic_long_t *l)
53490 {
53491 atomic_t *v = (atomic_t *)l;
53492@@ -176,6 +278,15 @@ static inline void atomic_long_add(long
53493 atomic_add(i, v);
53494 }
53495
53496+#ifdef CONFIG_PAX_REFCOUNT
53497+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
53498+{
53499+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53500+
53501+ atomic_add_unchecked(i, v);
53502+}
53503+#endif
53504+
53505 static inline void atomic_long_sub(long i, atomic_long_t *l)
53506 {
53507 atomic_t *v = (atomic_t *)l;
53508@@ -232,6 +343,15 @@ static inline long atomic_long_inc_retur
53509 return (long)atomic_inc_return(v);
53510 }
53511
53512+#ifdef CONFIG_PAX_REFCOUNT
53513+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
53514+{
53515+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53516+
53517+ return (long)atomic_inc_return_unchecked(v);
53518+}
53519+#endif
53520+
53521 static inline long atomic_long_dec_return(atomic_long_t *l)
53522 {
53523 atomic_t *v = (atomic_t *)l;
53524@@ -255,4 +375,47 @@ static inline long atomic_long_add_unles
53525
53526 #endif /* BITS_PER_LONG == 64 */
53527
53528+#ifdef CONFIG_PAX_REFCOUNT
53529+static inline void pax_refcount_needs_these_functions(void)
53530+{
53531+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
53532+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
53533+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
53534+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
53535+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
53536+ atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
53537+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
53538+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
53539+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
53540+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
53541+ atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
53542+
53543+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
53544+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
53545+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
53546+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
53547+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
53548+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
53549+}
53550+#else
53551+#define atomic_read_unchecked(v) atomic_read(v)
53552+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
53553+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
53554+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
53555+#define atomic_inc_unchecked(v) atomic_inc(v)
53556+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
53557+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
53558+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
53559+#define atomic_dec_unchecked(v) atomic_dec(v)
53560+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
53561+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
53562+
53563+#define atomic_long_read_unchecked(v) atomic_long_read(v)
53564+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
53565+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
53566+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
53567+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
53568+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
53569+#endif
53570+
53571 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
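The _unchecked API introduced above lets callers opt individual counters out of PAX_REFCOUNT's overflow detection: free-running statistics counters that may legitimately wrap use the *_unchecked variants, while true reference counts keep the checked atomic_t. A hedged kernel-style sketch of that split follows; the struct and function names are hypothetical, not taken from the patch.

/* Illustrative only: a wrapping statistics counter uses the unchecked
 * variant, while the lifetime refcount stays a checked atomic_t. */
#include <asm/atomic.h>

struct example_stats {
	atomic_unchecked_t	rx_packets;	/* may wrap, not a bug */
	atomic_t		refcount;	/* overflow here is a bug */
};

static inline void example_stats_rx(struct example_stats *s)
{
	atomic_inc_unchecked(&s->rx_packets);
}

static inline void example_stats_get(struct example_stats *s)
{
	atomic_inc(&s->refcount);	/* trapped on overflow under PAX_REFCOUNT */
}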
53572diff -urNp linux-2.6.32.41/include/asm-generic/cache.h linux-2.6.32.41/include/asm-generic/cache.h
53573--- linux-2.6.32.41/include/asm-generic/cache.h 2011-03-27 14:31:47.000000000 -0400
53574+++ linux-2.6.32.41/include/asm-generic/cache.h 2011-05-04 17:56:28.000000000 -0400
53575@@ -6,7 +6,7 @@
53576 * cache lines need to provide their own cache.h.
53577 */
53578
53579-#define L1_CACHE_SHIFT 5
53580-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
53581+#define L1_CACHE_SHIFT 5U
53582+#define L1_CACHE_BYTES (1U << L1_CACHE_SHIFT)
53583
53584 #endif /* __ASM_GENERIC_CACHE_H */
53585diff -urNp linux-2.6.32.41/include/asm-generic/dma-mapping-common.h linux-2.6.32.41/include/asm-generic/dma-mapping-common.h
53586--- linux-2.6.32.41/include/asm-generic/dma-mapping-common.h 2011-03-27 14:31:47.000000000 -0400
53587+++ linux-2.6.32.41/include/asm-generic/dma-mapping-common.h 2011-04-17 15:56:46.000000000 -0400
53588@@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_
53589 enum dma_data_direction dir,
53590 struct dma_attrs *attrs)
53591 {
53592- struct dma_map_ops *ops = get_dma_ops(dev);
53593+ const struct dma_map_ops *ops = get_dma_ops(dev);
53594 dma_addr_t addr;
53595
53596 kmemcheck_mark_initialized(ptr, size);
53597@@ -30,7 +30,7 @@ static inline void dma_unmap_single_attr
53598 enum dma_data_direction dir,
53599 struct dma_attrs *attrs)
53600 {
53601- struct dma_map_ops *ops = get_dma_ops(dev);
53602+ const struct dma_map_ops *ops = get_dma_ops(dev);
53603
53604 BUG_ON(!valid_dma_direction(dir));
53605 if (ops->unmap_page)
53606@@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struc
53607 int nents, enum dma_data_direction dir,
53608 struct dma_attrs *attrs)
53609 {
53610- struct dma_map_ops *ops = get_dma_ops(dev);
53611+ const struct dma_map_ops *ops = get_dma_ops(dev);
53612 int i, ents;
53613 struct scatterlist *s;
53614
53615@@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(st
53616 int nents, enum dma_data_direction dir,
53617 struct dma_attrs *attrs)
53618 {
53619- struct dma_map_ops *ops = get_dma_ops(dev);
53620+ const struct dma_map_ops *ops = get_dma_ops(dev);
53621
53622 BUG_ON(!valid_dma_direction(dir));
53623 debug_dma_unmap_sg(dev, sg, nents, dir);
53624@@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(st
53625 size_t offset, size_t size,
53626 enum dma_data_direction dir)
53627 {
53628- struct dma_map_ops *ops = get_dma_ops(dev);
53629+ const struct dma_map_ops *ops = get_dma_ops(dev);
53630 dma_addr_t addr;
53631
53632 kmemcheck_mark_initialized(page_address(page) + offset, size);
53633@@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(st
53634 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
53635 size_t size, enum dma_data_direction dir)
53636 {
53637- struct dma_map_ops *ops = get_dma_ops(dev);
53638+ const struct dma_map_ops *ops = get_dma_ops(dev);
53639
53640 BUG_ON(!valid_dma_direction(dir));
53641 if (ops->unmap_page)
53642@@ -97,7 +97,7 @@ static inline void dma_sync_single_for_c
53643 size_t size,
53644 enum dma_data_direction dir)
53645 {
53646- struct dma_map_ops *ops = get_dma_ops(dev);
53647+ const struct dma_map_ops *ops = get_dma_ops(dev);
53648
53649 BUG_ON(!valid_dma_direction(dir));
53650 if (ops->sync_single_for_cpu)
53651@@ -109,7 +109,7 @@ static inline void dma_sync_single_for_d
53652 dma_addr_t addr, size_t size,
53653 enum dma_data_direction dir)
53654 {
53655- struct dma_map_ops *ops = get_dma_ops(dev);
53656+ const struct dma_map_ops *ops = get_dma_ops(dev);
53657
53658 BUG_ON(!valid_dma_direction(dir));
53659 if (ops->sync_single_for_device)
53660@@ -123,7 +123,7 @@ static inline void dma_sync_single_range
53661 size_t size,
53662 enum dma_data_direction dir)
53663 {
53664- struct dma_map_ops *ops = get_dma_ops(dev);
53665+ const struct dma_map_ops *ops = get_dma_ops(dev);
53666
53667 BUG_ON(!valid_dma_direction(dir));
53668 if (ops->sync_single_range_for_cpu) {
53669@@ -140,7 +140,7 @@ static inline void dma_sync_single_range
53670 size_t size,
53671 enum dma_data_direction dir)
53672 {
53673- struct dma_map_ops *ops = get_dma_ops(dev);
53674+ const struct dma_map_ops *ops = get_dma_ops(dev);
53675
53676 BUG_ON(!valid_dma_direction(dir));
53677 if (ops->sync_single_range_for_device) {
53678@@ -155,7 +155,7 @@ static inline void
53679 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
53680 int nelems, enum dma_data_direction dir)
53681 {
53682- struct dma_map_ops *ops = get_dma_ops(dev);
53683+ const struct dma_map_ops *ops = get_dma_ops(dev);
53684
53685 BUG_ON(!valid_dma_direction(dir));
53686 if (ops->sync_sg_for_cpu)
53687@@ -167,7 +167,7 @@ static inline void
53688 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
53689 int nelems, enum dma_data_direction dir)
53690 {
53691- struct dma_map_ops *ops = get_dma_ops(dev);
53692+ const struct dma_map_ops *ops = get_dma_ops(dev);
53693
53694 BUG_ON(!valid_dma_direction(dir));
53695 if (ops->sync_sg_for_device)
53696diff -urNp linux-2.6.32.41/include/asm-generic/futex.h linux-2.6.32.41/include/asm-generic/futex.h
53697--- linux-2.6.32.41/include/asm-generic/futex.h 2011-03-27 14:31:47.000000000 -0400
53698+++ linux-2.6.32.41/include/asm-generic/futex.h 2011-04-17 15:56:46.000000000 -0400
53699@@ -6,7 +6,7 @@
53700 #include <asm/errno.h>
53701
53702 static inline int
53703-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
53704+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
53705 {
53706 int op = (encoded_op >> 28) & 7;
53707 int cmp = (encoded_op >> 24) & 15;
53708@@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op,
53709 }
53710
53711 static inline int
53712-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
53713+futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
53714 {
53715 return -ENOSYS;
53716 }
53717diff -urNp linux-2.6.32.41/include/asm-generic/int-l64.h linux-2.6.32.41/include/asm-generic/int-l64.h
53718--- linux-2.6.32.41/include/asm-generic/int-l64.h 2011-03-27 14:31:47.000000000 -0400
53719+++ linux-2.6.32.41/include/asm-generic/int-l64.h 2011-04-17 15:56:46.000000000 -0400
53720@@ -46,6 +46,8 @@ typedef unsigned int u32;
53721 typedef signed long s64;
53722 typedef unsigned long u64;
53723
53724+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
53725+
53726 #define S8_C(x) x
53727 #define U8_C(x) x ## U
53728 #define S16_C(x) x
53729diff -urNp linux-2.6.32.41/include/asm-generic/int-ll64.h linux-2.6.32.41/include/asm-generic/int-ll64.h
53730--- linux-2.6.32.41/include/asm-generic/int-ll64.h 2011-03-27 14:31:47.000000000 -0400
53731+++ linux-2.6.32.41/include/asm-generic/int-ll64.h 2011-04-17 15:56:46.000000000 -0400
53732@@ -51,6 +51,8 @@ typedef unsigned int u32;
53733 typedef signed long long s64;
53734 typedef unsigned long long u64;
53735
53736+typedef unsigned long long intoverflow_t;
53737+
53738 #define S8_C(x) x
53739 #define U8_C(x) x ## U
53740 #define S16_C(x) x
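The intoverflow_t typedefs added above (a 128-bit type where long is 64-bit, unsigned long long where long is 32-bit) provide a type wide enough to check a size computation for overflow before it is truncated back to an unsigned long. A hedged sketch of the kind of check this enables; the helper name is hypothetical, and in-kernel ULONG_MAX comes from linux/kernel.h.

/* Illustrative only: detect n * size overflowing an unsigned long by
 * performing the multiplication in the wider intoverflow_t type. */
static inline int example_mul_would_overflow(unsigned long n, unsigned long size)
{
	return (intoverflow_t)n * size > ULONG_MAX;
}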
53741diff -urNp linux-2.6.32.41/include/asm-generic/kmap_types.h linux-2.6.32.41/include/asm-generic/kmap_types.h
53742--- linux-2.6.32.41/include/asm-generic/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
53743+++ linux-2.6.32.41/include/asm-generic/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
53744@@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
53745 KMAP_D(16) KM_IRQ_PTE,
53746 KMAP_D(17) KM_NMI,
53747 KMAP_D(18) KM_NMI_PTE,
53748-KMAP_D(19) KM_TYPE_NR
53749+KMAP_D(19) KM_CLEARPAGE,
53750+KMAP_D(20) KM_TYPE_NR
53751 };
53752
53753 #undef KMAP_D
53754diff -urNp linux-2.6.32.41/include/asm-generic/pgtable.h linux-2.6.32.41/include/asm-generic/pgtable.h
53755--- linux-2.6.32.41/include/asm-generic/pgtable.h 2011-03-27 14:31:47.000000000 -0400
53756+++ linux-2.6.32.41/include/asm-generic/pgtable.h 2011-04-17 15:56:46.000000000 -0400
53757@@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_ar
53758 unsigned long size);
53759 #endif
53760
53761+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
53762+static inline unsigned long pax_open_kernel(void) { return 0; }
53763+#endif
53764+
53765+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
53766+static inline unsigned long pax_close_kernel(void) { return 0; }
53767+#endif
53768+
53769 #endif /* !__ASSEMBLY__ */
53770
53771 #endif /* _ASM_GENERIC_PGTABLE_H */
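The pax_open_kernel()/pax_close_kernel() stubs above are no-ops returning 0 on architectures that do not supply their own versions; where implemented, they bracket writes to otherwise read-only kernel data (such as the .data.read_only input section this patch adds to .rodata further below). A hedged sketch of the call pattern; the table name and explicit section attribute are illustrative assumptions, not taken from the patch.

/* Illustrative only: briefly allow a write to normally read-only data. */
static int example_table[4] __attribute__((__section__(".data.read_only")));

static void example_set_entry(int idx, int val)
{
	pax_open_kernel();
	example_table[idx] = val;	/* write permitted between open/close */
	pax_close_kernel();
}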
53772diff -urNp linux-2.6.32.41/include/asm-generic/pgtable-nopmd.h linux-2.6.32.41/include/asm-generic/pgtable-nopmd.h
53773--- linux-2.6.32.41/include/asm-generic/pgtable-nopmd.h 2011-03-27 14:31:47.000000000 -0400
53774+++ linux-2.6.32.41/include/asm-generic/pgtable-nopmd.h 2011-04-17 15:56:46.000000000 -0400
53775@@ -1,14 +1,19 @@
53776 #ifndef _PGTABLE_NOPMD_H
53777 #define _PGTABLE_NOPMD_H
53778
53779-#ifndef __ASSEMBLY__
53780-
53781 #include <asm-generic/pgtable-nopud.h>
53782
53783-struct mm_struct;
53784-
53785 #define __PAGETABLE_PMD_FOLDED
53786
53787+#define PMD_SHIFT PUD_SHIFT
53788+#define PTRS_PER_PMD 1
53789+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
53790+#define PMD_MASK (~(PMD_SIZE-1))
53791+
53792+#ifndef __ASSEMBLY__
53793+
53794+struct mm_struct;
53795+
53796 /*
53797 * Having the pmd type consist of a pud gets the size right, and allows
53798 * us to conceptually access the pud entry that this pmd is folded into
53799@@ -16,11 +21,6 @@ struct mm_struct;
53800 */
53801 typedef struct { pud_t pud; } pmd_t;
53802
53803-#define PMD_SHIFT PUD_SHIFT
53804-#define PTRS_PER_PMD 1
53805-#define PMD_SIZE (1UL << PMD_SHIFT)
53806-#define PMD_MASK (~(PMD_SIZE-1))
53807-
53808 /*
53809 * The "pud_xxx()" functions here are trivial for a folded two-level
53810 * setup: the pmd is never bad, and a pmd always exists (as it's folded
53811diff -urNp linux-2.6.32.41/include/asm-generic/pgtable-nopud.h linux-2.6.32.41/include/asm-generic/pgtable-nopud.h
53812--- linux-2.6.32.41/include/asm-generic/pgtable-nopud.h 2011-03-27 14:31:47.000000000 -0400
53813+++ linux-2.6.32.41/include/asm-generic/pgtable-nopud.h 2011-04-17 15:56:46.000000000 -0400
53814@@ -1,10 +1,15 @@
53815 #ifndef _PGTABLE_NOPUD_H
53816 #define _PGTABLE_NOPUD_H
53817
53818-#ifndef __ASSEMBLY__
53819-
53820 #define __PAGETABLE_PUD_FOLDED
53821
53822+#define PUD_SHIFT PGDIR_SHIFT
53823+#define PTRS_PER_PUD 1
53824+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
53825+#define PUD_MASK (~(PUD_SIZE-1))
53826+
53827+#ifndef __ASSEMBLY__
53828+
53829 /*
53830 * Having the pud type consist of a pgd gets the size right, and allows
53831 * us to conceptually access the pgd entry that this pud is folded into
53832@@ -12,11 +17,6 @@
53833 */
53834 typedef struct { pgd_t pgd; } pud_t;
53835
53836-#define PUD_SHIFT PGDIR_SHIFT
53837-#define PTRS_PER_PUD 1
53838-#define PUD_SIZE (1UL << PUD_SHIFT)
53839-#define PUD_MASK (~(PUD_SIZE-1))
53840-
53841 /*
53842 * The "pgd_xxx()" functions here are trivial for a folded two-level
53843 * setup: the pud is never bad, and a pud always exists (as it's folded
53844diff -urNp linux-2.6.32.41/include/asm-generic/vmlinux.lds.h linux-2.6.32.41/include/asm-generic/vmlinux.lds.h
53845--- linux-2.6.32.41/include/asm-generic/vmlinux.lds.h 2011-03-27 14:31:47.000000000 -0400
53846+++ linux-2.6.32.41/include/asm-generic/vmlinux.lds.h 2011-04-17 15:56:46.000000000 -0400
53847@@ -199,6 +199,7 @@
53848 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
53849 VMLINUX_SYMBOL(__start_rodata) = .; \
53850 *(.rodata) *(.rodata.*) \
53851+ *(.data.read_only) \
53852 *(__vermagic) /* Kernel version magic */ \
53853 *(__markers_strings) /* Markers: strings */ \
53854 *(__tracepoints_strings)/* Tracepoints: strings */ \
53855@@ -656,22 +657,24 @@
53856 * section in the linker script will go there too. @phdr should have
53857 * a leading colon.
53858 *
53859- * Note that this macros defines __per_cpu_load as an absolute symbol.
53860+ * Note that this macros defines per_cpu_load as an absolute symbol.
53861 * If there is no need to put the percpu section at a predetermined
53862 * address, use PERCPU().
53863 */
53864 #define PERCPU_VADDR(vaddr, phdr) \
53865- VMLINUX_SYMBOL(__per_cpu_load) = .; \
53866- .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
53867+ per_cpu_load = .; \
53868+ .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
53869 - LOAD_OFFSET) { \
53870+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
53871 VMLINUX_SYMBOL(__per_cpu_start) = .; \
53872 *(.data.percpu.first) \
53873- *(.data.percpu.page_aligned) \
53874 *(.data.percpu) \
53875+ . = ALIGN(PAGE_SIZE); \
53876+ *(.data.percpu.page_aligned) \
53877 *(.data.percpu.shared_aligned) \
53878 VMLINUX_SYMBOL(__per_cpu_end) = .; \
53879 } phdr \
53880- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
53881+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
53882
53883 /**
53884 * PERCPU - define output section for percpu area, simple version
53885diff -urNp linux-2.6.32.41/include/drm/drmP.h linux-2.6.32.41/include/drm/drmP.h
53886--- linux-2.6.32.41/include/drm/drmP.h 2011-03-27 14:31:47.000000000 -0400
53887+++ linux-2.6.32.41/include/drm/drmP.h 2011-04-17 15:56:46.000000000 -0400
53888@@ -71,6 +71,7 @@
53889 #include <linux/workqueue.h>
53890 #include <linux/poll.h>
53891 #include <asm/pgalloc.h>
53892+#include <asm/local.h>
53893 #include "drm.h"
53894
53895 #include <linux/idr.h>
53896@@ -814,7 +815,7 @@ struct drm_driver {
53897 void (*vgaarb_irq)(struct drm_device *dev, bool state);
53898
53899 /* Driver private ops for this object */
53900- struct vm_operations_struct *gem_vm_ops;
53901+ const struct vm_operations_struct *gem_vm_ops;
53902
53903 int major;
53904 int minor;
53905@@ -917,7 +918,7 @@ struct drm_device {
53906
53907 /** \name Usage Counters */
53908 /*@{ */
53909- int open_count; /**< Outstanding files open */
53910+ local_t open_count; /**< Outstanding files open */
53911 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
53912 atomic_t vma_count; /**< Outstanding vma areas open */
53913 int buf_use; /**< Buffers in use -- cannot alloc */
53914@@ -928,7 +929,7 @@ struct drm_device {
53915 /*@{ */
53916 unsigned long counters;
53917 enum drm_stat_type types[15];
53918- atomic_t counts[15];
53919+ atomic_unchecked_t counts[15];
53920 /*@} */
53921
53922 struct list_head filelist;
53923@@ -1016,7 +1017,7 @@ struct drm_device {
53924 struct pci_controller *hose;
53925 #endif
53926 struct drm_sg_mem *sg; /**< Scatter gather memory */
53927- unsigned int num_crtcs; /**< Number of CRTCs on this device */
53928+ unsigned int num_crtcs; /**< Number of CRTCs on this device */
53929 void *dev_private; /**< device private data */
53930 void *mm_private;
53931 struct address_space *dev_mapping;
53932@@ -1042,11 +1043,11 @@ struct drm_device {
53933 spinlock_t object_name_lock;
53934 struct idr object_name_idr;
53935 atomic_t object_count;
53936- atomic_t object_memory;
53937+ atomic_unchecked_t object_memory;
53938 atomic_t pin_count;
53939- atomic_t pin_memory;
53940+ atomic_unchecked_t pin_memory;
53941 atomic_t gtt_count;
53942- atomic_t gtt_memory;
53943+ atomic_unchecked_t gtt_memory;
53944 uint32_t gtt_total;
53945 uint32_t invalidate_domains; /* domains pending invalidation */
53946 uint32_t flush_domains; /* domains pending flush */
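The drmP.h hunks above turn the per-device open_count into a local_t and several statistics counters into atomic_unchecked_t, i.e. counters that are still updated atomically but exempted from PaX overflow checking. Since asm/local.h is kernel-only, the sketch below recreates just the open/release accounting idea with C11 atomics; the fake_* names are stand-ins, not DRM functions.

/* sketch: open/release accounting similar to the drm_device open_count */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int open_count;   /* zero-initialised at program start */

static void fake_drm_open(void)
{
    atomic_fetch_add(&open_count, 1);
}

static int fake_drm_release(void)
{
    /* returns nonzero when the last opener goes away */
    return atomic_fetch_sub(&open_count, 1) == 1;
}

int main(void)
{
    fake_drm_open();
    fake_drm_open();
    printf("last close? %d\n", fake_drm_release());   /* 0 */
    printf("last close? %d\n", fake_drm_release());   /* 1 */
    return 0;
}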
53947diff -urNp linux-2.6.32.41/include/linux/a.out.h linux-2.6.32.41/include/linux/a.out.h
53948--- linux-2.6.32.41/include/linux/a.out.h 2011-03-27 14:31:47.000000000 -0400
53949+++ linux-2.6.32.41/include/linux/a.out.h 2011-04-17 15:56:46.000000000 -0400
53950@@ -39,6 +39,14 @@ enum machine_type {
53951 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
53952 };
53953
53954+/* Constants for the N_FLAGS field */
53955+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
53956+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
53957+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
53958+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
53959+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
53960+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
53961+
53962 #if !defined (N_MAGIC)
53963 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
53964 #endif
53965diff -urNp linux-2.6.32.41/include/linux/atmdev.h linux-2.6.32.41/include/linux/atmdev.h
53966--- linux-2.6.32.41/include/linux/atmdev.h 2011-03-27 14:31:47.000000000 -0400
53967+++ linux-2.6.32.41/include/linux/atmdev.h 2011-04-17 15:56:46.000000000 -0400
53968@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
53969 #endif
53970
53971 struct k_atm_aal_stats {
53972-#define __HANDLE_ITEM(i) atomic_t i
53973+#define __HANDLE_ITEM(i) atomic_unchecked_t i
53974 __AAL_STAT_ITEMS
53975 #undef __HANDLE_ITEM
53976 };
53977diff -urNp linux-2.6.32.41/include/linux/backlight.h linux-2.6.32.41/include/linux/backlight.h
53978--- linux-2.6.32.41/include/linux/backlight.h 2011-03-27 14:31:47.000000000 -0400
53979+++ linux-2.6.32.41/include/linux/backlight.h 2011-04-17 15:56:46.000000000 -0400
53980@@ -36,18 +36,18 @@ struct backlight_device;
53981 struct fb_info;
53982
53983 struct backlight_ops {
53984- unsigned int options;
53985+ const unsigned int options;
53986
53987 #define BL_CORE_SUSPENDRESUME (1 << 0)
53988
53989 /* Notify the backlight driver some property has changed */
53990- int (*update_status)(struct backlight_device *);
53991+ int (* const update_status)(struct backlight_device *);
53992 /* Return the current backlight brightness (accounting for power,
53993 fb_blank etc.) */
53994- int (*get_brightness)(struct backlight_device *);
53995+ int (* const get_brightness)(struct backlight_device *);
53996 /* Check if given framebuffer device is the one bound to this backlight;
53997 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
53998- int (*check_fb)(struct fb_info *);
53999+ int (* const check_fb)(struct fb_info *);
54000 };
54001
54002 /* This structure defines all the properties of a backlight */
54003@@ -86,7 +86,7 @@ struct backlight_device {
54004 registered this device has been unloaded, and if class_get_devdata()
54005 points to something in the body of that driver, it is also invalid. */
54006 struct mutex ops_lock;
54007- struct backlight_ops *ops;
54008+ const struct backlight_ops *ops;
54009
54010 /* The framebuffer notifier block */
54011 struct notifier_block fb_notif;
54012@@ -103,7 +103,7 @@ static inline void backlight_update_stat
54013 }
54014
54015 extern struct backlight_device *backlight_device_register(const char *name,
54016- struct device *dev, void *devdata, struct backlight_ops *ops);
54017+ struct device *dev, void *devdata, const struct backlight_ops *ops);
54018 extern void backlight_device_unregister(struct backlight_device *bd);
54019 extern void backlight_force_update(struct backlight_device *bd,
54020 enum backlight_update_reason reason);
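The backlight.h change is one instance of a pattern repeated throughout this patch (blkdev.h, dma-mapping.h and fs.h below): tables of function pointers are const-qualified so they can live in read-only memory and cannot be retargeted at runtime. A standalone sketch of the idea, with made-up type and function names:

/* sketch: a constified ops table; reassigning a member no longer compiles */
#include <stdio.h>

struct demo_ops {
    int (* const update_status)(int);
};

static int real_update(int v)
{
    printf("update %d\n", v);
    return 0;
}

static const struct demo_ops demo = { .update_status = real_update };

int main(void)
{
    demo.update_status(1);
    /* demo.update_status = other_fn;   <- rejected at compile time */
    return 0;
}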
54021diff -urNp linux-2.6.32.41/include/linux/binfmts.h linux-2.6.32.41/include/linux/binfmts.h
54022--- linux-2.6.32.41/include/linux/binfmts.h 2011-04-17 17:00:52.000000000 -0400
54023+++ linux-2.6.32.41/include/linux/binfmts.h 2011-04-17 15:56:46.000000000 -0400
54024@@ -83,6 +83,7 @@ struct linux_binfmt {
54025 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
54026 int (*load_shlib)(struct file *);
54027 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
54028+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
54029 unsigned long min_coredump; /* minimal dump size */
54030 int hasvdso;
54031 };
54032diff -urNp linux-2.6.32.41/include/linux/blkdev.h linux-2.6.32.41/include/linux/blkdev.h
54033--- linux-2.6.32.41/include/linux/blkdev.h 2011-03-27 14:31:47.000000000 -0400
54034+++ linux-2.6.32.41/include/linux/blkdev.h 2011-04-17 15:56:46.000000000 -0400
54035@@ -1265,19 +1265,19 @@ static inline int blk_integrity_rq(struc
54036 #endif /* CONFIG_BLK_DEV_INTEGRITY */
54037
54038 struct block_device_operations {
54039- int (*open) (struct block_device *, fmode_t);
54040- int (*release) (struct gendisk *, fmode_t);
54041- int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54042- int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54043- int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54044- int (*direct_access) (struct block_device *, sector_t,
54045+ int (* const open) (struct block_device *, fmode_t);
54046+ int (* const release) (struct gendisk *, fmode_t);
54047+ int (* const locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54048+ int (* const ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54049+ int (* const compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54050+ int (* const direct_access) (struct block_device *, sector_t,
54051 void **, unsigned long *);
54052- int (*media_changed) (struct gendisk *);
54053- unsigned long long (*set_capacity) (struct gendisk *,
54054+ int (* const media_changed) (struct gendisk *);
54055+ unsigned long long (* const set_capacity) (struct gendisk *,
54056 unsigned long long);
54057- int (*revalidate_disk) (struct gendisk *);
54058- int (*getgeo)(struct block_device *, struct hd_geometry *);
54059- struct module *owner;
54060+ int (* const revalidate_disk) (struct gendisk *);
54061+ int (*const getgeo)(struct block_device *, struct hd_geometry *);
54062+ struct module * const owner;
54063 };
54064
54065 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
54066diff -urNp linux-2.6.32.41/include/linux/blktrace_api.h linux-2.6.32.41/include/linux/blktrace_api.h
54067--- linux-2.6.32.41/include/linux/blktrace_api.h 2011-03-27 14:31:47.000000000 -0400
54068+++ linux-2.6.32.41/include/linux/blktrace_api.h 2011-05-04 17:56:28.000000000 -0400
54069@@ -160,7 +160,7 @@ struct blk_trace {
54070 struct dentry *dir;
54071 struct dentry *dropped_file;
54072 struct dentry *msg_file;
54073- atomic_t dropped;
54074+ atomic_unchecked_t dropped;
54075 };
54076
54077 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
54078diff -urNp linux-2.6.32.41/include/linux/byteorder/little_endian.h linux-2.6.32.41/include/linux/byteorder/little_endian.h
54079--- linux-2.6.32.41/include/linux/byteorder/little_endian.h 2011-03-27 14:31:47.000000000 -0400
54080+++ linux-2.6.32.41/include/linux/byteorder/little_endian.h 2011-04-17 15:56:46.000000000 -0400
54081@@ -42,51 +42,51 @@
54082
54083 static inline __le64 __cpu_to_le64p(const __u64 *p)
54084 {
54085- return (__force __le64)*p;
54086+ return (__force const __le64)*p;
54087 }
54088 static inline __u64 __le64_to_cpup(const __le64 *p)
54089 {
54090- return (__force __u64)*p;
54091+ return (__force const __u64)*p;
54092 }
54093 static inline __le32 __cpu_to_le32p(const __u32 *p)
54094 {
54095- return (__force __le32)*p;
54096+ return (__force const __le32)*p;
54097 }
54098 static inline __u32 __le32_to_cpup(const __le32 *p)
54099 {
54100- return (__force __u32)*p;
54101+ return (__force const __u32)*p;
54102 }
54103 static inline __le16 __cpu_to_le16p(const __u16 *p)
54104 {
54105- return (__force __le16)*p;
54106+ return (__force const __le16)*p;
54107 }
54108 static inline __u16 __le16_to_cpup(const __le16 *p)
54109 {
54110- return (__force __u16)*p;
54111+ return (__force const __u16)*p;
54112 }
54113 static inline __be64 __cpu_to_be64p(const __u64 *p)
54114 {
54115- return (__force __be64)__swab64p(p);
54116+ return (__force const __be64)__swab64p(p);
54117 }
54118 static inline __u64 __be64_to_cpup(const __be64 *p)
54119 {
54120- return __swab64p((__u64 *)p);
54121+ return __swab64p((const __u64 *)p);
54122 }
54123 static inline __be32 __cpu_to_be32p(const __u32 *p)
54124 {
54125- return (__force __be32)__swab32p(p);
54126+ return (__force const __be32)__swab32p(p);
54127 }
54128 static inline __u32 __be32_to_cpup(const __be32 *p)
54129 {
54130- return __swab32p((__u32 *)p);
54131+ return __swab32p((const __u32 *)p);
54132 }
54133 static inline __be16 __cpu_to_be16p(const __u16 *p)
54134 {
54135- return (__force __be16)__swab16p(p);
54136+ return (__force const __be16)__swab16p(p);
54137 }
54138 static inline __u16 __be16_to_cpup(const __be16 *p)
54139 {
54140- return __swab16p((__u16 *)p);
54141+ return __swab16p((const __u16 *)p);
54142 }
54143 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
54144 #define __le64_to_cpus(x) do { (void)(x); } while (0)
54145diff -urNp linux-2.6.32.41/include/linux/cache.h linux-2.6.32.41/include/linux/cache.h
54146--- linux-2.6.32.41/include/linux/cache.h 2011-03-27 14:31:47.000000000 -0400
54147+++ linux-2.6.32.41/include/linux/cache.h 2011-04-17 15:56:46.000000000 -0400
54148@@ -16,6 +16,10 @@
54149 #define __read_mostly
54150 #endif
54151
54152+#ifndef __read_only
54153+#define __read_only __read_mostly
54154+#endif
54155+
54156 #ifndef ____cacheline_aligned
54157 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
54158 #endif
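The cache.h hunk gives architectures that do not define __read_only a fallback to __read_mostly; on PaX-aware configurations such data can instead be emitted into the .data.read_only input section that the vmlinux.lds.h hunk earlier collects into .rodata. The userspace sketch below only shows the section-attribute mechanism; the variable name and its use are illustrative.

/* sketch: placing a variable into a named section such as .data.read_only */
#include <stdio.h>

#define __read_only __attribute__((__section__(".data.read_only")))

static int hardening_enabled __read_only = 1;

int main(void)
{
    printf("hardening_enabled = %d\n", hardening_enabled);
    return 0;
}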
54159diff -urNp linux-2.6.32.41/include/linux/capability.h linux-2.6.32.41/include/linux/capability.h
54160--- linux-2.6.32.41/include/linux/capability.h 2011-03-27 14:31:47.000000000 -0400
54161+++ linux-2.6.32.41/include/linux/capability.h 2011-04-17 15:56:46.000000000 -0400
54162@@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff
54163 (security_real_capable_noaudit((t), (cap)) == 0)
54164
54165 extern int capable(int cap);
54166+int capable_nolog(int cap);
54167
54168 /* audit system wants to get cap info from files as well */
54169 struct dentry;
54170diff -urNp linux-2.6.32.41/include/linux/compiler-gcc4.h linux-2.6.32.41/include/linux/compiler-gcc4.h
54171--- linux-2.6.32.41/include/linux/compiler-gcc4.h 2011-03-27 14:31:47.000000000 -0400
54172+++ linux-2.6.32.41/include/linux/compiler-gcc4.h 2011-04-17 15:56:46.000000000 -0400
54173@@ -36,4 +36,8 @@
54174 the kernel context */
54175 #define __cold __attribute__((__cold__))
54176
54177+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
54178+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
54179+#define __bos0(ptr) __bos((ptr), 0)
54180+#define __bos1(ptr) __bos((ptr), 1)
54181 #endif
54182diff -urNp linux-2.6.32.41/include/linux/compiler.h linux-2.6.32.41/include/linux/compiler.h
54183--- linux-2.6.32.41/include/linux/compiler.h 2011-03-27 14:31:47.000000000 -0400
54184+++ linux-2.6.32.41/include/linux/compiler.h 2011-04-17 15:56:46.000000000 -0400
54185@@ -256,6 +256,22 @@ void ftrace_likely_update(struct ftrace_
54186 #define __cold
54187 #endif
54188
54189+#ifndef __alloc_size
54190+#define __alloc_size
54191+#endif
54192+
54193+#ifndef __bos
54194+#define __bos
54195+#endif
54196+
54197+#ifndef __bos0
54198+#define __bos0
54199+#endif
54200+
54201+#ifndef __bos1
54202+#define __bos1
54203+#endif
54204+
54205 /* Simple shorthand for a section definition */
54206 #ifndef __section
54207 # define __section(S) __attribute__ ((__section__(#S)))
54208@@ -278,6 +294,7 @@ void ftrace_likely_update(struct ftrace_
54209 * use is to mediate communication between process-level code and irq/NMI
54210 * handlers, all running on the same CPU.
54211 */
54212-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
54213+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
54214+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
54215
54216 #endif /* __LINUX_COMPILER_H */
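The compiler-gcc4.h and compiler.h hunks above introduce __alloc_size/__bos* so allocator-style functions can advertise the size of the object they return to __builtin_object_size, and split ACCESS_ONCE into a const-reading form plus an explicit ACCESS_ONCE_RW. The sketch below demonstrates only the alloc_size/object-size interaction in userspace; xmalloc and the 64-byte request are illustrative, and gcc typically needs optimisation enabled (-O1 or higher) to propagate the size.

/* sketch: alloc_size lets __builtin_object_size see through a wrapper */
#include <stdio.h>
#include <stdlib.h>

#define __alloc_size(...) __attribute__((alloc_size(__VA_ARGS__)))
#define __bos0(ptr)       __builtin_object_size((ptr), 0)

static void *xmalloc(size_t n) __alloc_size(1);
static void *xmalloc(size_t n)
{
    return malloc(n);
}

int main(void)
{
    char *p = xmalloc(64);
    /* with the attribute and -O1+, gcc can report 64 instead of (size_t)-1 */
    printf("object size: %zu\n", __bos0(p));
    free(p);
    return 0;
}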
54217diff -urNp linux-2.6.32.41/include/linux/dcache.h linux-2.6.32.41/include/linux/dcache.h
54218--- linux-2.6.32.41/include/linux/dcache.h 2011-03-27 14:31:47.000000000 -0400
54219+++ linux-2.6.32.41/include/linux/dcache.h 2011-04-23 13:34:46.000000000 -0400
54220@@ -119,6 +119,8 @@ struct dentry {
54221 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
54222 };
54223
54224+#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
54225+
54226 /*
54227 * dentry->d_lock spinlock nesting subclasses:
54228 *
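The dcache.h hunk derives DNAME_INLINE_LEN from the distance between the start of d_iname and the end of struct dentry, so the inline name buffer automatically absorbs any trailing padding. The same arithmetic on a stand-in structure (fields and sizes are invented for illustration):

/* sketch: offsetof-based inline-buffer length, as in DNAME_INLINE_LEN */
#include <stddef.h>
#include <stdio.h>

struct fake_dentry {
    void *d_parent;
    unsigned int d_flags;
    unsigned char d_iname[36];   /* illustrative inline name buffer */
};

#define DNAME_INLINE_LEN \
    (sizeof(struct fake_dentry) - offsetof(struct fake_dentry, d_iname))

int main(void)
{
    /* counts the declared buffer plus whatever tail padding the struct has */
    printf("DNAME_INLINE_LEN = %zu\n", DNAME_INLINE_LEN);
    return 0;
}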
54229diff -urNp linux-2.6.32.41/include/linux/decompress/mm.h linux-2.6.32.41/include/linux/decompress/mm.h
54230--- linux-2.6.32.41/include/linux/decompress/mm.h 2011-03-27 14:31:47.000000000 -0400
54231+++ linux-2.6.32.41/include/linux/decompress/mm.h 2011-04-17 15:56:46.000000000 -0400
54232@@ -78,7 +78,7 @@ static void free(void *where)
54233 * warnings when not needed (indeed large_malloc / large_free are not
54234 * needed by inflate */
54235
54236-#define malloc(a) kmalloc(a, GFP_KERNEL)
54237+#define malloc(a) kmalloc((a), GFP_KERNEL)
54238 #define free(a) kfree(a)
54239
54240 #define large_malloc(a) vmalloc(a)
54241diff -urNp linux-2.6.32.41/include/linux/dma-mapping.h linux-2.6.32.41/include/linux/dma-mapping.h
54242--- linux-2.6.32.41/include/linux/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
54243+++ linux-2.6.32.41/include/linux/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
54244@@ -16,50 +16,50 @@ enum dma_data_direction {
54245 };
54246
54247 struct dma_map_ops {
54248- void* (*alloc_coherent)(struct device *dev, size_t size,
54249+ void* (* const alloc_coherent)(struct device *dev, size_t size,
54250 dma_addr_t *dma_handle, gfp_t gfp);
54251- void (*free_coherent)(struct device *dev, size_t size,
54252+ void (* const free_coherent)(struct device *dev, size_t size,
54253 void *vaddr, dma_addr_t dma_handle);
54254- dma_addr_t (*map_page)(struct device *dev, struct page *page,
54255+ dma_addr_t (* const map_page)(struct device *dev, struct page *page,
54256 unsigned long offset, size_t size,
54257 enum dma_data_direction dir,
54258 struct dma_attrs *attrs);
54259- void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
54260+ void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
54261 size_t size, enum dma_data_direction dir,
54262 struct dma_attrs *attrs);
54263- int (*map_sg)(struct device *dev, struct scatterlist *sg,
54264+ int (* const map_sg)(struct device *dev, struct scatterlist *sg,
54265 int nents, enum dma_data_direction dir,
54266 struct dma_attrs *attrs);
54267- void (*unmap_sg)(struct device *dev,
54268+ void (* const unmap_sg)(struct device *dev,
54269 struct scatterlist *sg, int nents,
54270 enum dma_data_direction dir,
54271 struct dma_attrs *attrs);
54272- void (*sync_single_for_cpu)(struct device *dev,
54273+ void (* const sync_single_for_cpu)(struct device *dev,
54274 dma_addr_t dma_handle, size_t size,
54275 enum dma_data_direction dir);
54276- void (*sync_single_for_device)(struct device *dev,
54277+ void (* const sync_single_for_device)(struct device *dev,
54278 dma_addr_t dma_handle, size_t size,
54279 enum dma_data_direction dir);
54280- void (*sync_single_range_for_cpu)(struct device *dev,
54281+ void (* const sync_single_range_for_cpu)(struct device *dev,
54282 dma_addr_t dma_handle,
54283 unsigned long offset,
54284 size_t size,
54285 enum dma_data_direction dir);
54286- void (*sync_single_range_for_device)(struct device *dev,
54287+ void (* const sync_single_range_for_device)(struct device *dev,
54288 dma_addr_t dma_handle,
54289 unsigned long offset,
54290 size_t size,
54291 enum dma_data_direction dir);
54292- void (*sync_sg_for_cpu)(struct device *dev,
54293+ void (* const sync_sg_for_cpu)(struct device *dev,
54294 struct scatterlist *sg, int nents,
54295 enum dma_data_direction dir);
54296- void (*sync_sg_for_device)(struct device *dev,
54297+ void (* const sync_sg_for_device)(struct device *dev,
54298 struct scatterlist *sg, int nents,
54299 enum dma_data_direction dir);
54300- int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
54301- int (*dma_supported)(struct device *dev, u64 mask);
54302+ int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
54303+ int (* const dma_supported)(struct device *dev, u64 mask);
54304 int (*set_dma_mask)(struct device *dev, u64 mask);
54305- int is_phys;
54306+ const int is_phys;
54307 };
54308
54309 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
54310diff -urNp linux-2.6.32.41/include/linux/dst.h linux-2.6.32.41/include/linux/dst.h
54311--- linux-2.6.32.41/include/linux/dst.h 2011-03-27 14:31:47.000000000 -0400
54312+++ linux-2.6.32.41/include/linux/dst.h 2011-04-17 15:56:46.000000000 -0400
54313@@ -380,7 +380,7 @@ struct dst_node
54314 struct thread_pool *pool;
54315
54316 /* Transaction IDs live here */
54317- atomic_long_t gen;
54318+ atomic_long_unchecked_t gen;
54319
54320 /*
54321 * How frequently and how many times transaction
54322diff -urNp linux-2.6.32.41/include/linux/elf.h linux-2.6.32.41/include/linux/elf.h
54323--- linux-2.6.32.41/include/linux/elf.h 2011-03-27 14:31:47.000000000 -0400
54324+++ linux-2.6.32.41/include/linux/elf.h 2011-04-17 15:56:46.000000000 -0400
54325@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
54326 #define PT_GNU_EH_FRAME 0x6474e550
54327
54328 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
54329+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
54330+
54331+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
54332+
54333+/* Constants for the e_flags field */
54334+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
54335+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
54336+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
54337+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
54338+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
54339+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
54340
54341 /* These constants define the different elf file types */
54342 #define ET_NONE 0
54343@@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
54344 #define DT_DEBUG 21
54345 #define DT_TEXTREL 22
54346 #define DT_JMPREL 23
54347+#define DT_FLAGS 30
54348+ #define DF_TEXTREL 0x00000004
54349 #define DT_ENCODING 32
54350 #define OLD_DT_LOOS 0x60000000
54351 #define DT_LOOS 0x6000000d
54352@@ -230,6 +243,19 @@ typedef struct elf64_hdr {
54353 #define PF_W 0x2
54354 #define PF_X 0x1
54355
54356+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
54357+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
54358+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
54359+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
54360+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
54361+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
54362+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
54363+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
54364+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
54365+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
54366+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
54367+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
54368+
54369 typedef struct elf32_phdr{
54370 Elf32_Word p_type;
54371 Elf32_Off p_offset;
54372@@ -322,6 +348,8 @@ typedef struct elf64_shdr {
54373 #define EI_OSABI 7
54374 #define EI_PAD 8
54375
54376+#define EI_PAX 14
54377+
54378 #define ELFMAG0 0x7f /* EI_MAG */
54379 #define ELFMAG1 'E'
54380 #define ELFMAG2 'L'
54381@@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
54382 #define elf_phdr elf32_phdr
54383 #define elf_note elf32_note
54384 #define elf_addr_t Elf32_Off
54385+#define elf_dyn Elf32_Dyn
54386
54387 #else
54388
54389@@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
54390 #define elf_phdr elf64_phdr
54391 #define elf_note elf64_note
54392 #define elf_addr_t Elf64_Off
54393+#define elf_dyn Elf64_Dyn
54394
54395 #endif
54396
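The elf.h additions above define the PT_PAX_FLAGS program header type and the paired PF_*/PF_NO* bits that let a binary request or refuse individual PaX features. The sketch below only decodes such a p_flags word; the value is hard-coded for illustration rather than read from a real ELF program header table.

/* sketch: interpreting paired enable/disable bits from a PT_PAX_FLAGS entry */
#include <stdio.h>

#define PF_PAGEEXEC   (1U << 4)
#define PF_NOPAGEEXEC (1U << 5)
#define PF_MPROTECT   (1U << 8)
#define PF_NOMPROTECT (1U << 9)

static const char *tri_state(unsigned int flags, unsigned int on, unsigned int off)
{
    if (flags & on)
        return "forced on";
    if (flags & off)
        return "forced off";
    return "default";
}

int main(void)
{
    unsigned int p_flags = PF_PAGEEXEC | PF_NOMPROTECT;   /* illustrative */
    printf("PAGEEXEC: %s\n", tri_state(p_flags, PF_PAGEEXEC, PF_NOPAGEEXEC));
    printf("MPROTECT: %s\n", tri_state(p_flags, PF_MPROTECT, PF_NOMPROTECT));
    return 0;
}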
54397diff -urNp linux-2.6.32.41/include/linux/fscache-cache.h linux-2.6.32.41/include/linux/fscache-cache.h
54398--- linux-2.6.32.41/include/linux/fscache-cache.h 2011-03-27 14:31:47.000000000 -0400
54399+++ linux-2.6.32.41/include/linux/fscache-cache.h 2011-05-04 17:56:28.000000000 -0400
54400@@ -116,7 +116,7 @@ struct fscache_operation {
54401 #endif
54402 };
54403
54404-extern atomic_t fscache_op_debug_id;
54405+extern atomic_unchecked_t fscache_op_debug_id;
54406 extern const struct slow_work_ops fscache_op_slow_work_ops;
54407
54408 extern void fscache_enqueue_operation(struct fscache_operation *);
54409@@ -134,7 +134,7 @@ static inline void fscache_operation_ini
54410 fscache_operation_release_t release)
54411 {
54412 atomic_set(&op->usage, 1);
54413- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
54414+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
54415 op->release = release;
54416 INIT_LIST_HEAD(&op->pend_link);
54417 fscache_set_op_state(op, "Init");
54418diff -urNp linux-2.6.32.41/include/linux/fs.h linux-2.6.32.41/include/linux/fs.h
54419--- linux-2.6.32.41/include/linux/fs.h 2011-03-27 14:31:47.000000000 -0400
54420+++ linux-2.6.32.41/include/linux/fs.h 2011-04-17 15:56:46.000000000 -0400
54421@@ -90,6 +90,11 @@ struct inodes_stat_t {
54422 /* Expect random access pattern */
54423 #define FMODE_RANDOM ((__force fmode_t)4096)
54424
54425+/* Hack for grsec so as not to require read permission simply to execute
54426+ * a binary
54427+ */
54428+#define FMODE_GREXEC ((__force fmode_t)0x2000000)
54429+
54430 /*
54431 * The below are the various read and write types that we support. Some of
54432 * them include behavioral modifiers that send information down to the
54433@@ -568,41 +573,41 @@ typedef int (*read_actor_t)(read_descrip
54434 unsigned long, unsigned long);
54435
54436 struct address_space_operations {
54437- int (*writepage)(struct page *page, struct writeback_control *wbc);
54438- int (*readpage)(struct file *, struct page *);
54439- void (*sync_page)(struct page *);
54440+ int (* const writepage)(struct page *page, struct writeback_control *wbc);
54441+ int (* const readpage)(struct file *, struct page *);
54442+ void (* const sync_page)(struct page *);
54443
54444 /* Write back some dirty pages from this mapping. */
54445- int (*writepages)(struct address_space *, struct writeback_control *);
54446+ int (* const writepages)(struct address_space *, struct writeback_control *);
54447
54448 /* Set a page dirty. Return true if this dirtied it */
54449- int (*set_page_dirty)(struct page *page);
54450+ int (* const set_page_dirty)(struct page *page);
54451
54452- int (*readpages)(struct file *filp, struct address_space *mapping,
54453+ int (* const readpages)(struct file *filp, struct address_space *mapping,
54454 struct list_head *pages, unsigned nr_pages);
54455
54456- int (*write_begin)(struct file *, struct address_space *mapping,
54457+ int (* const write_begin)(struct file *, struct address_space *mapping,
54458 loff_t pos, unsigned len, unsigned flags,
54459 struct page **pagep, void **fsdata);
54460- int (*write_end)(struct file *, struct address_space *mapping,
54461+ int (* const write_end)(struct file *, struct address_space *mapping,
54462 loff_t pos, unsigned len, unsigned copied,
54463 struct page *page, void *fsdata);
54464
54465 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
54466- sector_t (*bmap)(struct address_space *, sector_t);
54467- void (*invalidatepage) (struct page *, unsigned long);
54468- int (*releasepage) (struct page *, gfp_t);
54469- ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
54470+ sector_t (* const bmap)(struct address_space *, sector_t);
54471+ void (* const invalidatepage) (struct page *, unsigned long);
54472+ int (* const releasepage) (struct page *, gfp_t);
54473+ ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
54474 loff_t offset, unsigned long nr_segs);
54475- int (*get_xip_mem)(struct address_space *, pgoff_t, int,
54476+ int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
54477 void **, unsigned long *);
54478 /* migrate the contents of a page to the specified target */
54479- int (*migratepage) (struct address_space *,
54480+ int (* const migratepage) (struct address_space *,
54481 struct page *, struct page *);
54482- int (*launder_page) (struct page *);
54483- int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
54484+ int (* const launder_page) (struct page *);
54485+ int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
54486 unsigned long);
54487- int (*error_remove_page)(struct address_space *, struct page *);
54488+ int (* const error_remove_page)(struct address_space *, struct page *);
54489 };
54490
54491 /*
54492@@ -1030,19 +1035,19 @@ static inline int file_check_writeable(s
54493 typedef struct files_struct *fl_owner_t;
54494
54495 struct file_lock_operations {
54496- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
54497- void (*fl_release_private)(struct file_lock *);
54498+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
54499+ void (* const fl_release_private)(struct file_lock *);
54500 };
54501
54502 struct lock_manager_operations {
54503- int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
54504- void (*fl_notify)(struct file_lock *); /* unblock callback */
54505- int (*fl_grant)(struct file_lock *, struct file_lock *, int);
54506- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
54507- void (*fl_release_private)(struct file_lock *);
54508- void (*fl_break)(struct file_lock *);
54509- int (*fl_mylease)(struct file_lock *, struct file_lock *);
54510- int (*fl_change)(struct file_lock **, int);
54511+ int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
54512+ void (* const fl_notify)(struct file_lock *); /* unblock callback */
54513+ int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
54514+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
54515+ void (* const fl_release_private)(struct file_lock *);
54516+ void (* const fl_break)(struct file_lock *);
54517+ int (* const fl_mylease)(struct file_lock *, struct file_lock *);
54518+ int (* const fl_change)(struct file_lock **, int);
54519 };
54520
54521 struct lock_manager {
54522@@ -1441,7 +1446,7 @@ struct fiemap_extent_info {
54523 unsigned int fi_flags; /* Flags as passed from user */
54524 unsigned int fi_extents_mapped; /* Number of mapped extents */
54525 unsigned int fi_extents_max; /* Size of fiemap_extent array */
54526- struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
54527+ struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
54528 * array */
54529 };
54530 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
54531@@ -1558,30 +1563,30 @@ extern ssize_t vfs_writev(struct file *,
54532 unsigned long, loff_t *);
54533
54534 struct super_operations {
54535- struct inode *(*alloc_inode)(struct super_block *sb);
54536- void (*destroy_inode)(struct inode *);
54537+ struct inode *(* const alloc_inode)(struct super_block *sb);
54538+ void (* const destroy_inode)(struct inode *);
54539
54540- void (*dirty_inode) (struct inode *);
54541- int (*write_inode) (struct inode *, int);
54542- void (*drop_inode) (struct inode *);
54543- void (*delete_inode) (struct inode *);
54544- void (*put_super) (struct super_block *);
54545- void (*write_super) (struct super_block *);
54546- int (*sync_fs)(struct super_block *sb, int wait);
54547- int (*freeze_fs) (struct super_block *);
54548- int (*unfreeze_fs) (struct super_block *);
54549- int (*statfs) (struct dentry *, struct kstatfs *);
54550- int (*remount_fs) (struct super_block *, int *, char *);
54551- void (*clear_inode) (struct inode *);
54552- void (*umount_begin) (struct super_block *);
54553+ void (* const dirty_inode) (struct inode *);
54554+ int (* const write_inode) (struct inode *, int);
54555+ void (* const drop_inode) (struct inode *);
54556+ void (* const delete_inode) (struct inode *);
54557+ void (* const put_super) (struct super_block *);
54558+ void (* const write_super) (struct super_block *);
54559+ int (* const sync_fs)(struct super_block *sb, int wait);
54560+ int (* const freeze_fs) (struct super_block *);
54561+ int (* const unfreeze_fs) (struct super_block *);
54562+ int (* const statfs) (struct dentry *, struct kstatfs *);
54563+ int (* const remount_fs) (struct super_block *, int *, char *);
54564+ void (* const clear_inode) (struct inode *);
54565+ void (* const umount_begin) (struct super_block *);
54566
54567- int (*show_options)(struct seq_file *, struct vfsmount *);
54568- int (*show_stats)(struct seq_file *, struct vfsmount *);
54569+ int (* const show_options)(struct seq_file *, struct vfsmount *);
54570+ int (* const show_stats)(struct seq_file *, struct vfsmount *);
54571 #ifdef CONFIG_QUOTA
54572- ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
54573- ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
54574+ ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
54575+ ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
54576 #endif
54577- int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
54578+ int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
54579 };
54580
54581 /*
54582diff -urNp linux-2.6.32.41/include/linux/fs_struct.h linux-2.6.32.41/include/linux/fs_struct.h
54583--- linux-2.6.32.41/include/linux/fs_struct.h 2011-03-27 14:31:47.000000000 -0400
54584+++ linux-2.6.32.41/include/linux/fs_struct.h 2011-04-17 15:56:46.000000000 -0400
54585@@ -4,7 +4,7 @@
54586 #include <linux/path.h>
54587
54588 struct fs_struct {
54589- int users;
54590+ atomic_t users;
54591 rwlock_t lock;
54592 int umask;
54593 int in_exec;
54594diff -urNp linux-2.6.32.41/include/linux/ftrace_event.h linux-2.6.32.41/include/linux/ftrace_event.h
54595--- linux-2.6.32.41/include/linux/ftrace_event.h 2011-03-27 14:31:47.000000000 -0400
54596+++ linux-2.6.32.41/include/linux/ftrace_event.h 2011-05-04 17:56:28.000000000 -0400
54597@@ -163,7 +163,7 @@ extern int trace_define_field(struct ftr
54598 int filter_type);
54599 extern int trace_define_common_fields(struct ftrace_event_call *call);
54600
54601-#define is_signed_type(type) (((type)(-1)) < 0)
54602+#define is_signed_type(type) (((type)(-1)) < (type)1)
54603
54604 int trace_set_clr_event(const char *system, const char *event, int set);
54605
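The ftrace_event.h hunk rewrites is_signed_type() to compare against (type)1 instead of 0; for the integer types shown below both forms give the same answer, and the rewritten form presumably sidesteps the "comparison of unsigned expression < 0 is always false" class of compiler warnings. A quick check that can be compiled as-is:

/* sketch: exercising the rewritten is_signed_type() macro */
#include <stdio.h>

#define is_signed_type(type) (((type)(-1)) < (type)1)

int main(void)
{
    printf("int:          %d\n", is_signed_type(int));            /* 1 */
    printf("unsigned int: %d\n", is_signed_type(unsigned int));   /* 0 */
    printf("char:         %d\n", is_signed_type(char));           /* 1 where char is signed */
    return 0;
}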
54606diff -urNp linux-2.6.32.41/include/linux/genhd.h linux-2.6.32.41/include/linux/genhd.h
54607--- linux-2.6.32.41/include/linux/genhd.h 2011-03-27 14:31:47.000000000 -0400
54608+++ linux-2.6.32.41/include/linux/genhd.h 2011-04-17 15:56:46.000000000 -0400
54609@@ -161,7 +161,7 @@ struct gendisk {
54610
54611 struct timer_rand_state *random;
54612
54613- atomic_t sync_io; /* RAID */
54614+ atomic_unchecked_t sync_io; /* RAID */
54615 struct work_struct async_notify;
54616 #ifdef CONFIG_BLK_DEV_INTEGRITY
54617 struct blk_integrity *integrity;
54618diff -urNp linux-2.6.32.41/include/linux/gracl.h linux-2.6.32.41/include/linux/gracl.h
54619--- linux-2.6.32.41/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
54620+++ linux-2.6.32.41/include/linux/gracl.h 2011-04-17 15:56:46.000000000 -0400
54621@@ -0,0 +1,317 @@
54622+#ifndef GR_ACL_H
54623+#define GR_ACL_H
54624+
54625+#include <linux/grdefs.h>
54626+#include <linux/resource.h>
54627+#include <linux/capability.h>
54628+#include <linux/dcache.h>
54629+#include <asm/resource.h>
54630+
54631+/* Major status information */
54632+
54633+#define GR_VERSION "grsecurity 2.2.2"
54634+#define GRSECURITY_VERSION 0x2202
54635+
54636+enum {
54637+ GR_SHUTDOWN = 0,
54638+ GR_ENABLE = 1,
54639+ GR_SPROLE = 2,
54640+ GR_RELOAD = 3,
54641+ GR_SEGVMOD = 4,
54642+ GR_STATUS = 5,
54643+ GR_UNSPROLE = 6,
54644+ GR_PASSSET = 7,
54645+ GR_SPROLEPAM = 8,
54646+};
54647+
54648+/* Password setup definitions
54649+ * kernel/grhash.c */
54650+enum {
54651+ GR_PW_LEN = 128,
54652+ GR_SALT_LEN = 16,
54653+ GR_SHA_LEN = 32,
54654+};
54655+
54656+enum {
54657+ GR_SPROLE_LEN = 64,
54658+};
54659+
54660+enum {
54661+ GR_NO_GLOB = 0,
54662+ GR_REG_GLOB,
54663+ GR_CREATE_GLOB
54664+};
54665+
54666+#define GR_NLIMITS 32
54667+
54668+/* Begin Data Structures */
54669+
54670+struct sprole_pw {
54671+ unsigned char *rolename;
54672+ unsigned char salt[GR_SALT_LEN];
54673+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
54674+};
54675+
54676+struct name_entry {
54677+ __u32 key;
54678+ ino_t inode;
54679+ dev_t device;
54680+ char *name;
54681+ __u16 len;
54682+ __u8 deleted;
54683+ struct name_entry *prev;
54684+ struct name_entry *next;
54685+};
54686+
54687+struct inodev_entry {
54688+ struct name_entry *nentry;
54689+ struct inodev_entry *prev;
54690+ struct inodev_entry *next;
54691+};
54692+
54693+struct acl_role_db {
54694+ struct acl_role_label **r_hash;
54695+ __u32 r_size;
54696+};
54697+
54698+struct inodev_db {
54699+ struct inodev_entry **i_hash;
54700+ __u32 i_size;
54701+};
54702+
54703+struct name_db {
54704+ struct name_entry **n_hash;
54705+ __u32 n_size;
54706+};
54707+
54708+struct crash_uid {
54709+ uid_t uid;
54710+ unsigned long expires;
54711+};
54712+
54713+struct gr_hash_struct {
54714+ void **table;
54715+ void **nametable;
54716+ void *first;
54717+ __u32 table_size;
54718+ __u32 used_size;
54719+ int type;
54720+};
54721+
54722+/* Userspace Grsecurity ACL data structures */
54723+
54724+struct acl_subject_label {
54725+ char *filename;
54726+ ino_t inode;
54727+ dev_t device;
54728+ __u32 mode;
54729+ kernel_cap_t cap_mask;
54730+ kernel_cap_t cap_lower;
54731+ kernel_cap_t cap_invert_audit;
54732+
54733+ struct rlimit res[GR_NLIMITS];
54734+ __u32 resmask;
54735+
54736+ __u8 user_trans_type;
54737+ __u8 group_trans_type;
54738+ uid_t *user_transitions;
54739+ gid_t *group_transitions;
54740+ __u16 user_trans_num;
54741+ __u16 group_trans_num;
54742+
54743+ __u32 sock_families[2];
54744+ __u32 ip_proto[8];
54745+ __u32 ip_type;
54746+ struct acl_ip_label **ips;
54747+ __u32 ip_num;
54748+ __u32 inaddr_any_override;
54749+
54750+ __u32 crashes;
54751+ unsigned long expires;
54752+
54753+ struct acl_subject_label *parent_subject;
54754+ struct gr_hash_struct *hash;
54755+ struct acl_subject_label *prev;
54756+ struct acl_subject_label *next;
54757+
54758+ struct acl_object_label **obj_hash;
54759+ __u32 obj_hash_size;
54760+ __u16 pax_flags;
54761+};
54762+
54763+struct role_allowed_ip {
54764+ __u32 addr;
54765+ __u32 netmask;
54766+
54767+ struct role_allowed_ip *prev;
54768+ struct role_allowed_ip *next;
54769+};
54770+
54771+struct role_transition {
54772+ char *rolename;
54773+
54774+ struct role_transition *prev;
54775+ struct role_transition *next;
54776+};
54777+
54778+struct acl_role_label {
54779+ char *rolename;
54780+ uid_t uidgid;
54781+ __u16 roletype;
54782+
54783+ __u16 auth_attempts;
54784+ unsigned long expires;
54785+
54786+ struct acl_subject_label *root_label;
54787+ struct gr_hash_struct *hash;
54788+
54789+ struct acl_role_label *prev;
54790+ struct acl_role_label *next;
54791+
54792+ struct role_transition *transitions;
54793+ struct role_allowed_ip *allowed_ips;
54794+ uid_t *domain_children;
54795+ __u16 domain_child_num;
54796+
54797+ struct acl_subject_label **subj_hash;
54798+ __u32 subj_hash_size;
54799+};
54800+
54801+struct user_acl_role_db {
54802+ struct acl_role_label **r_table;
54803+ __u32 num_pointers; /* Number of allocations to track */
54804+ __u32 num_roles; /* Number of roles */
54805+ __u32 num_domain_children; /* Number of domain children */
54806+ __u32 num_subjects; /* Number of subjects */
54807+ __u32 num_objects; /* Number of objects */
54808+};
54809+
54810+struct acl_object_label {
54811+ char *filename;
54812+ ino_t inode;
54813+ dev_t device;
54814+ __u32 mode;
54815+
54816+ struct acl_subject_label *nested;
54817+ struct acl_object_label *globbed;
54818+
54819+ /* next two structures not used */
54820+
54821+ struct acl_object_label *prev;
54822+ struct acl_object_label *next;
54823+};
54824+
54825+struct acl_ip_label {
54826+ char *iface;
54827+ __u32 addr;
54828+ __u32 netmask;
54829+ __u16 low, high;
54830+ __u8 mode;
54831+ __u32 type;
54832+ __u32 proto[8];
54833+
54834+ /* next two structures not used */
54835+
54836+ struct acl_ip_label *prev;
54837+ struct acl_ip_label *next;
54838+};
54839+
54840+struct gr_arg {
54841+ struct user_acl_role_db role_db;
54842+ unsigned char pw[GR_PW_LEN];
54843+ unsigned char salt[GR_SALT_LEN];
54844+ unsigned char sum[GR_SHA_LEN];
54845+ unsigned char sp_role[GR_SPROLE_LEN];
54846+ struct sprole_pw *sprole_pws;
54847+ dev_t segv_device;
54848+ ino_t segv_inode;
54849+ uid_t segv_uid;
54850+ __u16 num_sprole_pws;
54851+ __u16 mode;
54852+};
54853+
54854+struct gr_arg_wrapper {
54855+ struct gr_arg *arg;
54856+ __u32 version;
54857+ __u32 size;
54858+};
54859+
54860+struct subject_map {
54861+ struct acl_subject_label *user;
54862+ struct acl_subject_label *kernel;
54863+ struct subject_map *prev;
54864+ struct subject_map *next;
54865+};
54866+
54867+struct acl_subj_map_db {
54868+ struct subject_map **s_hash;
54869+ __u32 s_size;
54870+};
54871+
54872+/* End Data Structures Section */
54873+
54874+/* Hash functions generated by empirical testing by Brad Spengler
54875+ Makes good use of the low bits of the inode. Generally 0-1 times
54876+ in loop for successful match. 0-3 for unsuccessful match.
54877+ Shift/add algorithm with modulus of table size and an XOR*/
54878+
54879+static __inline__ unsigned int
54880+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
54881+{
54882+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
54883+}
54884+
54885+ static __inline__ unsigned int
54886+shash(const struct acl_subject_label *userp, const unsigned int sz)
54887+{
54888+ return ((const unsigned long)userp % sz);
54889+}
54890+
54891+static __inline__ unsigned int
54892+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
54893+{
54894+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
54895+}
54896+
54897+static __inline__ unsigned int
54898+nhash(const char *name, const __u16 len, const unsigned int sz)
54899+{
54900+ return full_name_hash((const unsigned char *)name, len) % sz;
54901+}
54902+
54903+#define FOR_EACH_ROLE_START(role) \
54904+ role = role_list; \
54905+ while (role) {
54906+
54907+#define FOR_EACH_ROLE_END(role) \
54908+ role = role->prev; \
54909+ }
54910+
54911+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
54912+ subj = NULL; \
54913+ iter = 0; \
54914+ while (iter < role->subj_hash_size) { \
54915+ if (subj == NULL) \
54916+ subj = role->subj_hash[iter]; \
54917+ if (subj == NULL) { \
54918+ iter++; \
54919+ continue; \
54920+ }
54921+
54922+#define FOR_EACH_SUBJECT_END(subj,iter) \
54923+ subj = subj->next; \
54924+ if (subj == NULL) \
54925+ iter++; \
54926+ }
54927+
54928+
54929+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
54930+ subj = role->hash->first; \
54931+ while (subj != NULL) {
54932+
54933+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
54934+ subj = subj->next; \
54935+ }
54936+
54937+#endif
54938+
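gracl.h ends with the inline hash helpers (rhash, shash, fhash, nhash) used to index the role/subject/object tables declared above. The snippet below exercises fhash() with the same body in userspace to show how a bucket index is derived from an inode/device pair; the table size and the inode/device values are illustrative.

/* sketch: bucket selection with the fhash() helper from gracl.h */
#include <stdio.h>
#include <sys/types.h>

static inline unsigned int
fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
{
    return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
}

int main(void)
{
    const unsigned int table_size = 256;          /* illustrative */
    printf("bucket for ino 12345, dev 0x801: %u\n",
           fhash(12345, 0x801, table_size));
    return 0;
}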
54939diff -urNp linux-2.6.32.41/include/linux/gralloc.h linux-2.6.32.41/include/linux/gralloc.h
54940--- linux-2.6.32.41/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
54941+++ linux-2.6.32.41/include/linux/gralloc.h 2011-04-17 15:56:46.000000000 -0400
54942@@ -0,0 +1,9 @@
54943+#ifndef __GRALLOC_H
54944+#define __GRALLOC_H
54945+
54946+void acl_free_all(void);
54947+int acl_alloc_stack_init(unsigned long size);
54948+void *acl_alloc(unsigned long len);
54949+void *acl_alloc_num(unsigned long num, unsigned long len);
54950+
54951+#endif
54952diff -urNp linux-2.6.32.41/include/linux/grdefs.h linux-2.6.32.41/include/linux/grdefs.h
54953--- linux-2.6.32.41/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
54954+++ linux-2.6.32.41/include/linux/grdefs.h 2011-04-17 15:56:46.000000000 -0400
54955@@ -0,0 +1,139 @@
54956+#ifndef GRDEFS_H
54957+#define GRDEFS_H
54958+
54959+/* Begin grsecurity status declarations */
54960+
54961+enum {
54962+ GR_READY = 0x01,
54963+ GR_STATUS_INIT = 0x00 // disabled state
54964+};
54965+
54966+/* Begin ACL declarations */
54967+
54968+/* Role flags */
54969+
54970+enum {
54971+ GR_ROLE_USER = 0x0001,
54972+ GR_ROLE_GROUP = 0x0002,
54973+ GR_ROLE_DEFAULT = 0x0004,
54974+ GR_ROLE_SPECIAL = 0x0008,
54975+ GR_ROLE_AUTH = 0x0010,
54976+ GR_ROLE_NOPW = 0x0020,
54977+ GR_ROLE_GOD = 0x0040,
54978+ GR_ROLE_LEARN = 0x0080,
54979+ GR_ROLE_TPE = 0x0100,
54980+ GR_ROLE_DOMAIN = 0x0200,
54981+ GR_ROLE_PAM = 0x0400,
54982+ GR_ROLE_PERSIST = 0x800
54983+};
54984+
54985+/* ACL Subject and Object mode flags */
54986+enum {
54987+ GR_DELETED = 0x80000000
54988+};
54989+
54990+/* ACL Object-only mode flags */
54991+enum {
54992+ GR_READ = 0x00000001,
54993+ GR_APPEND = 0x00000002,
54994+ GR_WRITE = 0x00000004,
54995+ GR_EXEC = 0x00000008,
54996+ GR_FIND = 0x00000010,
54997+ GR_INHERIT = 0x00000020,
54998+ GR_SETID = 0x00000040,
54999+ GR_CREATE = 0x00000080,
55000+ GR_DELETE = 0x00000100,
55001+ GR_LINK = 0x00000200,
55002+ GR_AUDIT_READ = 0x00000400,
55003+ GR_AUDIT_APPEND = 0x00000800,
55004+ GR_AUDIT_WRITE = 0x00001000,
55005+ GR_AUDIT_EXEC = 0x00002000,
55006+ GR_AUDIT_FIND = 0x00004000,
55007+ GR_AUDIT_INHERIT= 0x00008000,
55008+ GR_AUDIT_SETID = 0x00010000,
55009+ GR_AUDIT_CREATE = 0x00020000,
55010+ GR_AUDIT_DELETE = 0x00040000,
55011+ GR_AUDIT_LINK = 0x00080000,
55012+ GR_PTRACERD = 0x00100000,
55013+ GR_NOPTRACE = 0x00200000,
55014+ GR_SUPPRESS = 0x00400000,
55015+ GR_NOLEARN = 0x00800000,
55016+ GR_INIT_TRANSFER= 0x01000000
55017+};
55018+
55019+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
55020+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
55021+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
55022+
55023+/* ACL subject-only mode flags */
55024+enum {
55025+ GR_KILL = 0x00000001,
55026+ GR_VIEW = 0x00000002,
55027+ GR_PROTECTED = 0x00000004,
55028+ GR_LEARN = 0x00000008,
55029+ GR_OVERRIDE = 0x00000010,
55030+ /* just a placeholder, this mode is only used in userspace */
55031+ GR_DUMMY = 0x00000020,
55032+ GR_PROTSHM = 0x00000040,
55033+ GR_KILLPROC = 0x00000080,
55034+ GR_KILLIPPROC = 0x00000100,
55035+ /* just a placeholder, this mode is only used in userspace */
55036+ GR_NOTROJAN = 0x00000200,
55037+ GR_PROTPROCFD = 0x00000400,
55038+ GR_PROCACCT = 0x00000800,
55039+ GR_RELAXPTRACE = 0x00001000,
55040+ GR_NESTED = 0x00002000,
55041+ GR_INHERITLEARN = 0x00004000,
55042+ GR_PROCFIND = 0x00008000,
55043+ GR_POVERRIDE = 0x00010000,
55044+ GR_KERNELAUTH = 0x00020000,
55045+ GR_ATSECURE = 0x00040000
55046+};
55047+
55048+enum {
55049+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
55050+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
55051+ GR_PAX_ENABLE_MPROTECT = 0x0004,
55052+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
55053+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
55054+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
55055+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
55056+ GR_PAX_DISABLE_MPROTECT = 0x0400,
55057+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
55058+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
55059+};
55060+
55061+enum {
55062+ GR_ID_USER = 0x01,
55063+ GR_ID_GROUP = 0x02,
55064+};
55065+
55066+enum {
55067+ GR_ID_ALLOW = 0x01,
55068+ GR_ID_DENY = 0x02,
55069+};
55070+
55071+#define GR_CRASH_RES 31
55072+#define GR_UIDTABLE_MAX 500
55073+
55074+/* begin resource learning section */
55075+enum {
55076+ GR_RLIM_CPU_BUMP = 60,
55077+ GR_RLIM_FSIZE_BUMP = 50000,
55078+ GR_RLIM_DATA_BUMP = 10000,
55079+ GR_RLIM_STACK_BUMP = 1000,
55080+ GR_RLIM_CORE_BUMP = 10000,
55081+ GR_RLIM_RSS_BUMP = 500000,
55082+ GR_RLIM_NPROC_BUMP = 1,
55083+ GR_RLIM_NOFILE_BUMP = 5,
55084+ GR_RLIM_MEMLOCK_BUMP = 50000,
55085+ GR_RLIM_AS_BUMP = 500000,
55086+ GR_RLIM_LOCKS_BUMP = 2,
55087+ GR_RLIM_SIGPENDING_BUMP = 5,
55088+ GR_RLIM_MSGQUEUE_BUMP = 10000,
55089+ GR_RLIM_NICE_BUMP = 1,
55090+ GR_RLIM_RTPRIO_BUMP = 1,
55091+ GR_RLIM_RTTIME_BUMP = 1000000
55092+};
55093+
55094+#endif
55095diff -urNp linux-2.6.32.41/include/linux/grinternal.h linux-2.6.32.41/include/linux/grinternal.h
55096--- linux-2.6.32.41/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
55097+++ linux-2.6.32.41/include/linux/grinternal.h 2011-04-17 15:56:46.000000000 -0400
55098@@ -0,0 +1,218 @@
55099+#ifndef __GRINTERNAL_H
55100+#define __GRINTERNAL_H
55101+
55102+#ifdef CONFIG_GRKERNSEC
55103+
55104+#include <linux/fs.h>
55105+#include <linux/mnt_namespace.h>
55106+#include <linux/nsproxy.h>
55107+#include <linux/gracl.h>
55108+#include <linux/grdefs.h>
55109+#include <linux/grmsg.h>
55110+
55111+void gr_add_learn_entry(const char *fmt, ...)
55112+ __attribute__ ((format (printf, 1, 2)));
55113+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
55114+ const struct vfsmount *mnt);
55115+__u32 gr_check_create(const struct dentry *new_dentry,
55116+ const struct dentry *parent,
55117+ const struct vfsmount *mnt, const __u32 mode);
55118+int gr_check_protected_task(const struct task_struct *task);
55119+__u32 to_gr_audit(const __u32 reqmode);
55120+int gr_set_acls(const int type);
55121+int gr_apply_subject_to_task(struct task_struct *task);
55122+int gr_acl_is_enabled(void);
55123+char gr_roletype_to_char(void);
55124+
55125+void gr_handle_alertkill(struct task_struct *task);
55126+char *gr_to_filename(const struct dentry *dentry,
55127+ const struct vfsmount *mnt);
55128+char *gr_to_filename1(const struct dentry *dentry,
55129+ const struct vfsmount *mnt);
55130+char *gr_to_filename2(const struct dentry *dentry,
55131+ const struct vfsmount *mnt);
55132+char *gr_to_filename3(const struct dentry *dentry,
55133+ const struct vfsmount *mnt);
55134+
55135+extern int grsec_enable_harden_ptrace;
55136+extern int grsec_enable_link;
55137+extern int grsec_enable_fifo;
55138+extern int grsec_enable_execve;
55139+extern int grsec_enable_shm;
55140+extern int grsec_enable_execlog;
55141+extern int grsec_enable_signal;
55142+extern int grsec_enable_audit_ptrace;
55143+extern int grsec_enable_forkfail;
55144+extern int grsec_enable_time;
55145+extern int grsec_enable_rofs;
55146+extern int grsec_enable_chroot_shmat;
55147+extern int grsec_enable_chroot_findtask;
55148+extern int grsec_enable_chroot_mount;
55149+extern int grsec_enable_chroot_double;
55150+extern int grsec_enable_chroot_pivot;
55151+extern int grsec_enable_chroot_chdir;
55152+extern int grsec_enable_chroot_chmod;
55153+extern int grsec_enable_chroot_mknod;
55154+extern int grsec_enable_chroot_fchdir;
55155+extern int grsec_enable_chroot_nice;
55156+extern int grsec_enable_chroot_execlog;
55157+extern int grsec_enable_chroot_caps;
55158+extern int grsec_enable_chroot_sysctl;
55159+extern int grsec_enable_chroot_unix;
55160+extern int grsec_enable_tpe;
55161+extern int grsec_tpe_gid;
55162+extern int grsec_enable_tpe_all;
55163+extern int grsec_enable_tpe_invert;
55164+extern int grsec_enable_socket_all;
55165+extern int grsec_socket_all_gid;
55166+extern int grsec_enable_socket_client;
55167+extern int grsec_socket_client_gid;
55168+extern int grsec_enable_socket_server;
55169+extern int grsec_socket_server_gid;
55170+extern int grsec_audit_gid;
55171+extern int grsec_enable_group;
55172+extern int grsec_enable_audit_textrel;
55173+extern int grsec_enable_log_rwxmaps;
55174+extern int grsec_enable_mount;
55175+extern int grsec_enable_chdir;
55176+extern int grsec_resource_logging;
55177+extern int grsec_enable_blackhole;
55178+extern int grsec_lastack_retries;
55179+extern int grsec_lock;
55180+
55181+extern spinlock_t grsec_alert_lock;
55182+extern unsigned long grsec_alert_wtime;
55183+extern unsigned long grsec_alert_fyet;
55184+
55185+extern spinlock_t grsec_audit_lock;
55186+
55187+extern rwlock_t grsec_exec_file_lock;
55188+
55189+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
55190+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
55191+ (tsk)->exec_file->f_vfsmnt) : "/")
55192+
55193+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
55194+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
55195+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
55196+
55197+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
55198+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
55199+ (tsk)->exec_file->f_vfsmnt) : "/")
55200+
55201+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
55202+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
55203+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
55204+
55205+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
55206+
55207+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
55208+
55209+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
55210+ (task)->pid, (cred)->uid, \
55211+ (cred)->euid, (cred)->gid, (cred)->egid, \
55212+ gr_parent_task_fullpath(task), \
55213+ (task)->real_parent->comm, (task)->real_parent->pid, \
55214+ (pcred)->uid, (pcred)->euid, \
55215+ (pcred)->gid, (pcred)->egid
55216+
55217+#define GR_CHROOT_CAPS {{ \
55218+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
55219+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
55220+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
55221+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
55222+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
55223+ CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
55224+
55225+#define security_learn(normal_msg,args...) \
55226+({ \
55227+ read_lock(&grsec_exec_file_lock); \
55228+ gr_add_learn_entry(normal_msg "\n", ## args); \
55229+ read_unlock(&grsec_exec_file_lock); \
55230+})
55231+
55232+enum {
55233+ GR_DO_AUDIT,
55234+ GR_DONT_AUDIT,
55235+ GR_DONT_AUDIT_GOOD
55236+};
55237+
55238+enum {
55239+ GR_TTYSNIFF,
55240+ GR_RBAC,
55241+ GR_RBAC_STR,
55242+ GR_STR_RBAC,
55243+ GR_RBAC_MODE2,
55244+ GR_RBAC_MODE3,
55245+ GR_FILENAME,
55246+ GR_SYSCTL_HIDDEN,
55247+ GR_NOARGS,
55248+ GR_ONE_INT,
55249+ GR_ONE_INT_TWO_STR,
55250+ GR_ONE_STR,
55251+ GR_STR_INT,
55252+ GR_TWO_STR_INT,
55253+ GR_TWO_INT,
55254+ GR_TWO_U64,
55255+ GR_THREE_INT,
55256+ GR_FIVE_INT_TWO_STR,
55257+ GR_TWO_STR,
55258+ GR_THREE_STR,
55259+ GR_FOUR_STR,
55260+ GR_STR_FILENAME,
55261+ GR_FILENAME_STR,
55262+ GR_FILENAME_TWO_INT,
55263+ GR_FILENAME_TWO_INT_STR,
55264+ GR_TEXTREL,
55265+ GR_PTRACE,
55266+ GR_RESOURCE,
55267+ GR_CAP,
55268+ GR_SIG,
55269+ GR_SIG2,
55270+ GR_CRASH1,
55271+ GR_CRASH2,
55272+ GR_PSACCT,
55273+ GR_RWXMAP
55274+};
55275+
55276+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
55277+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
55278+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
55279+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
55280+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
55281+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
55282+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
55283+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
55284+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
55285+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
55286+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
55287+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
55288+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
55289+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
55290+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
55291+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
55292+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
55293+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
55294+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
55295+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
55296+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
55297+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
55298+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
55299+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
55300+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
55301+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
55302+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
55303+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
55304+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
55305+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
55306+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
55307+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
55308+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
55309+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
55310+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
55311+
55312+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
55313+
55314+#endif
55315+
55316+#endif
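grinternal.h wraps its learning-mode logging in a GCC statement expression so that taking the lock, emitting the entry and dropping the lock behave as a single expression at the call site. The sketch below reproduces only that macro shape with stand-in lock and log functions so it builds outside the kernel; none of the fake_* names come from the patch.

/* sketch: statement-expression logging macro in the style of security_learn() */
#include <stdio.h>

static void fake_lock(void)   { printf("lock\n"); }
static void fake_unlock(void) { printf("unlock\n"); }

#define security_learn_demo(msg, args...) \
({ \
    fake_lock(); \
    printf(msg "\n", ## args); \
    fake_unlock(); \
})

int main(void)
{
    security_learn_demo("learned exec of %s by uid %d", "/bin/true", 1000);
    return 0;
}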
55317diff -urNp linux-2.6.32.41/include/linux/grmsg.h linux-2.6.32.41/include/linux/grmsg.h
55318--- linux-2.6.32.41/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
55319+++ linux-2.6.32.41/include/linux/grmsg.h 2011-04-17 15:56:46.000000000 -0400
55320@@ -0,0 +1,108 @@
55321+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
55322+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
55323+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
55324+#define GR_STOPMOD_MSG "denied modification of module state by "
55325+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
55326+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
55327+#define GR_IOPERM_MSG "denied use of ioperm() by "
55328+#define GR_IOPL_MSG "denied use of iopl() by "
55329+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
55330+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
55331+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
55332+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
55333+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
55334+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
55335+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
55336+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
55337+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
55338+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
55339+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
55340+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
55341+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
55342+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
55343+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
55344+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
55345+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
55346+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
55347+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
55348+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
55349+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
55350+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
55351+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
55352+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
55353+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
55354+#define GR_NPROC_MSG "denied overstep of process limit by "
55355+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
55356+#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
55357+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
55358+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
55359+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
55360+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
55361+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
55362+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
55363+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
55364+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
55365+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
55366+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
55367+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
55368+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
55369+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
55370+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
55371+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
55372+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
55373+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
55374+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
55375+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
55376+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
55377+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
55378+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
55379+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
55380+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
55381+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
55382+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
55383+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
55384+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
55385+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
55386+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
55387+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
55388+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
55389+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
55390+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
55391+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
55392+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
55393+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
55394+#define GR_FAILFORK_MSG "failed fork with errno %s by "
55395+#define GR_NICE_CHROOT_MSG "denied priority change by "
55396+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
55397+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
55398+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
55399+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
55400+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
55401+#define GR_TIME_MSG "time set by "
55402+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
55403+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
55404+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
55405+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
55406+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
55407+#define GR_BIND_MSG "denied bind() by "
55408+#define GR_CONNECT_MSG "denied connect() by "
55409+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
55410+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
55411+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
55412+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
55413+#define GR_CAP_ACL_MSG "use of %s denied for "
55414+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
55415+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
55416+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
55417+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
55418+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
55419+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
55420+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
55421+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
55422+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
55423+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
55424+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
55425+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
55426+#define GR_VM86_MSG "denied use of vm86 by "
55427+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
55428+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
55429diff -urNp linux-2.6.32.41/include/linux/grsecurity.h linux-2.6.32.41/include/linux/grsecurity.h
55430--- linux-2.6.32.41/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
55431+++ linux-2.6.32.41/include/linux/grsecurity.h 2011-04-17 15:56:46.000000000 -0400
55432@@ -0,0 +1,212 @@
55433+#ifndef GR_SECURITY_H
55434+#define GR_SECURITY_H
55435+#include <linux/fs.h>
55436+#include <linux/fs_struct.h>
55437+#include <linux/binfmts.h>
55438+#include <linux/gracl.h>
55439+#include <linux/compat.h>
55440+
55441+/* notify of brain-dead configs */
55442+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
55443+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
55444+#endif
55445+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
55446+#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
55447+#endif
55448+#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
55449+#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
55450+#endif
55451+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
55452+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
55453+#endif
55454+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
55455+#error "CONFIG_PAX enabled, but no PaX options are enabled."
55456+#endif
55457+
55458+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
55459+void gr_handle_brute_check(void);
55460+void gr_handle_kernel_exploit(void);
55461+int gr_process_user_ban(void);
55462+
55463+char gr_roletype_to_char(void);
55464+
55465+int gr_acl_enable_at_secure(void);
55466+
55467+int gr_check_user_change(int real, int effective, int fs);
55468+int gr_check_group_change(int real, int effective, int fs);
55469+
55470+void gr_del_task_from_ip_table(struct task_struct *p);
55471+
55472+int gr_pid_is_chrooted(struct task_struct *p);
55473+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
55474+int gr_handle_chroot_nice(void);
55475+int gr_handle_chroot_sysctl(const int op);
55476+int gr_handle_chroot_setpriority(struct task_struct *p,
55477+ const int niceval);
55478+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
55479+int gr_handle_chroot_chroot(const struct dentry *dentry,
55480+ const struct vfsmount *mnt);
55481+int gr_handle_chroot_caps(struct path *path);
55482+void gr_handle_chroot_chdir(struct path *path);
55483+int gr_handle_chroot_chmod(const struct dentry *dentry,
55484+ const struct vfsmount *mnt, const int mode);
55485+int gr_handle_chroot_mknod(const struct dentry *dentry,
55486+ const struct vfsmount *mnt, const int mode);
55487+int gr_handle_chroot_mount(const struct dentry *dentry,
55488+ const struct vfsmount *mnt,
55489+ const char *dev_name);
55490+int gr_handle_chroot_pivot(void);
55491+int gr_handle_chroot_unix(const pid_t pid);
55492+
55493+int gr_handle_rawio(const struct inode *inode);
55494+int gr_handle_nproc(void);
55495+
55496+void gr_handle_ioperm(void);
55497+void gr_handle_iopl(void);
55498+
55499+int gr_tpe_allow(const struct file *file);
55500+
55501+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
55502+void gr_clear_chroot_entries(struct task_struct *task);
55503+
55504+void gr_log_forkfail(const int retval);
55505+void gr_log_timechange(void);
55506+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
55507+void gr_log_chdir(const struct dentry *dentry,
55508+ const struct vfsmount *mnt);
55509+void gr_log_chroot_exec(const struct dentry *dentry,
55510+ const struct vfsmount *mnt);
55511+void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
55512+#ifdef CONFIG_COMPAT
55513+void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
55514+#endif
55515+void gr_log_remount(const char *devname, const int retval);
55516+void gr_log_unmount(const char *devname, const int retval);
55517+void gr_log_mount(const char *from, const char *to, const int retval);
55518+void gr_log_textrel(struct vm_area_struct *vma);
55519+void gr_log_rwxmmap(struct file *file);
55520+void gr_log_rwxmprotect(struct file *file);
55521+
55522+int gr_handle_follow_link(const struct inode *parent,
55523+ const struct inode *inode,
55524+ const struct dentry *dentry,
55525+ const struct vfsmount *mnt);
55526+int gr_handle_fifo(const struct dentry *dentry,
55527+ const struct vfsmount *mnt,
55528+ const struct dentry *dir, const int flag,
55529+ const int acc_mode);
55530+int gr_handle_hardlink(const struct dentry *dentry,
55531+ const struct vfsmount *mnt,
55532+ struct inode *inode,
55533+ const int mode, const char *to);
55534+
55535+int gr_is_capable(const int cap);
55536+int gr_is_capable_nolog(const int cap);
55537+void gr_learn_resource(const struct task_struct *task, const int limit,
55538+ const unsigned long wanted, const int gt);
55539+void gr_copy_label(struct task_struct *tsk);
55540+void gr_handle_crash(struct task_struct *task, const int sig);
55541+int gr_handle_signal(const struct task_struct *p, const int sig);
55542+int gr_check_crash_uid(const uid_t uid);
55543+int gr_check_protected_task(const struct task_struct *task);
55544+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
55545+int gr_acl_handle_mmap(const struct file *file,
55546+ const unsigned long prot);
55547+int gr_acl_handle_mprotect(const struct file *file,
55548+ const unsigned long prot);
55549+int gr_check_hidden_task(const struct task_struct *tsk);
55550+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
55551+ const struct vfsmount *mnt);
55552+__u32 gr_acl_handle_utime(const struct dentry *dentry,
55553+ const struct vfsmount *mnt);
55554+__u32 gr_acl_handle_access(const struct dentry *dentry,
55555+ const struct vfsmount *mnt, const int fmode);
55556+__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
55557+ const struct vfsmount *mnt, mode_t mode);
55558+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
55559+ const struct vfsmount *mnt, mode_t mode);
55560+__u32 gr_acl_handle_chown(const struct dentry *dentry,
55561+ const struct vfsmount *mnt);
55562+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
55563+ const struct vfsmount *mnt);
55564+int gr_handle_ptrace(struct task_struct *task, const long request);
55565+int gr_handle_proc_ptrace(struct task_struct *task);
55566+__u32 gr_acl_handle_execve(const struct dentry *dentry,
55567+ const struct vfsmount *mnt);
55568+int gr_check_crash_exec(const struct file *filp);
55569+int gr_acl_is_enabled(void);
55570+void gr_set_kernel_label(struct task_struct *task);
55571+void gr_set_role_label(struct task_struct *task, const uid_t uid,
55572+ const gid_t gid);
55573+int gr_set_proc_label(const struct dentry *dentry,
55574+ const struct vfsmount *mnt,
55575+ const int unsafe_share);
55576+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
55577+ const struct vfsmount *mnt);
55578+__u32 gr_acl_handle_open(const struct dentry *dentry,
55579+ const struct vfsmount *mnt, const int fmode);
55580+__u32 gr_acl_handle_creat(const struct dentry *dentry,
55581+ const struct dentry *p_dentry,
55582+ const struct vfsmount *p_mnt, const int fmode,
55583+ const int imode);
55584+void gr_handle_create(const struct dentry *dentry,
55585+ const struct vfsmount *mnt);
55586+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
55587+ const struct dentry *parent_dentry,
55588+ const struct vfsmount *parent_mnt,
55589+ const int mode);
55590+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
55591+ const struct dentry *parent_dentry,
55592+ const struct vfsmount *parent_mnt);
55593+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
55594+ const struct vfsmount *mnt);
55595+void gr_handle_delete(const ino_t ino, const dev_t dev);
55596+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
55597+ const struct vfsmount *mnt);
55598+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
55599+ const struct dentry *parent_dentry,
55600+ const struct vfsmount *parent_mnt,
55601+ const char *from);
55602+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
55603+ const struct dentry *parent_dentry,
55604+ const struct vfsmount *parent_mnt,
55605+ const struct dentry *old_dentry,
55606+ const struct vfsmount *old_mnt, const char *to);
55607+int gr_acl_handle_rename(struct dentry *new_dentry,
55608+ struct dentry *parent_dentry,
55609+ const struct vfsmount *parent_mnt,
55610+ struct dentry *old_dentry,
55611+ struct inode *old_parent_inode,
55612+ struct vfsmount *old_mnt, const char *newname);
55613+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
55614+ struct dentry *old_dentry,
55615+ struct dentry *new_dentry,
55616+ struct vfsmount *mnt, const __u8 replace);
55617+__u32 gr_check_link(const struct dentry *new_dentry,
55618+ const struct dentry *parent_dentry,
55619+ const struct vfsmount *parent_mnt,
55620+ const struct dentry *old_dentry,
55621+ const struct vfsmount *old_mnt);
55622+int gr_acl_handle_filldir(const struct file *file, const char *name,
55623+ const unsigned int namelen, const ino_t ino);
55624+
55625+__u32 gr_acl_handle_unix(const struct dentry *dentry,
55626+ const struct vfsmount *mnt);
55627+void gr_acl_handle_exit(void);
55628+void gr_acl_handle_psacct(struct task_struct *task, const long code);
55629+int gr_acl_handle_procpidmem(const struct task_struct *task);
55630+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
55631+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
55632+void gr_audit_ptrace(struct task_struct *task);
55633+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
55634+
55635+#ifdef CONFIG_GRKERNSEC
55636+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
55637+void gr_handle_vm86(void);
55638+void gr_handle_mem_readwrite(u64 from, u64 to);
55639+
55640+extern int grsec_enable_dmesg;
55641+extern int grsec_disable_privio;
55642+#endif
55643+
55644+#endif
55645diff -urNp linux-2.6.32.41/include/linux/hdpu_features.h linux-2.6.32.41/include/linux/hdpu_features.h
55646--- linux-2.6.32.41/include/linux/hdpu_features.h 2011-03-27 14:31:47.000000000 -0400
55647+++ linux-2.6.32.41/include/linux/hdpu_features.h 2011-04-17 15:56:46.000000000 -0400
55648@@ -3,7 +3,7 @@
55649 struct cpustate_t {
55650 spinlock_t lock;
55651 int excl;
55652- int open_count;
55653+ atomic_t open_count;
55654 unsigned char cached_val;
55655 int inited;
55656 unsigned long *set_addr;
55657diff -urNp linux-2.6.32.41/include/linux/highmem.h linux-2.6.32.41/include/linux/highmem.h
55658--- linux-2.6.32.41/include/linux/highmem.h 2011-03-27 14:31:47.000000000 -0400
55659+++ linux-2.6.32.41/include/linux/highmem.h 2011-04-17 15:56:46.000000000 -0400
55660@@ -137,6 +137,18 @@ static inline void clear_highpage(struct
55661 kunmap_atomic(kaddr, KM_USER0);
55662 }
55663
55664+static inline void sanitize_highpage(struct page *page)
55665+{
55666+ void *kaddr;
55667+ unsigned long flags;
55668+
55669+ local_irq_save(flags);
55670+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
55671+ clear_page(kaddr);
55672+ kunmap_atomic(kaddr, KM_CLEARPAGE);
55673+ local_irq_restore(flags);
55674+}
55675+
55676 static inline void zero_user_segments(struct page *page,
55677 unsigned start1, unsigned end1,
55678 unsigned start2, unsigned end2)
55679diff -urNp linux-2.6.32.41/include/linux/i2o.h linux-2.6.32.41/include/linux/i2o.h
55680--- linux-2.6.32.41/include/linux/i2o.h 2011-03-27 14:31:47.000000000 -0400
55681+++ linux-2.6.32.41/include/linux/i2o.h 2011-05-04 17:56:28.000000000 -0400
55682@@ -564,7 +564,7 @@ struct i2o_controller {
55683 struct i2o_device *exec; /* Executive */
55684 #if BITS_PER_LONG == 64
55685 spinlock_t context_list_lock; /* lock for context_list */
55686- atomic_t context_list_counter; /* needed for unique contexts */
55687+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
55688 struct list_head context_list; /* list of context id's
55689 and pointers */
55690 #endif
55691diff -urNp linux-2.6.32.41/include/linux/init_task.h linux-2.6.32.41/include/linux/init_task.h
55692--- linux-2.6.32.41/include/linux/init_task.h 2011-03-27 14:31:47.000000000 -0400
55693+++ linux-2.6.32.41/include/linux/init_task.h 2011-05-18 20:44:59.000000000 -0400
55694@@ -83,6 +83,12 @@ extern struct group_info init_groups;
55695 #define INIT_IDS
55696 #endif
55697
55698+#ifdef CONFIG_X86
55699+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
55700+#else
55701+#define INIT_TASK_THREAD_INFO
55702+#endif
55703+
55704 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
55705 /*
55706 * Because of the reduced scope of CAP_SETPCAP when filesystem
55707@@ -156,6 +162,7 @@ extern struct cred init_cred;
55708 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
55709 .comm = "swapper", \
55710 .thread = INIT_THREAD, \
55711+ INIT_TASK_THREAD_INFO \
55712 .fs = &init_fs, \
55713 .files = &init_files, \
55714 .signal = &init_signals, \
55715diff -urNp linux-2.6.32.41/include/linux/interrupt.h linux-2.6.32.41/include/linux/interrupt.h
55716--- linux-2.6.32.41/include/linux/interrupt.h 2011-03-27 14:31:47.000000000 -0400
55717+++ linux-2.6.32.41/include/linux/interrupt.h 2011-04-17 15:56:46.000000000 -0400
55718@@ -362,7 +362,7 @@ enum
55719 /* map softirq index to softirq name. update 'softirq_to_name' in
55720 * kernel/softirq.c when adding a new softirq.
55721 */
55722-extern char *softirq_to_name[NR_SOFTIRQS];
55723+extern const char * const softirq_to_name[NR_SOFTIRQS];
55724
55725 /* softirq mask and active fields moved to irq_cpustat_t in
55726 * asm/hardirq.h to get better cache usage. KAO
55727@@ -370,12 +370,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
55728
55729 struct softirq_action
55730 {
55731- void (*action)(struct softirq_action *);
55732+ void (*action)(void);
55733 };
55734
55735 asmlinkage void do_softirq(void);
55736 asmlinkage void __do_softirq(void);
55737-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
55738+extern void open_softirq(int nr, void (*action)(void));
55739 extern void softirq_init(void);
55740 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
55741 extern void raise_softirq_irqoff(unsigned int nr);
55742diff -urNp linux-2.6.32.41/include/linux/irq.h linux-2.6.32.41/include/linux/irq.h
55743--- linux-2.6.32.41/include/linux/irq.h 2011-03-27 14:31:47.000000000 -0400
55744+++ linux-2.6.32.41/include/linux/irq.h 2011-04-17 15:56:46.000000000 -0400
55745@@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq,
55746 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
55747 bool boot)
55748 {
55749+#ifdef CONFIG_CPUMASK_OFFSTACK
55750 gfp_t gfp = GFP_ATOMIC;
55751
55752 if (boot)
55753 gfp = GFP_NOWAIT;
55754
55755-#ifdef CONFIG_CPUMASK_OFFSTACK
55756 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
55757 return false;
55758
55759diff -urNp linux-2.6.32.41/include/linux/kallsyms.h linux-2.6.32.41/include/linux/kallsyms.h
55760--- linux-2.6.32.41/include/linux/kallsyms.h 2011-03-27 14:31:47.000000000 -0400
55761+++ linux-2.6.32.41/include/linux/kallsyms.h 2011-04-17 15:56:46.000000000 -0400
55762@@ -15,7 +15,8 @@
55763
55764 struct module;
55765
55766-#ifdef CONFIG_KALLSYMS
55767+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
55768+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55769 /* Lookup the address for a symbol. Returns 0 if not found. */
55770 unsigned long kallsyms_lookup_name(const char *name);
55771
55772@@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(un
55773 /* Stupid that this does nothing, but I didn't create this mess. */
55774 #define __print_symbol(fmt, addr)
55775 #endif /*CONFIG_KALLSYMS*/
55776+#else /* when included by kallsyms.c, vsnprintf.c, or
55777+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
55778+extern void __print_symbol(const char *fmt, unsigned long address);
55779+extern int sprint_symbol(char *buffer, unsigned long address);
55780+const char *kallsyms_lookup(unsigned long addr,
55781+ unsigned long *symbolsize,
55782+ unsigned long *offset,
55783+ char **modname, char *namebuf);
55784+#endif
55785
55786 /* This macro allows us to keep printk typechecking */
55787 static void __check_printsym_format(const char *fmt, ...)
55788diff -urNp linux-2.6.32.41/include/linux/kgdb.h linux-2.6.32.41/include/linux/kgdb.h
55789--- linux-2.6.32.41/include/linux/kgdb.h 2011-03-27 14:31:47.000000000 -0400
55790+++ linux-2.6.32.41/include/linux/kgdb.h 2011-05-04 17:56:20.000000000 -0400
55791@@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
55792
55793 extern int kgdb_connected;
55794
55795-extern atomic_t kgdb_setting_breakpoint;
55796-extern atomic_t kgdb_cpu_doing_single_step;
55797+extern atomic_unchecked_t kgdb_setting_breakpoint;
55798+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
55799
55800 extern struct task_struct *kgdb_usethread;
55801 extern struct task_struct *kgdb_contthread;
55802@@ -251,20 +251,20 @@ struct kgdb_arch {
55803 */
55804 struct kgdb_io {
55805 const char *name;
55806- int (*read_char) (void);
55807- void (*write_char) (u8);
55808- void (*flush) (void);
55809- int (*init) (void);
55810- void (*pre_exception) (void);
55811- void (*post_exception) (void);
55812+ int (* const read_char) (void);
55813+ void (* const write_char) (u8);
55814+ void (* const flush) (void);
55815+ int (* const init) (void);
55816+ void (* const pre_exception) (void);
55817+ void (* const post_exception) (void);
55818 };
55819
55820-extern struct kgdb_arch arch_kgdb_ops;
55821+extern const struct kgdb_arch arch_kgdb_ops;
55822
55823 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
55824
55825-extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
55826-extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
55827+extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
55828+extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
55829
55830 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
55831 extern int kgdb_mem2hex(char *mem, char *buf, int count);
55832diff -urNp linux-2.6.32.41/include/linux/kmod.h linux-2.6.32.41/include/linux/kmod.h
55833--- linux-2.6.32.41/include/linux/kmod.h 2011-03-27 14:31:47.000000000 -0400
55834+++ linux-2.6.32.41/include/linux/kmod.h 2011-04-17 15:56:46.000000000 -0400
55835@@ -31,6 +31,8 @@
55836 * usually useless though. */
55837 extern int __request_module(bool wait, const char *name, ...) \
55838 __attribute__((format(printf, 2, 3)));
55839+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
55840+ __attribute__((format(printf, 3, 4)));
55841 #define request_module(mod...) __request_module(true, mod)
55842 #define request_module_nowait(mod...) __request_module(false, mod)
55843 #define try_then_request_module(x, mod...) \
55844diff -urNp linux-2.6.32.41/include/linux/kobject.h linux-2.6.32.41/include/linux/kobject.h
55845--- linux-2.6.32.41/include/linux/kobject.h 2011-03-27 14:31:47.000000000 -0400
55846+++ linux-2.6.32.41/include/linux/kobject.h 2011-04-17 15:56:46.000000000 -0400
55847@@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kob
55848
55849 struct kobj_type {
55850 void (*release)(struct kobject *kobj);
55851- struct sysfs_ops *sysfs_ops;
55852+ const struct sysfs_ops *sysfs_ops;
55853 struct attribute **default_attrs;
55854 };
55855
55856@@ -118,9 +118,9 @@ struct kobj_uevent_env {
55857 };
55858
55859 struct kset_uevent_ops {
55860- int (*filter)(struct kset *kset, struct kobject *kobj);
55861- const char *(*name)(struct kset *kset, struct kobject *kobj);
55862- int (*uevent)(struct kset *kset, struct kobject *kobj,
55863+ int (* const filter)(struct kset *kset, struct kobject *kobj);
55864+ const char *(* const name)(struct kset *kset, struct kobject *kobj);
55865+ int (* const uevent)(struct kset *kset, struct kobject *kobj,
55866 struct kobj_uevent_env *env);
55867 };
55868
55869@@ -132,7 +132,7 @@ struct kobj_attribute {
55870 const char *buf, size_t count);
55871 };
55872
55873-extern struct sysfs_ops kobj_sysfs_ops;
55874+extern const struct sysfs_ops kobj_sysfs_ops;
55875
55876 /**
55877 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
55878@@ -155,14 +155,14 @@ struct kset {
55879 struct list_head list;
55880 spinlock_t list_lock;
55881 struct kobject kobj;
55882- struct kset_uevent_ops *uevent_ops;
55883+ const struct kset_uevent_ops *uevent_ops;
55884 };
55885
55886 extern void kset_init(struct kset *kset);
55887 extern int __must_check kset_register(struct kset *kset);
55888 extern void kset_unregister(struct kset *kset);
55889 extern struct kset * __must_check kset_create_and_add(const char *name,
55890- struct kset_uevent_ops *u,
55891+ const struct kset_uevent_ops *u,
55892 struct kobject *parent_kobj);
55893
55894 static inline struct kset *to_kset(struct kobject *kobj)
55895diff -urNp linux-2.6.32.41/include/linux/kvm_host.h linux-2.6.32.41/include/linux/kvm_host.h
55896--- linux-2.6.32.41/include/linux/kvm_host.h 2011-03-27 14:31:47.000000000 -0400
55897+++ linux-2.6.32.41/include/linux/kvm_host.h 2011-04-17 15:56:46.000000000 -0400
55898@@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
55899 void vcpu_load(struct kvm_vcpu *vcpu);
55900 void vcpu_put(struct kvm_vcpu *vcpu);
55901
55902-int kvm_init(void *opaque, unsigned int vcpu_size,
55903+int kvm_init(const void *opaque, unsigned int vcpu_size,
55904 struct module *module);
55905 void kvm_exit(void);
55906
55907@@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
55908 struct kvm_guest_debug *dbg);
55909 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
55910
55911-int kvm_arch_init(void *opaque);
55912+int kvm_arch_init(const void *opaque);
55913 void kvm_arch_exit(void);
55914
55915 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
55916diff -urNp linux-2.6.32.41/include/linux/libata.h linux-2.6.32.41/include/linux/libata.h
55917--- linux-2.6.32.41/include/linux/libata.h 2011-03-27 14:31:47.000000000 -0400
55918+++ linux-2.6.32.41/include/linux/libata.h 2011-04-23 12:56:11.000000000 -0400
55919@@ -525,11 +525,11 @@ struct ata_ioports {
55920
55921 struct ata_host {
55922 spinlock_t lock;
55923- struct device *dev;
55924+ struct device *dev;
55925 void __iomem * const *iomap;
55926 unsigned int n_ports;
55927 void *private_data;
55928- struct ata_port_operations *ops;
55929+ const struct ata_port_operations *ops;
55930 unsigned long flags;
55931 #ifdef CONFIG_ATA_ACPI
55932 acpi_handle acpi_handle;
55933@@ -710,7 +710,7 @@ struct ata_link {
55934
55935 struct ata_port {
55936 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
55937- struct ata_port_operations *ops;
55938+ const struct ata_port_operations *ops;
55939 spinlock_t *lock;
55940 /* Flags owned by the EH context. Only EH should touch these once the
55941 port is active */
55942@@ -892,7 +892,7 @@ struct ata_port_info {
55943 unsigned long pio_mask;
55944 unsigned long mwdma_mask;
55945 unsigned long udma_mask;
55946- struct ata_port_operations *port_ops;
55947+ const struct ata_port_operations *port_ops;
55948 void *private_data;
55949 };
55950
55951@@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timi
55952 extern const unsigned long sata_deb_timing_hotplug[];
55953 extern const unsigned long sata_deb_timing_long[];
55954
55955-extern struct ata_port_operations ata_dummy_port_ops;
55956+extern const struct ata_port_operations ata_dummy_port_ops;
55957 extern const struct ata_port_info ata_dummy_port_info;
55958
55959 static inline const unsigned long *
55960@@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_
55961 struct scsi_host_template *sht);
55962 extern void ata_host_detach(struct ata_host *host);
55963 extern void ata_host_init(struct ata_host *, struct device *,
55964- unsigned long, struct ata_port_operations *);
55965+ unsigned long, const struct ata_port_operations *);
55966 extern int ata_scsi_detect(struct scsi_host_template *sht);
55967 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
55968 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
55969diff -urNp linux-2.6.32.41/include/linux/lockd/bind.h linux-2.6.32.41/include/linux/lockd/bind.h
55970--- linux-2.6.32.41/include/linux/lockd/bind.h 2011-03-27 14:31:47.000000000 -0400
55971+++ linux-2.6.32.41/include/linux/lockd/bind.h 2011-04-17 15:56:46.000000000 -0400
55972@@ -23,13 +23,13 @@ struct svc_rqst;
55973 * This is the set of functions for lockd->nfsd communication
55974 */
55975 struct nlmsvc_binding {
55976- __be32 (*fopen)(struct svc_rqst *,
55977+ __be32 (* const fopen)(struct svc_rqst *,
55978 struct nfs_fh *,
55979 struct file **);
55980- void (*fclose)(struct file *);
55981+ void (* const fclose)(struct file *);
55982 };
55983
55984-extern struct nlmsvc_binding * nlmsvc_ops;
55985+extern const struct nlmsvc_binding * nlmsvc_ops;
55986
55987 /*
55988 * Similar to nfs_client_initdata, but without the NFS-specific
55989diff -urNp linux-2.6.32.41/include/linux/mm.h linux-2.6.32.41/include/linux/mm.h
55990--- linux-2.6.32.41/include/linux/mm.h 2011-03-27 14:31:47.000000000 -0400
55991+++ linux-2.6.32.41/include/linux/mm.h 2011-04-17 15:56:46.000000000 -0400
55992@@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void
55993
55994 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
55995 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
55996+
55997+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
55998+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
55999+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
56000+#else
56001 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
56002+#endif
56003+
56004 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
56005 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
56006
56007@@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
56008 int set_page_dirty_lock(struct page *page);
56009 int clear_page_dirty_for_io(struct page *page);
56010
56011-/* Is the vma a continuation of the stack vma above it? */
56012-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
56013-{
56014- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
56015-}
56016-
56017 extern unsigned long move_page_tables(struct vm_area_struct *vma,
56018 unsigned long old_addr, struct vm_area_struct *new_vma,
56019 unsigned long new_addr, unsigned long len);
56020@@ -890,6 +891,8 @@ struct shrinker {
56021 extern void register_shrinker(struct shrinker *);
56022 extern void unregister_shrinker(struct shrinker *);
56023
56024+pgprot_t vm_get_page_prot(unsigned long vm_flags);
56025+
56026 int vma_wants_writenotify(struct vm_area_struct *vma);
56027
56028 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
56029@@ -1162,6 +1165,7 @@ out:
56030 }
56031
56032 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
56033+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
56034
56035 extern unsigned long do_brk(unsigned long, unsigned long);
56036
56037@@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(
56038 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
56039 struct vm_area_struct **pprev);
56040
56041+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
56042+extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
56043+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
56044+
56045 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
56046 NULL if none. Assume start_addr < end_addr. */
56047 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
56048@@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(st
56049 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
56050 }
56051
56052-pgprot_t vm_get_page_prot(unsigned long vm_flags);
56053 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
56054 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
56055 unsigned long pfn, unsigned long size, pgprot_t);
56056@@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long
56057 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
56058 extern int sysctl_memory_failure_early_kill;
56059 extern int sysctl_memory_failure_recovery;
56060-extern atomic_long_t mce_bad_pages;
56061+extern atomic_long_unchecked_t mce_bad_pages;
56062+
56063+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
56064+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
56065+#else
56066+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
56067+#endif
56068
56069 #endif /* __KERNEL__ */
56070 #endif /* _LINUX_MM_H */
56071diff -urNp linux-2.6.32.41/include/linux/mm_types.h linux-2.6.32.41/include/linux/mm_types.h
56072--- linux-2.6.32.41/include/linux/mm_types.h 2011-03-27 14:31:47.000000000 -0400
56073+++ linux-2.6.32.41/include/linux/mm_types.h 2011-04-17 15:56:46.000000000 -0400
56074@@ -186,6 +186,8 @@ struct vm_area_struct {
56075 #ifdef CONFIG_NUMA
56076 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
56077 #endif
56078+
56079+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
56080 };
56081
56082 struct core_thread {
56083@@ -287,6 +289,24 @@ struct mm_struct {
56084 #ifdef CONFIG_MMU_NOTIFIER
56085 struct mmu_notifier_mm *mmu_notifier_mm;
56086 #endif
56087+
56088+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
56089+ unsigned long pax_flags;
56090+#endif
56091+
56092+#ifdef CONFIG_PAX_DLRESOLVE
56093+ unsigned long call_dl_resolve;
56094+#endif
56095+
56096+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
56097+ unsigned long call_syscall;
56098+#endif
56099+
56100+#ifdef CONFIG_PAX_ASLR
56101+ unsigned long delta_mmap; /* randomized offset */
56102+ unsigned long delta_stack; /* randomized offset */
56103+#endif
56104+
56105 };
56106
56107 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
56108diff -urNp linux-2.6.32.41/include/linux/mmu_notifier.h linux-2.6.32.41/include/linux/mmu_notifier.h
56109--- linux-2.6.32.41/include/linux/mmu_notifier.h 2011-03-27 14:31:47.000000000 -0400
56110+++ linux-2.6.32.41/include/linux/mmu_notifier.h 2011-04-17 15:56:46.000000000 -0400
56111@@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destr
56112 */
56113 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
56114 ({ \
56115- pte_t __pte; \
56116+ pte_t ___pte; \
56117 struct vm_area_struct *___vma = __vma; \
56118 unsigned long ___address = __address; \
56119- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
56120+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
56121 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
56122- __pte; \
56123+ ___pte; \
56124 })
56125
56126 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
56127diff -urNp linux-2.6.32.41/include/linux/mmzone.h linux-2.6.32.41/include/linux/mmzone.h
56128--- linux-2.6.32.41/include/linux/mmzone.h 2011-03-27 14:31:47.000000000 -0400
56129+++ linux-2.6.32.41/include/linux/mmzone.h 2011-04-17 15:56:46.000000000 -0400
56130@@ -350,7 +350,7 @@ struct zone {
56131 unsigned long flags; /* zone flags, see below */
56132
56133 /* Zone statistics */
56134- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
56135+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
56136
56137 /*
56138 * prev_priority holds the scanning priority for this zone. It is
56139diff -urNp linux-2.6.32.41/include/linux/mod_devicetable.h linux-2.6.32.41/include/linux/mod_devicetable.h
56140--- linux-2.6.32.41/include/linux/mod_devicetable.h 2011-03-27 14:31:47.000000000 -0400
56141+++ linux-2.6.32.41/include/linux/mod_devicetable.h 2011-04-17 15:56:46.000000000 -0400
56142@@ -12,7 +12,7 @@
56143 typedef unsigned long kernel_ulong_t;
56144 #endif
56145
56146-#define PCI_ANY_ID (~0)
56147+#define PCI_ANY_ID ((__u16)~0)
56148
56149 struct pci_device_id {
56150 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
56151@@ -131,7 +131,7 @@ struct usb_device_id {
56152 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
56153 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
56154
56155-#define HID_ANY_ID (~0)
56156+#define HID_ANY_ID (~0U)
56157
56158 struct hid_device_id {
56159 __u16 bus;
56160diff -urNp linux-2.6.32.41/include/linux/module.h linux-2.6.32.41/include/linux/module.h
56161--- linux-2.6.32.41/include/linux/module.h 2011-03-27 14:31:47.000000000 -0400
56162+++ linux-2.6.32.41/include/linux/module.h 2011-04-17 15:56:46.000000000 -0400
56163@@ -287,16 +287,16 @@ struct module
56164 int (*init)(void);
56165
56166 /* If this is non-NULL, vfree after init() returns */
56167- void *module_init;
56168+ void *module_init_rx, *module_init_rw;
56169
56170 /* Here is the actual code + data, vfree'd on unload. */
56171- void *module_core;
56172+ void *module_core_rx, *module_core_rw;
56173
56174 /* Here are the sizes of the init and core sections */
56175- unsigned int init_size, core_size;
56176+ unsigned int init_size_rw, core_size_rw;
56177
56178 /* The size of the executable code in each section. */
56179- unsigned int init_text_size, core_text_size;
56180+ unsigned int init_size_rx, core_size_rx;
56181
56182 /* Arch-specific module values */
56183 struct mod_arch_specific arch;
56184@@ -393,16 +393,46 @@ struct module *__module_address(unsigned
56185 bool is_module_address(unsigned long addr);
56186 bool is_module_text_address(unsigned long addr);
56187
56188+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
56189+{
56190+
56191+#ifdef CONFIG_PAX_KERNEXEC
56192+ if (ktla_ktva(addr) >= (unsigned long)start &&
56193+ ktla_ktva(addr) < (unsigned long)start + size)
56194+ return 1;
56195+#endif
56196+
56197+ return ((void *)addr >= start && (void *)addr < start + size);
56198+}
56199+
56200+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
56201+{
56202+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
56203+}
56204+
56205+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
56206+{
56207+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
56208+}
56209+
56210+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
56211+{
56212+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
56213+}
56214+
56215+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
56216+{
56217+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
56218+}
56219+
56220 static inline int within_module_core(unsigned long addr, struct module *mod)
56221 {
56222- return (unsigned long)mod->module_core <= addr &&
56223- addr < (unsigned long)mod->module_core + mod->core_size;
56224+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
56225 }
56226
56227 static inline int within_module_init(unsigned long addr, struct module *mod)
56228 {
56229- return (unsigned long)mod->module_init <= addr &&
56230- addr < (unsigned long)mod->module_init + mod->init_size;
56231+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
56232 }
56233
56234 /* Search for module by name: must hold module_mutex. */
56235diff -urNp linux-2.6.32.41/include/linux/moduleloader.h linux-2.6.32.41/include/linux/moduleloader.h
56236--- linux-2.6.32.41/include/linux/moduleloader.h 2011-03-27 14:31:47.000000000 -0400
56237+++ linux-2.6.32.41/include/linux/moduleloader.h 2011-04-17 15:56:46.000000000 -0400
56238@@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
56239 sections. Returns NULL on failure. */
56240 void *module_alloc(unsigned long size);
56241
56242+#ifdef CONFIG_PAX_KERNEXEC
56243+void *module_alloc_exec(unsigned long size);
56244+#else
56245+#define module_alloc_exec(x) module_alloc(x)
56246+#endif
56247+
56248 /* Free memory returned from module_alloc. */
56249 void module_free(struct module *mod, void *module_region);
56250
56251+#ifdef CONFIG_PAX_KERNEXEC
56252+void module_free_exec(struct module *mod, void *module_region);
56253+#else
56254+#define module_free_exec(x, y) module_free((x), (y))
56255+#endif
56256+
56257 /* Apply the given relocation to the (simplified) ELF. Return -error
56258 or 0. */
56259 int apply_relocate(Elf_Shdr *sechdrs,
56260diff -urNp linux-2.6.32.41/include/linux/moduleparam.h linux-2.6.32.41/include/linux/moduleparam.h
56261--- linux-2.6.32.41/include/linux/moduleparam.h 2011-03-27 14:31:47.000000000 -0400
56262+++ linux-2.6.32.41/include/linux/moduleparam.h 2011-04-17 15:56:46.000000000 -0400
56263@@ -132,7 +132,7 @@ struct kparam_array
56264
56265 /* Actually copy string: maxlen param is usually sizeof(string). */
56266 #define module_param_string(name, string, len, perm) \
56267- static const struct kparam_string __param_string_##name \
56268+ static const struct kparam_string __param_string_##name __used \
56269 = { len, string }; \
56270 __module_param_call(MODULE_PARAM_PREFIX, name, \
56271 param_set_copystring, param_get_string, \
56272@@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffe
56273
56274 /* Comma-separated array: *nump is set to number they actually specified. */
56275 #define module_param_array_named(name, array, type, nump, perm) \
56276- static const struct kparam_array __param_arr_##name \
56277+ static const struct kparam_array __param_arr_##name __used \
56278 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
56279 sizeof(array[0]), array }; \
56280 __module_param_call(MODULE_PARAM_PREFIX, name, \
56281diff -urNp linux-2.6.32.41/include/linux/mutex.h linux-2.6.32.41/include/linux/mutex.h
56282--- linux-2.6.32.41/include/linux/mutex.h 2011-03-27 14:31:47.000000000 -0400
56283+++ linux-2.6.32.41/include/linux/mutex.h 2011-04-17 15:56:46.000000000 -0400
56284@@ -51,7 +51,7 @@ struct mutex {
56285 spinlock_t wait_lock;
56286 struct list_head wait_list;
56287 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
56288- struct thread_info *owner;
56289+ struct task_struct *owner;
56290 #endif
56291 #ifdef CONFIG_DEBUG_MUTEXES
56292 const char *name;
56293diff -urNp linux-2.6.32.41/include/linux/namei.h linux-2.6.32.41/include/linux/namei.h
56294--- linux-2.6.32.41/include/linux/namei.h 2011-03-27 14:31:47.000000000 -0400
56295+++ linux-2.6.32.41/include/linux/namei.h 2011-04-17 15:56:46.000000000 -0400
56296@@ -22,7 +22,7 @@ struct nameidata {
56297 unsigned int flags;
56298 int last_type;
56299 unsigned depth;
56300- char *saved_names[MAX_NESTED_LINKS + 1];
56301+ const char *saved_names[MAX_NESTED_LINKS + 1];
56302
56303 /* Intent data */
56304 union {
56305@@ -84,12 +84,12 @@ extern int follow_up(struct path *);
56306 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
56307 extern void unlock_rename(struct dentry *, struct dentry *);
56308
56309-static inline void nd_set_link(struct nameidata *nd, char *path)
56310+static inline void nd_set_link(struct nameidata *nd, const char *path)
56311 {
56312 nd->saved_names[nd->depth] = path;
56313 }
56314
56315-static inline char *nd_get_link(struct nameidata *nd)
56316+static inline const char *nd_get_link(const struct nameidata *nd)
56317 {
56318 return nd->saved_names[nd->depth];
56319 }
56320diff -urNp linux-2.6.32.41/include/linux/netfilter/xt_gradm.h linux-2.6.32.41/include/linux/netfilter/xt_gradm.h
56321--- linux-2.6.32.41/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
56322+++ linux-2.6.32.41/include/linux/netfilter/xt_gradm.h 2011-04-17 15:56:46.000000000 -0400
56323@@ -0,0 +1,9 @@
56324+#ifndef _LINUX_NETFILTER_XT_GRADM_H
56325+#define _LINUX_NETFILTER_XT_GRADM_H 1
56326+
56327+struct xt_gradm_mtinfo {
56328+ __u16 flags;
56329+ __u16 invflags;
56330+};
56331+
56332+#endif
56333diff -urNp linux-2.6.32.41/include/linux/nodemask.h linux-2.6.32.41/include/linux/nodemask.h
56334--- linux-2.6.32.41/include/linux/nodemask.h 2011-03-27 14:31:47.000000000 -0400
56335+++ linux-2.6.32.41/include/linux/nodemask.h 2011-04-17 15:56:46.000000000 -0400
56336@@ -464,11 +464,11 @@ static inline int num_node_state(enum no
56337
56338 #define any_online_node(mask) \
56339 ({ \
56340- int node; \
56341- for_each_node_mask(node, (mask)) \
56342- if (node_online(node)) \
56343+ int __node; \
56344+ for_each_node_mask(__node, (mask)) \
56345+ if (node_online(__node)) \
56346 break; \
56347- node; \
56348+ __node; \
56349 })
56350
56351 #define num_online_nodes() num_node_state(N_ONLINE)
56352diff -urNp linux-2.6.32.41/include/linux/oprofile.h linux-2.6.32.41/include/linux/oprofile.h
56353--- linux-2.6.32.41/include/linux/oprofile.h 2011-03-27 14:31:47.000000000 -0400
56354+++ linux-2.6.32.41/include/linux/oprofile.h 2011-04-17 15:56:46.000000000 -0400
56355@@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super
56356 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
56357 char const * name, ulong * val);
56358
56359-/** Create a file for read-only access to an atomic_t. */
56360+/** Create a file for read-only access to an atomic_unchecked_t. */
56361 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
56362- char const * name, atomic_t * val);
56363+ char const * name, atomic_unchecked_t * val);
56364
56365 /** create a directory */
56366 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
56367diff -urNp linux-2.6.32.41/include/linux/perf_event.h linux-2.6.32.41/include/linux/perf_event.h
56368--- linux-2.6.32.41/include/linux/perf_event.h 2011-03-27 14:31:47.000000000 -0400
56369+++ linux-2.6.32.41/include/linux/perf_event.h 2011-05-04 17:56:28.000000000 -0400
56370@@ -476,7 +476,7 @@ struct hw_perf_event {
56371 struct hrtimer hrtimer;
56372 };
56373 };
56374- atomic64_t prev_count;
56375+ atomic64_unchecked_t prev_count;
56376 u64 sample_period;
56377 u64 last_period;
56378 atomic64_t period_left;
56379@@ -557,7 +557,7 @@ struct perf_event {
56380 const struct pmu *pmu;
56381
56382 enum perf_event_active_state state;
56383- atomic64_t count;
56384+ atomic64_unchecked_t count;
56385
56386 /*
56387 * These are the total time in nanoseconds that the event
56388@@ -595,8 +595,8 @@ struct perf_event {
56389 * These accumulate total time (in nanoseconds) that children
56390 * events have been enabled and running, respectively.
56391 */
56392- atomic64_t child_total_time_enabled;
56393- atomic64_t child_total_time_running;
56394+ atomic64_unchecked_t child_total_time_enabled;
56395+ atomic64_unchecked_t child_total_time_running;
56396
56397 /*
56398 * Protect attach/detach and child_list:
56399diff -urNp linux-2.6.32.41/include/linux/pipe_fs_i.h linux-2.6.32.41/include/linux/pipe_fs_i.h
56400--- linux-2.6.32.41/include/linux/pipe_fs_i.h 2011-03-27 14:31:47.000000000 -0400
56401+++ linux-2.6.32.41/include/linux/pipe_fs_i.h 2011-04-17 15:56:46.000000000 -0400
56402@@ -46,9 +46,9 @@ struct pipe_inode_info {
56403 wait_queue_head_t wait;
56404 unsigned int nrbufs, curbuf;
56405 struct page *tmp_page;
56406- unsigned int readers;
56407- unsigned int writers;
56408- unsigned int waiting_writers;
56409+ atomic_t readers;
56410+ atomic_t writers;
56411+ atomic_t waiting_writers;
56412 unsigned int r_counter;
56413 unsigned int w_counter;
56414 struct fasync_struct *fasync_readers;
56415diff -urNp linux-2.6.32.41/include/linux/poison.h linux-2.6.32.41/include/linux/poison.h
56416--- linux-2.6.32.41/include/linux/poison.h 2011-03-27 14:31:47.000000000 -0400
56417+++ linux-2.6.32.41/include/linux/poison.h 2011-04-17 15:56:46.000000000 -0400
56418@@ -19,8 +19,8 @@
56419 * under normal circumstances, used to verify that nobody uses
56420 * non-initialized list entries.
56421 */
56422-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
56423-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
56424+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
56425+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
56426
56427 /********** include/linux/timer.h **********/
56428 /*
56429diff -urNp linux-2.6.32.41/include/linux/proc_fs.h linux-2.6.32.41/include/linux/proc_fs.h
56430--- linux-2.6.32.41/include/linux/proc_fs.h 2011-03-27 14:31:47.000000000 -0400
56431+++ linux-2.6.32.41/include/linux/proc_fs.h 2011-04-17 15:56:46.000000000 -0400
56432@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
56433 return proc_create_data(name, mode, parent, proc_fops, NULL);
56434 }
56435
56436+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
56437+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
56438+{
56439+#ifdef CONFIG_GRKERNSEC_PROC_USER
56440+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
56441+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
56442+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
56443+#else
56444+ return proc_create_data(name, mode, parent, proc_fops, NULL);
56445+#endif
56446+}
56447+
56448+
56449 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
56450 mode_t mode, struct proc_dir_entry *base,
56451 read_proc_t *read_proc, void * data)
56452diff -urNp linux-2.6.32.41/include/linux/ptrace.h linux-2.6.32.41/include/linux/ptrace.h
56453--- linux-2.6.32.41/include/linux/ptrace.h 2011-03-27 14:31:47.000000000 -0400
56454+++ linux-2.6.32.41/include/linux/ptrace.h 2011-04-17 15:56:46.000000000 -0400
56455@@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_
56456 extern void exit_ptrace(struct task_struct *tracer);
56457 #define PTRACE_MODE_READ 1
56458 #define PTRACE_MODE_ATTACH 2
56459-/* Returns 0 on success, -errno on denial. */
56460-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
56461 /* Returns true on success, false on denial. */
56462 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
56463+/* Returns true on success, false on denial. */
56464+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
56465
56466 static inline int ptrace_reparented(struct task_struct *child)
56467 {
56468diff -urNp linux-2.6.32.41/include/linux/random.h linux-2.6.32.41/include/linux/random.h
56469--- linux-2.6.32.41/include/linux/random.h 2011-03-27 14:31:47.000000000 -0400
56470+++ linux-2.6.32.41/include/linux/random.h 2011-04-17 15:56:46.000000000 -0400
56471@@ -74,6 +74,11 @@ unsigned long randomize_range(unsigned l
56472 u32 random32(void);
56473 void srandom32(u32 seed);
56474
56475+static inline unsigned long pax_get_random_long(void)
56476+{
56477+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
56478+}
56479+
56480 #endif /* __KERNEL___ */
56481
56482 #endif /* _LINUX_RANDOM_H */
56483diff -urNp linux-2.6.32.41/include/linux/reboot.h linux-2.6.32.41/include/linux/reboot.h
56484--- linux-2.6.32.41/include/linux/reboot.h 2011-03-27 14:31:47.000000000 -0400
56485+++ linux-2.6.32.41/include/linux/reboot.h 2011-05-22 23:02:06.000000000 -0400
56486@@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
56487 * Architecture-specific implementations of sys_reboot commands.
56488 */
56489
56490-extern void machine_restart(char *cmd);
56491-extern void machine_halt(void);
56492-extern void machine_power_off(void);
56493+extern void machine_restart(char *cmd) __noreturn;
56494+extern void machine_halt(void) __noreturn;
56495+extern void machine_power_off(void) __noreturn;
56496
56497 extern void machine_shutdown(void);
56498 struct pt_regs;
56499@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
56500 */
56501
56502 extern void kernel_restart_prepare(char *cmd);
56503-extern void kernel_restart(char *cmd);
56504-extern void kernel_halt(void);
56505-extern void kernel_power_off(void);
56506+extern void kernel_restart(char *cmd) __noreturn;
56507+extern void kernel_halt(void) __noreturn;
56508+extern void kernel_power_off(void) __noreturn;
56509
56510 void ctrl_alt_del(void);
56511
56512@@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
56513 * Emergency restart, callable from an interrupt handler.
56514 */
56515
56516-extern void emergency_restart(void);
56517+extern void emergency_restart(void) __noreturn;
56518 #include <asm/emergency-restart.h>
56519
56520 #endif
56521diff -urNp linux-2.6.32.41/include/linux/reiserfs_fs.h linux-2.6.32.41/include/linux/reiserfs_fs.h
56522--- linux-2.6.32.41/include/linux/reiserfs_fs.h 2011-03-27 14:31:47.000000000 -0400
56523+++ linux-2.6.32.41/include/linux/reiserfs_fs.h 2011-04-17 15:56:46.000000000 -0400
56524@@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset
56525 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
56526
56527 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
56528-#define get_generation(s) atomic_read (&fs_generation(s))
56529+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
56530 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
56531 #define __fs_changed(gen,s) (gen != get_generation (s))
56532 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
56533@@ -1534,24 +1534,24 @@ static inline struct super_block *sb_fro
56534 */
56535
56536 struct item_operations {
56537- int (*bytes_number) (struct item_head * ih, int block_size);
56538- void (*decrement_key) (struct cpu_key *);
56539- int (*is_left_mergeable) (struct reiserfs_key * ih,
56540+ int (* const bytes_number) (struct item_head * ih, int block_size);
56541+ void (* const decrement_key) (struct cpu_key *);
56542+ int (* const is_left_mergeable) (struct reiserfs_key * ih,
56543 unsigned long bsize);
56544- void (*print_item) (struct item_head *, char *item);
56545- void (*check_item) (struct item_head *, char *item);
56546+ void (* const print_item) (struct item_head *, char *item);
56547+ void (* const check_item) (struct item_head *, char *item);
56548
56549- int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
56550+ int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
56551 int is_affected, int insert_size);
56552- int (*check_left) (struct virtual_item * vi, int free,
56553+ int (* const check_left) (struct virtual_item * vi, int free,
56554 int start_skip, int end_skip);
56555- int (*check_right) (struct virtual_item * vi, int free);
56556- int (*part_size) (struct virtual_item * vi, int from, int to);
56557- int (*unit_num) (struct virtual_item * vi);
56558- void (*print_vi) (struct virtual_item * vi);
56559+ int (* const check_right) (struct virtual_item * vi, int free);
56560+ int (* const part_size) (struct virtual_item * vi, int from, int to);
56561+ int (* const unit_num) (struct virtual_item * vi);
56562+ void (* const print_vi) (struct virtual_item * vi);
56563 };
56564
56565-extern struct item_operations *item_ops[TYPE_ANY + 1];
56566+extern const struct item_operations * const item_ops[TYPE_ANY + 1];
56567
56568 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
56569 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
56570diff -urNp linux-2.6.32.41/include/linux/reiserfs_fs_sb.h linux-2.6.32.41/include/linux/reiserfs_fs_sb.h
56571--- linux-2.6.32.41/include/linux/reiserfs_fs_sb.h 2011-03-27 14:31:47.000000000 -0400
56572+++ linux-2.6.32.41/include/linux/reiserfs_fs_sb.h 2011-04-17 15:56:46.000000000 -0400
56573@@ -377,7 +377,7 @@ struct reiserfs_sb_info {
56574 /* Comment? -Hans */
56575 wait_queue_head_t s_wait;
56576 /* To be obsoleted soon by per buffer seals.. -Hans */
56577- atomic_t s_generation_counter; // increased by one every time the
56578+ atomic_unchecked_t s_generation_counter; // increased by one every time the
56579 // tree gets re-balanced
56580 unsigned long s_properties; /* File system properties. Currently holds
56581 on-disk FS format */
56582diff -urNp linux-2.6.32.41/include/linux/sched.h linux-2.6.32.41/include/linux/sched.h
56583--- linux-2.6.32.41/include/linux/sched.h 2011-03-27 14:31:47.000000000 -0400
56584+++ linux-2.6.32.41/include/linux/sched.h 2011-05-18 20:09:37.000000000 -0400
56585@@ -101,6 +101,7 @@ struct bio;
56586 struct fs_struct;
56587 struct bts_context;
56588 struct perf_event_context;
56589+struct linux_binprm;
56590
56591 /*
56592 * List of flags we want to share for kernel threads,
56593@@ -350,7 +351,7 @@ extern signed long schedule_timeout_kill
56594 extern signed long schedule_timeout_uninterruptible(signed long timeout);
56595 asmlinkage void __schedule(void);
56596 asmlinkage void schedule(void);
56597-extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
56598+extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
56599
56600 struct nsproxy;
56601 struct user_namespace;
56602@@ -371,9 +372,12 @@ struct user_namespace;
56603 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
56604
56605 extern int sysctl_max_map_count;
56606+extern unsigned long sysctl_heap_stack_gap;
56607
56608 #include <linux/aio.h>
56609
56610+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
56611+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
56612 extern unsigned long
56613 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
56614 unsigned long, unsigned long);
56615@@ -666,6 +670,16 @@ struct signal_struct {
56616 struct tty_audit_buf *tty_audit_buf;
56617 #endif
56618
56619+#ifdef CONFIG_GRKERNSEC
56620+ u32 curr_ip;
56621+ u32 saved_ip;
56622+ u32 gr_saddr;
56623+ u32 gr_daddr;
56624+ u16 gr_sport;
56625+ u16 gr_dport;
56626+ u8 used_accept:1;
56627+#endif
56628+
56629 int oom_adj; /* OOM kill score adjustment (bit shift) */
56630 };
56631
56632@@ -723,6 +737,11 @@ struct user_struct {
56633 struct key *session_keyring; /* UID's default session keyring */
56634 #endif
56635
56636+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
56637+ unsigned int banned;
56638+ unsigned long ban_expires;
56639+#endif
56640+
56641 /* Hash table maintenance information */
56642 struct hlist_node uidhash_node;
56643 uid_t uid;
56644@@ -1328,8 +1347,8 @@ struct task_struct {
56645 struct list_head thread_group;
56646
56647 struct completion *vfork_done; /* for vfork() */
56648- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
56649- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
56650+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
56651+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
56652
56653 cputime_t utime, stime, utimescaled, stimescaled;
56654 cputime_t gtime;
56655@@ -1343,16 +1362,6 @@ struct task_struct {
56656 struct task_cputime cputime_expires;
56657 struct list_head cpu_timers[3];
56658
56659-/* process credentials */
56660- const struct cred *real_cred; /* objective and real subjective task
56661- * credentials (COW) */
56662- const struct cred *cred; /* effective (overridable) subjective task
56663- * credentials (COW) */
56664- struct mutex cred_guard_mutex; /* guard against foreign influences on
56665- * credential calculations
56666- * (notably. ptrace) */
56667- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
56668-
56669 char comm[TASK_COMM_LEN]; /* executable name excluding path
56670 - access with [gs]et_task_comm (which lock
56671 it with task_lock())
56672@@ -1369,6 +1378,10 @@ struct task_struct {
56673 #endif
56674 /* CPU-specific state of this task */
56675 struct thread_struct thread;
56676+/* thread_info moved to task_struct */
56677+#ifdef CONFIG_X86
56678+ struct thread_info tinfo;
56679+#endif
56680 /* filesystem information */
56681 struct fs_struct *fs;
56682 /* open file information */
56683@@ -1436,6 +1449,15 @@ struct task_struct {
56684 int hardirq_context;
56685 int softirq_context;
56686 #endif
56687+
56688+/* process credentials */
56689+ const struct cred *real_cred; /* objective and real subjective task
56690+ * credentials (COW) */
56691+ struct mutex cred_guard_mutex; /* guard against foreign influences on
56692+ * credential calculations
56693+ * (notably. ptrace) */
56694+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
56695+
56696 #ifdef CONFIG_LOCKDEP
56697 # define MAX_LOCK_DEPTH 48UL
56698 u64 curr_chain_key;
56699@@ -1456,6 +1478,9 @@ struct task_struct {
56700
56701 struct backing_dev_info *backing_dev_info;
56702
56703+ const struct cred *cred; /* effective (overridable) subjective task
56704+ * credentials (COW) */
56705+
56706 struct io_context *io_context;
56707
56708 unsigned long ptrace_message;
56709@@ -1519,6 +1544,21 @@ struct task_struct {
56710 unsigned long default_timer_slack_ns;
56711
56712 struct list_head *scm_work_list;
56713+
56714+#ifdef CONFIG_GRKERNSEC
56715+ /* grsecurity */
56716+ struct dentry *gr_chroot_dentry;
56717+ struct acl_subject_label *acl;
56718+ struct acl_role_label *role;
56719+ struct file *exec_file;
56720+ u16 acl_role_id;
56721+ /* is this the task that authenticated to the special role */
56722+ u8 acl_sp_role;
56723+ u8 is_writable;
56724+ u8 brute;
56725+ u8 gr_is_chrooted;
56726+#endif
56727+
56728 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
56729 /* Index of current stored adress in ret_stack */
56730 int curr_ret_stack;
56731@@ -1542,6 +1582,63 @@ struct task_struct {
56732 #endif /* CONFIG_TRACING */
56733 };
56734
56735+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
56736+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
56737+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
56738+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
56739+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
56740+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
56741+
56742+#ifdef CONFIG_PAX_SOFTMODE
56743+extern unsigned int pax_softmode;
56744+#endif
56745+
56746+extern int pax_check_flags(unsigned long *);
56747+
56748+/* if tsk != current then task_lock must be held on it */
56749+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
56750+static inline unsigned long pax_get_flags(struct task_struct *tsk)
56751+{
56752+ if (likely(tsk->mm))
56753+ return tsk->mm->pax_flags;
56754+ else
56755+ return 0UL;
56756+}
56757+
56758+/* if tsk != current then task_lock must be held on it */
56759+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
56760+{
56761+ if (likely(tsk->mm)) {
56762+ tsk->mm->pax_flags = flags;
56763+ return 0;
56764+ }
56765+ return -EINVAL;
56766+}
56767+#endif
56768+
56769+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
56770+extern void pax_set_initial_flags(struct linux_binprm *bprm);
56771+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
56772+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
56773+#endif
56774+
56775+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
56776+void pax_report_insns(void *pc, void *sp);
56777+void pax_report_refcount_overflow(struct pt_regs *regs);
56778+void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type);
56779+
56780+static inline void pax_track_stack(void)
56781+{
56782+
56783+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
56784+ unsigned long sp = current_stack_pointer;
56785+ if (current_thread_info()->lowest_stack > sp &&
56786+ (unsigned long)task_stack_page(current) < sp)
56787+ current_thread_info()->lowest_stack = sp;
56788+#endif
56789+
56790+}
56791+
56792 /* Future-safe accessor for struct task_struct's cpus_allowed. */
56793 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
56794
56795@@ -1978,7 +2075,9 @@ void yield(void);
56796 extern struct exec_domain default_exec_domain;
56797
56798 union thread_union {
56799+#ifndef CONFIG_X86
56800 struct thread_info thread_info;
56801+#endif
56802 unsigned long stack[THREAD_SIZE/sizeof(long)];
56803 };
56804
56805@@ -2155,7 +2254,7 @@ extern void __cleanup_sighand(struct sig
56806 extern void exit_itimers(struct signal_struct *);
56807 extern void flush_itimer_signals(void);
56808
56809-extern NORET_TYPE void do_group_exit(int);
56810+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
56811
56812 extern void daemonize(const char *, ...);
56813 extern int allow_signal(int);
56814@@ -2284,13 +2383,17 @@ static inline unsigned long *end_of_stac
56815
56816 #endif
56817
56818-static inline int object_is_on_stack(void *obj)
56819+static inline int object_starts_on_stack(void *obj)
56820 {
56821- void *stack = task_stack_page(current);
56822+ const void *stack = task_stack_page(current);
56823
56824 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
56825 }
56826
56827+#ifdef CONFIG_PAX_USERCOPY
56828+extern int object_is_on_stack(const void *obj, unsigned long len);
56829+#endif
56830+
56831 extern void thread_info_cache_init(void);
56832
56833 #ifdef CONFIG_DEBUG_STACK_USAGE
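/*
 * Illustrative sketch, not part of the patch: one plausible shape for the
 * check_heap_stack_gap() helper declared above. A candidate mapping
 * [addr, addr + len) is accepted only if it ends below the next VMA and,
 * when that VMA is a growing-down stack, also leaves sysctl_heap_stack_gap
 * bytes of slack. The real body lives in the patched mm code and may differ;
 * the sysctl is assumed to be expressed in bytes here.
 */
#include <linux/mm.h>

static bool example_check_heap_stack_gap(const struct vm_area_struct *vma,
					 unsigned long addr, unsigned long len)
{
	if (!vma)
		return true;
	if (addr + len > vma->vm_start)
		return false;
	if (vma->vm_flags & VM_GROWSDOWN)
		return vma->vm_start - (addr + len) >= sysctl_heap_stack_gap;
	return true;
}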
56834diff -urNp linux-2.6.32.41/include/linux/screen_info.h linux-2.6.32.41/include/linux/screen_info.h
56835--- linux-2.6.32.41/include/linux/screen_info.h 2011-03-27 14:31:47.000000000 -0400
56836+++ linux-2.6.32.41/include/linux/screen_info.h 2011-04-17 15:56:46.000000000 -0400
56837@@ -42,7 +42,8 @@ struct screen_info {
56838 __u16 pages; /* 0x32 */
56839 __u16 vesa_attributes; /* 0x34 */
56840 __u32 capabilities; /* 0x36 */
56841- __u8 _reserved[6]; /* 0x3a */
56842+ __u16 vesapm_size; /* 0x3a */
56843+ __u8 _reserved[4]; /* 0x3c */
56844 } __attribute__((packed));
56845
56846 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
56847diff -urNp linux-2.6.32.41/include/linux/security.h linux-2.6.32.41/include/linux/security.h
56848--- linux-2.6.32.41/include/linux/security.h 2011-03-27 14:31:47.000000000 -0400
56849+++ linux-2.6.32.41/include/linux/security.h 2011-04-17 15:56:46.000000000 -0400
56850@@ -34,6 +34,7 @@
56851 #include <linux/key.h>
56852 #include <linux/xfrm.h>
56853 #include <linux/gfp.h>
56854+#include <linux/grsecurity.h>
56855 #include <net/flow.h>
56856
56857 /* Maximum number of letters for an LSM name string */
56858diff -urNp linux-2.6.32.41/include/linux/shm.h linux-2.6.32.41/include/linux/shm.h
56859--- linux-2.6.32.41/include/linux/shm.h 2011-03-27 14:31:47.000000000 -0400
56860+++ linux-2.6.32.41/include/linux/shm.h 2011-04-17 15:56:46.000000000 -0400
56861@@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
56862 pid_t shm_cprid;
56863 pid_t shm_lprid;
56864 struct user_struct *mlock_user;
56865+#ifdef CONFIG_GRKERNSEC
56866+ time_t shm_createtime;
56867+ pid_t shm_lapid;
56868+#endif
56869 };
56870
56871 /* shm_mode upper byte flags */
56872diff -urNp linux-2.6.32.41/include/linux/skbuff.h linux-2.6.32.41/include/linux/skbuff.h
56873--- linux-2.6.32.41/include/linux/skbuff.h 2011-03-27 14:31:47.000000000 -0400
56874+++ linux-2.6.32.41/include/linux/skbuff.h 2011-05-04 17:56:20.000000000 -0400
56875@@ -544,7 +544,7 @@ static inline union skb_shared_tx *skb_t
56876 */
56877 static inline int skb_queue_empty(const struct sk_buff_head *list)
56878 {
56879- return list->next == (struct sk_buff *)list;
56880+ return list->next == (const struct sk_buff *)list;
56881 }
56882
56883 /**
56884@@ -557,7 +557,7 @@ static inline int skb_queue_empty(const
56885 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
56886 const struct sk_buff *skb)
56887 {
56888- return (skb->next == (struct sk_buff *) list);
56889+ return (skb->next == (const struct sk_buff *) list);
56890 }
56891
56892 /**
56893@@ -570,7 +570,7 @@ static inline bool skb_queue_is_last(con
56894 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
56895 const struct sk_buff *skb)
56896 {
56897- return (skb->prev == (struct sk_buff *) list);
56898+ return (skb->prev == (const struct sk_buff *) list);
56899 }
56900
56901 /**
56902@@ -1367,7 +1367,7 @@ static inline int skb_network_offset(con
56903 * headroom, you should not reduce this.
56904 */
56905 #ifndef NET_SKB_PAD
56906-#define NET_SKB_PAD 32
56907+#define NET_SKB_PAD (_AC(32,U))
56908 #endif
56909
56910 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
56911diff -urNp linux-2.6.32.41/include/linux/slab_def.h linux-2.6.32.41/include/linux/slab_def.h
56912--- linux-2.6.32.41/include/linux/slab_def.h 2011-03-27 14:31:47.000000000 -0400
56913+++ linux-2.6.32.41/include/linux/slab_def.h 2011-05-04 17:56:28.000000000 -0400
56914@@ -69,10 +69,10 @@ struct kmem_cache {
56915 unsigned long node_allocs;
56916 unsigned long node_frees;
56917 unsigned long node_overflow;
56918- atomic_t allochit;
56919- atomic_t allocmiss;
56920- atomic_t freehit;
56921- atomic_t freemiss;
56922+ atomic_unchecked_t allochit;
56923+ atomic_unchecked_t allocmiss;
56924+ atomic_unchecked_t freehit;
56925+ atomic_unchecked_t freemiss;
56926
56927 /*
56928 * If debugging is enabled, then the allocator can add additional
56929diff -urNp linux-2.6.32.41/include/linux/slab.h linux-2.6.32.41/include/linux/slab.h
56930--- linux-2.6.32.41/include/linux/slab.h 2011-03-27 14:31:47.000000000 -0400
56931+++ linux-2.6.32.41/include/linux/slab.h 2011-04-17 15:56:46.000000000 -0400
56932@@ -11,12 +11,20 @@
56933
56934 #include <linux/gfp.h>
56935 #include <linux/types.h>
56936+#include <linux/err.h>
56937
56938 /*
56939 * Flags to pass to kmem_cache_create().
56940 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
56941 */
56942 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
56943+
56944+#ifdef CONFIG_PAX_USERCOPY
56945+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
56946+#else
56947+#define SLAB_USERCOPY 0x00000000UL
56948+#endif
56949+
56950 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
56951 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
56952 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
56953@@ -82,10 +90,13 @@
56954 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
56955 * Both make kfree a no-op.
56956 */
56957-#define ZERO_SIZE_PTR ((void *)16)
56958+#define ZERO_SIZE_PTR \
56959+({ \
56960+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
56961+ (void *)(-MAX_ERRNO-1L); \
56962+})
56963
56964-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
56965- (unsigned long)ZERO_SIZE_PTR)
56966+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
56967
56968 /*
56969 * struct kmem_cache related prototypes
56970@@ -138,6 +149,7 @@ void * __must_check krealloc(const void
56971 void kfree(const void *);
56972 void kzfree(const void *);
56973 size_t ksize(const void *);
56974+void check_object_size(const void *ptr, unsigned long n, bool to);
56975
56976 /*
56977 * Allocator specific definitions. These are mainly used to establish optimized
56978@@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t
56979
56980 void __init kmem_cache_init_late(void);
56981
56982+#define kmalloc(x, y) \
56983+({ \
56984+ void *___retval; \
56985+ intoverflow_t ___x = (intoverflow_t)x; \
56986+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
56987+ ___retval = NULL; \
56988+ else \
56989+ ___retval = kmalloc((size_t)___x, (y)); \
56990+ ___retval; \
56991+})
56992+
56993+#define kmalloc_node(x, y, z) \
56994+({ \
56995+ void *___retval; \
56996+ intoverflow_t ___x = (intoverflow_t)x; \
56997+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
56998+ ___retval = NULL; \
56999+ else \
57000+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
57001+ ___retval; \
57002+})
57003+
57004+#define kzalloc(x, y) \
57005+({ \
57006+ void *___retval; \
57007+ intoverflow_t ___x = (intoverflow_t)x; \
57008+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
57009+ ___retval = NULL; \
57010+ else \
57011+ ___retval = kzalloc((size_t)___x, (y)); \
57012+ ___retval; \
57013+})
57014+
57015 #endif /* _LINUX_SLAB_H */
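/*
 * Illustrative notes, not part of the patch, on the two constructs above.
 *
 * 1) The kmalloc()/kmalloc_node()/kzalloc() wrappers rely on the rule that
 *    the preprocessor does not re-expand a macro inside its own expansion,
 *    so the inner kmalloc((size_t)___x, (y)) still resolves to the real
 *    allocator. Evaluating the size as intoverflow_t (assumed here to be a
 *    64-bit type) lets the WARN() reject requests whose value no longer fits
 *    in unsigned long instead of silently truncating them.
 *
 * 2) With ZERO_SIZE_PTR moved to -MAX_ERRNO-1, the rewritten
 *    ZERO_OR_NULL_PTR() needs only one unsigned comparison. Worked example,
 *    assuming a 64-bit unsigned long and MAX_ERRNO == 4095:
 *
 *        x == NULL            : (0 - 1) wraps to ULONG_MAX        -> matched
 *        x == ZERO_SIZE_PTR   : x - 1 == ZERO_SIZE_PTR - 1        -> matched
 *        x == ERR_PTR(-EIO)   : lies in [ZERO_SIZE_PTR, ULONG_MAX]-> matched
 *        ordinary slab pointer: x - 1 is far below the threshold  -> not matched
 */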
57016diff -urNp linux-2.6.32.41/include/linux/slub_def.h linux-2.6.32.41/include/linux/slub_def.h
57017--- linux-2.6.32.41/include/linux/slub_def.h 2011-03-27 14:31:47.000000000 -0400
57018+++ linux-2.6.32.41/include/linux/slub_def.h 2011-04-17 15:56:46.000000000 -0400
57019@@ -86,7 +86,7 @@ struct kmem_cache {
57020 struct kmem_cache_order_objects max;
57021 struct kmem_cache_order_objects min;
57022 gfp_t allocflags; /* gfp flags to use on each alloc */
57023- int refcount; /* Refcount for slab cache destroy */
57024+ atomic_t refcount; /* Refcount for slab cache destroy */
57025 void (*ctor)(void *);
57026 int inuse; /* Offset to metadata */
57027 int align; /* Alignment */
57028diff -urNp linux-2.6.32.41/include/linux/sonet.h linux-2.6.32.41/include/linux/sonet.h
57029--- linux-2.6.32.41/include/linux/sonet.h 2011-03-27 14:31:47.000000000 -0400
57030+++ linux-2.6.32.41/include/linux/sonet.h 2011-04-17 15:56:46.000000000 -0400
57031@@ -61,7 +61,7 @@ struct sonet_stats {
57032 #include <asm/atomic.h>
57033
57034 struct k_sonet_stats {
57035-#define __HANDLE_ITEM(i) atomic_t i
57036+#define __HANDLE_ITEM(i) atomic_unchecked_t i
57037 __SONET_ITEMS
57038 #undef __HANDLE_ITEM
57039 };
57040diff -urNp linux-2.6.32.41/include/linux/sunrpc/clnt.h linux-2.6.32.41/include/linux/sunrpc/clnt.h
57041--- linux-2.6.32.41/include/linux/sunrpc/clnt.h 2011-03-27 14:31:47.000000000 -0400
57042+++ linux-2.6.32.41/include/linux/sunrpc/clnt.h 2011-04-17 15:56:46.000000000 -0400
57043@@ -167,9 +167,9 @@ static inline unsigned short rpc_get_por
57044 {
57045 switch (sap->sa_family) {
57046 case AF_INET:
57047- return ntohs(((struct sockaddr_in *)sap)->sin_port);
57048+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
57049 case AF_INET6:
57050- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
57051+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
57052 }
57053 return 0;
57054 }
57055@@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const
57056 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
57057 const struct sockaddr *src)
57058 {
57059- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
57060+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
57061 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
57062
57063 dsin->sin_family = ssin->sin_family;
57064@@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const
57065 if (sa->sa_family != AF_INET6)
57066 return 0;
57067
57068- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
57069+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
57070 }
57071
57072 #endif /* __KERNEL__ */
57073diff -urNp linux-2.6.32.41/include/linux/sunrpc/svc_rdma.h linux-2.6.32.41/include/linux/sunrpc/svc_rdma.h
57074--- linux-2.6.32.41/include/linux/sunrpc/svc_rdma.h 2011-03-27 14:31:47.000000000 -0400
57075+++ linux-2.6.32.41/include/linux/sunrpc/svc_rdma.h 2011-05-04 17:56:28.000000000 -0400
57076@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
57077 extern unsigned int svcrdma_max_requests;
57078 extern unsigned int svcrdma_max_req_size;
57079
57080-extern atomic_t rdma_stat_recv;
57081-extern atomic_t rdma_stat_read;
57082-extern atomic_t rdma_stat_write;
57083-extern atomic_t rdma_stat_sq_starve;
57084-extern atomic_t rdma_stat_rq_starve;
57085-extern atomic_t rdma_stat_rq_poll;
57086-extern atomic_t rdma_stat_rq_prod;
57087-extern atomic_t rdma_stat_sq_poll;
57088-extern atomic_t rdma_stat_sq_prod;
57089+extern atomic_unchecked_t rdma_stat_recv;
57090+extern atomic_unchecked_t rdma_stat_read;
57091+extern atomic_unchecked_t rdma_stat_write;
57092+extern atomic_unchecked_t rdma_stat_sq_starve;
57093+extern atomic_unchecked_t rdma_stat_rq_starve;
57094+extern atomic_unchecked_t rdma_stat_rq_poll;
57095+extern atomic_unchecked_t rdma_stat_rq_prod;
57096+extern atomic_unchecked_t rdma_stat_sq_poll;
57097+extern atomic_unchecked_t rdma_stat_sq_prod;
57098
57099 #define RPCRDMA_VERSION 1
57100
57101diff -urNp linux-2.6.32.41/include/linux/suspend.h linux-2.6.32.41/include/linux/suspend.h
57102--- linux-2.6.32.41/include/linux/suspend.h 2011-03-27 14:31:47.000000000 -0400
57103+++ linux-2.6.32.41/include/linux/suspend.h 2011-04-17 15:56:46.000000000 -0400
57104@@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
57105 * which require special recovery actions in that situation.
57106 */
57107 struct platform_suspend_ops {
57108- int (*valid)(suspend_state_t state);
57109- int (*begin)(suspend_state_t state);
57110- int (*prepare)(void);
57111- int (*prepare_late)(void);
57112- int (*enter)(suspend_state_t state);
57113- void (*wake)(void);
57114- void (*finish)(void);
57115- void (*end)(void);
57116- void (*recover)(void);
57117+ int (* const valid)(suspend_state_t state);
57118+ int (* const begin)(suspend_state_t state);
57119+ int (* const prepare)(void);
57120+ int (* const prepare_late)(void);
57121+ int (* const enter)(suspend_state_t state);
57122+ void (* const wake)(void);
57123+ void (* const finish)(void);
57124+ void (* const end)(void);
57125+ void (* const recover)(void);
57126 };
57127
57128 #ifdef CONFIG_SUSPEND
57129@@ -120,7 +120,7 @@ struct platform_suspend_ops {
57130 * suspend_set_ops - set platform dependent suspend operations
57131 * @ops: The new suspend operations to set.
57132 */
57133-extern void suspend_set_ops(struct platform_suspend_ops *ops);
57134+extern void suspend_set_ops(const struct platform_suspend_ops *ops);
57135 extern int suspend_valid_only_mem(suspend_state_t state);
57136
57137 /**
57138@@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t st
57139 #else /* !CONFIG_SUSPEND */
57140 #define suspend_valid_only_mem NULL
57141
57142-static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
57143+static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
57144 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
57145 #endif /* !CONFIG_SUSPEND */
57146
57147@@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone
57148 * platforms which require special recovery actions in that situation.
57149 */
57150 struct platform_hibernation_ops {
57151- int (*begin)(void);
57152- void (*end)(void);
57153- int (*pre_snapshot)(void);
57154- void (*finish)(void);
57155- int (*prepare)(void);
57156- int (*enter)(void);
57157- void (*leave)(void);
57158- int (*pre_restore)(void);
57159- void (*restore_cleanup)(void);
57160- void (*recover)(void);
57161+ int (* const begin)(void);
57162+ void (* const end)(void);
57163+ int (* const pre_snapshot)(void);
57164+ void (* const finish)(void);
57165+ int (* const prepare)(void);
57166+ int (* const enter)(void);
57167+ void (* const leave)(void);
57168+ int (* const pre_restore)(void);
57169+ void (* const restore_cleanup)(void);
57170+ void (* const recover)(void);
57171 };
57172
57173 #ifdef CONFIG_HIBERNATION
57174@@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct
57175 extern void swsusp_unset_page_free(struct page *);
57176 extern unsigned long get_safe_page(gfp_t gfp_mask);
57177
57178-extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
57179+extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
57180 extern int hibernate(void);
57181 extern bool system_entering_hibernation(void);
57182 #else /* CONFIG_HIBERNATION */
57183@@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidd
57184 static inline void swsusp_set_page_free(struct page *p) {}
57185 static inline void swsusp_unset_page_free(struct page *p) {}
57186
57187-static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
57188+static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
57189 static inline int hibernate(void) { return -ENOSYS; }
57190 static inline bool system_entering_hibernation(void) { return false; }
57191 #endif /* CONFIG_HIBERNATION */
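/*
 * Illustrative sketch, not part of the patch: the "* const" members and the
 * const-qualified setter arguments above let a platform define its ops table
 * as a const object, so the hook pointers cannot be re-targeted at runtime.
 * The platform callbacks below are hypothetical.
 */
#include <linux/suspend.h>

static int example_suspend_valid(suspend_state_t state)
{
	return state == PM_SUSPEND_MEM;
}

static int example_suspend_enter(suspend_state_t state)
{
	/* program the platform's sleep state here */
	return 0;
}

static const struct platform_suspend_ops example_suspend_ops = {
	.valid = example_suspend_valid,
	.enter = example_suspend_enter,
};

/* registered once from platform init code: suspend_set_ops(&example_suspend_ops); */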
57192diff -urNp linux-2.6.32.41/include/linux/sysctl.h linux-2.6.32.41/include/linux/sysctl.h
57193--- linux-2.6.32.41/include/linux/sysctl.h 2011-03-27 14:31:47.000000000 -0400
57194+++ linux-2.6.32.41/include/linux/sysctl.h 2011-04-17 15:56:46.000000000 -0400
57195@@ -164,7 +164,11 @@ enum
57196 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
57197 };
57198
57199-
57200+#ifdef CONFIG_PAX_SOFTMODE
57201+enum {
57202+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
57203+};
57204+#endif
57205
57206 /* CTL_VM names: */
57207 enum
57208@@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_tab
57209
57210 extern int proc_dostring(struct ctl_table *, int,
57211 void __user *, size_t *, loff_t *);
57212+extern int proc_dostring_modpriv(struct ctl_table *, int,
57213+ void __user *, size_t *, loff_t *);
57214 extern int proc_dointvec(struct ctl_table *, int,
57215 void __user *, size_t *, loff_t *);
57216 extern int proc_dointvec_minmax(struct ctl_table *, int,
57217@@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name,
57218
57219 extern ctl_handler sysctl_data;
57220 extern ctl_handler sysctl_string;
57221+extern ctl_handler sysctl_string_modpriv;
57222 extern ctl_handler sysctl_intvec;
57223 extern ctl_handler sysctl_jiffies;
57224 extern ctl_handler sysctl_ms_jiffies;
57225diff -urNp linux-2.6.32.41/include/linux/sysfs.h linux-2.6.32.41/include/linux/sysfs.h
57226--- linux-2.6.32.41/include/linux/sysfs.h 2011-03-27 14:31:47.000000000 -0400
57227+++ linux-2.6.32.41/include/linux/sysfs.h 2011-04-17 15:56:46.000000000 -0400
57228@@ -75,8 +75,8 @@ struct bin_attribute {
57229 };
57230
57231 struct sysfs_ops {
57232- ssize_t (*show)(struct kobject *, struct attribute *,char *);
57233- ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
57234+ ssize_t (* const show)(struct kobject *, struct attribute *,char *);
57235+ ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
57236 };
57237
57238 struct sysfs_dirent;
57239diff -urNp linux-2.6.32.41/include/linux/thread_info.h linux-2.6.32.41/include/linux/thread_info.h
57240--- linux-2.6.32.41/include/linux/thread_info.h 2011-03-27 14:31:47.000000000 -0400
57241+++ linux-2.6.32.41/include/linux/thread_info.h 2011-04-17 15:56:46.000000000 -0400
57242@@ -23,7 +23,7 @@ struct restart_block {
57243 };
57244 /* For futex_wait and futex_wait_requeue_pi */
57245 struct {
57246- u32 *uaddr;
57247+ u32 __user *uaddr;
57248 u32 val;
57249 u32 flags;
57250 u32 bitset;
57251diff -urNp linux-2.6.32.41/include/linux/tty.h linux-2.6.32.41/include/linux/tty.h
57252--- linux-2.6.32.41/include/linux/tty.h 2011-03-27 14:31:47.000000000 -0400
57253+++ linux-2.6.32.41/include/linux/tty.h 2011-04-17 15:56:46.000000000 -0400
57254@@ -13,6 +13,7 @@
57255 #include <linux/tty_driver.h>
57256 #include <linux/tty_ldisc.h>
57257 #include <linux/mutex.h>
57258+#include <linux/poll.h>
57259
57260 #include <asm/system.h>
57261
57262@@ -443,7 +444,6 @@ extern int tty_perform_flush(struct tty_
57263 extern dev_t tty_devnum(struct tty_struct *tty);
57264 extern void proc_clear_tty(struct task_struct *p);
57265 extern struct tty_struct *get_current_tty(void);
57266-extern void tty_default_fops(struct file_operations *fops);
57267 extern struct tty_struct *alloc_tty_struct(void);
57268 extern void free_tty_struct(struct tty_struct *tty);
57269 extern void initialize_tty_struct(struct tty_struct *tty,
57270@@ -493,6 +493,18 @@ extern void tty_ldisc_begin(void);
57271 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
57272 extern void tty_ldisc_enable(struct tty_struct *tty);
57273
57274+/* tty_io.c */
57275+extern ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
57276+extern ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
57277+extern unsigned int tty_poll(struct file *, poll_table *);
57278+#ifdef CONFIG_COMPAT
57279+extern long tty_compat_ioctl(struct file *file, unsigned int cmd,
57280+ unsigned long arg);
57281+#else
57282+#define tty_compat_ioctl NULL
57283+#endif
57284+extern int tty_release(struct inode *, struct file *);
57285+extern int tty_fasync(int fd, struct file *filp, int on);
57286
57287 /* n_tty.c */
57288 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
57289diff -urNp linux-2.6.32.41/include/linux/tty_ldisc.h linux-2.6.32.41/include/linux/tty_ldisc.h
57290--- linux-2.6.32.41/include/linux/tty_ldisc.h 2011-03-27 14:31:47.000000000 -0400
57291+++ linux-2.6.32.41/include/linux/tty_ldisc.h 2011-04-17 15:56:46.000000000 -0400
57292@@ -139,7 +139,7 @@ struct tty_ldisc_ops {
57293
57294 struct module *owner;
57295
57296- int refcount;
57297+ atomic_t refcount;
57298 };
57299
57300 struct tty_ldisc {
57301diff -urNp linux-2.6.32.41/include/linux/types.h linux-2.6.32.41/include/linux/types.h
57302--- linux-2.6.32.41/include/linux/types.h 2011-03-27 14:31:47.000000000 -0400
57303+++ linux-2.6.32.41/include/linux/types.h 2011-04-17 15:56:46.000000000 -0400
57304@@ -191,10 +191,26 @@ typedef struct {
57305 volatile int counter;
57306 } atomic_t;
57307
57308+#ifdef CONFIG_PAX_REFCOUNT
57309+typedef struct {
57310+ volatile int counter;
57311+} atomic_unchecked_t;
57312+#else
57313+typedef atomic_t atomic_unchecked_t;
57314+#endif
57315+
57316 #ifdef CONFIG_64BIT
57317 typedef struct {
57318 volatile long counter;
57319 } atomic64_t;
57320+
57321+#ifdef CONFIG_PAX_REFCOUNT
57322+typedef struct {
57323+ volatile long counter;
57324+} atomic64_unchecked_t;
57325+#else
57326+typedef atomic64_t atomic64_unchecked_t;
57327+#endif
57328 #endif
57329
57330 struct ustat {
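/*
 * Illustrative sketch, not part of the patch: under CONFIG_PAX_REFCOUNT the
 * plain atomic_t operations detect overflow, so counters that are allowed to
 * wrap (pure statistics) are switched to atomic_unchecked_t throughout this
 * patch, keeping the overflow check meaningful for real reference counts.
 * The structure and helper below are hypothetical.
 */
#include <asm/atomic.h>

struct example_counters {
	atomic_t		refcnt;		/* object lifetime: keep overflow detection */
	atomic_unchecked_t	rx_packets;	/* free-running statistic: may wrap */
};

static inline void example_account_rx(struct example_counters *c)
{
	atomic_inc(&c->refcnt);			/* checked increment */
	atomic_inc_unchecked(&c->rx_packets);	/* unchecked increment */
}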
57331diff -urNp linux-2.6.32.41/include/linux/uaccess.h linux-2.6.32.41/include/linux/uaccess.h
57332--- linux-2.6.32.41/include/linux/uaccess.h 2011-03-27 14:31:47.000000000 -0400
57333+++ linux-2.6.32.41/include/linux/uaccess.h 2011-04-17 15:56:46.000000000 -0400
57334@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
57335 long ret; \
57336 mm_segment_t old_fs = get_fs(); \
57337 \
57338- set_fs(KERNEL_DS); \
57339 pagefault_disable(); \
57340+ set_fs(KERNEL_DS); \
57341 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
57342- pagefault_enable(); \
57343 set_fs(old_fs); \
57344+ pagefault_enable(); \
57345 ret; \
57346 })
57347
57348@@ -93,7 +93,7 @@ static inline unsigned long __copy_from_
57349 * Safely read from address @src to the buffer at @dst. If a kernel fault
57350 * happens, handle that and return -EFAULT.
57351 */
57352-extern long probe_kernel_read(void *dst, void *src, size_t size);
57353+extern long probe_kernel_read(void *dst, const void *src, size_t size);
57354
57355 /*
57356 * probe_kernel_write(): safely attempt to write to a location
57357@@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst,
57358 * Safely write to address @dst from the buffer at @src. If a kernel fault
57359 * happens, handle that and return -EFAULT.
57360 */
57361-extern long probe_kernel_write(void *dst, void *src, size_t size);
57362+extern long probe_kernel_write(void *dst, const void *src, size_t size);
57363
57364 #endif /* __LINUX_UACCESS_H__ */
57365diff -urNp linux-2.6.32.41/include/linux/unaligned/access_ok.h linux-2.6.32.41/include/linux/unaligned/access_ok.h
57366--- linux-2.6.32.41/include/linux/unaligned/access_ok.h 2011-03-27 14:31:47.000000000 -0400
57367+++ linux-2.6.32.41/include/linux/unaligned/access_ok.h 2011-04-17 15:56:46.000000000 -0400
57368@@ -6,32 +6,32 @@
57369
57370 static inline u16 get_unaligned_le16(const void *p)
57371 {
57372- return le16_to_cpup((__le16 *)p);
57373+ return le16_to_cpup((const __le16 *)p);
57374 }
57375
57376 static inline u32 get_unaligned_le32(const void *p)
57377 {
57378- return le32_to_cpup((__le32 *)p);
57379+ return le32_to_cpup((const __le32 *)p);
57380 }
57381
57382 static inline u64 get_unaligned_le64(const void *p)
57383 {
57384- return le64_to_cpup((__le64 *)p);
57385+ return le64_to_cpup((const __le64 *)p);
57386 }
57387
57388 static inline u16 get_unaligned_be16(const void *p)
57389 {
57390- return be16_to_cpup((__be16 *)p);
57391+ return be16_to_cpup((const __be16 *)p);
57392 }
57393
57394 static inline u32 get_unaligned_be32(const void *p)
57395 {
57396- return be32_to_cpup((__be32 *)p);
57397+ return be32_to_cpup((const __be32 *)p);
57398 }
57399
57400 static inline u64 get_unaligned_be64(const void *p)
57401 {
57402- return be64_to_cpup((__be64 *)p);
57403+ return be64_to_cpup((const __be64 *)p);
57404 }
57405
57406 static inline void put_unaligned_le16(u16 val, void *p)
57407diff -urNp linux-2.6.32.41/include/linux/vmalloc.h linux-2.6.32.41/include/linux/vmalloc.h
57408--- linux-2.6.32.41/include/linux/vmalloc.h 2011-03-27 14:31:47.000000000 -0400
57409+++ linux-2.6.32.41/include/linux/vmalloc.h 2011-04-17 15:56:46.000000000 -0400
57410@@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
57411 #define VM_MAP 0x00000004 /* vmap()ed pages */
57412 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
57413 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
57414+
57415+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
57416+#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
57417+#endif
57418+
57419 /* bits [20..32] reserved for arch specific ioremap internals */
57420
57421 /*
57422@@ -123,4 +128,81 @@ struct vm_struct **pcpu_get_vm_areas(con
57423
57424 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
57425
57426+#define vmalloc(x) \
57427+({ \
57428+ void *___retval; \
57429+ intoverflow_t ___x = (intoverflow_t)x; \
57430+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
57431+ ___retval = NULL; \
57432+ else \
57433+ ___retval = vmalloc((unsigned long)___x); \
57434+ ___retval; \
57435+})
57436+
57437+#define __vmalloc(x, y, z) \
57438+({ \
57439+ void *___retval; \
57440+ intoverflow_t ___x = (intoverflow_t)x; \
57441+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
57442+ ___retval = NULL; \
57443+ else \
57444+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
57445+ ___retval; \
57446+})
57447+
57448+#define vmalloc_user(x) \
57449+({ \
57450+ void *___retval; \
57451+ intoverflow_t ___x = (intoverflow_t)x; \
57452+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
57453+ ___retval = NULL; \
57454+ else \
57455+ ___retval = vmalloc_user((unsigned long)___x); \
57456+ ___retval; \
57457+})
57458+
57459+#define vmalloc_exec(x) \
57460+({ \
57461+ void *___retval; \
57462+ intoverflow_t ___x = (intoverflow_t)x; \
57463+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
57464+ ___retval = NULL; \
57465+ else \
57466+ ___retval = vmalloc_exec((unsigned long)___x); \
57467+ ___retval; \
57468+})
57469+
57470+#define vmalloc_node(x, y) \
57471+({ \
57472+ void *___retval; \
57473+ intoverflow_t ___x = (intoverflow_t)x; \
57474+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
57475+ ___retval = NULL; \
57476+ else \
57477+ ___retval = vmalloc_node((unsigned long)___x, (y));\
57478+ ___retval; \
57479+})
57480+
57481+#define vmalloc_32(x) \
57482+({ \
57483+ void *___retval; \
57484+ intoverflow_t ___x = (intoverflow_t)x; \
57485+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
57486+ ___retval = NULL; \
57487+ else \
57488+ ___retval = vmalloc_32((unsigned long)___x); \
57489+ ___retval; \
57490+})
57491+
57492+#define vmalloc_32_user(x) \
57493+({ \
57494+ void *___retval; \
57495+ intoverflow_t ___x = (intoverflow_t)x; \
57496+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
57497+ ___retval = NULL; \
57498+ else \
57499+ ___retval = vmalloc_32_user((unsigned long)___x);\
57500+ ___retval; \
57501+})
57502+
57503 #endif /* _LINUX_VMALLOC_H */
57504diff -urNp linux-2.6.32.41/include/linux/vmstat.h linux-2.6.32.41/include/linux/vmstat.h
57505--- linux-2.6.32.41/include/linux/vmstat.h 2011-03-27 14:31:47.000000000 -0400
57506+++ linux-2.6.32.41/include/linux/vmstat.h 2011-04-17 15:56:46.000000000 -0400
57507@@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(in
57508 /*
57509 * Zone based page accounting with per cpu differentials.
57510 */
57511-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
57512+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
57513
57514 static inline void zone_page_state_add(long x, struct zone *zone,
57515 enum zone_stat_item item)
57516 {
57517- atomic_long_add(x, &zone->vm_stat[item]);
57518- atomic_long_add(x, &vm_stat[item]);
57519+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
57520+ atomic_long_add_unchecked(x, &vm_stat[item]);
57521 }
57522
57523 static inline unsigned long global_page_state(enum zone_stat_item item)
57524 {
57525- long x = atomic_long_read(&vm_stat[item]);
57526+ long x = atomic_long_read_unchecked(&vm_stat[item]);
57527 #ifdef CONFIG_SMP
57528 if (x < 0)
57529 x = 0;
57530@@ -158,7 +158,7 @@ static inline unsigned long global_page_
57531 static inline unsigned long zone_page_state(struct zone *zone,
57532 enum zone_stat_item item)
57533 {
57534- long x = atomic_long_read(&zone->vm_stat[item]);
57535+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
57536 #ifdef CONFIG_SMP
57537 if (x < 0)
57538 x = 0;
57539@@ -175,7 +175,7 @@ static inline unsigned long zone_page_st
57540 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
57541 enum zone_stat_item item)
57542 {
57543- long x = atomic_long_read(&zone->vm_stat[item]);
57544+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
57545
57546 #ifdef CONFIG_SMP
57547 int cpu;
57548@@ -264,8 +264,8 @@ static inline void __mod_zone_page_state
57549
57550 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
57551 {
57552- atomic_long_inc(&zone->vm_stat[item]);
57553- atomic_long_inc(&vm_stat[item]);
57554+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
57555+ atomic_long_inc_unchecked(&vm_stat[item]);
57556 }
57557
57558 static inline void __inc_zone_page_state(struct page *page,
57559@@ -276,8 +276,8 @@ static inline void __inc_zone_page_state
57560
57561 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
57562 {
57563- atomic_long_dec(&zone->vm_stat[item]);
57564- atomic_long_dec(&vm_stat[item]);
57565+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
57566+ atomic_long_dec_unchecked(&vm_stat[item]);
57567 }
57568
57569 static inline void __dec_zone_page_state(struct page *page,
57570diff -urNp linux-2.6.32.41/include/media/v4l2-device.h linux-2.6.32.41/include/media/v4l2-device.h
57571--- linux-2.6.32.41/include/media/v4l2-device.h 2011-03-27 14:31:47.000000000 -0400
57572+++ linux-2.6.32.41/include/media/v4l2-device.h 2011-05-04 17:56:28.000000000 -0400
57573@@ -71,7 +71,7 @@ int __must_check v4l2_device_register(st
57574 this function returns 0. If the name ends with a digit (e.g. cx18),
57575 then the name will be set to cx18-0 since cx180 looks really odd. */
57576 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
57577- atomic_t *instance);
57578+ atomic_unchecked_t *instance);
57579
57580 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
57581 Since the parent disappears this ensures that v4l2_dev doesn't have an
57582diff -urNp linux-2.6.32.41/include/net/flow.h linux-2.6.32.41/include/net/flow.h
57583--- linux-2.6.32.41/include/net/flow.h 2011-03-27 14:31:47.000000000 -0400
57584+++ linux-2.6.32.41/include/net/flow.h 2011-05-04 17:56:28.000000000 -0400
57585@@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net
57586 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
57587 u8 dir, flow_resolve_t resolver);
57588 extern void flow_cache_flush(void);
57589-extern atomic_t flow_cache_genid;
57590+extern atomic_unchecked_t flow_cache_genid;
57591
57592 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
57593 {
57594diff -urNp linux-2.6.32.41/include/net/inetpeer.h linux-2.6.32.41/include/net/inetpeer.h
57595--- linux-2.6.32.41/include/net/inetpeer.h 2011-03-27 14:31:47.000000000 -0400
57596+++ linux-2.6.32.41/include/net/inetpeer.h 2011-04-17 15:56:46.000000000 -0400
57597@@ -24,7 +24,7 @@ struct inet_peer
57598 __u32 dtime; /* the time of last use of not
57599 * referenced entries */
57600 atomic_t refcnt;
57601- atomic_t rid; /* Frag reception counter */
57602+ atomic_unchecked_t rid; /* Frag reception counter */
57603 __u32 tcp_ts;
57604 unsigned long tcp_ts_stamp;
57605 };
57606diff -urNp linux-2.6.32.41/include/net/ip_vs.h linux-2.6.32.41/include/net/ip_vs.h
57607--- linux-2.6.32.41/include/net/ip_vs.h 2011-03-27 14:31:47.000000000 -0400
57608+++ linux-2.6.32.41/include/net/ip_vs.h 2011-05-04 17:56:28.000000000 -0400
57609@@ -365,7 +365,7 @@ struct ip_vs_conn {
57610 struct ip_vs_conn *control; /* Master control connection */
57611 atomic_t n_control; /* Number of controlled ones */
57612 struct ip_vs_dest *dest; /* real server */
57613- atomic_t in_pkts; /* incoming packet counter */
57614+ atomic_unchecked_t in_pkts; /* incoming packet counter */
57615
57616 /* packet transmitter for different forwarding methods. If it
57617 mangles the packet, it must return NF_DROP or better NF_STOLEN,
57618@@ -466,7 +466,7 @@ struct ip_vs_dest {
57619 union nf_inet_addr addr; /* IP address of the server */
57620 __be16 port; /* port number of the server */
57621 volatile unsigned flags; /* dest status flags */
57622- atomic_t conn_flags; /* flags to copy to conn */
57623+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
57624 atomic_t weight; /* server weight */
57625
57626 atomic_t refcnt; /* reference counter */
57627diff -urNp linux-2.6.32.41/include/net/irda/ircomm_tty.h linux-2.6.32.41/include/net/irda/ircomm_tty.h
57628--- linux-2.6.32.41/include/net/irda/ircomm_tty.h 2011-03-27 14:31:47.000000000 -0400
57629+++ linux-2.6.32.41/include/net/irda/ircomm_tty.h 2011-04-17 15:56:46.000000000 -0400
57630@@ -35,6 +35,7 @@
57631 #include <linux/termios.h>
57632 #include <linux/timer.h>
57633 #include <linux/tty.h> /* struct tty_struct */
57634+#include <asm/local.h>
57635
57636 #include <net/irda/irias_object.h>
57637 #include <net/irda/ircomm_core.h>
57638@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
57639 unsigned short close_delay;
57640 unsigned short closing_wait; /* time to wait before closing */
57641
57642- int open_count;
57643- int blocked_open; /* # of blocked opens */
57644+ local_t open_count;
57645+ local_t blocked_open; /* # of blocked opens */
57646
57647 /* Protect concurent access to :
57648 * o self->open_count
57649diff -urNp linux-2.6.32.41/include/net/iucv/af_iucv.h linux-2.6.32.41/include/net/iucv/af_iucv.h
57650--- linux-2.6.32.41/include/net/iucv/af_iucv.h 2011-03-27 14:31:47.000000000 -0400
57651+++ linux-2.6.32.41/include/net/iucv/af_iucv.h 2011-05-04 17:56:28.000000000 -0400
57652@@ -87,7 +87,7 @@ struct iucv_sock {
57653 struct iucv_sock_list {
57654 struct hlist_head head;
57655 rwlock_t lock;
57656- atomic_t autobind_name;
57657+ atomic_unchecked_t autobind_name;
57658 };
57659
57660 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
57661diff -urNp linux-2.6.32.41/include/net/neighbour.h linux-2.6.32.41/include/net/neighbour.h
57662--- linux-2.6.32.41/include/net/neighbour.h 2011-03-27 14:31:47.000000000 -0400
57663+++ linux-2.6.32.41/include/net/neighbour.h 2011-04-17 15:56:46.000000000 -0400
57664@@ -125,12 +125,12 @@ struct neighbour
57665 struct neigh_ops
57666 {
57667 int family;
57668- void (*solicit)(struct neighbour *, struct sk_buff*);
57669- void (*error_report)(struct neighbour *, struct sk_buff*);
57670- int (*output)(struct sk_buff*);
57671- int (*connected_output)(struct sk_buff*);
57672- int (*hh_output)(struct sk_buff*);
57673- int (*queue_xmit)(struct sk_buff*);
57674+ void (* const solicit)(struct neighbour *, struct sk_buff*);
57675+ void (* const error_report)(struct neighbour *, struct sk_buff*);
57676+ int (* const output)(struct sk_buff*);
57677+ int (* const connected_output)(struct sk_buff*);
57678+ int (* const hh_output)(struct sk_buff*);
57679+ int (* const queue_xmit)(struct sk_buff*);
57680 };
57681
57682 struct pneigh_entry
57683diff -urNp linux-2.6.32.41/include/net/netlink.h linux-2.6.32.41/include/net/netlink.h
57684--- linux-2.6.32.41/include/net/netlink.h 2011-03-27 14:31:47.000000000 -0400
57685+++ linux-2.6.32.41/include/net/netlink.h 2011-04-17 15:56:46.000000000 -0400
57686@@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct
57687 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
57688 {
57689 if (mark)
57690- skb_trim(skb, (unsigned char *) mark - skb->data);
57691+ skb_trim(skb, (const unsigned char *) mark - skb->data);
57692 }
57693
57694 /**
57695diff -urNp linux-2.6.32.41/include/net/netns/ipv4.h linux-2.6.32.41/include/net/netns/ipv4.h
57696--- linux-2.6.32.41/include/net/netns/ipv4.h 2011-03-27 14:31:47.000000000 -0400
57697+++ linux-2.6.32.41/include/net/netns/ipv4.h 2011-05-04 17:56:28.000000000 -0400
57698@@ -54,7 +54,7 @@ struct netns_ipv4 {
57699 int current_rt_cache_rebuild_count;
57700
57701 struct timer_list rt_secret_timer;
57702- atomic_t rt_genid;
57703+ atomic_unchecked_t rt_genid;
57704
57705 #ifdef CONFIG_IP_MROUTE
57706 struct sock *mroute_sk;
57707diff -urNp linux-2.6.32.41/include/net/sctp/sctp.h linux-2.6.32.41/include/net/sctp/sctp.h
57708--- linux-2.6.32.41/include/net/sctp/sctp.h 2011-03-27 14:31:47.000000000 -0400
57709+++ linux-2.6.32.41/include/net/sctp/sctp.h 2011-04-17 15:56:46.000000000 -0400
57710@@ -305,8 +305,8 @@ extern int sctp_debug_flag;
57711
57712 #else /* SCTP_DEBUG */
57713
57714-#define SCTP_DEBUG_PRINTK(whatever...)
57715-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
57716+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
57717+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
57718 #define SCTP_ENABLE_DEBUG
57719 #define SCTP_DISABLE_DEBUG
57720 #define SCTP_ASSERT(expr, str, func)
57721diff -urNp linux-2.6.32.41/include/net/sock.h linux-2.6.32.41/include/net/sock.h
57722--- linux-2.6.32.41/include/net/sock.h 2011-03-27 14:31:47.000000000 -0400
57723+++ linux-2.6.32.41/include/net/sock.h 2011-05-04 17:56:28.000000000 -0400
57724@@ -272,7 +272,7 @@ struct sock {
57725 rwlock_t sk_callback_lock;
57726 int sk_err,
57727 sk_err_soft;
57728- atomic_t sk_drops;
57729+ atomic_unchecked_t sk_drops;
57730 unsigned short sk_ack_backlog;
57731 unsigned short sk_max_ack_backlog;
57732 __u32 sk_priority;
57733diff -urNp linux-2.6.32.41/include/net/tcp.h linux-2.6.32.41/include/net/tcp.h
57734--- linux-2.6.32.41/include/net/tcp.h 2011-03-27 14:31:47.000000000 -0400
57735+++ linux-2.6.32.41/include/net/tcp.h 2011-04-17 15:56:46.000000000 -0400
57736@@ -1444,6 +1444,7 @@ enum tcp_seq_states {
57737 struct tcp_seq_afinfo {
57738 char *name;
57739 sa_family_t family;
57740+ /* cannot be const */
57741 struct file_operations seq_fops;
57742 struct seq_operations seq_ops;
57743 };
57744diff -urNp linux-2.6.32.41/include/net/udp.h linux-2.6.32.41/include/net/udp.h
57745--- linux-2.6.32.41/include/net/udp.h 2011-03-27 14:31:47.000000000 -0400
57746+++ linux-2.6.32.41/include/net/udp.h 2011-04-17 15:56:46.000000000 -0400
57747@@ -187,6 +187,7 @@ struct udp_seq_afinfo {
57748 char *name;
57749 sa_family_t family;
57750 struct udp_table *udp_table;
57751+ /* cannot be const */
57752 struct file_operations seq_fops;
57753 struct seq_operations seq_ops;
57754 };
57755diff -urNp linux-2.6.32.41/include/scsi/scsi_device.h linux-2.6.32.41/include/scsi/scsi_device.h
57756--- linux-2.6.32.41/include/scsi/scsi_device.h 2011-04-17 17:00:52.000000000 -0400
57757+++ linux-2.6.32.41/include/scsi/scsi_device.h 2011-05-04 17:56:28.000000000 -0400
57758@@ -156,9 +156,9 @@ struct scsi_device {
57759 unsigned int max_device_blocked; /* what device_blocked counts down from */
57760 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
57761
57762- atomic_t iorequest_cnt;
57763- atomic_t iodone_cnt;
57764- atomic_t ioerr_cnt;
57765+ atomic_unchecked_t iorequest_cnt;
57766+ atomic_unchecked_t iodone_cnt;
57767+ atomic_unchecked_t ioerr_cnt;
57768
57769 struct device sdev_gendev,
57770 sdev_dev;
57771diff -urNp linux-2.6.32.41/include/sound/ac97_codec.h linux-2.6.32.41/include/sound/ac97_codec.h
57772--- linux-2.6.32.41/include/sound/ac97_codec.h 2011-03-27 14:31:47.000000000 -0400
57773+++ linux-2.6.32.41/include/sound/ac97_codec.h 2011-04-17 15:56:46.000000000 -0400
57774@@ -419,15 +419,15 @@
57775 struct snd_ac97;
57776
57777 struct snd_ac97_build_ops {
57778- int (*build_3d) (struct snd_ac97 *ac97);
57779- int (*build_specific) (struct snd_ac97 *ac97);
57780- int (*build_spdif) (struct snd_ac97 *ac97);
57781- int (*build_post_spdif) (struct snd_ac97 *ac97);
57782+ int (* const build_3d) (struct snd_ac97 *ac97);
57783+ int (* const build_specific) (struct snd_ac97 *ac97);
57784+ int (* const build_spdif) (struct snd_ac97 *ac97);
57785+ int (* const build_post_spdif) (struct snd_ac97 *ac97);
57786 #ifdef CONFIG_PM
57787- void (*suspend) (struct snd_ac97 *ac97);
57788- void (*resume) (struct snd_ac97 *ac97);
57789+ void (* const suspend) (struct snd_ac97 *ac97);
57790+ void (* const resume) (struct snd_ac97 *ac97);
57791 #endif
57792- void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
57793+ void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
57794 };
57795
57796 struct snd_ac97_bus_ops {
57797@@ -477,7 +477,7 @@ struct snd_ac97_template {
57798
57799 struct snd_ac97 {
57800 /* -- lowlevel (hardware) driver specific -- */
57801- struct snd_ac97_build_ops * build_ops;
57802+ const struct snd_ac97_build_ops * build_ops;
57803 void *private_data;
57804 void (*private_free) (struct snd_ac97 *ac97);
57805 /* --- */
57806diff -urNp linux-2.6.32.41/include/sound/ymfpci.h linux-2.6.32.41/include/sound/ymfpci.h
57807--- linux-2.6.32.41/include/sound/ymfpci.h 2011-03-27 14:31:47.000000000 -0400
57808+++ linux-2.6.32.41/include/sound/ymfpci.h 2011-05-04 17:56:28.000000000 -0400
57809@@ -358,7 +358,7 @@ struct snd_ymfpci {
57810 spinlock_t reg_lock;
57811 spinlock_t voice_lock;
57812 wait_queue_head_t interrupt_sleep;
57813- atomic_t interrupt_sleep_count;
57814+ atomic_unchecked_t interrupt_sleep_count;
57815 struct snd_info_entry *proc_entry;
57816 const struct firmware *dsp_microcode;
57817 const struct firmware *controller_microcode;
57818diff -urNp linux-2.6.32.41/include/trace/events/irq.h linux-2.6.32.41/include/trace/events/irq.h
57819--- linux-2.6.32.41/include/trace/events/irq.h 2011-03-27 14:31:47.000000000 -0400
57820+++ linux-2.6.32.41/include/trace/events/irq.h 2011-04-17 15:56:46.000000000 -0400
57821@@ -34,7 +34,7 @@
57822 */
57823 TRACE_EVENT(irq_handler_entry,
57824
57825- TP_PROTO(int irq, struct irqaction *action),
57826+ TP_PROTO(int irq, const struct irqaction *action),
57827
57828 TP_ARGS(irq, action),
57829
57830@@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
57831 */
57832 TRACE_EVENT(irq_handler_exit,
57833
57834- TP_PROTO(int irq, struct irqaction *action, int ret),
57835+ TP_PROTO(int irq, const struct irqaction *action, int ret),
57836
57837 TP_ARGS(irq, action, ret),
57838
57839@@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
57840 */
57841 TRACE_EVENT(softirq_entry,
57842
57843- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
57844+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
57845
57846 TP_ARGS(h, vec),
57847
57848@@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
57849 */
57850 TRACE_EVENT(softirq_exit,
57851
57852- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
57853+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
57854
57855 TP_ARGS(h, vec),
57856
57857diff -urNp linux-2.6.32.41/include/video/uvesafb.h linux-2.6.32.41/include/video/uvesafb.h
57858--- linux-2.6.32.41/include/video/uvesafb.h 2011-03-27 14:31:47.000000000 -0400
57859+++ linux-2.6.32.41/include/video/uvesafb.h 2011-04-17 15:56:46.000000000 -0400
57860@@ -177,6 +177,7 @@ struct uvesafb_par {
57861 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
57862 u8 pmi_setpal; /* PMI for palette changes */
57863 u16 *pmi_base; /* protected mode interface location */
57864+ u8 *pmi_code; /* protected mode code location */
57865 void *pmi_start;
57866 void *pmi_pal;
57867 u8 *vbe_state_orig; /*
57868diff -urNp linux-2.6.32.41/init/do_mounts.c linux-2.6.32.41/init/do_mounts.c
57869--- linux-2.6.32.41/init/do_mounts.c 2011-03-27 14:31:47.000000000 -0400
57870+++ linux-2.6.32.41/init/do_mounts.c 2011-04-17 15:56:46.000000000 -0400
57871@@ -216,11 +216,11 @@ static void __init get_fs_names(char *pa
57872
57873 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
57874 {
57875- int err = sys_mount(name, "/root", fs, flags, data);
57876+ int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
57877 if (err)
57878 return err;
57879
57880- sys_chdir("/root");
57881+ sys_chdir((__force const char __user *)"/root");
57882 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
57883 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
57884 current->fs->pwd.mnt->mnt_sb->s_type->name,
57885@@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...
57886 va_start(args, fmt);
57887 vsprintf(buf, fmt, args);
57888 va_end(args);
57889- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
57890+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
57891 if (fd >= 0) {
57892 sys_ioctl(fd, FDEJECT, 0);
57893 sys_close(fd);
57894 }
57895 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
57896- fd = sys_open("/dev/console", O_RDWR, 0);
57897+ fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
57898 if (fd >= 0) {
57899 sys_ioctl(fd, TCGETS, (long)&termios);
57900 termios.c_lflag &= ~ICANON;
57901 sys_ioctl(fd, TCSETSF, (long)&termios);
57902- sys_read(fd, &c, 1);
57903+ sys_read(fd, (char __user *)&c, 1);
57904 termios.c_lflag |= ICANON;
57905 sys_ioctl(fd, TCSETSF, (long)&termios);
57906 sys_close(fd);
57907@@ -416,6 +416,6 @@ void __init prepare_namespace(void)
57908 mount_root();
57909 out:
57910 devtmpfs_mount("dev");
57911- sys_mount(".", "/", NULL, MS_MOVE, NULL);
57912- sys_chroot(".");
57913+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
57914+ sys_chroot((__force char __user *)".");
57915 }
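
The do_mounts.c hunk above only adds casts that matter to the sparse static checker: __user marks a pointer as belonging to the user address space, and __force declares that crossing address spaces here is intentional, because early init code hands kernel strings straight to syscall entry points. A stripped-down, compilable sketch of how such annotations are typically wired up (simplified from the kernel's compiler headers; my_sys_chdir is a made-up stub, not a real syscall):

#include <stdio.h>

#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __force	__attribute__((force))
#else
# define __user
# define __force
#endif

/* made-up stub standing in for a syscall entry point expecting a user pointer */
static long my_sys_chdir(const char __user *path)
{
	(void)path;	/* the real syscall would copy the string from userspace */
	return 0;
}

int main(void)
{
	/* without the __force cast, sparse would flag the address-space mismatch */
	long ret = my_sys_chdir((__force const char __user *)"/root");

	printf("ret = %ld\n", ret);
	return 0;
}
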
57916diff -urNp linux-2.6.32.41/init/do_mounts.h linux-2.6.32.41/init/do_mounts.h
57917--- linux-2.6.32.41/init/do_mounts.h 2011-03-27 14:31:47.000000000 -0400
57918+++ linux-2.6.32.41/init/do_mounts.h 2011-04-17 15:56:46.000000000 -0400
57919@@ -15,15 +15,15 @@ extern int root_mountflags;
57920
57921 static inline int create_dev(char *name, dev_t dev)
57922 {
57923- sys_unlink(name);
57924- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
57925+ sys_unlink((__force char __user *)name);
57926+ return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
57927 }
57928
57929 #if BITS_PER_LONG == 32
57930 static inline u32 bstat(char *name)
57931 {
57932 struct stat64 stat;
57933- if (sys_stat64(name, &stat) != 0)
57934+ if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
57935 return 0;
57936 if (!S_ISBLK(stat.st_mode))
57937 return 0;
57938diff -urNp linux-2.6.32.41/init/do_mounts_initrd.c linux-2.6.32.41/init/do_mounts_initrd.c
57939--- linux-2.6.32.41/init/do_mounts_initrd.c 2011-03-27 14:31:47.000000000 -0400
57940+++ linux-2.6.32.41/init/do_mounts_initrd.c 2011-04-17 15:56:46.000000000 -0400
57941@@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shel
57942 sys_close(old_fd);sys_close(root_fd);
57943 sys_close(0);sys_close(1);sys_close(2);
57944 sys_setsid();
57945- (void) sys_open("/dev/console",O_RDWR,0);
57946+ (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
57947 (void) sys_dup(0);
57948 (void) sys_dup(0);
57949 return kernel_execve(shell, argv, envp_init);
57950@@ -47,13 +47,13 @@ static void __init handle_initrd(void)
57951 create_dev("/dev/root.old", Root_RAM0);
57952 /* mount initrd on rootfs' /root */
57953 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
57954- sys_mkdir("/old", 0700);
57955- root_fd = sys_open("/", 0, 0);
57956- old_fd = sys_open("/old", 0, 0);
57957+ sys_mkdir((__force const char __user *)"/old", 0700);
57958+ root_fd = sys_open((__force const char __user *)"/", 0, 0);
57959+ old_fd = sys_open((__force const char __user *)"/old", 0, 0);
57960 /* move initrd over / and chdir/chroot in initrd root */
57961- sys_chdir("/root");
57962- sys_mount(".", "/", NULL, MS_MOVE, NULL);
57963- sys_chroot(".");
57964+ sys_chdir((__force const char __user *)"/root");
57965+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
57966+ sys_chroot((__force const char __user *)".");
57967
57968 /*
57969 * In case that a resume from disk is carried out by linuxrc or one of
57970@@ -70,15 +70,15 @@ static void __init handle_initrd(void)
57971
57972 /* move initrd to rootfs' /old */
57973 sys_fchdir(old_fd);
57974- sys_mount("/", ".", NULL, MS_MOVE, NULL);
57975+ sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
57976 /* switch root and cwd back to / of rootfs */
57977 sys_fchdir(root_fd);
57978- sys_chroot(".");
57979+ sys_chroot((__force const char __user *)".");
57980 sys_close(old_fd);
57981 sys_close(root_fd);
57982
57983 if (new_decode_dev(real_root_dev) == Root_RAM0) {
57984- sys_chdir("/old");
57985+ sys_chdir((__force const char __user *)"/old");
57986 return;
57987 }
57988
57989@@ -86,17 +86,17 @@ static void __init handle_initrd(void)
57990 mount_root();
57991
57992 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
57993- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
57994+ error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
57995 if (!error)
57996 printk("okay\n");
57997 else {
57998- int fd = sys_open("/dev/root.old", O_RDWR, 0);
57999+ int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
58000 if (error == -ENOENT)
58001 printk("/initrd does not exist. Ignored.\n");
58002 else
58003 printk("failed\n");
58004 printk(KERN_NOTICE "Unmounting old root\n");
58005- sys_umount("/old", MNT_DETACH);
58006+ sys_umount((__force char __user *)"/old", MNT_DETACH);
58007 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
58008 if (fd < 0) {
58009 error = fd;
58010@@ -119,11 +119,11 @@ int __init initrd_load(void)
58011 * mounted in the normal path.
58012 */
58013 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
58014- sys_unlink("/initrd.image");
58015+ sys_unlink((__force const char __user *)"/initrd.image");
58016 handle_initrd();
58017 return 1;
58018 }
58019 }
58020- sys_unlink("/initrd.image");
58021+ sys_unlink((__force const char __user *)"/initrd.image");
58022 return 0;
58023 }
58024diff -urNp linux-2.6.32.41/init/do_mounts_md.c linux-2.6.32.41/init/do_mounts_md.c
58025--- linux-2.6.32.41/init/do_mounts_md.c 2011-03-27 14:31:47.000000000 -0400
58026+++ linux-2.6.32.41/init/do_mounts_md.c 2011-04-17 15:56:46.000000000 -0400
58027@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
58028 partitioned ? "_d" : "", minor,
58029 md_setup_args[ent].device_names);
58030
58031- fd = sys_open(name, 0, 0);
58032+ fd = sys_open((__force char __user *)name, 0, 0);
58033 if (fd < 0) {
58034 printk(KERN_ERR "md: open failed - cannot start "
58035 "array %s\n", name);
58036@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
58037 * array without it
58038 */
58039 sys_close(fd);
58040- fd = sys_open(name, 0, 0);
58041+ fd = sys_open((__force char __user *)name, 0, 0);
58042 sys_ioctl(fd, BLKRRPART, 0);
58043 }
58044 sys_close(fd);
58045@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
58046
58047 wait_for_device_probe();
58048
58049- fd = sys_open("/dev/md0", 0, 0);
58050+ fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
58051 if (fd >= 0) {
58052 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
58053 sys_close(fd);
58054diff -urNp linux-2.6.32.41/init/initramfs.c linux-2.6.32.41/init/initramfs.c
58055--- linux-2.6.32.41/init/initramfs.c 2011-03-27 14:31:47.000000000 -0400
58056+++ linux-2.6.32.41/init/initramfs.c 2011-04-17 15:56:46.000000000 -0400
58057@@ -74,7 +74,7 @@ static void __init free_hash(void)
58058 }
58059 }
58060
58061-static long __init do_utime(char __user *filename, time_t mtime)
58062+static long __init do_utime(__force char __user *filename, time_t mtime)
58063 {
58064 struct timespec t[2];
58065
58066@@ -109,7 +109,7 @@ static void __init dir_utime(void)
58067 struct dir_entry *de, *tmp;
58068 list_for_each_entry_safe(de, tmp, &dir_list, list) {
58069 list_del(&de->list);
58070- do_utime(de->name, de->mtime);
58071+ do_utime((__force char __user *)de->name, de->mtime);
58072 kfree(de->name);
58073 kfree(de);
58074 }
58075@@ -271,7 +271,7 @@ static int __init maybe_link(void)
58076 if (nlink >= 2) {
58077 char *old = find_link(major, minor, ino, mode, collected);
58078 if (old)
58079- return (sys_link(old, collected) < 0) ? -1 : 1;
58080+ return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
58081 }
58082 return 0;
58083 }
58084@@ -280,11 +280,11 @@ static void __init clean_path(char *path
58085 {
58086 struct stat st;
58087
58088- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
58089+ if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
58090 if (S_ISDIR(st.st_mode))
58091- sys_rmdir(path);
58092+ sys_rmdir((__force char __user *)path);
58093 else
58094- sys_unlink(path);
58095+ sys_unlink((__force char __user *)path);
58096 }
58097 }
58098
58099@@ -305,7 +305,7 @@ static int __init do_name(void)
58100 int openflags = O_WRONLY|O_CREAT;
58101 if (ml != 1)
58102 openflags |= O_TRUNC;
58103- wfd = sys_open(collected, openflags, mode);
58104+ wfd = sys_open((__force char __user *)collected, openflags, mode);
58105
58106 if (wfd >= 0) {
58107 sys_fchown(wfd, uid, gid);
58108@@ -317,17 +317,17 @@ static int __init do_name(void)
58109 }
58110 }
58111 } else if (S_ISDIR(mode)) {
58112- sys_mkdir(collected, mode);
58113- sys_chown(collected, uid, gid);
58114- sys_chmod(collected, mode);
58115+ sys_mkdir((__force char __user *)collected, mode);
58116+ sys_chown((__force char __user *)collected, uid, gid);
58117+ sys_chmod((__force char __user *)collected, mode);
58118 dir_add(collected, mtime);
58119 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
58120 S_ISFIFO(mode) || S_ISSOCK(mode)) {
58121 if (maybe_link() == 0) {
58122- sys_mknod(collected, mode, rdev);
58123- sys_chown(collected, uid, gid);
58124- sys_chmod(collected, mode);
58125- do_utime(collected, mtime);
58126+ sys_mknod((__force char __user *)collected, mode, rdev);
58127+ sys_chown((__force char __user *)collected, uid, gid);
58128+ sys_chmod((__force char __user *)collected, mode);
58129+ do_utime((__force char __user *)collected, mtime);
58130 }
58131 }
58132 return 0;
58133@@ -336,15 +336,15 @@ static int __init do_name(void)
58134 static int __init do_copy(void)
58135 {
58136 if (count >= body_len) {
58137- sys_write(wfd, victim, body_len);
58138+ sys_write(wfd, (__force char __user *)victim, body_len);
58139 sys_close(wfd);
58140- do_utime(vcollected, mtime);
58141+ do_utime((__force char __user *)vcollected, mtime);
58142 kfree(vcollected);
58143 eat(body_len);
58144 state = SkipIt;
58145 return 0;
58146 } else {
58147- sys_write(wfd, victim, count);
58148+ sys_write(wfd, (__force char __user *)victim, count);
58149 body_len -= count;
58150 eat(count);
58151 return 1;
58152@@ -355,9 +355,9 @@ static int __init do_symlink(void)
58153 {
58154 collected[N_ALIGN(name_len) + body_len] = '\0';
58155 clean_path(collected, 0);
58156- sys_symlink(collected + N_ALIGN(name_len), collected);
58157- sys_lchown(collected, uid, gid);
58158- do_utime(collected, mtime);
58159+ sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
58160+ sys_lchown((__force char __user *)collected, uid, gid);
58161+ do_utime((__force char __user *)collected, mtime);
58162 state = SkipIt;
58163 next_state = Reset;
58164 return 0;
58165diff -urNp linux-2.6.32.41/init/Kconfig linux-2.6.32.41/init/Kconfig
58166--- linux-2.6.32.41/init/Kconfig 2011-05-10 22:12:01.000000000 -0400
58167+++ linux-2.6.32.41/init/Kconfig 2011-05-10 22:12:34.000000000 -0400
58168@@ -1004,7 +1004,7 @@ config SLUB_DEBUG
58169
58170 config COMPAT_BRK
58171 bool "Disable heap randomization"
58172- default y
58173+ default n
58174 help
58175 Randomizing heap placement makes heap exploits harder, but it
58176 also breaks ancient binaries (including anything libc5 based).
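
Flipping COMPAT_BRK to "default n" means heap (brk) randomization is enabled by default. Whether a given system actually randomizes the break can be observed from userspace; running the sketch below several times should print different addresses on a kernel with /proc/sys/kernel/randomize_va_space set to 2 (that assumption, and the sketch itself, are illustrative and not taken from the patch):

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* sbrk(0) reports the current program break without moving it */
	void *brk_start = sbrk(0);

	printf("initial program break: %p\n", brk_start);
	return 0;
}
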
58177diff -urNp linux-2.6.32.41/init/main.c linux-2.6.32.41/init/main.c
58178--- linux-2.6.32.41/init/main.c 2011-05-10 22:12:01.000000000 -0400
58179+++ linux-2.6.32.41/init/main.c 2011-05-22 23:02:06.000000000 -0400
58180@@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void)
58181 #ifdef CONFIG_TC
58182 extern void tc_init(void);
58183 #endif
58184+extern void grsecurity_init(void);
58185
58186 enum system_states system_state __read_mostly;
58187 EXPORT_SYMBOL(system_state);
58188@@ -183,6 +184,49 @@ static int __init set_reset_devices(char
58189
58190 __setup("reset_devices", set_reset_devices);
58191
58192+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
58193+extern char pax_enter_kernel_user[];
58194+extern char pax_exit_kernel_user[];
58195+extern pgdval_t clone_pgd_mask;
58196+#endif
58197+
58198+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
58199+static int __init setup_pax_nouderef(char *str)
58200+{
58201+#ifdef CONFIG_X86_32
58202+ unsigned int cpu;
58203+ struct desc_struct *gdt;
58204+
58205+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
58206+ gdt = get_cpu_gdt_table(cpu);
58207+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
58208+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
58209+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
58210+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
58211+ }
58212+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
58213+#else
58214+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
58215+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
58216+ clone_pgd_mask = ~(pgdval_t)0UL;
58217+#endif
58218+
58219+ return 0;
58220+}
58221+early_param("pax_nouderef", setup_pax_nouderef);
58222+#endif
58223+
58224+#ifdef CONFIG_PAX_SOFTMODE
58225+unsigned int pax_softmode;
58226+
58227+static int __init setup_pax_softmode(char *str)
58228+{
58229+ get_option(&str, &pax_softmode);
58230+ return 1;
58231+}
58232+__setup("pax_softmode=", setup_pax_softmode);
58233+#endif
58234+
58235 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
58236 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
58237 static const char *panic_later, *panic_param;
58238@@ -705,52 +749,53 @@ int initcall_debug;
58239 core_param(initcall_debug, initcall_debug, bool, 0644);
58240
58241 static char msgbuf[64];
58242-static struct boot_trace_call call;
58243-static struct boot_trace_ret ret;
58244+static struct boot_trace_call trace_call;
58245+static struct boot_trace_ret trace_ret;
58246
58247 int do_one_initcall(initcall_t fn)
58248 {
58249 int count = preempt_count();
58250 ktime_t calltime, delta, rettime;
58251+ const char *msg1 = "", *msg2 = "";
58252
58253 if (initcall_debug) {
58254- call.caller = task_pid_nr(current);
58255- printk("calling %pF @ %i\n", fn, call.caller);
58256+ trace_call.caller = task_pid_nr(current);
58257+ printk("calling %pF @ %i\n", fn, trace_call.caller);
58258 calltime = ktime_get();
58259- trace_boot_call(&call, fn);
58260+ trace_boot_call(&trace_call, fn);
58261 enable_boot_trace();
58262 }
58263
58264- ret.result = fn();
58265+ trace_ret.result = fn();
58266
58267 if (initcall_debug) {
58268 disable_boot_trace();
58269 rettime = ktime_get();
58270 delta = ktime_sub(rettime, calltime);
58271- ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
58272- trace_boot_ret(&ret, fn);
58273+ trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
58274+ trace_boot_ret(&trace_ret, fn);
58275 printk("initcall %pF returned %d after %Ld usecs\n", fn,
58276- ret.result, ret.duration);
58277+ trace_ret.result, trace_ret.duration);
58278 }
58279
58280 msgbuf[0] = 0;
58281
58282- if (ret.result && ret.result != -ENODEV && initcall_debug)
58283- sprintf(msgbuf, "error code %d ", ret.result);
58284+ if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
58285+ sprintf(msgbuf, "error code %d ", trace_ret.result);
58286
58287 if (preempt_count() != count) {
58288- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
58289+ msg1 = " preemption imbalance";
58290 preempt_count() = count;
58291 }
58292 if (irqs_disabled()) {
58293- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
58294+ msg2 = " disabled interrupts";
58295 local_irq_enable();
58296 }
58297- if (msgbuf[0]) {
58298- printk("initcall %pF returned with %s\n", fn, msgbuf);
58299+ if (msgbuf[0] || *msg1 || *msg2) {
58300+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
58301 }
58302
58303- return ret.result;
58304+ return trace_ret.result;
58305 }
58306
58307
58308@@ -893,11 +938,13 @@ static int __init kernel_init(void * unu
58309 if (!ramdisk_execute_command)
58310 ramdisk_execute_command = "/init";
58311
58312- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
58313+ if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
58314 ramdisk_execute_command = NULL;
58315 prepare_namespace();
58316 }
58317
58318+ grsecurity_init();
58319+
58320 /*
58321 * Ok, we have completed the initial bootup, and
58322 * we're essentially up and running. Get rid of the
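
The init/main.c hunk adds two boot parameters: pax_nouderef, an early_param taking no value, and pax_softmode=, a __setup handler that reads an integer with get_option(). The general pattern -- scan the command line for "name=" and hand the value to a registered handler -- can be sketched in plain userspace C (parse_cmdline and setup_softmode below are illustrative stand-ins, not kernel code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned int pax_softmode;

/* handler in the style of setup_pax_softmode(): read the value after '=' */
static int setup_softmode(const char *val)
{
	pax_softmode = (unsigned int)strtoul(val, NULL, 0);
	return 1;
}

/* minimal stand-in for the kernel walking its __setup table */
static void parse_cmdline(const char *cmdline)
{
	const char *p = strstr(cmdline, "pax_softmode=");

	if (p)
		setup_softmode(p + strlen("pax_softmode="));
}

int main(void)
{
	parse_cmdline("ro quiet pax_softmode=1");
	printf("pax_softmode = %u\n", pax_softmode);
	return 0;
}
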
58323diff -urNp linux-2.6.32.41/init/noinitramfs.c linux-2.6.32.41/init/noinitramfs.c
58324--- linux-2.6.32.41/init/noinitramfs.c 2011-03-27 14:31:47.000000000 -0400
58325+++ linux-2.6.32.41/init/noinitramfs.c 2011-04-17 15:56:46.000000000 -0400
58326@@ -29,7 +29,7 @@ static int __init default_rootfs(void)
58327 {
58328 int err;
58329
58330- err = sys_mkdir("/dev", 0755);
58331+ err = sys_mkdir((const char __user *)"/dev", 0755);
58332 if (err < 0)
58333 goto out;
58334
58335@@ -39,7 +39,7 @@ static int __init default_rootfs(void)
58336 if (err < 0)
58337 goto out;
58338
58339- err = sys_mkdir("/root", 0700);
58340+ err = sys_mkdir((const char __user *)"/root", 0700);
58341 if (err < 0)
58342 goto out;
58343
58344diff -urNp linux-2.6.32.41/ipc/mqueue.c linux-2.6.32.41/ipc/mqueue.c
58345--- linux-2.6.32.41/ipc/mqueue.c 2011-03-27 14:31:47.000000000 -0400
58346+++ linux-2.6.32.41/ipc/mqueue.c 2011-04-17 15:56:46.000000000 -0400
58347@@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(st
58348 mq_bytes = (mq_msg_tblsz +
58349 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
58350
58351+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
58352 spin_lock(&mq_lock);
58353 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
58354 u->mq_bytes + mq_bytes >
58355diff -urNp linux-2.6.32.41/ipc/sem.c linux-2.6.32.41/ipc/sem.c
58356--- linux-2.6.32.41/ipc/sem.c 2011-03-27 14:31:47.000000000 -0400
58357+++ linux-2.6.32.41/ipc/sem.c 2011-05-16 21:46:57.000000000 -0400
58358@@ -671,6 +671,8 @@ static int semctl_main(struct ipc_namesp
58359 ushort* sem_io = fast_sem_io;
58360 int nsems;
58361
58362+ pax_track_stack();
58363+
58364 sma = sem_lock_check(ns, semid);
58365 if (IS_ERR(sma))
58366 return PTR_ERR(sma);
58367@@ -1071,6 +1073,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
58368 unsigned long jiffies_left = 0;
58369 struct ipc_namespace *ns;
58370
58371+ pax_track_stack();
58372+
58373 ns = current->nsproxy->ipc_ns;
58374
58375 if (nsops < 1 || semid < 0)
58376diff -urNp linux-2.6.32.41/ipc/shm.c linux-2.6.32.41/ipc/shm.c
58377--- linux-2.6.32.41/ipc/shm.c 2011-03-27 14:31:47.000000000 -0400
58378+++ linux-2.6.32.41/ipc/shm.c 2011-04-17 15:56:46.000000000 -0400
58379@@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_name
58380 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
58381 #endif
58382
58383+#ifdef CONFIG_GRKERNSEC
58384+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
58385+ const time_t shm_createtime, const uid_t cuid,
58386+ const int shmid);
58387+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
58388+ const time_t shm_createtime);
58389+#endif
58390+
58391 void shm_init_ns(struct ipc_namespace *ns)
58392 {
58393 ns->shm_ctlmax = SHMMAX;
58394@@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *
58395 shp->shm_lprid = 0;
58396 shp->shm_atim = shp->shm_dtim = 0;
58397 shp->shm_ctim = get_seconds();
58398+#ifdef CONFIG_GRKERNSEC
58399+ {
58400+ struct timespec timeval;
58401+ do_posix_clock_monotonic_gettime(&timeval);
58402+
58403+ shp->shm_createtime = timeval.tv_sec;
58404+ }
58405+#endif
58406 shp->shm_segsz = size;
58407 shp->shm_nattch = 0;
58408 shp->shm_file = file;
58409@@ -880,9 +896,21 @@ long do_shmat(int shmid, char __user *sh
58410 if (err)
58411 goto out_unlock;
58412
58413+#ifdef CONFIG_GRKERNSEC
58414+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
58415+ shp->shm_perm.cuid, shmid) ||
58416+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
58417+ err = -EACCES;
58418+ goto out_unlock;
58419+ }
58420+#endif
58421+
58422 path.dentry = dget(shp->shm_file->f_path.dentry);
58423 path.mnt = shp->shm_file->f_path.mnt;
58424 shp->shm_nattch++;
58425+#ifdef CONFIG_GRKERNSEC
58426+ shp->shm_lapid = current->pid;
58427+#endif
58428 size = i_size_read(path.dentry->d_inode);
58429 shm_unlock(shp);
58430
58431diff -urNp linux-2.6.32.41/kernel/acct.c linux-2.6.32.41/kernel/acct.c
58432--- linux-2.6.32.41/kernel/acct.c 2011-03-27 14:31:47.000000000 -0400
58433+++ linux-2.6.32.41/kernel/acct.c 2011-04-17 15:56:46.000000000 -0400
58434@@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_a
58435 */
58436 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
58437 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
58438- file->f_op->write(file, (char *)&ac,
58439+ file->f_op->write(file, (__force char __user *)&ac,
58440 sizeof(acct_t), &file->f_pos);
58441 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
58442 set_fs(fs);
58443diff -urNp linux-2.6.32.41/kernel/audit.c linux-2.6.32.41/kernel/audit.c
58444--- linux-2.6.32.41/kernel/audit.c 2011-03-27 14:31:47.000000000 -0400
58445+++ linux-2.6.32.41/kernel/audit.c 2011-05-04 17:56:28.000000000 -0400
58446@@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
58447 3) suppressed due to audit_rate_limit
58448 4) suppressed due to audit_backlog_limit
58449 */
58450-static atomic_t audit_lost = ATOMIC_INIT(0);
58451+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
58452
58453 /* The netlink socket. */
58454 static struct sock *audit_sock;
58455@@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
58456 unsigned long now;
58457 int print;
58458
58459- atomic_inc(&audit_lost);
58460+ atomic_inc_unchecked(&audit_lost);
58461
58462 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
58463
58464@@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
58465 printk(KERN_WARNING
58466 "audit: audit_lost=%d audit_rate_limit=%d "
58467 "audit_backlog_limit=%d\n",
58468- atomic_read(&audit_lost),
58469+ atomic_read_unchecked(&audit_lost),
58470 audit_rate_limit,
58471 audit_backlog_limit);
58472 audit_panic(message);
58473@@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_b
58474 status_set.pid = audit_pid;
58475 status_set.rate_limit = audit_rate_limit;
58476 status_set.backlog_limit = audit_backlog_limit;
58477- status_set.lost = atomic_read(&audit_lost);
58478+ status_set.lost = atomic_read_unchecked(&audit_lost);
58479 status_set.backlog = skb_queue_len(&audit_skb_queue);
58480 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
58481 &status_set, sizeof(status_set));
58482@@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_b
58483 spin_unlock_irq(&tsk->sighand->siglock);
58484 }
58485 read_unlock(&tasklist_lock);
58486- audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
58487- &s, sizeof(s));
58488+
58489+ if (!err)
58490+ audit_send_reply(NETLINK_CB(skb).pid, seq,
58491+ AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
58492 break;
58493 }
58494 case AUDIT_TTY_SET: {
58495diff -urNp linux-2.6.32.41/kernel/auditsc.c linux-2.6.32.41/kernel/auditsc.c
58496--- linux-2.6.32.41/kernel/auditsc.c 2011-03-27 14:31:47.000000000 -0400
58497+++ linux-2.6.32.41/kernel/auditsc.c 2011-05-04 17:56:28.000000000 -0400
58498@@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_conte
58499 }
58500
58501 /* global counter which is incremented every time something logs in */
58502-static atomic_t session_id = ATOMIC_INIT(0);
58503+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
58504
58505 /**
58506 * audit_set_loginuid - set a task's audit_context loginuid
58507@@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT
58508 */
58509 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
58510 {
58511- unsigned int sessionid = atomic_inc_return(&session_id);
58512+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
58513 struct audit_context *context = task->audit_context;
58514
58515 if (context && context->in_syscall) {
58516diff -urNp linux-2.6.32.41/kernel/capability.c linux-2.6.32.41/kernel/capability.c
58517--- linux-2.6.32.41/kernel/capability.c 2011-03-27 14:31:47.000000000 -0400
58518+++ linux-2.6.32.41/kernel/capability.c 2011-04-17 15:56:46.000000000 -0400
58519@@ -305,10 +305,26 @@ int capable(int cap)
58520 BUG();
58521 }
58522
58523- if (security_capable(cap) == 0) {
58524+ if (security_capable(cap) == 0 && gr_is_capable(cap)) {
58525 current->flags |= PF_SUPERPRIV;
58526 return 1;
58527 }
58528 return 0;
58529 }
58530+
58531+int capable_nolog(int cap)
58532+{
58533+ if (unlikely(!cap_valid(cap))) {
58534+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
58535+ BUG();
58536+ }
58537+
58538+ if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
58539+ current->flags |= PF_SUPERPRIV;
58540+ return 1;
58541+ }
58542+ return 0;
58543+}
58544+
58545 EXPORT_SYMBOL(capable);
58546+EXPORT_SYMBOL(capable_nolog);
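
Here capable() is extended to consult the grsecurity RBAC check alongside security_capable(), and a second entry point, capable_nolog(), is added for callers whose capability probes should not generate RBAC log traffic. The shape of that dual-entry-point pattern, with stand-in predicates invented for the sketch (lsm_allows, rbac_allows):

#include <stdbool.h>
#include <stdio.h>

/* stand-in for security_capable(): the LSM verdict */
static bool lsm_allows(int cap)
{
	(void)cap;
	return true;
}

/* stand-in for the grsec RBAC check; "log" selects the audited vs silent path */
static bool rbac_allows(int cap, bool log)
{
	if (log)
		printf("audit: capability %d checked\n", cap);
	return true;
}

static bool my_capable(int cap)
{
	return lsm_allows(cap) && rbac_allows(cap, true);
}

static bool my_capable_nolog(int cap)
{
	return lsm_allows(cap) && rbac_allows(cap, false);
}

int main(void)
{
	printf("capable: %d\n", my_capable(21));		/* audited */
	printf("capable_nolog: %d\n", my_capable_nolog(21));	/* silent */
	return 0;
}
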
58547diff -urNp linux-2.6.32.41/kernel/cgroup.c linux-2.6.32.41/kernel/cgroup.c
58548--- linux-2.6.32.41/kernel/cgroup.c 2011-03-27 14:31:47.000000000 -0400
58549+++ linux-2.6.32.41/kernel/cgroup.c 2011-05-16 21:46:57.000000000 -0400
58550@@ -536,6 +536,8 @@ static struct css_set *find_css_set(
58551 struct hlist_head *hhead;
58552 struct cg_cgroup_link *link;
58553
58554+ pax_track_stack();
58555+
58556 /* First see if we already have a cgroup group that matches
58557 * the desired set */
58558 read_lock(&css_set_lock);
58559diff -urNp linux-2.6.32.41/kernel/configs.c linux-2.6.32.41/kernel/configs.c
58560--- linux-2.6.32.41/kernel/configs.c 2011-03-27 14:31:47.000000000 -0400
58561+++ linux-2.6.32.41/kernel/configs.c 2011-04-17 15:56:46.000000000 -0400
58562@@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
58563 struct proc_dir_entry *entry;
58564
58565 /* create the current config file */
58566+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
58567+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
58568+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
58569+ &ikconfig_file_ops);
58570+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58571+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
58572+ &ikconfig_file_ops);
58573+#endif
58574+#else
58575 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
58576 &ikconfig_file_ops);
58577+#endif
58578+
58579 if (!entry)
58580 return -ENOMEM;
58581
58582diff -urNp linux-2.6.32.41/kernel/cpu.c linux-2.6.32.41/kernel/cpu.c
58583--- linux-2.6.32.41/kernel/cpu.c 2011-03-27 14:31:47.000000000 -0400
58584+++ linux-2.6.32.41/kernel/cpu.c 2011-04-17 15:56:46.000000000 -0400
58585@@ -19,7 +19,7 @@
58586 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
58587 static DEFINE_MUTEX(cpu_add_remove_lock);
58588
58589-static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
58590+static RAW_NOTIFIER_HEAD(cpu_chain);
58591
58592 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
58593 * Should always be manipulated under cpu_add_remove_lock
58594diff -urNp linux-2.6.32.41/kernel/cred.c linux-2.6.32.41/kernel/cred.c
58595--- linux-2.6.32.41/kernel/cred.c 2011-03-27 14:31:47.000000000 -0400
58596+++ linux-2.6.32.41/kernel/cred.c 2011-05-17 19:26:34.000000000 -0400
58597@@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head
58598 */
58599 void __put_cred(struct cred *cred)
58600 {
58601+ pax_track_stack();
58602+
58603 kdebug("__put_cred(%p{%d,%d})", cred,
58604 atomic_read(&cred->usage),
58605 read_cred_subscribers(cred));
58606@@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
58607 {
58608 struct cred *cred;
58609
58610+ pax_track_stack();
58611+
58612 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
58613 atomic_read(&tsk->cred->usage),
58614 read_cred_subscribers(tsk->cred));
58615@@ -222,6 +226,8 @@ const struct cred *get_task_cred(struct
58616 {
58617 const struct cred *cred;
58618
58619+ pax_track_stack();
58620+
58621 rcu_read_lock();
58622
58623 do {
58624@@ -241,6 +247,8 @@ struct cred *cred_alloc_blank(void)
58625 {
58626 struct cred *new;
58627
58628+ pax_track_stack();
58629+
58630 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
58631 if (!new)
58632 return NULL;
58633@@ -289,6 +297,8 @@ struct cred *prepare_creds(void)
58634 const struct cred *old;
58635 struct cred *new;
58636
58637+ pax_track_stack();
58638+
58639 validate_process_creds();
58640
58641 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
58642@@ -335,6 +345,8 @@ struct cred *prepare_exec_creds(void)
58643 struct thread_group_cred *tgcred = NULL;
58644 struct cred *new;
58645
58646+ pax_track_stack();
58647+
58648 #ifdef CONFIG_KEYS
58649 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
58650 if (!tgcred)
58651@@ -441,6 +453,8 @@ int copy_creds(struct task_struct *p, un
58652 struct cred *new;
58653 int ret;
58654
58655+ pax_track_stack();
58656+
58657 mutex_init(&p->cred_guard_mutex);
58658
58659 if (
58660@@ -528,6 +542,8 @@ int commit_creds(struct cred *new)
58661 struct task_struct *task = current;
58662 const struct cred *old = task->real_cred;
58663
58664+ pax_track_stack();
58665+
58666 kdebug("commit_creds(%p{%d,%d})", new,
58667 atomic_read(&new->usage),
58668 read_cred_subscribers(new));
58669@@ -544,6 +560,8 @@ int commit_creds(struct cred *new)
58670
58671 get_cred(new); /* we will require a ref for the subj creds too */
58672
58673+ gr_set_role_label(task, new->uid, new->gid);
58674+
58675 /* dumpability changes */
58676 if (old->euid != new->euid ||
58677 old->egid != new->egid ||
58678@@ -606,6 +624,8 @@ EXPORT_SYMBOL(commit_creds);
58679 */
58680 void abort_creds(struct cred *new)
58681 {
58682+ pax_track_stack();
58683+
58684 kdebug("abort_creds(%p{%d,%d})", new,
58685 atomic_read(&new->usage),
58686 read_cred_subscribers(new));
58687@@ -629,6 +649,8 @@ const struct cred *override_creds(const
58688 {
58689 const struct cred *old = current->cred;
58690
58691+ pax_track_stack();
58692+
58693 kdebug("override_creds(%p{%d,%d})", new,
58694 atomic_read(&new->usage),
58695 read_cred_subscribers(new));
58696@@ -658,6 +680,8 @@ void revert_creds(const struct cred *old
58697 {
58698 const struct cred *override = current->cred;
58699
58700+ pax_track_stack();
58701+
58702 kdebug("revert_creds(%p{%d,%d})", old,
58703 atomic_read(&old->usage),
58704 read_cred_subscribers(old));
58705@@ -704,6 +728,8 @@ struct cred *prepare_kernel_cred(struct
58706 const struct cred *old;
58707 struct cred *new;
58708
58709+ pax_track_stack();
58710+
58711 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
58712 if (!new)
58713 return NULL;
58714@@ -758,6 +784,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
58715 */
58716 int set_security_override(struct cred *new, u32 secid)
58717 {
58718+ pax_track_stack();
58719+
58720 return security_kernel_act_as(new, secid);
58721 }
58722 EXPORT_SYMBOL(set_security_override);
58723@@ -777,6 +805,8 @@ int set_security_override_from_ctx(struc
58724 u32 secid;
58725 int ret;
58726
58727+ pax_track_stack();
58728+
58729 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
58730 if (ret < 0)
58731 return ret;
58732diff -urNp linux-2.6.32.41/kernel/exit.c linux-2.6.32.41/kernel/exit.c
58733--- linux-2.6.32.41/kernel/exit.c 2011-03-27 14:31:47.000000000 -0400
58734+++ linux-2.6.32.41/kernel/exit.c 2011-04-17 15:56:46.000000000 -0400
58735@@ -55,6 +55,10 @@
58736 #include <asm/pgtable.h>
58737 #include <asm/mmu_context.h>
58738
58739+#ifdef CONFIG_GRKERNSEC
58740+extern rwlock_t grsec_exec_file_lock;
58741+#endif
58742+
58743 static void exit_mm(struct task_struct * tsk);
58744
58745 static void __unhash_process(struct task_struct *p)
58746@@ -174,6 +178,8 @@ void release_task(struct task_struct * p
58747 struct task_struct *leader;
58748 int zap_leader;
58749 repeat:
58750+ gr_del_task_from_ip_table(p);
58751+
58752 tracehook_prepare_release_task(p);
58753 /* don't need to get the RCU readlock here - the process is dead and
58754 * can't be modifying its own credentials */
58755@@ -341,11 +347,22 @@ static void reparent_to_kthreadd(void)
58756 {
58757 write_lock_irq(&tasklist_lock);
58758
58759+#ifdef CONFIG_GRKERNSEC
58760+ write_lock(&grsec_exec_file_lock);
58761+ if (current->exec_file) {
58762+ fput(current->exec_file);
58763+ current->exec_file = NULL;
58764+ }
58765+ write_unlock(&grsec_exec_file_lock);
58766+#endif
58767+
58768 ptrace_unlink(current);
58769 /* Reparent to init */
58770 current->real_parent = current->parent = kthreadd_task;
58771 list_move_tail(&current->sibling, &current->real_parent->children);
58772
58773+ gr_set_kernel_label(current);
58774+
58775 /* Set the exit signal to SIGCHLD so we signal init on exit */
58776 current->exit_signal = SIGCHLD;
58777
58778@@ -397,7 +414,7 @@ int allow_signal(int sig)
58779 * know it'll be handled, so that they don't get converted to
58780 * SIGKILL or just silently dropped.
58781 */
58782- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
58783+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
58784 recalc_sigpending();
58785 spin_unlock_irq(&current->sighand->siglock);
58786 return 0;
58787@@ -433,6 +450,17 @@ void daemonize(const char *name, ...)
58788 vsnprintf(current->comm, sizeof(current->comm), name, args);
58789 va_end(args);
58790
58791+#ifdef CONFIG_GRKERNSEC
58792+ write_lock(&grsec_exec_file_lock);
58793+ if (current->exec_file) {
58794+ fput(current->exec_file);
58795+ current->exec_file = NULL;
58796+ }
58797+ write_unlock(&grsec_exec_file_lock);
58798+#endif
58799+
58800+ gr_set_kernel_label(current);
58801+
58802 /*
58803 * If we were started as result of loading a module, close all of the
58804 * user space pages. We don't need them, and if we didn't close them
58805@@ -897,17 +925,17 @@ NORET_TYPE void do_exit(long code)
58806 struct task_struct *tsk = current;
58807 int group_dead;
58808
58809- profile_task_exit(tsk);
58810-
58811- WARN_ON(atomic_read(&tsk->fs_excl));
58812-
58813+ /*
58814+ * Check this first since set_fs() below depends on
58815+ * current_thread_info(), which we better not access when we're in
58816+ * interrupt context. Other than that, we want to do the set_fs()
58817+ * as early as possible.
58818+ */
58819 if (unlikely(in_interrupt()))
58820 panic("Aiee, killing interrupt handler!");
58821- if (unlikely(!tsk->pid))
58822- panic("Attempted to kill the idle task!");
58823
58824 /*
58825- * If do_exit is called because this processes oopsed, it's possible
58826+ * If do_exit is called because this processes Oops'ed, it's possible
58827 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
58828 * continuing. Amongst other possible reasons, this is to prevent
58829 * mm_release()->clear_child_tid() from writing to a user-controlled
58830@@ -915,6 +943,13 @@ NORET_TYPE void do_exit(long code)
58831 */
58832 set_fs(USER_DS);
58833
58834+ profile_task_exit(tsk);
58835+
58836+ WARN_ON(atomic_read(&tsk->fs_excl));
58837+
58838+ if (unlikely(!tsk->pid))
58839+ panic("Attempted to kill the idle task!");
58840+
58841 tracehook_report_exit(&code);
58842
58843 validate_creds_for_do_exit(tsk);
58844@@ -973,6 +1008,9 @@ NORET_TYPE void do_exit(long code)
58845 tsk->exit_code = code;
58846 taskstats_exit(tsk, group_dead);
58847
58848+ gr_acl_handle_psacct(tsk, code);
58849+ gr_acl_handle_exit();
58850+
58851 exit_mm(tsk);
58852
58853 if (group_dead)
58854@@ -1188,7 +1226,7 @@ static int wait_task_zombie(struct wait_
58855
58856 if (unlikely(wo->wo_flags & WNOWAIT)) {
58857 int exit_code = p->exit_code;
58858- int why, status;
58859+ int why;
58860
58861 get_task_struct(p);
58862 read_unlock(&tasklist_lock);
58863diff -urNp linux-2.6.32.41/kernel/fork.c linux-2.6.32.41/kernel/fork.c
58864--- linux-2.6.32.41/kernel/fork.c 2011-03-27 14:31:47.000000000 -0400
58865+++ linux-2.6.32.41/kernel/fork.c 2011-04-17 15:56:46.000000000 -0400
58866@@ -253,7 +253,7 @@ static struct task_struct *dup_task_stru
58867 *stackend = STACK_END_MAGIC; /* for overflow detection */
58868
58869 #ifdef CONFIG_CC_STACKPROTECTOR
58870- tsk->stack_canary = get_random_int();
58871+ tsk->stack_canary = pax_get_random_long();
58872 #endif
58873
58874 /* One for us, one for whoever does the "release_task()" (usually parent) */
58875@@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm
58876 mm->locked_vm = 0;
58877 mm->mmap = NULL;
58878 mm->mmap_cache = NULL;
58879- mm->free_area_cache = oldmm->mmap_base;
58880- mm->cached_hole_size = ~0UL;
58881+ mm->free_area_cache = oldmm->free_area_cache;
58882+ mm->cached_hole_size = oldmm->cached_hole_size;
58883 mm->map_count = 0;
58884 cpumask_clear(mm_cpumask(mm));
58885 mm->mm_rb = RB_ROOT;
58886@@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm
58887 tmp->vm_flags &= ~VM_LOCKED;
58888 tmp->vm_mm = mm;
58889 tmp->vm_next = tmp->vm_prev = NULL;
58890+ tmp->vm_mirror = NULL;
58891 anon_vma_link(tmp);
58892 file = tmp->vm_file;
58893 if (file) {
58894@@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm
58895 if (retval)
58896 goto out;
58897 }
58898+
58899+#ifdef CONFIG_PAX_SEGMEXEC
58900+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
58901+ struct vm_area_struct *mpnt_m;
58902+
58903+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
58904+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
58905+
58906+ if (!mpnt->vm_mirror)
58907+ continue;
58908+
58909+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
58910+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
58911+ mpnt->vm_mirror = mpnt_m;
58912+ } else {
58913+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
58914+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
58915+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
58916+ mpnt->vm_mirror->vm_mirror = mpnt;
58917+ }
58918+ }
58919+ BUG_ON(mpnt_m);
58920+ }
58921+#endif
58922+
58923 /* a new mm has just been created */
58924 arch_dup_mmap(oldmm, mm);
58925 retval = 0;
58926@@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_f
58927 write_unlock(&fs->lock);
58928 return -EAGAIN;
58929 }
58930- fs->users++;
58931+ atomic_inc(&fs->users);
58932 write_unlock(&fs->lock);
58933 return 0;
58934 }
58935 tsk->fs = copy_fs_struct(fs);
58936 if (!tsk->fs)
58937 return -ENOMEM;
58938+ gr_set_chroot_entries(tsk, &tsk->fs->root);
58939 return 0;
58940 }
58941
58942@@ -1033,10 +1060,13 @@ static struct task_struct *copy_process(
58943 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
58944 #endif
58945 retval = -EAGAIN;
58946+
58947+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
58948+
58949 if (atomic_read(&p->real_cred->user->processes) >=
58950 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
58951- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
58952- p->real_cred->user != INIT_USER)
58953+ if (p->real_cred->user != INIT_USER &&
58954+ !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
58955 goto bad_fork_free;
58956 }
58957
58958@@ -1183,6 +1213,8 @@ static struct task_struct *copy_process(
58959 goto bad_fork_free_pid;
58960 }
58961
58962+ gr_copy_label(p);
58963+
58964 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
58965 /*
58966 * Clear TID on mm_release()?
58967@@ -1333,6 +1365,8 @@ bad_fork_cleanup_count:
58968 bad_fork_free:
58969 free_task(p);
58970 fork_out:
58971+ gr_log_forkfail(retval);
58972+
58973 return ERR_PTR(retval);
58974 }
58975
58976@@ -1426,6 +1460,8 @@ long do_fork(unsigned long clone_flags,
58977 if (clone_flags & CLONE_PARENT_SETTID)
58978 put_user(nr, parent_tidptr);
58979
58980+ gr_handle_brute_check();
58981+
58982 if (clone_flags & CLONE_VFORK) {
58983 p->vfork_done = &vfork;
58984 init_completion(&vfork);
58985@@ -1558,7 +1594,7 @@ static int unshare_fs(unsigned long unsh
58986 return 0;
58987
58988 /* don't need lock here; in the worst case we'll do useless copy */
58989- if (fs->users == 1)
58990+ if (atomic_read(&fs->users) == 1)
58991 return 0;
58992
58993 *new_fsp = copy_fs_struct(fs);
58994@@ -1681,7 +1717,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
58995 fs = current->fs;
58996 write_lock(&fs->lock);
58997 current->fs = new_fs;
58998- if (--fs->users)
58999+ gr_set_chroot_entries(current, &current->fs->root);
59000+ if (atomic_dec_return(&fs->users))
59001 new_fs = NULL;
59002 else
59003 new_fs = fs;
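
The fork.c and unshare hunks switch fs_struct's users field from a plain int to an atomic counter manipulated with atomic_inc()/atomic_dec_return(). The sketch below (userspace, POSIX threads plus GCC/clang __atomic builtins; build with -pthread) shows the motivation: concurrent "count++" on a plain int loses updates, while the atomic increment does not:

#include <pthread.h>
#include <stdio.h>

#define ITERS 1000000

static int plain_count;
static int atomic_count;

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < ITERS; i++) {
		plain_count++;				/* racy read-modify-write */
		__atomic_add_fetch(&atomic_count, 1, __ATOMIC_RELAXED);
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, NULL);
	pthread_create(&b, NULL, worker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	printf("plain:  %d (updates usually lost)\n", plain_count);
	printf("atomic: %d (always %d)\n", atomic_count, 2 * ITERS);
	return 0;
}
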
59004diff -urNp linux-2.6.32.41/kernel/futex.c linux-2.6.32.41/kernel/futex.c
59005--- linux-2.6.32.41/kernel/futex.c 2011-03-27 14:31:47.000000000 -0400
59006+++ linux-2.6.32.41/kernel/futex.c 2011-05-16 21:46:57.000000000 -0400
59007@@ -54,6 +54,7 @@
59008 #include <linux/mount.h>
59009 #include <linux/pagemap.h>
59010 #include <linux/syscalls.h>
59011+#include <linux/ptrace.h>
59012 #include <linux/signal.h>
59013 #include <linux/module.h>
59014 #include <linux/magic.h>
59015@@ -221,6 +222,11 @@ get_futex_key(u32 __user *uaddr, int fsh
59016 struct page *page;
59017 int err;
59018
59019+#ifdef CONFIG_PAX_SEGMEXEC
59020+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
59021+ return -EFAULT;
59022+#endif
59023+
59024 /*
59025 * The futex address must be "naturally" aligned.
59026 */
59027@@ -1789,6 +1795,8 @@ static int futex_wait(u32 __user *uaddr,
59028 struct futex_q q;
59029 int ret;
59030
59031+ pax_track_stack();
59032+
59033 if (!bitset)
59034 return -EINVAL;
59035
59036@@ -1841,7 +1849,7 @@ retry:
59037
59038 restart = &current_thread_info()->restart_block;
59039 restart->fn = futex_wait_restart;
59040- restart->futex.uaddr = (u32 *)uaddr;
59041+ restart->futex.uaddr = uaddr;
59042 restart->futex.val = val;
59043 restart->futex.time = abs_time->tv64;
59044 restart->futex.bitset = bitset;
59045@@ -2203,6 +2211,8 @@ static int futex_wait_requeue_pi(u32 __u
59046 struct futex_q q;
59047 int res, ret;
59048
59049+ pax_track_stack();
59050+
59051 if (!bitset)
59052 return -EINVAL;
59053
59054@@ -2377,7 +2387,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
59055 {
59056 struct robust_list_head __user *head;
59057 unsigned long ret;
59058+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
59059 const struct cred *cred = current_cred(), *pcred;
59060+#endif
59061
59062 if (!futex_cmpxchg_enabled)
59063 return -ENOSYS;
59064@@ -2393,11 +2405,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
59065 if (!p)
59066 goto err_unlock;
59067 ret = -EPERM;
59068+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59069+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
59070+ goto err_unlock;
59071+#else
59072 pcred = __task_cred(p);
59073 if (cred->euid != pcred->euid &&
59074 cred->euid != pcred->uid &&
59075 !capable(CAP_SYS_PTRACE))
59076 goto err_unlock;
59077+#endif
59078 head = p->robust_list;
59079 rcu_read_unlock();
59080 }
59081@@ -2459,7 +2476,7 @@ retry:
59082 */
59083 static inline int fetch_robust_entry(struct robust_list __user **entry,
59084 struct robust_list __user * __user *head,
59085- int *pi)
59086+ unsigned int *pi)
59087 {
59088 unsigned long uentry;
59089
59090@@ -2640,6 +2657,7 @@ static int __init futex_init(void)
59091 {
59092 u32 curval;
59093 int i;
59094+ mm_segment_t oldfs;
59095
59096 /*
59097 * This will fail and we want it. Some arch implementations do
59098@@ -2651,7 +2669,10 @@ static int __init futex_init(void)
59099 * implementation, the non functional ones will return
59100 * -ENOSYS.
59101 */
59102+ oldfs = get_fs();
59103+ set_fs(USER_DS);
59104 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
59105+ set_fs(oldfs);
59106 if (curval == -EFAULT)
59107 futex_cmpxchg_enabled = 1;
59108
59109diff -urNp linux-2.6.32.41/kernel/futex_compat.c linux-2.6.32.41/kernel/futex_compat.c
59110--- linux-2.6.32.41/kernel/futex_compat.c 2011-03-27 14:31:47.000000000 -0400
59111+++ linux-2.6.32.41/kernel/futex_compat.c 2011-04-17 15:56:46.000000000 -0400
59112@@ -10,6 +10,7 @@
59113 #include <linux/compat.h>
59114 #include <linux/nsproxy.h>
59115 #include <linux/futex.h>
59116+#include <linux/ptrace.h>
59117
59118 #include <asm/uaccess.h>
59119
59120@@ -135,7 +136,10 @@ compat_sys_get_robust_list(int pid, comp
59121 {
59122 struct compat_robust_list_head __user *head;
59123 unsigned long ret;
59124- const struct cred *cred = current_cred(), *pcred;
59125+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
59126+ const struct cred *cred = current_cred();
59127+ const struct cred *pcred;
59128+#endif
59129
59130 if (!futex_cmpxchg_enabled)
59131 return -ENOSYS;
59132@@ -151,11 +155,16 @@ compat_sys_get_robust_list(int pid, comp
59133 if (!p)
59134 goto err_unlock;
59135 ret = -EPERM;
59136+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59137+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
59138+ goto err_unlock;
59139+#else
59140 pcred = __task_cred(p);
59141 if (cred->euid != pcred->euid &&
59142 cred->euid != pcred->uid &&
59143 !capable(CAP_SYS_PTRACE))
59144 goto err_unlock;
59145+#endif
59146 head = p->compat_robust_list;
59147 read_unlock(&tasklist_lock);
59148 }
59149diff -urNp linux-2.6.32.41/kernel/gcov/base.c linux-2.6.32.41/kernel/gcov/base.c
59150--- linux-2.6.32.41/kernel/gcov/base.c 2011-03-27 14:31:47.000000000 -0400
59151+++ linux-2.6.32.41/kernel/gcov/base.c 2011-04-17 15:56:46.000000000 -0400
59152@@ -102,11 +102,6 @@ void gcov_enable_events(void)
59153 }
59154
59155 #ifdef CONFIG_MODULES
59156-static inline int within(void *addr, void *start, unsigned long size)
59157-{
59158- return ((addr >= start) && (addr < start + size));
59159-}
59160-
59161 /* Update list and generate events when modules are unloaded. */
59162 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
59163 void *data)
59164@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
59165 prev = NULL;
59166 /* Remove entries located in module from linked list. */
59167 for (info = gcov_info_head; info; info = info->next) {
59168- if (within(info, mod->module_core, mod->core_size)) {
59169+ if (within_module_core_rw((unsigned long)info, mod)) {
59170 if (prev)
59171 prev->next = info->next;
59172 else
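
The gcov hunk drops the file-local within() helper in favour of within_module_core_rw(), since with the RW/RX module split introduced elsewhere in this patch the question becomes specifically whether an address falls in the module's writable core. The underlying test is the usual half-open range check; a minimal sketch with made-up addresses:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* true if addr lies in the half-open interval [start, start + size) */
static bool within(uintptr_t addr, uintptr_t start, size_t size)
{
	return addr >= start && addr - start < size;
}

int main(void)
{
	uintptr_t core_rw_start = 0x1000;	/* hypothetical writable module core */
	size_t core_rw_size = 0x800;

	printf("%d\n", within(0x1400, core_rw_start, core_rw_size));	/* 1 */
	printf("%d\n", within(0x1800, core_rw_start, core_rw_size));	/* 0: end is exclusive */
	return 0;
}
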
59173diff -urNp linux-2.6.32.41/kernel/hrtimer.c linux-2.6.32.41/kernel/hrtimer.c
59174--- linux-2.6.32.41/kernel/hrtimer.c 2011-03-27 14:31:47.000000000 -0400
59175+++ linux-2.6.32.41/kernel/hrtimer.c 2011-04-17 15:56:46.000000000 -0400
59176@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
59177 local_irq_restore(flags);
59178 }
59179
59180-static void run_hrtimer_softirq(struct softirq_action *h)
59181+static void run_hrtimer_softirq(void)
59182 {
59183 hrtimer_peek_ahead_timers();
59184 }
59185diff -urNp linux-2.6.32.41/kernel/kallsyms.c linux-2.6.32.41/kernel/kallsyms.c
59186--- linux-2.6.32.41/kernel/kallsyms.c 2011-03-27 14:31:47.000000000 -0400
59187+++ linux-2.6.32.41/kernel/kallsyms.c 2011-04-17 15:56:46.000000000 -0400
59188@@ -11,6 +11,9 @@
59189 * Changed the compression method from stem compression to "table lookup"
59190 * compression (see scripts/kallsyms.c for a more complete description)
59191 */
59192+#ifdef CONFIG_GRKERNSEC_HIDESYM
59193+#define __INCLUDED_BY_HIDESYM 1
59194+#endif
59195 #include <linux/kallsyms.h>
59196 #include <linux/module.h>
59197 #include <linux/init.h>
59198@@ -51,12 +54,33 @@ extern const unsigned long kallsyms_mark
59199
59200 static inline int is_kernel_inittext(unsigned long addr)
59201 {
59202+ if (system_state != SYSTEM_BOOTING)
59203+ return 0;
59204+
59205 if (addr >= (unsigned long)_sinittext
59206 && addr <= (unsigned long)_einittext)
59207 return 1;
59208 return 0;
59209 }
59210
59211+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
59212+#ifdef CONFIG_MODULES
59213+static inline int is_module_text(unsigned long addr)
59214+{
59215+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
59216+ return 1;
59217+
59218+ addr = ktla_ktva(addr);
59219+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
59220+}
59221+#else
59222+static inline int is_module_text(unsigned long addr)
59223+{
59224+ return 0;
59225+}
59226+#endif
59227+#endif
59228+
59229 static inline int is_kernel_text(unsigned long addr)
59230 {
59231 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
59232@@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigne
59233
59234 static inline int is_kernel(unsigned long addr)
59235 {
59236+
59237+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
59238+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
59239+ return 1;
59240+
59241+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
59242+#else
59243 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
59244+#endif
59245+
59246 return 1;
59247 return in_gate_area_no_task(addr);
59248 }
59249
59250 static int is_ksym_addr(unsigned long addr)
59251 {
59252+
59253+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
59254+ if (is_module_text(addr))
59255+ return 0;
59256+#endif
59257+
59258 if (all_var)
59259 return is_kernel(addr);
59260
59261@@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(st
59262
59263 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
59264 {
59265- iter->name[0] = '\0';
59266 iter->nameoff = get_symbol_offset(new_pos);
59267 iter->pos = new_pos;
59268 }
59269@@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, vo
59270 {
59271 struct kallsym_iter *iter = m->private;
59272
59273+#ifdef CONFIG_GRKERNSEC_HIDESYM
59274+ if (current_uid())
59275+ return 0;
59276+#endif
59277+
59278 /* Some debugging symbols have no name. Ignore them. */
59279 if (!iter->name[0])
59280 return 0;
59281@@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *i
59282 struct kallsym_iter *iter;
59283 int ret;
59284
59285- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
59286+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
59287 if (!iter)
59288 return -ENOMEM;
59289 reset_iter(iter, 0);
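
In kallsyms, the iterator is now allocated with kzalloc() instead of kmalloc(), which is why reset_iter() no longer clears iter->name by hand: the whole structure starts out zeroed, so stale heap bytes can never reach userspace through /proc/kallsyms. The userspace analogue is the malloc()/calloc() distinction:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct iter {
	char name[128];
	unsigned long value;
};

int main(void)
{
	struct iter *a = malloc(sizeof(*a));	/* indeterminate contents, like kmalloc */
	struct iter *b = calloc(1, sizeof(*b));	/* guaranteed zeroed, like kzalloc */

	if (!a || !b)
		return 1;

	/* b->name is a valid empty string; a->name may still hold old heap data */
	printf("calloc'd name length: %zu\n", strlen(b->name));

	free(a);
	free(b);
	return 0;
}
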
59290diff -urNp linux-2.6.32.41/kernel/kgdb.c linux-2.6.32.41/kernel/kgdb.c
59291--- linux-2.6.32.41/kernel/kgdb.c 2011-04-17 17:00:52.000000000 -0400
59292+++ linux-2.6.32.41/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
59293@@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
59294 /* Guard for recursive entry */
59295 static int exception_level;
59296
59297-static struct kgdb_io *kgdb_io_ops;
59298+static const struct kgdb_io *kgdb_io_ops;
59299 static DEFINE_SPINLOCK(kgdb_registration_lock);
59300
59301 /* kgdb console driver is loaded */
59302@@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1)
59303 */
59304 static atomic_t passive_cpu_wait[NR_CPUS];
59305 static atomic_t cpu_in_kgdb[NR_CPUS];
59306-atomic_t kgdb_setting_breakpoint;
59307+atomic_unchecked_t kgdb_setting_breakpoint;
59308
59309 struct task_struct *kgdb_usethread;
59310 struct task_struct *kgdb_contthread;
59311@@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBY
59312 sizeof(unsigned long)];
59313
59314 /* to keep track of the CPU which is doing the single stepping*/
59315-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
59316+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
59317
59318 /*
59319 * If you are debugging a problem where roundup (the collection of
59320@@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
59321 return 0;
59322 if (kgdb_connected)
59323 return 1;
59324- if (atomic_read(&kgdb_setting_breakpoint))
59325+ if (atomic_read_unchecked(&kgdb_setting_breakpoint))
59326 return 1;
59327 if (print_wait)
59328 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
59329@@ -1426,8 +1426,8 @@ acquirelock:
59330 * instance of the exception handler wanted to come into the
59331 * debugger on a different CPU via a single step
59332 */
59333- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
59334- atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
59335+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
59336+ atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
59337
59338 atomic_set(&kgdb_active, -1);
59339 touch_softlockup_watchdog();
59340@@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void
59341 *
59342 * Register it with the KGDB core.
59343 */
59344-int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
59345+int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
59346 {
59347 int err;
59348
59349@@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_modul
59350 *
59351 * Unregister it with the KGDB core.
59352 */
59353-void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
59354+void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
59355 {
59356 BUG_ON(kgdb_connected);
59357
59358@@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_mod
59359 */
59360 void kgdb_breakpoint(void)
59361 {
59362- atomic_set(&kgdb_setting_breakpoint, 1);
59363+ atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
59364 wmb(); /* Sync point before breakpoint */
59365 arch_kgdb_breakpoint();
59366 wmb(); /* Sync point after breakpoint */
59367- atomic_set(&kgdb_setting_breakpoint, 0);
59368+ atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
59369 }
59370 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
59371
59372diff -urNp linux-2.6.32.41/kernel/kmod.c linux-2.6.32.41/kernel/kmod.c
59373--- linux-2.6.32.41/kernel/kmod.c 2011-03-27 14:31:47.000000000 -0400
59374+++ linux-2.6.32.41/kernel/kmod.c 2011-04-17 15:56:46.000000000 -0400
59375@@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
59376 * If module auto-loading support is disabled then this function
59377 * becomes a no-operation.
59378 */
59379-int __request_module(bool wait, const char *fmt, ...)
59380+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
59381 {
59382- va_list args;
59383 char module_name[MODULE_NAME_LEN];
59384 unsigned int max_modprobes;
59385 int ret;
59386- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
59387+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
59388 static char *envp[] = { "HOME=/",
59389 "TERM=linux",
59390 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
59391@@ -84,12 +83,24 @@ int __request_module(bool wait, const ch
59392 if (ret)
59393 return ret;
59394
59395- va_start(args, fmt);
59396- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
59397- va_end(args);
59398+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
59399 if (ret >= MODULE_NAME_LEN)
59400 return -ENAMETOOLONG;
59401
59402+#ifdef CONFIG_GRKERNSEC_MODHARDEN
59403+ if (!current_uid()) {
59404+ /* hack to workaround consolekit/udisks stupidity */
59405+ read_lock(&tasklist_lock);
59406+ if (!strcmp(current->comm, "mount") &&
59407+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
59408+ read_unlock(&tasklist_lock);
59409+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
59410+ return -EPERM;
59411+ }
59412+ read_unlock(&tasklist_lock);
59413+ }
59414+#endif
59415+
59416 /* If modprobe needs a service that is in a module, we get a recursive
59417 * loop. Limit the number of running kmod threads to max_threads/2 or
59418 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
59419@@ -121,6 +132,48 @@ int __request_module(bool wait, const ch
59420 atomic_dec(&kmod_concurrent);
59421 return ret;
59422 }
59423+
59424+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
59425+{
59426+ va_list args;
59427+ int ret;
59428+
59429+ va_start(args, fmt);
59430+ ret = ____request_module(wait, module_param, fmt, args);
59431+ va_end(args);
59432+
59433+ return ret;
59434+}
59435+
59436+int __request_module(bool wait, const char *fmt, ...)
59437+{
59438+ va_list args;
59439+ int ret;
59440+
59441+#ifdef CONFIG_GRKERNSEC_MODHARDEN
59442+ if (current_uid()) {
59443+ char module_param[MODULE_NAME_LEN];
59444+
59445+ memset(module_param, 0, sizeof(module_param));
59446+
59447+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
59448+
59449+ va_start(args, fmt);
59450+ ret = ____request_module(wait, module_param, fmt, args);
59451+ va_end(args);
59452+
59453+ return ret;
59454+ }
59455+#endif
59456+
59457+ va_start(args, fmt);
59458+ ret = ____request_module(wait, NULL, fmt, args);
59459+ va_end(args);
59460+
59461+ return ret;
59462+}
59463+
59464+
59465 EXPORT_SYMBOL(__request_module);
59466 #endif /* CONFIG_MODULES */
59467
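The kmod.c rework above is a standard va_list-forwarding split: the formatting and modprobe setup move into ____request_module(), which takes a va_list plus an optional extra modprobe argument, while two thin variadic wrappers (___request_module() for MODHARDEN callers and the original __request_module()) start and end the list and delegate. A self-contained userspace sketch of the same pattern, with made-up names (do_request, request_with_arg, request), looks like this:

#include <stdarg.h>
#include <stdio.h>

/* Core helper: formats the name and sees the optional extra argument.
 * Mirrors ____request_module() in shape only. */
static int do_request(const char *extra_arg, const char *fmt, va_list ap)
{
	char name[64];

	if (vsnprintf(name, sizeof(name), fmt, ap) >= (int)sizeof(name))
		return -1;	/* name too long, like -ENAMETOOLONG */
	printf("load %s (extra: %s)\n", name, extra_arg ? extra_arg : "none");
	return 0;
}

/* Variadic front-end carrying the extra argument (cf. ___request_module). */
static int request_with_arg(const char *extra_arg, const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = do_request(extra_arg, fmt, ap);
	va_end(ap);
	return ret;
}

/* Plain front-end keeping the historical signature (cf. __request_module). */
static int request(const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = do_request(NULL, fmt, ap);
	va_end(ap);
	return ret;
}

int main(void)
{
	request("fs-%s", "ext4");
	return request_with_arg("grsec_modharden_normal1000_", "net-pf-%d", 10);
}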
59468diff -urNp linux-2.6.32.41/kernel/kprobes.c linux-2.6.32.41/kernel/kprobes.c
59469--- linux-2.6.32.41/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
59470+++ linux-2.6.32.41/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
59471@@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_
59472 * kernel image and loaded module images reside. This is required
59473 * so x86_64 can correctly handle the %rip-relative fixups.
59474 */
59475- kip->insns = module_alloc(PAGE_SIZE);
59476+ kip->insns = module_alloc_exec(PAGE_SIZE);
59477 if (!kip->insns) {
59478 kfree(kip);
59479 return NULL;
59480@@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(st
59481 */
59482 if (!list_is_singular(&kprobe_insn_pages)) {
59483 list_del(&kip->list);
59484- module_free(NULL, kip->insns);
59485+ module_free_exec(NULL, kip->insns);
59486 kfree(kip);
59487 }
59488 return 1;
59489@@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
59490 {
59491 int i, err = 0;
59492 unsigned long offset = 0, size = 0;
59493- char *modname, namebuf[128];
59494+ char *modname, namebuf[KSYM_NAME_LEN];
59495 const char *symbol_name;
59496 void *addr;
59497 struct kprobe_blackpoint *kb;
59498@@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(st
59499 const char *sym = NULL;
59500 unsigned int i = *(loff_t *) v;
59501 unsigned long offset = 0;
59502- char *modname, namebuf[128];
59503+ char *modname, namebuf[KSYM_NAME_LEN];
59504
59505 head = &kprobe_table[i];
59506 preempt_disable();
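Besides moving the instruction slots to module_alloc_exec()/module_free_exec(), the kprobes hunks replace the magic namebuf[128] with KSYM_NAME_LEN, the buffer size kallsyms_lookup() expects for the name it writes back. A rough sketch of the lookup that buffer feeds, wrapped in an illustrative show_symbol() helper that is not part of the patch, is:

#include <linux/kernel.h>
#include <linux/kallsyms.h>

/* Sketch: resolve an address the way the kprobes code above does;
 * namebuf must hold KSYM_NAME_LEN bytes for kallsyms_lookup(). */
static void show_symbol(unsigned long addr)
{
	unsigned long size = 0, offset = 0;
	char *modname = NULL;
	char namebuf[KSYM_NAME_LEN];
	const char *sym;

	sym = kallsyms_lookup(addr, &size, &offset, &modname, namebuf);
	if (sym)
		printk(KERN_INFO "%lx is %s+%#lx [%s]\n",
		       addr, sym, offset, modname ? modname : "kernel");
}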
59507diff -urNp linux-2.6.32.41/kernel/lockdep.c linux-2.6.32.41/kernel/lockdep.c
59508--- linux-2.6.32.41/kernel/lockdep.c 2011-03-27 14:31:47.000000000 -0400
59509+++ linux-2.6.32.41/kernel/lockdep.c 2011-04-17 15:56:46.000000000 -0400
59510@@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_t
59511 /*
59512 * Various lockdep statistics:
59513 */
59514-atomic_t chain_lookup_hits;
59515-atomic_t chain_lookup_misses;
59516-atomic_t hardirqs_on_events;
59517-atomic_t hardirqs_off_events;
59518-atomic_t redundant_hardirqs_on;
59519-atomic_t redundant_hardirqs_off;
59520-atomic_t softirqs_on_events;
59521-atomic_t softirqs_off_events;
59522-atomic_t redundant_softirqs_on;
59523-atomic_t redundant_softirqs_off;
59524-atomic_t nr_unused_locks;
59525-atomic_t nr_cyclic_checks;
59526-atomic_t nr_find_usage_forwards_checks;
59527-atomic_t nr_find_usage_backwards_checks;
59528+atomic_unchecked_t chain_lookup_hits;
59529+atomic_unchecked_t chain_lookup_misses;
59530+atomic_unchecked_t hardirqs_on_events;
59531+atomic_unchecked_t hardirqs_off_events;
59532+atomic_unchecked_t redundant_hardirqs_on;
59533+atomic_unchecked_t redundant_hardirqs_off;
59534+atomic_unchecked_t softirqs_on_events;
59535+atomic_unchecked_t softirqs_off_events;
59536+atomic_unchecked_t redundant_softirqs_on;
59537+atomic_unchecked_t redundant_softirqs_off;
59538+atomic_unchecked_t nr_unused_locks;
59539+atomic_unchecked_t nr_cyclic_checks;
59540+atomic_unchecked_t nr_find_usage_forwards_checks;
59541+atomic_unchecked_t nr_find_usage_backwards_checks;
59542 #endif
59543
59544 /*
59545@@ -577,6 +577,10 @@ static int static_obj(void *obj)
59546 int i;
59547 #endif
59548
59549+#ifdef CONFIG_PAX_KERNEXEC
59550+ start = ktla_ktva(start);
59551+#endif
59552+
59553 /*
59554 * static variable?
59555 */
59556@@ -592,8 +596,7 @@ static int static_obj(void *obj)
59557 */
59558 for_each_possible_cpu(i) {
59559 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
59560- end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
59561- + per_cpu_offset(i);
59562+ end = start + PERCPU_ENOUGH_ROOM;
59563
59564 if ((addr >= start) && (addr < end))
59565 return 1;
59566@@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *
59567 if (!static_obj(lock->key)) {
59568 debug_locks_off();
59569 printk("INFO: trying to register non-static key.\n");
59570+ printk("lock:%pS key:%pS.\n", lock, lock->key);
59571 printk("the code is fine but needs lockdep annotation.\n");
59572 printk("turning off the locking correctness validator.\n");
59573 dump_stack();
59574@@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep
59575 if (!class)
59576 return 0;
59577 }
59578- debug_atomic_inc((atomic_t *)&class->ops);
59579+ debug_atomic_inc((atomic_unchecked_t *)&class->ops);
59580 if (very_verbose(class)) {
59581 printk("\nacquire class [%p] %s", class->key, class->name);
59582 if (class->name_version > 1)
59583diff -urNp linux-2.6.32.41/kernel/lockdep_internals.h linux-2.6.32.41/kernel/lockdep_internals.h
59584--- linux-2.6.32.41/kernel/lockdep_internals.h 2011-03-27 14:31:47.000000000 -0400
59585+++ linux-2.6.32.41/kernel/lockdep_internals.h 2011-04-17 15:56:46.000000000 -0400
59586@@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_
59587 /*
59588 * Various lockdep statistics:
59589 */
59590-extern atomic_t chain_lookup_hits;
59591-extern atomic_t chain_lookup_misses;
59592-extern atomic_t hardirqs_on_events;
59593-extern atomic_t hardirqs_off_events;
59594-extern atomic_t redundant_hardirqs_on;
59595-extern atomic_t redundant_hardirqs_off;
59596-extern atomic_t softirqs_on_events;
59597-extern atomic_t softirqs_off_events;
59598-extern atomic_t redundant_softirqs_on;
59599-extern atomic_t redundant_softirqs_off;
59600-extern atomic_t nr_unused_locks;
59601-extern atomic_t nr_cyclic_checks;
59602-extern atomic_t nr_cyclic_check_recursions;
59603-extern atomic_t nr_find_usage_forwards_checks;
59604-extern atomic_t nr_find_usage_forwards_recursions;
59605-extern atomic_t nr_find_usage_backwards_checks;
59606-extern atomic_t nr_find_usage_backwards_recursions;
59607-# define debug_atomic_inc(ptr) atomic_inc(ptr)
59608-# define debug_atomic_dec(ptr) atomic_dec(ptr)
59609-# define debug_atomic_read(ptr) atomic_read(ptr)
59610+extern atomic_unchecked_t chain_lookup_hits;
59611+extern atomic_unchecked_t chain_lookup_misses;
59612+extern atomic_unchecked_t hardirqs_on_events;
59613+extern atomic_unchecked_t hardirqs_off_events;
59614+extern atomic_unchecked_t redundant_hardirqs_on;
59615+extern atomic_unchecked_t redundant_hardirqs_off;
59616+extern atomic_unchecked_t softirqs_on_events;
59617+extern atomic_unchecked_t softirqs_off_events;
59618+extern atomic_unchecked_t redundant_softirqs_on;
59619+extern atomic_unchecked_t redundant_softirqs_off;
59620+extern atomic_unchecked_t nr_unused_locks;
59621+extern atomic_unchecked_t nr_cyclic_checks;
59622+extern atomic_unchecked_t nr_cyclic_check_recursions;
59623+extern atomic_unchecked_t nr_find_usage_forwards_checks;
59624+extern atomic_unchecked_t nr_find_usage_forwards_recursions;
59625+extern atomic_unchecked_t nr_find_usage_backwards_checks;
59626+extern atomic_unchecked_t nr_find_usage_backwards_recursions;
59627+# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
59628+# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
59629+# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
59630 #else
59631 # define debug_atomic_inc(ptr) do { } while (0)
59632 # define debug_atomic_dec(ptr) do { } while (0)
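The wholesale atomic_t to atomic_unchecked_t conversion here (and in kernel/lockdep.c above, plus kernel/profile.c and kernel/perf_event.c below) marks counters whose wraparound is harmless, so the PaX REFCOUNT overflow detection leaves them alone. The actual definitions live in the per-architecture atomic headers elsewhere in the patch, not in this excerpt; as a rough, assumed sketch, the unchecked variant is the same plain counter with helpers that skip the overflow trap:

/* Sketch, not the patch's literal definition: an "unchecked" atomic is
 * the same bare counter; its read/set helpers are plain accesses, and
 * the arithmetic helpers mirror the regular arch implementations minus
 * the REFCOUNT overflow check. */
typedef struct {
	int counter;
} atomic_unchecked_t;

#define ATOMIC_INIT_UNCHECKED(i)	{ (i) }

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return v->counter;
}

static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
{
	v->counter = i;
}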
59633diff -urNp linux-2.6.32.41/kernel/lockdep_proc.c linux-2.6.32.41/kernel/lockdep_proc.c
59634--- linux-2.6.32.41/kernel/lockdep_proc.c 2011-03-27 14:31:47.000000000 -0400
59635+++ linux-2.6.32.41/kernel/lockdep_proc.c 2011-04-17 15:56:46.000000000 -0400
59636@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
59637
59638 static void print_name(struct seq_file *m, struct lock_class *class)
59639 {
59640- char str[128];
59641+ char str[KSYM_NAME_LEN];
59642 const char *name = class->name;
59643
59644 if (!name) {
59645diff -urNp linux-2.6.32.41/kernel/module.c linux-2.6.32.41/kernel/module.c
59646--- linux-2.6.32.41/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
59647+++ linux-2.6.32.41/kernel/module.c 2011-04-29 18:52:40.000000000 -0400
59648@@ -55,6 +55,7 @@
59649 #include <linux/async.h>
59650 #include <linux/percpu.h>
59651 #include <linux/kmemleak.h>
59652+#include <linux/grsecurity.h>
59653
59654 #define CREATE_TRACE_POINTS
59655 #include <trace/events/module.h>
59656@@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq
59657 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
59658
59659 /* Bounds of module allocation, for speeding __module_address */
59660-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
59661+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
59662+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
59663
59664 int register_module_notifier(struct notifier_block * nb)
59665 {
59666@@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct
59667 return true;
59668
59669 list_for_each_entry_rcu(mod, &modules, list) {
59670- struct symsearch arr[] = {
59671+ struct symsearch modarr[] = {
59672 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
59673 NOT_GPL_ONLY, false },
59674 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
59675@@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct
59676 #endif
59677 };
59678
59679- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
59680+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
59681 return true;
59682 }
59683 return false;
59684@@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned lo
59685 void *ptr;
59686 int cpu;
59687
59688- if (align > PAGE_SIZE) {
59689+ if (align-1 >= PAGE_SIZE) {
59690 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
59691 name, align, PAGE_SIZE);
59692 align = PAGE_SIZE;
59693@@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resol
59694 * /sys/module/foo/sections stuff
59695 * J. Corbet <corbet@lwn.net>
59696 */
59697-#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
59698+#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59699
59700 static inline bool sect_empty(const Elf_Shdr *sect)
59701 {
59702@@ -1545,7 +1547,8 @@ static void free_module(struct module *m
59703 destroy_params(mod->kp, mod->num_kp);
59704
59705 /* This may be NULL, but that's OK */
59706- module_free(mod, mod->module_init);
59707+ module_free(mod, mod->module_init_rw);
59708+ module_free_exec(mod, mod->module_init_rx);
59709 kfree(mod->args);
59710 if (mod->percpu)
59711 percpu_modfree(mod->percpu);
59712@@ -1554,10 +1557,12 @@ static void free_module(struct module *m
59713 percpu_modfree(mod->refptr);
59714 #endif
59715 /* Free lock-classes: */
59716- lockdep_free_key_range(mod->module_core, mod->core_size);
59717+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
59718+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
59719
59720 /* Finally, free the core (containing the module structure) */
59721- module_free(mod, mod->module_core);
59722+ module_free_exec(mod, mod->module_core_rx);
59723+ module_free(mod, mod->module_core_rw);
59724
59725 #ifdef CONFIG_MPU
59726 update_protections(current->mm);
59727@@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *se
59728 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
59729 int ret = 0;
59730 const struct kernel_symbol *ksym;
59731+#ifdef CONFIG_GRKERNSEC_MODHARDEN
59732+ int is_fs_load = 0;
59733+ int register_filesystem_found = 0;
59734+ char *p;
59735+
59736+ p = strstr(mod->args, "grsec_modharden_fs");
59737+
59738+ if (p) {
59739+ char *endptr = p + strlen("grsec_modharden_fs");
59740+ /* copy \0 as well */
59741+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
59742+ is_fs_load = 1;
59743+ }
59744+#endif
59745+
59746
59747 for (i = 1; i < n; i++) {
59748+#ifdef CONFIG_GRKERNSEC_MODHARDEN
59749+ const char *name = strtab + sym[i].st_name;
59750+
59751+ /* it's a real shame this will never get ripped and copied
59752+ upstream! ;(
59753+ */
59754+ if (is_fs_load && !strcmp(name, "register_filesystem"))
59755+ register_filesystem_found = 1;
59756+#endif
59757 switch (sym[i].st_shndx) {
59758 case SHN_COMMON:
59759 /* We compiled with -fno-common. These are not
59760@@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *se
59761 strtab + sym[i].st_name, mod);
59762 /* Ok if resolved. */
59763 if (ksym) {
59764+ pax_open_kernel();
59765 sym[i].st_value = ksym->value;
59766+ pax_close_kernel();
59767 break;
59768 }
59769
59770@@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *se
59771 secbase = (unsigned long)mod->percpu;
59772 else
59773 secbase = sechdrs[sym[i].st_shndx].sh_addr;
59774+ pax_open_kernel();
59775 sym[i].st_value += secbase;
59776+ pax_close_kernel();
59777 break;
59778 }
59779 }
59780
59781+#ifdef CONFIG_GRKERNSEC_MODHARDEN
59782+ if (is_fs_load && !register_filesystem_found) {
59783+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
59784+ ret = -EPERM;
59785+ }
59786+#endif
59787+
59788 return ret;
59789 }
59790
59791@@ -1731,11 +1771,12 @@ static void layout_sections(struct modul
59792 || s->sh_entsize != ~0UL
59793 || strstarts(secstrings + s->sh_name, ".init"))
59794 continue;
59795- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
59796+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
59797+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
59798+ else
59799+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
59800 DEBUGP("\t%s\n", secstrings + s->sh_name);
59801 }
59802- if (m == 0)
59803- mod->core_text_size = mod->core_size;
59804 }
59805
59806 DEBUGP("Init section allocation order:\n");
59807@@ -1748,12 +1789,13 @@ static void layout_sections(struct modul
59808 || s->sh_entsize != ~0UL
59809 || !strstarts(secstrings + s->sh_name, ".init"))
59810 continue;
59811- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
59812- | INIT_OFFSET_MASK);
59813+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
59814+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
59815+ else
59816+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
59817+ s->sh_entsize |= INIT_OFFSET_MASK;
59818 DEBUGP("\t%s\n", secstrings + s->sh_name);
59819 }
59820- if (m == 0)
59821- mod->init_text_size = mod->init_size;
59822 }
59823 }
59824
59825@@ -1857,9 +1899,8 @@ static int is_exported(const char *name,
59826
59827 /* As per nm */
59828 static char elf_type(const Elf_Sym *sym,
59829- Elf_Shdr *sechdrs,
59830- const char *secstrings,
59831- struct module *mod)
59832+ const Elf_Shdr *sechdrs,
59833+ const char *secstrings)
59834 {
59835 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
59836 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
59837@@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struc
59838
59839 /* Put symbol section at end of init part of module. */
59840 symsect->sh_flags |= SHF_ALLOC;
59841- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
59842+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
59843 symindex) | INIT_OFFSET_MASK;
59844 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
59845
59846@@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struc
59847 }
59848
59849 /* Append room for core symbols at end of core part. */
59850- symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
59851- mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
59852+ symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
59853+ mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
59854
59855 /* Put string table section at end of init part of module. */
59856 strsect->sh_flags |= SHF_ALLOC;
59857- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
59858+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
59859 strindex) | INIT_OFFSET_MASK;
59860 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
59861
59862 /* Append room for core symbols' strings at end of core part. */
59863- *pstroffs = mod->core_size;
59864+ *pstroffs = mod->core_size_rx;
59865 __set_bit(0, strmap);
59866- mod->core_size += bitmap_weight(strmap, strsect->sh_size);
59867+ mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
59868
59869 return symoffs;
59870 }
59871@@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *
59872 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
59873 mod->strtab = (void *)sechdrs[strindex].sh_addr;
59874
59875+ pax_open_kernel();
59876+
59877 /* Set types up while we still have access to sections. */
59878 for (i = 0; i < mod->num_symtab; i++)
59879 mod->symtab[i].st_info
59880- = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
59881+ = elf_type(&mod->symtab[i], sechdrs, secstrings);
59882
59883- mod->core_symtab = dst = mod->module_core + symoffs;
59884+ mod->core_symtab = dst = mod->module_core_rx + symoffs;
59885 src = mod->symtab;
59886 *dst = *src;
59887 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
59888@@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *
59889 }
59890 mod->core_num_syms = ndst;
59891
59892- mod->core_strtab = s = mod->module_core + stroffs;
59893+ mod->core_strtab = s = mod->module_core_rx + stroffs;
59894 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
59895 if (test_bit(i, strmap))
59896 *++s = mod->strtab[i];
59897+
59898+ pax_close_kernel();
59899 }
59900 #else
59901 static inline unsigned long layout_symtab(struct module *mod,
59902@@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _
59903 #endif
59904 }
59905
59906-static void *module_alloc_update_bounds(unsigned long size)
59907+static void *module_alloc_update_bounds_rw(unsigned long size)
59908 {
59909 void *ret = module_alloc(size);
59910
59911 if (ret) {
59912 /* Update module bounds. */
59913- if ((unsigned long)ret < module_addr_min)
59914- module_addr_min = (unsigned long)ret;
59915- if ((unsigned long)ret + size > module_addr_max)
59916- module_addr_max = (unsigned long)ret + size;
59917+ if ((unsigned long)ret < module_addr_min_rw)
59918+ module_addr_min_rw = (unsigned long)ret;
59919+ if ((unsigned long)ret + size > module_addr_max_rw)
59920+ module_addr_max_rw = (unsigned long)ret + size;
59921+ }
59922+ return ret;
59923+}
59924+
59925+static void *module_alloc_update_bounds_rx(unsigned long size)
59926+{
59927+ void *ret = module_alloc_exec(size);
59928+
59929+ if (ret) {
59930+ /* Update module bounds. */
59931+ if ((unsigned long)ret < module_addr_min_rx)
59932+ module_addr_min_rx = (unsigned long)ret;
59933+ if ((unsigned long)ret + size > module_addr_max_rx)
59934+ module_addr_max_rx = (unsigned long)ret + size;
59935 }
59936 return ret;
59937 }
59938@@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct
59939 unsigned int i;
59940
59941 /* only scan the sections containing data */
59942- kmemleak_scan_area(mod->module_core, (unsigned long)mod -
59943- (unsigned long)mod->module_core,
59944+ kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
59945+ (unsigned long)mod->module_core_rw,
59946 sizeof(struct module), GFP_KERNEL);
59947
59948 for (i = 1; i < hdr->e_shnum; i++) {
59949@@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct
59950 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
59951 continue;
59952
59953- kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
59954- (unsigned long)mod->module_core,
59955+ kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
59956+ (unsigned long)mod->module_core_rw,
59957 sechdrs[i].sh_size, GFP_KERNEL);
59958 }
59959 }
59960@@ -2263,7 +2322,7 @@ static noinline struct module *load_modu
59961 secstrings, &stroffs, strmap);
59962
59963 /* Do the allocs. */
59964- ptr = module_alloc_update_bounds(mod->core_size);
59965+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
59966 /*
59967 * The pointer to this block is stored in the module structure
59968 * which is inside the block. Just mark it as not being a
59969@@ -2274,23 +2333,47 @@ static noinline struct module *load_modu
59970 err = -ENOMEM;
59971 goto free_percpu;
59972 }
59973- memset(ptr, 0, mod->core_size);
59974- mod->module_core = ptr;
59975+ memset(ptr, 0, mod->core_size_rw);
59976+ mod->module_core_rw = ptr;
59977
59978- ptr = module_alloc_update_bounds(mod->init_size);
59979+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
59980 /*
59981 * The pointer to this block is stored in the module structure
59982 * which is inside the block. This block doesn't need to be
59983 * scanned as it contains data and code that will be freed
59984 * after the module is initialized.
59985 */
59986- kmemleak_ignore(ptr);
59987- if (!ptr && mod->init_size) {
59988+ kmemleak_not_leak(ptr);
59989+ if (!ptr && mod->init_size_rw) {
59990+ err = -ENOMEM;
59991+ goto free_core_rw;
59992+ }
59993+ memset(ptr, 0, mod->init_size_rw);
59994+ mod->module_init_rw = ptr;
59995+
59996+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
59997+ kmemleak_not_leak(ptr);
59998+ if (!ptr) {
59999 err = -ENOMEM;
60000- goto free_core;
60001+ goto free_init_rw;
60002 }
60003- memset(ptr, 0, mod->init_size);
60004- mod->module_init = ptr;
60005+
60006+ pax_open_kernel();
60007+ memset(ptr, 0, mod->core_size_rx);
60008+ pax_close_kernel();
60009+ mod->module_core_rx = ptr;
60010+
60011+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
60012+ kmemleak_not_leak(ptr);
60013+ if (!ptr && mod->init_size_rx) {
60014+ err = -ENOMEM;
60015+ goto free_core_rx;
60016+ }
60017+
60018+ pax_open_kernel();
60019+ memset(ptr, 0, mod->init_size_rx);
60020+ pax_close_kernel();
60021+ mod->module_init_rx = ptr;
60022
60023 /* Transfer each section which specifies SHF_ALLOC */
60024 DEBUGP("final section addresses:\n");
60025@@ -2300,17 +2383,45 @@ static noinline struct module *load_modu
60026 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
60027 continue;
60028
60029- if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
60030- dest = mod->module_init
60031- + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
60032- else
60033- dest = mod->module_core + sechdrs[i].sh_entsize;
60034+ if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
60035+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
60036+ dest = mod->module_init_rw
60037+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
60038+ else
60039+ dest = mod->module_init_rx
60040+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
60041+ } else {
60042+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
60043+ dest = mod->module_core_rw + sechdrs[i].sh_entsize;
60044+ else
60045+ dest = mod->module_core_rx + sechdrs[i].sh_entsize;
60046+ }
60047+
60048+ if (sechdrs[i].sh_type != SHT_NOBITS) {
60049
60050- if (sechdrs[i].sh_type != SHT_NOBITS)
60051- memcpy(dest, (void *)sechdrs[i].sh_addr,
60052- sechdrs[i].sh_size);
60053+#ifdef CONFIG_PAX_KERNEXEC
60054+#ifdef CONFIG_X86_64
60055+ if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
60056+ set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
60057+#endif
60058+ if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
60059+ pax_open_kernel();
60060+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
60061+ pax_close_kernel();
60062+ } else
60063+#endif
60064+
60065+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
60066+ }
60067 /* Update sh_addr to point to copy in image. */
60068- sechdrs[i].sh_addr = (unsigned long)dest;
60069+
60070+#ifdef CONFIG_PAX_KERNEXEC
60071+ if (sechdrs[i].sh_flags & SHF_EXECINSTR)
60072+ sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
60073+ else
60074+#endif
60075+
60076+ sechdrs[i].sh_addr = (unsigned long)dest;
60077 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
60078 }
60079 /* Module has been moved. */
60080@@ -2322,7 +2433,7 @@ static noinline struct module *load_modu
60081 mod->name);
60082 if (!mod->refptr) {
60083 err = -ENOMEM;
60084- goto free_init;
60085+ goto free_init_rx;
60086 }
60087 #endif
60088 /* Now we've moved module, initialize linked lists, etc. */
60089@@ -2351,6 +2462,31 @@ static noinline struct module *load_modu
60090 /* Set up MODINFO_ATTR fields */
60091 setup_modinfo(mod, sechdrs, infoindex);
60092
60093+ mod->args = args;
60094+
60095+#ifdef CONFIG_GRKERNSEC_MODHARDEN
60096+ {
60097+ char *p, *p2;
60098+
60099+ if (strstr(mod->args, "grsec_modharden_netdev")) {
60100+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
60101+ err = -EPERM;
60102+ goto cleanup;
60103+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
60104+ p += strlen("grsec_modharden_normal");
60105+ p2 = strstr(p, "_");
60106+ if (p2) {
60107+ *p2 = '\0';
60108+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
60109+ *p2 = '_';
60110+ }
60111+ err = -EPERM;
60112+ goto cleanup;
60113+ }
60114+ }
60115+#endif
60116+
60117+
60118 /* Fix up syms, so that st_value is a pointer to location. */
60119 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
60120 mod);
60121@@ -2431,8 +2567,8 @@ static noinline struct module *load_modu
60122
60123 /* Now do relocations. */
60124 for (i = 1; i < hdr->e_shnum; i++) {
60125- const char *strtab = (char *)sechdrs[strindex].sh_addr;
60126 unsigned int info = sechdrs[i].sh_info;
60127+ strtab = (char *)sechdrs[strindex].sh_addr;
60128
60129 /* Not a valid relocation section? */
60130 if (info >= hdr->e_shnum)
60131@@ -2493,16 +2629,15 @@ static noinline struct module *load_modu
60132 * Do it before processing of module parameters, so the module
60133 * can provide parameter accessor functions of its own.
60134 */
60135- if (mod->module_init)
60136- flush_icache_range((unsigned long)mod->module_init,
60137- (unsigned long)mod->module_init
60138- + mod->init_size);
60139- flush_icache_range((unsigned long)mod->module_core,
60140- (unsigned long)mod->module_core + mod->core_size);
60141+ if (mod->module_init_rx)
60142+ flush_icache_range((unsigned long)mod->module_init_rx,
60143+ (unsigned long)mod->module_init_rx
60144+ + mod->init_size_rx);
60145+ flush_icache_range((unsigned long)mod->module_core_rx,
60146+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
60147
60148 set_fs(old_fs);
60149
60150- mod->args = args;
60151 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
60152 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
60153 mod->name);
60154@@ -2546,12 +2681,16 @@ static noinline struct module *load_modu
60155 free_unload:
60156 module_unload_free(mod);
60157 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
60158+ free_init_rx:
60159 percpu_modfree(mod->refptr);
60160- free_init:
60161 #endif
60162- module_free(mod, mod->module_init);
60163- free_core:
60164- module_free(mod, mod->module_core);
60165+ module_free_exec(mod, mod->module_init_rx);
60166+ free_core_rx:
60167+ module_free_exec(mod, mod->module_core_rx);
60168+ free_init_rw:
60169+ module_free(mod, mod->module_init_rw);
60170+ free_core_rw:
60171+ module_free(mod, mod->module_core_rw);
60172 /* mod will be freed with core. Don't access it beyond this line! */
60173 free_percpu:
60174 if (percpu)
60175@@ -2653,10 +2792,12 @@ SYSCALL_DEFINE3(init_module, void __user
60176 mod->symtab = mod->core_symtab;
60177 mod->strtab = mod->core_strtab;
60178 #endif
60179- module_free(mod, mod->module_init);
60180- mod->module_init = NULL;
60181- mod->init_size = 0;
60182- mod->init_text_size = 0;
60183+ module_free(mod, mod->module_init_rw);
60184+ module_free_exec(mod, mod->module_init_rx);
60185+ mod->module_init_rw = NULL;
60186+ mod->module_init_rx = NULL;
60187+ mod->init_size_rw = 0;
60188+ mod->init_size_rx = 0;
60189 mutex_unlock(&module_mutex);
60190
60191 return 0;
60192@@ -2687,10 +2828,16 @@ static const char *get_ksymbol(struct mo
60193 unsigned long nextval;
60194
60195 /* At worse, next value is at end of module */
60196- if (within_module_init(addr, mod))
60197- nextval = (unsigned long)mod->module_init+mod->init_text_size;
60198+ if (within_module_init_rx(addr, mod))
60199+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
60200+ else if (within_module_init_rw(addr, mod))
60201+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
60202+ else if (within_module_core_rx(addr, mod))
60203+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
60204+ else if (within_module_core_rw(addr, mod))
60205+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
60206 else
60207- nextval = (unsigned long)mod->module_core+mod->core_text_size;
60208+ return NULL;
60209
60210 /* Scan for closest preceeding symbol, and next symbol. (ELF
60211 starts real symbols at 1). */
60212@@ -2936,7 +3083,7 @@ static int m_show(struct seq_file *m, vo
60213 char buf[8];
60214
60215 seq_printf(m, "%s %u",
60216- mod->name, mod->init_size + mod->core_size);
60217+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
60218 print_unload_info(m, mod);
60219
60220 /* Informative for users. */
60221@@ -2945,7 +3092,7 @@ static int m_show(struct seq_file *m, vo
60222 mod->state == MODULE_STATE_COMING ? "Loading":
60223 "Live");
60224 /* Used by oprofile and other similar tools. */
60225- seq_printf(m, " 0x%p", mod->module_core);
60226+ seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
60227
60228 /* Taints info */
60229 if (mod->taints)
60230@@ -2981,7 +3128,17 @@ static const struct file_operations proc
60231
60232 static int __init proc_modules_init(void)
60233 {
60234+#ifndef CONFIG_GRKERNSEC_HIDESYM
60235+#ifdef CONFIG_GRKERNSEC_PROC_USER
60236+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
60237+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60238+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
60239+#else
60240 proc_create("modules", 0, NULL, &proc_modules_operations);
60241+#endif
60242+#else
60243+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
60244+#endif
60245 return 0;
60246 }
60247 module_init(proc_modules_init);
60248@@ -3040,12 +3197,12 @@ struct module *__module_address(unsigned
60249 {
60250 struct module *mod;
60251
60252- if (addr < module_addr_min || addr > module_addr_max)
60253+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
60254+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
60255 return NULL;
60256
60257 list_for_each_entry_rcu(mod, &modules, list)
60258- if (within_module_core(addr, mod)
60259- || within_module_init(addr, mod))
60260+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
60261 return mod;
60262 return NULL;
60263 }
60264@@ -3079,11 +3236,20 @@ bool is_module_text_address(unsigned lon
60265 */
60266 struct module *__module_text_address(unsigned long addr)
60267 {
60268- struct module *mod = __module_address(addr);
60269+ struct module *mod;
60270+
60271+#ifdef CONFIG_X86_32
60272+ addr = ktla_ktva(addr);
60273+#endif
60274+
60275+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
60276+ return NULL;
60277+
60278+ mod = __module_address(addr);
60279+
60280 if (mod) {
60281 /* Make sure it's within the text section. */
60282- if (!within(addr, mod->module_init, mod->init_text_size)
60283- && !within(addr, mod->module_core, mod->core_text_size))
60284+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
60285 mod = NULL;
60286 }
60287 return mod;
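The test repeated throughout layout_sections() and the SHF_ALLOC copy loop above is the entire policy behind the core_rw/core_rx and init_rw/init_rx split: writable or non-allocated ELF sections go to the RW block, everything else (code and read-only data) goes to the RX block that KERNEXEC keeps non-writable. Factored out as an illustrative helper (module_section_is_rw is not a name used by the patch), the predicate is:

#include <linux/elf.h>

/* Sketch: the classification rule the module.c hunks apply when
 * choosing between the mod->*_rw and mod->*_rx allocations. */
static inline int module_section_is_rw(const Elf_Shdr *s)
{
	return (s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC);
}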
60288diff -urNp linux-2.6.32.41/kernel/mutex.c linux-2.6.32.41/kernel/mutex.c
60289--- linux-2.6.32.41/kernel/mutex.c 2011-03-27 14:31:47.000000000 -0400
60290+++ linux-2.6.32.41/kernel/mutex.c 2011-04-17 15:56:46.000000000 -0400
60291@@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock,
60292 */
60293
60294 for (;;) {
60295- struct thread_info *owner;
60296+ struct task_struct *owner;
60297
60298 /*
60299 * If we own the BKL, then don't spin. The owner of
60300@@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock,
60301 spin_lock_mutex(&lock->wait_lock, flags);
60302
60303 debug_mutex_lock_common(lock, &waiter);
60304- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
60305+ debug_mutex_add_waiter(lock, &waiter, task);
60306
60307 /* add waiting tasks to the end of the waitqueue (FIFO): */
60308 list_add_tail(&waiter.list, &lock->wait_list);
60309@@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock,
60310 * TASK_UNINTERRUPTIBLE case.)
60311 */
60312 if (unlikely(signal_pending_state(state, task))) {
60313- mutex_remove_waiter(lock, &waiter,
60314- task_thread_info(task));
60315+ mutex_remove_waiter(lock, &waiter, task);
60316 mutex_release(&lock->dep_map, 1, ip);
60317 spin_unlock_mutex(&lock->wait_lock, flags);
60318
60319@@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock,
60320 done:
60321 lock_acquired(&lock->dep_map, ip);
60322 /* got the lock - rejoice! */
60323- mutex_remove_waiter(lock, &waiter, current_thread_info());
60324+ mutex_remove_waiter(lock, &waiter, task);
60325 mutex_set_owner(lock);
60326
60327 /* set it to 0 if there are no waiters left: */
60328diff -urNp linux-2.6.32.41/kernel/mutex-debug.c linux-2.6.32.41/kernel/mutex-debug.c
60329--- linux-2.6.32.41/kernel/mutex-debug.c 2011-03-27 14:31:47.000000000 -0400
60330+++ linux-2.6.32.41/kernel/mutex-debug.c 2011-04-17 15:56:46.000000000 -0400
60331@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
60332 }
60333
60334 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
60335- struct thread_info *ti)
60336+ struct task_struct *task)
60337 {
60338 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
60339
60340 /* Mark the current thread as blocked on the lock: */
60341- ti->task->blocked_on = waiter;
60342+ task->blocked_on = waiter;
60343 }
60344
60345 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
60346- struct thread_info *ti)
60347+ struct task_struct *task)
60348 {
60349 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
60350- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
60351- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
60352- ti->task->blocked_on = NULL;
60353+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
60354+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
60355+ task->blocked_on = NULL;
60356
60357 list_del_init(&waiter->list);
60358 waiter->task = NULL;
60359@@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lo
60360 return;
60361
60362 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
60363- DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
60364+ DEBUG_LOCKS_WARN_ON(lock->owner != current);
60365 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
60366 mutex_clear_owner(lock);
60367 }
60368diff -urNp linux-2.6.32.41/kernel/mutex-debug.h linux-2.6.32.41/kernel/mutex-debug.h
60369--- linux-2.6.32.41/kernel/mutex-debug.h 2011-03-27 14:31:47.000000000 -0400
60370+++ linux-2.6.32.41/kernel/mutex-debug.h 2011-04-17 15:56:46.000000000 -0400
60371@@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(stru
60372 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
60373 extern void debug_mutex_add_waiter(struct mutex *lock,
60374 struct mutex_waiter *waiter,
60375- struct thread_info *ti);
60376+ struct task_struct *task);
60377 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
60378- struct thread_info *ti);
60379+ struct task_struct *task);
60380 extern void debug_mutex_unlock(struct mutex *lock);
60381 extern void debug_mutex_init(struct mutex *lock, const char *name,
60382 struct lock_class_key *key);
60383
60384 static inline void mutex_set_owner(struct mutex *lock)
60385 {
60386- lock->owner = current_thread_info();
60387+ lock->owner = current;
60388 }
60389
60390 static inline void mutex_clear_owner(struct mutex *lock)
60391diff -urNp linux-2.6.32.41/kernel/mutex.h linux-2.6.32.41/kernel/mutex.h
60392--- linux-2.6.32.41/kernel/mutex.h 2011-03-27 14:31:47.000000000 -0400
60393+++ linux-2.6.32.41/kernel/mutex.h 2011-04-17 15:56:46.000000000 -0400
60394@@ -19,7 +19,7 @@
60395 #ifdef CONFIG_SMP
60396 static inline void mutex_set_owner(struct mutex *lock)
60397 {
60398- lock->owner = current_thread_info();
60399+ lock->owner = current;
60400 }
60401
60402 static inline void mutex_clear_owner(struct mutex *lock)
60403diff -urNp linux-2.6.32.41/kernel/panic.c linux-2.6.32.41/kernel/panic.c
60404--- linux-2.6.32.41/kernel/panic.c 2011-03-27 14:31:47.000000000 -0400
60405+++ linux-2.6.32.41/kernel/panic.c 2011-04-17 15:56:46.000000000 -0400
60406@@ -352,7 +352,7 @@ static void warn_slowpath_common(const c
60407 const char *board;
60408
60409 printk(KERN_WARNING "------------[ cut here ]------------\n");
60410- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
60411+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
60412 board = dmi_get_system_info(DMI_PRODUCT_NAME);
60413 if (board)
60414 printk(KERN_WARNING "Hardware name: %s\n", board);
60415@@ -392,7 +392,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
60416 */
60417 void __stack_chk_fail(void)
60418 {
60419- panic("stack-protector: Kernel stack is corrupted in: %p\n",
60420+ dump_stack();
60421+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
60422 __builtin_return_address(0));
60423 }
60424 EXPORT_SYMBOL(__stack_chk_fail);
60425diff -urNp linux-2.6.32.41/kernel/params.c linux-2.6.32.41/kernel/params.c
60426--- linux-2.6.32.41/kernel/params.c 2011-03-27 14:31:47.000000000 -0400
60427+++ linux-2.6.32.41/kernel/params.c 2011-04-17 15:56:46.000000000 -0400
60428@@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct
60429 return ret;
60430 }
60431
60432-static struct sysfs_ops module_sysfs_ops = {
60433+static const struct sysfs_ops module_sysfs_ops = {
60434 .show = module_attr_show,
60435 .store = module_attr_store,
60436 };
60437@@ -739,7 +739,7 @@ static int uevent_filter(struct kset *ks
60438 return 0;
60439 }
60440
60441-static struct kset_uevent_ops module_uevent_ops = {
60442+static const struct kset_uevent_ops module_uevent_ops = {
60443 .filter = uevent_filter,
60444 };
60445
60446diff -urNp linux-2.6.32.41/kernel/perf_event.c linux-2.6.32.41/kernel/perf_event.c
60447--- linux-2.6.32.41/kernel/perf_event.c 2011-04-17 17:00:52.000000000 -0400
60448+++ linux-2.6.32.41/kernel/perf_event.c 2011-05-04 17:56:28.000000000 -0400
60449@@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostl
60450 */
60451 int sysctl_perf_event_sample_rate __read_mostly = 100000;
60452
60453-static atomic64_t perf_event_id;
60454+static atomic64_unchecked_t perf_event_id;
60455
60456 /*
60457 * Lock for (sysadmin-configurable) event reservations:
60458@@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struc
60459 * In order to keep per-task stats reliable we need to flip the event
60460 * values when we flip the contexts.
60461 */
60462- value = atomic64_read(&next_event->count);
60463- value = atomic64_xchg(&event->count, value);
60464- atomic64_set(&next_event->count, value);
60465+ value = atomic64_read_unchecked(&next_event->count);
60466+ value = atomic64_xchg_unchecked(&event->count, value);
60467+ atomic64_set_unchecked(&next_event->count, value);
60468
60469 swap(event->total_time_enabled, next_event->total_time_enabled);
60470 swap(event->total_time_running, next_event->total_time_running);
60471@@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_e
60472 update_event_times(event);
60473 }
60474
60475- return atomic64_read(&event->count);
60476+ return atomic64_read_unchecked(&event->count);
60477 }
60478
60479 /*
60480@@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct
60481 values[n++] = 1 + leader->nr_siblings;
60482 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
60483 values[n++] = leader->total_time_enabled +
60484- atomic64_read(&leader->child_total_time_enabled);
60485+ atomic64_read_unchecked(&leader->child_total_time_enabled);
60486 }
60487 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
60488 values[n++] = leader->total_time_running +
60489- atomic64_read(&leader->child_total_time_running);
60490+ atomic64_read_unchecked(&leader->child_total_time_running);
60491 }
60492
60493 size = n * sizeof(u64);
60494@@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct pe
60495 values[n++] = perf_event_read_value(event);
60496 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
60497 values[n++] = event->total_time_enabled +
60498- atomic64_read(&event->child_total_time_enabled);
60499+ atomic64_read_unchecked(&event->child_total_time_enabled);
60500 }
60501 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
60502 values[n++] = event->total_time_running +
60503- atomic64_read(&event->child_total_time_running);
60504+ atomic64_read_unchecked(&event->child_total_time_running);
60505 }
60506 if (read_format & PERF_FORMAT_ID)
60507 values[n++] = primary_event_id(event);
60508@@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct fil
60509 static void perf_event_reset(struct perf_event *event)
60510 {
60511 (void)perf_event_read(event);
60512- atomic64_set(&event->count, 0);
60513+ atomic64_set_unchecked(&event->count, 0);
60514 perf_event_update_userpage(event);
60515 }
60516
60517@@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct p
60518 ++userpg->lock;
60519 barrier();
60520 userpg->index = perf_event_index(event);
60521- userpg->offset = atomic64_read(&event->count);
60522+ userpg->offset = atomic64_read_unchecked(&event->count);
60523 if (event->state == PERF_EVENT_STATE_ACTIVE)
60524- userpg->offset -= atomic64_read(&event->hw.prev_count);
60525+ userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
60526
60527 userpg->time_enabled = event->total_time_enabled +
60528- atomic64_read(&event->child_total_time_enabled);
60529+ atomic64_read_unchecked(&event->child_total_time_enabled);
60530
60531 userpg->time_running = event->total_time_running +
60532- atomic64_read(&event->child_total_time_running);
60533+ atomic64_read_unchecked(&event->child_total_time_running);
60534
60535 barrier();
60536 ++userpg->lock;
60537@@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct
60538 u64 values[4];
60539 int n = 0;
60540
60541- values[n++] = atomic64_read(&event->count);
60542+ values[n++] = atomic64_read_unchecked(&event->count);
60543 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
60544 values[n++] = event->total_time_enabled +
60545- atomic64_read(&event->child_total_time_enabled);
60546+ atomic64_read_unchecked(&event->child_total_time_enabled);
60547 }
60548 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
60549 values[n++] = event->total_time_running +
60550- atomic64_read(&event->child_total_time_running);
60551+ atomic64_read_unchecked(&event->child_total_time_running);
60552 }
60553 if (read_format & PERF_FORMAT_ID)
60554 values[n++] = primary_event_id(event);
60555@@ -2940,7 +2940,7 @@ static void perf_output_read_group(struc
60556 if (leader != event)
60557 leader->pmu->read(leader);
60558
60559- values[n++] = atomic64_read(&leader->count);
60560+ values[n++] = atomic64_read_unchecked(&leader->count);
60561 if (read_format & PERF_FORMAT_ID)
60562 values[n++] = primary_event_id(leader);
60563
60564@@ -2952,7 +2952,7 @@ static void perf_output_read_group(struc
60565 if (sub != event)
60566 sub->pmu->read(sub);
60567
60568- values[n++] = atomic64_read(&sub->count);
60569+ values[n++] = atomic64_read_unchecked(&sub->count);
60570 if (read_format & PERF_FORMAT_ID)
60571 values[n++] = primary_event_id(sub);
60572
60573@@ -3787,7 +3787,7 @@ static void perf_swevent_add(struct perf
60574 {
60575 struct hw_perf_event *hwc = &event->hw;
60576
60577- atomic64_add(nr, &event->count);
60578+ atomic64_add_unchecked(nr, &event->count);
60579
60580 if (!hwc->sample_period)
60581 return;
60582@@ -4044,9 +4044,9 @@ static void cpu_clock_perf_event_update(
60583 u64 now;
60584
60585 now = cpu_clock(cpu);
60586- prev = atomic64_read(&event->hw.prev_count);
60587- atomic64_set(&event->hw.prev_count, now);
60588- atomic64_add(now - prev, &event->count);
60589+ prev = atomic64_read_unchecked(&event->hw.prev_count);
60590+ atomic64_set_unchecked(&event->hw.prev_count, now);
60591+ atomic64_add_unchecked(now - prev, &event->count);
60592 }
60593
60594 static int cpu_clock_perf_event_enable(struct perf_event *event)
60595@@ -4054,7 +4054,7 @@ static int cpu_clock_perf_event_enable(s
60596 struct hw_perf_event *hwc = &event->hw;
60597 int cpu = raw_smp_processor_id();
60598
60599- atomic64_set(&hwc->prev_count, cpu_clock(cpu));
60600+ atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
60601 perf_swevent_start_hrtimer(event);
60602
60603 return 0;
60604@@ -4086,9 +4086,9 @@ static void task_clock_perf_event_update
60605 u64 prev;
60606 s64 delta;
60607
60608- prev = atomic64_xchg(&event->hw.prev_count, now);
60609+ prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
60610 delta = now - prev;
60611- atomic64_add(delta, &event->count);
60612+ atomic64_add_unchecked(delta, &event->count);
60613 }
60614
60615 static int task_clock_perf_event_enable(struct perf_event *event)
60616@@ -4098,7 +4098,7 @@ static int task_clock_perf_event_enable(
60617
60618 now = event->ctx->time;
60619
60620- atomic64_set(&hwc->prev_count, now);
60621+ atomic64_set_unchecked(&hwc->prev_count, now);
60622
60623 perf_swevent_start_hrtimer(event);
60624
60625@@ -4293,7 +4293,7 @@ perf_event_alloc(struct perf_event_attr
60626 event->parent = parent_event;
60627
60628 event->ns = get_pid_ns(current->nsproxy->pid_ns);
60629- event->id = atomic64_inc_return(&perf_event_id);
60630+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
60631
60632 event->state = PERF_EVENT_STATE_INACTIVE;
60633
60634@@ -4724,15 +4724,15 @@ static void sync_child_event(struct perf
60635 if (child_event->attr.inherit_stat)
60636 perf_event_read_event(child_event, child);
60637
60638- child_val = atomic64_read(&child_event->count);
60639+ child_val = atomic64_read_unchecked(&child_event->count);
60640
60641 /*
60642 * Add back the child's count to the parent's count:
60643 */
60644- atomic64_add(child_val, &parent_event->count);
60645- atomic64_add(child_event->total_time_enabled,
60646+ atomic64_add_unchecked(child_val, &parent_event->count);
60647+ atomic64_add_unchecked(child_event->total_time_enabled,
60648 &parent_event->child_total_time_enabled);
60649- atomic64_add(child_event->total_time_running,
60650+ atomic64_add_unchecked(child_event->total_time_running,
60651 &parent_event->child_total_time_running);
60652
60653 /*
60654diff -urNp linux-2.6.32.41/kernel/pid.c linux-2.6.32.41/kernel/pid.c
60655--- linux-2.6.32.41/kernel/pid.c 2011-04-22 19:16:29.000000000 -0400
60656+++ linux-2.6.32.41/kernel/pid.c 2011-04-18 19:22:38.000000000 -0400
60657@@ -33,6 +33,7 @@
60658 #include <linux/rculist.h>
60659 #include <linux/bootmem.h>
60660 #include <linux/hash.h>
60661+#include <linux/security.h>
60662 #include <linux/pid_namespace.h>
60663 #include <linux/init_task.h>
60664 #include <linux/syscalls.h>
60665@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
60666
60667 int pid_max = PID_MAX_DEFAULT;
60668
60669-#define RESERVED_PIDS 300
60670+#define RESERVED_PIDS 500
60671
60672 int pid_max_min = RESERVED_PIDS + 1;
60673 int pid_max_max = PID_MAX_LIMIT;
60674@@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
60675 */
60676 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
60677 {
60678- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
60679+ struct task_struct *task;
60680+
60681+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
60682+
60683+ if (gr_pid_is_chrooted(task))
60684+ return NULL;
60685+
60686+ return task;
60687 }
60688
60689 struct task_struct *find_task_by_vpid(pid_t vnr)
60690diff -urNp linux-2.6.32.41/kernel/posix-cpu-timers.c linux-2.6.32.41/kernel/posix-cpu-timers.c
60691--- linux-2.6.32.41/kernel/posix-cpu-timers.c 2011-03-27 14:31:47.000000000 -0400
60692+++ linux-2.6.32.41/kernel/posix-cpu-timers.c 2011-04-17 15:56:46.000000000 -0400
60693@@ -6,6 +6,7 @@
60694 #include <linux/posix-timers.h>
60695 #include <linux/errno.h>
60696 #include <linux/math64.h>
60697+#include <linux/security.h>
60698 #include <asm/uaccess.h>
60699 #include <linux/kernel_stat.h>
60700 #include <trace/events/timer.h>
60701diff -urNp linux-2.6.32.41/kernel/posix-timers.c linux-2.6.32.41/kernel/posix-timers.c
60702--- linux-2.6.32.41/kernel/posix-timers.c 2011-03-27 14:31:47.000000000 -0400
60703+++ linux-2.6.32.41/kernel/posix-timers.c 2011-05-16 21:46:57.000000000 -0400
60704@@ -42,6 +42,7 @@
60705 #include <linux/compiler.h>
60706 #include <linux/idr.h>
60707 #include <linux/posix-timers.h>
60708+#include <linux/grsecurity.h>
60709 #include <linux/syscalls.h>
60710 #include <linux/wait.h>
60711 #include <linux/workqueue.h>
60712@@ -296,6 +297,8 @@ static __init int init_posix_timers(void
60713 .nsleep = no_nsleep,
60714 };
60715
60716+ pax_track_stack();
60717+
60718 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
60719 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
60720 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
60721@@ -948,6 +951,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
60722 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
60723 return -EFAULT;
60724
60725+ /* only the CLOCK_REALTIME clock can be set, all other clocks
60726+ have their clock_set fptr set to a nosettime dummy function
60727+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
60728+ call common_clock_set, which calls do_sys_settimeofday, which
60729+ we hook
60730+ */
60731+
60732 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
60733 }
60734
60735diff -urNp linux-2.6.32.41/kernel/power/hibernate.c linux-2.6.32.41/kernel/power/hibernate.c
60736--- linux-2.6.32.41/kernel/power/hibernate.c 2011-03-27 14:31:47.000000000 -0400
60737+++ linux-2.6.32.41/kernel/power/hibernate.c 2011-04-17 15:56:46.000000000 -0400
60738@@ -48,14 +48,14 @@ enum {
60739
60740 static int hibernation_mode = HIBERNATION_SHUTDOWN;
60741
60742-static struct platform_hibernation_ops *hibernation_ops;
60743+static const struct platform_hibernation_ops *hibernation_ops;
60744
60745 /**
60746 * hibernation_set_ops - set the global hibernate operations
60747 * @ops: the hibernation operations to use in subsequent hibernation transitions
60748 */
60749
60750-void hibernation_set_ops(struct platform_hibernation_ops *ops)
60751+void hibernation_set_ops(const struct platform_hibernation_ops *ops)
60752 {
60753 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
60754 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
60755diff -urNp linux-2.6.32.41/kernel/power/poweroff.c linux-2.6.32.41/kernel/power/poweroff.c
60756--- linux-2.6.32.41/kernel/power/poweroff.c 2011-03-27 14:31:47.000000000 -0400
60757+++ linux-2.6.32.41/kernel/power/poweroff.c 2011-04-17 15:56:46.000000000 -0400
60758@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
60759 .enable_mask = SYSRQ_ENABLE_BOOT,
60760 };
60761
60762-static int pm_sysrq_init(void)
60763+static int __init pm_sysrq_init(void)
60764 {
60765 register_sysrq_key('o', &sysrq_poweroff_op);
60766 return 0;
60767diff -urNp linux-2.6.32.41/kernel/power/process.c linux-2.6.32.41/kernel/power/process.c
60768--- linux-2.6.32.41/kernel/power/process.c 2011-03-27 14:31:47.000000000 -0400
60769+++ linux-2.6.32.41/kernel/power/process.c 2011-04-17 15:56:46.000000000 -0400
60770@@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_
60771 struct timeval start, end;
60772 u64 elapsed_csecs64;
60773 unsigned int elapsed_csecs;
60774+ bool timedout = false;
60775
60776 do_gettimeofday(&start);
60777
60778 end_time = jiffies + TIMEOUT;
60779 do {
60780 todo = 0;
60781+ if (time_after(jiffies, end_time))
60782+ timedout = true;
60783 read_lock(&tasklist_lock);
60784 do_each_thread(g, p) {
60785 if (frozen(p) || !freezeable(p))
60786@@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_
60787 * It is "frozen enough". If the task does wake
60788 * up, it will immediately call try_to_freeze.
60789 */
60790- if (!task_is_stopped_or_traced(p) &&
60791- !freezer_should_skip(p))
60792+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
60793 todo++;
60794+ if (timedout) {
60795+ printk(KERN_ERR "Task refusing to freeze:\n");
60796+ sched_show_task(p);
60797+ }
60798+ }
60799 } while_each_thread(g, p);
60800 read_unlock(&tasklist_lock);
60801 yield(); /* Yield is okay here */
60802- if (time_after(jiffies, end_time))
60803- break;
60804- } while (todo);
60805+ } while (todo && !timedout);
60806
60807 do_gettimeofday(&end);
60808 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
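The try_to_freeze_tasks() change above replaces the plain timeout break with a diagnostic final pass: once the deadline passes, a timedout flag is set, the same sweep runs one last time so every still-unfrozen task is reported through sched_show_task(), and only then does the loop exit. The control-flow shape, stripped of the kernel specifics and using placeholder item_done()/report() helpers, is:

#include <stdbool.h>
#include <stdio.h>

/* Sketch of the loop shape: keep sweeping until nothing is left to do,
 * but once the pass budget (the "deadline") is exhausted, do one last
 * sweep that names the stragglers before giving up. */
static bool item_done(int i) { return i % 3 != 0; }
static void report(int i)    { printf("item %d refusing to finish\n", i); }

static int sweep_until(int max_passes, int nitems)
{
	bool timedout = false;
	int todo, pass = 0;

	do {
		todo = 0;
		if (++pass > max_passes)	/* deadline reached */
			timedout = true;
		for (int i = 0; i < nitems; i++) {
			if (item_done(i))
				continue;
			todo++;			/* still not done */
			if (timedout)
				report(i);	/* like sched_show_task() above */
		}
		/* the kernel version calls yield() here between passes */
	} while (todo && !timedout);

	return todo ? -1 : 0;
}

int main(void)
{
	return sweep_until(5, 10);
}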
60809diff -urNp linux-2.6.32.41/kernel/power/suspend.c linux-2.6.32.41/kernel/power/suspend.c
60810--- linux-2.6.32.41/kernel/power/suspend.c 2011-03-27 14:31:47.000000000 -0400
60811+++ linux-2.6.32.41/kernel/power/suspend.c 2011-04-17 15:56:46.000000000 -0400
60812@@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_M
60813 [PM_SUSPEND_MEM] = "mem",
60814 };
60815
60816-static struct platform_suspend_ops *suspend_ops;
60817+static const struct platform_suspend_ops *suspend_ops;
60818
60819 /**
60820 * suspend_set_ops - Set the global suspend method table.
60821 * @ops: Pointer to ops structure.
60822 */
60823-void suspend_set_ops(struct platform_suspend_ops *ops)
60824+void suspend_set_ops(const struct platform_suspend_ops *ops)
60825 {
60826 mutex_lock(&pm_mutex);
60827 suspend_ops = ops;
60828diff -urNp linux-2.6.32.41/kernel/printk.c linux-2.6.32.41/kernel/printk.c
60829--- linux-2.6.32.41/kernel/printk.c 2011-03-27 14:31:47.000000000 -0400
60830+++ linux-2.6.32.41/kernel/printk.c 2011-04-17 15:56:46.000000000 -0400
60831@@ -278,6 +278,11 @@ int do_syslog(int type, char __user *buf
60832 char c;
60833 int error = 0;
60834
60835+#ifdef CONFIG_GRKERNSEC_DMESG
60836+ if (grsec_enable_dmesg && !capable(CAP_SYS_ADMIN))
60837+ return -EPERM;
60838+#endif
60839+
60840 error = security_syslog(type);
60841 if (error)
60842 return error;
60843diff -urNp linux-2.6.32.41/kernel/profile.c linux-2.6.32.41/kernel/profile.c
60844--- linux-2.6.32.41/kernel/profile.c 2011-03-27 14:31:47.000000000 -0400
60845+++ linux-2.6.32.41/kernel/profile.c 2011-05-04 17:56:28.000000000 -0400
60846@@ -39,7 +39,7 @@ struct profile_hit {
60847 /* Oprofile timer tick hook */
60848 static int (*timer_hook)(struct pt_regs *) __read_mostly;
60849
60850-static atomic_t *prof_buffer;
60851+static atomic_unchecked_t *prof_buffer;
60852 static unsigned long prof_len, prof_shift;
60853
60854 int prof_on __read_mostly;
60855@@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
60856 hits[i].pc = 0;
60857 continue;
60858 }
60859- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
60860+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
60861 hits[i].hits = hits[i].pc = 0;
60862 }
60863 }
60864@@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc,
60865 * Add the current hit(s) and flush the write-queue out
60866 * to the global buffer:
60867 */
60868- atomic_add(nr_hits, &prof_buffer[pc]);
60869+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
60870 for (i = 0; i < NR_PROFILE_HIT; ++i) {
60871- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
60872+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
60873 hits[i].pc = hits[i].hits = 0;
60874 }
60875 out:
60876@@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc,
60877 if (prof_on != type || !prof_buffer)
60878 return;
60879 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
60880- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
60881+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
60882 }
60883 #endif /* !CONFIG_SMP */
60884 EXPORT_SYMBOL_GPL(profile_hits);
60885@@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
60886 return -EFAULT;
60887 buf++; p++; count--; read++;
60888 }
60889- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
60890+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
60891 if (copy_to_user(buf, (void *)pnt, count))
60892 return -EFAULT;
60893 read += count;
60894@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
60895 }
60896 #endif
60897 profile_discard_flip_buffers();
60898- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
60899+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
60900 return count;
60901 }
60902
60903diff -urNp linux-2.6.32.41/kernel/ptrace.c linux-2.6.32.41/kernel/ptrace.c
60904--- linux-2.6.32.41/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
60905+++ linux-2.6.32.41/kernel/ptrace.c 2011-05-22 23:02:06.000000000 -0400
60906@@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_stru
60907 return ret;
60908 }
60909
60910-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
60911+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
60912+ unsigned int log)
60913 {
60914 const struct cred *cred = current_cred(), *tcred;
60915
60916@@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_stru
60917 cred->gid != tcred->egid ||
60918 cred->gid != tcred->sgid ||
60919 cred->gid != tcred->gid) &&
60920- !capable(CAP_SYS_PTRACE)) {
60921+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
60922+ (log && !capable(CAP_SYS_PTRACE)))
60923+ ) {
60924 rcu_read_unlock();
60925 return -EPERM;
60926 }
60927@@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_stru
60928 smp_rmb();
60929 if (task->mm)
60930 dumpable = get_dumpable(task->mm);
60931- if (!dumpable && !capable(CAP_SYS_PTRACE))
60932+ if (!dumpable &&
60933+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
60934+ (log && !capable(CAP_SYS_PTRACE))))
60935 return -EPERM;
60936
60937 return security_ptrace_access_check(task, mode);
60938@@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struc
60939 {
60940 int err;
60941 task_lock(task);
60942- err = __ptrace_may_access(task, mode);
60943+ err = __ptrace_may_access(task, mode, 0);
60944+ task_unlock(task);
60945+ return !err;
60946+}
60947+
60948+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
60949+{
60950+ int err;
60951+ task_lock(task);
60952+ err = __ptrace_may_access(task, mode, 1);
60953 task_unlock(task);
60954 return !err;
60955 }
60956@@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *ta
60957 goto out;
60958
60959 task_lock(task);
60960- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
60961+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
60962 task_unlock(task);
60963 if (retval)
60964 goto unlock_creds;
60965@@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *ta
60966 goto unlock_tasklist;
60967
60968 task->ptrace = PT_PTRACED;
60969- if (capable(CAP_SYS_PTRACE))
60970+ if (capable_nolog(CAP_SYS_PTRACE))
60971 task->ptrace |= PT_PTRACE_CAP;
60972
60973 __ptrace_link(task, current);
60974@@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *
60975 {
60976 int copied = 0;
60977
60978+ pax_track_stack();
60979+
60980 while (len > 0) {
60981 char buf[128];
60982 int this_len, retval;
60983@@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct
60984 {
60985 int copied = 0;
60986
60987+ pax_track_stack();
60988+
60989 while (len > 0) {
60990 char buf[128];
60991 int this_len, retval;
60992@@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *c
60993 int ret = -EIO;
60994 siginfo_t siginfo;
60995
60996+ pax_track_stack();
60997+
60998 switch (request) {
60999 case PTRACE_PEEKTEXT:
61000 case PTRACE_PEEKDATA:
61001@@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *c
61002 ret = ptrace_setoptions(child, data);
61003 break;
61004 case PTRACE_GETEVENTMSG:
61005- ret = put_user(child->ptrace_message, (unsigned long __user *) data);
61006+ ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
61007 break;
61008
61009 case PTRACE_GETSIGINFO:
61010 ret = ptrace_getsiginfo(child, &siginfo);
61011 if (!ret)
61012- ret = copy_siginfo_to_user((siginfo_t __user *) data,
61013+ ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
61014 &siginfo);
61015 break;
61016
61017 case PTRACE_SETSIGINFO:
61018- if (copy_from_user(&siginfo, (siginfo_t __user *) data,
61019+ if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
61020 sizeof siginfo))
61021 ret = -EFAULT;
61022 else
61023@@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
61024 goto out;
61025 }
61026
61027+ if (gr_handle_ptrace(child, request)) {
61028+ ret = -EPERM;
61029+ goto out_put_task_struct;
61030+ }
61031+
61032 if (request == PTRACE_ATTACH) {
61033 ret = ptrace_attach(child);
61034 /*
61035 * Some architectures need to do book-keeping after
61036 * a ptrace attach.
61037 */
61038- if (!ret)
61039+ if (!ret) {
61040 arch_ptrace_attach(child);
61041+ gr_audit_ptrace(child);
61042+ }
61043 goto out_put_task_struct;
61044 }
61045
61046@@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_
61047 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
61048 if (copied != sizeof(tmp))
61049 return -EIO;
61050- return put_user(tmp, (unsigned long __user *)data);
61051+ return put_user(tmp, (__force unsigned long __user *)data);
61052 }
61053
61054 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
61055@@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_st
61056 siginfo_t siginfo;
61057 int ret;
61058
61059+ pax_track_stack();
61060+
61061 switch (request) {
61062 case PTRACE_PEEKTEXT:
61063 case PTRACE_PEEKDATA:
61064@@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat
61065 goto out;
61066 }
61067
61068+ if (gr_handle_ptrace(child, request)) {
61069+ ret = -EPERM;
61070+ goto out_put_task_struct;
61071+ }
61072+
61073 if (request == PTRACE_ATTACH) {
61074 ret = ptrace_attach(child);
61075 /*
61076 * Some architectures need to do book-keeping after
61077 * a ptrace attach.
61078 */
61079- if (!ret)
61080+ if (!ret) {
61081 arch_ptrace_attach(child);
61082+ gr_audit_ptrace(child);
61083+ }
61084 goto out_put_task_struct;
61085 }
61086
61087diff -urNp linux-2.6.32.41/kernel/rcutorture.c linux-2.6.32.41/kernel/rcutorture.c
61088--- linux-2.6.32.41/kernel/rcutorture.c 2011-03-27 14:31:47.000000000 -0400
61089+++ linux-2.6.32.41/kernel/rcutorture.c 2011-05-04 17:56:28.000000000 -0400
61090@@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
61091 { 0 };
61092 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
61093 { 0 };
61094-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
61095-static atomic_t n_rcu_torture_alloc;
61096-static atomic_t n_rcu_torture_alloc_fail;
61097-static atomic_t n_rcu_torture_free;
61098-static atomic_t n_rcu_torture_mberror;
61099-static atomic_t n_rcu_torture_error;
61100+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
61101+static atomic_unchecked_t n_rcu_torture_alloc;
61102+static atomic_unchecked_t n_rcu_torture_alloc_fail;
61103+static atomic_unchecked_t n_rcu_torture_free;
61104+static atomic_unchecked_t n_rcu_torture_mberror;
61105+static atomic_unchecked_t n_rcu_torture_error;
61106 static long n_rcu_torture_timers;
61107 static struct list_head rcu_torture_removed;
61108 static cpumask_var_t shuffle_tmp_mask;
61109@@ -187,11 +187,11 @@ rcu_torture_alloc(void)
61110
61111 spin_lock_bh(&rcu_torture_lock);
61112 if (list_empty(&rcu_torture_freelist)) {
61113- atomic_inc(&n_rcu_torture_alloc_fail);
61114+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
61115 spin_unlock_bh(&rcu_torture_lock);
61116 return NULL;
61117 }
61118- atomic_inc(&n_rcu_torture_alloc);
61119+ atomic_inc_unchecked(&n_rcu_torture_alloc);
61120 p = rcu_torture_freelist.next;
61121 list_del_init(p);
61122 spin_unlock_bh(&rcu_torture_lock);
61123@@ -204,7 +204,7 @@ rcu_torture_alloc(void)
61124 static void
61125 rcu_torture_free(struct rcu_torture *p)
61126 {
61127- atomic_inc(&n_rcu_torture_free);
61128+ atomic_inc_unchecked(&n_rcu_torture_free);
61129 spin_lock_bh(&rcu_torture_lock);
61130 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
61131 spin_unlock_bh(&rcu_torture_lock);
61132@@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
61133 i = rp->rtort_pipe_count;
61134 if (i > RCU_TORTURE_PIPE_LEN)
61135 i = RCU_TORTURE_PIPE_LEN;
61136- atomic_inc(&rcu_torture_wcount[i]);
61137+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
61138 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
61139 rp->rtort_mbtest = 0;
61140 rcu_torture_free(rp);
61141@@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_fr
61142 i = rp->rtort_pipe_count;
61143 if (i > RCU_TORTURE_PIPE_LEN)
61144 i = RCU_TORTURE_PIPE_LEN;
61145- atomic_inc(&rcu_torture_wcount[i]);
61146+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
61147 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
61148 rp->rtort_mbtest = 0;
61149 list_del(&rp->rtort_free);
61150@@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
61151 i = old_rp->rtort_pipe_count;
61152 if (i > RCU_TORTURE_PIPE_LEN)
61153 i = RCU_TORTURE_PIPE_LEN;
61154- atomic_inc(&rcu_torture_wcount[i]);
61155+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
61156 old_rp->rtort_pipe_count++;
61157 cur_ops->deferred_free(old_rp);
61158 }
61159@@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned l
61160 return;
61161 }
61162 if (p->rtort_mbtest == 0)
61163- atomic_inc(&n_rcu_torture_mberror);
61164+ atomic_inc_unchecked(&n_rcu_torture_mberror);
61165 spin_lock(&rand_lock);
61166 cur_ops->read_delay(&rand);
61167 n_rcu_torture_timers++;
61168@@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
61169 continue;
61170 }
61171 if (p->rtort_mbtest == 0)
61172- atomic_inc(&n_rcu_torture_mberror);
61173+ atomic_inc_unchecked(&n_rcu_torture_mberror);
61174 cur_ops->read_delay(&rand);
61175 preempt_disable();
61176 pipe_count = p->rtort_pipe_count;
61177@@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
61178 rcu_torture_current,
61179 rcu_torture_current_version,
61180 list_empty(&rcu_torture_freelist),
61181- atomic_read(&n_rcu_torture_alloc),
61182- atomic_read(&n_rcu_torture_alloc_fail),
61183- atomic_read(&n_rcu_torture_free),
61184- atomic_read(&n_rcu_torture_mberror),
61185+ atomic_read_unchecked(&n_rcu_torture_alloc),
61186+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
61187+ atomic_read_unchecked(&n_rcu_torture_free),
61188+ atomic_read_unchecked(&n_rcu_torture_mberror),
61189 n_rcu_torture_timers);
61190- if (atomic_read(&n_rcu_torture_mberror) != 0)
61191+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
61192 cnt += sprintf(&page[cnt], " !!!");
61193 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
61194 if (i > 1) {
61195 cnt += sprintf(&page[cnt], "!!! ");
61196- atomic_inc(&n_rcu_torture_error);
61197+ atomic_inc_unchecked(&n_rcu_torture_error);
61198 WARN_ON_ONCE(1);
61199 }
61200 cnt += sprintf(&page[cnt], "Reader Pipe: ");
61201@@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
61202 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
61203 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
61204 cnt += sprintf(&page[cnt], " %d",
61205- atomic_read(&rcu_torture_wcount[i]));
61206+ atomic_read_unchecked(&rcu_torture_wcount[i]));
61207 }
61208 cnt += sprintf(&page[cnt], "\n");
61209 if (cur_ops->stats)
61210@@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
61211
61212 if (cur_ops->cleanup)
61213 cur_ops->cleanup();
61214- if (atomic_read(&n_rcu_torture_error))
61215+ if (atomic_read_unchecked(&n_rcu_torture_error))
61216 rcu_torture_print_module_parms("End of test: FAILURE");
61217 else
61218 rcu_torture_print_module_parms("End of test: SUCCESS");
61219@@ -1138,13 +1138,13 @@ rcu_torture_init(void)
61220
61221 rcu_torture_current = NULL;
61222 rcu_torture_current_version = 0;
61223- atomic_set(&n_rcu_torture_alloc, 0);
61224- atomic_set(&n_rcu_torture_alloc_fail, 0);
61225- atomic_set(&n_rcu_torture_free, 0);
61226- atomic_set(&n_rcu_torture_mberror, 0);
61227- atomic_set(&n_rcu_torture_error, 0);
61228+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
61229+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
61230+ atomic_set_unchecked(&n_rcu_torture_free, 0);
61231+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
61232+ atomic_set_unchecked(&n_rcu_torture_error, 0);
61233 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
61234- atomic_set(&rcu_torture_wcount[i], 0);
61235+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
61236 for_each_possible_cpu(cpu) {
61237 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
61238 per_cpu(rcu_torture_count, cpu)[i] = 0;
61239diff -urNp linux-2.6.32.41/kernel/rcutree.c linux-2.6.32.41/kernel/rcutree.c
61240--- linux-2.6.32.41/kernel/rcutree.c 2011-03-27 14:31:47.000000000 -0400
61241+++ linux-2.6.32.41/kernel/rcutree.c 2011-04-17 15:56:46.000000000 -0400
61242@@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state
61243 /*
61244 * Do softirq processing for the current CPU.
61245 */
61246-static void rcu_process_callbacks(struct softirq_action *unused)
61247+static void rcu_process_callbacks(void)
61248 {
61249 /*
61250 * Memory references from any prior RCU read-side critical sections
61251diff -urNp linux-2.6.32.41/kernel/rcutree_plugin.h linux-2.6.32.41/kernel/rcutree_plugin.h
61252--- linux-2.6.32.41/kernel/rcutree_plugin.h 2011-03-27 14:31:47.000000000 -0400
61253+++ linux-2.6.32.41/kernel/rcutree_plugin.h 2011-04-17 15:56:46.000000000 -0400
61254@@ -145,7 +145,7 @@ static void rcu_preempt_note_context_swi
61255 */
61256 void __rcu_read_lock(void)
61257 {
61258- ACCESS_ONCE(current->rcu_read_lock_nesting)++;
61259+ ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
61260 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
61261 }
61262 EXPORT_SYMBOL_GPL(__rcu_read_lock);
61263@@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
61264 struct task_struct *t = current;
61265
61266 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
61267- if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
61268+ if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
61269 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
61270 rcu_read_unlock_special(t);
61271 }
61272diff -urNp linux-2.6.32.41/kernel/relay.c linux-2.6.32.41/kernel/relay.c
61273--- linux-2.6.32.41/kernel/relay.c 2011-03-27 14:31:47.000000000 -0400
61274+++ linux-2.6.32.41/kernel/relay.c 2011-05-16 21:46:57.000000000 -0400
61275@@ -1222,7 +1222,7 @@ static int subbuf_splice_actor(struct fi
61276 unsigned int flags,
61277 int *nonpad_ret)
61278 {
61279- unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
61280+ unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
61281 struct rchan_buf *rbuf = in->private_data;
61282 unsigned int subbuf_size = rbuf->chan->subbuf_size;
61283 uint64_t pos = (uint64_t) *ppos;
61284@@ -1241,6 +1241,9 @@ static int subbuf_splice_actor(struct fi
61285 .ops = &relay_pipe_buf_ops,
61286 .spd_release = relay_page_release,
61287 };
61288+ ssize_t ret;
61289+
61290+ pax_track_stack();
61291
61292 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
61293 return 0;
61294diff -urNp linux-2.6.32.41/kernel/resource.c linux-2.6.32.41/kernel/resource.c
61295--- linux-2.6.32.41/kernel/resource.c 2011-03-27 14:31:47.000000000 -0400
61296+++ linux-2.6.32.41/kernel/resource.c 2011-04-17 15:56:46.000000000 -0400
61297@@ -132,8 +132,18 @@ static const struct file_operations proc
61298
61299 static int __init ioresources_init(void)
61300 {
61301+#ifdef CONFIG_GRKERNSEC_PROC_ADD
61302+#ifdef CONFIG_GRKERNSEC_PROC_USER
61303+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
61304+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
61305+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61306+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
61307+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
61308+#endif
61309+#else
61310 proc_create("ioports", 0, NULL, &proc_ioports_operations);
61311 proc_create("iomem", 0, NULL, &proc_iomem_operations);
61312+#endif
61313 return 0;
61314 }
61315 __initcall(ioresources_init);
61316diff -urNp linux-2.6.32.41/kernel/rtmutex.c linux-2.6.32.41/kernel/rtmutex.c
61317--- linux-2.6.32.41/kernel/rtmutex.c 2011-03-27 14:31:47.000000000 -0400
61318+++ linux-2.6.32.41/kernel/rtmutex.c 2011-04-17 15:56:46.000000000 -0400
61319@@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt
61320 */
61321 spin_lock_irqsave(&pendowner->pi_lock, flags);
61322
61323- WARN_ON(!pendowner->pi_blocked_on);
61324+ BUG_ON(!pendowner->pi_blocked_on);
61325 WARN_ON(pendowner->pi_blocked_on != waiter);
61326 WARN_ON(pendowner->pi_blocked_on->lock != lock);
61327
61328diff -urNp linux-2.6.32.41/kernel/rtmutex-tester.c linux-2.6.32.41/kernel/rtmutex-tester.c
61329--- linux-2.6.32.41/kernel/rtmutex-tester.c 2011-03-27 14:31:47.000000000 -0400
61330+++ linux-2.6.32.41/kernel/rtmutex-tester.c 2011-05-04 17:56:28.000000000 -0400
61331@@ -21,7 +21,7 @@
61332 #define MAX_RT_TEST_MUTEXES 8
61333
61334 static spinlock_t rttest_lock;
61335-static atomic_t rttest_event;
61336+static atomic_unchecked_t rttest_event;
61337
61338 struct test_thread_data {
61339 int opcode;
61340@@ -64,7 +64,7 @@ static int handle_op(struct test_thread_
61341
61342 case RTTEST_LOCKCONT:
61343 td->mutexes[td->opdata] = 1;
61344- td->event = atomic_add_return(1, &rttest_event);
61345+ td->event = atomic_add_return_unchecked(1, &rttest_event);
61346 return 0;
61347
61348 case RTTEST_RESET:
61349@@ -82,7 +82,7 @@ static int handle_op(struct test_thread_
61350 return 0;
61351
61352 case RTTEST_RESETEVENT:
61353- atomic_set(&rttest_event, 0);
61354+ atomic_set_unchecked(&rttest_event, 0);
61355 return 0;
61356
61357 default:
61358@@ -99,9 +99,9 @@ static int handle_op(struct test_thread_
61359 return ret;
61360
61361 td->mutexes[id] = 1;
61362- td->event = atomic_add_return(1, &rttest_event);
61363+ td->event = atomic_add_return_unchecked(1, &rttest_event);
61364 rt_mutex_lock(&mutexes[id]);
61365- td->event = atomic_add_return(1, &rttest_event);
61366+ td->event = atomic_add_return_unchecked(1, &rttest_event);
61367 td->mutexes[id] = 4;
61368 return 0;
61369
61370@@ -112,9 +112,9 @@ static int handle_op(struct test_thread_
61371 return ret;
61372
61373 td->mutexes[id] = 1;
61374- td->event = atomic_add_return(1, &rttest_event);
61375+ td->event = atomic_add_return_unchecked(1, &rttest_event);
61376 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
61377- td->event = atomic_add_return(1, &rttest_event);
61378+ td->event = atomic_add_return_unchecked(1, &rttest_event);
61379 td->mutexes[id] = ret ? 0 : 4;
61380 return ret ? -EINTR : 0;
61381
61382@@ -123,9 +123,9 @@ static int handle_op(struct test_thread_
61383 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
61384 return ret;
61385
61386- td->event = atomic_add_return(1, &rttest_event);
61387+ td->event = atomic_add_return_unchecked(1, &rttest_event);
61388 rt_mutex_unlock(&mutexes[id]);
61389- td->event = atomic_add_return(1, &rttest_event);
61390+ td->event = atomic_add_return_unchecked(1, &rttest_event);
61391 td->mutexes[id] = 0;
61392 return 0;
61393
61394@@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mu
61395 break;
61396
61397 td->mutexes[dat] = 2;
61398- td->event = atomic_add_return(1, &rttest_event);
61399+ td->event = atomic_add_return_unchecked(1, &rttest_event);
61400 break;
61401
61402 case RTTEST_LOCKBKL:
61403@@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mu
61404 return;
61405
61406 td->mutexes[dat] = 3;
61407- td->event = atomic_add_return(1, &rttest_event);
61408+ td->event = atomic_add_return_unchecked(1, &rttest_event);
61409 break;
61410
61411 case RTTEST_LOCKNOWAIT:
61412@@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mu
61413 return;
61414
61415 td->mutexes[dat] = 1;
61416- td->event = atomic_add_return(1, &rttest_event);
61417+ td->event = atomic_add_return_unchecked(1, &rttest_event);
61418 return;
61419
61420 case RTTEST_LOCKBKL:
61421diff -urNp linux-2.6.32.41/kernel/sched.c linux-2.6.32.41/kernel/sched.c
61422--- linux-2.6.32.41/kernel/sched.c 2011-03-27 14:31:47.000000000 -0400
61423+++ linux-2.6.32.41/kernel/sched.c 2011-05-22 23:02:06.000000000 -0400
61424@@ -5043,7 +5043,7 @@ out:
61425 * In CONFIG_NO_HZ case, the idle load balance owner will do the
61426 * rebalancing for all the cpus for whom scheduler ticks are stopped.
61427 */
61428-static void run_rebalance_domains(struct softirq_action *h)
61429+static void run_rebalance_domains(void)
61430 {
61431 int this_cpu = smp_processor_id();
61432 struct rq *this_rq = cpu_rq(this_cpu);
61433@@ -5700,6 +5700,8 @@ asmlinkage void __sched schedule(void)
61434 struct rq *rq;
61435 int cpu;
61436
61437+ pax_track_stack();
61438+
61439 need_resched:
61440 preempt_disable();
61441 cpu = smp_processor_id();
61442@@ -5770,7 +5772,7 @@ EXPORT_SYMBOL(schedule);
61443 * Look out! "owner" is an entirely speculative pointer
61444 * access and not reliable.
61445 */
61446-int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
61447+int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
61448 {
61449 unsigned int cpu;
61450 struct rq *rq;
61451@@ -5784,10 +5786,10 @@ int mutex_spin_on_owner(struct mutex *lo
61452 * DEBUG_PAGEALLOC could have unmapped it if
61453 * the mutex owner just released it and exited.
61454 */
61455- if (probe_kernel_address(&owner->cpu, cpu))
61456+ if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
61457 return 0;
61458 #else
61459- cpu = owner->cpu;
61460+ cpu = task_thread_info(owner)->cpu;
61461 #endif
61462
61463 /*
61464@@ -5816,7 +5818,7 @@ int mutex_spin_on_owner(struct mutex *lo
61465 /*
61466 * Is that owner really running on that cpu?
61467 */
61468- if (task_thread_info(rq->curr) != owner || need_resched())
61469+ if (rq->curr != owner || need_resched())
61470 return 0;
61471
61472 cpu_relax();
61473@@ -6359,6 +6361,8 @@ int can_nice(const struct task_struct *p
61474 /* convert nice value [19,-20] to rlimit style value [1,40] */
61475 int nice_rlim = 20 - nice;
61476
61477+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
61478+
61479 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
61480 capable(CAP_SYS_NICE));
61481 }
61482@@ -6392,7 +6396,8 @@ SYSCALL_DEFINE1(nice, int, increment)
61483 if (nice > 19)
61484 nice = 19;
61485
61486- if (increment < 0 && !can_nice(current, nice))
61487+ if (increment < 0 && (!can_nice(current, nice) ||
61488+ gr_handle_chroot_nice()))
61489 return -EPERM;
61490
61491 retval = security_task_setnice(current, nice);
61492@@ -8774,7 +8779,7 @@ static void init_sched_groups_power(int
61493 long power;
61494 int weight;
61495
61496- WARN_ON(!sd || !sd->groups);
61497+ BUG_ON(!sd || !sd->groups);
61498
61499 if (cpu != group_first_cpu(sd->groups))
61500 return;
61501diff -urNp linux-2.6.32.41/kernel/signal.c linux-2.6.32.41/kernel/signal.c
61502--- linux-2.6.32.41/kernel/signal.c 2011-04-17 17:00:52.000000000 -0400
61503+++ linux-2.6.32.41/kernel/signal.c 2011-05-22 23:02:06.000000000 -0400
61504@@ -41,12 +41,12 @@
61505
61506 static struct kmem_cache *sigqueue_cachep;
61507
61508-static void __user *sig_handler(struct task_struct *t, int sig)
61509+static __sighandler_t sig_handler(struct task_struct *t, int sig)
61510 {
61511 return t->sighand->action[sig - 1].sa.sa_handler;
61512 }
61513
61514-static int sig_handler_ignored(void __user *handler, int sig)
61515+static int sig_handler_ignored(__sighandler_t handler, int sig)
61516 {
61517 /* Is it explicitly or implicitly ignored? */
61518 return handler == SIG_IGN ||
61519@@ -56,7 +56,7 @@ static int sig_handler_ignored(void __us
61520 static int sig_task_ignored(struct task_struct *t, int sig,
61521 int from_ancestor_ns)
61522 {
61523- void __user *handler;
61524+ __sighandler_t handler;
61525
61526 handler = sig_handler(t, sig);
61527
61528@@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc
61529 */
61530 user = get_uid(__task_cred(t)->user);
61531 atomic_inc(&user->sigpending);
61532+
61533+ if (!override_rlimit)
61534+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
61535 if (override_rlimit ||
61536 atomic_read(&user->sigpending) <=
61537 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
61538@@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct
61539
61540 int unhandled_signal(struct task_struct *tsk, int sig)
61541 {
61542- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
61543+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
61544 if (is_global_init(tsk))
61545 return 1;
61546 if (handler != SIG_IGN && handler != SIG_DFL)
61547@@ -627,6 +630,9 @@ static int check_kill_permission(int sig
61548 }
61549 }
61550
61551+ if (gr_handle_signal(t, sig))
61552+ return -EPERM;
61553+
61554 return security_task_kill(t, info, sig, 0);
61555 }
61556
61557@@ -968,7 +974,7 @@ __group_send_sig_info(int sig, struct si
61558 return send_signal(sig, info, p, 1);
61559 }
61560
61561-static int
61562+int
61563 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
61564 {
61565 return send_signal(sig, info, t, 0);
61566@@ -1005,6 +1011,7 @@ force_sig_info(int sig, struct siginfo *
61567 unsigned long int flags;
61568 int ret, blocked, ignored;
61569 struct k_sigaction *action;
61570+ int is_unhandled = 0;
61571
61572 spin_lock_irqsave(&t->sighand->siglock, flags);
61573 action = &t->sighand->action[sig-1];
61574@@ -1019,9 +1026,18 @@ force_sig_info(int sig, struct siginfo *
61575 }
61576 if (action->sa.sa_handler == SIG_DFL)
61577 t->signal->flags &= ~SIGNAL_UNKILLABLE;
61578+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
61579+ is_unhandled = 1;
61580 ret = specific_send_sig_info(sig, info, t);
61581 spin_unlock_irqrestore(&t->sighand->siglock, flags);
61582
61583+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
61584+ normal operation */
61585+ if (is_unhandled) {
61586+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
61587+ gr_handle_crash(t, sig);
61588+ }
61589+
61590 return ret;
61591 }
61592
61593@@ -1081,8 +1097,11 @@ int group_send_sig_info(int sig, struct
61594 {
61595 int ret = check_kill_permission(sig, info, p);
61596
61597- if (!ret && sig)
61598+ if (!ret && sig) {
61599 ret = do_send_sig_info(sig, info, p, true);
61600+ if (!ret)
61601+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
61602+ }
61603
61604 return ret;
61605 }
61606@@ -1644,6 +1663,8 @@ void ptrace_notify(int exit_code)
61607 {
61608 siginfo_t info;
61609
61610+ pax_track_stack();
61611+
61612 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
61613
61614 memset(&info, 0, sizeof info);
61615diff -urNp linux-2.6.32.41/kernel/smp.c linux-2.6.32.41/kernel/smp.c
61616--- linux-2.6.32.41/kernel/smp.c 2011-03-27 14:31:47.000000000 -0400
61617+++ linux-2.6.32.41/kernel/smp.c 2011-04-17 15:56:46.000000000 -0400
61618@@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void
61619 }
61620 EXPORT_SYMBOL(smp_call_function);
61621
61622-void ipi_call_lock(void)
61623+void ipi_call_lock(void) __acquires(call_function.lock)
61624 {
61625 spin_lock(&call_function.lock);
61626 }
61627
61628-void ipi_call_unlock(void)
61629+void ipi_call_unlock(void) __releases(call_function.lock)
61630 {
61631 spin_unlock(&call_function.lock);
61632 }
61633
61634-void ipi_call_lock_irq(void)
61635+void ipi_call_lock_irq(void) __acquires(call_function.lock)
61636 {
61637 spin_lock_irq(&call_function.lock);
61638 }
61639
61640-void ipi_call_unlock_irq(void)
61641+void ipi_call_unlock_irq(void) __releases(call_function.lock)
61642 {
61643 spin_unlock_irq(&call_function.lock);
61644 }
61645diff -urNp linux-2.6.32.41/kernel/softirq.c linux-2.6.32.41/kernel/softirq.c
61646--- linux-2.6.32.41/kernel/softirq.c 2011-03-27 14:31:47.000000000 -0400
61647+++ linux-2.6.32.41/kernel/softirq.c 2011-04-17 15:56:46.000000000 -0400
61648@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
61649
61650 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
61651
61652-char *softirq_to_name[NR_SOFTIRQS] = {
61653+const char * const softirq_to_name[NR_SOFTIRQS] = {
61654 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
61655 "TASKLET", "SCHED", "HRTIMER", "RCU"
61656 };
61657@@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
61658
61659 asmlinkage void __do_softirq(void)
61660 {
61661- struct softirq_action *h;
61662+ const struct softirq_action *h;
61663 __u32 pending;
61664 int max_restart = MAX_SOFTIRQ_RESTART;
61665 int cpu;
61666@@ -233,7 +233,7 @@ restart:
61667 kstat_incr_softirqs_this_cpu(h - softirq_vec);
61668
61669 trace_softirq_entry(h, softirq_vec);
61670- h->action(h);
61671+ h->action();
61672 trace_softirq_exit(h, softirq_vec);
61673 if (unlikely(prev_count != preempt_count())) {
61674 printk(KERN_ERR "huh, entered softirq %td %s %p"
61675@@ -363,7 +363,7 @@ void raise_softirq(unsigned int nr)
61676 local_irq_restore(flags);
61677 }
61678
61679-void open_softirq(int nr, void (*action)(struct softirq_action *))
61680+void open_softirq(int nr, void (*action)(void))
61681 {
61682 softirq_vec[nr].action = action;
61683 }
61684@@ -419,7 +419,7 @@ void __tasklet_hi_schedule_first(struct
61685
61686 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
61687
61688-static void tasklet_action(struct softirq_action *a)
61689+static void tasklet_action(void)
61690 {
61691 struct tasklet_struct *list;
61692
61693@@ -454,7 +454,7 @@ static void tasklet_action(struct softir
61694 }
61695 }
61696
61697-static void tasklet_hi_action(struct softirq_action *a)
61698+static void tasklet_hi_action(void)
61699 {
61700 struct tasklet_struct *list;
61701
61702diff -urNp linux-2.6.32.41/kernel/sys.c linux-2.6.32.41/kernel/sys.c
61703--- linux-2.6.32.41/kernel/sys.c 2011-03-27 14:31:47.000000000 -0400
61704+++ linux-2.6.32.41/kernel/sys.c 2011-04-17 15:56:46.000000000 -0400
61705@@ -133,6 +133,12 @@ static int set_one_prio(struct task_stru
61706 error = -EACCES;
61707 goto out;
61708 }
61709+
61710+ if (gr_handle_chroot_setpriority(p, niceval)) {
61711+ error = -EACCES;
61712+ goto out;
61713+ }
61714+
61715 no_nice = security_task_setnice(p, niceval);
61716 if (no_nice) {
61717 error = no_nice;
61718@@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which,
61719 !(user = find_user(who)))
61720 goto out_unlock; /* No processes for this user */
61721
61722- do_each_thread(g, p)
61723+ do_each_thread(g, p) {
61724 if (__task_cred(p)->uid == who)
61725 error = set_one_prio(p, niceval, error);
61726- while_each_thread(g, p);
61727+ } while_each_thread(g, p);
61728 if (who != cred->uid)
61729 free_uid(user); /* For find_user() */
61730 break;
61731@@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which,
61732 !(user = find_user(who)))
61733 goto out_unlock; /* No processes for this user */
61734
61735- do_each_thread(g, p)
61736+ do_each_thread(g, p) {
61737 if (__task_cred(p)->uid == who) {
61738 niceval = 20 - task_nice(p);
61739 if (niceval > retval)
61740 retval = niceval;
61741 }
61742- while_each_thread(g, p);
61743+ } while_each_thread(g, p);
61744 if (who != cred->uid)
61745 free_uid(user); /* for find_user() */
61746 break;
61747@@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
61748 goto error;
61749 }
61750
61751+ if (gr_check_group_change(new->gid, new->egid, -1))
61752+ goto error;
61753+
61754 if (rgid != (gid_t) -1 ||
61755 (egid != (gid_t) -1 && egid != old->gid))
61756 new->sgid = new->egid;
61757@@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
61758 goto error;
61759
61760 retval = -EPERM;
61761+
61762+ if (gr_check_group_change(gid, gid, gid))
61763+ goto error;
61764+
61765 if (capable(CAP_SETGID))
61766 new->gid = new->egid = new->sgid = new->fsgid = gid;
61767 else if (gid == old->gid || gid == old->sgid)
61768@@ -627,6 +640,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
61769 goto error;
61770 }
61771
61772+ if (gr_check_user_change(new->uid, new->euid, -1))
61773+ goto error;
61774+
61775 if (new->uid != old->uid) {
61776 retval = set_user(new);
61777 if (retval < 0)
61778@@ -675,6 +691,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
61779 goto error;
61780
61781 retval = -EPERM;
61782+
61783+ if (gr_check_crash_uid(uid))
61784+ goto error;
61785+ if (gr_check_user_change(uid, uid, uid))
61786+ goto error;
61787+
61788 if (capable(CAP_SETUID)) {
61789 new->suid = new->uid = uid;
61790 if (uid != old->uid) {
61791@@ -732,6 +754,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
61792 goto error;
61793 }
61794
61795+ if (gr_check_user_change(ruid, euid, -1))
61796+ goto error;
61797+
61798 if (ruid != (uid_t) -1) {
61799 new->uid = ruid;
61800 if (ruid != old->uid) {
61801@@ -800,6 +825,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
61802 goto error;
61803 }
61804
61805+ if (gr_check_group_change(rgid, egid, -1))
61806+ goto error;
61807+
61808 if (rgid != (gid_t) -1)
61809 new->gid = rgid;
61810 if (egid != (gid_t) -1)
61811@@ -849,6 +877,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
61812 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
61813 goto error;
61814
61815+ if (gr_check_user_change(-1, -1, uid))
61816+ goto error;
61817+
61818 if (uid == old->uid || uid == old->euid ||
61819 uid == old->suid || uid == old->fsuid ||
61820 capable(CAP_SETUID)) {
61821@@ -889,6 +920,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
61822 if (gid == old->gid || gid == old->egid ||
61823 gid == old->sgid || gid == old->fsgid ||
61824 capable(CAP_SETGID)) {
61825+ if (gr_check_group_change(-1, -1, gid))
61826+ goto error;
61827+
61828 if (gid != old_fsgid) {
61829 new->fsgid = gid;
61830 goto change_okay;
61831@@ -1454,7 +1488,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
61832 error = get_dumpable(me->mm);
61833 break;
61834 case PR_SET_DUMPABLE:
61835- if (arg2 < 0 || arg2 > 1) {
61836+ if (arg2 > 1) {
61837 error = -EINVAL;
61838 break;
61839 }
61840diff -urNp linux-2.6.32.41/kernel/sysctl.c linux-2.6.32.41/kernel/sysctl.c
61841--- linux-2.6.32.41/kernel/sysctl.c 2011-03-27 14:31:47.000000000 -0400
61842+++ linux-2.6.32.41/kernel/sysctl.c 2011-04-17 15:56:46.000000000 -0400
61843@@ -63,6 +63,13 @@
61844 static int deprecated_sysctl_warning(struct __sysctl_args *args);
61845
61846 #if defined(CONFIG_SYSCTL)
61847+#include <linux/grsecurity.h>
61848+#include <linux/grinternal.h>
61849+
61850+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
61851+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
61852+ const int op);
61853+extern int gr_handle_chroot_sysctl(const int op);
61854
61855 /* External variables not in a header file. */
61856 extern int C_A_D;
61857@@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_ta
61858 static int proc_taint(struct ctl_table *table, int write,
61859 void __user *buffer, size_t *lenp, loff_t *ppos);
61860 #endif
61861+extern ctl_table grsecurity_table[];
61862
61863 static struct ctl_table root_table[];
61864 static struct ctl_table_root sysctl_table_root;
61865@@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
61866 int sysctl_legacy_va_layout;
61867 #endif
61868
61869+#ifdef CONFIG_PAX_SOFTMODE
61870+static ctl_table pax_table[] = {
61871+ {
61872+ .ctl_name = CTL_UNNUMBERED,
61873+ .procname = "softmode",
61874+ .data = &pax_softmode,
61875+ .maxlen = sizeof(unsigned int),
61876+ .mode = 0600,
61877+ .proc_handler = &proc_dointvec,
61878+ },
61879+
61880+ { .ctl_name = 0 }
61881+};
61882+#endif
61883+
61884 extern int prove_locking;
61885 extern int lock_stat;
61886
61887@@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = N
61888 #endif
61889
61890 static struct ctl_table kern_table[] = {
61891+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
61892+ {
61893+ .ctl_name = CTL_UNNUMBERED,
61894+ .procname = "grsecurity",
61895+ .mode = 0500,
61896+ .child = grsecurity_table,
61897+ },
61898+#endif
61899+
61900+#ifdef CONFIG_PAX_SOFTMODE
61901+ {
61902+ .ctl_name = CTL_UNNUMBERED,
61903+ .procname = "pax",
61904+ .mode = 0500,
61905+ .child = pax_table,
61906+ },
61907+#endif
61908+
61909 {
61910 .ctl_name = CTL_UNNUMBERED,
61911 .procname = "sched_child_runs_first",
61912@@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
61913 .data = &modprobe_path,
61914 .maxlen = KMOD_PATH_LEN,
61915 .mode = 0644,
61916- .proc_handler = &proc_dostring,
61917- .strategy = &sysctl_string,
61918+ .proc_handler = &proc_dostring_modpriv,
61919+ .strategy = &sysctl_string_modpriv,
61920 },
61921 {
61922 .ctl_name = CTL_UNNUMBERED,
61923@@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
61924 .mode = 0644,
61925 .proc_handler = &proc_dointvec
61926 },
61927+ {
61928+ .procname = "heap_stack_gap",
61929+ .data = &sysctl_heap_stack_gap,
61930+ .maxlen = sizeof(sysctl_heap_stack_gap),
61931+ .mode = 0644,
61932+ .proc_handler = proc_doulongvec_minmax,
61933+ },
61934 #else
61935 {
61936 .ctl_name = CTL_UNNUMBERED,
61937@@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl
61938 return 0;
61939 }
61940
61941+static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
61942+
61943 static int parse_table(int __user *name, int nlen,
61944 void __user *oldval, size_t __user *oldlenp,
61945 void __user *newval, size_t newlen,
61946@@ -1821,7 +1871,7 @@ repeat:
61947 if (n == table->ctl_name) {
61948 int error;
61949 if (table->child) {
61950- if (sysctl_perm(root, table, MAY_EXEC))
61951+ if (sysctl_perm_nochk(root, table, MAY_EXEC))
61952 return -EPERM;
61953 name++;
61954 nlen--;
61955@@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *r
61956 int error;
61957 int mode;
61958
61959+ if (table->parent != NULL && table->parent->procname != NULL &&
61960+ table->procname != NULL &&
61961+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
61962+ return -EACCES;
61963+ if (gr_handle_chroot_sysctl(op))
61964+ return -EACCES;
61965+ error = gr_handle_sysctl(table, op);
61966+ if (error)
61967+ return error;
61968+
61969+ error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
61970+ if (error)
61971+ return error;
61972+
61973+ if (root->permissions)
61974+ mode = root->permissions(root, current->nsproxy, table);
61975+ else
61976+ mode = table->mode;
61977+
61978+ return test_perm(mode, op);
61979+}
61980+
61981+int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
61982+{
61983+ int error;
61984+ int mode;
61985+
61986 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
61987 if (error)
61988 return error;
61989@@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *tabl
61990 buffer, lenp, ppos);
61991 }
61992
61993+int proc_dostring_modpriv(struct ctl_table *table, int write,
61994+ void __user *buffer, size_t *lenp, loff_t *ppos)
61995+{
61996+ if (write && !capable(CAP_SYS_MODULE))
61997+ return -EPERM;
61998+
61999+ return _proc_do_string(table->data, table->maxlen, write,
62000+ buffer, lenp, ppos);
62001+}
62002+
62003
62004 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
62005 int *valp,
62006@@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(v
62007 vleft = table->maxlen / sizeof(unsigned long);
62008 left = *lenp;
62009
62010- for (; left && vleft--; i++, min++, max++, first=0) {
62011+ for (; left && vleft--; i++, first=0) {
62012 if (write) {
62013 while (left) {
62014 char c;
62015@@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *tabl
62016 return -ENOSYS;
62017 }
62018
62019+int proc_dostring_modpriv(struct ctl_table *table, int write,
62020+ void __user *buffer, size_t *lenp, loff_t *ppos)
62021+{
62022+ return -ENOSYS;
62023+}
62024+
62025 int proc_dointvec(struct ctl_table *table, int write,
62026 void __user *buffer, size_t *lenp, loff_t *ppos)
62027 {
62028@@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *tabl
62029 return 1;
62030 }
62031
62032+int sysctl_string_modpriv(struct ctl_table *table,
62033+ void __user *oldval, size_t __user *oldlenp,
62034+ void __user *newval, size_t newlen)
62035+{
62036+ if (newval && newlen && !capable(CAP_SYS_MODULE))
62037+ return -EPERM;
62038+
62039+ return sysctl_string(table, oldval, oldlenp, newval, newlen);
62040+}
62041+
62042 /*
62043 * This function makes sure that all of the integers in the vector
62044 * are between the minimum and maximum values given in the arrays
62045@@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *tabl
62046 return -ENOSYS;
62047 }
62048
62049+int sysctl_string_modpriv(struct ctl_table *table,
62050+ void __user *oldval, size_t __user *oldlenp,
62051+ void __user *newval, size_t newlen)
62052+{
62053+ return -ENOSYS;
62054+}
62055+
62056 int sysctl_intvec(struct ctl_table *table,
62057 void __user *oldval, size_t __user *oldlenp,
62058 void __user *newval, size_t newlen)
62059@@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
62060 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
62061 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
62062 EXPORT_SYMBOL(proc_dostring);
62063+EXPORT_SYMBOL(proc_dostring_modpriv);
62064 EXPORT_SYMBOL(proc_doulongvec_minmax);
62065 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
62066 EXPORT_SYMBOL(register_sysctl_table);
62067@@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
62068 EXPORT_SYMBOL(sysctl_jiffies);
62069 EXPORT_SYMBOL(sysctl_ms_jiffies);
62070 EXPORT_SYMBOL(sysctl_string);
62071+EXPORT_SYMBOL(sysctl_string_modpriv);
62072 EXPORT_SYMBOL(sysctl_data);
62073 EXPORT_SYMBOL(unregister_sysctl_table);
62074diff -urNp linux-2.6.32.41/kernel/sysctl_check.c linux-2.6.32.41/kernel/sysctl_check.c
62075--- linux-2.6.32.41/kernel/sysctl_check.c 2011-03-27 14:31:47.000000000 -0400
62076+++ linux-2.6.32.41/kernel/sysctl_check.c 2011-04-17 15:56:46.000000000 -0400
62077@@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *n
62078 } else {
62079 if ((table->strategy == sysctl_data) ||
62080 (table->strategy == sysctl_string) ||
62081+ (table->strategy == sysctl_string_modpriv) ||
62082 (table->strategy == sysctl_intvec) ||
62083 (table->strategy == sysctl_jiffies) ||
62084 (table->strategy == sysctl_ms_jiffies) ||
62085 (table->proc_handler == proc_dostring) ||
62086+ (table->proc_handler == proc_dostring_modpriv) ||
62087 (table->proc_handler == proc_dointvec) ||
62088 (table->proc_handler == proc_dointvec_minmax) ||
62089 (table->proc_handler == proc_dointvec_jiffies) ||
62090diff -urNp linux-2.6.32.41/kernel/taskstats.c linux-2.6.32.41/kernel/taskstats.c
62091--- linux-2.6.32.41/kernel/taskstats.c 2011-03-27 14:31:47.000000000 -0400
62092+++ linux-2.6.32.41/kernel/taskstats.c 2011-04-17 15:56:46.000000000 -0400
62093@@ -26,9 +26,12 @@
62094 #include <linux/cgroup.h>
62095 #include <linux/fs.h>
62096 #include <linux/file.h>
62097+#include <linux/grsecurity.h>
62098 #include <net/genetlink.h>
62099 #include <asm/atomic.h>
62100
62101+extern int gr_is_taskstats_denied(int pid);
62102+
62103 /*
62104 * Maximum length of a cpumask that can be specified in
62105 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
62106@@ -433,6 +436,9 @@ static int taskstats_user_cmd(struct sk_
62107 size_t size;
62108 cpumask_var_t mask;
62109
62110+ if (gr_is_taskstats_denied(current->pid))
62111+ return -EACCES;
62112+
62113 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
62114 return -ENOMEM;
62115
62116diff -urNp linux-2.6.32.41/kernel/time/tick-broadcast.c linux-2.6.32.41/kernel/time/tick-broadcast.c
62117--- linux-2.6.32.41/kernel/time/tick-broadcast.c 2011-05-23 16:56:59.000000000 -0400
62118+++ linux-2.6.32.41/kernel/time/tick-broadcast.c 2011-05-23 16:57:13.000000000 -0400
62119@@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct cl
62120 * then clear the broadcast bit.
62121 */
62122 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
62123- int cpu = smp_processor_id();
62124+ cpu = smp_processor_id();
62125
62126 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
62127 tick_broadcast_clear_oneshot(cpu);
62128diff -urNp linux-2.6.32.41/kernel/time/timekeeping.c linux-2.6.32.41/kernel/time/timekeeping.c
62129--- linux-2.6.32.41/kernel/time/timekeeping.c 2011-05-23 16:56:59.000000000 -0400
62130+++ linux-2.6.32.41/kernel/time/timekeeping.c 2011-05-23 19:09:33.000000000 -0400
62131@@ -14,6 +14,7 @@
62132 #include <linux/init.h>
62133 #include <linux/mm.h>
62134 #include <linux/sched.h>
62135+#include <linux/grsecurity.h>
62136 #include <linux/sysdev.h>
62137 #include <linux/clocksource.h>
62138 #include <linux/jiffies.h>
62139@@ -176,7 +177,7 @@ void update_xtime_cache(u64 nsec)
62140 */
62141 struct timespec ts = xtime;
62142 timespec_add_ns(&ts, nsec);
62143- ACCESS_ONCE(xtime_cache) = ts;
62144+ ACCESS_ONCE_RW(xtime_cache) = ts;
62145 }
62146
62147 /* must hold xtime_lock */
62148@@ -329,6 +330,8 @@ int do_settimeofday(struct timespec *tv)
62149 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
62150 return -EINVAL;
62151
62152+ gr_log_timechange();
62153+
62154 write_seqlock_irqsave(&xtime_lock, flags);
62155
62156 timekeeping_forward_now();
62157diff -urNp linux-2.6.32.41/kernel/time/timer_list.c linux-2.6.32.41/kernel/time/timer_list.c
62158--- linux-2.6.32.41/kernel/time/timer_list.c 2011-03-27 14:31:47.000000000 -0400
62159+++ linux-2.6.32.41/kernel/time/timer_list.c 2011-04-17 15:56:46.000000000 -0400
62160@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
62161
62162 static void print_name_offset(struct seq_file *m, void *sym)
62163 {
62164+#ifdef CONFIG_GRKERNSEC_HIDESYM
62165+ SEQ_printf(m, "<%p>", NULL);
62166+#else
62167 char symname[KSYM_NAME_LEN];
62168
62169 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
62170 SEQ_printf(m, "<%p>", sym);
62171 else
62172 SEQ_printf(m, "%s", symname);
62173+#endif
62174 }
62175
62176 static void
62177@@ -112,7 +116,11 @@ next_one:
62178 static void
62179 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
62180 {
62181+#ifdef CONFIG_GRKERNSEC_HIDESYM
62182+ SEQ_printf(m, " .base: %p\n", NULL);
62183+#else
62184 SEQ_printf(m, " .base: %p\n", base);
62185+#endif
62186 SEQ_printf(m, " .index: %d\n",
62187 base->index);
62188 SEQ_printf(m, " .resolution: %Lu nsecs\n",
62189@@ -289,7 +297,11 @@ static int __init init_timer_list_procfs
62190 {
62191 struct proc_dir_entry *pe;
62192
62193+#ifdef CONFIG_GRKERNSEC_PROC_ADD
62194+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
62195+#else
62196 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
62197+#endif
62198 if (!pe)
62199 return -ENOMEM;
62200 return 0;
62201diff -urNp linux-2.6.32.41/kernel/time/timer_stats.c linux-2.6.32.41/kernel/time/timer_stats.c
62202--- linux-2.6.32.41/kernel/time/timer_stats.c 2011-03-27 14:31:47.000000000 -0400
62203+++ linux-2.6.32.41/kernel/time/timer_stats.c 2011-05-04 17:56:28.000000000 -0400
62204@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
62205 static unsigned long nr_entries;
62206 static struct entry entries[MAX_ENTRIES];
62207
62208-static atomic_t overflow_count;
62209+static atomic_unchecked_t overflow_count;
62210
62211 /*
62212 * The entries are in a hash-table, for fast lookup:
62213@@ -140,7 +140,7 @@ static void reset_entries(void)
62214 nr_entries = 0;
62215 memset(entries, 0, sizeof(entries));
62216 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
62217- atomic_set(&overflow_count, 0);
62218+ atomic_set_unchecked(&overflow_count, 0);
62219 }
62220
62221 static struct entry *alloc_entry(void)
62222@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
62223 if (likely(entry))
62224 entry->count++;
62225 else
62226- atomic_inc(&overflow_count);
62227+ atomic_inc_unchecked(&overflow_count);
62228
62229 out_unlock:
62230 spin_unlock_irqrestore(lock, flags);
62231@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
62232
62233 static void print_name_offset(struct seq_file *m, unsigned long addr)
62234 {
62235+#ifdef CONFIG_GRKERNSEC_HIDESYM
62236+ seq_printf(m, "<%p>", NULL);
62237+#else
62238 char symname[KSYM_NAME_LEN];
62239
62240 if (lookup_symbol_name(addr, symname) < 0)
62241 seq_printf(m, "<%p>", (void *)addr);
62242 else
62243 seq_printf(m, "%s", symname);
62244+#endif
62245 }
62246
62247 static int tstats_show(struct seq_file *m, void *v)
62248@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
62249
62250 seq_puts(m, "Timer Stats Version: v0.2\n");
62251 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
62252- if (atomic_read(&overflow_count))
62253+ if (atomic_read_unchecked(&overflow_count))
62254 seq_printf(m, "Overflow: %d entries\n",
62255- atomic_read(&overflow_count));
62256+ atomic_read_unchecked(&overflow_count));
62257
62258 for (i = 0; i < nr_entries; i++) {
62259 entry = entries + i;
62260@@ -415,7 +419,11 @@ static int __init init_tstats_procfs(voi
62261 {
62262 struct proc_dir_entry *pe;
62263
62264+#ifdef CONFIG_GRKERNSEC_PROC_ADD
62265+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
62266+#else
62267 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
62268+#endif
62269 if (!pe)
62270 return -ENOMEM;
62271 return 0;
62272diff -urNp linux-2.6.32.41/kernel/time.c linux-2.6.32.41/kernel/time.c
62273--- linux-2.6.32.41/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
62274+++ linux-2.6.32.41/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
62275@@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec
62276 return error;
62277
62278 if (tz) {
62279+ /* we log in do_settimeofday called below, so don't log twice
62280+ */
62281+ if (!tv)
62282+ gr_log_timechange();
62283+
62284 /* SMP safe, global irq locking makes it work. */
62285 sys_tz = *tz;
62286 update_vsyscall_tz();
62287@@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
62288 * Avoid unnecessary multiplications/divisions in the
62289 * two most common HZ cases:
62290 */
62291-unsigned int inline jiffies_to_msecs(const unsigned long j)
62292+inline unsigned int jiffies_to_msecs(const unsigned long j)
62293 {
62294 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
62295 return (MSEC_PER_SEC / HZ) * j;
62296@@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(con
62297 }
62298 EXPORT_SYMBOL(jiffies_to_msecs);
62299
62300-unsigned int inline jiffies_to_usecs(const unsigned long j)
62301+inline unsigned int jiffies_to_usecs(const unsigned long j)
62302 {
62303 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
62304 return (USEC_PER_SEC / HZ) * j;
62305diff -urNp linux-2.6.32.41/kernel/timer.c linux-2.6.32.41/kernel/timer.c
62306--- linux-2.6.32.41/kernel/timer.c 2011-03-27 14:31:47.000000000 -0400
62307+++ linux-2.6.32.41/kernel/timer.c 2011-04-17 15:56:46.000000000 -0400
62308@@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
62309 /*
62310 * This function runs timers and the timer-tq in bottom half context.
62311 */
62312-static void run_timer_softirq(struct softirq_action *h)
62313+static void run_timer_softirq(void)
62314 {
62315 struct tvec_base *base = __get_cpu_var(tvec_bases);
62316
62317diff -urNp linux-2.6.32.41/kernel/trace/blktrace.c linux-2.6.32.41/kernel/trace/blktrace.c
62318--- linux-2.6.32.41/kernel/trace/blktrace.c 2011-03-27 14:31:47.000000000 -0400
62319+++ linux-2.6.32.41/kernel/trace/blktrace.c 2011-05-04 17:56:28.000000000 -0400
62320@@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct f
62321 struct blk_trace *bt = filp->private_data;
62322 char buf[16];
62323
62324- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
62325+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
62326
62327 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
62328 }
62329@@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(str
62330 return 1;
62331
62332 bt = buf->chan->private_data;
62333- atomic_inc(&bt->dropped);
62334+ atomic_inc_unchecked(&bt->dropped);
62335 return 0;
62336 }
62337
62338@@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_qu
62339
62340 bt->dir = dir;
62341 bt->dev = dev;
62342- atomic_set(&bt->dropped, 0);
62343+ atomic_set_unchecked(&bt->dropped, 0);
62344
62345 ret = -EIO;
62346 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
62347diff -urNp linux-2.6.32.41/kernel/trace/ftrace.c linux-2.6.32.41/kernel/trace/ftrace.c
62348--- linux-2.6.32.41/kernel/trace/ftrace.c 2011-03-27 14:31:47.000000000 -0400
62349+++ linux-2.6.32.41/kernel/trace/ftrace.c 2011-04-17 15:56:46.000000000 -0400
62350@@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod,
62351
62352 ip = rec->ip;
62353
62354+ ret = ftrace_arch_code_modify_prepare();
62355+ FTRACE_WARN_ON(ret);
62356+ if (ret)
62357+ return 0;
62358+
62359 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
62360+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
62361 if (ret) {
62362 ftrace_bug(ret, ip);
62363 rec->flags |= FTRACE_FL_FAILED;
62364- return 0;
62365 }
62366- return 1;
62367+ return ret ? 0 : 1;
62368 }
62369
62370 /*
62371diff -urNp linux-2.6.32.41/kernel/trace/ring_buffer.c linux-2.6.32.41/kernel/trace/ring_buffer.c
62372--- linux-2.6.32.41/kernel/trace/ring_buffer.c 2011-03-27 14:31:47.000000000 -0400
62373+++ linux-2.6.32.41/kernel/trace/ring_buffer.c 2011-04-17 15:56:46.000000000 -0400
62374@@ -606,7 +606,7 @@ static struct list_head *rb_list_head(st
62375 * the reader page). But if the next page is a header page,
62376 * its flags will be non zero.
62377 */
62378-static int inline
62379+static inline int
62380 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
62381 struct buffer_page *page, struct list_head *list)
62382 {
62383diff -urNp linux-2.6.32.41/kernel/trace/trace.c linux-2.6.32.41/kernel/trace/trace.c
62384--- linux-2.6.32.41/kernel/trace/trace.c 2011-03-27 14:31:47.000000000 -0400
62385+++ linux-2.6.32.41/kernel/trace/trace.c 2011-05-16 21:46:57.000000000 -0400
62386@@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(
62387 size_t rem;
62388 unsigned int i;
62389
62390+ pax_track_stack();
62391+
62392 /* copy the tracer to avoid using a global lock all around */
62393 mutex_lock(&trace_types_lock);
62394 if (unlikely(old_tracer != current_trace && current_trace)) {
62395@@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file
62396 int entries, size, i;
62397 size_t ret;
62398
62399+ pax_track_stack();
62400+
62401 if (*ppos & (PAGE_SIZE - 1)) {
62402 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
62403 return -EINVAL;
62404@@ -3816,10 +3820,9 @@ static const struct file_operations trac
62405 };
62406 #endif
62407
62408-static struct dentry *d_tracer;
62409-
62410 struct dentry *tracing_init_dentry(void)
62411 {
62412+ static struct dentry *d_tracer;
62413 static int once;
62414
62415 if (d_tracer)
62416@@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
62417 return d_tracer;
62418 }
62419
62420-static struct dentry *d_percpu;
62421-
62422 struct dentry *tracing_dentry_percpu(void)
62423 {
62424+ static struct dentry *d_percpu;
62425 static int once;
62426 struct dentry *d_tracer;
62427
62428diff -urNp linux-2.6.32.41/kernel/trace/trace_events.c linux-2.6.32.41/kernel/trace/trace_events.c
62429--- linux-2.6.32.41/kernel/trace/trace_events.c 2011-03-27 14:31:47.000000000 -0400
62430+++ linux-2.6.32.41/kernel/trace/trace_events.c 2011-04-17 15:56:46.000000000 -0400
62431@@ -951,6 +951,8 @@ static LIST_HEAD(ftrace_module_file_list
62432 * Modules must own their file_operations to keep up with
62433 * reference counting.
62434 */
62435+
62436+/* cannot be const */
62437 struct ftrace_module_file_ops {
62438 struct list_head list;
62439 struct module *mod;
62440diff -urNp linux-2.6.32.41/kernel/trace/trace_mmiotrace.c linux-2.6.32.41/kernel/trace/trace_mmiotrace.c
62441--- linux-2.6.32.41/kernel/trace/trace_mmiotrace.c 2011-03-27 14:31:47.000000000 -0400
62442+++ linux-2.6.32.41/kernel/trace/trace_mmiotrace.c 2011-05-04 17:56:28.000000000 -0400
62443@@ -23,7 +23,7 @@ struct header_iter {
62444 static struct trace_array *mmio_trace_array;
62445 static bool overrun_detected;
62446 static unsigned long prev_overruns;
62447-static atomic_t dropped_count;
62448+static atomic_unchecked_t dropped_count;
62449
62450 static void mmio_reset_data(struct trace_array *tr)
62451 {
62452@@ -126,7 +126,7 @@ static void mmio_close(struct trace_iter
62453
62454 static unsigned long count_overruns(struct trace_iterator *iter)
62455 {
62456- unsigned long cnt = atomic_xchg(&dropped_count, 0);
62457+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
62458 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
62459
62460 if (over > prev_overruns)
62461@@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct
62462 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
62463 sizeof(*entry), 0, pc);
62464 if (!event) {
62465- atomic_inc(&dropped_count);
62466+ atomic_inc_unchecked(&dropped_count);
62467 return;
62468 }
62469 entry = ring_buffer_event_data(event);
62470@@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct
62471 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
62472 sizeof(*entry), 0, pc);
62473 if (!event) {
62474- atomic_inc(&dropped_count);
62475+ atomic_inc_unchecked(&dropped_count);
62476 return;
62477 }
62478 entry = ring_buffer_event_data(event);
62479diff -urNp linux-2.6.32.41/kernel/trace/trace_output.c linux-2.6.32.41/kernel/trace/trace_output.c
62480--- linux-2.6.32.41/kernel/trace/trace_output.c 2011-03-27 14:31:47.000000000 -0400
62481+++ linux-2.6.32.41/kernel/trace/trace_output.c 2011-04-17 15:56:46.000000000 -0400
62482@@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s,
62483 return 0;
62484 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
62485 if (!IS_ERR(p)) {
62486- p = mangle_path(s->buffer + s->len, p, "\n");
62487+ p = mangle_path(s->buffer + s->len, p, "\n\\");
62488 if (p) {
62489 s->len = p - s->buffer;
62490 return 1;
62491diff -urNp linux-2.6.32.41/kernel/trace/trace_stack.c linux-2.6.32.41/kernel/trace/trace_stack.c
62492--- linux-2.6.32.41/kernel/trace/trace_stack.c 2011-03-27 14:31:47.000000000 -0400
62493+++ linux-2.6.32.41/kernel/trace/trace_stack.c 2011-04-17 15:56:46.000000000 -0400
62494@@ -50,7 +50,7 @@ static inline void check_stack(void)
62495 return;
62496
62497 /* we do not handle interrupt stacks yet */
62498- if (!object_is_on_stack(&this_size))
62499+ if (!object_starts_on_stack(&this_size))
62500 return;
62501
62502 local_irq_save(flags);
62503diff -urNp linux-2.6.32.41/kernel/trace/trace_workqueue.c linux-2.6.32.41/kernel/trace/trace_workqueue.c
62504--- linux-2.6.32.41/kernel/trace/trace_workqueue.c 2011-03-27 14:31:47.000000000 -0400
62505+++ linux-2.6.32.41/kernel/trace/trace_workqueue.c 2011-04-17 15:56:46.000000000 -0400
62506@@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
62507 int cpu;
62508 pid_t pid;
62509 /* Can be inserted from interrupt or user context, need to be atomic */
62510- atomic_t inserted;
62511+ atomic_unchecked_t inserted;
62512 /*
62513 * Don't need to be atomic, works are serialized in a single workqueue thread
62514 * on a single CPU.
62515@@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_st
62516 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
62517 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
62518 if (node->pid == wq_thread->pid) {
62519- atomic_inc(&node->inserted);
62520+ atomic_inc_unchecked(&node->inserted);
62521 goto found;
62522 }
62523 }
62524@@ -205,7 +205,7 @@ static int workqueue_stat_show(struct se
62525 tsk = get_pid_task(pid, PIDTYPE_PID);
62526 if (tsk) {
62527 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
62528- atomic_read(&cws->inserted), cws->executed,
62529+ atomic_read_unchecked(&cws->inserted), cws->executed,
62530 tsk->comm);
62531 put_task_struct(tsk);
62532 }
62533diff -urNp linux-2.6.32.41/kernel/user.c linux-2.6.32.41/kernel/user.c
62534--- linux-2.6.32.41/kernel/user.c 2011-03-27 14:31:47.000000000 -0400
62535+++ linux-2.6.32.41/kernel/user.c 2011-04-17 15:56:46.000000000 -0400
62536@@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct use
62537 spin_lock_irq(&uidhash_lock);
62538 up = uid_hash_find(uid, hashent);
62539 if (up) {
62540+ put_user_ns(ns);
62541 key_put(new->uid_keyring);
62542 key_put(new->session_keyring);
62543 kmem_cache_free(uid_cachep, new);
62544diff -urNp linux-2.6.32.41/lib/bug.c linux-2.6.32.41/lib/bug.c
62545--- linux-2.6.32.41/lib/bug.c 2011-03-27 14:31:47.000000000 -0400
62546+++ linux-2.6.32.41/lib/bug.c 2011-04-17 15:56:46.000000000 -0400
62547@@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned l
62548 return BUG_TRAP_TYPE_NONE;
62549
62550 bug = find_bug(bugaddr);
62551+ if (!bug)
62552+ return BUG_TRAP_TYPE_NONE;
62553
62554 printk(KERN_EMERG "------------[ cut here ]------------\n");
62555
62556diff -urNp linux-2.6.32.41/lib/debugobjects.c linux-2.6.32.41/lib/debugobjects.c
62557--- linux-2.6.32.41/lib/debugobjects.c 2011-03-27 14:31:47.000000000 -0400
62558+++ linux-2.6.32.41/lib/debugobjects.c 2011-04-17 15:56:46.000000000 -0400
62559@@ -277,7 +277,7 @@ static void debug_object_is_on_stack(voi
62560 if (limit > 4)
62561 return;
62562
62563- is_on_stack = object_is_on_stack(addr);
62564+ is_on_stack = object_starts_on_stack(addr);
62565 if (is_on_stack == onstack)
62566 return;
62567
62568diff -urNp linux-2.6.32.41/lib/dma-debug.c linux-2.6.32.41/lib/dma-debug.c
62569--- linux-2.6.32.41/lib/dma-debug.c 2011-03-27 14:31:47.000000000 -0400
62570+++ linux-2.6.32.41/lib/dma-debug.c 2011-04-17 15:56:46.000000000 -0400
62571@@ -861,7 +861,7 @@ out:
62572
62573 static void check_for_stack(struct device *dev, void *addr)
62574 {
62575- if (object_is_on_stack(addr))
62576+ if (object_starts_on_stack(addr))
62577 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
62578 "stack [addr=%p]\n", addr);
62579 }
62580diff -urNp linux-2.6.32.41/lib/idr.c linux-2.6.32.41/lib/idr.c
62581--- linux-2.6.32.41/lib/idr.c 2011-03-27 14:31:47.000000000 -0400
62582+++ linux-2.6.32.41/lib/idr.c 2011-04-17 15:56:46.000000000 -0400
62583@@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, in
62584 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
62585
62586 /* if already at the top layer, we need to grow */
62587- if (id >= 1 << (idp->layers * IDR_BITS)) {
62588+ if (id >= (1 << (idp->layers * IDR_BITS))) {
62589 *starting_id = id;
62590 return IDR_NEED_TO_GROW;
62591 }
62592diff -urNp linux-2.6.32.41/lib/inflate.c linux-2.6.32.41/lib/inflate.c
62593--- linux-2.6.32.41/lib/inflate.c 2011-03-27 14:31:47.000000000 -0400
62594+++ linux-2.6.32.41/lib/inflate.c 2011-04-17 15:56:46.000000000 -0400
62595@@ -266,7 +266,7 @@ static void free(void *where)
62596 malloc_ptr = free_mem_ptr;
62597 }
62598 #else
62599-#define malloc(a) kmalloc(a, GFP_KERNEL)
62600+#define malloc(a) kmalloc((a), GFP_KERNEL)
62601 #define free(a) kfree(a)
62602 #endif
62603
62604diff -urNp linux-2.6.32.41/lib/Kconfig.debug linux-2.6.32.41/lib/Kconfig.debug
62605--- linux-2.6.32.41/lib/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
62606+++ linux-2.6.32.41/lib/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
62607@@ -905,7 +905,7 @@ config LATENCYTOP
62608 select STACKTRACE
62609 select SCHEDSTATS
62610 select SCHED_DEBUG
62611- depends on HAVE_LATENCYTOP_SUPPORT
62612+ depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
62613 help
62614 Enable this option if you want to use the LatencyTOP tool
62615 to find out which userspace is blocking on what kernel operations.
62616diff -urNp linux-2.6.32.41/lib/kobject.c linux-2.6.32.41/lib/kobject.c
62617--- linux-2.6.32.41/lib/kobject.c 2011-03-27 14:31:47.000000000 -0400
62618+++ linux-2.6.32.41/lib/kobject.c 2011-04-17 15:56:46.000000000 -0400
62619@@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct ko
62620 return ret;
62621 }
62622
62623-struct sysfs_ops kobj_sysfs_ops = {
62624+const struct sysfs_ops kobj_sysfs_ops = {
62625 .show = kobj_attr_show,
62626 .store = kobj_attr_store,
62627 };
62628@@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
62629 * If the kset was not able to be created, NULL will be returned.
62630 */
62631 static struct kset *kset_create(const char *name,
62632- struct kset_uevent_ops *uevent_ops,
62633+ const struct kset_uevent_ops *uevent_ops,
62634 struct kobject *parent_kobj)
62635 {
62636 struct kset *kset;
62637@@ -832,7 +832,7 @@ static struct kset *kset_create(const ch
62638 * If the kset was not able to be created, NULL will be returned.
62639 */
62640 struct kset *kset_create_and_add(const char *name,
62641- struct kset_uevent_ops *uevent_ops,
62642+ const struct kset_uevent_ops *uevent_ops,
62643 struct kobject *parent_kobj)
62644 {
62645 struct kset *kset;
62646diff -urNp linux-2.6.32.41/lib/kobject_uevent.c linux-2.6.32.41/lib/kobject_uevent.c
62647--- linux-2.6.32.41/lib/kobject_uevent.c 2011-03-27 14:31:47.000000000 -0400
62648+++ linux-2.6.32.41/lib/kobject_uevent.c 2011-04-17 15:56:46.000000000 -0400
62649@@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *k
62650 const char *subsystem;
62651 struct kobject *top_kobj;
62652 struct kset *kset;
62653- struct kset_uevent_ops *uevent_ops;
62654+ const struct kset_uevent_ops *uevent_ops;
62655 u64 seq;
62656 int i = 0;
62657 int retval = 0;
62658diff -urNp linux-2.6.32.41/lib/kref.c linux-2.6.32.41/lib/kref.c
62659--- linux-2.6.32.41/lib/kref.c 2011-03-27 14:31:47.000000000 -0400
62660+++ linux-2.6.32.41/lib/kref.c 2011-04-17 15:56:46.000000000 -0400
62661@@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
62662 */
62663 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
62664 {
62665- WARN_ON(release == NULL);
62666+ BUG_ON(release == NULL);
62667 WARN_ON(release == (void (*)(struct kref *))kfree);
62668
62669 if (atomic_dec_and_test(&kref->refcount)) {
62670diff -urNp linux-2.6.32.41/lib/parser.c linux-2.6.32.41/lib/parser.c
62671--- linux-2.6.32.41/lib/parser.c 2011-03-27 14:31:47.000000000 -0400
62672+++ linux-2.6.32.41/lib/parser.c 2011-04-17 15:56:46.000000000 -0400
62673@@ -126,7 +126,7 @@ static int match_number(substring_t *s,
62674 char *buf;
62675 int ret;
62676
62677- buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
62678+ buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
62679 if (!buf)
62680 return -ENOMEM;
62681 memcpy(buf, s->from, s->to - s->from);
62682diff -urNp linux-2.6.32.41/lib/radix-tree.c linux-2.6.32.41/lib/radix-tree.c
62683--- linux-2.6.32.41/lib/radix-tree.c 2011-03-27 14:31:47.000000000 -0400
62684+++ linux-2.6.32.41/lib/radix-tree.c 2011-04-17 15:56:46.000000000 -0400
62685@@ -81,7 +81,7 @@ struct radix_tree_preload {
62686 int nr;
62687 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
62688 };
62689-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
62690+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
62691
62692 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
62693 {
62694diff -urNp linux-2.6.32.41/lib/random32.c linux-2.6.32.41/lib/random32.c
62695--- linux-2.6.32.41/lib/random32.c 2011-03-27 14:31:47.000000000 -0400
62696+++ linux-2.6.32.41/lib/random32.c 2011-04-17 15:56:46.000000000 -0400
62697@@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *
62698 */
62699 static inline u32 __seed(u32 x, u32 m)
62700 {
62701- return (x < m) ? x + m : x;
62702+ return (x <= m) ? x + m + 1 : x;
62703 }
62704
62705 /**
62706diff -urNp linux-2.6.32.41/lib/vsprintf.c linux-2.6.32.41/lib/vsprintf.c
62707--- linux-2.6.32.41/lib/vsprintf.c 2011-03-27 14:31:47.000000000 -0400
62708+++ linux-2.6.32.41/lib/vsprintf.c 2011-04-17 15:56:46.000000000 -0400
62709@@ -16,6 +16,9 @@
62710 * - scnprintf and vscnprintf
62711 */
62712
62713+#ifdef CONFIG_GRKERNSEC_HIDESYM
62714+#define __INCLUDED_BY_HIDESYM 1
62715+#endif
62716 #include <stdarg.h>
62717 #include <linux/module.h>
62718 #include <linux/types.h>
62719@@ -546,12 +549,12 @@ static char *number(char *buf, char *end
62720 return buf;
62721 }
62722
62723-static char *string(char *buf, char *end, char *s, struct printf_spec spec)
62724+static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
62725 {
62726 int len, i;
62727
62728 if ((unsigned long)s < PAGE_SIZE)
62729- s = "<NULL>";
62730+ s = "(null)";
62731
62732 len = strnlen(s, spec.precision);
62733
62734@@ -581,7 +584,7 @@ static char *symbol_string(char *buf, ch
62735 unsigned long value = (unsigned long) ptr;
62736 #ifdef CONFIG_KALLSYMS
62737 char sym[KSYM_SYMBOL_LEN];
62738- if (ext != 'f' && ext != 's')
62739+ if (ext != 'f' && ext != 's' && ext != 'a')
62740 sprint_symbol(sym, value);
62741 else
62742 kallsyms_lookup(value, NULL, NULL, NULL, sym);
62743@@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf,
62744 * - 'f' For simple symbolic function names without offset
62745 * - 'S' For symbolic direct pointers with offset
62746 * - 's' For symbolic direct pointers without offset
62747+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
62748+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
62749 * - 'R' For a struct resource pointer, it prints the range of
62750 * addresses (not the name nor the flags)
62751 * - 'M' For a 6-byte MAC address, it prints the address in the
62752@@ -822,7 +827,7 @@ static char *pointer(const char *fmt, ch
62753 struct printf_spec spec)
62754 {
62755 if (!ptr)
62756- return string(buf, end, "(null)", spec);
62757+ return string(buf, end, "(nil)", spec);
62758
62759 switch (*fmt) {
62760 case 'F':
62761@@ -831,6 +836,14 @@ static char *pointer(const char *fmt, ch
62762 case 's':
62763 /* Fallthrough */
62764 case 'S':
62765+#ifdef CONFIG_GRKERNSEC_HIDESYM
62766+ break;
62767+#else
62768+ return symbol_string(buf, end, ptr, spec, *fmt);
62769+#endif
62770+ case 'a':
62771+ /* Fallthrough */
62772+ case 'A':
62773 return symbol_string(buf, end, ptr, spec, *fmt);
62774 case 'R':
62775 return resource_string(buf, end, ptr, spec);
62776@@ -1445,7 +1458,7 @@ do { \
62777 size_t len;
62778 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
62779 || (unsigned long)save_str < PAGE_SIZE)
62780- save_str = "<NULL>";
62781+ save_str = "(null)";
62782 len = strlen(save_str);
62783 if (str + len + 1 < end)
62784 memcpy(str, save_str, len + 1);
62785@@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size,
62786 typeof(type) value; \
62787 if (sizeof(type) == 8) { \
62788 args = PTR_ALIGN(args, sizeof(u32)); \
62789- *(u32 *)&value = *(u32 *)args; \
62790- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
62791+ *(u32 *)&value = *(const u32 *)args; \
62792+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
62793 } else { \
62794 args = PTR_ALIGN(args, sizeof(type)); \
62795- value = *(typeof(type) *)args; \
62796+ value = *(const typeof(type) *)args; \
62797 } \
62798 args += sizeof(type); \
62799 value; \
62800@@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size,
62801 const char *str_arg = args;
62802 size_t len = strlen(str_arg);
62803 args += len + 1;
62804- str = string(str, end, (char *)str_arg, spec);
62805+ str = string(str, end, str_arg, spec);
62806 break;
62807 }
62808
62809diff -urNp linux-2.6.32.41/localversion-grsec linux-2.6.32.41/localversion-grsec
62810--- linux-2.6.32.41/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
62811+++ linux-2.6.32.41/localversion-grsec 2011-04-17 15:56:46.000000000 -0400
62812@@ -0,0 +1 @@
62813+-grsec
62814diff -urNp linux-2.6.32.41/Makefile linux-2.6.32.41/Makefile
62815--- linux-2.6.32.41/Makefile 2011-05-23 16:56:59.000000000 -0400
62816+++ linux-2.6.32.41/Makefile 2011-05-23 16:57:13.000000000 -0400
62817@@ -221,8 +221,8 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
62818
62819 HOSTCC = gcc
62820 HOSTCXX = g++
62821-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
62822-HOSTCXXFLAGS = -O2
62823+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-empty-body -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
62824+HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
62825
62826 # Decide whether to build built-in, modular, or both.
62827 # Normally, just do built-in.
62828@@ -342,6 +342,7 @@ LINUXINCLUDE := -Iinclude \
62829 KBUILD_CPPFLAGS := -D__KERNEL__
62830
62831 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
62832+ -W -Wno-empty-body -Wno-unused-parameter -Wno-missing-field-initializers \
62833 -fno-strict-aliasing -fno-common \
62834 -Werror-implicit-function-declaration \
62835 -Wno-format-security \
62836@@ -644,7 +645,7 @@ export mod_strip_cmd
62837
62838
62839 ifeq ($(KBUILD_EXTMOD),)
62840-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
62841+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
62842
62843 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
62844 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
62845diff -urNp linux-2.6.32.41/mm/backing-dev.c linux-2.6.32.41/mm/backing-dev.c
62846--- linux-2.6.32.41/mm/backing-dev.c 2011-03-27 14:31:47.000000000 -0400
62847+++ linux-2.6.32.41/mm/backing-dev.c 2011-05-04 17:56:28.000000000 -0400
62848@@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rc
62849 * Add the default flusher task that gets created for any bdi
62850 * that has dirty data pending writeout
62851 */
62852-void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
62853+static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
62854 {
62855 if (!bdi_cap_writeback_dirty(bdi))
62856 return;
62857diff -urNp linux-2.6.32.41/mm/filemap.c linux-2.6.32.41/mm/filemap.c
62858--- linux-2.6.32.41/mm/filemap.c 2011-03-27 14:31:47.000000000 -0400
62859+++ linux-2.6.32.41/mm/filemap.c 2011-04-17 15:56:46.000000000 -0400
62860@@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file
62861 struct address_space *mapping = file->f_mapping;
62862
62863 if (!mapping->a_ops->readpage)
62864- return -ENOEXEC;
62865+ return -ENODEV;
62866 file_accessed(file);
62867 vma->vm_ops = &generic_file_vm_ops;
62868 vma->vm_flags |= VM_CAN_NONLINEAR;
62869@@ -2027,6 +2027,7 @@ inline int generic_write_checks(struct f
62870 *pos = i_size_read(inode);
62871
62872 if (limit != RLIM_INFINITY) {
62873+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
62874 if (*pos >= limit) {
62875 send_sig(SIGXFSZ, current, 0);
62876 return -EFBIG;
62877diff -urNp linux-2.6.32.41/mm/fremap.c linux-2.6.32.41/mm/fremap.c
62878--- linux-2.6.32.41/mm/fremap.c 2011-03-27 14:31:47.000000000 -0400
62879+++ linux-2.6.32.41/mm/fremap.c 2011-04-17 15:56:46.000000000 -0400
62880@@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
62881 retry:
62882 vma = find_vma(mm, start);
62883
62884+#ifdef CONFIG_PAX_SEGMEXEC
62885+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
62886+ goto out;
62887+#endif
62888+
62889 /*
62890 * Make sure the vma is shared, that it supports prefaulting,
62891 * and that the remapped range is valid and fully within
62892@@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
62893 /*
62894 * drop PG_Mlocked flag for over-mapped range
62895 */
62896- unsigned int saved_flags = vma->vm_flags;
62897+ unsigned long saved_flags = vma->vm_flags;
62898 munlock_vma_pages_range(vma, start, start + size);
62899 vma->vm_flags = saved_flags;
62900 }
62901diff -urNp linux-2.6.32.41/mm/highmem.c linux-2.6.32.41/mm/highmem.c
62902--- linux-2.6.32.41/mm/highmem.c 2011-03-27 14:31:47.000000000 -0400
62903+++ linux-2.6.32.41/mm/highmem.c 2011-04-17 15:56:46.000000000 -0400
62904@@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
62905 * So no dangers, even with speculative execution.
62906 */
62907 page = pte_page(pkmap_page_table[i]);
62908+ pax_open_kernel();
62909 pte_clear(&init_mm, (unsigned long)page_address(page),
62910 &pkmap_page_table[i]);
62911-
62912+ pax_close_kernel();
62913 set_page_address(page, NULL);
62914 need_flush = 1;
62915 }
62916@@ -177,9 +178,11 @@ start:
62917 }
62918 }
62919 vaddr = PKMAP_ADDR(last_pkmap_nr);
62920+
62921+ pax_open_kernel();
62922 set_pte_at(&init_mm, vaddr,
62923 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
62924-
62925+ pax_close_kernel();
62926 pkmap_count[last_pkmap_nr] = 1;
62927 set_page_address(page, (void *)vaddr);
62928
62929diff -urNp linux-2.6.32.41/mm/hugetlb.c linux-2.6.32.41/mm/hugetlb.c
62930--- linux-2.6.32.41/mm/hugetlb.c 2011-03-27 14:31:47.000000000 -0400
62931+++ linux-2.6.32.41/mm/hugetlb.c 2011-04-17 15:56:46.000000000 -0400
62932@@ -1925,6 +1925,26 @@ static int unmap_ref_private(struct mm_s
62933 return 1;
62934 }
62935
62936+#ifdef CONFIG_PAX_SEGMEXEC
62937+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
62938+{
62939+ struct mm_struct *mm = vma->vm_mm;
62940+ struct vm_area_struct *vma_m;
62941+ unsigned long address_m;
62942+ pte_t *ptep_m;
62943+
62944+ vma_m = pax_find_mirror_vma(vma);
62945+ if (!vma_m)
62946+ return;
62947+
62948+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
62949+ address_m = address + SEGMEXEC_TASK_SIZE;
62950+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
62951+ get_page(page_m);
62952+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
62953+}
62954+#endif
62955+
62956 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
62957 unsigned long address, pte_t *ptep, pte_t pte,
62958 struct page *pagecache_page)
62959@@ -1996,6 +2016,11 @@ retry_avoidcopy:
62960 huge_ptep_clear_flush(vma, address, ptep);
62961 set_huge_pte_at(mm, address, ptep,
62962 make_huge_pte(vma, new_page, 1));
62963+
62964+#ifdef CONFIG_PAX_SEGMEXEC
62965+ pax_mirror_huge_pte(vma, address, new_page);
62966+#endif
62967+
62968 /* Make the old page be freed below */
62969 new_page = old_page;
62970 }
62971@@ -2127,6 +2152,10 @@ retry:
62972 && (vma->vm_flags & VM_SHARED)));
62973 set_huge_pte_at(mm, address, ptep, new_pte);
62974
62975+#ifdef CONFIG_PAX_SEGMEXEC
62976+ pax_mirror_huge_pte(vma, address, page);
62977+#endif
62978+
62979 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
62980 /* Optimization, do the COW without a second fault */
62981 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
62982@@ -2155,6 +2184,28 @@ int hugetlb_fault(struct mm_struct *mm,
62983 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
62984 struct hstate *h = hstate_vma(vma);
62985
62986+#ifdef CONFIG_PAX_SEGMEXEC
62987+ struct vm_area_struct *vma_m;
62988+
62989+ vma_m = pax_find_mirror_vma(vma);
62990+ if (vma_m) {
62991+ unsigned long address_m;
62992+
62993+ if (vma->vm_start > vma_m->vm_start) {
62994+ address_m = address;
62995+ address -= SEGMEXEC_TASK_SIZE;
62996+ vma = vma_m;
62997+ h = hstate_vma(vma);
62998+ } else
62999+ address_m = address + SEGMEXEC_TASK_SIZE;
63000+
63001+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
63002+ return VM_FAULT_OOM;
63003+ address_m &= HPAGE_MASK;
63004+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
63005+ }
63006+#endif
63007+
63008 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
63009 if (!ptep)
63010 return VM_FAULT_OOM;
63011diff -urNp linux-2.6.32.41/mm/Kconfig linux-2.6.32.41/mm/Kconfig
63012--- linux-2.6.32.41/mm/Kconfig 2011-03-27 14:31:47.000000000 -0400
63013+++ linux-2.6.32.41/mm/Kconfig 2011-04-17 15:56:46.000000000 -0400
63014@@ -228,7 +228,7 @@ config KSM
63015 config DEFAULT_MMAP_MIN_ADDR
63016 int "Low address space to protect from user allocation"
63017 depends on MMU
63018- default 4096
63019+ default 65536
63020 help
63021 This is the portion of low virtual memory which should be protected
63022 from userspace allocation. Keeping a user from writing to low pages
63023diff -urNp linux-2.6.32.41/mm/kmemleak.c linux-2.6.32.41/mm/kmemleak.c
63024--- linux-2.6.32.41/mm/kmemleak.c 2011-03-27 14:31:47.000000000 -0400
63025+++ linux-2.6.32.41/mm/kmemleak.c 2011-04-17 15:56:46.000000000 -0400
63026@@ -358,7 +358,7 @@ static void print_unreferenced(struct se
63027
63028 for (i = 0; i < object->trace_len; i++) {
63029 void *ptr = (void *)object->trace[i];
63030- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
63031+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
63032 }
63033 }
63034
63035diff -urNp linux-2.6.32.41/mm/maccess.c linux-2.6.32.41/mm/maccess.c
63036--- linux-2.6.32.41/mm/maccess.c 2011-03-27 14:31:47.000000000 -0400
63037+++ linux-2.6.32.41/mm/maccess.c 2011-04-17 15:56:46.000000000 -0400
63038@@ -14,7 +14,7 @@
63039 * Safely read from address @src to the buffer at @dst. If a kernel fault
63040 * happens, handle that and return -EFAULT.
63041 */
63042-long probe_kernel_read(void *dst, void *src, size_t size)
63043+long probe_kernel_read(void *dst, const void *src, size_t size)
63044 {
63045 long ret;
63046 mm_segment_t old_fs = get_fs();
63047@@ -39,7 +39,7 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
63048 * Safely write to address @dst from the buffer at @src. If a kernel fault
63049 * happens, handle that and return -EFAULT.
63050 */
63051-long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
63052+long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
63053 {
63054 long ret;
63055 mm_segment_t old_fs = get_fs();
63056diff -urNp linux-2.6.32.41/mm/madvise.c linux-2.6.32.41/mm/madvise.c
63057--- linux-2.6.32.41/mm/madvise.c 2011-03-27 14:31:47.000000000 -0400
63058+++ linux-2.6.32.41/mm/madvise.c 2011-04-17 15:56:46.000000000 -0400
63059@@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_a
63060 pgoff_t pgoff;
63061 unsigned long new_flags = vma->vm_flags;
63062
63063+#ifdef CONFIG_PAX_SEGMEXEC
63064+ struct vm_area_struct *vma_m;
63065+#endif
63066+
63067 switch (behavior) {
63068 case MADV_NORMAL:
63069 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
63070@@ -103,6 +107,13 @@ success:
63071 /*
63072 * vm_flags is protected by the mmap_sem held in write mode.
63073 */
63074+
63075+#ifdef CONFIG_PAX_SEGMEXEC
63076+ vma_m = pax_find_mirror_vma(vma);
63077+ if (vma_m)
63078+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
63079+#endif
63080+
63081 vma->vm_flags = new_flags;
63082
63083 out:
63084@@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_a
63085 struct vm_area_struct ** prev,
63086 unsigned long start, unsigned long end)
63087 {
63088+
63089+#ifdef CONFIG_PAX_SEGMEXEC
63090+ struct vm_area_struct *vma_m;
63091+#endif
63092+
63093 *prev = vma;
63094 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
63095 return -EINVAL;
63096@@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_a
63097 zap_page_range(vma, start, end - start, &details);
63098 } else
63099 zap_page_range(vma, start, end - start, NULL);
63100+
63101+#ifdef CONFIG_PAX_SEGMEXEC
63102+ vma_m = pax_find_mirror_vma(vma);
63103+ if (vma_m) {
63104+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
63105+ struct zap_details details = {
63106+ .nonlinear_vma = vma_m,
63107+ .last_index = ULONG_MAX,
63108+ };
63109+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
63110+ } else
63111+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
63112+ }
63113+#endif
63114+
63115 return 0;
63116 }
63117
63118@@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
63119 if (end < start)
63120 goto out;
63121
63122+#ifdef CONFIG_PAX_SEGMEXEC
63123+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
63124+ if (end > SEGMEXEC_TASK_SIZE)
63125+ goto out;
63126+ } else
63127+#endif
63128+
63129+ if (end > TASK_SIZE)
63130+ goto out;
63131+
63132 error = 0;
63133 if (end == start)
63134 goto out;
63135diff -urNp linux-2.6.32.41/mm/memory.c linux-2.6.32.41/mm/memory.c
63136--- linux-2.6.32.41/mm/memory.c 2011-03-27 14:31:47.000000000 -0400
63137+++ linux-2.6.32.41/mm/memory.c 2011-04-17 15:56:46.000000000 -0400
63138@@ -187,8 +187,12 @@ static inline void free_pmd_range(struct
63139 return;
63140
63141 pmd = pmd_offset(pud, start);
63142+
63143+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
63144 pud_clear(pud);
63145 pmd_free_tlb(tlb, pmd, start);
63146+#endif
63147+
63148 }
63149
63150 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
63151@@ -219,9 +223,12 @@ static inline void free_pud_range(struct
63152 if (end - 1 > ceiling - 1)
63153 return;
63154
63155+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
63156 pud = pud_offset(pgd, start);
63157 pgd_clear(pgd);
63158 pud_free_tlb(tlb, pud, start);
63159+#endif
63160+
63161 }
63162
63163 /*
63164@@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct
63165 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
63166 i = 0;
63167
63168- do {
63169+ while (nr_pages) {
63170 struct vm_area_struct *vma;
63171
63172- vma = find_extend_vma(mm, start);
63173+ vma = find_vma(mm, start);
63174 if (!vma && in_gate_area(tsk, start)) {
63175 unsigned long pg = start & PAGE_MASK;
63176 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
63177@@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct
63178 continue;
63179 }
63180
63181- if (!vma ||
63182+ if (!vma || start < vma->vm_start ||
63183 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
63184 !(vm_flags & vma->vm_flags))
63185 return i ? : -EFAULT;
63186@@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct
63187 start += PAGE_SIZE;
63188 nr_pages--;
63189 } while (nr_pages && start < vma->vm_end);
63190- } while (nr_pages);
63191+ }
63192 return i;
63193 }
63194
63195@@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_st
63196 page_add_file_rmap(page);
63197 set_pte_at(mm, addr, pte, mk_pte(page, prot));
63198
63199+#ifdef CONFIG_PAX_SEGMEXEC
63200+ pax_mirror_file_pte(vma, addr, page, ptl);
63201+#endif
63202+
63203 retval = 0;
63204 pte_unmap_unlock(pte, ptl);
63205 return retval;
63206@@ -1560,10 +1571,22 @@ out:
63207 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
63208 struct page *page)
63209 {
63210+
63211+#ifdef CONFIG_PAX_SEGMEXEC
63212+ struct vm_area_struct *vma_m;
63213+#endif
63214+
63215 if (addr < vma->vm_start || addr >= vma->vm_end)
63216 return -EFAULT;
63217 if (!page_count(page))
63218 return -EINVAL;
63219+
63220+#ifdef CONFIG_PAX_SEGMEXEC
63221+ vma_m = pax_find_mirror_vma(vma);
63222+ if (vma_m)
63223+ vma_m->vm_flags |= VM_INSERTPAGE;
63224+#endif
63225+
63226 vma->vm_flags |= VM_INSERTPAGE;
63227 return insert_page(vma, addr, page, vma->vm_page_prot);
63228 }
63229@@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struc
63230 unsigned long pfn)
63231 {
63232 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
63233+ BUG_ON(vma->vm_mirror);
63234
63235 if (addr < vma->vm_start || addr >= vma->vm_end)
63236 return -EFAULT;
63237@@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct
63238 copy_user_highpage(dst, src, va, vma);
63239 }
63240
63241+#ifdef CONFIG_PAX_SEGMEXEC
63242+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
63243+{
63244+ struct mm_struct *mm = vma->vm_mm;
63245+ spinlock_t *ptl;
63246+ pte_t *pte, entry;
63247+
63248+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
63249+ entry = *pte;
63250+ if (!pte_present(entry)) {
63251+ if (!pte_none(entry)) {
63252+ BUG_ON(pte_file(entry));
63253+ free_swap_and_cache(pte_to_swp_entry(entry));
63254+ pte_clear_not_present_full(mm, address, pte, 0);
63255+ }
63256+ } else {
63257+ struct page *page;
63258+
63259+ flush_cache_page(vma, address, pte_pfn(entry));
63260+ entry = ptep_clear_flush(vma, address, pte);
63261+ BUG_ON(pte_dirty(entry));
63262+ page = vm_normal_page(vma, address, entry);
63263+ if (page) {
63264+ update_hiwater_rss(mm);
63265+ if (PageAnon(page))
63266+ dec_mm_counter(mm, anon_rss);
63267+ else
63268+ dec_mm_counter(mm, file_rss);
63269+ page_remove_rmap(page);
63270+ page_cache_release(page);
63271+ }
63272+ }
63273+ pte_unmap_unlock(pte, ptl);
63274+}
63275+
63276+/* PaX: if vma is mirrored, synchronize the mirror's PTE
63277+ *
63278+ * the ptl of the lower mapped page is held on entry and is not released on exit
63279+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
63280+ */
63281+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
63282+{
63283+ struct mm_struct *mm = vma->vm_mm;
63284+ unsigned long address_m;
63285+ spinlock_t *ptl_m;
63286+ struct vm_area_struct *vma_m;
63287+ pmd_t *pmd_m;
63288+ pte_t *pte_m, entry_m;
63289+
63290+ BUG_ON(!page_m || !PageAnon(page_m));
63291+
63292+ vma_m = pax_find_mirror_vma(vma);
63293+ if (!vma_m)
63294+ return;
63295+
63296+ BUG_ON(!PageLocked(page_m));
63297+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63298+ address_m = address + SEGMEXEC_TASK_SIZE;
63299+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
63300+ pte_m = pte_offset_map_nested(pmd_m, address_m);
63301+ ptl_m = pte_lockptr(mm, pmd_m);
63302+ if (ptl != ptl_m) {
63303+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
63304+ if (!pte_none(*pte_m))
63305+ goto out;
63306+ }
63307+
63308+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
63309+ page_cache_get(page_m);
63310+ page_add_anon_rmap(page_m, vma_m, address_m);
63311+ inc_mm_counter(mm, anon_rss);
63312+ set_pte_at(mm, address_m, pte_m, entry_m);
63313+ update_mmu_cache(vma_m, address_m, entry_m);
63314+out:
63315+ if (ptl != ptl_m)
63316+ spin_unlock(ptl_m);
63317+ pte_unmap_nested(pte_m);
63318+ unlock_page(page_m);
63319+}
63320+
63321+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
63322+{
63323+ struct mm_struct *mm = vma->vm_mm;
63324+ unsigned long address_m;
63325+ spinlock_t *ptl_m;
63326+ struct vm_area_struct *vma_m;
63327+ pmd_t *pmd_m;
63328+ pte_t *pte_m, entry_m;
63329+
63330+ BUG_ON(!page_m || PageAnon(page_m));
63331+
63332+ vma_m = pax_find_mirror_vma(vma);
63333+ if (!vma_m)
63334+ return;
63335+
63336+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63337+ address_m = address + SEGMEXEC_TASK_SIZE;
63338+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
63339+ pte_m = pte_offset_map_nested(pmd_m, address_m);
63340+ ptl_m = pte_lockptr(mm, pmd_m);
63341+ if (ptl != ptl_m) {
63342+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
63343+ if (!pte_none(*pte_m))
63344+ goto out;
63345+ }
63346+
63347+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
63348+ page_cache_get(page_m);
63349+ page_add_file_rmap(page_m);
63350+ inc_mm_counter(mm, file_rss);
63351+ set_pte_at(mm, address_m, pte_m, entry_m);
63352+ update_mmu_cache(vma_m, address_m, entry_m);
63353+out:
63354+ if (ptl != ptl_m)
63355+ spin_unlock(ptl_m);
63356+ pte_unmap_nested(pte_m);
63357+}
63358+
63359+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
63360+{
63361+ struct mm_struct *mm = vma->vm_mm;
63362+ unsigned long address_m;
63363+ spinlock_t *ptl_m;
63364+ struct vm_area_struct *vma_m;
63365+ pmd_t *pmd_m;
63366+ pte_t *pte_m, entry_m;
63367+
63368+ vma_m = pax_find_mirror_vma(vma);
63369+ if (!vma_m)
63370+ return;
63371+
63372+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63373+ address_m = address + SEGMEXEC_TASK_SIZE;
63374+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
63375+ pte_m = pte_offset_map_nested(pmd_m, address_m);
63376+ ptl_m = pte_lockptr(mm, pmd_m);
63377+ if (ptl != ptl_m) {
63378+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
63379+ if (!pte_none(*pte_m))
63380+ goto out;
63381+ }
63382+
63383+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
63384+ set_pte_at(mm, address_m, pte_m, entry_m);
63385+out:
63386+ if (ptl != ptl_m)
63387+ spin_unlock(ptl_m);
63388+ pte_unmap_nested(pte_m);
63389+}
63390+
63391+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
63392+{
63393+ struct page *page_m;
63394+ pte_t entry;
63395+
63396+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
63397+ goto out;
63398+
63399+ entry = *pte;
63400+ page_m = vm_normal_page(vma, address, entry);
63401+ if (!page_m)
63402+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
63403+ else if (PageAnon(page_m)) {
63404+ if (pax_find_mirror_vma(vma)) {
63405+ pte_unmap_unlock(pte, ptl);
63406+ lock_page(page_m);
63407+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
63408+ if (pte_same(entry, *pte))
63409+ pax_mirror_anon_pte(vma, address, page_m, ptl);
63410+ else
63411+ unlock_page(page_m);
63412+ }
63413+ } else
63414+ pax_mirror_file_pte(vma, address, page_m, ptl);
63415+
63416+out:
63417+ pte_unmap_unlock(pte, ptl);
63418+}
63419+#endif
63420+
63421 /*
63422 * This routine handles present pages, when users try to write
63423 * to a shared page. It is done by copying the page to a new address
63424@@ -2156,6 +2360,12 @@ gotten:
63425 */
63426 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
63427 if (likely(pte_same(*page_table, orig_pte))) {
63428+
63429+#ifdef CONFIG_PAX_SEGMEXEC
63430+ if (pax_find_mirror_vma(vma))
63431+ BUG_ON(!trylock_page(new_page));
63432+#endif
63433+
63434 if (old_page) {
63435 if (!PageAnon(old_page)) {
63436 dec_mm_counter(mm, file_rss);
63437@@ -2207,6 +2417,10 @@ gotten:
63438 page_remove_rmap(old_page);
63439 }
63440
63441+#ifdef CONFIG_PAX_SEGMEXEC
63442+ pax_mirror_anon_pte(vma, address, new_page, ptl);
63443+#endif
63444+
63445 /* Free the old page.. */
63446 new_page = old_page;
63447 ret |= VM_FAULT_WRITE;
63448@@ -2604,6 +2818,11 @@ static int do_swap_page(struct mm_struct
63449 swap_free(entry);
63450 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
63451 try_to_free_swap(page);
63452+
63453+#ifdef CONFIG_PAX_SEGMEXEC
63454+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
63455+#endif
63456+
63457 unlock_page(page);
63458
63459 if (flags & FAULT_FLAG_WRITE) {
63460@@ -2615,6 +2834,11 @@ static int do_swap_page(struct mm_struct
63461
63462 /* No need to invalidate - it was non-present before */
63463 update_mmu_cache(vma, address, pte);
63464+
63465+#ifdef CONFIG_PAX_SEGMEXEC
63466+ pax_mirror_anon_pte(vma, address, page, ptl);
63467+#endif
63468+
63469 unlock:
63470 pte_unmap_unlock(page_table, ptl);
63471 out:
63472@@ -2630,40 +2854,6 @@ out_release:
63473 }
63474
63475 /*
63476- * This is like a special single-page "expand_{down|up}wards()",
63477- * except we must first make sure that 'address{-|+}PAGE_SIZE'
63478- * doesn't hit another vma.
63479- */
63480-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
63481-{
63482- address &= PAGE_MASK;
63483- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
63484- struct vm_area_struct *prev = vma->vm_prev;
63485-
63486- /*
63487- * Is there a mapping abutting this one below?
63488- *
63489- * That's only ok if it's the same stack mapping
63490- * that has gotten split..
63491- */
63492- if (prev && prev->vm_end == address)
63493- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
63494-
63495- expand_stack(vma, address - PAGE_SIZE);
63496- }
63497- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
63498- struct vm_area_struct *next = vma->vm_next;
63499-
63500- /* As VM_GROWSDOWN but s/below/above/ */
63501- if (next && next->vm_start == address + PAGE_SIZE)
63502- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
63503-
63504- expand_upwards(vma, address + PAGE_SIZE);
63505- }
63506- return 0;
63507-}
63508-
63509-/*
63510 * We enter with non-exclusive mmap_sem (to exclude vma changes,
63511 * but allow concurrent faults), and pte mapped but not yet locked.
63512 * We return with mmap_sem still held, but pte unmapped and unlocked.
63513@@ -2672,27 +2862,23 @@ static int do_anonymous_page(struct mm_s
63514 unsigned long address, pte_t *page_table, pmd_t *pmd,
63515 unsigned int flags)
63516 {
63517- struct page *page;
63518+ struct page *page = NULL;
63519 spinlock_t *ptl;
63520 pte_t entry;
63521
63522- pte_unmap(page_table);
63523-
63524- /* Check if we need to add a guard page to the stack */
63525- if (check_stack_guard_page(vma, address) < 0)
63526- return VM_FAULT_SIGBUS;
63527-
63528- /* Use the zero-page for reads */
63529 if (!(flags & FAULT_FLAG_WRITE)) {
63530 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
63531 vma->vm_page_prot));
63532- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
63533+ ptl = pte_lockptr(mm, pmd);
63534+ spin_lock(ptl);
63535 if (!pte_none(*page_table))
63536 goto unlock;
63537 goto setpte;
63538 }
63539
63540 /* Allocate our own private page. */
63541+ pte_unmap(page_table);
63542+
63543 if (unlikely(anon_vma_prepare(vma)))
63544 goto oom;
63545 page = alloc_zeroed_user_highpage_movable(vma, address);
63546@@ -2711,6 +2897,11 @@ static int do_anonymous_page(struct mm_s
63547 if (!pte_none(*page_table))
63548 goto release;
63549
63550+#ifdef CONFIG_PAX_SEGMEXEC
63551+ if (pax_find_mirror_vma(vma))
63552+ BUG_ON(!trylock_page(page));
63553+#endif
63554+
63555 inc_mm_counter(mm, anon_rss);
63556 page_add_new_anon_rmap(page, vma, address);
63557 setpte:
63558@@ -2718,6 +2909,12 @@ setpte:
63559
63560 /* No need to invalidate - it was non-present before */
63561 update_mmu_cache(vma, address, entry);
63562+
63563+#ifdef CONFIG_PAX_SEGMEXEC
63564+ if (page)
63565+ pax_mirror_anon_pte(vma, address, page, ptl);
63566+#endif
63567+
63568 unlock:
63569 pte_unmap_unlock(page_table, ptl);
63570 return 0;
63571@@ -2860,6 +3057,12 @@ static int __do_fault(struct mm_struct *
63572 */
63573 /* Only go through if we didn't race with anybody else... */
63574 if (likely(pte_same(*page_table, orig_pte))) {
63575+
63576+#ifdef CONFIG_PAX_SEGMEXEC
63577+ if (anon && pax_find_mirror_vma(vma))
63578+ BUG_ON(!trylock_page(page));
63579+#endif
63580+
63581 flush_icache_page(vma, page);
63582 entry = mk_pte(page, vma->vm_page_prot);
63583 if (flags & FAULT_FLAG_WRITE)
63584@@ -2879,6 +3082,14 @@ static int __do_fault(struct mm_struct *
63585
63586 /* no need to invalidate: a not-present page won't be cached */
63587 update_mmu_cache(vma, address, entry);
63588+
63589+#ifdef CONFIG_PAX_SEGMEXEC
63590+ if (anon)
63591+ pax_mirror_anon_pte(vma, address, page, ptl);
63592+ else
63593+ pax_mirror_file_pte(vma, address, page, ptl);
63594+#endif
63595+
63596 } else {
63597 if (charged)
63598 mem_cgroup_uncharge_page(page);
63599@@ -3026,6 +3237,12 @@ static inline int handle_pte_fault(struc
63600 if (flags & FAULT_FLAG_WRITE)
63601 flush_tlb_page(vma, address);
63602 }
63603+
63604+#ifdef CONFIG_PAX_SEGMEXEC
63605+ pax_mirror_pte(vma, address, pte, pmd, ptl);
63606+ return 0;
63607+#endif
63608+
63609 unlock:
63610 pte_unmap_unlock(pte, ptl);
63611 return 0;
63612@@ -3042,6 +3259,10 @@ int handle_mm_fault(struct mm_struct *mm
63613 pmd_t *pmd;
63614 pte_t *pte;
63615
63616+#ifdef CONFIG_PAX_SEGMEXEC
63617+ struct vm_area_struct *vma_m;
63618+#endif
63619+
63620 __set_current_state(TASK_RUNNING);
63621
63622 count_vm_event(PGFAULT);
63623@@ -3049,6 +3270,34 @@ int handle_mm_fault(struct mm_struct *mm
63624 if (unlikely(is_vm_hugetlb_page(vma)))
63625 return hugetlb_fault(mm, vma, address, flags);
63626
63627+#ifdef CONFIG_PAX_SEGMEXEC
63628+ vma_m = pax_find_mirror_vma(vma);
63629+ if (vma_m) {
63630+ unsigned long address_m;
63631+ pgd_t *pgd_m;
63632+ pud_t *pud_m;
63633+ pmd_t *pmd_m;
63634+
63635+ if (vma->vm_start > vma_m->vm_start) {
63636+ address_m = address;
63637+ address -= SEGMEXEC_TASK_SIZE;
63638+ vma = vma_m;
63639+ } else
63640+ address_m = address + SEGMEXEC_TASK_SIZE;
63641+
63642+ pgd_m = pgd_offset(mm, address_m);
63643+ pud_m = pud_alloc(mm, pgd_m, address_m);
63644+ if (!pud_m)
63645+ return VM_FAULT_OOM;
63646+ pmd_m = pmd_alloc(mm, pud_m, address_m);
63647+ if (!pmd_m)
63648+ return VM_FAULT_OOM;
63649+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
63650+ return VM_FAULT_OOM;
63651+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
63652+ }
63653+#endif
63654+
63655 pgd = pgd_offset(mm, address);
63656 pud = pud_alloc(mm, pgd, address);
63657 if (!pud)
63658@@ -3146,7 +3395,7 @@ static int __init gate_vma_init(void)
63659 gate_vma.vm_start = FIXADDR_USER_START;
63660 gate_vma.vm_end = FIXADDR_USER_END;
63661 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
63662- gate_vma.vm_page_prot = __P101;
63663+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
63664 /*
63665 * Make sure the vDSO gets into every core dump.
63666 * Dumping its contents makes post-mortem fully interpretable later
63667diff -urNp linux-2.6.32.41/mm/memory-failure.c linux-2.6.32.41/mm/memory-failure.c
63668--- linux-2.6.32.41/mm/memory-failure.c 2011-03-27 14:31:47.000000000 -0400
63669+++ linux-2.6.32.41/mm/memory-failure.c 2011-04-17 15:56:46.000000000 -0400
63670@@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __r
63671
63672 int sysctl_memory_failure_recovery __read_mostly = 1;
63673
63674-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
63675+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
63676
63677 /*
63678 * Send all the processes who have the page mapped an ``action optional''
63679@@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn,
63680 return 0;
63681 }
63682
63683- atomic_long_add(1, &mce_bad_pages);
63684+ atomic_long_add_unchecked(1, &mce_bad_pages);
63685
63686 /*
63687 * We need/can do nothing about count=0 pages.
63688diff -urNp linux-2.6.32.41/mm/mempolicy.c linux-2.6.32.41/mm/mempolicy.c
63689--- linux-2.6.32.41/mm/mempolicy.c 2011-03-27 14:31:47.000000000 -0400
63690+++ linux-2.6.32.41/mm/mempolicy.c 2011-04-17 15:56:46.000000000 -0400
63691@@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_st
63692 struct vm_area_struct *next;
63693 int err;
63694
63695+#ifdef CONFIG_PAX_SEGMEXEC
63696+ struct vm_area_struct *vma_m;
63697+#endif
63698+
63699 err = 0;
63700 for (; vma && vma->vm_start < end; vma = next) {
63701 next = vma->vm_next;
63702@@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_st
63703 err = policy_vma(vma, new);
63704 if (err)
63705 break;
63706+
63707+#ifdef CONFIG_PAX_SEGMEXEC
63708+ vma_m = pax_find_mirror_vma(vma);
63709+ if (vma_m) {
63710+ err = policy_vma(vma_m, new);
63711+ if (err)
63712+ break;
63713+ }
63714+#endif
63715+
63716 }
63717 return err;
63718 }
63719@@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start
63720
63721 if (end < start)
63722 return -EINVAL;
63723+
63724+#ifdef CONFIG_PAX_SEGMEXEC
63725+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
63726+ if (end > SEGMEXEC_TASK_SIZE)
63727+ return -EINVAL;
63728+ } else
63729+#endif
63730+
63731+ if (end > TASK_SIZE)
63732+ return -EINVAL;
63733+
63734 if (end == start)
63735 return 0;
63736
63737@@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
63738 if (!mm)
63739 return -EINVAL;
63740
63741+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63742+ if (mm != current->mm &&
63743+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
63744+ err = -EPERM;
63745+ goto out;
63746+ }
63747+#endif
63748+
63749 /*
63750 * Check if this process has the right to modify the specified
63751 * process. The right exists if the process has administrative
63752@@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
63753 rcu_read_lock();
63754 tcred = __task_cred(task);
63755 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
63756- cred->uid != tcred->suid && cred->uid != tcred->uid &&
63757- !capable(CAP_SYS_NICE)) {
63758+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
63759 rcu_read_unlock();
63760 err = -EPERM;
63761 goto out;
63762@@ -2396,7 +2428,7 @@ int show_numa_map(struct seq_file *m, vo
63763
63764 if (file) {
63765 seq_printf(m, " file=");
63766- seq_path(m, &file->f_path, "\n\t= ");
63767+ seq_path(m, &file->f_path, "\n\t\\= ");
63768 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
63769 seq_printf(m, " heap");
63770 } else if (vma->vm_start <= mm->start_stack &&
63771diff -urNp linux-2.6.32.41/mm/migrate.c linux-2.6.32.41/mm/migrate.c
63772--- linux-2.6.32.41/mm/migrate.c 2011-03-27 14:31:47.000000000 -0400
63773+++ linux-2.6.32.41/mm/migrate.c 2011-05-16 21:46:57.000000000 -0400
63774@@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struc
63775 unsigned long chunk_start;
63776 int err;
63777
63778+ pax_track_stack();
63779+
63780 task_nodes = cpuset_mems_allowed(task);
63781
63782 err = -ENOMEM;
63783@@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
63784 if (!mm)
63785 return -EINVAL;
63786
63787+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63788+ if (mm != current->mm &&
63789+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
63790+ err = -EPERM;
63791+ goto out;
63792+ }
63793+#endif
63794+
63795 /*
63796 * Check if this process has the right to modify the specified
63797 * process. The right exists if the process has administrative
63798@@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
63799 rcu_read_lock();
63800 tcred = __task_cred(task);
63801 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
63802- cred->uid != tcred->suid && cred->uid != tcred->uid &&
63803- !capable(CAP_SYS_NICE)) {
63804+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
63805 rcu_read_unlock();
63806 err = -EPERM;
63807 goto out;
63808diff -urNp linux-2.6.32.41/mm/mlock.c linux-2.6.32.41/mm/mlock.c
63809--- linux-2.6.32.41/mm/mlock.c 2011-03-27 14:31:47.000000000 -0400
63810+++ linux-2.6.32.41/mm/mlock.c 2011-04-17 15:56:46.000000000 -0400
63811@@ -13,6 +13,7 @@
63812 #include <linux/pagemap.h>
63813 #include <linux/mempolicy.h>
63814 #include <linux/syscalls.h>
63815+#include <linux/security.h>
63816 #include <linux/sched.h>
63817 #include <linux/module.h>
63818 #include <linux/rmap.h>
63819@@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
63820 }
63821 }
63822
63823-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
63824-{
63825- return (vma->vm_flags & VM_GROWSDOWN) &&
63826- (vma->vm_start == addr) &&
63827- !vma_stack_continue(vma->vm_prev, addr);
63828-}
63829-
63830 /**
63831 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
63832 * @vma: target vma
63833@@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(stru
63834 if (vma->vm_flags & VM_WRITE)
63835 gup_flags |= FOLL_WRITE;
63836
63837- /* We don't try to access the guard page of a stack vma */
63838- if (stack_guard_page(vma, start)) {
63839- addr += PAGE_SIZE;
63840- nr_pages--;
63841- }
63842-
63843 while (nr_pages > 0) {
63844 int i;
63845
63846@@ -440,7 +428,7 @@ static int do_mlock(unsigned long start,
63847 {
63848 unsigned long nstart, end, tmp;
63849 struct vm_area_struct * vma, * prev;
63850- int error;
63851+ int error = -EINVAL;
63852
63853 len = PAGE_ALIGN(len);
63854 end = start + len;
63855@@ -448,6 +436,9 @@ static int do_mlock(unsigned long start,
63856 return -EINVAL;
63857 if (end == start)
63858 return 0;
63859+ if (end > TASK_SIZE)
63860+ return -EINVAL;
63861+
63862 vma = find_vma_prev(current->mm, start, &prev);
63863 if (!vma || vma->vm_start > start)
63864 return -ENOMEM;
63865@@ -458,6 +449,11 @@ static int do_mlock(unsigned long start,
63866 for (nstart = start ; ; ) {
63867 unsigned int newflags;
63868
63869+#ifdef CONFIG_PAX_SEGMEXEC
63870+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
63871+ break;
63872+#endif
63873+
63874 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
63875
63876 newflags = vma->vm_flags | VM_LOCKED;
63877@@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
63878 lock_limit >>= PAGE_SHIFT;
63879
63880 /* check against resource limits */
63881+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
63882 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
63883 error = do_mlock(start, len, 1);
63884 up_write(&current->mm->mmap_sem);
63885@@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
63886 static int do_mlockall(int flags)
63887 {
63888 struct vm_area_struct * vma, * prev = NULL;
63889- unsigned int def_flags = 0;
63890
63891 if (flags & MCL_FUTURE)
63892- def_flags = VM_LOCKED;
63893- current->mm->def_flags = def_flags;
63894+ current->mm->def_flags |= VM_LOCKED;
63895+ else
63896+ current->mm->def_flags &= ~VM_LOCKED;
63897 if (flags == MCL_FUTURE)
63898 goto out;
63899
63900 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
63901- unsigned int newflags;
63902+ unsigned long newflags;
63903+
63904+#ifdef CONFIG_PAX_SEGMEXEC
63905+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
63906+ break;
63907+#endif
63908
63909+ BUG_ON(vma->vm_end > TASK_SIZE);
63910 newflags = vma->vm_flags | VM_LOCKED;
63911 if (!(flags & MCL_CURRENT))
63912 newflags &= ~VM_LOCKED;
63913@@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
63914 lock_limit >>= PAGE_SHIFT;
63915
63916 ret = -ENOMEM;
63917+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
63918 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
63919 capable(CAP_IPC_LOCK))
63920 ret = do_mlockall(flags);
63921diff -urNp linux-2.6.32.41/mm/mmap.c linux-2.6.32.41/mm/mmap.c
63922--- linux-2.6.32.41/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
63923+++ linux-2.6.32.41/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
63924@@ -45,6 +45,16 @@
63925 #define arch_rebalance_pgtables(addr, len) (addr)
63926 #endif
63927
63928+static inline void verify_mm_writelocked(struct mm_struct *mm)
63929+{
63930+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
63931+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
63932+ up_read(&mm->mmap_sem);
63933+ BUG();
63934+ }
63935+#endif
63936+}
63937+
63938 static void unmap_region(struct mm_struct *mm,
63939 struct vm_area_struct *vma, struct vm_area_struct *prev,
63940 unsigned long start, unsigned long end);
63941@@ -70,22 +80,32 @@ static void unmap_region(struct mm_struc
63942 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
63943 *
63944 */
63945-pgprot_t protection_map[16] = {
63946+pgprot_t protection_map[16] __read_only = {
63947 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
63948 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
63949 };
63950
63951 pgprot_t vm_get_page_prot(unsigned long vm_flags)
63952 {
63953- return __pgprot(pgprot_val(protection_map[vm_flags &
63954+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
63955 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
63956 pgprot_val(arch_vm_get_page_prot(vm_flags)));
63957+
63958+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
63959+ if (!nx_enabled &&
63960+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
63961+ (vm_flags & (VM_READ | VM_WRITE)))
63962+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
63963+#endif
63964+
63965+ return prot;
63966 }
63967 EXPORT_SYMBOL(vm_get_page_prot);
63968
63969 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
63970 int sysctl_overcommit_ratio = 50; /* default is 50% */
63971 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
63972+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
63973 struct percpu_counter vm_committed_as;
63974
63975 /*
63976@@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma
63977 struct vm_area_struct *next = vma->vm_next;
63978
63979 might_sleep();
63980+ BUG_ON(vma->vm_mirror);
63981 if (vma->vm_ops && vma->vm_ops->close)
63982 vma->vm_ops->close(vma);
63983 if (vma->vm_file) {
63984@@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
63985 * not page aligned -Ram Gupta
63986 */
63987 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
63988+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
63989 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
63990 (mm->end_data - mm->start_data) > rlim)
63991 goto out;
63992@@ -704,6 +726,12 @@ static int
63993 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
63994 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
63995 {
63996+
63997+#ifdef CONFIG_PAX_SEGMEXEC
63998+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
63999+ return 0;
64000+#endif
64001+
64002 if (is_mergeable_vma(vma, file, vm_flags) &&
64003 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
64004 if (vma->vm_pgoff == vm_pgoff)
64005@@ -723,6 +751,12 @@ static int
64006 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
64007 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
64008 {
64009+
64010+#ifdef CONFIG_PAX_SEGMEXEC
64011+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
64012+ return 0;
64013+#endif
64014+
64015 if (is_mergeable_vma(vma, file, vm_flags) &&
64016 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
64017 pgoff_t vm_pglen;
64018@@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struc
64019 struct vm_area_struct *vma_merge(struct mm_struct *mm,
64020 struct vm_area_struct *prev, unsigned long addr,
64021 unsigned long end, unsigned long vm_flags,
64022- struct anon_vma *anon_vma, struct file *file,
64023+ struct anon_vma *anon_vma, struct file *file,
64024 pgoff_t pgoff, struct mempolicy *policy)
64025 {
64026 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
64027 struct vm_area_struct *area, *next;
64028
64029+#ifdef CONFIG_PAX_SEGMEXEC
64030+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
64031+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
64032+
64033+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
64034+#endif
64035+
64036 /*
64037 * We later require that vma->vm_flags == vm_flags,
64038 * so this tests vma->vm_flags & VM_SPECIAL, too.
64039@@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct
64040 if (next && next->vm_end == end) /* cases 6, 7, 8 */
64041 next = next->vm_next;
64042
64043+#ifdef CONFIG_PAX_SEGMEXEC
64044+ if (prev)
64045+ prev_m = pax_find_mirror_vma(prev);
64046+ if (area)
64047+ area_m = pax_find_mirror_vma(area);
64048+ if (next)
64049+ next_m = pax_find_mirror_vma(next);
64050+#endif
64051+
64052 /*
64053 * Can it merge with the predecessor?
64054 */
64055@@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct
64056 /* cases 1, 6 */
64057 vma_adjust(prev, prev->vm_start,
64058 next->vm_end, prev->vm_pgoff, NULL);
64059- } else /* cases 2, 5, 7 */
64060+
64061+#ifdef CONFIG_PAX_SEGMEXEC
64062+ if (prev_m)
64063+ vma_adjust(prev_m, prev_m->vm_start,
64064+ next_m->vm_end, prev_m->vm_pgoff, NULL);
64065+#endif
64066+
64067+ } else { /* cases 2, 5, 7 */
64068 vma_adjust(prev, prev->vm_start,
64069 end, prev->vm_pgoff, NULL);
64070+
64071+#ifdef CONFIG_PAX_SEGMEXEC
64072+ if (prev_m)
64073+ vma_adjust(prev_m, prev_m->vm_start,
64074+ end_m, prev_m->vm_pgoff, NULL);
64075+#endif
64076+
64077+ }
64078 return prev;
64079 }
64080
64081@@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct
64082 mpol_equal(policy, vma_policy(next)) &&
64083 can_vma_merge_before(next, vm_flags,
64084 anon_vma, file, pgoff+pglen)) {
64085- if (prev && addr < prev->vm_end) /* case 4 */
64086+ if (prev && addr < prev->vm_end) { /* case 4 */
64087 vma_adjust(prev, prev->vm_start,
64088 addr, prev->vm_pgoff, NULL);
64089- else /* cases 3, 8 */
64090+
64091+#ifdef CONFIG_PAX_SEGMEXEC
64092+ if (prev_m)
64093+ vma_adjust(prev_m, prev_m->vm_start,
64094+ addr_m, prev_m->vm_pgoff, NULL);
64095+#endif
64096+
64097+ } else { /* cases 3, 8 */
64098 vma_adjust(area, addr, next->vm_end,
64099 next->vm_pgoff - pglen, NULL);
64100+
64101+#ifdef CONFIG_PAX_SEGMEXEC
64102+ if (area_m)
64103+ vma_adjust(area_m, addr_m, next_m->vm_end,
64104+ next_m->vm_pgoff - pglen, NULL);
64105+#endif
64106+
64107+ }
64108 return area;
64109 }
64110
64111@@ -898,14 +978,11 @@ none:
64112 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
64113 struct file *file, long pages)
64114 {
64115- const unsigned long stack_flags
64116- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
64117-
64118 if (file) {
64119 mm->shared_vm += pages;
64120 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
64121 mm->exec_vm += pages;
64122- } else if (flags & stack_flags)
64123+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
64124 mm->stack_vm += pages;
64125 if (flags & (VM_RESERVED|VM_IO))
64126 mm->reserved_vm += pages;
64127@@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file
64128 * (the exception is when the underlying filesystem is noexec
64129 * mounted, in which case we dont add PROT_EXEC.)
64130 */
64131- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
64132+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
64133 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
64134 prot |= PROT_EXEC;
64135
64136@@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file
64137 /* Obtain the address to map to. we verify (or select) it and ensure
64138 * that it represents a valid section of the address space.
64139 */
64140- addr = get_unmapped_area(file, addr, len, pgoff, flags);
64141+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
64142 if (addr & ~PAGE_MASK)
64143 return addr;
64144
64145@@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file
64146 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
64147 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
64148
64149+#ifdef CONFIG_PAX_MPROTECT
64150+ if (mm->pax_flags & MF_PAX_MPROTECT) {
64151+#ifndef CONFIG_PAX_MPROTECT_COMPAT
64152+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
64153+ gr_log_rwxmmap(file);
64154+
64155+#ifdef CONFIG_PAX_EMUPLT
64156+ vm_flags &= ~VM_EXEC;
64157+#else
64158+ return -EPERM;
64159+#endif
64160+
64161+ }
64162+
64163+ if (!(vm_flags & VM_EXEC))
64164+ vm_flags &= ~VM_MAYEXEC;
64165+#else
64166+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
64167+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
64168+#endif
64169+ else
64170+ vm_flags &= ~VM_MAYWRITE;
64171+ }
64172+#endif
64173+
64174+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
64175+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
64176+ vm_flags &= ~VM_PAGEEXEC;
64177+#endif
64178+
64179 if (flags & MAP_LOCKED)
64180 if (!can_do_mlock())
64181 return -EPERM;
64182@@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file
64183 locked += mm->locked_vm;
64184 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
64185 lock_limit >>= PAGE_SHIFT;
64186+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
64187 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
64188 return -EAGAIN;
64189 }
64190@@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file
64191 if (error)
64192 return error;
64193
64194+ if (!gr_acl_handle_mmap(file, prot))
64195+ return -EACCES;
64196+
64197 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
64198 }
64199 EXPORT_SYMBOL(do_mmap_pgoff);
64200@@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
64201 */
64202 int vma_wants_writenotify(struct vm_area_struct *vma)
64203 {
64204- unsigned int vm_flags = vma->vm_flags;
64205+ unsigned long vm_flags = vma->vm_flags;
64206
64207 /* If it was private or non-writable, the write bit is already clear */
64208- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
64209+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
64210 return 0;
64211
64212 /* The backer wishes to know when pages are first written to? */
64213@@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *f
64214 unsigned long charged = 0;
64215 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
64216
64217+#ifdef CONFIG_PAX_SEGMEXEC
64218+ struct vm_area_struct *vma_m = NULL;
64219+#endif
64220+
64221+ /*
64222+ * mm->mmap_sem is required to protect against another thread
64223+ * changing the mappings in case we sleep.
64224+ */
64225+ verify_mm_writelocked(mm);
64226+
64227 /* Clear old maps */
64228 error = -ENOMEM;
64229-munmap_back:
64230 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
64231 if (vma && vma->vm_start < addr + len) {
64232 if (do_munmap(mm, addr, len))
64233 return -ENOMEM;
64234- goto munmap_back;
64235+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
64236+ BUG_ON(vma && vma->vm_start < addr + len);
64237 }
64238
64239 /* Check against address space limit. */
64240@@ -1173,6 +1294,16 @@ munmap_back:
64241 goto unacct_error;
64242 }
64243
64244+#ifdef CONFIG_PAX_SEGMEXEC
64245+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
64246+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
64247+ if (!vma_m) {
64248+ error = -ENOMEM;
64249+ goto free_vma;
64250+ }
64251+ }
64252+#endif
64253+
64254 vma->vm_mm = mm;
64255 vma->vm_start = addr;
64256 vma->vm_end = addr + len;
64257@@ -1195,6 +1326,19 @@ munmap_back:
64258 error = file->f_op->mmap(file, vma);
64259 if (error)
64260 goto unmap_and_free_vma;
64261+
64262+#ifdef CONFIG_PAX_SEGMEXEC
64263+ if (vma_m && (vm_flags & VM_EXECUTABLE))
64264+ added_exe_file_vma(mm);
64265+#endif
64266+
64267+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
64268+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
64269+ vma->vm_flags |= VM_PAGEEXEC;
64270+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
64271+ }
64272+#endif
64273+
64274 if (vm_flags & VM_EXECUTABLE)
64275 added_exe_file_vma(mm);
64276
64277@@ -1218,6 +1362,11 @@ munmap_back:
64278 vma_link(mm, vma, prev, rb_link, rb_parent);
64279 file = vma->vm_file;
64280
64281+#ifdef CONFIG_PAX_SEGMEXEC
64282+ if (vma_m)
64283+ pax_mirror_vma(vma_m, vma);
64284+#endif
64285+
64286 /* Once vma denies write, undo our temporary denial count */
64287 if (correct_wcount)
64288 atomic_inc(&inode->i_writecount);
64289@@ -1226,6 +1375,7 @@ out:
64290
64291 mm->total_vm += len >> PAGE_SHIFT;
64292 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
64293+ track_exec_limit(mm, addr, addr + len, vm_flags);
64294 if (vm_flags & VM_LOCKED) {
64295 /*
64296 * makes pages present; downgrades, drops, reacquires mmap_sem
64297@@ -1248,6 +1398,12 @@ unmap_and_free_vma:
64298 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
64299 charged = 0;
64300 free_vma:
64301+
64302+#ifdef CONFIG_PAX_SEGMEXEC
64303+ if (vma_m)
64304+ kmem_cache_free(vm_area_cachep, vma_m);
64305+#endif
64306+
64307 kmem_cache_free(vm_area_cachep, vma);
64308 unacct_error:
64309 if (charged)
64310@@ -1255,6 +1411,44 @@ unacct_error:
64311 return error;
64312 }
64313
64314+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
64315+{
64316+ if (!vma) {
64317+#ifdef CONFIG_STACK_GROWSUP
64318+ if (addr > sysctl_heap_stack_gap)
64319+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
64320+ else
64321+ vma = find_vma(current->mm, 0);
64322+ if (vma && (vma->vm_flags & VM_GROWSUP))
64323+ return false;
64324+#endif
64325+ return true;
64326+ }
64327+
64328+ if (addr + len > vma->vm_start)
64329+ return false;
64330+
64331+ if (vma->vm_flags & VM_GROWSDOWN)
64332+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
64333+#ifdef CONFIG_STACK_GROWSUP
64334+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
64335+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
64336+#endif
64337+
64338+ return true;
64339+}
64340+
64341+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
64342+{
64343+ if (vma->vm_start < len)
64344+ return -ENOMEM;
64345+ if (!(vma->vm_flags & VM_GROWSDOWN))
64346+ return vma->vm_start - len;
64347+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
64348+ return vma->vm_start - len - sysctl_heap_stack_gap;
64349+ return -ENOMEM;
64350+}
64351+
64352 /* Get an address range which is currently unmapped.
64353 * For shmat() with addr=0.
64354 *
64355@@ -1281,18 +1475,23 @@ arch_get_unmapped_area(struct file *filp
64356 if (flags & MAP_FIXED)
64357 return addr;
64358
64359+#ifdef CONFIG_PAX_RANDMMAP
64360+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
64361+#endif
64362+
64363 if (addr) {
64364 addr = PAGE_ALIGN(addr);
64365- vma = find_vma(mm, addr);
64366- if (TASK_SIZE - len >= addr &&
64367- (!vma || addr + len <= vma->vm_start))
64368- return addr;
64369+ if (TASK_SIZE - len >= addr) {
64370+ vma = find_vma(mm, addr);
64371+ if (check_heap_stack_gap(vma, addr, len))
64372+ return addr;
64373+ }
64374 }
64375 if (len > mm->cached_hole_size) {
64376- start_addr = addr = mm->free_area_cache;
64377+ start_addr = addr = mm->free_area_cache;
64378 } else {
64379- start_addr = addr = TASK_UNMAPPED_BASE;
64380- mm->cached_hole_size = 0;
64381+ start_addr = addr = mm->mmap_base;
64382+ mm->cached_hole_size = 0;
64383 }
64384
64385 full_search:
64386@@ -1303,34 +1502,40 @@ full_search:
64387 * Start a new search - just in case we missed
64388 * some holes.
64389 */
64390- if (start_addr != TASK_UNMAPPED_BASE) {
64391- addr = TASK_UNMAPPED_BASE;
64392- start_addr = addr;
64393+ if (start_addr != mm->mmap_base) {
64394+ start_addr = addr = mm->mmap_base;
64395 mm->cached_hole_size = 0;
64396 goto full_search;
64397 }
64398 return -ENOMEM;
64399 }
64400- if (!vma || addr + len <= vma->vm_start) {
64401- /*
64402- * Remember the place where we stopped the search:
64403- */
64404- mm->free_area_cache = addr + len;
64405- return addr;
64406- }
64407+ if (check_heap_stack_gap(vma, addr, len))
64408+ break;
64409 if (addr + mm->cached_hole_size < vma->vm_start)
64410 mm->cached_hole_size = vma->vm_start - addr;
64411 addr = vma->vm_end;
64412 }
64413+
64414+ /*
64415+ * Remember the place where we stopped the search:
64416+ */
64417+ mm->free_area_cache = addr + len;
64418+ return addr;
64419 }
64420 #endif
64421
64422 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
64423 {
64424+
64425+#ifdef CONFIG_PAX_SEGMEXEC
64426+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
64427+ return;
64428+#endif
64429+
64430 /*
64431 * Is this a new hole at the lowest possible address?
64432 */
64433- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
64434+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
64435 mm->free_area_cache = addr;
64436 mm->cached_hole_size = ~0UL;
64437 }
64438@@ -1348,7 +1553,7 @@ arch_get_unmapped_area_topdown(struct fi
64439 {
64440 struct vm_area_struct *vma;
64441 struct mm_struct *mm = current->mm;
64442- unsigned long addr = addr0;
64443+ unsigned long base = mm->mmap_base, addr = addr0;
64444
64445 /* requested length too big for entire address space */
64446 if (len > TASK_SIZE)
64447@@ -1357,13 +1562,18 @@ arch_get_unmapped_area_topdown(struct fi
64448 if (flags & MAP_FIXED)
64449 return addr;
64450
64451+#ifdef CONFIG_PAX_RANDMMAP
64452+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
64453+#endif
64454+
64455 /* requesting a specific address */
64456 if (addr) {
64457 addr = PAGE_ALIGN(addr);
64458- vma = find_vma(mm, addr);
64459- if (TASK_SIZE - len >= addr &&
64460- (!vma || addr + len <= vma->vm_start))
64461- return addr;
64462+ if (TASK_SIZE - len >= addr) {
64463+ vma = find_vma(mm, addr);
64464+ if (check_heap_stack_gap(vma, addr, len))
64465+ return addr;
64466+ }
64467 }
64468
64469 /* check if free_area_cache is useful for us */
64470@@ -1378,7 +1588,7 @@ arch_get_unmapped_area_topdown(struct fi
64471 /* make sure it can fit in the remaining address space */
64472 if (addr > len) {
64473 vma = find_vma(mm, addr-len);
64474- if (!vma || addr <= vma->vm_start)
64475+ if (check_heap_stack_gap(vma, addr - len, len))
64476 /* remember the address as a hint for next time */
64477 return (mm->free_area_cache = addr-len);
64478 }
64479@@ -1395,7 +1605,7 @@ arch_get_unmapped_area_topdown(struct fi
64480 * return with success:
64481 */
64482 vma = find_vma(mm, addr);
64483- if (!vma || addr+len <= vma->vm_start)
64484+ if (check_heap_stack_gap(vma, addr, len))
64485 /* remember the address as a hint for next time */
64486 return (mm->free_area_cache = addr);
64487
64488@@ -1404,8 +1614,8 @@ arch_get_unmapped_area_topdown(struct fi
64489 mm->cached_hole_size = vma->vm_start - addr;
64490
64491 /* try just below the current vma->vm_start */
64492- addr = vma->vm_start-len;
64493- } while (len < vma->vm_start);
64494+ addr = skip_heap_stack_gap(vma, len);
64495+ } while (!IS_ERR_VALUE(addr));
64496
64497 bottomup:
64498 /*
64499@@ -1414,13 +1624,21 @@ bottomup:
64500 * can happen with large stack limits and large mmap()
64501 * allocations.
64502 */
64503+ mm->mmap_base = TASK_UNMAPPED_BASE;
64504+
64505+#ifdef CONFIG_PAX_RANDMMAP
64506+ if (mm->pax_flags & MF_PAX_RANDMMAP)
64507+ mm->mmap_base += mm->delta_mmap;
64508+#endif
64509+
64510+ mm->free_area_cache = mm->mmap_base;
64511 mm->cached_hole_size = ~0UL;
64512- mm->free_area_cache = TASK_UNMAPPED_BASE;
64513 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
64514 /*
64515 * Restore the topdown base:
64516 */
64517- mm->free_area_cache = mm->mmap_base;
64518+ mm->mmap_base = base;
64519+ mm->free_area_cache = base;
64520 mm->cached_hole_size = ~0UL;
64521
64522 return addr;
64523@@ -1429,6 +1647,12 @@ bottomup:
64524
64525 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
64526 {
64527+
64528+#ifdef CONFIG_PAX_SEGMEXEC
64529+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
64530+ return;
64531+#endif
64532+
64533 /*
64534 * Is this a new hole at the highest possible address?
64535 */
64536@@ -1436,8 +1660,10 @@ void arch_unmap_area_topdown(struct mm_s
64537 mm->free_area_cache = addr;
64538
64539 /* dont allow allocations above current base */
64540- if (mm->free_area_cache > mm->mmap_base)
64541+ if (mm->free_area_cache > mm->mmap_base) {
64542 mm->free_area_cache = mm->mmap_base;
64543+ mm->cached_hole_size = ~0UL;
64544+ }
64545 }
64546
64547 unsigned long
64548@@ -1545,6 +1771,27 @@ out:
64549 return prev ? prev->vm_next : vma;
64550 }
64551
64552+#ifdef CONFIG_PAX_SEGMEXEC
64553+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
64554+{
64555+ struct vm_area_struct *vma_m;
64556+
64557+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
64558+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
64559+ BUG_ON(vma->vm_mirror);
64560+ return NULL;
64561+ }
64562+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
64563+ vma_m = vma->vm_mirror;
64564+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
64565+ BUG_ON(vma->vm_file != vma_m->vm_file);
64566+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
64567+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
64568+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
64569+ return vma_m;
64570+}
64571+#endif
64572+
64573 /*
64574 * Verify that the stack growth is acceptable and
64575 * update accounting. This is shared with both the
64576@@ -1561,6 +1808,7 @@ static int acct_stack_growth(struct vm_a
64577 return -ENOMEM;
64578
64579 /* Stack limit test */
64580+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
64581 if (size > rlim[RLIMIT_STACK].rlim_cur)
64582 return -ENOMEM;
64583
64584@@ -1570,6 +1818,7 @@ static int acct_stack_growth(struct vm_a
64585 unsigned long limit;
64586 locked = mm->locked_vm + grow;
64587 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
64588+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
64589 if (locked > limit && !capable(CAP_IPC_LOCK))
64590 return -ENOMEM;
64591 }
64592@@ -1600,37 +1849,48 @@ static int acct_stack_growth(struct vm_a
64593 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
64594 * vma is the last one with address > vma->vm_end. Have to extend vma.
64595 */
64596+#ifndef CONFIG_IA64
64597+static
64598+#endif
64599 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
64600 {
64601 int error;
64602+ bool locknext;
64603
64604 if (!(vma->vm_flags & VM_GROWSUP))
64605 return -EFAULT;
64606
64607+ /* Also guard against wrapping around to address 0. */
64608+ if (address < PAGE_ALIGN(address+1))
64609+ address = PAGE_ALIGN(address+1);
64610+ else
64611+ return -ENOMEM;
64612+
64613 /*
64614 * We must make sure the anon_vma is allocated
64615 * so that the anon_vma locking is not a noop.
64616 */
64617 if (unlikely(anon_vma_prepare(vma)))
64618 return -ENOMEM;
64619+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
64620+ if (locknext && anon_vma_prepare(vma->vm_next))
64621+ return -ENOMEM;
64622 anon_vma_lock(vma);
64623+ if (locknext)
64624+ anon_vma_lock(vma->vm_next);
64625
64626 /*
64627 * vma->vm_start/vm_end cannot change under us because the caller
64628 * is required to hold the mmap_sem in read mode. We need the
64629- * anon_vma lock to serialize against concurrent expand_stacks.
64630- * Also guard against wrapping around to address 0.
64631+ * anon_vma locks to serialize against concurrent expand_stacks
64632+ * and expand_upwards.
64633 */
64634- if (address < PAGE_ALIGN(address+4))
64635- address = PAGE_ALIGN(address+4);
64636- else {
64637- anon_vma_unlock(vma);
64638- return -ENOMEM;
64639- }
64640 error = 0;
64641
64642 /* Somebody else might have raced and expanded it already */
64643- if (address > vma->vm_end) {
64644+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
64645+ error = -ENOMEM;
64646+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
64647 unsigned long size, grow;
64648
64649 size = address - vma->vm_start;
64650@@ -1640,6 +1900,8 @@ int expand_upwards(struct vm_area_struct
64651 if (!error)
64652 vma->vm_end = address;
64653 }
64654+ if (locknext)
64655+ anon_vma_unlock(vma->vm_next);
64656 anon_vma_unlock(vma);
64657 return error;
64658 }
64659@@ -1652,6 +1914,8 @@ static int expand_downwards(struct vm_ar
64660 unsigned long address)
64661 {
64662 int error;
64663+ bool lockprev = false;
64664+ struct vm_area_struct *prev;
64665
64666 /*
64667 * We must make sure the anon_vma is allocated
64668@@ -1665,6 +1929,15 @@ static int expand_downwards(struct vm_ar
64669 if (error)
64670 return error;
64671
64672+ prev = vma->vm_prev;
64673+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
64674+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
64675+#endif
64676+ if (lockprev && anon_vma_prepare(prev))
64677+ return -ENOMEM;
64678+ if (lockprev)
64679+ anon_vma_lock(prev);
64680+
64681 anon_vma_lock(vma);
64682
64683 /*
64684@@ -1674,9 +1947,17 @@ static int expand_downwards(struct vm_ar
64685 */
64686
64687 /* Somebody else might have raced and expanded it already */
64688- if (address < vma->vm_start) {
64689+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
64690+ error = -ENOMEM;
64691+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
64692 unsigned long size, grow;
64693
64694+#ifdef CONFIG_PAX_SEGMEXEC
64695+ struct vm_area_struct *vma_m;
64696+
64697+ vma_m = pax_find_mirror_vma(vma);
64698+#endif
64699+
64700 size = vma->vm_end - address;
64701 grow = (vma->vm_start - address) >> PAGE_SHIFT;
64702
64703@@ -1684,9 +1965,20 @@ static int expand_downwards(struct vm_ar
64704 if (!error) {
64705 vma->vm_start = address;
64706 vma->vm_pgoff -= grow;
64707+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
64708+
64709+#ifdef CONFIG_PAX_SEGMEXEC
64710+ if (vma_m) {
64711+ vma_m->vm_start -= grow << PAGE_SHIFT;
64712+ vma_m->vm_pgoff -= grow;
64713+ }
64714+#endif
64715+
64716 }
64717 }
64718 anon_vma_unlock(vma);
64719+ if (lockprev)
64720+ anon_vma_unlock(prev);
64721 return error;
64722 }
64723
64724@@ -1762,6 +2054,13 @@ static void remove_vma_list(struct mm_st
64725 do {
64726 long nrpages = vma_pages(vma);
64727
64728+#ifdef CONFIG_PAX_SEGMEXEC
64729+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
64730+ vma = remove_vma(vma);
64731+ continue;
64732+ }
64733+#endif
64734+
64735 mm->total_vm -= nrpages;
64736 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
64737 vma = remove_vma(vma);
64738@@ -1807,6 +2106,16 @@ detach_vmas_to_be_unmapped(struct mm_str
64739 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
64740 vma->vm_prev = NULL;
64741 do {
64742+
64743+#ifdef CONFIG_PAX_SEGMEXEC
64744+ if (vma->vm_mirror) {
64745+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
64746+ vma->vm_mirror->vm_mirror = NULL;
64747+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
64748+ vma->vm_mirror = NULL;
64749+ }
64750+#endif
64751+
64752 rb_erase(&vma->vm_rb, &mm->mm_rb);
64753 mm->map_count--;
64754 tail_vma = vma;
64755@@ -1834,10 +2143,25 @@ int split_vma(struct mm_struct * mm, str
64756 struct mempolicy *pol;
64757 struct vm_area_struct *new;
64758
64759+#ifdef CONFIG_PAX_SEGMEXEC
64760+ struct vm_area_struct *vma_m, *new_m = NULL;
64761+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
64762+#endif
64763+
64764 if (is_vm_hugetlb_page(vma) && (addr &
64765 ~(huge_page_mask(hstate_vma(vma)))))
64766 return -EINVAL;
64767
64768+#ifdef CONFIG_PAX_SEGMEXEC
64769+ vma_m = pax_find_mirror_vma(vma);
64770+
64771+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
64772+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
64773+ if (mm->map_count >= sysctl_max_map_count-1)
64774+ return -ENOMEM;
64775+ } else
64776+#endif
64777+
64778 if (mm->map_count >= sysctl_max_map_count)
64779 return -ENOMEM;
64780
64781@@ -1845,6 +2169,16 @@ int split_vma(struct mm_struct * mm, str
64782 if (!new)
64783 return -ENOMEM;
64784
64785+#ifdef CONFIG_PAX_SEGMEXEC
64786+ if (vma_m) {
64787+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
64788+ if (!new_m) {
64789+ kmem_cache_free(vm_area_cachep, new);
64790+ return -ENOMEM;
64791+ }
64792+ }
64793+#endif
64794+
64795 /* most fields are the same, copy all, and then fixup */
64796 *new = *vma;
64797
64798@@ -1855,8 +2189,29 @@ int split_vma(struct mm_struct * mm, str
64799 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
64800 }
64801
64802+#ifdef CONFIG_PAX_SEGMEXEC
64803+ if (vma_m) {
64804+ *new_m = *vma_m;
64805+ new_m->vm_mirror = new;
64806+ new->vm_mirror = new_m;
64807+
64808+ if (new_below)
64809+ new_m->vm_end = addr_m;
64810+ else {
64811+ new_m->vm_start = addr_m;
64812+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
64813+ }
64814+ }
64815+#endif
64816+
64817 pol = mpol_dup(vma_policy(vma));
64818 if (IS_ERR(pol)) {
64819+
64820+#ifdef CONFIG_PAX_SEGMEXEC
64821+ if (new_m)
64822+ kmem_cache_free(vm_area_cachep, new_m);
64823+#endif
64824+
64825 kmem_cache_free(vm_area_cachep, new);
64826 return PTR_ERR(pol);
64827 }
64828@@ -1877,6 +2232,28 @@ int split_vma(struct mm_struct * mm, str
64829 else
64830 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
64831
64832+#ifdef CONFIG_PAX_SEGMEXEC
64833+ if (vma_m) {
64834+ mpol_get(pol);
64835+ vma_set_policy(new_m, pol);
64836+
64837+ if (new_m->vm_file) {
64838+ get_file(new_m->vm_file);
64839+ if (vma_m->vm_flags & VM_EXECUTABLE)
64840+ added_exe_file_vma(mm);
64841+ }
64842+
64843+ if (new_m->vm_ops && new_m->vm_ops->open)
64844+ new_m->vm_ops->open(new_m);
64845+
64846+ if (new_below)
64847+ vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
64848+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
64849+ else
64850+ vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
64851+ }
64852+#endif
64853+
64854 return 0;
64855 }
64856
64857@@ -1885,11 +2262,30 @@ int split_vma(struct mm_struct * mm, str
64858 * work. This now handles partial unmappings.
64859 * Jeremy Fitzhardinge <jeremy@goop.org>
64860 */
64861+#ifdef CONFIG_PAX_SEGMEXEC
64862+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
64863+{
64864+ int ret = __do_munmap(mm, start, len);
64865+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
64866+ return ret;
64867+
64868+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
64869+}
64870+
64871+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
64872+#else
64873 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
64874+#endif
64875 {
64876 unsigned long end;
64877 struct vm_area_struct *vma, *prev, *last;
64878
64879+ /*
64880+ * mm->mmap_sem is required to protect against another thread
64881+ * changing the mappings in case we sleep.
64882+ */
64883+ verify_mm_writelocked(mm);
64884+
64885 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
64886 return -EINVAL;
64887
64888@@ -1953,6 +2349,8 @@ int do_munmap(struct mm_struct *mm, unsi
64889 /* Fix up all other VM information */
64890 remove_vma_list(mm, vma);
64891
64892+ track_exec_limit(mm, start, end, 0UL);
64893+
64894 return 0;
64895 }
64896
64897@@ -1965,22 +2363,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
64898
64899 profile_munmap(addr);
64900
64901+#ifdef CONFIG_PAX_SEGMEXEC
64902+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
64903+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
64904+ return -EINVAL;
64905+#endif
64906+
64907 down_write(&mm->mmap_sem);
64908 ret = do_munmap(mm, addr, len);
64909 up_write(&mm->mmap_sem);
64910 return ret;
64911 }
64912
64913-static inline void verify_mm_writelocked(struct mm_struct *mm)
64914-{
64915-#ifdef CONFIG_DEBUG_VM
64916- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
64917- WARN_ON(1);
64918- up_read(&mm->mmap_sem);
64919- }
64920-#endif
64921-}
64922-
64923 /*
64924 * this is really a simplified "do_mmap". it only handles
64925 * anonymous maps. eventually we may be able to do some
64926@@ -1994,6 +2388,7 @@ unsigned long do_brk(unsigned long addr,
64927 struct rb_node ** rb_link, * rb_parent;
64928 pgoff_t pgoff = addr >> PAGE_SHIFT;
64929 int error;
64930+ unsigned long charged;
64931
64932 len = PAGE_ALIGN(len);
64933 if (!len)
64934@@ -2005,16 +2400,30 @@ unsigned long do_brk(unsigned long addr,
64935
64936 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
64937
64938+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
64939+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
64940+ flags &= ~VM_EXEC;
64941+
64942+#ifdef CONFIG_PAX_MPROTECT
64943+ if (mm->pax_flags & MF_PAX_MPROTECT)
64944+ flags &= ~VM_MAYEXEC;
64945+#endif
64946+
64947+ }
64948+#endif
64949+
64950 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
64951 if (error & ~PAGE_MASK)
64952 return error;
64953
64954+ charged = len >> PAGE_SHIFT;
64955+
64956 /*
64957 * mlock MCL_FUTURE?
64958 */
64959 if (mm->def_flags & VM_LOCKED) {
64960 unsigned long locked, lock_limit;
64961- locked = len >> PAGE_SHIFT;
64962+ locked = charged;
64963 locked += mm->locked_vm;
64964 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
64965 lock_limit >>= PAGE_SHIFT;
64966@@ -2031,22 +2440,22 @@ unsigned long do_brk(unsigned long addr,
64967 /*
64968 * Clear old maps. this also does some error checking for us
64969 */
64970- munmap_back:
64971 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
64972 if (vma && vma->vm_start < addr + len) {
64973 if (do_munmap(mm, addr, len))
64974 return -ENOMEM;
64975- goto munmap_back;
64976+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
64977+ BUG_ON(vma && vma->vm_start < addr + len);
64978 }
64979
64980 /* Check against address space limits *after* clearing old maps... */
64981- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
64982+ if (!may_expand_vm(mm, charged))
64983 return -ENOMEM;
64984
64985 if (mm->map_count > sysctl_max_map_count)
64986 return -ENOMEM;
64987
64988- if (security_vm_enough_memory(len >> PAGE_SHIFT))
64989+ if (security_vm_enough_memory(charged))
64990 return -ENOMEM;
64991
64992 /* Can we just expand an old private anonymous mapping? */
64993@@ -2060,7 +2469,7 @@ unsigned long do_brk(unsigned long addr,
64994 */
64995 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
64996 if (!vma) {
64997- vm_unacct_memory(len >> PAGE_SHIFT);
64998+ vm_unacct_memory(charged);
64999 return -ENOMEM;
65000 }
65001
65002@@ -2072,11 +2481,12 @@ unsigned long do_brk(unsigned long addr,
65003 vma->vm_page_prot = vm_get_page_prot(flags);
65004 vma_link(mm, vma, prev, rb_link, rb_parent);
65005 out:
65006- mm->total_vm += len >> PAGE_SHIFT;
65007+ mm->total_vm += charged;
65008 if (flags & VM_LOCKED) {
65009 if (!mlock_vma_pages_range(vma, addr, addr + len))
65010- mm->locked_vm += (len >> PAGE_SHIFT);
65011+ mm->locked_vm += charged;
65012 }
65013+ track_exec_limit(mm, addr, addr + len, flags);
65014 return addr;
65015 }
65016
65017@@ -2123,8 +2533,10 @@ void exit_mmap(struct mm_struct *mm)
65018 * Walk the list again, actually closing and freeing it,
65019 * with preemption enabled, without holding any MM locks.
65020 */
65021- while (vma)
65022+ while (vma) {
65023+ vma->vm_mirror = NULL;
65024 vma = remove_vma(vma);
65025+ }
65026
65027 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
65028 }
65029@@ -2138,6 +2550,10 @@ int insert_vm_struct(struct mm_struct *
65030 struct vm_area_struct * __vma, * prev;
65031 struct rb_node ** rb_link, * rb_parent;
65032
65033+#ifdef CONFIG_PAX_SEGMEXEC
65034+ struct vm_area_struct *vma_m = NULL;
65035+#endif
65036+
65037 /*
65038 * The vm_pgoff of a purely anonymous vma should be irrelevant
65039 * until its first write fault, when page's anon_vma and index
65040@@ -2160,7 +2576,22 @@ int insert_vm_struct(struct mm_struct *
65041 if ((vma->vm_flags & VM_ACCOUNT) &&
65042 security_vm_enough_memory_mm(mm, vma_pages(vma)))
65043 return -ENOMEM;
65044+
65045+#ifdef CONFIG_PAX_SEGMEXEC
65046+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
65047+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
65048+ if (!vma_m)
65049+ return -ENOMEM;
65050+ }
65051+#endif
65052+
65053 vma_link(mm, vma, prev, rb_link, rb_parent);
65054+
65055+#ifdef CONFIG_PAX_SEGMEXEC
65056+ if (vma_m)
65057+ pax_mirror_vma(vma_m, vma);
65058+#endif
65059+
65060 return 0;
65061 }
65062
65063@@ -2178,6 +2609,8 @@ struct vm_area_struct *copy_vma(struct v
65064 struct rb_node **rb_link, *rb_parent;
65065 struct mempolicy *pol;
65066
65067+ BUG_ON(vma->vm_mirror);
65068+
65069 /*
65070 * If anonymous vma has not yet been faulted, update new pgoff
65071 * to match new location, to increase its chance of merging.
65072@@ -2221,6 +2654,35 @@ struct vm_area_struct *copy_vma(struct v
65073 return new_vma;
65074 }
65075
65076+#ifdef CONFIG_PAX_SEGMEXEC
65077+void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
65078+{
65079+ struct vm_area_struct *prev_m;
65080+ struct rb_node **rb_link_m, *rb_parent_m;
65081+ struct mempolicy *pol_m;
65082+
65083+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
65084+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
65085+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
65086+ *vma_m = *vma;
65087+ pol_m = vma_policy(vma_m);
65088+ mpol_get(pol_m);
65089+ vma_set_policy(vma_m, pol_m);
65090+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
65091+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
65092+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
65093+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
65094+ if (vma_m->vm_file)
65095+ get_file(vma_m->vm_file);
65096+ if (vma_m->vm_ops && vma_m->vm_ops->open)
65097+ vma_m->vm_ops->open(vma_m);
65098+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
65099+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
65100+ vma_m->vm_mirror = vma;
65101+ vma->vm_mirror = vma_m;
65102+}
65103+#endif
65104+
65105 /*
65106 * Return true if the calling process may expand its vm space by the passed
65107 * number of pages
65108@@ -2231,7 +2693,7 @@ int may_expand_vm(struct mm_struct *mm,
65109 unsigned long lim;
65110
65111 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
65112-
65113+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
65114 if (cur + npages > lim)
65115 return 0;
65116 return 1;
65117@@ -2301,6 +2763,22 @@ int install_special_mapping(struct mm_st
65118 vma->vm_start = addr;
65119 vma->vm_end = addr + len;
65120
65121+#ifdef CONFIG_PAX_MPROTECT
65122+ if (mm->pax_flags & MF_PAX_MPROTECT) {
65123+#ifndef CONFIG_PAX_MPROTECT_COMPAT
65124+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
65125+ return -EPERM;
65126+ if (!(vm_flags & VM_EXEC))
65127+ vm_flags &= ~VM_MAYEXEC;
65128+#else
65129+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
65130+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
65131+#endif
65132+ else
65133+ vm_flags &= ~VM_MAYWRITE;
65134+ }
65135+#endif
65136+
65137 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
65138 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
65139
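The check_heap_stack_gap() helper introduced in the mm/mmap.c hunks above enforces a configurable gap (sysctl_heap_stack_gap, 64 KiB by default) between a candidate mapping and any stack VMA growing toward it. A minimal standalone userspace sketch of that policy follows; it is not kernel code, and the names vma_desc and GROWSDOWN are invented for the example.

#include <stdbool.h>
#include <stdio.h>

#define GROWSDOWN 0x1   /* hypothetical flag: VMA grows toward lower addresses */

struct vma_desc {
	unsigned long start;
	unsigned long end;
	unsigned long flags;
};

static unsigned long heap_stack_gap = 64 * 1024; /* mirrors the 64 KiB default above */

/* Return true when [addr, addr+len) may be placed below the VMA `next`. */
static bool gap_ok(const struct vma_desc *next, unsigned long addr, unsigned long len)
{
	if (!next)
		return true;                               /* nothing above the candidate */
	if (addr + len > next->start)
		return false;                              /* plain overlap */
	if (next->flags & GROWSDOWN)
		return next->start - (addr + len) >= heap_stack_gap;
	return true;                                       /* ordinary VMA: touching is fine */
}

int main(void)
{
	struct vma_desc stack = { 0x40000000UL, 0x40021000UL, GROWSDOWN };

	printf("%d\n", gap_ok(&stack, 0x3ffe0000UL, 0x10000)); /* exactly 64 KiB below: allowed */
	printf("%d\n", gap_ok(&stack, 0x3fff8000UL, 0x4000));  /* only 16 KiB below: rejected */
	return 0;
}

Compiled as ordinary C this prints 1 then 0: the first candidate leaves exactly the 64 KiB gap, the second would end 16 KiB below the stack and is refused, which is the behaviour the arch_get_unmapped_area() hunks rely on.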
65140diff -urNp linux-2.6.32.41/mm/mprotect.c linux-2.6.32.41/mm/mprotect.c
65141--- linux-2.6.32.41/mm/mprotect.c 2011-03-27 14:31:47.000000000 -0400
65142+++ linux-2.6.32.41/mm/mprotect.c 2011-04-17 15:56:46.000000000 -0400
65143@@ -24,10 +24,16 @@
65144 #include <linux/mmu_notifier.h>
65145 #include <linux/migrate.h>
65146 #include <linux/perf_event.h>
65147+
65148+#ifdef CONFIG_PAX_MPROTECT
65149+#include <linux/elf.h>
65150+#endif
65151+
65152 #include <asm/uaccess.h>
65153 #include <asm/pgtable.h>
65154 #include <asm/cacheflush.h>
65155 #include <asm/tlbflush.h>
65156+#include <asm/mmu_context.h>
65157
65158 #ifndef pgprot_modify
65159 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
65160@@ -132,6 +138,48 @@ static void change_protection(struct vm_
65161 flush_tlb_range(vma, start, end);
65162 }
65163
65164+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
65165+/* called while holding the mmap semaphore for writing, except for stack expansion */
65166+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
65167+{
65168+ unsigned long oldlimit, newlimit = 0UL;
65169+
65170+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
65171+ return;
65172+
65173+ spin_lock(&mm->page_table_lock);
65174+ oldlimit = mm->context.user_cs_limit;
65175+ if ((prot & VM_EXEC) && oldlimit < end)
65176+ /* USER_CS limit moved up */
65177+ newlimit = end;
65178+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
65179+ /* USER_CS limit moved down */
65180+ newlimit = start;
65181+
65182+ if (newlimit) {
65183+ mm->context.user_cs_limit = newlimit;
65184+
65185+#ifdef CONFIG_SMP
65186+ wmb();
65187+ cpus_clear(mm->context.cpu_user_cs_mask);
65188+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
65189+#endif
65190+
65191+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
65192+ }
65193+ spin_unlock(&mm->page_table_lock);
65194+ if (newlimit == end) {
65195+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
65196+
65197+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
65198+ if (is_vm_hugetlb_page(vma))
65199+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
65200+ else
65201+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
65202+ }
65203+}
65204+#endif
65205+
65206 int
65207 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
65208 unsigned long start, unsigned long end, unsigned long newflags)
65209@@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vm
65210 int error;
65211 int dirty_accountable = 0;
65212
65213+#ifdef CONFIG_PAX_SEGMEXEC
65214+ struct vm_area_struct *vma_m = NULL;
65215+ unsigned long start_m, end_m;
65216+
65217+ start_m = start + SEGMEXEC_TASK_SIZE;
65218+ end_m = end + SEGMEXEC_TASK_SIZE;
65219+#endif
65220+
65221 if (newflags == oldflags) {
65222 *pprev = vma;
65223 return 0;
65224 }
65225
65226+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
65227+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
65228+
65229+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
65230+ return -ENOMEM;
65231+
65232+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
65233+ return -ENOMEM;
65234+ }
65235+
65236 /*
65237 * If we make a private mapping writable we increase our commit;
65238 * but (without finer accounting) cannot reduce our commit if we
65239@@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vm
65240 }
65241 }
65242
65243+#ifdef CONFIG_PAX_SEGMEXEC
65244+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
65245+ if (start != vma->vm_start) {
65246+ error = split_vma(mm, vma, start, 1);
65247+ if (error)
65248+ goto fail;
65249+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
65250+ *pprev = (*pprev)->vm_next;
65251+ }
65252+
65253+ if (end != vma->vm_end) {
65254+ error = split_vma(mm, vma, end, 0);
65255+ if (error)
65256+ goto fail;
65257+ }
65258+
65259+ if (pax_find_mirror_vma(vma)) {
65260+ error = __do_munmap(mm, start_m, end_m - start_m);
65261+ if (error)
65262+ goto fail;
65263+ } else {
65264+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
65265+ if (!vma_m) {
65266+ error = -ENOMEM;
65267+ goto fail;
65268+ }
65269+ vma->vm_flags = newflags;
65270+ pax_mirror_vma(vma_m, vma);
65271+ }
65272+ }
65273+#endif
65274+
65275 /*
65276 * First try to merge with previous and/or next vma.
65277 */
65278@@ -195,9 +293,21 @@ success:
65279 * vm_flags and vm_page_prot are protected by the mmap_sem
65280 * held in write mode.
65281 */
65282+
65283+#ifdef CONFIG_PAX_SEGMEXEC
65284+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
65285+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
65286+#endif
65287+
65288 vma->vm_flags = newflags;
65289+
65290+#ifdef CONFIG_PAX_MPROTECT
65291+ if (mm->binfmt && mm->binfmt->handle_mprotect)
65292+ mm->binfmt->handle_mprotect(vma, newflags);
65293+#endif
65294+
65295 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
65296- vm_get_page_prot(newflags));
65297+ vm_get_page_prot(vma->vm_flags));
65298
65299 if (vma_wants_writenotify(vma)) {
65300 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
65301@@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65302 end = start + len;
65303 if (end <= start)
65304 return -ENOMEM;
65305+
65306+#ifdef CONFIG_PAX_SEGMEXEC
65307+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
65308+ if (end > SEGMEXEC_TASK_SIZE)
65309+ return -EINVAL;
65310+ } else
65311+#endif
65312+
65313+ if (end > TASK_SIZE)
65314+ return -EINVAL;
65315+
65316 if (!arch_validate_prot(prot))
65317 return -EINVAL;
65318
65319@@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65320 /*
65321 * Does the application expect PROT_READ to imply PROT_EXEC:
65322 */
65323- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
65324+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
65325 prot |= PROT_EXEC;
65326
65327 vm_flags = calc_vm_prot_bits(prot);
65328@@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65329 if (start > vma->vm_start)
65330 prev = vma;
65331
65332+#ifdef CONFIG_PAX_MPROTECT
65333+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
65334+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
65335+#endif
65336+
65337 for (nstart = start ; ; ) {
65338 unsigned long newflags;
65339
65340@@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65341
65342 /* newflags >> 4 shift VM_MAY% in place of VM_% */
65343 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
65344+ if (prot & (PROT_WRITE | PROT_EXEC))
65345+ gr_log_rwxmprotect(vma->vm_file);
65346+
65347+ error = -EACCES;
65348+ goto out;
65349+ }
65350+
65351+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
65352 error = -EACCES;
65353 goto out;
65354 }
65355@@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65356 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
65357 if (error)
65358 goto out;
65359+
65360+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
65361+
65362 nstart = tmp;
65363
65364 if (nstart < prev->vm_end)
65365diff -urNp linux-2.6.32.41/mm/mremap.c linux-2.6.32.41/mm/mremap.c
65366--- linux-2.6.32.41/mm/mremap.c 2011-04-17 17:00:52.000000000 -0400
65367+++ linux-2.6.32.41/mm/mremap.c 2011-04-17 17:03:58.000000000 -0400
65368@@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_str
65369 continue;
65370 pte = ptep_clear_flush(vma, old_addr, old_pte);
65371 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
65372+
65373+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
65374+ if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
65375+ pte = pte_exprotect(pte);
65376+#endif
65377+
65378 set_pte_at(mm, new_addr, new_pte, pte);
65379 }
65380
65381@@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_res
65382 if (is_vm_hugetlb_page(vma))
65383 goto Einval;
65384
65385+#ifdef CONFIG_PAX_SEGMEXEC
65386+ if (pax_find_mirror_vma(vma))
65387+ goto Einval;
65388+#endif
65389+
65390 /* We can't remap across vm area boundaries */
65391 if (old_len > vma->vm_end - addr)
65392 goto Efault;
65393@@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned
65394 unsigned long ret = -EINVAL;
65395 unsigned long charged = 0;
65396 unsigned long map_flags;
65397+ unsigned long pax_task_size = TASK_SIZE;
65398
65399 if (new_addr & ~PAGE_MASK)
65400 goto out;
65401
65402- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
65403+#ifdef CONFIG_PAX_SEGMEXEC
65404+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
65405+ pax_task_size = SEGMEXEC_TASK_SIZE;
65406+#endif
65407+
65408+ pax_task_size -= PAGE_SIZE;
65409+
65410+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
65411 goto out;
65412
65413 /* Check if the location we're moving into overlaps the
65414 * old location at all, and fail if it does.
65415 */
65416- if ((new_addr <= addr) && (new_addr+new_len) > addr)
65417- goto out;
65418-
65419- if ((addr <= new_addr) && (addr+old_len) > new_addr)
65420+ if (addr + old_len > new_addr && new_addr + new_len > addr)
65421 goto out;
65422
65423 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
65424@@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long ad
65425 struct vm_area_struct *vma;
65426 unsigned long ret = -EINVAL;
65427 unsigned long charged = 0;
65428+ unsigned long pax_task_size = TASK_SIZE;
65429
65430 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
65431 goto out;
65432@@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long ad
65433 if (!new_len)
65434 goto out;
65435
65436+#ifdef CONFIG_PAX_SEGMEXEC
65437+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
65438+ pax_task_size = SEGMEXEC_TASK_SIZE;
65439+#endif
65440+
65441+ pax_task_size -= PAGE_SIZE;
65442+
65443+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
65444+ old_len > pax_task_size || addr > pax_task_size-old_len)
65445+ goto out;
65446+
65447 if (flags & MREMAP_FIXED) {
65448 if (flags & MREMAP_MAYMOVE)
65449 ret = mremap_to(addr, old_len, new_addr, new_len);
65450@@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long ad
65451 addr + new_len);
65452 }
65453 ret = addr;
65454+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
65455 goto out;
65456 }
65457 }
65458@@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long ad
65459 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
65460 if (ret)
65461 goto out;
65462+
65463+ map_flags = vma->vm_flags;
65464 ret = move_vma(vma, addr, old_len, new_len, new_addr);
65465+ if (!(ret & ~PAGE_MASK)) {
65466+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
65467+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
65468+ }
65469 }
65470 out:
65471 if (ret & ~PAGE_MASK)
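The mremap_to() hunk above folds two separate comparisons into the standard half-open interval test: [addr, addr+old_len) and [new_addr, new_addr+new_len) intersect exactly when addr + old_len > new_addr and new_addr + new_len > addr. A small self-contained demo of that predicate, plain C rather than kernel code:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* True when [a, a+alen) and [b, b+blen) share at least one address. */
static bool ranges_overlap(unsigned long a, unsigned long alen,
			   unsigned long b, unsigned long blen)
{
	return a + alen > b && b + blen > a;
}

int main(void)
{
	assert(ranges_overlap(0x1000, 0x3000, 0x2000, 0x1000));   /* one range nested in the other */
	assert(!ranges_overlap(0x1000, 0x1000, 0x2000, 0x1000));  /* adjacent ranges do not overlap */
	assert(ranges_overlap(0x1000, 0x2000, 0x2fff, 0x1000));   /* single-byte overlap is caught */
	puts("overlap test behaves as expected");
	return 0;
}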
65472diff -urNp linux-2.6.32.41/mm/nommu.c linux-2.6.32.41/mm/nommu.c
65473--- linux-2.6.32.41/mm/nommu.c 2011-03-27 14:31:47.000000000 -0400
65474+++ linux-2.6.32.41/mm/nommu.c 2011-04-17 15:56:46.000000000 -0400
65475@@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMI
65476 int sysctl_overcommit_ratio = 50; /* default is 50% */
65477 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
65478 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
65479-int heap_stack_gap = 0;
65480
65481 atomic_long_t mmap_pages_allocated;
65482
65483@@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct m
65484 EXPORT_SYMBOL(find_vma);
65485
65486 /*
65487- * find a VMA
65488- * - we don't extend stack VMAs under NOMMU conditions
65489- */
65490-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
65491-{
65492- return find_vma(mm, addr);
65493-}
65494-
65495-/*
65496 * expand a stack to a given address
65497 * - not supported under NOMMU conditions
65498 */
65499diff -urNp linux-2.6.32.41/mm/page_alloc.c linux-2.6.32.41/mm/page_alloc.c
65500--- linux-2.6.32.41/mm/page_alloc.c 2011-03-27 14:31:47.000000000 -0400
65501+++ linux-2.6.32.41/mm/page_alloc.c 2011-05-16 21:46:57.000000000 -0400
65502@@ -587,6 +587,10 @@ static void __free_pages_ok(struct page
65503 int bad = 0;
65504 int wasMlocked = __TestClearPageMlocked(page);
65505
65506+#ifdef CONFIG_PAX_MEMORY_SANITIZE
65507+ unsigned long index = 1UL << order;
65508+#endif
65509+
65510 kmemcheck_free_shadow(page, order);
65511
65512 for (i = 0 ; i < (1 << order) ; ++i)
65513@@ -599,6 +603,12 @@ static void __free_pages_ok(struct page
65514 debug_check_no_obj_freed(page_address(page),
65515 PAGE_SIZE << order);
65516 }
65517+
65518+#ifdef CONFIG_PAX_MEMORY_SANITIZE
65519+ for (; index; --index)
65520+ sanitize_highpage(page + index - 1);
65521+#endif
65522+
65523 arch_free_page(page, order);
65524 kernel_map_pages(page, 1 << order, 0);
65525
65526@@ -702,8 +712,10 @@ static int prep_new_page(struct page *pa
65527 arch_alloc_page(page, order);
65528 kernel_map_pages(page, 1 << order, 1);
65529
65530+#ifndef CONFIG_PAX_MEMORY_SANITIZE
65531 if (gfp_flags & __GFP_ZERO)
65532 prep_zero_page(page, order, gfp_flags);
65533+#endif
65534
65535 if (order && (gfp_flags & __GFP_COMP))
65536 prep_compound_page(page, order);
65537@@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct pa
65538 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
65539 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
65540 }
65541+
65542+#ifdef CONFIG_PAX_MEMORY_SANITIZE
65543+ sanitize_highpage(page);
65544+#endif
65545+
65546 arch_free_page(page, 0);
65547 kernel_map_pages(page, 1, 0);
65548
65549@@ -2179,6 +2196,8 @@ void show_free_areas(void)
65550 int cpu;
65551 struct zone *zone;
65552
65553+ pax_track_stack();
65554+
65555 for_each_populated_zone(zone) {
65556 show_node(zone);
65557 printk("%s per-cpu:\n", zone->name);
65558@@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct p
65559 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
65560 }
65561 #else
65562-static void inline setup_usemap(struct pglist_data *pgdat,
65563+static inline void setup_usemap(struct pglist_data *pgdat,
65564 struct zone *zone, unsigned long zonesize) {}
65565 #endif /* CONFIG_SPARSEMEM */
65566
65567diff -urNp linux-2.6.32.41/mm/percpu.c linux-2.6.32.41/mm/percpu.c
65568--- linux-2.6.32.41/mm/percpu.c 2011-03-27 14:31:47.000000000 -0400
65569+++ linux-2.6.32.41/mm/percpu.c 2011-04-17 15:56:46.000000000 -0400
65570@@ -115,7 +115,7 @@ static unsigned int pcpu_first_unit_cpu
65571 static unsigned int pcpu_last_unit_cpu __read_mostly;
65572
65573 /* the address of the first chunk which starts with the kernel static area */
65574-void *pcpu_base_addr __read_mostly;
65575+void *pcpu_base_addr __read_only;
65576 EXPORT_SYMBOL_GPL(pcpu_base_addr);
65577
65578 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
65579diff -urNp linux-2.6.32.41/mm/rmap.c linux-2.6.32.41/mm/rmap.c
65580--- linux-2.6.32.41/mm/rmap.c 2011-03-27 14:31:47.000000000 -0400
65581+++ linux-2.6.32.41/mm/rmap.c 2011-04-17 15:56:46.000000000 -0400
65582@@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_stru
65583 /* page_table_lock to protect against threads */
65584 spin_lock(&mm->page_table_lock);
65585 if (likely(!vma->anon_vma)) {
65586+
65587+#ifdef CONFIG_PAX_SEGMEXEC
65588+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
65589+
65590+ if (vma_m) {
65591+ BUG_ON(vma_m->anon_vma);
65592+ vma_m->anon_vma = anon_vma;
65593+ list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
65594+ }
65595+#endif
65596+
65597 vma->anon_vma = anon_vma;
65598 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
65599 allocated = NULL;
65600diff -urNp linux-2.6.32.41/mm/shmem.c linux-2.6.32.41/mm/shmem.c
65601--- linux-2.6.32.41/mm/shmem.c 2011-03-27 14:31:47.000000000 -0400
65602+++ linux-2.6.32.41/mm/shmem.c 2011-05-18 20:09:37.000000000 -0400
65603@@ -31,7 +31,7 @@
65604 #include <linux/swap.h>
65605 #include <linux/ima.h>
65606
65607-static struct vfsmount *shm_mnt;
65608+struct vfsmount *shm_mnt;
65609
65610 #ifdef CONFIG_SHMEM
65611 /*
65612@@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *
65613 goto unlock;
65614 }
65615 entry = shmem_swp_entry(info, index, NULL);
65616+ if (!entry)
65617+ goto unlock;
65618 if (entry->val) {
65619 /*
65620 * The more uptodate page coming down from a stacked
65621@@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_ent
65622 struct vm_area_struct pvma;
65623 struct page *page;
65624
65625+ pax_track_stack();
65626+
65627 spol = mpol_cond_copy(&mpol,
65628 mpol_shared_policy_lookup(&info->policy, idx));
65629
65630@@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *d
65631
65632 info = SHMEM_I(inode);
65633 inode->i_size = len-1;
65634- if (len <= (char *)inode - (char *)info) {
65635+ if (len <= (char *)inode - (char *)info && len <= 64) {
65636 /* do it inline */
65637 memcpy(info, symname, len);
65638 inode->i_op = &shmem_symlink_inline_operations;
65639@@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block
65640 int err = -ENOMEM;
65641
65642 /* Round up to L1_CACHE_BYTES to resist false sharing */
65643- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
65644- L1_CACHE_BYTES), GFP_KERNEL);
65645+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
65646 if (!sbinfo)
65647 return -ENOMEM;
65648
65649diff -urNp linux-2.6.32.41/mm/slab.c linux-2.6.32.41/mm/slab.c
65650--- linux-2.6.32.41/mm/slab.c 2011-03-27 14:31:47.000000000 -0400
65651+++ linux-2.6.32.41/mm/slab.c 2011-05-04 17:56:20.000000000 -0400
65652@@ -174,7 +174,7 @@
65653
65654 /* Legal flag mask for kmem_cache_create(). */
65655 #if DEBUG
65656-# define CREATE_MASK (SLAB_RED_ZONE | \
65657+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
65658 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
65659 SLAB_CACHE_DMA | \
65660 SLAB_STORE_USER | \
65661@@ -182,7 +182,7 @@
65662 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
65663 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
65664 #else
65665-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
65666+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
65667 SLAB_CACHE_DMA | \
65668 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
65669 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
65670@@ -308,7 +308,7 @@ struct kmem_list3 {
65671 * Need this for bootstrapping a per node allocator.
65672 */
65673 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
65674-struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
65675+struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
65676 #define CACHE_CACHE 0
65677 #define SIZE_AC MAX_NUMNODES
65678 #define SIZE_L3 (2 * MAX_NUMNODES)
65679@@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_
65680 if ((x)->max_freeable < i) \
65681 (x)->max_freeable = i; \
65682 } while (0)
65683-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
65684-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
65685-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
65686-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
65687+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
65688+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
65689+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
65690+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
65691 #else
65692 #define STATS_INC_ACTIVE(x) do { } while (0)
65693 #define STATS_DEC_ACTIVE(x) do { } while (0)
65694@@ -558,7 +558,7 @@ static inline void *index_to_obj(struct
65695 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
65696 */
65697 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
65698- const struct slab *slab, void *obj)
65699+ const struct slab *slab, const void *obj)
65700 {
65701 u32 offset = (obj - slab->s_mem);
65702 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
65703@@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
65704 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
65705 sizes[INDEX_AC].cs_size,
65706 ARCH_KMALLOC_MINALIGN,
65707- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
65708+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
65709 NULL);
65710
65711 if (INDEX_AC != INDEX_L3) {
65712@@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
65713 kmem_cache_create(names[INDEX_L3].name,
65714 sizes[INDEX_L3].cs_size,
65715 ARCH_KMALLOC_MINALIGN,
65716- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
65717+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
65718 NULL);
65719 }
65720
65721@@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
65722 sizes->cs_cachep = kmem_cache_create(names->name,
65723 sizes->cs_size,
65724 ARCH_KMALLOC_MINALIGN,
65725- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
65726+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
65727 NULL);
65728 }
65729 #ifdef CONFIG_ZONE_DMA
65730@@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, vo
65731 }
65732 /* cpu stats */
65733 {
65734- unsigned long allochit = atomic_read(&cachep->allochit);
65735- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
65736- unsigned long freehit = atomic_read(&cachep->freehit);
65737- unsigned long freemiss = atomic_read(&cachep->freemiss);
65738+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
65739+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
65740+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
65741+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
65742
65743 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
65744 allochit, allocmiss, freehit, freemiss);
65745@@ -4471,15 +4471,66 @@ static const struct file_operations proc
65746
65747 static int __init slab_proc_init(void)
65748 {
65749- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
65750+ mode_t gr_mode = S_IRUGO;
65751+
65752+#ifdef CONFIG_GRKERNSEC_PROC_ADD
65753+ gr_mode = S_IRUSR;
65754+#endif
65755+
65756+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
65757 #ifdef CONFIG_DEBUG_SLAB_LEAK
65758- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
65759+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
65760 #endif
65761 return 0;
65762 }
65763 module_init(slab_proc_init);
65764 #endif
65765
65766+void check_object_size(const void *ptr, unsigned long n, bool to)
65767+{
65768+
65769+#ifdef CONFIG_PAX_USERCOPY
65770+ struct page *page;
65771+ struct kmem_cache *cachep = NULL;
65772+ struct slab *slabp;
65773+ unsigned int objnr;
65774+ unsigned long offset;
65775+
65776+ if (!n)
65777+ return;
65778+
65779+ if (ZERO_OR_NULL_PTR(ptr))
65780+ goto report;
65781+
65782+ if (!virt_addr_valid(ptr))
65783+ return;
65784+
65785+ page = virt_to_head_page(ptr);
65786+
65787+ if (!PageSlab(page)) {
65788+ if (object_is_on_stack(ptr, n) == -1)
65789+ goto report;
65790+ return;
65791+ }
65792+
65793+ cachep = page_get_cache(page);
65794+ if (!(cachep->flags & SLAB_USERCOPY))
65795+ goto report;
65796+
65797+ slabp = page_get_slab(page);
65798+ objnr = obj_to_index(cachep, slabp, ptr);
65799+ BUG_ON(objnr >= cachep->num);
65800+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
65801+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
65802+ return;
65803+
65804+report:
65805+ pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
65806+#endif
65807+
65808+}
65809+EXPORT_SYMBOL(check_object_size);
65810+
65811 /**
65812 * ksize - get the actual amount of memory allocated for a given object
65813 * @objp: Pointer to the object
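For readers following the PAX_USERCOPY hunks above: check_object_size() only lets a user copy proceed when it stays inside a single SLAB_USERCOPY-marked object (or inside the current stack frame). The allow/deny arithmetic is small enough to model in a standalone program; the helper name below is made up, and offset/obj_size stand for the values the real code derives via obj_to_index()/index_to_obj() and obj_size().

#include <assert.h>
#include <stddef.h>

/* Model of the bounds test used by check_object_size() above: a copy of
 * n bytes starting at `offset` inside an object of `obj_size` bytes is
 * permitted only if it ends inside that same object. */
static int usercopy_ok(size_t offset, size_t n, size_t obj_size)
{
	return offset <= obj_size && n <= obj_size - offset;
}

int main(void)
{
	assert(usercopy_ok(0, 64, 64));    /* copies the whole object: allowed  */
	assert(usercopy_ok(16, 32, 64));   /* stays inside the object: allowed  */
	assert(!usercopy_ok(16, 64, 64));  /* runs 16 bytes past the end: report */
	assert(!usercopy_ok(80, 4, 64));   /* starts beyond the object: report   */
	return 0;
}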
65814diff -urNp linux-2.6.32.41/mm/slob.c linux-2.6.32.41/mm/slob.c
65815--- linux-2.6.32.41/mm/slob.c 2011-03-27 14:31:47.000000000 -0400
65816+++ linux-2.6.32.41/mm/slob.c 2011-04-17 15:56:46.000000000 -0400
65817@@ -29,7 +29,7 @@
65818 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
65819 * alloc_pages() directly, allocating compound pages so the page order
65820 * does not have to be separately tracked, and also stores the exact
65821- * allocation size in page->private so that it can be used to accurately
65822+ * allocation size in slob_page->size so that it can be used to accurately
65823 * provide ksize(). These objects are detected in kfree() because slob_page()
65824 * is false for them.
65825 *
65826@@ -58,6 +58,7 @@
65827 */
65828
65829 #include <linux/kernel.h>
65830+#include <linux/sched.h>
65831 #include <linux/slab.h>
65832 #include <linux/mm.h>
65833 #include <linux/swap.h> /* struct reclaim_state */
65834@@ -100,7 +101,8 @@ struct slob_page {
65835 unsigned long flags; /* mandatory */
65836 atomic_t _count; /* mandatory */
65837 slobidx_t units; /* free units left in page */
65838- unsigned long pad[2];
65839+ unsigned long pad[1];
65840+ unsigned long size; /* size when >=PAGE_SIZE */
65841 slob_t *free; /* first free slob_t in page */
65842 struct list_head list; /* linked list of free pages */
65843 };
65844@@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
65845 */
65846 static inline int is_slob_page(struct slob_page *sp)
65847 {
65848- return PageSlab((struct page *)sp);
65849+ return PageSlab((struct page *)sp) && !sp->size;
65850 }
65851
65852 static inline void set_slob_page(struct slob_page *sp)
65853@@ -148,7 +150,7 @@ static inline void clear_slob_page(struc
65854
65855 static inline struct slob_page *slob_page(const void *addr)
65856 {
65857- return (struct slob_page *)virt_to_page(addr);
65858+ return (struct slob_page *)virt_to_head_page(addr);
65859 }
65860
65861 /*
65862@@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_
65863 /*
65864 * Return the size of a slob block.
65865 */
65866-static slobidx_t slob_units(slob_t *s)
65867+static slobidx_t slob_units(const slob_t *s)
65868 {
65869 if (s->units > 0)
65870 return s->units;
65871@@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
65872 /*
65873 * Return the next free slob block pointer after this one.
65874 */
65875-static slob_t *slob_next(slob_t *s)
65876+static slob_t *slob_next(const slob_t *s)
65877 {
65878 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
65879 slobidx_t next;
65880@@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
65881 /*
65882 * Returns true if s is the last free block in its page.
65883 */
65884-static int slob_last(slob_t *s)
65885+static int slob_last(const slob_t *s)
65886 {
65887 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
65888 }
65889@@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, i
65890 if (!page)
65891 return NULL;
65892
65893+ set_slob_page(page);
65894 return page_address(page);
65895 }
65896
65897@@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp
65898 if (!b)
65899 return NULL;
65900 sp = slob_page(b);
65901- set_slob_page(sp);
65902
65903 spin_lock_irqsave(&slob_lock, flags);
65904 sp->units = SLOB_UNITS(PAGE_SIZE);
65905 sp->free = b;
65906+ sp->size = 0;
65907 INIT_LIST_HEAD(&sp->list);
65908 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
65909 set_slob_page_free(sp, slob_list);
65910@@ -475,10 +478,9 @@ out:
65911 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
65912 #endif
65913
65914-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
65915+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
65916 {
65917- unsigned int *m;
65918- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
65919+ slob_t *m;
65920 void *ret;
65921
65922 lockdep_trace_alloc(gfp);
65923@@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t
65924
65925 if (!m)
65926 return NULL;
65927- *m = size;
65928+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
65929+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
65930+ m[0].units = size;
65931+ m[1].units = align;
65932 ret = (void *)m + align;
65933
65934 trace_kmalloc_node(_RET_IP_, ret,
65935@@ -501,9 +506,9 @@ void *__kmalloc_node(size_t size, gfp_t
65936
65937 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
65938 if (ret) {
65939- struct page *page;
65940- page = virt_to_page(ret);
65941- page->private = size;
65942+ struct slob_page *sp;
65943+ sp = slob_page(ret);
65944+ sp->size = size;
65945 }
65946
65947 trace_kmalloc_node(_RET_IP_, ret,
65948@@ -513,6 +518,13 @@ void *__kmalloc_node(size_t size, gfp_t
65949 kmemleak_alloc(ret, size, 1, gfp);
65950 return ret;
65951 }
65952+
65953+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
65954+{
65955+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
65956+
65957+ return __kmalloc_node_align(size, gfp, node, align);
65958+}
65959 EXPORT_SYMBOL(__kmalloc_node);
65960
65961 void kfree(const void *block)
65962@@ -528,13 +540,81 @@ void kfree(const void *block)
65963 sp = slob_page(block);
65964 if (is_slob_page(sp)) {
65965 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
65966- unsigned int *m = (unsigned int *)(block - align);
65967- slob_free(m, *m + align);
65968- } else
65969+ slob_t *m = (slob_t *)(block - align);
65970+ slob_free(m, m[0].units + align);
65971+ } else {
65972+ clear_slob_page(sp);
65973+ free_slob_page(sp);
65974+ sp->size = 0;
65975 put_page(&sp->page);
65976+ }
65977 }
65978 EXPORT_SYMBOL(kfree);
65979
65980+void check_object_size(const void *ptr, unsigned long n, bool to)
65981+{
65982+
65983+#ifdef CONFIG_PAX_USERCOPY
65984+ struct slob_page *sp;
65985+ const slob_t *free;
65986+ const void *base;
65987+
65988+ if (!n)
65989+ return;
65990+
65991+ if (ZERO_OR_NULL_PTR(ptr))
65992+ goto report;
65993+
65994+ if (!virt_addr_valid(ptr))
65995+ return;
65996+
65997+ sp = slob_page(ptr);
65998+ if (!PageSlab((struct page*)sp)) {
65999+ if (object_is_on_stack(ptr, n) == -1)
66000+ goto report;
66001+ return;
66002+ }
66003+
66004+ if (sp->size) {
66005+ base = page_address(&sp->page);
66006+ if (base <= ptr && n <= sp->size - (ptr - base))
66007+ return;
66008+ goto report;
66009+ }
66010+
66011+ /* some tricky double walking to find the chunk */
66012+ base = (void *)((unsigned long)ptr & PAGE_MASK);
66013+ free = sp->free;
66014+
66015+ while (!slob_last(free) && (void *)free <= ptr) {
66016+ base = free + slob_units(free);
66017+ free = slob_next(free);
66018+ }
66019+
66020+ while (base < (void *)free) {
66021+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
66022+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
66023+ int offset;
66024+
66025+ if (ptr < base + align)
66026+ goto report;
66027+
66028+ offset = ptr - base - align;
66029+ if (offset < m) {
66030+ if (n <= m - offset)
66031+ return;
66032+ goto report;
66033+ }
66034+ base += size;
66035+ }
66036+
66037+report:
66038+ pax_report_usercopy(ptr, n, to, NULL);
66039+#endif
66040+
66041+}
66042+EXPORT_SYMBOL(check_object_size);
66043+
66044 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
66045 size_t ksize(const void *block)
66046 {
66047@@ -547,10 +627,10 @@ size_t ksize(const void *block)
66048 sp = slob_page(block);
66049 if (is_slob_page(sp)) {
66050 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66051- unsigned int *m = (unsigned int *)(block - align);
66052- return SLOB_UNITS(*m) * SLOB_UNIT;
66053+ slob_t *m = (slob_t *)(block - align);
66054+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
66055 } else
66056- return sp->page.private;
66057+ return sp->size;
66058 }
66059 EXPORT_SYMBOL(ksize);
66060
66061@@ -605,17 +685,25 @@ void *kmem_cache_alloc_node(struct kmem_
66062 {
66063 void *b;
66064
66065+#ifdef CONFIG_PAX_USERCOPY
66066+ b = __kmalloc_node_align(c->size, flags, node, c->align);
66067+#else
66068 if (c->size < PAGE_SIZE) {
66069 b = slob_alloc(c->size, flags, c->align, node);
66070 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
66071 SLOB_UNITS(c->size) * SLOB_UNIT,
66072 flags, node);
66073 } else {
66074+ struct slob_page *sp;
66075+
66076 b = slob_new_pages(flags, get_order(c->size), node);
66077+ sp = slob_page(b);
66078+ sp->size = c->size;
66079 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
66080 PAGE_SIZE << get_order(c->size),
66081 flags, node);
66082 }
66083+#endif
66084
66085 if (c->ctor)
66086 c->ctor(b);
66087@@ -627,10 +715,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
66088
66089 static void __kmem_cache_free(void *b, int size)
66090 {
66091- if (size < PAGE_SIZE)
66092+ struct slob_page *sp = slob_page(b);
66093+
66094+ if (is_slob_page(sp))
66095 slob_free(b, size);
66096- else
66097+ else {
66098+ clear_slob_page(sp);
66099+ free_slob_page(sp);
66100+ sp->size = 0;
66101 slob_free_pages(b, get_order(size));
66102+ }
66103 }
66104
66105 static void kmem_rcu_free(struct rcu_head *head)
66106@@ -643,15 +737,24 @@ static void kmem_rcu_free(struct rcu_hea
66107
66108 void kmem_cache_free(struct kmem_cache *c, void *b)
66109 {
66110+ int size = c->size;
66111+
66112+#ifdef CONFIG_PAX_USERCOPY
66113+ if (size + c->align < PAGE_SIZE) {
66114+ size += c->align;
66115+ b -= c->align;
66116+ }
66117+#endif
66118+
66119 kmemleak_free_recursive(b, c->flags);
66120 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
66121 struct slob_rcu *slob_rcu;
66122- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
66123+ slob_rcu = b + (size - sizeof(struct slob_rcu));
66124 INIT_RCU_HEAD(&slob_rcu->head);
66125- slob_rcu->size = c->size;
66126+ slob_rcu->size = size;
66127 call_rcu(&slob_rcu->head, kmem_rcu_free);
66128 } else {
66129- __kmem_cache_free(b, c->size);
66130+ __kmem_cache_free(b, size);
66131 }
66132
66133 trace_kmem_cache_free(_RET_IP_, b);
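The SLOB rework above replaces the single unsigned int size header stored in front of each kmalloc'd block with two slob_t entries, m[0].units holding the requested size and m[1].units the alignment, which is why the new BUILD_BUG_ONs demand at least 2 * SLOB_UNIT of minimum alignment. A standalone toy of that header layout, with simplified types and hypothetical names:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct { long units; } slob_t;        /* simplified stand-in */

/* Mirrors __kmalloc_node_align() above: the two slob_t slots in front of
 * the returned pointer record the requested size and the alignment, so
 * kfree()/ksize()/check_object_size() can recover both later. */
static void *toy_kmalloc(size_t size, size_t align)
{
	slob_t *m;

	assert(align >= 2 * sizeof(slob_t));  /* what the BUILD_BUG_ONs enforce */
	m = malloc(align + size);             /* header fits in the align gap   */
	if (!m)
		return NULL;
	m[0].units = (long)size;              /* requested size */
	m[1].units = (long)align;             /* alignment used */
	return (char *)m + align;
}

static void toy_kfree(void *p, size_t align)
{
	slob_t *m = (slob_t *)((char *)p - align);

	printf("freeing block: size=%ld align=%ld\n", m[0].units, m[1].units);
	free(m);
}

int main(void)
{
	size_t align = 2 * sizeof(slob_t);    /* smallest legal alignment here */
	void *p = toy_kmalloc(100, align);

	memset(p, 0, 100);                    /* the caller-visible area */
	toy_kfree(p, align);
	return 0;
}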
66134diff -urNp linux-2.6.32.41/mm/slub.c linux-2.6.32.41/mm/slub.c
66135--- linux-2.6.32.41/mm/slub.c 2011-03-27 14:31:47.000000000 -0400
66136+++ linux-2.6.32.41/mm/slub.c 2011-04-17 15:56:46.000000000 -0400
66137@@ -410,7 +410,7 @@ static void print_track(const char *s, s
66138 if (!t->addr)
66139 return;
66140
66141- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
66142+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
66143 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
66144 }
66145
66146@@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *
66147
66148 page = virt_to_head_page(x);
66149
66150+ BUG_ON(!PageSlab(page));
66151+
66152 slab_free(s, page, x, _RET_IP_);
66153
66154 trace_kmem_cache_free(_RET_IP_, x);
66155@@ -1937,7 +1939,7 @@ static int slub_min_objects;
66156 * Merge control. If this is set then no merging of slab caches will occur.
66157 * (Could be removed. This was introduced to pacify the merge skeptics.)
66158 */
66159-static int slub_nomerge;
66160+static int slub_nomerge = 1;
66161
66162 /*
66163 * Calculate the order of allocation given an slab object size.
66164@@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_c
66165 * list to avoid pounding the page allocator excessively.
66166 */
66167 set_min_partial(s, ilog2(s->size));
66168- s->refcount = 1;
66169+ atomic_set(&s->refcount, 1);
66170 #ifdef CONFIG_NUMA
66171 s->remote_node_defrag_ratio = 1000;
66172 #endif
66173@@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struc
66174 void kmem_cache_destroy(struct kmem_cache *s)
66175 {
66176 down_write(&slub_lock);
66177- s->refcount--;
66178- if (!s->refcount) {
66179+ if (atomic_dec_and_test(&s->refcount)) {
66180 list_del(&s->list);
66181 up_write(&slub_lock);
66182 if (kmem_cache_close(s)) {
66183@@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(cha
66184 __setup("slub_nomerge", setup_slub_nomerge);
66185
66186 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
66187- const char *name, int size, gfp_t gfp_flags)
66188+ const char *name, int size, gfp_t gfp_flags, unsigned int flags)
66189 {
66190- unsigned int flags = 0;
66191-
66192 if (gfp_flags & SLUB_DMA)
66193- flags = SLAB_CACHE_DMA;
66194+ flags |= SLAB_CACHE_DMA;
66195
66196 /*
66197 * This function is called with IRQs disabled during early-boot on
66198@@ -2915,6 +2914,46 @@ void *__kmalloc_node(size_t size, gfp_t
66199 EXPORT_SYMBOL(__kmalloc_node);
66200 #endif
66201
66202+void check_object_size(const void *ptr, unsigned long n, bool to)
66203+{
66204+
66205+#ifdef CONFIG_PAX_USERCOPY
66206+ struct page *page;
66207+ struct kmem_cache *s = NULL;
66208+ unsigned long offset;
66209+
66210+ if (!n)
66211+ return;
66212+
66213+ if (ZERO_OR_NULL_PTR(ptr))
66214+ goto report;
66215+
66216+ if (!virt_addr_valid(ptr))
66217+ return;
66218+
66219+ page = get_object_page(ptr);
66220+
66221+ if (!page) {
66222+ if (object_is_on_stack(ptr, n) == -1)
66223+ goto report;
66224+ return;
66225+ }
66226+
66227+ s = page->slab;
66228+ if (!(s->flags & SLAB_USERCOPY))
66229+ goto report;
66230+
66231+ offset = (ptr - page_address(page)) % s->size;
66232+ if (offset <= s->objsize && n <= s->objsize - offset)
66233+ return;
66234+
66235+report:
66236+ pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
66237+#endif
66238+
66239+}
66240+EXPORT_SYMBOL(check_object_size);
66241+
66242 size_t ksize(const void *object)
66243 {
66244 struct page *page;
66245@@ -3185,8 +3224,8 @@ void __init kmem_cache_init(void)
66246 * kmem_cache_open for slab_state == DOWN.
66247 */
66248 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
66249- sizeof(struct kmem_cache_node), GFP_NOWAIT);
66250- kmalloc_caches[0].refcount = -1;
66251+ sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
66252+ atomic_set(&kmalloc_caches[0].refcount, -1);
66253 caches++;
66254
66255 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
66256@@ -3198,18 +3237,18 @@ void __init kmem_cache_init(void)
66257 /* Caches that are not of the two-to-the-power-of size */
66258 if (KMALLOC_MIN_SIZE <= 32) {
66259 create_kmalloc_cache(&kmalloc_caches[1],
66260- "kmalloc-96", 96, GFP_NOWAIT);
66261+ "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
66262 caches++;
66263 }
66264 if (KMALLOC_MIN_SIZE <= 64) {
66265 create_kmalloc_cache(&kmalloc_caches[2],
66266- "kmalloc-192", 192, GFP_NOWAIT);
66267+ "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
66268 caches++;
66269 }
66270
66271 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
66272 create_kmalloc_cache(&kmalloc_caches[i],
66273- "kmalloc", 1 << i, GFP_NOWAIT);
66274+ "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
66275 caches++;
66276 }
66277
66278@@ -3293,7 +3332,7 @@ static int slab_unmergeable(struct kmem_
66279 /*
66280 * We may have set a slab to be unmergeable during bootstrap.
66281 */
66282- if (s->refcount < 0)
66283+ if (atomic_read(&s->refcount) < 0)
66284 return 1;
66285
66286 return 0;
66287@@ -3353,7 +3392,7 @@ struct kmem_cache *kmem_cache_create(con
66288 if (s) {
66289 int cpu;
66290
66291- s->refcount++;
66292+ atomic_inc(&s->refcount);
66293 /*
66294 * Adjust the object sizes so that we clear
66295 * the complete object on kzalloc.
66296@@ -3372,7 +3411,7 @@ struct kmem_cache *kmem_cache_create(con
66297
66298 if (sysfs_slab_alias(s, name)) {
66299 down_write(&slub_lock);
66300- s->refcount--;
66301+ atomic_dec(&s->refcount);
66302 up_write(&slub_lock);
66303 goto err;
66304 }
66305@@ -4101,7 +4140,7 @@ SLAB_ATTR_RO(ctor);
66306
66307 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
66308 {
66309- return sprintf(buf, "%d\n", s->refcount - 1);
66310+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
66311 }
66312 SLAB_ATTR_RO(aliases);
66313
66314@@ -4503,7 +4542,7 @@ static void kmem_cache_release(struct ko
66315 kfree(s);
66316 }
66317
66318-static struct sysfs_ops slab_sysfs_ops = {
66319+static const struct sysfs_ops slab_sysfs_ops = {
66320 .show = slab_attr_show,
66321 .store = slab_attr_store,
66322 };
66323@@ -4522,7 +4561,7 @@ static int uevent_filter(struct kset *ks
66324 return 0;
66325 }
66326
66327-static struct kset_uevent_ops slab_uevent_ops = {
66328+static const struct kset_uevent_ops slab_uevent_ops = {
66329 .filter = uevent_filter,
66330 };
66331
66332@@ -4785,7 +4824,13 @@ static const struct file_operations proc
66333
66334 static int __init slab_proc_init(void)
66335 {
66336- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
66337+ mode_t gr_mode = S_IRUGO;
66338+
66339+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66340+ gr_mode = S_IRUSR;
66341+#endif
66342+
66343+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
66344 return 0;
66345 }
66346 module_init(slab_proc_init);
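One recurring change in the SLUB hunks above is turning the plain int s->refcount into an atomic_t, so the decrement-and-test in kmem_cache_destroy() becomes a single operation instead of an open-coded pair. The effect can be modelled in userspace with C11 atomics (names hypothetical; the kernel code additionally holds slub_lock around the list manipulation):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct cache { atomic_int refcount; };

/* Drop one reference; report true only to the caller that dropped the
 * last one, matching the atomic_dec_and_test() contract used above. */
static bool cache_put(struct cache *c)
{
	return atomic_fetch_sub(&c->refcount, 1) == 1;  /* old value was 1 */
}

int main(void)
{
	struct cache c;

	atomic_init(&c.refcount, 2);                    /* two users of the cache */
	printf("first  put tears down: %d\n", cache_put(&c));  /* 0 */
	printf("second put tears down: %d\n", cache_put(&c));  /* 1 */
	return 0;
}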
66347diff -urNp linux-2.6.32.41/mm/util.c linux-2.6.32.41/mm/util.c
66348--- linux-2.6.32.41/mm/util.c 2011-03-27 14:31:47.000000000 -0400
66349+++ linux-2.6.32.41/mm/util.c 2011-04-17 15:56:46.000000000 -0400
66350@@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
66351 void arch_pick_mmap_layout(struct mm_struct *mm)
66352 {
66353 mm->mmap_base = TASK_UNMAPPED_BASE;
66354+
66355+#ifdef CONFIG_PAX_RANDMMAP
66356+ if (mm->pax_flags & MF_PAX_RANDMMAP)
66357+ mm->mmap_base += mm->delta_mmap;
66358+#endif
66359+
66360 mm->get_unmapped_area = arch_get_unmapped_area;
66361 mm->unmap_area = arch_unmap_area;
66362 }
66363diff -urNp linux-2.6.32.41/mm/vmalloc.c linux-2.6.32.41/mm/vmalloc.c
66364--- linux-2.6.32.41/mm/vmalloc.c 2011-03-27 14:31:47.000000000 -0400
66365+++ linux-2.6.32.41/mm/vmalloc.c 2011-04-17 15:56:46.000000000 -0400
66366@@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd,
66367
66368 pte = pte_offset_kernel(pmd, addr);
66369 do {
66370- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
66371- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
66372+
66373+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66374+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
66375+ BUG_ON(!pte_exec(*pte));
66376+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
66377+ continue;
66378+ }
66379+#endif
66380+
66381+ {
66382+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
66383+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
66384+ }
66385 } while (pte++, addr += PAGE_SIZE, addr != end);
66386 }
66387
66388@@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, un
66389 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
66390 {
66391 pte_t *pte;
66392+ int ret = -ENOMEM;
66393
66394 /*
66395 * nr is a running index into the array which helps higher level
66396@@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, un
66397 pte = pte_alloc_kernel(pmd, addr);
66398 if (!pte)
66399 return -ENOMEM;
66400+
66401+ pax_open_kernel();
66402 do {
66403 struct page *page = pages[*nr];
66404
66405- if (WARN_ON(!pte_none(*pte)))
66406- return -EBUSY;
66407- if (WARN_ON(!page))
66408- return -ENOMEM;
66409+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66410+ if (!(pgprot_val(prot) & _PAGE_NX))
66411+ BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
66412+ else
66413+#endif
66414+
66415+ if (WARN_ON(!pte_none(*pte))) {
66416+ ret = -EBUSY;
66417+ goto out;
66418+ }
66419+ if (WARN_ON(!page)) {
66420+ ret = -ENOMEM;
66421+ goto out;
66422+ }
66423 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
66424 (*nr)++;
66425 } while (pte++, addr += PAGE_SIZE, addr != end);
66426- return 0;
66427+ ret = 0;
66428+out:
66429+ pax_close_kernel();
66430+ return ret;
66431 }
66432
66433 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
66434@@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void
66435 * and fall back on vmalloc() if that fails. Others
66436 * just put it in the vmalloc space.
66437 */
66438-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
66439+#ifdef CONFIG_MODULES
66440+#ifdef MODULES_VADDR
66441 unsigned long addr = (unsigned long)x;
66442 if (addr >= MODULES_VADDR && addr < MODULES_END)
66443 return 1;
66444 #endif
66445+
66446+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66447+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
66448+ return 1;
66449+#endif
66450+
66451+#endif
66452+
66453 return is_vmalloc_addr(x);
66454 }
66455
66456@@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void
66457
66458 if (!pgd_none(*pgd)) {
66459 pud_t *pud = pud_offset(pgd, addr);
66460+#ifdef CONFIG_X86
66461+ if (!pud_large(*pud))
66462+#endif
66463 if (!pud_none(*pud)) {
66464 pmd_t *pmd = pmd_offset(pud, addr);
66465+#ifdef CONFIG_X86
66466+ if (!pmd_large(*pmd))
66467+#endif
66468 if (!pmd_none(*pmd)) {
66469 pte_t *ptep, pte;
66470
66471@@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vm
66472 struct rb_node *tmp;
66473
66474 while (*p) {
66475- struct vmap_area *tmp;
66476+ struct vmap_area *varea;
66477
66478 parent = *p;
66479- tmp = rb_entry(parent, struct vmap_area, rb_node);
66480- if (va->va_start < tmp->va_end)
66481+ varea = rb_entry(parent, struct vmap_area, rb_node);
66482+ if (va->va_start < varea->va_end)
66483 p = &(*p)->rb_left;
66484- else if (va->va_end > tmp->va_start)
66485+ else if (va->va_end > varea->va_start)
66486 p = &(*p)->rb_right;
66487 else
66488 BUG();
66489@@ -1232,6 +1274,16 @@ static struct vm_struct *__get_vm_area_n
66490 struct vm_struct *area;
66491
66492 BUG_ON(in_interrupt());
66493+
66494+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
66495+ if (flags & VM_KERNEXEC) {
66496+ if (start != VMALLOC_START || end != VMALLOC_END)
66497+ return NULL;
66498+ start = (unsigned long)MODULES_EXEC_VADDR;
66499+ end = (unsigned long)MODULES_EXEC_END;
66500+ }
66501+#endif
66502+
66503 if (flags & VM_IOREMAP) {
66504 int bit = fls(size);
66505
66506@@ -1457,6 +1509,11 @@ void *vmap(struct page **pages, unsigned
66507 if (count > totalram_pages)
66508 return NULL;
66509
66510+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
66511+ if (!(pgprot_val(prot) & _PAGE_NX))
66512+ flags |= VM_KERNEXEC;
66513+#endif
66514+
66515 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
66516 __builtin_return_address(0));
66517 if (!area)
66518@@ -1567,6 +1624,13 @@ static void *__vmalloc_node(unsigned lon
66519 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
66520 return NULL;
66521
66522+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
66523+ if (!(pgprot_val(prot) & _PAGE_NX))
66524+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
66525+ node, gfp_mask, caller);
66526+ else
66527+#endif
66528+
66529 area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
66530 VMALLOC_END, node, gfp_mask, caller);
66531
66532@@ -1585,6 +1649,7 @@ static void *__vmalloc_node(unsigned lon
66533 return addr;
66534 }
66535
66536+#undef __vmalloc
66537 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
66538 {
66539 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
66540@@ -1601,6 +1666,7 @@ EXPORT_SYMBOL(__vmalloc);
66541 * For tight control over page level allocator and protection flags
66542 * use __vmalloc() instead.
66543 */
66544+#undef vmalloc
66545 void *vmalloc(unsigned long size)
66546 {
66547 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
66548@@ -1615,6 +1681,7 @@ EXPORT_SYMBOL(vmalloc);
66549 * The resulting memory area is zeroed so it can be mapped to userspace
66550 * without leaking data.
66551 */
66552+#undef vmalloc_user
66553 void *vmalloc_user(unsigned long size)
66554 {
66555 struct vm_struct *area;
66556@@ -1642,6 +1709,7 @@ EXPORT_SYMBOL(vmalloc_user);
66557 * For tight control over page level allocator and protection flags
66558 * use __vmalloc() instead.
66559 */
66560+#undef vmalloc_node
66561 void *vmalloc_node(unsigned long size, int node)
66562 {
66563 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
66564@@ -1664,10 +1732,10 @@ EXPORT_SYMBOL(vmalloc_node);
66565 * For tight control over page level allocator and protection flags
66566 * use __vmalloc() instead.
66567 */
66568-
66569+#undef vmalloc_exec
66570 void *vmalloc_exec(unsigned long size)
66571 {
66572- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
66573+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
66574 -1, __builtin_return_address(0));
66575 }
66576
66577@@ -1686,6 +1754,7 @@ void *vmalloc_exec(unsigned long size)
66578 * Allocate enough 32bit PA addressable pages to cover @size from the
66579 * page level allocator and map them into contiguous kernel virtual space.
66580 */
66581+#undef vmalloc_32
66582 void *vmalloc_32(unsigned long size)
66583 {
66584 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
66585@@ -1700,6 +1769,7 @@ EXPORT_SYMBOL(vmalloc_32);
66586 * The resulting memory area is 32bit addressable and zeroed so it can be
66587 * mapped to userspace without leaking data.
66588 */
66589+#undef vmalloc_32_user
66590 void *vmalloc_32_user(unsigned long size)
66591 {
66592 struct vm_struct *area;
66593@@ -1964,6 +2034,8 @@ int remap_vmalloc_range(struct vm_area_s
66594 unsigned long uaddr = vma->vm_start;
66595 unsigned long usize = vma->vm_end - vma->vm_start;
66596
66597+ BUG_ON(vma->vm_mirror);
66598+
66599 if ((PAGE_SIZE-1) & (unsigned long)addr)
66600 return -EINVAL;
66601
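The KERNEXEC-related vmalloc hunks above share one idea: an allocation that asks for executable protection is tagged VM_KERNEXEC and carved out of the module mapping window instead of the ordinary vmalloc window, so the latter can stay non-executable. A standalone sketch of that routing decision; the addresses below are made up, and MODULES_EXEC_VADDR/END simply stand in for the PaX symbols referenced in the patch:

#include <stdbool.h>
#include <stdio.h>

/* Made-up layout constants; the real values come from the x86 headers. */
#define VMALLOC_START      0xd0000000UL
#define VMALLOC_END        0xf0000000UL
#define MODULES_EXEC_VADDR 0xc1000000UL
#define MODULES_EXEC_END   0xc8000000UL

/* Model of the VM_KERNEXEC routing in __get_vm_area_node()/__vmalloc_node():
 * executable requests are redirected into the module window. */
static void pick_window(bool exec, unsigned long *start, unsigned long *end)
{
	*start = VMALLOC_START;
	*end   = VMALLOC_END;
	if (exec) {                       /* VM_KERNEXEC set */
		*start = MODULES_EXEC_VADDR;
		*end   = MODULES_EXEC_END;
	}
}

int main(void)
{
	unsigned long s, e;

	pick_window(true, &s, &e);
	printf("executable mapping window: %#lx-%#lx\n", s, e);
	pick_window(false, &s, &e);
	printf("data mapping window:       %#lx-%#lx\n", s, e);
	return 0;
}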
66602diff -urNp linux-2.6.32.41/mm/vmstat.c linux-2.6.32.41/mm/vmstat.c
66603--- linux-2.6.32.41/mm/vmstat.c 2011-03-27 14:31:47.000000000 -0400
66604+++ linux-2.6.32.41/mm/vmstat.c 2011-04-17 15:56:46.000000000 -0400
66605@@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
66606 *
66607 * vm_stat contains the global counters
66608 */
66609-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
66610+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
66611 EXPORT_SYMBOL(vm_stat);
66612
66613 #ifdef CONFIG_SMP
66614@@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
66615 v = p->vm_stat_diff[i];
66616 p->vm_stat_diff[i] = 0;
66617 local_irq_restore(flags);
66618- atomic_long_add(v, &zone->vm_stat[i]);
66619+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
66620 global_diff[i] += v;
66621 #ifdef CONFIG_NUMA
66622 /* 3 seconds idle till flush */
66623@@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
66624
66625 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
66626 if (global_diff[i])
66627- atomic_long_add(global_diff[i], &vm_stat[i]);
66628+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
66629 }
66630
66631 #endif
66632@@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
66633 start_cpu_timer(cpu);
66634 #endif
66635 #ifdef CONFIG_PROC_FS
66636- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
66637- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
66638- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
66639- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
66640+ {
66641+ mode_t gr_mode = S_IRUGO;
66642+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66643+ gr_mode = S_IRUSR;
66644+#endif
66645+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
66646+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
66647+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66648+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
66649+#else
66650+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
66651+#endif
66652+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
66653+ }
66654 #endif
66655 return 0;
66656 }
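The *_unchecked conversions that run through this part of the patch (vm_stat, the slab hit/miss counters, the ATM device statistics, sk_drops, rt_genid, and so on) all make the same trade: a counter that is purely statistical is moved to the atomic_unchecked_t family so that PaX's PAX_REFCOUNT overflow detection only guards real reference counts, where a wrap is exploitable. A rough userspace model of that split; the type below is invented for illustration, and in the kernel the unchecked variants are expected to fall back to the ordinary atomics when the hardening is disabled:

#include <stdatomic.h>
#include <stdio.h>

/* Invented stand-in for atomic_unchecked_t: a counter that may wrap
 * silently because it only feeds statistics, never object lifetimes. */
typedef struct { atomic_long counter; } stat_counter_t;

static void stat_inc(stat_counter_t *v)
{
	atomic_fetch_add_explicit(&v->counter, 1, memory_order_relaxed);
}

static long stat_read(const stat_counter_t *v)
{
	return atomic_load_explicit(&v->counter, memory_order_relaxed);
}

int main(void)
{
	stat_counter_t rx_drop;

	atomic_init(&rx_drop.counter, 0);
	stat_inc(&rx_drop);                      /* e.g. a dropped packet */
	stat_inc(&rx_drop);
	printf("rx_drop = %ld\n", stat_read(&rx_drop));
	return 0;
}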
66657diff -urNp linux-2.6.32.41/net/8021q/vlan.c linux-2.6.32.41/net/8021q/vlan.c
66658--- linux-2.6.32.41/net/8021q/vlan.c 2011-03-27 14:31:47.000000000 -0400
66659+++ linux-2.6.32.41/net/8021q/vlan.c 2011-04-17 15:56:46.000000000 -0400
66660@@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net
66661 err = -EPERM;
66662 if (!capable(CAP_NET_ADMIN))
66663 break;
66664- if ((args.u.name_type >= 0) &&
66665- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
66666+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
66667 struct vlan_net *vn;
66668
66669 vn = net_generic(net, vlan_net_id);
66670diff -urNp linux-2.6.32.41/net/atm/atm_misc.c linux-2.6.32.41/net/atm/atm_misc.c
66671--- linux-2.6.32.41/net/atm/atm_misc.c 2011-03-27 14:31:47.000000000 -0400
66672+++ linux-2.6.32.41/net/atm/atm_misc.c 2011-04-17 15:56:46.000000000 -0400
66673@@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int t
66674 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
66675 return 1;
66676 atm_return(vcc,truesize);
66677- atomic_inc(&vcc->stats->rx_drop);
66678+ atomic_inc_unchecked(&vcc->stats->rx_drop);
66679 return 0;
66680 }
66681
66682@@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct
66683 }
66684 }
66685 atm_return(vcc,guess);
66686- atomic_inc(&vcc->stats->rx_drop);
66687+ atomic_inc_unchecked(&vcc->stats->rx_drop);
66688 return NULL;
66689 }
66690
66691@@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafpr
66692
66693 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
66694 {
66695-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
66696+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
66697 __SONET_ITEMS
66698 #undef __HANDLE_ITEM
66699 }
66700@@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_sta
66701
66702 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
66703 {
66704-#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
66705+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
66706 __SONET_ITEMS
66707 #undef __HANDLE_ITEM
66708 }
66709diff -urNp linux-2.6.32.41/net/atm/mpoa_caches.c linux-2.6.32.41/net/atm/mpoa_caches.c
66710--- linux-2.6.32.41/net/atm/mpoa_caches.c 2011-03-27 14:31:47.000000000 -0400
66711+++ linux-2.6.32.41/net/atm/mpoa_caches.c 2011-05-16 21:46:57.000000000 -0400
66712@@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_cl
66713 struct timeval now;
66714 struct k_message msg;
66715
66716+ pax_track_stack();
66717+
66718 do_gettimeofday(&now);
66719
66720 write_lock_irq(&client->egress_lock);
66721diff -urNp linux-2.6.32.41/net/atm/proc.c linux-2.6.32.41/net/atm/proc.c
66722--- linux-2.6.32.41/net/atm/proc.c 2011-03-27 14:31:47.000000000 -0400
66723+++ linux-2.6.32.41/net/atm/proc.c 2011-04-17 15:56:46.000000000 -0400
66724@@ -43,9 +43,9 @@ static void add_stats(struct seq_file *s
66725 const struct k_atm_aal_stats *stats)
66726 {
66727 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
66728- atomic_read(&stats->tx),atomic_read(&stats->tx_err),
66729- atomic_read(&stats->rx),atomic_read(&stats->rx_err),
66730- atomic_read(&stats->rx_drop));
66731+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
66732+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
66733+ atomic_read_unchecked(&stats->rx_drop));
66734 }
66735
66736 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
66737@@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *se
66738 {
66739 struct sock *sk = sk_atm(vcc);
66740
66741+#ifdef CONFIG_GRKERNSEC_HIDESYM
66742+ seq_printf(seq, "%p ", NULL);
66743+#else
66744 seq_printf(seq, "%p ", vcc);
66745+#endif
66746+
66747 if (!vcc->dev)
66748 seq_printf(seq, "Unassigned ");
66749 else
66750@@ -214,7 +219,11 @@ static void svc_info(struct seq_file *se
66751 {
66752 if (!vcc->dev)
66753 seq_printf(seq, sizeof(void *) == 4 ?
66754+#ifdef CONFIG_GRKERNSEC_HIDESYM
66755+ "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
66756+#else
66757 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
66758+#endif
66759 else
66760 seq_printf(seq, "%3d %3d %5d ",
66761 vcc->dev->number, vcc->vpi, vcc->vci);
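The CONFIG_GRKERNSEC_HIDESYM hunks in the /proc and seq_file code (atm/proc.c here, and later can/bcm.c, inet_diag.c, tcp_ipv4.c, raw.c) apply one rule: with the option enabled, kernel object addresses that would otherwise be formatted with %p are replaced by NULL so unprivileged readers cannot harvest them. A trivial userspace illustration of the pattern, where HIDESYM is just a macro standing in for the Kconfig option:

#include <stdio.h>

#define HIDESYM 1   /* stand-in for CONFIG_GRKERNSEC_HIDESYM */

/* Print a record about a kernel object; with HIDESYM the address column
 * is blinded instead of leaking the object's location. */
static void show_vcc(FILE *seq, const void *vcc)
{
#if HIDESYM
	(void)vcc;                        /* address deliberately not printed */
	fprintf(seq, "%p ", (void *)NULL);
#else
	fprintf(seq, "%p ", vcc);
#endif
	fprintf(seq, "...rest of the row...\n");
}

int main(void)
{
	int dummy;

	show_vcc(stdout, &dummy);
	return 0;
}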
66762diff -urNp linux-2.6.32.41/net/atm/resources.c linux-2.6.32.41/net/atm/resources.c
66763--- linux-2.6.32.41/net/atm/resources.c 2011-03-27 14:31:47.000000000 -0400
66764+++ linux-2.6.32.41/net/atm/resources.c 2011-04-17 15:56:46.000000000 -0400
66765@@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *
66766 static void copy_aal_stats(struct k_atm_aal_stats *from,
66767 struct atm_aal_stats *to)
66768 {
66769-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
66770+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
66771 __AAL_STAT_ITEMS
66772 #undef __HANDLE_ITEM
66773 }
66774@@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_
66775 static void subtract_aal_stats(struct k_atm_aal_stats *from,
66776 struct atm_aal_stats *to)
66777 {
66778-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
66779+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
66780 __AAL_STAT_ITEMS
66781 #undef __HANDLE_ITEM
66782 }
66783diff -urNp linux-2.6.32.41/net/bridge/br_private.h linux-2.6.32.41/net/bridge/br_private.h
66784--- linux-2.6.32.41/net/bridge/br_private.h 2011-03-27 14:31:47.000000000 -0400
66785+++ linux-2.6.32.41/net/bridge/br_private.h 2011-04-17 15:56:46.000000000 -0400
66786@@ -254,7 +254,7 @@ extern void br_ifinfo_notify(int event,
66787
66788 #ifdef CONFIG_SYSFS
66789 /* br_sysfs_if.c */
66790-extern struct sysfs_ops brport_sysfs_ops;
66791+extern const struct sysfs_ops brport_sysfs_ops;
66792 extern int br_sysfs_addif(struct net_bridge_port *p);
66793
66794 /* br_sysfs_br.c */
66795diff -urNp linux-2.6.32.41/net/bridge/br_stp_if.c linux-2.6.32.41/net/bridge/br_stp_if.c
66796--- linux-2.6.32.41/net/bridge/br_stp_if.c 2011-03-27 14:31:47.000000000 -0400
66797+++ linux-2.6.32.41/net/bridge/br_stp_if.c 2011-04-17 15:56:46.000000000 -0400
66798@@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridg
66799 char *envp[] = { NULL };
66800
66801 if (br->stp_enabled == BR_USER_STP) {
66802- r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
66803+ r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
66804 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
66805 br->dev->name, r);
66806
66807diff -urNp linux-2.6.32.41/net/bridge/br_sysfs_if.c linux-2.6.32.41/net/bridge/br_sysfs_if.c
66808--- linux-2.6.32.41/net/bridge/br_sysfs_if.c 2011-03-27 14:31:47.000000000 -0400
66809+++ linux-2.6.32.41/net/bridge/br_sysfs_if.c 2011-04-17 15:56:46.000000000 -0400
66810@@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobje
66811 return ret;
66812 }
66813
66814-struct sysfs_ops brport_sysfs_ops = {
66815+const struct sysfs_ops brport_sysfs_ops = {
66816 .show = brport_show,
66817 .store = brport_store,
66818 };
66819diff -urNp linux-2.6.32.41/net/bridge/netfilter/ebtables.c linux-2.6.32.41/net/bridge/netfilter/ebtables.c
66820--- linux-2.6.32.41/net/bridge/netfilter/ebtables.c 2011-04-17 17:00:52.000000000 -0400
66821+++ linux-2.6.32.41/net/bridge/netfilter/ebtables.c 2011-05-16 21:46:57.000000000 -0400
66822@@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struc
66823 unsigned int entries_size, nentries;
66824 char *entries;
66825
66826+ pax_track_stack();
66827+
66828 if (cmd == EBT_SO_GET_ENTRIES) {
66829 entries_size = t->private->entries_size;
66830 nentries = t->private->nentries;
66831diff -urNp linux-2.6.32.41/net/can/bcm.c linux-2.6.32.41/net/can/bcm.c
66832--- linux-2.6.32.41/net/can/bcm.c 2011-05-10 22:12:01.000000000 -0400
66833+++ linux-2.6.32.41/net/can/bcm.c 2011-05-10 22:12:34.000000000 -0400
66834@@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file
66835 struct bcm_sock *bo = bcm_sk(sk);
66836 struct bcm_op *op;
66837
66838+#ifdef CONFIG_GRKERNSEC_HIDESYM
66839+ seq_printf(m, ">>> socket %p", NULL);
66840+ seq_printf(m, " / sk %p", NULL);
66841+ seq_printf(m, " / bo %p", NULL);
66842+#else
66843 seq_printf(m, ">>> socket %p", sk->sk_socket);
66844 seq_printf(m, " / sk %p", sk);
66845 seq_printf(m, " / bo %p", bo);
66846+#endif
66847 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
66848 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
66849 seq_printf(m, " <<<\n");
66850diff -urNp linux-2.6.32.41/net/core/dev.c linux-2.6.32.41/net/core/dev.c
66851--- linux-2.6.32.41/net/core/dev.c 2011-04-17 17:00:52.000000000 -0400
66852+++ linux-2.6.32.41/net/core/dev.c 2011-04-17 17:04:18.000000000 -0400
66853@@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const cha
66854 if (no_module && capable(CAP_NET_ADMIN))
66855 no_module = request_module("netdev-%s", name);
66856 if (no_module && capable(CAP_SYS_MODULE)) {
66857+#ifdef CONFIG_GRKERNSEC_MODHARDEN
66858+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
66859+#else
66860 if (!request_module("%s", name))
66861 pr_err("Loading kernel module for a network device "
66862 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
66863 "instead\n", name);
66864+#endif
66865 }
66866 }
66867 EXPORT_SYMBOL(dev_load);
66868@@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
66869 }
66870 EXPORT_SYMBOL(netif_rx_ni);
66871
66872-static void net_tx_action(struct softirq_action *h)
66873+static void net_tx_action(void)
66874 {
66875 struct softnet_data *sd = &__get_cpu_var(softnet_data);
66876
66877@@ -2826,7 +2830,7 @@ void netif_napi_del(struct napi_struct *
66878 EXPORT_SYMBOL(netif_napi_del);
66879
66880
66881-static void net_rx_action(struct softirq_action *h)
66882+static void net_rx_action(void)
66883 {
66884 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
66885 unsigned long time_limit = jiffies + 2;
66886diff -urNp linux-2.6.32.41/net/core/flow.c linux-2.6.32.41/net/core/flow.c
66887--- linux-2.6.32.41/net/core/flow.c 2011-03-27 14:31:47.000000000 -0400
66888+++ linux-2.6.32.41/net/core/flow.c 2011-05-04 17:56:20.000000000 -0400
66889@@ -35,11 +35,11 @@ struct flow_cache_entry {
66890 atomic_t *object_ref;
66891 };
66892
66893-atomic_t flow_cache_genid = ATOMIC_INIT(0);
66894+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
66895
66896 static u32 flow_hash_shift;
66897 #define flow_hash_size (1 << flow_hash_shift)
66898-static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
66899+static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
66900
66901 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
66902
66903@@ -52,7 +52,7 @@ struct flow_percpu_info {
66904 u32 hash_rnd;
66905 int count;
66906 };
66907-static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
66908+static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
66909
66910 #define flow_hash_rnd_recalc(cpu) \
66911 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
66912@@ -69,7 +69,7 @@ struct flow_flush_info {
66913 atomic_t cpuleft;
66914 struct completion completion;
66915 };
66916-static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
66917+static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
66918
66919 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
66920
66921@@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net,
66922 if (fle->family == family &&
66923 fle->dir == dir &&
66924 flow_key_compare(key, &fle->key) == 0) {
66925- if (fle->genid == atomic_read(&flow_cache_genid)) {
66926+ if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
66927 void *ret = fle->object;
66928
66929 if (ret)
66930@@ -228,7 +228,7 @@ nocache:
66931 err = resolver(net, key, family, dir, &obj, &obj_ref);
66932
66933 if (fle && !err) {
66934- fle->genid = atomic_read(&flow_cache_genid);
66935+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
66936
66937 if (fle->object)
66938 atomic_dec(fle->object_ref);
66939@@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(uns
66940
66941 fle = flow_table(cpu)[i];
66942 for (; fle; fle = fle->next) {
66943- unsigned genid = atomic_read(&flow_cache_genid);
66944+ unsigned genid = atomic_read_unchecked(&flow_cache_genid);
66945
66946 if (!fle->object || fle->genid == genid)
66947 continue;
66948diff -urNp linux-2.6.32.41/net/core/skbuff.c linux-2.6.32.41/net/core/skbuff.c
66949--- linux-2.6.32.41/net/core/skbuff.c 2011-03-27 14:31:47.000000000 -0400
66950+++ linux-2.6.32.41/net/core/skbuff.c 2011-05-16 21:46:57.000000000 -0400
66951@@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb,
66952 struct sk_buff *frag_iter;
66953 struct sock *sk = skb->sk;
66954
66955+ pax_track_stack();
66956+
66957 /*
66958 * __skb_splice_bits() only fails if the output has no room left,
66959 * so no point in going over the frag_list for the error case.
66960diff -urNp linux-2.6.32.41/net/core/sock.c linux-2.6.32.41/net/core/sock.c
66961--- linux-2.6.32.41/net/core/sock.c 2011-03-27 14:31:47.000000000 -0400
66962+++ linux-2.6.32.41/net/core/sock.c 2011-05-04 17:56:20.000000000 -0400
66963@@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock,
66964 break;
66965
66966 case SO_PEERCRED:
66967+ {
66968+ struct ucred peercred;
66969 if (len > sizeof(sk->sk_peercred))
66970 len = sizeof(sk->sk_peercred);
66971- if (copy_to_user(optval, &sk->sk_peercred, len))
66972+ peercred = sk->sk_peercred;
66973+ if (copy_to_user(optval, &peercred, len))
66974 return -EFAULT;
66975 goto lenout;
66976+ }
66977
66978 case SO_PEERNAME:
66979 {
66980@@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock,
66981 */
66982 smp_wmb();
66983 atomic_set(&sk->sk_refcnt, 1);
66984- atomic_set(&sk->sk_drops, 0);
66985+ atomic_set_unchecked(&sk->sk_drops, 0);
66986 }
66987 EXPORT_SYMBOL(sock_init_data);
66988
66989diff -urNp linux-2.6.32.41/net/decnet/sysctl_net_decnet.c linux-2.6.32.41/net/decnet/sysctl_net_decnet.c
66990--- linux-2.6.32.41/net/decnet/sysctl_net_decnet.c 2011-03-27 14:31:47.000000000 -0400
66991+++ linux-2.6.32.41/net/decnet/sysctl_net_decnet.c 2011-04-17 15:56:46.000000000 -0400
66992@@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_t
66993
66994 if (len > *lenp) len = *lenp;
66995
66996- if (copy_to_user(buffer, addr, len))
66997+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
66998 return -EFAULT;
66999
67000 *lenp = len;
67001@@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table
67002
67003 if (len > *lenp) len = *lenp;
67004
67005- if (copy_to_user(buffer, devname, len))
67006+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
67007 return -EFAULT;
67008
67009 *lenp = len;
67010diff -urNp linux-2.6.32.41/net/econet/Kconfig linux-2.6.32.41/net/econet/Kconfig
67011--- linux-2.6.32.41/net/econet/Kconfig 2011-03-27 14:31:47.000000000 -0400
67012+++ linux-2.6.32.41/net/econet/Kconfig 2011-04-17 15:56:46.000000000 -0400
67013@@ -4,7 +4,7 @@
67014
67015 config ECONET
67016 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
67017- depends on EXPERIMENTAL && INET
67018+ depends on EXPERIMENTAL && INET && BROKEN
67019 ---help---
67020 Econet is a fairly old and slow networking protocol mainly used by
67021 Acorn computers to access file and print servers. It uses native
67022diff -urNp linux-2.6.32.41/net/ieee802154/dgram.c linux-2.6.32.41/net/ieee802154/dgram.c
67023--- linux-2.6.32.41/net/ieee802154/dgram.c 2011-03-27 14:31:47.000000000 -0400
67024+++ linux-2.6.32.41/net/ieee802154/dgram.c 2011-05-04 17:56:28.000000000 -0400
67025@@ -318,7 +318,7 @@ out:
67026 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
67027 {
67028 if (sock_queue_rcv_skb(sk, skb) < 0) {
67029- atomic_inc(&sk->sk_drops);
67030+ atomic_inc_unchecked(&sk->sk_drops);
67031 kfree_skb(skb);
67032 return NET_RX_DROP;
67033 }
67034diff -urNp linux-2.6.32.41/net/ieee802154/raw.c linux-2.6.32.41/net/ieee802154/raw.c
67035--- linux-2.6.32.41/net/ieee802154/raw.c 2011-03-27 14:31:47.000000000 -0400
67036+++ linux-2.6.32.41/net/ieee802154/raw.c 2011-05-04 17:56:28.000000000 -0400
67037@@ -206,7 +206,7 @@ out:
67038 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
67039 {
67040 if (sock_queue_rcv_skb(sk, skb) < 0) {
67041- atomic_inc(&sk->sk_drops);
67042+ atomic_inc_unchecked(&sk->sk_drops);
67043 kfree_skb(skb);
67044 return NET_RX_DROP;
67045 }
67046diff -urNp linux-2.6.32.41/net/ipv4/inet_diag.c linux-2.6.32.41/net/ipv4/inet_diag.c
67047--- linux-2.6.32.41/net/ipv4/inet_diag.c 2011-04-17 17:00:52.000000000 -0400
67048+++ linux-2.6.32.41/net/ipv4/inet_diag.c 2011-04-17 17:04:18.000000000 -0400
67049@@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct soc
67050 r->idiag_retrans = 0;
67051
67052 r->id.idiag_if = sk->sk_bound_dev_if;
67053+#ifdef CONFIG_GRKERNSEC_HIDESYM
67054+ r->id.idiag_cookie[0] = 0;
67055+ r->id.idiag_cookie[1] = 0;
67056+#else
67057 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
67058 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
67059+#endif
67060
67061 r->id.idiag_sport = inet->sport;
67062 r->id.idiag_dport = inet->dport;
67063@@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct in
67064 r->idiag_family = tw->tw_family;
67065 r->idiag_retrans = 0;
67066 r->id.idiag_if = tw->tw_bound_dev_if;
67067+
67068+#ifdef CONFIG_GRKERNSEC_HIDESYM
67069+ r->id.idiag_cookie[0] = 0;
67070+ r->id.idiag_cookie[1] = 0;
67071+#else
67072 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
67073 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
67074+#endif
67075+
67076 r->id.idiag_sport = tw->tw_sport;
67077 r->id.idiag_dport = tw->tw_dport;
67078 r->id.idiag_src[0] = tw->tw_rcv_saddr;
67079@@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk
67080 if (sk == NULL)
67081 goto unlock;
67082
67083+#ifndef CONFIG_GRKERNSEC_HIDESYM
67084 err = -ESTALE;
67085 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
67086 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
67087 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
67088 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
67089 goto out;
67090+#endif
67091
67092 err = -ENOMEM;
67093 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
67094@@ -581,8 +595,14 @@ static int inet_diag_fill_req(struct sk_
67095 r->idiag_retrans = req->retrans;
67096
67097 r->id.idiag_if = sk->sk_bound_dev_if;
67098+
67099+#ifdef CONFIG_GRKERNSEC_HIDESYM
67100+ r->id.idiag_cookie[0] = 0;
67101+ r->id.idiag_cookie[1] = 0;
67102+#else
67103 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
67104 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
67105+#endif
67106
67107 tmo = req->expires - jiffies;
67108 if (tmo < 0)
67109diff -urNp linux-2.6.32.41/net/ipv4/inet_hashtables.c linux-2.6.32.41/net/ipv4/inet_hashtables.c
67110--- linux-2.6.32.41/net/ipv4/inet_hashtables.c 2011-03-27 14:31:47.000000000 -0400
67111+++ linux-2.6.32.41/net/ipv4/inet_hashtables.c 2011-04-17 15:56:46.000000000 -0400
67112@@ -18,11 +18,14 @@
67113 #include <linux/sched.h>
67114 #include <linux/slab.h>
67115 #include <linux/wait.h>
67116+#include <linux/security.h>
67117
67118 #include <net/inet_connection_sock.h>
67119 #include <net/inet_hashtables.h>
67120 #include <net/ip.h>
67121
67122+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
67123+
67124 /*
67125 * Allocate and initialize a new local port bind bucket.
67126 * The bindhash mutex for snum's hash chain must be held here.
67127@@ -490,6 +493,8 @@ ok:
67128 }
67129 spin_unlock(&head->lock);
67130
67131+ gr_update_task_in_ip_table(current, inet_sk(sk));
67132+
67133 if (tw) {
67134 inet_twsk_deschedule(tw, death_row);
67135 inet_twsk_put(tw);
67136diff -urNp linux-2.6.32.41/net/ipv4/inetpeer.c linux-2.6.32.41/net/ipv4/inetpeer.c
67137--- linux-2.6.32.41/net/ipv4/inetpeer.c 2011-03-27 14:31:47.000000000 -0400
67138+++ linux-2.6.32.41/net/ipv4/inetpeer.c 2011-05-16 21:46:57.000000000 -0400
67139@@ -366,6 +366,8 @@ struct inet_peer *inet_getpeer(__be32 da
67140 struct inet_peer *p, *n;
67141 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
67142
67143+ pax_track_stack();
67144+
67145 /* Look up for the address quickly. */
67146 read_lock_bh(&peer_pool_lock);
67147 p = lookup(daddr, NULL);
67148@@ -389,7 +391,7 @@ struct inet_peer *inet_getpeer(__be32 da
67149 return NULL;
67150 n->v4daddr = daddr;
67151 atomic_set(&n->refcnt, 1);
67152- atomic_set(&n->rid, 0);
67153+ atomic_set_unchecked(&n->rid, 0);
67154 n->ip_id_count = secure_ip_id(daddr);
67155 n->tcp_ts_stamp = 0;
67156
67157diff -urNp linux-2.6.32.41/net/ipv4/ip_fragment.c linux-2.6.32.41/net/ipv4/ip_fragment.c
67158--- linux-2.6.32.41/net/ipv4/ip_fragment.c 2011-03-27 14:31:47.000000000 -0400
67159+++ linux-2.6.32.41/net/ipv4/ip_fragment.c 2011-04-17 15:56:46.000000000 -0400
67160@@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct
67161 return 0;
67162
67163 start = qp->rid;
67164- end = atomic_inc_return(&peer->rid);
67165+ end = atomic_inc_return_unchecked(&peer->rid);
67166 qp->rid = end;
67167
67168 rc = qp->q.fragments && (end - start) > max;
67169diff -urNp linux-2.6.32.41/net/ipv4/ip_sockglue.c linux-2.6.32.41/net/ipv4/ip_sockglue.c
67170--- linux-2.6.32.41/net/ipv4/ip_sockglue.c 2011-03-27 14:31:47.000000000 -0400
67171+++ linux-2.6.32.41/net/ipv4/ip_sockglue.c 2011-05-16 21:46:57.000000000 -0400
67172@@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock
67173 int val;
67174 int len;
67175
67176+ pax_track_stack();
67177+
67178 if (level != SOL_IP)
67179 return -EOPNOTSUPP;
67180
67181diff -urNp linux-2.6.32.41/net/ipv4/netfilter/arp_tables.c linux-2.6.32.41/net/ipv4/netfilter/arp_tables.c
67182--- linux-2.6.32.41/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:00:52.000000000 -0400
67183+++ linux-2.6.32.41/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:04:18.000000000 -0400
67184@@ -934,6 +934,7 @@ static int get_info(struct net *net, voi
67185 private = &tmp;
67186 }
67187 #endif
67188+ memset(&info, 0, sizeof(info));
67189 info.valid_hooks = t->valid_hooks;
67190 memcpy(info.hook_entry, private->hook_entry,
67191 sizeof(info.hook_entry));
67192diff -urNp linux-2.6.32.41/net/ipv4/netfilter/ip_tables.c linux-2.6.32.41/net/ipv4/netfilter/ip_tables.c
67193--- linux-2.6.32.41/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:00:52.000000000 -0400
67194+++ linux-2.6.32.41/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:04:18.000000000 -0400
67195@@ -1141,6 +1141,7 @@ static int get_info(struct net *net, voi
67196 private = &tmp;
67197 }
67198 #endif
67199+ memset(&info, 0, sizeof(info));
67200 info.valid_hooks = t->valid_hooks;
67201 memcpy(info.hook_entry, private->hook_entry,
67202 sizeof(info.hook_entry));
67203diff -urNp linux-2.6.32.41/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-2.6.32.41/net/ipv4/netfilter/nf_nat_snmp_basic.c
67204--- linux-2.6.32.41/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-03-27 14:31:47.000000000 -0400
67205+++ linux-2.6.32.41/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-04-17 15:56:46.000000000 -0400
67206@@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(
67207
67208 *len = 0;
67209
67210- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
67211+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
67212 if (*octets == NULL) {
67213 if (net_ratelimit())
67214 printk("OOM in bsalg (%d)\n", __LINE__);
67215diff -urNp linux-2.6.32.41/net/ipv4/raw.c linux-2.6.32.41/net/ipv4/raw.c
67216--- linux-2.6.32.41/net/ipv4/raw.c 2011-03-27 14:31:47.000000000 -0400
67217+++ linux-2.6.32.41/net/ipv4/raw.c 2011-05-04 17:59:08.000000000 -0400
67218@@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk,
67219 /* Charge it to the socket. */
67220
67221 if (sock_queue_rcv_skb(sk, skb) < 0) {
67222- atomic_inc(&sk->sk_drops);
67223+ atomic_inc_unchecked(&sk->sk_drops);
67224 kfree_skb(skb);
67225 return NET_RX_DROP;
67226 }
67227@@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk,
67228 int raw_rcv(struct sock *sk, struct sk_buff *skb)
67229 {
67230 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
67231- atomic_inc(&sk->sk_drops);
67232+ atomic_inc_unchecked(&sk->sk_drops);
67233 kfree_skb(skb);
67234 return NET_RX_DROP;
67235 }
67236@@ -724,15 +724,22 @@ static int raw_init(struct sock *sk)
67237
67238 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
67239 {
67240+ struct icmp_filter filter;
67241+
67242+ if (optlen < 0)
67243+ return -EINVAL;
67244 if (optlen > sizeof(struct icmp_filter))
67245 optlen = sizeof(struct icmp_filter);
67246- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
67247+ if (copy_from_user(&filter, optval, optlen))
67248 return -EFAULT;
67249+ memcpy(&raw_sk(sk)->filter, &filter, optlen);
67250+
67251 return 0;
67252 }
67253
67254 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
67255 {
67256+ struct icmp_filter filter;
67257 int len, ret = -EFAULT;
67258
67259 if (get_user(len, optlen))
67260@@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock
67261 if (len > sizeof(struct icmp_filter))
67262 len = sizeof(struct icmp_filter);
67263 ret = -EFAULT;
67264+ memcpy(&filter, &raw_sk(sk)->filter, len);
67265 if (put_user(len, optlen) ||
67266- copy_to_user(optval, &raw_sk(sk)->filter, len))
67267+ copy_to_user(optval, &filter, len))
67268 goto out;
67269 ret = 0;
67270 out: return ret;
67271@@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq
67272 sk_wmem_alloc_get(sp),
67273 sk_rmem_alloc_get(sp),
67274 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
67275- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
67276+ atomic_read(&sp->sk_refcnt),
67277+#ifdef CONFIG_GRKERNSEC_HIDESYM
67278+ NULL,
67279+#else
67280+ sp,
67281+#endif
67282+ atomic_read_unchecked(&sp->sk_drops));
67283 }
67284
67285 static int raw_seq_show(struct seq_file *seq, void *v)
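The raw_seticmpfilter()/raw_geticmpfilter() hunks above harden the ICMP filter sockopts by rejecting negative lengths, clamping the length to the filter size, and bouncing the data through a correctly sized stack copy instead of letting copy_from_user()/copy_to_user() run directly against the socket field with a caller-chosen length. A standalone model of the set path, with memcpy() standing in for copy_from_user() and the error code simplified:

#include <stdio.h>
#include <string.h>

struct icmp_filter { unsigned int data; };

/* Model of the hardened raw_seticmpfilter(): validate and clamp the
 * user-supplied length, fill a properly sized local copy first, and only
 * then update the per-socket filter.  `user_buf` stands in for the
 * userspace pointer. */
static int set_filter(struct icmp_filter *sk_filter,
                      const void *user_buf, int optlen)
{
	struct icmp_filter filter;

	if (optlen < 0)
		return -1;                             /* -EINVAL in the patch */
	if ((size_t)optlen > sizeof(filter))
		optlen = (int)sizeof(filter);
	memcpy(&filter, user_buf, (size_t)optlen);     /* copy_from_user() */
	memcpy(sk_filter, &filter, (size_t)optlen);
	return 0;
}

int main(void)
{
	struct icmp_filter sock_filter = { 0 };
	struct icmp_filter from_user = { 0xff };

	printf("ret=%d data=%#x\n",
	       set_filter(&sock_filter, &from_user, sizeof(from_user)),
	       sock_filter.data);
	printf("ret=%d (negative length rejected)\n",
	       set_filter(&sock_filter, &from_user, -4));
	return 0;
}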
67286diff -urNp linux-2.6.32.41/net/ipv4/route.c linux-2.6.32.41/net/ipv4/route.c
67287--- linux-2.6.32.41/net/ipv4/route.c 2011-03-27 14:31:47.000000000 -0400
67288+++ linux-2.6.32.41/net/ipv4/route.c 2011-05-04 17:56:28.000000000 -0400
67289@@ -268,7 +268,7 @@ static inline unsigned int rt_hash(__be3
67290
67291 static inline int rt_genid(struct net *net)
67292 {
67293- return atomic_read(&net->ipv4.rt_genid);
67294+ return atomic_read_unchecked(&net->ipv4.rt_genid);
67295 }
67296
67297 #ifdef CONFIG_PROC_FS
67298@@ -888,7 +888,7 @@ static void rt_cache_invalidate(struct n
67299 unsigned char shuffle;
67300
67301 get_random_bytes(&shuffle, sizeof(shuffle));
67302- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
67303+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
67304 }
67305
67306 /*
67307@@ -3356,7 +3356,7 @@ static __net_initdata struct pernet_oper
67308
67309 static __net_init int rt_secret_timer_init(struct net *net)
67310 {
67311- atomic_set(&net->ipv4.rt_genid,
67312+ atomic_set_unchecked(&net->ipv4.rt_genid,
67313 (int) ((num_physpages ^ (num_physpages>>8)) ^
67314 (jiffies ^ (jiffies >> 7))));
67315
67316diff -urNp linux-2.6.32.41/net/ipv4/tcp.c linux-2.6.32.41/net/ipv4/tcp.c
67317--- linux-2.6.32.41/net/ipv4/tcp.c 2011-03-27 14:31:47.000000000 -0400
67318+++ linux-2.6.32.41/net/ipv4/tcp.c 2011-05-16 21:46:57.000000000 -0400
67319@@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock
67320 int val;
67321 int err = 0;
67322
67323+ pax_track_stack();
67324+
67325 /* This is a string value all the others are int's */
67326 if (optname == TCP_CONGESTION) {
67327 char name[TCP_CA_NAME_MAX];
67328@@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock
67329 struct tcp_sock *tp = tcp_sk(sk);
67330 int val, len;
67331
67332+ pax_track_stack();
67333+
67334 if (get_user(len, optlen))
67335 return -EFAULT;
67336
67337diff -urNp linux-2.6.32.41/net/ipv4/tcp_ipv4.c linux-2.6.32.41/net/ipv4/tcp_ipv4.c
67338--- linux-2.6.32.41/net/ipv4/tcp_ipv4.c 2011-03-27 14:31:47.000000000 -0400
67339+++ linux-2.6.32.41/net/ipv4/tcp_ipv4.c 2011-04-17 15:56:46.000000000 -0400
67340@@ -84,6 +84,9 @@
67341 int sysctl_tcp_tw_reuse __read_mostly;
67342 int sysctl_tcp_low_latency __read_mostly;
67343
67344+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67345+extern int grsec_enable_blackhole;
67346+#endif
67347
67348 #ifdef CONFIG_TCP_MD5SIG
67349 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
67350@@ -1542,6 +1545,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
67351 return 0;
67352
67353 reset:
67354+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67355+ if (!grsec_enable_blackhole)
67356+#endif
67357 tcp_v4_send_reset(rsk, skb);
67358 discard:
67359 kfree_skb(skb);
67360@@ -1603,12 +1609,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
67361 TCP_SKB_CB(skb)->sacked = 0;
67362
67363 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
67364- if (!sk)
67365+ if (!sk) {
67366+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67367+ ret = 1;
67368+#endif
67369 goto no_tcp_socket;
67370+ }
67371
67372 process:
67373- if (sk->sk_state == TCP_TIME_WAIT)
67374+ if (sk->sk_state == TCP_TIME_WAIT) {
67375+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67376+ ret = 2;
67377+#endif
67378 goto do_time_wait;
67379+ }
67380
67381 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
67382 goto discard_and_relse;
67383@@ -1650,6 +1664,10 @@ no_tcp_socket:
67384 bad_packet:
67385 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
67386 } else {
67387+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67388+ if (!grsec_enable_blackhole || (ret == 1 &&
67389+ (skb->dev->flags & IFF_LOOPBACK)))
67390+#endif
67391 tcp_v4_send_reset(NULL, skb);
67392 }
67393
67394@@ -2237,7 +2255,11 @@ static void get_openreq4(struct sock *sk
67395 0, /* non standard timer */
67396 0, /* open_requests have no inode */
67397 atomic_read(&sk->sk_refcnt),
67398+#ifdef CONFIG_GRKERNSEC_HIDESYM
67399+ NULL,
67400+#else
67401 req,
67402+#endif
67403 len);
67404 }
67405
67406@@ -2279,7 +2301,12 @@ static void get_tcp4_sock(struct sock *s
67407 sock_i_uid(sk),
67408 icsk->icsk_probes_out,
67409 sock_i_ino(sk),
67410- atomic_read(&sk->sk_refcnt), sk,
67411+ atomic_read(&sk->sk_refcnt),
67412+#ifdef CONFIG_GRKERNSEC_HIDESYM
67413+ NULL,
67414+#else
67415+ sk,
67416+#endif
67417 jiffies_to_clock_t(icsk->icsk_rto),
67418 jiffies_to_clock_t(icsk->icsk_ack.ato),
67419 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
67420@@ -2307,7 +2334,13 @@ static void get_timewait4_sock(struct in
67421 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
67422 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
67423 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
67424- atomic_read(&tw->tw_refcnt), tw, len);
67425+ atomic_read(&tw->tw_refcnt),
67426+#ifdef CONFIG_GRKERNSEC_HIDESYM
67427+ NULL,
67428+#else
67429+ tw,
67430+#endif
67431+ len);
67432 }
67433
67434 #define TMPSZ 150
67435diff -urNp linux-2.6.32.41/net/ipv4/tcp_minisocks.c linux-2.6.32.41/net/ipv4/tcp_minisocks.c
67436--- linux-2.6.32.41/net/ipv4/tcp_minisocks.c 2011-03-27 14:31:47.000000000 -0400
67437+++ linux-2.6.32.41/net/ipv4/tcp_minisocks.c 2011-04-17 15:56:46.000000000 -0400
67438@@ -26,6 +26,10 @@
67439 #include <net/inet_common.h>
67440 #include <net/xfrm.h>
67441
67442+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67443+extern int grsec_enable_blackhole;
67444+#endif
67445+
67446 #ifdef CONFIG_SYSCTL
67447 #define SYNC_INIT 0 /* let the user enable it */
67448 #else
67449@@ -672,6 +676,10 @@ listen_overflow:
67450
67451 embryonic_reset:
67452 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
67453+
67454+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67455+ if (!grsec_enable_blackhole)
67456+#endif
67457 if (!(flg & TCP_FLAG_RST))
67458 req->rsk_ops->send_reset(sk, skb);
67459
67460diff -urNp linux-2.6.32.41/net/ipv4/tcp_output.c linux-2.6.32.41/net/ipv4/tcp_output.c
67461--- linux-2.6.32.41/net/ipv4/tcp_output.c 2011-03-27 14:31:47.000000000 -0400
67462+++ linux-2.6.32.41/net/ipv4/tcp_output.c 2011-05-16 21:46:57.000000000 -0400
67463@@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct s
67464 __u8 *md5_hash_location;
67465 int mss;
67466
67467+ pax_track_stack();
67468+
67469 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
67470 if (skb == NULL)
67471 return NULL;
67472diff -urNp linux-2.6.32.41/net/ipv4/tcp_probe.c linux-2.6.32.41/net/ipv4/tcp_probe.c
67473--- linux-2.6.32.41/net/ipv4/tcp_probe.c 2011-03-27 14:31:47.000000000 -0400
67474+++ linux-2.6.32.41/net/ipv4/tcp_probe.c 2011-04-17 15:56:46.000000000 -0400
67475@@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file
67476 if (cnt + width >= len)
67477 break;
67478
67479- if (copy_to_user(buf + cnt, tbuf, width))
67480+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
67481 return -EFAULT;
67482 cnt += width;
67483 }
67484diff -urNp linux-2.6.32.41/net/ipv4/tcp_timer.c linux-2.6.32.41/net/ipv4/tcp_timer.c
67485--- linux-2.6.32.41/net/ipv4/tcp_timer.c 2011-03-27 14:31:47.000000000 -0400
67486+++ linux-2.6.32.41/net/ipv4/tcp_timer.c 2011-04-17 15:56:46.000000000 -0400
67487@@ -21,6 +21,10 @@
67488 #include <linux/module.h>
67489 #include <net/tcp.h>
67490
67491+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67492+extern int grsec_lastack_retries;
67493+#endif
67494+
67495 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
67496 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
67497 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
67498@@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock
67499 }
67500 }
67501
67502+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67503+ if ((sk->sk_state == TCP_LAST_ACK) &&
67504+ (grsec_lastack_retries > 0) &&
67505+ (grsec_lastack_retries < retry_until))
67506+ retry_until = grsec_lastack_retries;
67507+#endif
67508+
67509 if (retransmits_timed_out(sk, retry_until)) {
67510 /* Has it gone just too far? */
67511 tcp_write_err(sk);
67512diff -urNp linux-2.6.32.41/net/ipv4/udp.c linux-2.6.32.41/net/ipv4/udp.c
67513--- linux-2.6.32.41/net/ipv4/udp.c 2011-03-27 14:31:47.000000000 -0400
67514+++ linux-2.6.32.41/net/ipv4/udp.c 2011-05-04 17:57:28.000000000 -0400
67515@@ -86,6 +86,7 @@
67516 #include <linux/types.h>
67517 #include <linux/fcntl.h>
67518 #include <linux/module.h>
67519+#include <linux/security.h>
67520 #include <linux/socket.h>
67521 #include <linux/sockios.h>
67522 #include <linux/igmp.h>
67523@@ -106,6 +107,10 @@
67524 #include <net/xfrm.h>
67525 #include "udp_impl.h"
67526
67527+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67528+extern int grsec_enable_blackhole;
67529+#endif
67530+
67531 struct udp_table udp_table;
67532 EXPORT_SYMBOL(udp_table);
67533
67534@@ -371,6 +376,9 @@ found:
67535 return s;
67536 }
67537
67538+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
67539+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
67540+
67541 /*
67542 * This routine is called by the ICMP module when it gets some
67543 * sort of error condition. If err < 0 then the socket should
67544@@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
67545 dport = usin->sin_port;
67546 if (dport == 0)
67547 return -EINVAL;
67548+
67549+ err = gr_search_udp_sendmsg(sk, usin);
67550+ if (err)
67551+ return err;
67552 } else {
67553 if (sk->sk_state != TCP_ESTABLISHED)
67554 return -EDESTADDRREQ;
67555+
67556+ err = gr_search_udp_sendmsg(sk, NULL);
67557+ if (err)
67558+ return err;
67559+
67560 daddr = inet->daddr;
67561 dport = inet->dport;
67562 /* Open fast path for connected socket.
67563@@ -945,6 +962,10 @@ try_again:
67564 if (!skb)
67565 goto out;
67566
67567+ err = gr_search_udp_recvmsg(sk, skb);
67568+ if (err)
67569+ goto out_free;
67570+
67571 ulen = skb->len - sizeof(struct udphdr);
67572 copied = len;
67573 if (copied > ulen)
67574@@ -1065,7 +1086,7 @@ static int __udp_queue_rcv_skb(struct so
67575 if (rc == -ENOMEM) {
67576 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
67577 is_udplite);
67578- atomic_inc(&sk->sk_drops);
67579+ atomic_inc_unchecked(&sk->sk_drops);
67580 }
67581 goto drop;
67582 }
67583@@ -1335,6 +1356,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
67584 goto csum_error;
67585
67586 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
67587+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67588+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
67589+#endif
67590 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
67591
67592 /*
67593@@ -1755,8 +1779,13 @@ static void udp4_format_sock(struct sock
67594 sk_wmem_alloc_get(sp),
67595 sk_rmem_alloc_get(sp),
67596 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
67597- atomic_read(&sp->sk_refcnt), sp,
67598- atomic_read(&sp->sk_drops), len);
67599+ atomic_read(&sp->sk_refcnt),
67600+#ifdef CONFIG_GRKERNSEC_HIDESYM
67601+ NULL,
67602+#else
67603+ sp,
67604+#endif
67605+ atomic_read_unchecked(&sp->sk_drops), len);
67606 }
67607
67608 int udp4_seq_show(struct seq_file *seq, void *v)
67609diff -urNp linux-2.6.32.41/net/ipv6/inet6_connection_sock.c linux-2.6.32.41/net/ipv6/inet6_connection_sock.c
67610--- linux-2.6.32.41/net/ipv6/inet6_connection_sock.c 2011-03-27 14:31:47.000000000 -0400
67611+++ linux-2.6.32.41/net/ipv6/inet6_connection_sock.c 2011-05-04 17:56:28.000000000 -0400
67612@@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *
67613 #ifdef CONFIG_XFRM
67614 {
67615 struct rt6_info *rt = (struct rt6_info *)dst;
67616- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
67617+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
67618 }
67619 #endif
67620 }
67621@@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(
67622 #ifdef CONFIG_XFRM
67623 if (dst) {
67624 struct rt6_info *rt = (struct rt6_info *)dst;
67625- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
67626+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
67627 sk->sk_dst_cache = NULL;
67628 dst_release(dst);
67629 dst = NULL;
67630diff -urNp linux-2.6.32.41/net/ipv6/inet6_hashtables.c linux-2.6.32.41/net/ipv6/inet6_hashtables.c
67631--- linux-2.6.32.41/net/ipv6/inet6_hashtables.c 2011-03-27 14:31:47.000000000 -0400
67632+++ linux-2.6.32.41/net/ipv6/inet6_hashtables.c 2011-05-04 17:56:28.000000000 -0400
67633@@ -118,7 +118,7 @@ out:
67634 }
67635 EXPORT_SYMBOL(__inet6_lookup_established);
67636
67637-static int inline compute_score(struct sock *sk, struct net *net,
67638+static inline int compute_score(struct sock *sk, struct net *net,
67639 const unsigned short hnum,
67640 const struct in6_addr *daddr,
67641 const int dif)
67642diff -urNp linux-2.6.32.41/net/ipv6/ipv6_sockglue.c linux-2.6.32.41/net/ipv6/ipv6_sockglue.c
67643--- linux-2.6.32.41/net/ipv6/ipv6_sockglue.c 2011-03-27 14:31:47.000000000 -0400
67644+++ linux-2.6.32.41/net/ipv6/ipv6_sockglue.c 2011-05-16 21:46:57.000000000 -0400
67645@@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct soc
67646 int val, valbool;
67647 int retv = -ENOPROTOOPT;
67648
67649+ pax_track_stack();
67650+
67651 if (optval == NULL)
67652 val=0;
67653 else {
67654@@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct soc
67655 int len;
67656 int val;
67657
67658+ pax_track_stack();
67659+
67660 if (ip6_mroute_opt(optname))
67661 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
67662
67663diff -urNp linux-2.6.32.41/net/ipv6/netfilter/ip6_tables.c linux-2.6.32.41/net/ipv6/netfilter/ip6_tables.c
67664--- linux-2.6.32.41/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:00:52.000000000 -0400
67665+++ linux-2.6.32.41/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:04:18.000000000 -0400
67666@@ -1173,6 +1173,7 @@ static int get_info(struct net *net, voi
67667 private = &tmp;
67668 }
67669 #endif
67670+ memset(&info, 0, sizeof(info));
67671 info.valid_hooks = t->valid_hooks;
67672 memcpy(info.hook_entry, private->hook_entry,
67673 sizeof(info.hook_entry));
67674diff -urNp linux-2.6.32.41/net/ipv6/raw.c linux-2.6.32.41/net/ipv6/raw.c
67675--- linux-2.6.32.41/net/ipv6/raw.c 2011-03-27 14:31:47.000000000 -0400
67676+++ linux-2.6.32.41/net/ipv6/raw.c 2011-05-16 21:46:57.000000000 -0400
67677@@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct s
67678 {
67679 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
67680 skb_checksum_complete(skb)) {
67681- atomic_inc(&sk->sk_drops);
67682+ atomic_inc_unchecked(&sk->sk_drops);
67683 kfree_skb(skb);
67684 return NET_RX_DROP;
67685 }
67686
67687 /* Charge it to the socket. */
67688 if (sock_queue_rcv_skb(sk,skb)<0) {
67689- atomic_inc(&sk->sk_drops);
67690+ atomic_inc_unchecked(&sk->sk_drops);
67691 kfree_skb(skb);
67692 return NET_RX_DROP;
67693 }
67694@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
67695 struct raw6_sock *rp = raw6_sk(sk);
67696
67697 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
67698- atomic_inc(&sk->sk_drops);
67699+ atomic_inc_unchecked(&sk->sk_drops);
67700 kfree_skb(skb);
67701 return NET_RX_DROP;
67702 }
67703@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
67704
67705 if (inet->hdrincl) {
67706 if (skb_checksum_complete(skb)) {
67707- atomic_inc(&sk->sk_drops);
67708+ atomic_inc_unchecked(&sk->sk_drops);
67709 kfree_skb(skb);
67710 return NET_RX_DROP;
67711 }
67712@@ -518,7 +518,7 @@ csum_copy_err:
67713 as some normal condition.
67714 */
67715 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
67716- atomic_inc(&sk->sk_drops);
67717+ atomic_inc_unchecked(&sk->sk_drops);
67718 goto out;
67719 }
67720
67721@@ -600,7 +600,7 @@ out:
67722 return err;
67723 }
67724
67725-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
67726+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
67727 struct flowi *fl, struct rt6_info *rt,
67728 unsigned int flags)
67729 {
67730@@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *i
67731 u16 proto;
67732 int err;
67733
67734+ pax_track_stack();
67735+
67736 /* Rough check on arithmetic overflow,
67737 better check is made in ip6_append_data().
67738 */
67739@@ -916,12 +918,17 @@ do_confirm:
67740 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
67741 char __user *optval, int optlen)
67742 {
67743+ struct icmp6_filter filter;
67744+
67745 switch (optname) {
67746 case ICMPV6_FILTER:
67747+ if (optlen < 0)
67748+ return -EINVAL;
67749 if (optlen > sizeof(struct icmp6_filter))
67750 optlen = sizeof(struct icmp6_filter);
67751- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
67752+ if (copy_from_user(&filter, optval, optlen))
67753 return -EFAULT;
67754+ memcpy(&raw6_sk(sk)->filter, &filter, optlen);
67755 return 0;
67756 default:
67757 return -ENOPROTOOPT;
67758@@ -933,6 +940,7 @@ static int rawv6_seticmpfilter(struct so
67759 static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
67760 char __user *optval, int __user *optlen)
67761 {
67762+ struct icmp6_filter filter;
67763 int len;
67764
67765 switch (optname) {
67766@@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct so
67767 len = sizeof(struct icmp6_filter);
67768 if (put_user(len, optlen))
67769 return -EFAULT;
67770- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
67771+ memcpy(&filter, &raw6_sk(sk)->filter, len);
67772+ if (copy_to_user(optval, &filter, len))
67773 return -EFAULT;
67774 return 0;
67775 default:
67776@@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct se
67777 0, 0L, 0,
67778 sock_i_uid(sp), 0,
67779 sock_i_ino(sp),
67780- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
67781+ atomic_read(&sp->sk_refcnt),
67782+#ifdef CONFIG_GRKERNSEC_HIDESYM
67783+ NULL,
67784+#else
67785+ sp,
67786+#endif
67787+ atomic_read_unchecked(&sp->sk_drops));
67788 }
67789
67790 static int raw6_seq_show(struct seq_file *seq, void *v)
67791diff -urNp linux-2.6.32.41/net/ipv6/tcp_ipv6.c linux-2.6.32.41/net/ipv6/tcp_ipv6.c
67792--- linux-2.6.32.41/net/ipv6/tcp_ipv6.c 2011-03-27 14:31:47.000000000 -0400
67793+++ linux-2.6.32.41/net/ipv6/tcp_ipv6.c 2011-04-17 15:56:46.000000000 -0400
67794@@ -88,6 +88,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
67795 }
67796 #endif
67797
67798+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67799+extern int grsec_enable_blackhole;
67800+#endif
67801+
67802 static void tcp_v6_hash(struct sock *sk)
67803 {
67804 if (sk->sk_state != TCP_CLOSE) {
67805@@ -1578,6 +1582,9 @@ static int tcp_v6_do_rcv(struct sock *sk
67806 return 0;
67807
67808 reset:
67809+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67810+ if (!grsec_enable_blackhole)
67811+#endif
67812 tcp_v6_send_reset(sk, skb);
67813 discard:
67814 if (opt_skb)
67815@@ -1655,12 +1662,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
67816 TCP_SKB_CB(skb)->sacked = 0;
67817
67818 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
67819- if (!sk)
67820+ if (!sk) {
67821+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67822+ ret = 1;
67823+#endif
67824 goto no_tcp_socket;
67825+ }
67826
67827 process:
67828- if (sk->sk_state == TCP_TIME_WAIT)
67829+ if (sk->sk_state == TCP_TIME_WAIT) {
67830+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67831+ ret = 2;
67832+#endif
67833 goto do_time_wait;
67834+ }
67835
67836 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
67837 goto discard_and_relse;
67838@@ -1700,6 +1715,10 @@ no_tcp_socket:
67839 bad_packet:
67840 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
67841 } else {
67842+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67843+ if (!grsec_enable_blackhole || (ret == 1 &&
67844+ (skb->dev->flags & IFF_LOOPBACK)))
67845+#endif
67846 tcp_v6_send_reset(NULL, skb);
67847 }
67848
67849@@ -1915,7 +1934,13 @@ static void get_openreq6(struct seq_file
67850 uid,
67851 0, /* non standard timer */
67852 0, /* open_requests have no inode */
67853- 0, req);
67854+ 0,
67855+#ifdef CONFIG_GRKERNSEC_HIDESYM
67856+ NULL
67857+#else
67858+ req
67859+#endif
67860+ );
67861 }
67862
67863 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
67864@@ -1965,7 +1990,12 @@ static void get_tcp6_sock(struct seq_fil
67865 sock_i_uid(sp),
67866 icsk->icsk_probes_out,
67867 sock_i_ino(sp),
67868- atomic_read(&sp->sk_refcnt), sp,
67869+ atomic_read(&sp->sk_refcnt),
67870+#ifdef CONFIG_GRKERNSEC_HIDESYM
67871+ NULL,
67872+#else
67873+ sp,
67874+#endif
67875 jiffies_to_clock_t(icsk->icsk_rto),
67876 jiffies_to_clock_t(icsk->icsk_ack.ato),
67877 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
67878@@ -2000,7 +2030,13 @@ static void get_timewait6_sock(struct se
67879 dest->s6_addr32[2], dest->s6_addr32[3], destp,
67880 tw->tw_substate, 0, 0,
67881 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
67882- atomic_read(&tw->tw_refcnt), tw);
67883+ atomic_read(&tw->tw_refcnt),
67884+#ifdef CONFIG_GRKERNSEC_HIDESYM
67885+ NULL
67886+#else
67887+ tw
67888+#endif
67889+ );
67890 }
67891
67892 static int tcp6_seq_show(struct seq_file *seq, void *v)
67893diff -urNp linux-2.6.32.41/net/ipv6/udp.c linux-2.6.32.41/net/ipv6/udp.c
67894--- linux-2.6.32.41/net/ipv6/udp.c 2011-03-27 14:31:47.000000000 -0400
67895+++ linux-2.6.32.41/net/ipv6/udp.c 2011-05-04 17:58:16.000000000 -0400
67896@@ -49,6 +49,10 @@
67897 #include <linux/seq_file.h>
67898 #include "udp_impl.h"
67899
67900+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67901+extern int grsec_enable_blackhole;
67902+#endif
67903+
67904 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
67905 {
67906 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
67907@@ -388,7 +392,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
67908 if (rc == -ENOMEM) {
67909 UDP6_INC_STATS_BH(sock_net(sk),
67910 UDP_MIB_RCVBUFERRORS, is_udplite);
67911- atomic_inc(&sk->sk_drops);
67912+ atomic_inc_unchecked(&sk->sk_drops);
67913 }
67914 goto drop;
67915 }
67916@@ -587,6 +591,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
67917 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
67918 proto == IPPROTO_UDPLITE);
67919
67920+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67921+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
67922+#endif
67923 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
67924
67925 kfree_skb(skb);
67926@@ -1206,8 +1213,13 @@ static void udp6_sock_seq_show(struct se
67927 0, 0L, 0,
67928 sock_i_uid(sp), 0,
67929 sock_i_ino(sp),
67930- atomic_read(&sp->sk_refcnt), sp,
67931- atomic_read(&sp->sk_drops));
67932+ atomic_read(&sp->sk_refcnt),
67933+#ifdef CONFIG_GRKERNSEC_HIDESYM
67934+ NULL,
67935+#else
67936+ sp,
67937+#endif
67938+ atomic_read_unchecked(&sp->sk_drops));
67939 }
67940
67941 int udp6_seq_show(struct seq_file *seq, void *v)
67942diff -urNp linux-2.6.32.41/net/irda/ircomm/ircomm_tty.c linux-2.6.32.41/net/irda/ircomm/ircomm_tty.c
67943--- linux-2.6.32.41/net/irda/ircomm/ircomm_tty.c 2011-03-27 14:31:47.000000000 -0400
67944+++ linux-2.6.32.41/net/irda/ircomm/ircomm_tty.c 2011-04-17 15:56:46.000000000 -0400
67945@@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(st
67946 add_wait_queue(&self->open_wait, &wait);
67947
67948 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
67949- __FILE__,__LINE__, tty->driver->name, self->open_count );
67950+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
67951
67952 /* As far as I can see, we protect open_count - Jean II */
67953 spin_lock_irqsave(&self->spinlock, flags);
67954 if (!tty_hung_up_p(filp)) {
67955 extra_count = 1;
67956- self->open_count--;
67957+ local_dec(&self->open_count);
67958 }
67959 spin_unlock_irqrestore(&self->spinlock, flags);
67960- self->blocked_open++;
67961+ local_inc(&self->blocked_open);
67962
67963 while (1) {
67964 if (tty->termios->c_cflag & CBAUD) {
67965@@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(st
67966 }
67967
67968 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
67969- __FILE__,__LINE__, tty->driver->name, self->open_count );
67970+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
67971
67972 schedule();
67973 }
67974@@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(st
67975 if (extra_count) {
67976 /* ++ is not atomic, so this should be protected - Jean II */
67977 spin_lock_irqsave(&self->spinlock, flags);
67978- self->open_count++;
67979+ local_inc(&self->open_count);
67980 spin_unlock_irqrestore(&self->spinlock, flags);
67981 }
67982- self->blocked_open--;
67983+ local_dec(&self->blocked_open);
67984
67985 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
67986- __FILE__,__LINE__, tty->driver->name, self->open_count);
67987+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
67988
67989 if (!retval)
67990 self->flags |= ASYNC_NORMAL_ACTIVE;
67991@@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_st
67992 }
67993 /* ++ is not atomic, so this should be protected - Jean II */
67994 spin_lock_irqsave(&self->spinlock, flags);
67995- self->open_count++;
67996+ local_inc(&self->open_count);
67997
67998 tty->driver_data = self;
67999 self->tty = tty;
68000 spin_unlock_irqrestore(&self->spinlock, flags);
68001
68002 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
68003- self->line, self->open_count);
68004+ self->line, local_read(&self->open_count));
68005
68006 /* Not really used by us, but lets do it anyway */
68007 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
68008@@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_
68009 return;
68010 }
68011
68012- if ((tty->count == 1) && (self->open_count != 1)) {
68013+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
68014 /*
68015 * Uh, oh. tty->count is 1, which means that the tty
68016 * structure will be freed. state->count should always
68017@@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_
68018 */
68019 IRDA_DEBUG(0, "%s(), bad serial port count; "
68020 "tty->count is 1, state->count is %d\n", __func__ ,
68021- self->open_count);
68022- self->open_count = 1;
68023+ local_read(&self->open_count));
68024+ local_set(&self->open_count, 1);
68025 }
68026
68027- if (--self->open_count < 0) {
68028+ if (local_dec_return(&self->open_count) < 0) {
68029 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
68030- __func__, self->line, self->open_count);
68031- self->open_count = 0;
68032+ __func__, self->line, local_read(&self->open_count));
68033+ local_set(&self->open_count, 0);
68034 }
68035- if (self->open_count) {
68036+ if (local_read(&self->open_count)) {
68037 spin_unlock_irqrestore(&self->spinlock, flags);
68038
68039 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
68040@@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_
68041 tty->closing = 0;
68042 self->tty = NULL;
68043
68044- if (self->blocked_open) {
68045+ if (local_read(&self->blocked_open)) {
68046 if (self->close_delay)
68047 schedule_timeout_interruptible(self->close_delay);
68048 wake_up_interruptible(&self->open_wait);
68049@@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty
68050 spin_lock_irqsave(&self->spinlock, flags);
68051 self->flags &= ~ASYNC_NORMAL_ACTIVE;
68052 self->tty = NULL;
68053- self->open_count = 0;
68054+ local_set(&self->open_count, 0);
68055 spin_unlock_irqrestore(&self->spinlock, flags);
68056
68057 wake_up_interruptible(&self->open_wait);
68058@@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct
68059 seq_putc(m, '\n');
68060
68061 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
68062- seq_printf(m, "Open count: %d\n", self->open_count);
68063+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
68064 seq_printf(m, "Max data size: %d\n", self->max_data_size);
68065 seq_printf(m, "Max header size: %d\n", self->max_header_size);
68066
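[Note on the open_count/blocked_open conversions above: the patch replaces plain int counters with local_t from asm/local.h (local_read(), local_inc(), local_dec_return(), local_set()), giving well-defined atomic updates for bookkeeping that was previously bumped as a bare integer. The sketch below mirrors the close-side discipline of the hunks (detect and repair a count that has gone negative) in user space; a C11 atomic merely stands in for local_t, which is a kernel-only type, and the demo_* names are invented for the sketch.]

#include <stdatomic.h>
#include <stdio.h>

static atomic_int open_count;		/* stands in for local_t open_count */

static void demo_open(void)  { atomic_fetch_add(&open_count, 1); }

static int demo_close(void)
{
	/* mirrors local_dec_return(): catch a count that went negative */
	int now = atomic_fetch_sub(&open_count, 1) - 1;
	if (now < 0) {
		fprintf(stderr, "bad port count: %d, resetting\n", now);
		atomic_store(&open_count, 0);	/* local_set(&..., 0) */
		now = 0;
	}
	return now;
}

int main(void)
{
	demo_open();
	printf("after close: %d\n", demo_close());
	printf("after spurious close: %d\n", demo_close());
	return 0;
}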
68067diff -urNp linux-2.6.32.41/net/iucv/af_iucv.c linux-2.6.32.41/net/iucv/af_iucv.c
68068--- linux-2.6.32.41/net/iucv/af_iucv.c 2011-03-27 14:31:47.000000000 -0400
68069+++ linux-2.6.32.41/net/iucv/af_iucv.c 2011-05-04 17:56:28.000000000 -0400
68070@@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct soc
68071
68072 write_lock_bh(&iucv_sk_list.lock);
68073
68074- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
68075+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
68076 while (__iucv_get_sock_by_name(name)) {
68077 sprintf(name, "%08x",
68078- atomic_inc_return(&iucv_sk_list.autobind_name));
68079+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
68080 }
68081
68082 write_unlock_bh(&iucv_sk_list.lock);
68083diff -urNp linux-2.6.32.41/net/key/af_key.c linux-2.6.32.41/net/key/af_key.c
68084--- linux-2.6.32.41/net/key/af_key.c 2011-03-27 14:31:47.000000000 -0400
68085+++ linux-2.6.32.41/net/key/af_key.c 2011-05-16 21:46:57.000000000 -0400
68086@@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk
68087 struct xfrm_migrate m[XFRM_MAX_DEPTH];
68088 struct xfrm_kmaddress k;
68089
68090+ pax_track_stack();
68091+
68092 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
68093 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
68094 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
68095@@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_fil
68096 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
68097 else
68098 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
68099+#ifdef CONFIG_GRKERNSEC_HIDESYM
68100+ NULL,
68101+#else
68102 s,
68103+#endif
68104 atomic_read(&s->sk_refcnt),
68105 sk_rmem_alloc_get(s),
68106 sk_wmem_alloc_get(s),
68107diff -urNp linux-2.6.32.41/net/mac80211/cfg.c linux-2.6.32.41/net/mac80211/cfg.c
68108--- linux-2.6.32.41/net/mac80211/cfg.c 2011-03-27 14:31:47.000000000 -0400
68109+++ linux-2.6.32.41/net/mac80211/cfg.c 2011-04-17 15:56:46.000000000 -0400
68110@@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(st
68111 return err;
68112 }
68113
68114-struct cfg80211_ops mac80211_config_ops = {
68115+const struct cfg80211_ops mac80211_config_ops = {
68116 .add_virtual_intf = ieee80211_add_iface,
68117 .del_virtual_intf = ieee80211_del_iface,
68118 .change_virtual_intf = ieee80211_change_iface,
68119diff -urNp linux-2.6.32.41/net/mac80211/cfg.h linux-2.6.32.41/net/mac80211/cfg.h
68120--- linux-2.6.32.41/net/mac80211/cfg.h 2011-03-27 14:31:47.000000000 -0400
68121+++ linux-2.6.32.41/net/mac80211/cfg.h 2011-04-17 15:56:46.000000000 -0400
68122@@ -4,6 +4,6 @@
68123 #ifndef __CFG_H
68124 #define __CFG_H
68125
68126-extern struct cfg80211_ops mac80211_config_ops;
68127+extern const struct cfg80211_ops mac80211_config_ops;
68128
68129 #endif /* __CFG_H */
68130diff -urNp linux-2.6.32.41/net/mac80211/debugfs_key.c linux-2.6.32.41/net/mac80211/debugfs_key.c
68131--- linux-2.6.32.41/net/mac80211/debugfs_key.c 2011-03-27 14:31:47.000000000 -0400
68132+++ linux-2.6.32.41/net/mac80211/debugfs_key.c 2011-04-17 15:56:46.000000000 -0400
68133@@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file
68134 size_t count, loff_t *ppos)
68135 {
68136 struct ieee80211_key *key = file->private_data;
68137- int i, res, bufsize = 2 * key->conf.keylen + 2;
68138+ int i, bufsize = 2 * key->conf.keylen + 2;
68139 char *buf = kmalloc(bufsize, GFP_KERNEL);
68140 char *p = buf;
68141+ ssize_t res;
68142+
68143+ if (buf == NULL)
68144+ return -ENOMEM;
68145
68146 for (i = 0; i < key->conf.keylen; i++)
68147 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
68148diff -urNp linux-2.6.32.41/net/mac80211/debugfs_sta.c linux-2.6.32.41/net/mac80211/debugfs_sta.c
68149--- linux-2.6.32.41/net/mac80211/debugfs_sta.c 2011-03-27 14:31:47.000000000 -0400
68150+++ linux-2.6.32.41/net/mac80211/debugfs_sta.c 2011-05-16 21:46:57.000000000 -0400
68151@@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struc
68152 int i;
68153 struct sta_info *sta = file->private_data;
68154
68155+ pax_track_stack();
68156+
68157 spin_lock_bh(&sta->lock);
68158 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
68159 sta->ampdu_mlme.dialog_token_allocator + 1);
68160diff -urNp linux-2.6.32.41/net/mac80211/ieee80211_i.h linux-2.6.32.41/net/mac80211/ieee80211_i.h
68161--- linux-2.6.32.41/net/mac80211/ieee80211_i.h 2011-03-27 14:31:47.000000000 -0400
68162+++ linux-2.6.32.41/net/mac80211/ieee80211_i.h 2011-04-17 15:56:46.000000000 -0400
68163@@ -25,6 +25,7 @@
68164 #include <linux/etherdevice.h>
68165 #include <net/cfg80211.h>
68166 #include <net/mac80211.h>
68167+#include <asm/local.h>
68168 #include "key.h"
68169 #include "sta_info.h"
68170
68171@@ -635,7 +636,7 @@ struct ieee80211_local {
68172 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
68173 spinlock_t queue_stop_reason_lock;
68174
68175- int open_count;
68176+ local_t open_count;
68177 int monitors, cooked_mntrs;
68178 /* number of interfaces with corresponding FIF_ flags */
68179 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
68180diff -urNp linux-2.6.32.41/net/mac80211/iface.c linux-2.6.32.41/net/mac80211/iface.c
68181--- linux-2.6.32.41/net/mac80211/iface.c 2011-03-27 14:31:47.000000000 -0400
68182+++ linux-2.6.32.41/net/mac80211/iface.c 2011-04-17 15:56:46.000000000 -0400
68183@@ -166,7 +166,7 @@ static int ieee80211_open(struct net_dev
68184 break;
68185 }
68186
68187- if (local->open_count == 0) {
68188+ if (local_read(&local->open_count) == 0) {
68189 res = drv_start(local);
68190 if (res)
68191 goto err_del_bss;
68192@@ -196,7 +196,7 @@ static int ieee80211_open(struct net_dev
68193 * Validate the MAC address for this device.
68194 */
68195 if (!is_valid_ether_addr(dev->dev_addr)) {
68196- if (!local->open_count)
68197+ if (!local_read(&local->open_count))
68198 drv_stop(local);
68199 return -EADDRNOTAVAIL;
68200 }
68201@@ -292,7 +292,7 @@ static int ieee80211_open(struct net_dev
68202
68203 hw_reconf_flags |= __ieee80211_recalc_idle(local);
68204
68205- local->open_count++;
68206+ local_inc(&local->open_count);
68207 if (hw_reconf_flags) {
68208 ieee80211_hw_config(local, hw_reconf_flags);
68209 /*
68210@@ -320,7 +320,7 @@ static int ieee80211_open(struct net_dev
68211 err_del_interface:
68212 drv_remove_interface(local, &conf);
68213 err_stop:
68214- if (!local->open_count)
68215+ if (!local_read(&local->open_count))
68216 drv_stop(local);
68217 err_del_bss:
68218 sdata->bss = NULL;
68219@@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_dev
68220 WARN_ON(!list_empty(&sdata->u.ap.vlans));
68221 }
68222
68223- local->open_count--;
68224+ local_dec(&local->open_count);
68225
68226 switch (sdata->vif.type) {
68227 case NL80211_IFTYPE_AP_VLAN:
68228@@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_dev
68229
68230 ieee80211_recalc_ps(local, -1);
68231
68232- if (local->open_count == 0) {
68233+ if (local_read(&local->open_count) == 0) {
68234 ieee80211_clear_tx_pending(local);
68235 ieee80211_stop_device(local);
68236
68237diff -urNp linux-2.6.32.41/net/mac80211/main.c linux-2.6.32.41/net/mac80211/main.c
68238--- linux-2.6.32.41/net/mac80211/main.c 2011-05-10 22:12:02.000000000 -0400
68239+++ linux-2.6.32.41/net/mac80211/main.c 2011-05-10 22:12:34.000000000 -0400
68240@@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211
68241 local->hw.conf.power_level = power;
68242 }
68243
68244- if (changed && local->open_count) {
68245+ if (changed && local_read(&local->open_count)) {
68246 ret = drv_config(local, changed);
68247 /*
68248 * Goal:
68249diff -urNp linux-2.6.32.41/net/mac80211/mlme.c linux-2.6.32.41/net/mac80211/mlme.c
68250--- linux-2.6.32.41/net/mac80211/mlme.c 2011-03-27 14:31:47.000000000 -0400
68251+++ linux-2.6.32.41/net/mac80211/mlme.c 2011-05-16 21:46:57.000000000 -0400
68252@@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee
68253 bool have_higher_than_11mbit = false, newsta = false;
68254 u16 ap_ht_cap_flags;
68255
68256+ pax_track_stack();
68257+
68258 /*
68259 * AssocResp and ReassocResp have identical structure, so process both
68260 * of them in this function.
68261diff -urNp linux-2.6.32.41/net/mac80211/pm.c linux-2.6.32.41/net/mac80211/pm.c
68262--- linux-2.6.32.41/net/mac80211/pm.c 2011-03-27 14:31:47.000000000 -0400
68263+++ linux-2.6.32.41/net/mac80211/pm.c 2011-04-17 15:56:46.000000000 -0400
68264@@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211
68265 }
68266
68267 /* stop hardware - this must stop RX */
68268- if (local->open_count)
68269+ if (local_read(&local->open_count))
68270 ieee80211_stop_device(local);
68271
68272 local->suspended = true;
68273diff -urNp linux-2.6.32.41/net/mac80211/rate.c linux-2.6.32.41/net/mac80211/rate.c
68274--- linux-2.6.32.41/net/mac80211/rate.c 2011-03-27 14:31:47.000000000 -0400
68275+++ linux-2.6.32.41/net/mac80211/rate.c 2011-04-17 15:56:46.000000000 -0400
68276@@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct
68277 struct rate_control_ref *ref, *old;
68278
68279 ASSERT_RTNL();
68280- if (local->open_count)
68281+ if (local_read(&local->open_count))
68282 return -EBUSY;
68283
68284 ref = rate_control_alloc(name, local);
68285diff -urNp linux-2.6.32.41/net/mac80211/tx.c linux-2.6.32.41/net/mac80211/tx.c
68286--- linux-2.6.32.41/net/mac80211/tx.c 2011-03-27 14:31:47.000000000 -0400
68287+++ linux-2.6.32.41/net/mac80211/tx.c 2011-04-17 15:56:46.000000000 -0400
68288@@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct
68289 return cpu_to_le16(dur);
68290 }
68291
68292-static int inline is_ieee80211_device(struct ieee80211_local *local,
68293+static inline int is_ieee80211_device(struct ieee80211_local *local,
68294 struct net_device *dev)
68295 {
68296 return local == wdev_priv(dev->ieee80211_ptr);
68297diff -urNp linux-2.6.32.41/net/mac80211/util.c linux-2.6.32.41/net/mac80211/util.c
68298--- linux-2.6.32.41/net/mac80211/util.c 2011-03-27 14:31:47.000000000 -0400
68299+++ linux-2.6.32.41/net/mac80211/util.c 2011-04-17 15:56:46.000000000 -0400
68300@@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_
68301 local->resuming = true;
68302
68303 /* restart hardware */
68304- if (local->open_count) {
68305+ if (local_read(&local->open_count)) {
68306 /*
68307 * Upon resume hardware can sometimes be goofy due to
68308 * various platform / driver / bus issues, so restarting
68309diff -urNp linux-2.6.32.41/net/netfilter/ipvs/ip_vs_app.c linux-2.6.32.41/net/netfilter/ipvs/ip_vs_app.c
68310--- linux-2.6.32.41/net/netfilter/ipvs/ip_vs_app.c 2011-03-27 14:31:47.000000000 -0400
68311+++ linux-2.6.32.41/net/netfilter/ipvs/ip_vs_app.c 2011-05-17 19:26:34.000000000 -0400
68312@@ -564,7 +564,7 @@ static const struct file_operations ip_v
68313 .open = ip_vs_app_open,
68314 .read = seq_read,
68315 .llseek = seq_lseek,
68316- .release = seq_release,
68317+ .release = seq_release_net,
68318 };
68319 #endif
68320
68321diff -urNp linux-2.6.32.41/net/netfilter/ipvs/ip_vs_conn.c linux-2.6.32.41/net/netfilter/ipvs/ip_vs_conn.c
68322--- linux-2.6.32.41/net/netfilter/ipvs/ip_vs_conn.c 2011-03-27 14:31:47.000000000 -0400
68323+++ linux-2.6.32.41/net/netfilter/ipvs/ip_vs_conn.c 2011-05-17 19:26:34.000000000 -0400
68324@@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
68325 /* if the connection is not template and is created
68326 * by sync, preserve the activity flag.
68327 */
68328- cp->flags |= atomic_read(&dest->conn_flags) &
68329+ cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
68330 (~IP_VS_CONN_F_INACTIVE);
68331 else
68332- cp->flags |= atomic_read(&dest->conn_flags);
68333+ cp->flags |= atomic_read_unchecked(&dest->conn_flags);
68334 cp->dest = dest;
68335
68336 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
68337@@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const
68338 atomic_set(&cp->refcnt, 1);
68339
68340 atomic_set(&cp->n_control, 0);
68341- atomic_set(&cp->in_pkts, 0);
68342+ atomic_set_unchecked(&cp->in_pkts, 0);
68343
68344 atomic_inc(&ip_vs_conn_count);
68345 if (flags & IP_VS_CONN_F_NO_CPORT)
68346@@ -871,7 +871,7 @@ static const struct file_operations ip_v
68347 .open = ip_vs_conn_open,
68348 .read = seq_read,
68349 .llseek = seq_lseek,
68350- .release = seq_release,
68351+ .release = seq_release_net,
68352 };
68353
68354 static const char *ip_vs_origin_name(unsigned flags)
68355@@ -934,7 +934,7 @@ static const struct file_operations ip_v
68356 .open = ip_vs_conn_sync_open,
68357 .read = seq_read,
68358 .llseek = seq_lseek,
68359- .release = seq_release,
68360+ .release = seq_release_net,
68361 };
68362
68363 #endif
68364@@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip
68365
68366 /* Don't drop the entry if its number of incoming packets is not
68367 located in [0, 8] */
68368- i = atomic_read(&cp->in_pkts);
68369+ i = atomic_read_unchecked(&cp->in_pkts);
68370 if (i > 8 || i < 0) return 0;
68371
68372 if (!todrop_rate[i]) return 0;
68373diff -urNp linux-2.6.32.41/net/netfilter/ipvs/ip_vs_core.c linux-2.6.32.41/net/netfilter/ipvs/ip_vs_core.c
68374--- linux-2.6.32.41/net/netfilter/ipvs/ip_vs_core.c 2011-03-27 14:31:47.000000000 -0400
68375+++ linux-2.6.32.41/net/netfilter/ipvs/ip_vs_core.c 2011-05-04 17:56:28.000000000 -0400
68376@@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *sv
68377 ret = cp->packet_xmit(skb, cp, pp);
68378 /* do not touch skb anymore */
68379
68380- atomic_inc(&cp->in_pkts);
68381+ atomic_inc_unchecked(&cp->in_pkts);
68382 ip_vs_conn_put(cp);
68383 return ret;
68384 }
68385@@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk
68386 * Sync connection if it is about to close to
68387 * encorage the standby servers to update the connections timeout
68388 */
68389- pkts = atomic_add_return(1, &cp->in_pkts);
68390+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
68391 if (af == AF_INET &&
68392 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
68393 (((cp->protocol != IPPROTO_TCP ||
68394diff -urNp linux-2.6.32.41/net/netfilter/ipvs/ip_vs_ctl.c linux-2.6.32.41/net/netfilter/ipvs/ip_vs_ctl.c
68395--- linux-2.6.32.41/net/netfilter/ipvs/ip_vs_ctl.c 2011-03-27 14:31:47.000000000 -0400
68396+++ linux-2.6.32.41/net/netfilter/ipvs/ip_vs_ctl.c 2011-05-17 19:26:34.000000000 -0400
68397@@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service
68398 ip_vs_rs_hash(dest);
68399 write_unlock_bh(&__ip_vs_rs_lock);
68400 }
68401- atomic_set(&dest->conn_flags, conn_flags);
68402+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
68403
68404 /* bind the service */
68405 if (!dest->svc) {
68406@@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct se
68407 " %-7s %-6d %-10d %-10d\n",
68408 &dest->addr.in6,
68409 ntohs(dest->port),
68410- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
68411+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
68412 atomic_read(&dest->weight),
68413 atomic_read(&dest->activeconns),
68414 atomic_read(&dest->inactconns));
68415@@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct se
68416 "%-7s %-6d %-10d %-10d\n",
68417 ntohl(dest->addr.ip),
68418 ntohs(dest->port),
68419- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
68420+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
68421 atomic_read(&dest->weight),
68422 atomic_read(&dest->activeconns),
68423 atomic_read(&dest->inactconns));
68424@@ -1927,7 +1927,7 @@ static const struct file_operations ip_v
68425 .open = ip_vs_info_open,
68426 .read = seq_read,
68427 .llseek = seq_lseek,
68428- .release = seq_release_private,
68429+ .release = seq_release_net,
68430 };
68431
68432 #endif
68433@@ -1976,7 +1976,7 @@ static const struct file_operations ip_v
68434 .open = ip_vs_stats_seq_open,
68435 .read = seq_read,
68436 .llseek = seq_lseek,
68437- .release = single_release,
68438+ .release = single_release_net,
68439 };
68440
68441 #endif
68442@@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip
68443
68444 entry.addr = dest->addr.ip;
68445 entry.port = dest->port;
68446- entry.conn_flags = atomic_read(&dest->conn_flags);
68447+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
68448 entry.weight = atomic_read(&dest->weight);
68449 entry.u_threshold = dest->u_threshold;
68450 entry.l_threshold = dest->l_threshold;
68451@@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cm
68452 unsigned char arg[128];
68453 int ret = 0;
68454
68455+ pax_track_stack();
68456+
68457 if (!capable(CAP_NET_ADMIN))
68458 return -EPERM;
68459
68460@@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct s
68461 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
68462
68463 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
68464- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
68465+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
68466 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
68467 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
68468 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
68469diff -urNp linux-2.6.32.41/net/netfilter/ipvs/ip_vs_sync.c linux-2.6.32.41/net/netfilter/ipvs/ip_vs_sync.c
68470--- linux-2.6.32.41/net/netfilter/ipvs/ip_vs_sync.c 2011-03-27 14:31:47.000000000 -0400
68471+++ linux-2.6.32.41/net/netfilter/ipvs/ip_vs_sync.c 2011-05-04 17:56:28.000000000 -0400
68472@@ -438,7 +438,7 @@ static void ip_vs_process_message(const
68473
68474 if (opt)
68475 memcpy(&cp->in_seq, opt, sizeof(*opt));
68476- atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
68477+ atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
68478 cp->state = state;
68479 cp->old_state = cp->state;
68480 /*
68481diff -urNp linux-2.6.32.41/net/netfilter/ipvs/ip_vs_xmit.c linux-2.6.32.41/net/netfilter/ipvs/ip_vs_xmit.c
68482--- linux-2.6.32.41/net/netfilter/ipvs/ip_vs_xmit.c 2011-03-27 14:31:47.000000000 -0400
68483+++ linux-2.6.32.41/net/netfilter/ipvs/ip_vs_xmit.c 2011-05-04 17:56:28.000000000 -0400
68484@@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
68485 else
68486 rc = NF_ACCEPT;
68487 /* do not touch skb anymore */
68488- atomic_inc(&cp->in_pkts);
68489+ atomic_inc_unchecked(&cp->in_pkts);
68490 goto out;
68491 }
68492
68493@@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
68494 else
68495 rc = NF_ACCEPT;
68496 /* do not touch skb anymore */
68497- atomic_inc(&cp->in_pkts);
68498+ atomic_inc_unchecked(&cp->in_pkts);
68499 goto out;
68500 }
68501
68502diff -urNp linux-2.6.32.41/net/netfilter/Kconfig linux-2.6.32.41/net/netfilter/Kconfig
68503--- linux-2.6.32.41/net/netfilter/Kconfig 2011-03-27 14:31:47.000000000 -0400
68504+++ linux-2.6.32.41/net/netfilter/Kconfig 2011-04-17 15:56:46.000000000 -0400
68505@@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
68506
68507 To compile it as a module, choose M here. If unsure, say N.
68508
68509+config NETFILTER_XT_MATCH_GRADM
68510+ tristate '"gradm" match support'
68511+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
68512+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
68513+ ---help---
68514+ The gradm match allows to match on grsecurity RBAC being enabled.
68515+ It is useful when iptables rules are applied early on bootup to
68516+ prevent connections to the machine (except from a trusted host)
68517+ while the RBAC system is disabled.
68518+
68519 config NETFILTER_XT_MATCH_HASHLIMIT
68520 tristate '"hashlimit" match support'
68521 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
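[The Kconfig help above describes the intended use of the gradm match: keep everything except a trusted host out until the RBAC policy has been loaded. In practice that is a rule of roughly the form "iptables -A INPUT ! -s 192.168.1.10 -m gradm ! --enabled -j DROP", i.e. drop remote traffic whenever RBAC is not yet enabled. The address is a placeholder, and the --enabled option name is an assumption about the companion libxt_gradm userspace extension; check the extension actually installed.]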
68522diff -urNp linux-2.6.32.41/net/netfilter/Makefile linux-2.6.32.41/net/netfilter/Makefile
68523--- linux-2.6.32.41/net/netfilter/Makefile 2011-03-27 14:31:47.000000000 -0400
68524+++ linux-2.6.32.41/net/netfilter/Makefile 2011-04-17 15:56:46.000000000 -0400
68525@@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRAC
68526 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
68527 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
68528 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
68529+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
68530 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
68531 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
68532 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
68533diff -urNp linux-2.6.32.41/net/netfilter/nf_conntrack_netlink.c linux-2.6.32.41/net/netfilter/nf_conntrack_netlink.c
68534--- linux-2.6.32.41/net/netfilter/nf_conntrack_netlink.c 2011-03-27 14:31:47.000000000 -0400
68535+++ linux-2.6.32.41/net/netfilter/nf_conntrack_netlink.c 2011-04-17 15:56:46.000000000 -0400
68536@@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlatt
68537 static int
68538 ctnetlink_parse_tuple(const struct nlattr * const cda[],
68539 struct nf_conntrack_tuple *tuple,
68540- enum ctattr_tuple type, u_int8_t l3num)
68541+ enum ctattr_type type, u_int8_t l3num)
68542 {
68543 struct nlattr *tb[CTA_TUPLE_MAX+1];
68544 int err;
68545diff -urNp linux-2.6.32.41/net/netfilter/nfnetlink_log.c linux-2.6.32.41/net/netfilter/nfnetlink_log.c
68546--- linux-2.6.32.41/net/netfilter/nfnetlink_log.c 2011-03-27 14:31:47.000000000 -0400
68547+++ linux-2.6.32.41/net/netfilter/nfnetlink_log.c 2011-05-04 17:56:28.000000000 -0400
68548@@ -68,7 +68,7 @@ struct nfulnl_instance {
68549 };
68550
68551 static DEFINE_RWLOCK(instances_lock);
68552-static atomic_t global_seq;
68553+static atomic_unchecked_t global_seq;
68554
68555 #define INSTANCE_BUCKETS 16
68556 static struct hlist_head instance_table[INSTANCE_BUCKETS];
68557@@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_ins
68558 /* global sequence number */
68559 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
68560 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
68561- htonl(atomic_inc_return(&global_seq)));
68562+ htonl(atomic_inc_return_unchecked(&global_seq)));
68563
68564 if (data_len) {
68565 struct nlattr *nla;
68566diff -urNp linux-2.6.32.41/net/netfilter/xt_gradm.c linux-2.6.32.41/net/netfilter/xt_gradm.c
68567--- linux-2.6.32.41/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
68568+++ linux-2.6.32.41/net/netfilter/xt_gradm.c 2011-04-17 15:56:46.000000000 -0400
68569@@ -0,0 +1,51 @@
68570+/*
68571+ * gradm match for netfilter
68572