pkgs/core/kernel/patches/grsecurity-2.2.0-2.6.35.4-201009172030.patch
1 diff -urNp linux-2.6.35.4/arch/alpha/include/asm/dma-mapping.h linux-2.6.35.4/arch/alpha/include/asm/dma-mapping.h
2 --- linux-2.6.35.4/arch/alpha/include/asm/dma-mapping.h 2010-08-26 19:47:12.000000000 -0400
3 +++ linux-2.6.35.4/arch/alpha/include/asm/dma-mapping.h 2010-09-17 20:12:09.000000000 -0400
4 @@ -3,9 +3,9 @@
5
6 #include <linux/dma-attrs.h>
7
8 -extern struct dma_map_ops *dma_ops;
9 +extern const struct dma_map_ops *dma_ops;
10
11 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
12 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
13 {
14 return dma_ops;
15 }
16 diff -urNp linux-2.6.35.4/arch/alpha/include/asm/elf.h linux-2.6.35.4/arch/alpha/include/asm/elf.h
17 --- linux-2.6.35.4/arch/alpha/include/asm/elf.h 2010-08-26 19:47:12.000000000 -0400
18 +++ linux-2.6.35.4/arch/alpha/include/asm/elf.h 2010-09-17 20:12:09.000000000 -0400
19 @@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
20
21 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
22
23 +#ifdef CONFIG_PAX_ASLR
24 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
25 +
26 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
27 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
28 +#endif
29 +
30 /* $0 is set by ld.so to a pointer to a function which might be
31 registered using atexit. This provides a mean for the dynamic
32 linker to call DT_FINI functions for shared libraries that have
33 diff -urNp linux-2.6.35.4/arch/alpha/include/asm/pgtable.h linux-2.6.35.4/arch/alpha/include/asm/pgtable.h
34 --- linux-2.6.35.4/arch/alpha/include/asm/pgtable.h 2010-08-26 19:47:12.000000000 -0400
35 +++ linux-2.6.35.4/arch/alpha/include/asm/pgtable.h 2010-09-17 20:12:09.000000000 -0400
36 @@ -101,6 +101,17 @@ struct vm_area_struct;
37 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
38 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
39 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
40 +
41 +#ifdef CONFIG_PAX_PAGEEXEC
42 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
43 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
44 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
45 +#else
46 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
47 +# define PAGE_COPY_NOEXEC PAGE_COPY
48 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
49 +#endif
50 +
51 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
52
53 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
54 diff -urNp linux-2.6.35.4/arch/alpha/kernel/module.c linux-2.6.35.4/arch/alpha/kernel/module.c
55 --- linux-2.6.35.4/arch/alpha/kernel/module.c 2010-08-26 19:47:12.000000000 -0400
56 +++ linux-2.6.35.4/arch/alpha/kernel/module.c 2010-09-17 20:12:09.000000000 -0400
57 @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
58
59 /* The small sections were sorted to the end of the segment.
60 The following should definitely cover them. */
61 - gp = (u64)me->module_core + me->core_size - 0x8000;
62 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
63 got = sechdrs[me->arch.gotsecindex].sh_addr;
64
65 for (i = 0; i < n; i++) {
66 diff -urNp linux-2.6.35.4/arch/alpha/kernel/osf_sys.c linux-2.6.35.4/arch/alpha/kernel/osf_sys.c
67 --- linux-2.6.35.4/arch/alpha/kernel/osf_sys.c 2010-08-26 19:47:12.000000000 -0400
68 +++ linux-2.6.35.4/arch/alpha/kernel/osf_sys.c 2010-09-17 20:12:09.000000000 -0400
69 @@ -1170,7 +1170,7 @@ arch_get_unmapped_area_1(unsigned long a
70 /* At this point: (!vma || addr < vma->vm_end). */
71 if (limit - len < addr)
72 return -ENOMEM;
73 - if (!vma || addr + len <= vma->vm_start)
74 + if (check_heap_stack_gap(vma, addr, len))
75 return addr;
76 addr = vma->vm_end;
77 vma = vma->vm_next;
78 @@ -1206,6 +1206,10 @@ arch_get_unmapped_area(struct file *filp
79 merely specific addresses, but regions of memory -- perhaps
80 this feature should be incorporated into all ports? */
81
82 +#ifdef CONFIG_PAX_RANDMMAP
83 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
84 +#endif
85 +
86 if (addr) {
87 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
88 if (addr != (unsigned long) -ENOMEM)
89 @@ -1213,8 +1217,8 @@ arch_get_unmapped_area(struct file *filp
90 }
91
92 /* Next, try allocating at TASK_UNMAPPED_BASE. */
93 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
94 - len, limit);
95 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
96 +
97 if (addr != (unsigned long) -ENOMEM)
98 return addr;
99
100 diff -urNp linux-2.6.35.4/arch/alpha/kernel/pci_iommu.c linux-2.6.35.4/arch/alpha/kernel/pci_iommu.c
101 --- linux-2.6.35.4/arch/alpha/kernel/pci_iommu.c 2010-08-26 19:47:12.000000000 -0400
102 +++ linux-2.6.35.4/arch/alpha/kernel/pci_iommu.c 2010-09-17 20:12:09.000000000 -0400
103 @@ -950,7 +950,7 @@ static int alpha_pci_set_mask(struct dev
104 return 0;
105 }
106
107 -struct dma_map_ops alpha_pci_ops = {
108 +const struct dma_map_ops alpha_pci_ops = {
109 .alloc_coherent = alpha_pci_alloc_coherent,
110 .free_coherent = alpha_pci_free_coherent,
111 .map_page = alpha_pci_map_page,
112 @@ -962,5 +962,5 @@ struct dma_map_ops alpha_pci_ops = {
113 .set_dma_mask = alpha_pci_set_mask,
114 };
115
116 -struct dma_map_ops *dma_ops = &alpha_pci_ops;
117 +const struct dma_map_ops *dma_ops = &alpha_pci_ops;
118 EXPORT_SYMBOL(dma_ops);
119 diff -urNp linux-2.6.35.4/arch/alpha/kernel/pci-noop.c linux-2.6.35.4/arch/alpha/kernel/pci-noop.c
120 --- linux-2.6.35.4/arch/alpha/kernel/pci-noop.c 2010-08-26 19:47:12.000000000 -0400
121 +++ linux-2.6.35.4/arch/alpha/kernel/pci-noop.c 2010-09-17 20:12:09.000000000 -0400
122 @@ -173,7 +173,7 @@ static int alpha_noop_set_mask(struct de
123 return 0;
124 }
125
126 -struct dma_map_ops alpha_noop_ops = {
127 +const struct dma_map_ops alpha_noop_ops = {
128 .alloc_coherent = alpha_noop_alloc_coherent,
129 .free_coherent = alpha_noop_free_coherent,
130 .map_page = alpha_noop_map_page,
131 @@ -183,7 +183,7 @@ struct dma_map_ops alpha_noop_ops = {
132 .set_dma_mask = alpha_noop_set_mask,
133 };
134
135 -struct dma_map_ops *dma_ops = &alpha_noop_ops;
136 +const struct dma_map_ops *dma_ops = &alpha_noop_ops;
137 EXPORT_SYMBOL(dma_ops);
138
139 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
140 diff -urNp linux-2.6.35.4/arch/alpha/mm/fault.c linux-2.6.35.4/arch/alpha/mm/fault.c
141 --- linux-2.6.35.4/arch/alpha/mm/fault.c 2010-08-26 19:47:12.000000000 -0400
142 +++ linux-2.6.35.4/arch/alpha/mm/fault.c 2010-09-17 20:12:09.000000000 -0400
143 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
144 __reload_thread(pcb);
145 }
146
147 +#ifdef CONFIG_PAX_PAGEEXEC
148 +/*
149 + * PaX: decide what to do with offenders (regs->pc = fault address)
150 + *
151 + * returns 1 when task should be killed
152 + * 2 when patched PLT trampoline was detected
153 + * 3 when unpatched PLT trampoline was detected
154 + */
155 +static int pax_handle_fetch_fault(struct pt_regs *regs)
156 +{
157 +
158 +#ifdef CONFIG_PAX_EMUPLT
159 + int err;
160 +
161 + do { /* PaX: patched PLT emulation #1 */
162 + unsigned int ldah, ldq, jmp;
163 +
164 + err = get_user(ldah, (unsigned int *)regs->pc);
165 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
166 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
167 +
168 + if (err)
169 + break;
170 +
171 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
172 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
173 + jmp == 0x6BFB0000U)
174 + {
175 + unsigned long r27, addr;
176 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
177 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
178 +
179 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
180 + err = get_user(r27, (unsigned long *)addr);
181 + if (err)
182 + break;
183 +
184 + regs->r27 = r27;
185 + regs->pc = r27;
186 + return 2;
187 + }
188 + } while (0);
189 +
190 + do { /* PaX: patched PLT emulation #2 */
191 + unsigned int ldah, lda, br;
192 +
193 + err = get_user(ldah, (unsigned int *)regs->pc);
194 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
195 + err |= get_user(br, (unsigned int *)(regs->pc+8));
196 +
197 + if (err)
198 + break;
199 +
200 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
201 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
202 + (br & 0xFFE00000U) == 0xC3E00000U)
203 + {
204 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
205 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
206 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
207 +
208 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
209 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
210 + return 2;
211 + }
212 + } while (0);
213 +
214 + do { /* PaX: unpatched PLT emulation */
215 + unsigned int br;
216 +
217 + err = get_user(br, (unsigned int *)regs->pc);
218 +
219 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
220 + unsigned int br2, ldq, nop, jmp;
221 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
222 +
223 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
224 + err = get_user(br2, (unsigned int *)addr);
225 + err |= get_user(ldq, (unsigned int *)(addr+4));
226 + err |= get_user(nop, (unsigned int *)(addr+8));
227 + err |= get_user(jmp, (unsigned int *)(addr+12));
228 + err |= get_user(resolver, (unsigned long *)(addr+16));
229 +
230 + if (err)
231 + break;
232 +
233 + if (br2 == 0xC3600000U &&
234 + ldq == 0xA77B000CU &&
235 + nop == 0x47FF041FU &&
236 + jmp == 0x6B7B0000U)
237 + {
238 + regs->r28 = regs->pc+4;
239 + regs->r27 = addr+16;
240 + regs->pc = resolver;
241 + return 3;
242 + }
243 + }
244 + } while (0);
245 +#endif
246 +
247 + return 1;
248 +}
249 +
250 +void pax_report_insns(void *pc, void *sp)
251 +{
252 + unsigned long i;
253 +
254 + printk(KERN_ERR "PAX: bytes at PC: ");
255 + for (i = 0; i < 5; i++) {
256 + unsigned int c;
257 + if (get_user(c, (unsigned int *)pc+i))
258 + printk(KERN_CONT "???????? ");
259 + else
260 + printk(KERN_CONT "%08x ", c);
261 + }
262 + printk("\n");
263 +}
264 +#endif
265
266 /*
267 * This routine handles page faults. It determines the address,
268 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
269 good_area:
270 si_code = SEGV_ACCERR;
271 if (cause < 0) {
272 - if (!(vma->vm_flags & VM_EXEC))
273 + if (!(vma->vm_flags & VM_EXEC)) {
274 +
275 +#ifdef CONFIG_PAX_PAGEEXEC
276 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
277 + goto bad_area;
278 +
279 + up_read(&mm->mmap_sem);
280 + switch (pax_handle_fetch_fault(regs)) {
281 +
282 +#ifdef CONFIG_PAX_EMUPLT
283 + case 2:
284 + case 3:
285 + return;
286 +#endif
287 +
288 + }
289 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
290 + do_group_exit(SIGKILL);
291 +#else
292 goto bad_area;
293 +#endif
294 +
295 + }
296 } else if (!cause) {
297 /* Allow reads even for write-only mappings */
298 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
299 diff -urNp linux-2.6.35.4/arch/arm/include/asm/elf.h linux-2.6.35.4/arch/arm/include/asm/elf.h
300 --- linux-2.6.35.4/arch/arm/include/asm/elf.h 2010-08-26 19:47:12.000000000 -0400
301 +++ linux-2.6.35.4/arch/arm/include/asm/elf.h 2010-09-17 20:12:09.000000000 -0400
302 @@ -111,7 +111,14 @@ int dump_task_regs(struct task_struct *t
303 the loader. We need to make sure that it is out of the way of the program
304 that it will "exec", and that there is sufficient room for the brk. */
305
306 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
307 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
308 +
309 +#ifdef CONFIG_PAX_ASLR
310 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
311 +
312 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
313 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
314 +#endif
315
316 /* When the program starts, a1 contains a pointer to a function to be
317 registered with atexit, as per the SVR4 ABI. A value of 0 means we
318 diff -urNp linux-2.6.35.4/arch/arm/include/asm/kmap_types.h linux-2.6.35.4/arch/arm/include/asm/kmap_types.h
319 --- linux-2.6.35.4/arch/arm/include/asm/kmap_types.h 2010-08-26 19:47:12.000000000 -0400
320 +++ linux-2.6.35.4/arch/arm/include/asm/kmap_types.h 2010-09-17 20:12:09.000000000 -0400
321 @@ -21,6 +21,7 @@ enum km_type {
322 KM_L1_CACHE,
323 KM_L2_CACHE,
324 KM_KDB,
325 + KM_CLEARPAGE,
326 KM_TYPE_NR
327 };
328
329 diff -urNp linux-2.6.35.4/arch/arm/include/asm/uaccess.h linux-2.6.35.4/arch/arm/include/asm/uaccess.h
330 --- linux-2.6.35.4/arch/arm/include/asm/uaccess.h 2010-08-26 19:47:12.000000000 -0400
331 +++ linux-2.6.35.4/arch/arm/include/asm/uaccess.h 2010-09-17 20:12:09.000000000 -0400
332 @@ -403,6 +403,9 @@ extern unsigned long __must_check __strn
333
334 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
335 {
336 + if ((long)n < 0)
337 + return n;
338 +
339 if (access_ok(VERIFY_READ, from, n))
340 n = __copy_from_user(to, from, n);
341 else /* security hole - plug it */
342 @@ -412,6 +415,9 @@ static inline unsigned long __must_check
343
344 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
345 {
346 + if ((long)n < 0)
347 + return n;
348 +
349 if (access_ok(VERIFY_WRITE, to, n))
350 n = __copy_to_user(to, from, n);
351 return n;
352 diff -urNp linux-2.6.35.4/arch/arm/kernel/kgdb.c linux-2.6.35.4/arch/arm/kernel/kgdb.c
353 --- linux-2.6.35.4/arch/arm/kernel/kgdb.c 2010-08-26 19:47:12.000000000 -0400
354 +++ linux-2.6.35.4/arch/arm/kernel/kgdb.c 2010-09-17 20:12:09.000000000 -0400
355 @@ -208,7 +208,7 @@ void kgdb_arch_exit(void)
356 * and we handle the normal undef case within the do_undefinstr
357 * handler.
358 */
359 -struct kgdb_arch arch_kgdb_ops = {
360 +const struct kgdb_arch arch_kgdb_ops = {
361 #ifndef __ARMEB__
362 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
363 #else /* ! __ARMEB__ */
364 diff -urNp linux-2.6.35.4/arch/arm/mach-at91/pm.c linux-2.6.35.4/arch/arm/mach-at91/pm.c
365 --- linux-2.6.35.4/arch/arm/mach-at91/pm.c 2010-08-26 19:47:12.000000000 -0400
366 +++ linux-2.6.35.4/arch/arm/mach-at91/pm.c 2010-09-17 20:12:09.000000000 -0400
367 @@ -294,7 +294,7 @@ static void at91_pm_end(void)
368 }
369
370
371 -static struct platform_suspend_ops at91_pm_ops ={
372 +static const struct platform_suspend_ops at91_pm_ops ={
373 .valid = at91_pm_valid_state,
374 .begin = at91_pm_begin,
375 .enter = at91_pm_enter,
376 diff -urNp linux-2.6.35.4/arch/arm/mach-davinci/pm.c linux-2.6.35.4/arch/arm/mach-davinci/pm.c
377 --- linux-2.6.35.4/arch/arm/mach-davinci/pm.c 2010-08-26 19:47:12.000000000 -0400
378 +++ linux-2.6.35.4/arch/arm/mach-davinci/pm.c 2010-09-17 20:12:09.000000000 -0400
379 @@ -110,7 +110,7 @@ static int davinci_pm_enter(suspend_stat
380 return ret;
381 }
382
383 -static struct platform_suspend_ops davinci_pm_ops = {
384 +static const struct platform_suspend_ops davinci_pm_ops = {
385 .enter = davinci_pm_enter,
386 .valid = suspend_valid_only_mem,
387 };
388 diff -urNp linux-2.6.35.4/arch/arm/mach-msm/last_radio_log.c linux-2.6.35.4/arch/arm/mach-msm/last_radio_log.c
389 --- linux-2.6.35.4/arch/arm/mach-msm/last_radio_log.c 2010-08-26 19:47:12.000000000 -0400
390 +++ linux-2.6.35.4/arch/arm/mach-msm/last_radio_log.c 2010-09-17 20:12:09.000000000 -0400
391 @@ -47,6 +47,7 @@ static ssize_t last_radio_log_read(struc
392 return count;
393 }
394
395 +/* cannot be const, see msm_init_last_radio_log */
396 static struct file_operations last_radio_log_fops = {
397 .read = last_radio_log_read
398 };
399 diff -urNp linux-2.6.35.4/arch/arm/mach-omap1/pm.c linux-2.6.35.4/arch/arm/mach-omap1/pm.c
400 --- linux-2.6.35.4/arch/arm/mach-omap1/pm.c 2010-08-26 19:47:12.000000000 -0400
401 +++ linux-2.6.35.4/arch/arm/mach-omap1/pm.c 2010-09-17 20:12:09.000000000 -0400
402 @@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq
403
404
405
406 -static struct platform_suspend_ops omap_pm_ops ={
407 +static const struct platform_suspend_ops omap_pm_ops ={
408 .prepare = omap_pm_prepare,
409 .enter = omap_pm_enter,
410 .finish = omap_pm_finish,
411 diff -urNp linux-2.6.35.4/arch/arm/mach-omap2/pm24xx.c linux-2.6.35.4/arch/arm/mach-omap2/pm24xx.c
412 --- linux-2.6.35.4/arch/arm/mach-omap2/pm24xx.c 2010-08-26 19:47:12.000000000 -0400
413 +++ linux-2.6.35.4/arch/arm/mach-omap2/pm24xx.c 2010-09-17 20:12:09.000000000 -0400
414 @@ -325,7 +325,7 @@ static void omap2_pm_finish(void)
415 enable_hlt();
416 }
417
418 -static struct platform_suspend_ops omap_pm_ops = {
419 +static const struct platform_suspend_ops omap_pm_ops = {
420 .prepare = omap2_pm_prepare,
421 .enter = omap2_pm_enter,
422 .finish = omap2_pm_finish,
423 diff -urNp linux-2.6.35.4/arch/arm/mach-omap2/pm34xx.c linux-2.6.35.4/arch/arm/mach-omap2/pm34xx.c
424 --- linux-2.6.35.4/arch/arm/mach-omap2/pm34xx.c 2010-08-26 19:47:12.000000000 -0400
425 +++ linux-2.6.35.4/arch/arm/mach-omap2/pm34xx.c 2010-09-17 20:12:09.000000000 -0400
426 @@ -669,7 +669,7 @@ static void omap3_pm_end(void)
427 return;
428 }
429
430 -static struct platform_suspend_ops omap_pm_ops = {
431 +static const struct platform_suspend_ops omap_pm_ops = {
432 .begin = omap3_pm_begin,
433 .end = omap3_pm_end,
434 .prepare = omap3_pm_prepare,
435 diff -urNp linux-2.6.35.4/arch/arm/mach-pnx4008/pm.c linux-2.6.35.4/arch/arm/mach-pnx4008/pm.c
436 --- linux-2.6.35.4/arch/arm/mach-pnx4008/pm.c 2010-08-26 19:47:12.000000000 -0400
437 +++ linux-2.6.35.4/arch/arm/mach-pnx4008/pm.c 2010-09-17 20:12:09.000000000 -0400
438 @@ -119,7 +119,7 @@ static int pnx4008_pm_valid(suspend_stat
439 (state == PM_SUSPEND_MEM);
440 }
441
442 -static struct platform_suspend_ops pnx4008_pm_ops = {
443 +static const struct platform_suspend_ops pnx4008_pm_ops = {
444 .enter = pnx4008_pm_enter,
445 .valid = pnx4008_pm_valid,
446 };
447 diff -urNp linux-2.6.35.4/arch/arm/mach-pxa/pm.c linux-2.6.35.4/arch/arm/mach-pxa/pm.c
448 --- linux-2.6.35.4/arch/arm/mach-pxa/pm.c 2010-08-26 19:47:12.000000000 -0400
449 +++ linux-2.6.35.4/arch/arm/mach-pxa/pm.c 2010-09-17 20:12:09.000000000 -0400
450 @@ -96,7 +96,7 @@ void pxa_pm_finish(void)
451 pxa_cpu_pm_fns->finish();
452 }
453
454 -static struct platform_suspend_ops pxa_pm_ops = {
455 +static const struct platform_suspend_ops pxa_pm_ops = {
456 .valid = pxa_pm_valid,
457 .enter = pxa_pm_enter,
458 .prepare = pxa_pm_prepare,
459 diff -urNp linux-2.6.35.4/arch/arm/mach-pxa/sharpsl_pm.c linux-2.6.35.4/arch/arm/mach-pxa/sharpsl_pm.c
460 --- linux-2.6.35.4/arch/arm/mach-pxa/sharpsl_pm.c 2010-08-26 19:47:12.000000000 -0400
461 +++ linux-2.6.35.4/arch/arm/mach-pxa/sharpsl_pm.c 2010-09-17 20:12:09.000000000 -0400
462 @@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status
463 }
464
465 #ifdef CONFIG_PM
466 -static struct platform_suspend_ops sharpsl_pm_ops = {
467 +static const struct platform_suspend_ops sharpsl_pm_ops = {
468 .prepare = pxa_pm_prepare,
469 .finish = pxa_pm_finish,
470 .enter = corgi_pxa_pm_enter,
471 diff -urNp linux-2.6.35.4/arch/arm/mach-sa1100/pm.c linux-2.6.35.4/arch/arm/mach-sa1100/pm.c
472 --- linux-2.6.35.4/arch/arm/mach-sa1100/pm.c 2010-08-26 19:47:12.000000000 -0400
473 +++ linux-2.6.35.4/arch/arm/mach-sa1100/pm.c 2010-09-17 20:12:09.000000000 -0400
474 @@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
475 return virt_to_phys(sp);
476 }
477
478 -static struct platform_suspend_ops sa11x0_pm_ops = {
479 +static const struct platform_suspend_ops sa11x0_pm_ops = {
480 .enter = sa11x0_pm_enter,
481 .valid = suspend_valid_only_mem,
482 };
483 diff -urNp linux-2.6.35.4/arch/arm/mm/fault.c linux-2.6.35.4/arch/arm/mm/fault.c
484 --- linux-2.6.35.4/arch/arm/mm/fault.c 2010-08-26 19:47:12.000000000 -0400
485 +++ linux-2.6.35.4/arch/arm/mm/fault.c 2010-09-17 20:12:09.000000000 -0400
486 @@ -167,6 +167,13 @@ __do_user_fault(struct task_struct *tsk,
487 }
488 #endif
489
490 +#ifdef CONFIG_PAX_PAGEEXEC
491 + if (fsr & FSR_LNX_PF) {
492 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
493 + do_group_exit(SIGKILL);
494 + }
495 +#endif
496 +
497 tsk->thread.address = addr;
498 tsk->thread.error_code = fsr;
499 tsk->thread.trap_no = 14;
500 @@ -364,6 +371,33 @@ do_page_fault(unsigned long addr, unsign
501 }
502 #endif /* CONFIG_MMU */
503
504 +#ifdef CONFIG_PAX_PAGEEXEC
505 +void pax_report_insns(void *pc, void *sp)
506 +{
507 + long i;
508 +
509 + printk(KERN_ERR "PAX: bytes at PC: ");
510 + for (i = 0; i < 20; i++) {
511 + unsigned char c;
512 + if (get_user(c, (__force unsigned char __user *)pc+i))
513 + printk(KERN_CONT "?? ");
514 + else
515 + printk(KERN_CONT "%02x ", c);
516 + }
517 + printk("\n");
518 +
519 + printk(KERN_ERR "PAX: bytes at SP-4: ");
520 + for (i = -1; i < 20; i++) {
521 + unsigned long c;
522 + if (get_user(c, (__force unsigned long __user *)sp+i))
523 + printk(KERN_CONT "???????? ");
524 + else
525 + printk(KERN_CONT "%08lx ", c);
526 + }
527 + printk("\n");
528 +}
529 +#endif
530 +
531 /*
532 * First Level Translation Fault Handler
533 *
534 diff -urNp linux-2.6.35.4/arch/arm/mm/mmap.c linux-2.6.35.4/arch/arm/mm/mmap.c
535 --- linux-2.6.35.4/arch/arm/mm/mmap.c 2010-08-26 19:47:12.000000000 -0400
536 +++ linux-2.6.35.4/arch/arm/mm/mmap.c 2010-09-17 20:12:09.000000000 -0400
537 @@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp
538 if (len > TASK_SIZE)
539 return -ENOMEM;
540
541 +#ifdef CONFIG_PAX_RANDMMAP
542 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
543 +#endif
544 +
545 if (addr) {
546 if (do_align)
547 addr = COLOUR_ALIGN(addr, pgoff);
548 @@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp
549 addr = PAGE_ALIGN(addr);
550
551 vma = find_vma(mm, addr);
552 - if (TASK_SIZE - len >= addr &&
553 - (!vma || addr + len <= vma->vm_start))
554 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
555 return addr;
556 }
557 if (len > mm->cached_hole_size) {
558 - start_addr = addr = mm->free_area_cache;
559 + start_addr = addr = mm->free_area_cache;
560 } else {
561 - start_addr = addr = TASK_UNMAPPED_BASE;
562 - mm->cached_hole_size = 0;
563 + start_addr = addr = mm->mmap_base;
564 + mm->cached_hole_size = 0;
565 }
566
567 full_search:
568 @@ -94,14 +97,14 @@ full_search:
569 * Start a new search - just in case we missed
570 * some holes.
571 */
572 - if (start_addr != TASK_UNMAPPED_BASE) {
573 - start_addr = addr = TASK_UNMAPPED_BASE;
574 + if (start_addr != mm->mmap_base) {
575 + start_addr = addr = mm->mmap_base;
576 mm->cached_hole_size = 0;
577 goto full_search;
578 }
579 return -ENOMEM;
580 }
581 - if (!vma || addr + len <= vma->vm_start) {
582 + if (check_heap_stack_gap(vma, addr, len)) {
583 /*
584 * Remember the place where we stopped the search:
585 */
586 diff -urNp linux-2.6.35.4/arch/arm/plat-samsung/pm.c linux-2.6.35.4/arch/arm/plat-samsung/pm.c
587 --- linux-2.6.35.4/arch/arm/plat-samsung/pm.c 2010-08-26 19:47:12.000000000 -0400
588 +++ linux-2.6.35.4/arch/arm/plat-samsung/pm.c 2010-09-17 20:12:09.000000000 -0400
589 @@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
590 s3c_pm_check_cleanup();
591 }
592
593 -static struct platform_suspend_ops s3c_pm_ops = {
594 +static const struct platform_suspend_ops s3c_pm_ops = {
595 .enter = s3c_pm_enter,
596 .prepare = s3c_pm_prepare,
597 .finish = s3c_pm_finish,
598 diff -urNp linux-2.6.35.4/arch/avr32/include/asm/elf.h linux-2.6.35.4/arch/avr32/include/asm/elf.h
599 --- linux-2.6.35.4/arch/avr32/include/asm/elf.h 2010-08-26 19:47:12.000000000 -0400
600 +++ linux-2.6.35.4/arch/avr32/include/asm/elf.h 2010-09-17 20:12:09.000000000 -0400
601 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpreg
602 the loader. We need to make sure that it is out of the way of the program
603 that it will "exec", and that there is sufficient room for the brk. */
604
605 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
606 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
607
608 +#ifdef CONFIG_PAX_ASLR
609 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
610 +
611 +#define PAX_DELTA_MMAP_LEN 15
612 +#define PAX_DELTA_STACK_LEN 15
613 +#endif
614
615 /* This yields a mask that user programs can use to figure out what
616 instruction set this CPU supports. This could be done in user space,
617 diff -urNp linux-2.6.35.4/arch/avr32/include/asm/kmap_types.h linux-2.6.35.4/arch/avr32/include/asm/kmap_types.h
618 --- linux-2.6.35.4/arch/avr32/include/asm/kmap_types.h 2010-08-26 19:47:12.000000000 -0400
619 +++ linux-2.6.35.4/arch/avr32/include/asm/kmap_types.h 2010-09-17 20:12:09.000000000 -0400
620 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
621 D(11) KM_IRQ1,
622 D(12) KM_SOFTIRQ0,
623 D(13) KM_SOFTIRQ1,
624 -D(14) KM_TYPE_NR
625 +D(14) KM_CLEARPAGE,
626 +D(15) KM_TYPE_NR
627 };
628
629 #undef D
630 diff -urNp linux-2.6.35.4/arch/avr32/mach-at32ap/pm.c linux-2.6.35.4/arch/avr32/mach-at32ap/pm.c
631 --- linux-2.6.35.4/arch/avr32/mach-at32ap/pm.c 2010-08-26 19:47:12.000000000 -0400
632 +++ linux-2.6.35.4/arch/avr32/mach-at32ap/pm.c 2010-09-17 20:12:09.000000000 -0400
633 @@ -176,7 +176,7 @@ out:
634 return 0;
635 }
636
637 -static struct platform_suspend_ops avr32_pm_ops = {
638 +static const struct platform_suspend_ops avr32_pm_ops = {
639 .valid = avr32_pm_valid_state,
640 .enter = avr32_pm_enter,
641 };
642 diff -urNp linux-2.6.35.4/arch/avr32/mm/fault.c linux-2.6.35.4/arch/avr32/mm/fault.c
643 --- linux-2.6.35.4/arch/avr32/mm/fault.c 2010-08-26 19:47:12.000000000 -0400
644 +++ linux-2.6.35.4/arch/avr32/mm/fault.c 2010-09-17 20:12:09.000000000 -0400
645 @@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
646
647 int exception_trace = 1;
648
649 +#ifdef CONFIG_PAX_PAGEEXEC
650 +void pax_report_insns(void *pc, void *sp)
651 +{
652 + unsigned long i;
653 +
654 + printk(KERN_ERR "PAX: bytes at PC: ");
655 + for (i = 0; i < 20; i++) {
656 + unsigned char c;
657 + if (get_user(c, (unsigned char *)pc+i))
658 + printk(KERN_CONT "???????? ");
659 + else
660 + printk(KERN_CONT "%02x ", c);
661 + }
662 + printk("\n");
663 +}
664 +#endif
665 +
666 /*
667 * This routine handles page faults. It determines the address and the
668 * problem, and then passes it off to one of the appropriate routines.
669 @@ -157,6 +174,16 @@ bad_area:
670 up_read(&mm->mmap_sem);
671
672 if (user_mode(regs)) {
673 +
674 +#ifdef CONFIG_PAX_PAGEEXEC
675 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
676 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
677 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
678 + do_group_exit(SIGKILL);
679 + }
680 + }
681 +#endif
682 +
683 if (exception_trace && printk_ratelimit())
684 printk("%s%s[%d]: segfault at %08lx pc %08lx "
685 "sp %08lx ecr %lu\n",
686 diff -urNp linux-2.6.35.4/arch/blackfin/kernel/kgdb.c linux-2.6.35.4/arch/blackfin/kernel/kgdb.c
687 --- linux-2.6.35.4/arch/blackfin/kernel/kgdb.c 2010-08-26 19:47:12.000000000 -0400
688 +++ linux-2.6.35.4/arch/blackfin/kernel/kgdb.c 2010-09-17 20:12:09.000000000 -0400
689 @@ -397,7 +397,7 @@ int kgdb_arch_handle_exception(int vecto
690 return -1; /* this means that we do not want to exit from the handler */
691 }
692
693 -struct kgdb_arch arch_kgdb_ops = {
694 +const struct kgdb_arch arch_kgdb_ops = {
695 .gdb_bpt_instr = {0xa1},
696 #ifdef CONFIG_SMP
697 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
698 diff -urNp linux-2.6.35.4/arch/blackfin/mach-common/pm.c linux-2.6.35.4/arch/blackfin/mach-common/pm.c
699 --- linux-2.6.35.4/arch/blackfin/mach-common/pm.c 2010-08-26 19:47:12.000000000 -0400
700 +++ linux-2.6.35.4/arch/blackfin/mach-common/pm.c 2010-09-17 20:12:09.000000000 -0400
701 @@ -232,7 +232,7 @@ static int bfin_pm_enter(suspend_state_t
702 return 0;
703 }
704
705 -struct platform_suspend_ops bfin_pm_ops = {
706 +const struct platform_suspend_ops bfin_pm_ops = {
707 .enter = bfin_pm_enter,
708 .valid = bfin_pm_valid,
709 };
710 diff -urNp linux-2.6.35.4/arch/blackfin/mm/maccess.c linux-2.6.35.4/arch/blackfin/mm/maccess.c
711 --- linux-2.6.35.4/arch/blackfin/mm/maccess.c 2010-08-26 19:47:12.000000000 -0400
712 +++ linux-2.6.35.4/arch/blackfin/mm/maccess.c 2010-09-17 20:12:09.000000000 -0400
713 @@ -16,7 +16,7 @@ static int validate_memory_access_addres
714 return bfin_mem_access_type(addr, size);
715 }
716
717 -long probe_kernel_read(void *dst, void *src, size_t size)
718 +long probe_kernel_read(void *dst, const void *src, size_t size)
719 {
720 unsigned long lsrc = (unsigned long)src;
721 int mem_type;
722 @@ -55,7 +55,7 @@ long probe_kernel_read(void *dst, void *
723 return -EFAULT;
724 }
725
726 -long probe_kernel_write(void *dst, void *src, size_t size)
727 +long probe_kernel_write(void *dst, const void *src, size_t size)
728 {
729 unsigned long ldst = (unsigned long)dst;
730 int mem_type;
731 diff -urNp linux-2.6.35.4/arch/frv/include/asm/kmap_types.h linux-2.6.35.4/arch/frv/include/asm/kmap_types.h
732 --- linux-2.6.35.4/arch/frv/include/asm/kmap_types.h 2010-08-26 19:47:12.000000000 -0400
733 +++ linux-2.6.35.4/arch/frv/include/asm/kmap_types.h 2010-09-17 20:12:09.000000000 -0400
734 @@ -23,6 +23,7 @@ enum km_type {
735 KM_IRQ1,
736 KM_SOFTIRQ0,
737 KM_SOFTIRQ1,
738 + KM_CLEARPAGE,
739 KM_TYPE_NR
740 };
741
742 diff -urNp linux-2.6.35.4/arch/frv/mm/elf-fdpic.c linux-2.6.35.4/arch/frv/mm/elf-fdpic.c
743 --- linux-2.6.35.4/arch/frv/mm/elf-fdpic.c 2010-08-26 19:47:12.000000000 -0400
744 +++ linux-2.6.35.4/arch/frv/mm/elf-fdpic.c 2010-09-17 20:12:09.000000000 -0400
745 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
746 if (addr) {
747 addr = PAGE_ALIGN(addr);
748 vma = find_vma(current->mm, addr);
749 - if (TASK_SIZE - len >= addr &&
750 - (!vma || addr + len <= vma->vm_start))
751 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
752 goto success;
753 }
754
755 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
756 for (; vma; vma = vma->vm_next) {
757 if (addr > limit)
758 break;
759 - if (addr + len <= vma->vm_start)
760 + if (check_heap_stack_gap(vma, addr, len))
761 goto success;
762 addr = vma->vm_end;
763 }
764 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
765 for (; vma; vma = vma->vm_next) {
766 if (addr > limit)
767 break;
768 - if (addr + len <= vma->vm_start)
769 + if (check_heap_stack_gap(vma, addr, len))
770 goto success;
771 addr = vma->vm_end;
772 }
773 diff -urNp linux-2.6.35.4/arch/ia64/hp/common/hwsw_iommu.c linux-2.6.35.4/arch/ia64/hp/common/hwsw_iommu.c
774 --- linux-2.6.35.4/arch/ia64/hp/common/hwsw_iommu.c 2010-08-26 19:47:12.000000000 -0400
775 +++ linux-2.6.35.4/arch/ia64/hp/common/hwsw_iommu.c 2010-09-17 20:12:09.000000000 -0400
776 @@ -17,7 +17,7 @@
777 #include <linux/swiotlb.h>
778 #include <asm/machvec.h>
779
780 -extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
781 +extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
782
783 /* swiotlb declarations & definitions: */
784 extern int swiotlb_late_init_with_default_size (size_t size);
785 @@ -33,7 +33,7 @@ static inline int use_swiotlb(struct dev
786 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
787 }
788
789 -struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
790 +const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
791 {
792 if (use_swiotlb(dev))
793 return &swiotlb_dma_ops;
794 diff -urNp linux-2.6.35.4/arch/ia64/hp/common/sba_iommu.c linux-2.6.35.4/arch/ia64/hp/common/sba_iommu.c
795 --- linux-2.6.35.4/arch/ia64/hp/common/sba_iommu.c 2010-08-26 19:47:12.000000000 -0400
796 +++ linux-2.6.35.4/arch/ia64/hp/common/sba_iommu.c 2010-09-17 20:12:09.000000000 -0400
797 @@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_d
798 },
799 };
800
801 -extern struct dma_map_ops swiotlb_dma_ops;
802 +extern const struct dma_map_ops swiotlb_dma_ops;
803
804 static int __init
805 sba_init(void)
806 @@ -2211,7 +2211,7 @@ sba_page_override(char *str)
807
808 __setup("sbapagesize=",sba_page_override);
809
810 -struct dma_map_ops sba_dma_ops = {
811 +const struct dma_map_ops sba_dma_ops = {
812 .alloc_coherent = sba_alloc_coherent,
813 .free_coherent = sba_free_coherent,
814 .map_page = sba_map_page,
815 diff -urNp linux-2.6.35.4/arch/ia64/include/asm/compat.h linux-2.6.35.4/arch/ia64/include/asm/compat.h
816 --- linux-2.6.35.4/arch/ia64/include/asm/compat.h 2010-08-26 19:47:12.000000000 -0400
817 +++ linux-2.6.35.4/arch/ia64/include/asm/compat.h 2010-09-17 20:12:37.000000000 -0400
818 @@ -199,7 +199,7 @@ ptr_to_compat(void __user *uptr)
819 }
820
821 static __inline__ void __user *
822 -compat_alloc_user_space (long len)
823 +arch_compat_alloc_user_space (long len)
824 {
825 struct pt_regs *regs = task_pt_regs(current);
826 return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len);
827 diff -urNp linux-2.6.35.4/arch/ia64/include/asm/dma-mapping.h linux-2.6.35.4/arch/ia64/include/asm/dma-mapping.h
828 --- linux-2.6.35.4/arch/ia64/include/asm/dma-mapping.h 2010-08-26 19:47:12.000000000 -0400
829 +++ linux-2.6.35.4/arch/ia64/include/asm/dma-mapping.h 2010-09-17 20:12:09.000000000 -0400
830 @@ -12,7 +12,7 @@
831
832 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
833
834 -extern struct dma_map_ops *dma_ops;
835 +extern const struct dma_map_ops *dma_ops;
836 extern struct ia64_machine_vector ia64_mv;
837 extern void set_iommu_machvec(void);
838
839 @@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct d
840 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
841 dma_addr_t *daddr, gfp_t gfp)
842 {
843 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
844 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
845 void *caddr;
846
847 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
848 @@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(s
849 static inline void dma_free_coherent(struct device *dev, size_t size,
850 void *caddr, dma_addr_t daddr)
851 {
852 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
853 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
854 debug_dma_free_coherent(dev, size, caddr, daddr);
855 ops->free_coherent(dev, size, caddr, daddr);
856 }
857 @@ -49,13 +49,13 @@ static inline void dma_free_coherent(str
858
859 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
860 {
861 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
862 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
863 return ops->mapping_error(dev, daddr);
864 }
865
866 static inline int dma_supported(struct device *dev, u64 mask)
867 {
868 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
869 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
870 return ops->dma_supported(dev, mask);
871 }
872
873 diff -urNp linux-2.6.35.4/arch/ia64/include/asm/elf.h linux-2.6.35.4/arch/ia64/include/asm/elf.h
874 --- linux-2.6.35.4/arch/ia64/include/asm/elf.h 2010-08-26 19:47:12.000000000 -0400
875 +++ linux-2.6.35.4/arch/ia64/include/asm/elf.h 2010-09-17 20:12:09.000000000 -0400
876 @@ -42,6 +42,13 @@
877 */
878 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
879
880 +#ifdef CONFIG_PAX_ASLR
881 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
882 +
883 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
884 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
885 +#endif
886 +
887 #define PT_IA_64_UNWIND 0x70000001
888
889 /* IA-64 relocations: */
890 diff -urNp linux-2.6.35.4/arch/ia64/include/asm/machvec.h linux-2.6.35.4/arch/ia64/include/asm/machvec.h
891 --- linux-2.6.35.4/arch/ia64/include/asm/machvec.h 2010-08-26 19:47:12.000000000 -0400
892 +++ linux-2.6.35.4/arch/ia64/include/asm/machvec.h 2010-09-17 20:12:09.000000000 -0400
893 @@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event
894 /* DMA-mapping interface: */
895 typedef void ia64_mv_dma_init (void);
896 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
897 -typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
898 +typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
899
900 /*
901 * WARNING: The legacy I/O space is _architected_. Platforms are
902 @@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(co
903 # endif /* CONFIG_IA64_GENERIC */
904
905 extern void swiotlb_dma_init(void);
906 -extern struct dma_map_ops *dma_get_ops(struct device *);
907 +extern const struct dma_map_ops *dma_get_ops(struct device *);
908
909 /*
910 * Define default versions so we can extend machvec for new platforms without having
911 diff -urNp linux-2.6.35.4/arch/ia64/include/asm/pgtable.h linux-2.6.35.4/arch/ia64/include/asm/pgtable.h
912 --- linux-2.6.35.4/arch/ia64/include/asm/pgtable.h 2010-08-26 19:47:12.000000000 -0400
913 +++ linux-2.6.35.4/arch/ia64/include/asm/pgtable.h 2010-09-17 20:12:09.000000000 -0400
914 @@ -12,7 +12,7 @@
915 * David Mosberger-Tang <davidm@hpl.hp.com>
916 */
917
918 -
919 +#include <linux/const.h>
920 #include <asm/mman.h>
921 #include <asm/page.h>
922 #include <asm/processor.h>
923 @@ -143,6 +143,17 @@
924 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
925 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
926 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
927 +
928 +#ifdef CONFIG_PAX_PAGEEXEC
929 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
930 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
931 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
932 +#else
933 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
934 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
935 +# define PAGE_COPY_NOEXEC PAGE_COPY
936 +#endif
937 +
938 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
939 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
940 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
941 diff -urNp linux-2.6.35.4/arch/ia64/include/asm/uaccess.h linux-2.6.35.4/arch/ia64/include/asm/uaccess.h
942 --- linux-2.6.35.4/arch/ia64/include/asm/uaccess.h 2010-08-26 19:47:12.000000000 -0400
943 +++ linux-2.6.35.4/arch/ia64/include/asm/uaccess.h 2010-09-17 20:12:09.000000000 -0400
944 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
945 const void *__cu_from = (from); \
946 long __cu_len = (n); \
947 \
948 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
949 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
950 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
951 __cu_len; \
952 })
953 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
954 long __cu_len = (n); \
955 \
956 __chk_user_ptr(__cu_from); \
957 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
958 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
959 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
960 __cu_len; \
961 })
962 diff -urNp linux-2.6.35.4/arch/ia64/kernel/dma-mapping.c linux-2.6.35.4/arch/ia64/kernel/dma-mapping.c
963 --- linux-2.6.35.4/arch/ia64/kernel/dma-mapping.c 2010-08-26 19:47:12.000000000 -0400
964 +++ linux-2.6.35.4/arch/ia64/kernel/dma-mapping.c 2010-09-17 20:12:09.000000000 -0400
965 @@ -3,7 +3,7 @@
966 /* Set this to 1 if there is a HW IOMMU in the system */
967 int iommu_detected __read_mostly;
968
969 -struct dma_map_ops *dma_ops;
970 +const struct dma_map_ops *dma_ops;
971 EXPORT_SYMBOL(dma_ops);
972
973 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
974 @@ -16,7 +16,7 @@ static int __init dma_init(void)
975 }
976 fs_initcall(dma_init);
977
978 -struct dma_map_ops *dma_get_ops(struct device *dev)
979 +const struct dma_map_ops *dma_get_ops(struct device *dev)
980 {
981 return dma_ops;
982 }
983 diff -urNp linux-2.6.35.4/arch/ia64/kernel/module.c linux-2.6.35.4/arch/ia64/kernel/module.c
984 --- linux-2.6.35.4/arch/ia64/kernel/module.c 2010-08-26 19:47:12.000000000 -0400
985 +++ linux-2.6.35.4/arch/ia64/kernel/module.c 2010-09-17 20:12:09.000000000 -0400
986 @@ -315,8 +315,7 @@ module_alloc (unsigned long size)
987 void
988 module_free (struct module *mod, void *module_region)
989 {
990 - if (mod && mod->arch.init_unw_table &&
991 - module_region == mod->module_init) {
992 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
993 unw_remove_unwind_table(mod->arch.init_unw_table);
994 mod->arch.init_unw_table = NULL;
995 }
996 @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
997 }
998
999 static inline int
1000 +in_init_rx (const struct module *mod, uint64_t addr)
1001 +{
1002 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1003 +}
1004 +
1005 +static inline int
1006 +in_init_rw (const struct module *mod, uint64_t addr)
1007 +{
1008 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1009 +}
1010 +
1011 +static inline int
1012 in_init (const struct module *mod, uint64_t addr)
1013 {
1014 - return addr - (uint64_t) mod->module_init < mod->init_size;
1015 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1016 +}
1017 +
1018 +static inline int
1019 +in_core_rx (const struct module *mod, uint64_t addr)
1020 +{
1021 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1022 +}
1023 +
1024 +static inline int
1025 +in_core_rw (const struct module *mod, uint64_t addr)
1026 +{
1027 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1028 }
1029
1030 static inline int
1031 in_core (const struct module *mod, uint64_t addr)
1032 {
1033 - return addr - (uint64_t) mod->module_core < mod->core_size;
1034 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1035 }
1036
1037 static inline int
1038 @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
1039 break;
1040
1041 case RV_BDREL:
1042 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1043 + if (in_init_rx(mod, val))
1044 + val -= (uint64_t) mod->module_init_rx;
1045 + else if (in_init_rw(mod, val))
1046 + val -= (uint64_t) mod->module_init_rw;
1047 + else if (in_core_rx(mod, val))
1048 + val -= (uint64_t) mod->module_core_rx;
1049 + else if (in_core_rw(mod, val))
1050 + val -= (uint64_t) mod->module_core_rw;
1051 break;
1052
1053 case RV_LTV:
1054 @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
1055 * addresses have been selected...
1056 */
1057 uint64_t gp;
1058 - if (mod->core_size > MAX_LTOFF)
1059 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1060 /*
1061 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1062 * at the end of the module.
1063 */
1064 - gp = mod->core_size - MAX_LTOFF / 2;
1065 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1066 else
1067 - gp = mod->core_size / 2;
1068 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1069 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1070 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1071 mod->arch.gp = gp;
1072 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1073 }
1074 diff -urNp linux-2.6.35.4/arch/ia64/kernel/pci-dma.c linux-2.6.35.4/arch/ia64/kernel/pci-dma.c
1075 --- linux-2.6.35.4/arch/ia64/kernel/pci-dma.c 2010-08-26 19:47:12.000000000 -0400
1076 +++ linux-2.6.35.4/arch/ia64/kernel/pci-dma.c 2010-09-17 20:12:09.000000000 -0400
1077 @@ -43,7 +43,7 @@ struct device fallback_dev = {
1078 .dma_mask = &fallback_dev.coherent_dma_mask,
1079 };
1080
1081 -extern struct dma_map_ops intel_dma_ops;
1082 +extern const struct dma_map_ops intel_dma_ops;
1083
1084 static int __init pci_iommu_init(void)
1085 {
1086 diff -urNp linux-2.6.35.4/arch/ia64/kernel/pci-swiotlb.c linux-2.6.35.4/arch/ia64/kernel/pci-swiotlb.c
1087 --- linux-2.6.35.4/arch/ia64/kernel/pci-swiotlb.c 2010-08-26 19:47:12.000000000 -0400
1088 +++ linux-2.6.35.4/arch/ia64/kernel/pci-swiotlb.c 2010-09-17 20:12:09.000000000 -0400
1089 @@ -22,7 +22,7 @@ static void *ia64_swiotlb_alloc_coherent
1090 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1091 }
1092
1093 -struct dma_map_ops swiotlb_dma_ops = {
1094 +const struct dma_map_ops swiotlb_dma_ops = {
1095 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1096 .free_coherent = swiotlb_free_coherent,
1097 .map_page = swiotlb_map_page,
1098 diff -urNp linux-2.6.35.4/arch/ia64/kernel/sys_ia64.c linux-2.6.35.4/arch/ia64/kernel/sys_ia64.c
1099 --- linux-2.6.35.4/arch/ia64/kernel/sys_ia64.c 2010-08-26 19:47:12.000000000 -0400
1100 +++ linux-2.6.35.4/arch/ia64/kernel/sys_ia64.c 2010-09-17 20:12:09.000000000 -0400
1101 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
1102 if (REGION_NUMBER(addr) == RGN_HPAGE)
1103 addr = 0;
1104 #endif
1105 +
1106 +#ifdef CONFIG_PAX_RANDMMAP
1107 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1108 + addr = mm->free_area_cache;
1109 + else
1110 +#endif
1111 +
1112 if (!addr)
1113 addr = mm->free_area_cache;
1114
1115 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
1116 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1117 /* At this point: (!vma || addr < vma->vm_end). */
1118 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1119 - if (start_addr != TASK_UNMAPPED_BASE) {
1120 + if (start_addr != mm->mmap_base) {
1121 /* Start a new search --- just in case we missed some holes. */
1122 - addr = TASK_UNMAPPED_BASE;
1123 + addr = mm->mmap_base;
1124 goto full_search;
1125 }
1126 return -ENOMEM;
1127 }
1128 - if (!vma || addr + len <= vma->vm_start) {
1129 + if (check_heap_stack_gap(vma, addr, len)) {
1130 /* Remember the address where we stopped this search: */
1131 mm->free_area_cache = addr + len;
1132 return addr;
1133 diff -urNp linux-2.6.35.4/arch/ia64/kernel/vmlinux.lds.S linux-2.6.35.4/arch/ia64/kernel/vmlinux.lds.S
1134 --- linux-2.6.35.4/arch/ia64/kernel/vmlinux.lds.S 2010-08-26 19:47:12.000000000 -0400
1135 +++ linux-2.6.35.4/arch/ia64/kernel/vmlinux.lds.S 2010-09-17 20:12:09.000000000 -0400
1136 @@ -196,7 +196,7 @@ SECTIONS
1137 /* Per-cpu data: */
1138 . = ALIGN(PERCPU_PAGE_SIZE);
1139 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1140 - __phys_per_cpu_start = __per_cpu_load;
1141 + __phys_per_cpu_start = per_cpu_load;
1142 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1143 * into percpu page size
1144 */
1145 diff -urNp linux-2.6.35.4/arch/ia64/mm/fault.c linux-2.6.35.4/arch/ia64/mm/fault.c
1146 --- linux-2.6.35.4/arch/ia64/mm/fault.c 2010-08-26 19:47:12.000000000 -0400
1147 +++ linux-2.6.35.4/arch/ia64/mm/fault.c 2010-09-17 20:12:09.000000000 -0400
1148 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned
1149 return pte_present(pte);
1150 }
1151
1152 +#ifdef CONFIG_PAX_PAGEEXEC
1153 +void pax_report_insns(void *pc, void *sp)
1154 +{
1155 + unsigned long i;
1156 +
1157 + printk(KERN_ERR "PAX: bytes at PC: ");
1158 + for (i = 0; i < 8; i++) {
1159 + unsigned int c;
1160 + if (get_user(c, (unsigned int *)pc+i))
1161 + printk(KERN_CONT "???????? ");
1162 + else
1163 + printk(KERN_CONT "%08x ", c);
1164 + }
1165 + printk("\n");
1166 +}
1167 +#endif
1168 +
1169 void __kprobes
1170 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1171 {
1172 @@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres
1173 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1174 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1175
1176 - if ((vma->vm_flags & mask) != mask)
1177 + if ((vma->vm_flags & mask) != mask) {
1178 +
1179 +#ifdef CONFIG_PAX_PAGEEXEC
1180 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1181 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1182 + goto bad_area;
1183 +
1184 + up_read(&mm->mmap_sem);
1185 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1186 + do_group_exit(SIGKILL);
1187 + }
1188 +#endif
1189 +
1190 goto bad_area;
1191
1192 + }
1193 +
1194 /*
1195 * If for any reason at all we couldn't handle the fault, make
1196 * sure we exit gracefully rather than endlessly redo the
1197 diff -urNp linux-2.6.35.4/arch/ia64/mm/hugetlbpage.c linux-2.6.35.4/arch/ia64/mm/hugetlbpage.c
1198 --- linux-2.6.35.4/arch/ia64/mm/hugetlbpage.c 2010-08-26 19:47:12.000000000 -0400
1199 +++ linux-2.6.35.4/arch/ia64/mm/hugetlbpage.c 2010-09-17 20:12:09.000000000 -0400
1200 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(
1201 /* At this point: (!vmm || addr < vmm->vm_end). */
1202 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1203 return -ENOMEM;
1204 - if (!vmm || (addr + len) <= vmm->vm_start)
1205 + if (check_heap_stack_gap(vmm, addr, len))
1206 return addr;
1207 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1208 }
1209 diff -urNp linux-2.6.35.4/arch/ia64/mm/init.c linux-2.6.35.4/arch/ia64/mm/init.c
1210 --- linux-2.6.35.4/arch/ia64/mm/init.c 2010-08-26 19:47:12.000000000 -0400
1211 +++ linux-2.6.35.4/arch/ia64/mm/init.c 2010-09-17 20:12:09.000000000 -0400
1212 @@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1213 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1214 vma->vm_end = vma->vm_start + PAGE_SIZE;
1215 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1216 +
1217 +#ifdef CONFIG_PAX_PAGEEXEC
1218 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1219 + vma->vm_flags &= ~VM_EXEC;
1220 +
1221 +#ifdef CONFIG_PAX_MPROTECT
1222 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1223 + vma->vm_flags &= ~VM_MAYEXEC;
1224 +#endif
1225 +
1226 + }
1227 +#endif
1228 +
1229 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1230 down_write(&current->mm->mmap_sem);
1231 if (insert_vm_struct(current->mm, vma)) {
1232 diff -urNp linux-2.6.35.4/arch/ia64/sn/pci/pci_dma.c linux-2.6.35.4/arch/ia64/sn/pci/pci_dma.c
1233 --- linux-2.6.35.4/arch/ia64/sn/pci/pci_dma.c 2010-08-26 19:47:12.000000000 -0400
1234 +++ linux-2.6.35.4/arch/ia64/sn/pci/pci_dma.c 2010-09-17 20:12:09.000000000 -0400
1235 @@ -465,7 +465,7 @@ int sn_pci_legacy_write(struct pci_bus *
1236 return ret;
1237 }
1238
1239 -static struct dma_map_ops sn_dma_ops = {
1240 +static const struct dma_map_ops sn_dma_ops = {
1241 .alloc_coherent = sn_dma_alloc_coherent,
1242 .free_coherent = sn_dma_free_coherent,
1243 .map_page = sn_dma_map_page,
1244 diff -urNp linux-2.6.35.4/arch/m32r/lib/usercopy.c linux-2.6.35.4/arch/m32r/lib/usercopy.c
1245 --- linux-2.6.35.4/arch/m32r/lib/usercopy.c 2010-08-26 19:47:12.000000000 -0400
1246 +++ linux-2.6.35.4/arch/m32r/lib/usercopy.c 2010-09-17 20:12:09.000000000 -0400
1247 @@ -14,6 +14,9 @@
1248 unsigned long
1249 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1250 {
1251 + if ((long)n < 0)
1252 + return n;
1253 +
1254 prefetch(from);
1255 if (access_ok(VERIFY_WRITE, to, n))
1256 __copy_user(to,from,n);
1257 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1258 unsigned long
1259 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1260 {
1261 + if ((long)n < 0)
1262 + return n;
1263 +
1264 prefetchw(to);
1265 if (access_ok(VERIFY_READ, from, n))
1266 __copy_user_zeroing(to,from,n);
1267 diff -urNp linux-2.6.35.4/arch/microblaze/include/asm/device.h linux-2.6.35.4/arch/microblaze/include/asm/device.h
1268 --- linux-2.6.35.4/arch/microblaze/include/asm/device.h 2010-08-26 19:47:12.000000000 -0400
1269 +++ linux-2.6.35.4/arch/microblaze/include/asm/device.h 2010-09-17 20:12:09.000000000 -0400
1270 @@ -13,7 +13,7 @@ struct device_node;
1271
1272 struct dev_archdata {
1273 /* DMA operations on that device */
1274 - struct dma_map_ops *dma_ops;
1275 + const struct dma_map_ops *dma_ops;
1276 void *dma_data;
1277 };
1278
1279 diff -urNp linux-2.6.35.4/arch/microblaze/include/asm/dma-mapping.h linux-2.6.35.4/arch/microblaze/include/asm/dma-mapping.h
1280 --- linux-2.6.35.4/arch/microblaze/include/asm/dma-mapping.h 2010-08-26 19:47:12.000000000 -0400
1281 +++ linux-2.6.35.4/arch/microblaze/include/asm/dma-mapping.h 2010-09-17 20:12:09.000000000 -0400
1282 @@ -43,14 +43,14 @@ static inline unsigned long device_to_ma
1283 return 0xfffffffful;
1284 }
1285
1286 -extern struct dma_map_ops *dma_ops;
1287 +extern const struct dma_map_ops *dma_ops;
1288
1289 /*
1290 * Available generic sets of operations
1291 */
1292 -extern struct dma_map_ops dma_direct_ops;
1293 +extern const struct dma_map_ops dma_direct_ops;
1294
1295 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
1296 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
1297 {
1298 /* We don't handle the NULL dev case for ISA for now. We could
1299 * do it via an out of line call but it is not needed for now. The
1300 @@ -63,14 +63,14 @@ static inline struct dma_map_ops *get_dm
1301 return dev->archdata.dma_ops;
1302 }
1303
1304 -static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
1305 +static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
1306 {
1307 dev->archdata.dma_ops = ops;
1308 }
1309
1310 static inline int dma_supported(struct device *dev, u64 mask)
1311 {
1312 - struct dma_map_ops *ops = get_dma_ops(dev);
1313 + const struct dma_map_ops *ops = get_dma_ops(dev);
1314
1315 if (unlikely(!ops))
1316 return 0;
1317 @@ -87,7 +87,7 @@ static inline int dma_supported(struct d
1318
1319 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
1320 {
1321 - struct dma_map_ops *ops = get_dma_ops(dev);
1322 + const struct dma_map_ops *ops = get_dma_ops(dev);
1323
1324 if (unlikely(ops == NULL))
1325 return -EIO;
1326 @@ -103,7 +103,7 @@ static inline int dma_set_mask(struct de
1327
1328 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
1329 {
1330 - struct dma_map_ops *ops = get_dma_ops(dev);
1331 + const struct dma_map_ops *ops = get_dma_ops(dev);
1332 if (ops->mapping_error)
1333 return ops->mapping_error(dev, dma_addr);
1334
1335 @@ -117,7 +117,7 @@ static inline int dma_mapping_error(stru
1336 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1337 dma_addr_t *dma_handle, gfp_t flag)
1338 {
1339 - struct dma_map_ops *ops = get_dma_ops(dev);
1340 + const struct dma_map_ops *ops = get_dma_ops(dev);
1341 void *memory;
1342
1343 BUG_ON(!ops);
1344 @@ -131,7 +131,7 @@ static inline void *dma_alloc_coherent(s
1345 static inline void dma_free_coherent(struct device *dev, size_t size,
1346 void *cpu_addr, dma_addr_t dma_handle)
1347 {
1348 - struct dma_map_ops *ops = get_dma_ops(dev);
1349 + const struct dma_map_ops *ops = get_dma_ops(dev);
1350
1351 BUG_ON(!ops);
1352 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
1353 diff -urNp linux-2.6.35.4/arch/microblaze/include/asm/pci.h linux-2.6.35.4/arch/microblaze/include/asm/pci.h
1354 --- linux-2.6.35.4/arch/microblaze/include/asm/pci.h 2010-08-26 19:47:12.000000000 -0400
1355 +++ linux-2.6.35.4/arch/microblaze/include/asm/pci.h 2010-09-17 20:12:09.000000000 -0400
1356 @@ -54,8 +54,8 @@ static inline void pcibios_penalize_isa_
1357 }
1358
1359 #ifdef CONFIG_PCI
1360 -extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
1361 -extern struct dma_map_ops *get_pci_dma_ops(void);
1362 +extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
1363 +extern const struct dma_map_ops *get_pci_dma_ops(void);
1364 #else /* CONFIG_PCI */
1365 #define set_pci_dma_ops(d)
1366 #define get_pci_dma_ops() NULL
1367 diff -urNp linux-2.6.35.4/arch/microblaze/kernel/dma.c linux-2.6.35.4/arch/microblaze/kernel/dma.c
1368 --- linux-2.6.35.4/arch/microblaze/kernel/dma.c 2010-08-26 19:47:12.000000000 -0400
1369 +++ linux-2.6.35.4/arch/microblaze/kernel/dma.c 2010-09-17 20:12:09.000000000 -0400
1370 @@ -133,7 +133,7 @@ static inline void dma_direct_unmap_page
1371 __dma_sync_page(dma_address, 0 , size, direction);
1372 }
1373
1374 -struct dma_map_ops dma_direct_ops = {
1375 +const struct dma_map_ops dma_direct_ops = {
1376 .alloc_coherent = dma_direct_alloc_coherent,
1377 .free_coherent = dma_direct_free_coherent,
1378 .map_sg = dma_direct_map_sg,
1379 diff -urNp linux-2.6.35.4/arch/microblaze/pci/pci-common.c linux-2.6.35.4/arch/microblaze/pci/pci-common.c
1380 --- linux-2.6.35.4/arch/microblaze/pci/pci-common.c 2010-08-26 19:47:12.000000000 -0400
1381 +++ linux-2.6.35.4/arch/microblaze/pci/pci-common.c 2010-09-17 20:12:09.000000000 -0400
1382 @@ -46,14 +46,14 @@ resource_size_t isa_mem_base;
1383 /* Default PCI flags is 0 on ppc32, modified at boot on ppc64 */
1384 unsigned int pci_flags;
1385
1386 -static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
1387 +static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
1388
1389 -void set_pci_dma_ops(struct dma_map_ops *dma_ops)
1390 +void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
1391 {
1392 pci_dma_ops = dma_ops;
1393 }
1394
1395 -struct dma_map_ops *get_pci_dma_ops(void)
1396 +const struct dma_map_ops *get_pci_dma_ops(void)
1397 {
1398 return pci_dma_ops;
1399 }
1400 diff -urNp linux-2.6.35.4/arch/mips/alchemy/devboards/pm.c linux-2.6.35.4/arch/mips/alchemy/devboards/pm.c
1401 --- linux-2.6.35.4/arch/mips/alchemy/devboards/pm.c 2010-08-26 19:47:12.000000000 -0400
1402 +++ linux-2.6.35.4/arch/mips/alchemy/devboards/pm.c 2010-09-17 20:12:09.000000000 -0400
1403 @@ -110,7 +110,7 @@ static void db1x_pm_end(void)
1404
1405 }
1406
1407 -static struct platform_suspend_ops db1x_pm_ops = {
1408 +static const struct platform_suspend_ops db1x_pm_ops = {
1409 .valid = suspend_valid_only_mem,
1410 .begin = db1x_pm_begin,
1411 .enter = db1x_pm_enter,
1412 diff -urNp linux-2.6.35.4/arch/mips/include/asm/compat.h linux-2.6.35.4/arch/mips/include/asm/compat.h
1413 --- linux-2.6.35.4/arch/mips/include/asm/compat.h 2010-08-26 19:47:12.000000000 -0400
1414 +++ linux-2.6.35.4/arch/mips/include/asm/compat.h 2010-09-17 20:12:37.000000000 -0400
1415 @@ -145,7 +145,7 @@ static inline compat_uptr_t ptr_to_compa
1416 return (u32)(unsigned long)uptr;
1417 }
1418
1419 -static inline void __user *compat_alloc_user_space(long len)
1420 +static inline void __user *arch_compat_alloc_user_space(long len)
1421 {
1422 struct pt_regs *regs = (struct pt_regs *)
1423 ((unsigned long) current_thread_info() + THREAD_SIZE - 32) - 1;
1424 diff -urNp linux-2.6.35.4/arch/mips/include/asm/elf.h linux-2.6.35.4/arch/mips/include/asm/elf.h
1425 --- linux-2.6.35.4/arch/mips/include/asm/elf.h 2010-08-26 19:47:12.000000000 -0400
1426 +++ linux-2.6.35.4/arch/mips/include/asm/elf.h 2010-09-17 20:12:09.000000000 -0400
1427 @@ -368,6 +368,13 @@ extern const char *__elf_platform;
1428 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1429 #endif
1430
1431 +#ifdef CONFIG_PAX_ASLR
1432 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1433 +
1434 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1435 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1436 +#endif
1437 +
1438 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1439 struct linux_binprm;
1440 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1441 diff -urNp linux-2.6.35.4/arch/mips/include/asm/page.h linux-2.6.35.4/arch/mips/include/asm/page.h
1442 --- linux-2.6.35.4/arch/mips/include/asm/page.h 2010-08-26 19:47:12.000000000 -0400
1443 +++ linux-2.6.35.4/arch/mips/include/asm/page.h 2010-09-17 20:12:09.000000000 -0400
1444 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1445 #ifdef CONFIG_CPU_MIPS32
1446 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1447 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1448 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1449 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1450 #else
1451 typedef struct { unsigned long long pte; } pte_t;
1452 #define pte_val(x) ((x).pte)
1453 diff -urNp linux-2.6.35.4/arch/mips/include/asm/system.h linux-2.6.35.4/arch/mips/include/asm/system.h
1454 --- linux-2.6.35.4/arch/mips/include/asm/system.h 2010-08-26 19:47:12.000000000 -0400
1455 +++ linux-2.6.35.4/arch/mips/include/asm/system.h 2010-09-17 20:12:09.000000000 -0400
1456 @@ -234,6 +234,6 @@ extern void per_cpu_trap_init(void);
1457 */
1458 #define __ARCH_WANT_UNLOCKED_CTXSW
1459
1460 -extern unsigned long arch_align_stack(unsigned long sp);
1461 +#define arch_align_stack(x) ((x) & ALMASK)
1462
1463 #endif /* _ASM_SYSTEM_H */
1464 diff -urNp linux-2.6.35.4/arch/mips/kernel/binfmt_elfn32.c linux-2.6.35.4/arch/mips/kernel/binfmt_elfn32.c
1465 --- linux-2.6.35.4/arch/mips/kernel/binfmt_elfn32.c 2010-08-26 19:47:12.000000000 -0400
1466 +++ linux-2.6.35.4/arch/mips/kernel/binfmt_elfn32.c 2010-09-17 20:12:09.000000000 -0400
1467 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1468 #undef ELF_ET_DYN_BASE
1469 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1470
1471 +#ifdef CONFIG_PAX_ASLR
1472 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1473 +
1474 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1475 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1476 +#endif
1477 +
1478 #include <asm/processor.h>
1479 #include <linux/module.h>
1480 #include <linux/elfcore.h>
1481 diff -urNp linux-2.6.35.4/arch/mips/kernel/binfmt_elfo32.c linux-2.6.35.4/arch/mips/kernel/binfmt_elfo32.c
1482 --- linux-2.6.35.4/arch/mips/kernel/binfmt_elfo32.c 2010-08-26 19:47:12.000000000 -0400
1483 +++ linux-2.6.35.4/arch/mips/kernel/binfmt_elfo32.c 2010-09-17 20:12:09.000000000 -0400
1484 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1485 #undef ELF_ET_DYN_BASE
1486 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1487
1488 +#ifdef CONFIG_PAX_ASLR
1489 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1490 +
1491 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1492 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1493 +#endif
1494 +
1495 #include <asm/processor.h>
1496
1497 /*
1498 diff -urNp linux-2.6.35.4/arch/mips/kernel/kgdb.c linux-2.6.35.4/arch/mips/kernel/kgdb.c
1499 --- linux-2.6.35.4/arch/mips/kernel/kgdb.c 2010-08-26 19:47:12.000000000 -0400
1500 +++ linux-2.6.35.4/arch/mips/kernel/kgdb.c 2010-09-17 20:12:09.000000000 -0400
1501 @@ -270,6 +270,7 @@ int kgdb_arch_handle_exception(int vecto
1502 return -1;
1503 }
1504
1505 +/* cannot be const, see kgdb_arch_init */
1506 struct kgdb_arch arch_kgdb_ops;
1507
1508 /*
1509 diff -urNp linux-2.6.35.4/arch/mips/kernel/process.c linux-2.6.35.4/arch/mips/kernel/process.c
1510 --- linux-2.6.35.4/arch/mips/kernel/process.c 2010-08-26 19:47:12.000000000 -0400
1511 +++ linux-2.6.35.4/arch/mips/kernel/process.c 2010-09-17 20:12:09.000000000 -0400
1512 @@ -474,15 +474,3 @@ unsigned long get_wchan(struct task_stru
1513 out:
1514 return pc;
1515 }
1516 -
1517 -/*
1518 - * Don't forget that the stack pointer must be aligned on a 8 bytes
1519 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1520 - */
1521 -unsigned long arch_align_stack(unsigned long sp)
1522 -{
1523 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1524 - sp -= get_random_int() & ~PAGE_MASK;
1525 -
1526 - return sp & ALMASK;
1527 -}
1528 diff -urNp linux-2.6.35.4/arch/mips/kernel/syscall.c linux-2.6.35.4/arch/mips/kernel/syscall.c
1529 --- linux-2.6.35.4/arch/mips/kernel/syscall.c 2010-08-26 19:47:12.000000000 -0400
1530 +++ linux-2.6.35.4/arch/mips/kernel/syscall.c 2010-09-17 20:12:09.000000000 -0400
1531 @@ -106,17 +106,21 @@ unsigned long arch_get_unmapped_area(str
1532 do_color_align = 0;
1533 if (filp || (flags & MAP_SHARED))
1534 do_color_align = 1;
1535 +
1536 +#ifdef CONFIG_PAX_RANDMMAP
1537 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1538 +#endif
1539 +
1540 if (addr) {
1541 if (do_color_align)
1542 addr = COLOUR_ALIGN(addr, pgoff);
1543 else
1544 addr = PAGE_ALIGN(addr);
1545 vmm = find_vma(current->mm, addr);
1546 - if (task_size - len >= addr &&
1547 - (!vmm || addr + len <= vmm->vm_start))
1548 + if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
1549 return addr;
1550 }
1551 - addr = TASK_UNMAPPED_BASE;
1552 + addr = current->mm->mmap_base;
1553 if (do_color_align)
1554 addr = COLOUR_ALIGN(addr, pgoff);
1555 else
1556 @@ -126,7 +130,7 @@ unsigned long arch_get_unmapped_area(str
1557 /* At this point: (!vmm || addr < vmm->vm_end). */
1558 if (task_size - len < addr)
1559 return -ENOMEM;
1560 - if (!vmm || addr + len <= vmm->vm_start)
1561 + if (check_heap_stack_gap(vmm, addr, len))
1562 return addr;
1563 addr = vmm->vm_end;
1564 if (do_color_align)
1565 diff -urNp linux-2.6.35.4/arch/mips/loongson/common/pm.c linux-2.6.35.4/arch/mips/loongson/common/pm.c
1566 --- linux-2.6.35.4/arch/mips/loongson/common/pm.c 2010-08-26 19:47:12.000000000 -0400
1567 +++ linux-2.6.35.4/arch/mips/loongson/common/pm.c 2010-09-17 20:12:09.000000000 -0400
1568 @@ -147,7 +147,7 @@ static int loongson_pm_valid_state(suspe
1569 }
1570 }
1571
1572 -static struct platform_suspend_ops loongson_pm_ops = {
1573 +static const struct platform_suspend_ops loongson_pm_ops = {
1574 .valid = loongson_pm_valid_state,
1575 .enter = loongson_pm_enter,
1576 };
1577 diff -urNp linux-2.6.35.4/arch/mips/mm/fault.c linux-2.6.35.4/arch/mips/mm/fault.c
1578 --- linux-2.6.35.4/arch/mips/mm/fault.c 2010-08-26 19:47:12.000000000 -0400
1579 +++ linux-2.6.35.4/arch/mips/mm/fault.c 2010-09-17 20:12:09.000000000 -0400
1580 @@ -26,6 +26,23 @@
1581 #include <asm/ptrace.h>
1582 #include <asm/highmem.h> /* For VMALLOC_END */
1583
1584 +#ifdef CONFIG_PAX_PAGEEXEC
1585 +void pax_report_insns(void *pc)
1586 +{
1587 + unsigned long i;
1588 +
1589 + printk(KERN_ERR "PAX: bytes at PC: ");
1590 + for (i = 0; i < 5; i++) {
1591 + unsigned int c;
1592 + if (get_user(c, (unsigned int *)pc+i))
1593 + printk(KERN_CONT "???????? ");
1594 + else
1595 + printk(KERN_CONT "%08x ", c);
1596 + }
1597 + printk("\n");
1598 +}
1599 +#endif
1600 +
1601 /*
1602 * This routine handles page faults. It determines the address,
1603 * and the problem, and then passes it off to one of the appropriate
1604 diff -urNp linux-2.6.35.4/arch/parisc/include/asm/compat.h linux-2.6.35.4/arch/parisc/include/asm/compat.h
1605 --- linux-2.6.35.4/arch/parisc/include/asm/compat.h 2010-08-26 19:47:12.000000000 -0400
1606 +++ linux-2.6.35.4/arch/parisc/include/asm/compat.h 2010-09-17 20:12:37.000000000 -0400
1607 @@ -147,7 +147,7 @@ static inline compat_uptr_t ptr_to_compa
1608 return (u32)(unsigned long)uptr;
1609 }
1610
1611 -static __inline__ void __user *compat_alloc_user_space(long len)
1612 +static __inline__ void __user *arch_compat_alloc_user_space(long len)
1613 {
1614 struct pt_regs *regs = &current->thread.regs;
1615 return (void __user *)regs->gr[30];
1616 diff -urNp linux-2.6.35.4/arch/parisc/include/asm/elf.h linux-2.6.35.4/arch/parisc/include/asm/elf.h
1617 --- linux-2.6.35.4/arch/parisc/include/asm/elf.h 2010-08-26 19:47:12.000000000 -0400
1618 +++ linux-2.6.35.4/arch/parisc/include/asm/elf.h 2010-09-17 20:12:09.000000000 -0400
1619 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration..
1620
1621 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1622
1623 +#ifdef CONFIG_PAX_ASLR
1624 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
1625 +
1626 +#define PAX_DELTA_MMAP_LEN 16
1627 +#define PAX_DELTA_STACK_LEN 16
1628 +#endif
1629 +
1630 /* This yields a mask that user programs can use to figure out what
1631 instruction set this CPU supports. This could be done in user space,
1632 but it's not easy, and we've already done it here. */
1633 diff -urNp linux-2.6.35.4/arch/parisc/include/asm/pgtable.h linux-2.6.35.4/arch/parisc/include/asm/pgtable.h
1634 --- linux-2.6.35.4/arch/parisc/include/asm/pgtable.h 2010-08-26 19:47:12.000000000 -0400
1635 +++ linux-2.6.35.4/arch/parisc/include/asm/pgtable.h 2010-09-17 20:12:09.000000000 -0400
1636 @@ -207,6 +207,17 @@
1637 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1638 #define PAGE_COPY PAGE_EXECREAD
1639 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1640 +
1641 +#ifdef CONFIG_PAX_PAGEEXEC
1642 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1643 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1644 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1645 +#else
1646 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1647 +# define PAGE_COPY_NOEXEC PAGE_COPY
1648 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1649 +#endif
1650 +
1651 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1652 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
1653 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
1654 diff -urNp linux-2.6.35.4/arch/parisc/kernel/module.c linux-2.6.35.4/arch/parisc/kernel/module.c
1655 --- linux-2.6.35.4/arch/parisc/kernel/module.c 2010-08-26 19:47:12.000000000 -0400
1656 +++ linux-2.6.35.4/arch/parisc/kernel/module.c 2010-09-17 20:12:09.000000000 -0400
1657 @@ -96,16 +96,38 @@
1658
1659 /* three functions to determine where in the module core
1660 * or init pieces the location is */
1661 +static inline int in_init_rx(struct module *me, void *loc)
1662 +{
1663 + return (loc >= me->module_init_rx &&
1664 + loc < (me->module_init_rx + me->init_size_rx));
1665 +}
1666 +
1667 +static inline int in_init_rw(struct module *me, void *loc)
1668 +{
1669 + return (loc >= me->module_init_rw &&
1670 + loc < (me->module_init_rw + me->init_size_rw));
1671 +}
1672 +
1673 static inline int in_init(struct module *me, void *loc)
1674 {
1675 - return (loc >= me->module_init &&
1676 - loc <= (me->module_init + me->init_size));
1677 + return in_init_rx(me, loc) || in_init_rw(me, loc);
1678 +}
1679 +
1680 +static inline int in_core_rx(struct module *me, void *loc)
1681 +{
1682 + return (loc >= me->module_core_rx &&
1683 + loc < (me->module_core_rx + me->core_size_rx));
1684 +}
1685 +
1686 +static inline int in_core_rw(struct module *me, void *loc)
1687 +{
1688 + return (loc >= me->module_core_rw &&
1689 + loc < (me->module_core_rw + me->core_size_rw));
1690 }
1691
1692 static inline int in_core(struct module *me, void *loc)
1693 {
1694 - return (loc >= me->module_core &&
1695 - loc <= (me->module_core + me->core_size));
1696 + return in_core_rx(me, loc) || in_core_rw(me, loc);
1697 }
1698
1699 static inline int in_local(struct module *me, void *loc)
1700 @@ -365,13 +387,13 @@ int module_frob_arch_sections(CONST Elf_
1701 }
1702
1703 /* align things a bit */
1704 - me->core_size = ALIGN(me->core_size, 16);
1705 - me->arch.got_offset = me->core_size;
1706 - me->core_size += gots * sizeof(struct got_entry);
1707 -
1708 - me->core_size = ALIGN(me->core_size, 16);
1709 - me->arch.fdesc_offset = me->core_size;
1710 - me->core_size += fdescs * sizeof(Elf_Fdesc);
1711 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1712 + me->arch.got_offset = me->core_size_rw;
1713 + me->core_size_rw += gots * sizeof(struct got_entry);
1714 +
1715 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1716 + me->arch.fdesc_offset = me->core_size_rw;
1717 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1718
1719 me->arch.got_max = gots;
1720 me->arch.fdesc_max = fdescs;
1721 @@ -389,7 +411,7 @@ static Elf64_Word get_got(struct module
1722
1723 BUG_ON(value == 0);
1724
1725 - got = me->module_core + me->arch.got_offset;
1726 + got = me->module_core_rw + me->arch.got_offset;
1727 for (i = 0; got[i].addr; i++)
1728 if (got[i].addr == value)
1729 goto out;
1730 @@ -407,7 +429,7 @@ static Elf64_Word get_got(struct module
1731 #ifdef CONFIG_64BIT
1732 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1733 {
1734 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1735 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1736
1737 if (!value) {
1738 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1739 @@ -425,7 +447,7 @@ static Elf_Addr get_fdesc(struct module
1740
1741 /* Create new one */
1742 fdesc->addr = value;
1743 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1744 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1745 return (Elf_Addr)fdesc;
1746 }
1747 #endif /* CONFIG_64BIT */
1748 @@ -849,7 +871,7 @@ register_unwind_table(struct module *me,
1749
1750 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1751 end = table + sechdrs[me->arch.unwind_section].sh_size;
1752 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1753 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1754
1755 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1756 me->arch.unwind_section, table, end, gp);
1757 diff -urNp linux-2.6.35.4/arch/parisc/kernel/sys_parisc.c linux-2.6.35.4/arch/parisc/kernel/sys_parisc.c
1758 --- linux-2.6.35.4/arch/parisc/kernel/sys_parisc.c 2010-08-26 19:47:12.000000000 -0400
1759 +++ linux-2.6.35.4/arch/parisc/kernel/sys_parisc.c 2010-09-17 20:12:09.000000000 -0400
1760 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1761 /* At this point: (!vma || addr < vma->vm_end). */
1762 if (TASK_SIZE - len < addr)
1763 return -ENOMEM;
1764 - if (!vma || addr + len <= vma->vm_start)
1765 + if (check_heap_stack_gap(vma, addr, len))
1766 return addr;
1767 addr = vma->vm_end;
1768 }
1769 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1770 /* At this point: (!vma || addr < vma->vm_end). */
1771 if (TASK_SIZE - len < addr)
1772 return -ENOMEM;
1773 - if (!vma || addr + len <= vma->vm_start)
1774 + if (check_heap_stack_gap(vma, addr, len))
1775 return addr;
1776 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1777 if (addr < vma->vm_end) /* handle wraparound */
1778 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1779 if (flags & MAP_FIXED)
1780 return addr;
1781 if (!addr)
1782 - addr = TASK_UNMAPPED_BASE;
1783 + addr = current->mm->mmap_base;
1784
1785 if (filp) {
1786 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1787 diff -urNp linux-2.6.35.4/arch/parisc/kernel/traps.c linux-2.6.35.4/arch/parisc/kernel/traps.c
1788 --- linux-2.6.35.4/arch/parisc/kernel/traps.c 2010-08-26 19:47:12.000000000 -0400
1789 +++ linux-2.6.35.4/arch/parisc/kernel/traps.c 2010-09-17 20:12:09.000000000 -0400
1790 @@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1791
1792 down_read(&current->mm->mmap_sem);
1793 vma = find_vma(current->mm,regs->iaoq[0]);
1794 - if (vma && (regs->iaoq[0] >= vma->vm_start)
1795 - && (vma->vm_flags & VM_EXEC)) {
1796 -
1797 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1798 fault_address = regs->iaoq[0];
1799 fault_space = regs->iasq[0];
1800
1801 diff -urNp linux-2.6.35.4/arch/parisc/mm/fault.c linux-2.6.35.4/arch/parisc/mm/fault.c
1802 --- linux-2.6.35.4/arch/parisc/mm/fault.c 2010-08-26 19:47:12.000000000 -0400
1803 +++ linux-2.6.35.4/arch/parisc/mm/fault.c 2010-09-17 20:12:09.000000000 -0400
1804 @@ -15,6 +15,7 @@
1805 #include <linux/sched.h>
1806 #include <linux/interrupt.h>
1807 #include <linux/module.h>
1808 +#include <linux/unistd.h>
1809
1810 #include <asm/uaccess.h>
1811 #include <asm/traps.h>
1812 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1813 static unsigned long
1814 parisc_acctyp(unsigned long code, unsigned int inst)
1815 {
1816 - if (code == 6 || code == 16)
1817 + if (code == 6 || code == 7 || code == 16)
1818 return VM_EXEC;
1819
1820 switch (inst & 0xf0000000) {
1821 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1822 }
1823 #endif
1824
1825 +#ifdef CONFIG_PAX_PAGEEXEC
1826 +/*
1827 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1828 + *
1829 + * returns 1 when task should be killed
1830 + * 2 when rt_sigreturn trampoline was detected
1831 + * 3 when unpatched PLT trampoline was detected
1832 + */
1833 +static int pax_handle_fetch_fault(struct pt_regs *regs)
1834 +{
1835 +
1836 +#ifdef CONFIG_PAX_EMUPLT
1837 + int err;
1838 +
1839 + do { /* PaX: unpatched PLT emulation */
1840 + unsigned int bl, depwi;
1841 +
1842 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1843 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1844 +
1845 + if (err)
1846 + break;
1847 +
1848 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1849 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1850 +
1851 + err = get_user(ldw, (unsigned int *)addr);
1852 + err |= get_user(bv, (unsigned int *)(addr+4));
1853 + err |= get_user(ldw2, (unsigned int *)(addr+8));
1854 +
1855 + if (err)
1856 + break;
1857 +
1858 + if (ldw == 0x0E801096U &&
1859 + bv == 0xEAC0C000U &&
1860 + ldw2 == 0x0E881095U)
1861 + {
1862 + unsigned int resolver, map;
1863 +
1864 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1865 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1866 + if (err)
1867 + break;
1868 +
1869 + regs->gr[20] = instruction_pointer(regs)+8;
1870 + regs->gr[21] = map;
1871 + regs->gr[22] = resolver;
1872 + regs->iaoq[0] = resolver | 3UL;
1873 + regs->iaoq[1] = regs->iaoq[0] + 4;
1874 + return 3;
1875 + }
1876 + }
1877 + } while (0);
1878 +#endif
1879 +
1880 +#ifdef CONFIG_PAX_EMUTRAMP
1881 +
1882 +#ifndef CONFIG_PAX_EMUSIGRT
1883 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1884 + return 1;
1885 +#endif
1886 +
1887 + do { /* PaX: rt_sigreturn emulation */
1888 + unsigned int ldi1, ldi2, bel, nop;
1889 +
1890 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1891 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1892 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1893 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1894 +
1895 + if (err)
1896 + break;
1897 +
1898 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1899 + ldi2 == 0x3414015AU &&
1900 + bel == 0xE4008200U &&
1901 + nop == 0x08000240U)
1902 + {
1903 + regs->gr[25] = (ldi1 & 2) >> 1;
1904 + regs->gr[20] = __NR_rt_sigreturn;
1905 + regs->gr[31] = regs->iaoq[1] + 16;
1906 + regs->sr[0] = regs->iasq[1];
1907 + regs->iaoq[0] = 0x100UL;
1908 + regs->iaoq[1] = regs->iaoq[0] + 4;
1909 + regs->iasq[0] = regs->sr[2];
1910 + regs->iasq[1] = regs->sr[2];
1911 + return 2;
1912 + }
1913 + } while (0);
1914 +#endif
1915 +
1916 + return 1;
1917 +}
1918 +
1919 +void pax_report_insns(void *pc, void *sp)
1920 +{
1921 + unsigned long i;
1922 +
1923 + printk(KERN_ERR "PAX: bytes at PC: ");
1924 + for (i = 0; i < 5; i++) {
1925 + unsigned int c;
1926 + if (get_user(c, (unsigned int *)pc+i))
1927 + printk(KERN_CONT "???????? ");
1928 + else
1929 + printk(KERN_CONT "%08x ", c);
1930 + }
1931 + printk("\n");
1932 +}
1933 +#endif
1934 +
1935 int fixup_exception(struct pt_regs *regs)
1936 {
1937 const struct exception_table_entry *fix;
1938 @@ -192,8 +303,33 @@ good_area:
1939
1940 acc_type = parisc_acctyp(code,regs->iir);
1941
1942 - if ((vma->vm_flags & acc_type) != acc_type)
1943 + if ((vma->vm_flags & acc_type) != acc_type) {
1944 +
1945 +#ifdef CONFIG_PAX_PAGEEXEC
1946 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1947 + (address & ~3UL) == instruction_pointer(regs))
1948 + {
1949 + up_read(&mm->mmap_sem);
1950 + switch (pax_handle_fetch_fault(regs)) {
1951 +
1952 +#ifdef CONFIG_PAX_EMUPLT
1953 + case 3:
1954 + return;
1955 +#endif
1956 +
1957 +#ifdef CONFIG_PAX_EMUTRAMP
1958 + case 2:
1959 + return;
1960 +#endif
1961 +
1962 + }
1963 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1964 + do_group_exit(SIGKILL);
1965 + }
1966 +#endif
1967 +
1968 goto bad_area;
1969 + }
1970
1971 /*
1972 * If for any reason at all we couldn't handle the fault, make
1973 diff -urNp linux-2.6.35.4/arch/powerpc/include/asm/compat.h linux-2.6.35.4/arch/powerpc/include/asm/compat.h
1974 --- linux-2.6.35.4/arch/powerpc/include/asm/compat.h 2010-08-26 19:47:12.000000000 -0400
1975 +++ linux-2.6.35.4/arch/powerpc/include/asm/compat.h 2010-09-17 20:12:37.000000000 -0400
1976 @@ -134,7 +134,7 @@ static inline compat_uptr_t ptr_to_compa
1977 return (u32)(unsigned long)uptr;
1978 }
1979
1980 -static inline void __user *compat_alloc_user_space(long len)
1981 +static inline void __user *arch_compat_alloc_user_space(long len)
1982 {
1983 struct pt_regs *regs = current->thread.regs;
1984 unsigned long usp = regs->gpr[1];
1985 diff -urNp linux-2.6.35.4/arch/powerpc/include/asm/device.h linux-2.6.35.4/arch/powerpc/include/asm/device.h
1986 --- linux-2.6.35.4/arch/powerpc/include/asm/device.h 2010-08-26 19:47:12.000000000 -0400
1987 +++ linux-2.6.35.4/arch/powerpc/include/asm/device.h 2010-09-17 20:12:09.000000000 -0400
1988 @@ -11,7 +11,7 @@ struct device_node;
1989
1990 struct dev_archdata {
1991 /* DMA operations on that device */
1992 - struct dma_map_ops *dma_ops;
1993 + const struct dma_map_ops *dma_ops;
1994
1995 /*
1996 * When an iommu is in use, dma_data is used as a ptr to the base of the
1997 diff -urNp linux-2.6.35.4/arch/powerpc/include/asm/dma-mapping.h linux-2.6.35.4/arch/powerpc/include/asm/dma-mapping.h
1998 --- linux-2.6.35.4/arch/powerpc/include/asm/dma-mapping.h 2010-08-26 19:47:12.000000000 -0400
1999 +++ linux-2.6.35.4/arch/powerpc/include/asm/dma-mapping.h 2010-09-17 20:12:09.000000000 -0400
2000 @@ -66,12 +66,13 @@ static inline unsigned long device_to_ma
2001 /*
2002 * Available generic sets of operations
2003 */
2004 +/* cannot be const */
2005 #ifdef CONFIG_PPC64
2006 extern struct dma_map_ops dma_iommu_ops;
2007 #endif
2008 -extern struct dma_map_ops dma_direct_ops;
2009 +extern const struct dma_map_ops dma_direct_ops;
2010
2011 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2012 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
2013 {
2014 /* We don't handle the NULL dev case for ISA for now. We could
2015 * do it via an out of line call but it is not needed for now. The
2016 @@ -84,7 +85,7 @@ static inline struct dma_map_ops *get_dm
2017 return dev->archdata.dma_ops;
2018 }
2019
2020 -static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
2021 +static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
2022 {
2023 dev->archdata.dma_ops = ops;
2024 }
2025 @@ -118,7 +119,7 @@ static inline void set_dma_offset(struct
2026
2027 static inline int dma_supported(struct device *dev, u64 mask)
2028 {
2029 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2030 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2031
2032 if (unlikely(dma_ops == NULL))
2033 return 0;
2034 @@ -129,7 +130,7 @@ static inline int dma_supported(struct d
2035
2036 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2037 {
2038 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2039 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2040
2041 if (unlikely(dma_ops == NULL))
2042 return -EIO;
2043 @@ -144,7 +145,7 @@ static inline int dma_set_mask(struct de
2044 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2045 dma_addr_t *dma_handle, gfp_t flag)
2046 {
2047 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2048 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2049 void *cpu_addr;
2050
2051 BUG_ON(!dma_ops);
2052 @@ -159,7 +160,7 @@ static inline void *dma_alloc_coherent(s
2053 static inline void dma_free_coherent(struct device *dev, size_t size,
2054 void *cpu_addr, dma_addr_t dma_handle)
2055 {
2056 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2057 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2058
2059 BUG_ON(!dma_ops);
2060
2061 @@ -170,7 +171,7 @@ static inline void dma_free_coherent(str
2062
2063 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
2064 {
2065 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
2066 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2067
2068 if (dma_ops->mapping_error)
2069 return dma_ops->mapping_error(dev, dma_addr);
2070 diff -urNp linux-2.6.35.4/arch/powerpc/include/asm/elf.h linux-2.6.35.4/arch/powerpc/include/asm/elf.h
2071 --- linux-2.6.35.4/arch/powerpc/include/asm/elf.h 2010-08-26 19:47:12.000000000 -0400
2072 +++ linux-2.6.35.4/arch/powerpc/include/asm/elf.h 2010-09-17 20:12:09.000000000 -0400
2073 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
2074 the loader. We need to make sure that it is out of the way of the program
2075 that it will "exec", and that there is sufficient room for the brk. */
2076
2077 -extern unsigned long randomize_et_dyn(unsigned long base);
2078 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2079 +#define ELF_ET_DYN_BASE (0x20000000)
2080 +
2081 +#ifdef CONFIG_PAX_ASLR
2082 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2083 +
2084 +#ifdef __powerpc64__
2085 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2086 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2087 +#else
2088 +#define PAX_DELTA_MMAP_LEN 15
2089 +#define PAX_DELTA_STACK_LEN 15
2090 +#endif
2091 +#endif
2092
2093 /*
2094 * Our registers are always unsigned longs, whether we're a 32 bit
2095 @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(s
2096 (0x7ff >> (PAGE_SHIFT - 12)) : \
2097 (0x3ffff >> (PAGE_SHIFT - 12)))
2098
2099 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2100 -#define arch_randomize_brk arch_randomize_brk
2101 -
2102 #endif /* __KERNEL__ */
2103
2104 /*
2105 diff -urNp linux-2.6.35.4/arch/powerpc/include/asm/iommu.h linux-2.6.35.4/arch/powerpc/include/asm/iommu.h
2106 --- linux-2.6.35.4/arch/powerpc/include/asm/iommu.h 2010-08-26 19:47:12.000000000 -0400
2107 +++ linux-2.6.35.4/arch/powerpc/include/asm/iommu.h 2010-09-17 20:12:09.000000000 -0400
2108 @@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(voi
2109 extern void iommu_init_early_dart(void);
2110 extern void iommu_init_early_pasemi(void);
2111
2112 +/* dma-iommu.c */
2113 +extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
2114 +
2115 #ifdef CONFIG_PCI
2116 extern void pci_iommu_init(void);
2117 extern void pci_direct_iommu_init(void);
2118 diff -urNp linux-2.6.35.4/arch/powerpc/include/asm/kmap_types.h linux-2.6.35.4/arch/powerpc/include/asm/kmap_types.h
2119 --- linux-2.6.35.4/arch/powerpc/include/asm/kmap_types.h 2010-08-26 19:47:12.000000000 -0400
2120 +++ linux-2.6.35.4/arch/powerpc/include/asm/kmap_types.h 2010-09-17 20:12:09.000000000 -0400
2121 @@ -27,6 +27,7 @@ enum km_type {
2122 KM_PPC_SYNC_PAGE,
2123 KM_PPC_SYNC_ICACHE,
2124 KM_KDB,
2125 + KM_CLEARPAGE,
2126 KM_TYPE_NR
2127 };
2128
2129 diff -urNp linux-2.6.35.4/arch/powerpc/include/asm/page_64.h linux-2.6.35.4/arch/powerpc/include/asm/page_64.h
2130 --- linux-2.6.35.4/arch/powerpc/include/asm/page_64.h 2010-08-26 19:47:12.000000000 -0400
2131 +++ linux-2.6.35.4/arch/powerpc/include/asm/page_64.h 2010-09-17 20:12:09.000000000 -0400
2132 @@ -172,15 +172,18 @@ do { \
2133 * stack by default, so in the absense of a PT_GNU_STACK program header
2134 * we turn execute permission off.
2135 */
2136 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2137 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2138 +#define VM_STACK_DEFAULT_FLAGS32 \
2139 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2140 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2141
2142 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2143 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2144
2145 +#ifndef CONFIG_PAX_PAGEEXEC
2146 #define VM_STACK_DEFAULT_FLAGS \
2147 (test_thread_flag(TIF_32BIT) ? \
2148 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2149 +#endif
2150
2151 #include <asm-generic/getorder.h>
2152
2153 diff -urNp linux-2.6.35.4/arch/powerpc/include/asm/page.h linux-2.6.35.4/arch/powerpc/include/asm/page.h
2154 --- linux-2.6.35.4/arch/powerpc/include/asm/page.h 2010-08-26 19:47:12.000000000 -0400
2155 +++ linux-2.6.35.4/arch/powerpc/include/asm/page.h 2010-09-17 20:12:09.000000000 -0400
2156 @@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
2157 * and needs to be executable. This means the whole heap ends
2158 * up being executable.
2159 */
2160 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2161 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2162 +#define VM_DATA_DEFAULT_FLAGS32 \
2163 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2164 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2165
2166 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2167 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2168 @@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr;
2169 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2170 #endif
2171
2172 +#define ktla_ktva(addr) (addr)
2173 +#define ktva_ktla(addr) (addr)
2174 +
2175 #ifndef __ASSEMBLY__
2176
2177 #undef STRICT_MM_TYPECHECKS
2178 diff -urNp linux-2.6.35.4/arch/powerpc/include/asm/pci.h linux-2.6.35.4/arch/powerpc/include/asm/pci.h
2179 --- linux-2.6.35.4/arch/powerpc/include/asm/pci.h 2010-08-26 19:47:12.000000000 -0400
2180 +++ linux-2.6.35.4/arch/powerpc/include/asm/pci.h 2010-09-17 20:12:09.000000000 -0400
2181 @@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq
2182 }
2183
2184 #ifdef CONFIG_PCI
2185 -extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
2186 -extern struct dma_map_ops *get_pci_dma_ops(void);
2187 +extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
2188 +extern const struct dma_map_ops *get_pci_dma_ops(void);
2189 #else /* CONFIG_PCI */
2190 #define set_pci_dma_ops(d)
2191 #define get_pci_dma_ops() NULL
2192 diff -urNp linux-2.6.35.4/arch/powerpc/include/asm/pte-hash32.h linux-2.6.35.4/arch/powerpc/include/asm/pte-hash32.h
2193 --- linux-2.6.35.4/arch/powerpc/include/asm/pte-hash32.h 2010-08-26 19:47:12.000000000 -0400
2194 +++ linux-2.6.35.4/arch/powerpc/include/asm/pte-hash32.h 2010-09-17 20:12:09.000000000 -0400
2195 @@ -21,6 +21,7 @@
2196 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2197 #define _PAGE_USER 0x004 /* usermode access allowed */
2198 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2199 +#define _PAGE_EXEC _PAGE_GUARDED
2200 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2201 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2202 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2203 diff -urNp linux-2.6.35.4/arch/powerpc/include/asm/reg.h linux-2.6.35.4/arch/powerpc/include/asm/reg.h
2204 --- linux-2.6.35.4/arch/powerpc/include/asm/reg.h 2010-08-26 19:47:12.000000000 -0400
2205 +++ linux-2.6.35.4/arch/powerpc/include/asm/reg.h 2010-09-17 20:12:09.000000000 -0400
2206 @@ -191,6 +191,7 @@
2207 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2208 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2209 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2210 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2211 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2212 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2213 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2214 diff -urNp linux-2.6.35.4/arch/powerpc/include/asm/swiotlb.h linux-2.6.35.4/arch/powerpc/include/asm/swiotlb.h
2215 --- linux-2.6.35.4/arch/powerpc/include/asm/swiotlb.h 2010-08-26 19:47:12.000000000 -0400
2216 +++ linux-2.6.35.4/arch/powerpc/include/asm/swiotlb.h 2010-09-17 20:12:09.000000000 -0400
2217 @@ -13,7 +13,7 @@
2218
2219 #include <linux/swiotlb.h>
2220
2221 -extern struct dma_map_ops swiotlb_dma_ops;
2222 +extern const struct dma_map_ops swiotlb_dma_ops;
2223
2224 static inline void dma_mark_clean(void *addr, size_t size) {}
2225
2226 diff -urNp linux-2.6.35.4/arch/powerpc/include/asm/uaccess.h linux-2.6.35.4/arch/powerpc/include/asm/uaccess.h
2227 --- linux-2.6.35.4/arch/powerpc/include/asm/uaccess.h 2010-08-26 19:47:12.000000000 -0400
2228 +++ linux-2.6.35.4/arch/powerpc/include/asm/uaccess.h 2010-09-17 20:12:09.000000000 -0400
2229 @@ -13,6 +13,8 @@
2230 #define VERIFY_READ 0
2231 #define VERIFY_WRITE 1
2232
2233 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
2234 +
2235 /*
2236 * The fs value determines whether argument validity checking should be
2237 * performed or not. If get_fs() == USER_DS, checking is performed, with
2238 @@ -327,52 +329,6 @@ do { \
2239 extern unsigned long __copy_tofrom_user(void __user *to,
2240 const void __user *from, unsigned long size);
2241
2242 -#ifndef __powerpc64__
2243 -
2244 -static inline unsigned long copy_from_user(void *to,
2245 - const void __user *from, unsigned long n)
2246 -{
2247 - unsigned long over;
2248 -
2249 - if (access_ok(VERIFY_READ, from, n))
2250 - return __copy_tofrom_user((__force void __user *)to, from, n);
2251 - if ((unsigned long)from < TASK_SIZE) {
2252 - over = (unsigned long)from + n - TASK_SIZE;
2253 - return __copy_tofrom_user((__force void __user *)to, from,
2254 - n - over) + over;
2255 - }
2256 - return n;
2257 -}
2258 -
2259 -static inline unsigned long copy_to_user(void __user *to,
2260 - const void *from, unsigned long n)
2261 -{
2262 - unsigned long over;
2263 -
2264 - if (access_ok(VERIFY_WRITE, to, n))
2265 - return __copy_tofrom_user(to, (__force void __user *)from, n);
2266 - if ((unsigned long)to < TASK_SIZE) {
2267 - over = (unsigned long)to + n - TASK_SIZE;
2268 - return __copy_tofrom_user(to, (__force void __user *)from,
2269 - n - over) + over;
2270 - }
2271 - return n;
2272 -}
2273 -
2274 -#else /* __powerpc64__ */
2275 -
2276 -#define __copy_in_user(to, from, size) \
2277 - __copy_tofrom_user((to), (from), (size))
2278 -
2279 -extern unsigned long copy_from_user(void *to, const void __user *from,
2280 - unsigned long n);
2281 -extern unsigned long copy_to_user(void __user *to, const void *from,
2282 - unsigned long n);
2283 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
2284 - unsigned long n);
2285 -
2286 -#endif /* __powerpc64__ */
2287 -
2288 static inline unsigned long __copy_from_user_inatomic(void *to,
2289 const void __user *from, unsigned long n)
2290 {
2291 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
2292 if (ret == 0)
2293 return 0;
2294 }
2295 +
2296 + if (!__builtin_constant_p(n))
2297 + check_object_size(to, n, false);
2298 +
2299 return __copy_tofrom_user((__force void __user *)to, from, n);
2300 }
2301
2302 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
2303 if (ret == 0)
2304 return 0;
2305 }
2306 +
2307 + if (!__builtin_constant_p(n))
2308 + check_object_size(from, n, true);
2309 +
2310 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2311 }
2312
2313 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
2314 return __copy_to_user_inatomic(to, from, size);
2315 }
2316
2317 +#ifndef __powerpc64__
2318 +
2319 +static inline unsigned long __must_check copy_from_user(void *to,
2320 + const void __user *from, unsigned long n)
2321 +{
2322 + unsigned long over;
2323 +
2324 + if ((long)n < 0)
2325 + return n;
2326 +
2327 + if (access_ok(VERIFY_READ, from, n)) {
2328 + if (!__builtin_constant_p(n))
2329 + check_object_size(to, n, false);
2330 + return __copy_tofrom_user((__force void __user *)to, from, n);
2331 + }
2332 + if ((unsigned long)from < TASK_SIZE) {
2333 + over = (unsigned long)from + n - TASK_SIZE;
2334 + if (!__builtin_constant_p(n - over))
2335 + check_object_size(to, n - over, false);
2336 + return __copy_tofrom_user((__force void __user *)to, from,
2337 + n - over) + over;
2338 + }
2339 + return n;
2340 +}
2341 +
2342 +static inline unsigned long __must_check copy_to_user(void __user *to,
2343 + const void *from, unsigned long n)
2344 +{
2345 + unsigned long over;
2346 +
2347 + if ((long)n < 0)
2348 + return n;
2349 +
2350 + if (access_ok(VERIFY_WRITE, to, n)) {
2351 + if (!__builtin_constant_p(n))
2352 + check_object_size(from, n, true);
2353 + return __copy_tofrom_user(to, (__force void __user *)from, n);
2354 + }
2355 + if ((unsigned long)to < TASK_SIZE) {
2356 + over = (unsigned long)to + n - TASK_SIZE;
2357 + if (!__builtin_constant_p(n))
2358 + check_object_size(from, n - over, true);
2359 + return __copy_tofrom_user(to, (__force void __user *)from,
2360 + n - over) + over;
2361 + }
2362 + return n;
2363 +}
2364 +
2365 +#else /* __powerpc64__ */
2366 +
2367 +#define __copy_in_user(to, from, size) \
2368 + __copy_tofrom_user((to), (from), (size))
2369 +
2370 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2371 +{
2372 + if ((long)n < 0 || n > INT_MAX)
2373 + return n;
2374 +
2375 + if (!__builtin_constant_p(n))
2376 + check_object_size(to, n, false);
2377 +
2378 + if (likely(access_ok(VERIFY_READ, from, n)))
2379 + n = __copy_from_user(to, from, n);
2380 + else
2381 + memset(to, 0, n);
2382 + return n;
2383 +}
2384 +
2385 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2386 +{
2387 + if ((long)n < 0 || n > INT_MAX)
2388 + return n;
2389 +
2390 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
2391 + if (!__builtin_constant_p(n))
2392 + check_object_size(from, n, true);
2393 + n = __copy_to_user(to, from, n);
2394 + }
2395 + return n;
2396 +}
2397 +
2398 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
2399 + unsigned long n);
2400 +
2401 +#endif /* __powerpc64__ */
2402 +
2403 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2404
2405 static inline unsigned long clear_user(void __user *addr, unsigned long size)
2406 diff -urNp linux-2.6.35.4/arch/powerpc/kernel/dma.c linux-2.6.35.4/arch/powerpc/kernel/dma.c
2407 --- linux-2.6.35.4/arch/powerpc/kernel/dma.c 2010-08-26 19:47:12.000000000 -0400
2408 +++ linux-2.6.35.4/arch/powerpc/kernel/dma.c 2010-09-17 20:12:09.000000000 -0400
2409 @@ -135,7 +135,7 @@ static inline void dma_direct_sync_singl
2410 }
2411 #endif
2412
2413 -struct dma_map_ops dma_direct_ops = {
2414 +const struct dma_map_ops dma_direct_ops = {
2415 .alloc_coherent = dma_direct_alloc_coherent,
2416 .free_coherent = dma_direct_free_coherent,
2417 .map_sg = dma_direct_map_sg,
2418 diff -urNp linux-2.6.35.4/arch/powerpc/kernel/dma-iommu.c linux-2.6.35.4/arch/powerpc/kernel/dma-iommu.c
2419 --- linux-2.6.35.4/arch/powerpc/kernel/dma-iommu.c 2010-08-26 19:47:12.000000000 -0400
2420 +++ linux-2.6.35.4/arch/powerpc/kernel/dma-iommu.c 2010-09-17 20:12:09.000000000 -0400
2421 @@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct de
2422 }
2423
2424 /* We support DMA to/from any memory page via the iommu */
2425 -static int dma_iommu_dma_supported(struct device *dev, u64 mask)
2426 +int dma_iommu_dma_supported(struct device *dev, u64 mask)
2427 {
2428 struct iommu_table *tbl = get_iommu_table_base(dev);
2429
2430 diff -urNp linux-2.6.35.4/arch/powerpc/kernel/dma-swiotlb.c linux-2.6.35.4/arch/powerpc/kernel/dma-swiotlb.c
2431 --- linux-2.6.35.4/arch/powerpc/kernel/dma-swiotlb.c 2010-08-26 19:47:12.000000000 -0400
2432 +++ linux-2.6.35.4/arch/powerpc/kernel/dma-swiotlb.c 2010-09-17 20:12:09.000000000 -0400
2433 @@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
2434 * map_page, and unmap_page on highmem, use normal dma_ops
2435 * for everything else.
2436 */
2437 -struct dma_map_ops swiotlb_dma_ops = {
2438 +const struct dma_map_ops swiotlb_dma_ops = {
2439 .alloc_coherent = dma_direct_alloc_coherent,
2440 .free_coherent = dma_direct_free_coherent,
2441 .map_sg = swiotlb_map_sg_attrs,
2442 diff -urNp linux-2.6.35.4/arch/powerpc/kernel/exceptions-64e.S linux-2.6.35.4/arch/powerpc/kernel/exceptions-64e.S
2443 --- linux-2.6.35.4/arch/powerpc/kernel/exceptions-64e.S 2010-08-26 19:47:12.000000000 -0400
2444 +++ linux-2.6.35.4/arch/powerpc/kernel/exceptions-64e.S 2010-09-17 20:12:09.000000000 -0400
2445 @@ -455,6 +455,7 @@ storage_fault_common:
2446 std r14,_DAR(r1)
2447 std r15,_DSISR(r1)
2448 addi r3,r1,STACK_FRAME_OVERHEAD
2449 + bl .save_nvgprs
2450 mr r4,r14
2451 mr r5,r15
2452 ld r14,PACA_EXGEN+EX_R14(r13)
2453 @@ -464,8 +465,7 @@ storage_fault_common:
2454 cmpdi r3,0
2455 bne- 1f
2456 b .ret_from_except_lite
2457 -1: bl .save_nvgprs
2458 - mr r5,r3
2459 +1: mr r5,r3
2460 addi r3,r1,STACK_FRAME_OVERHEAD
2461 ld r4,_DAR(r1)
2462 bl .bad_page_fault
2463 diff -urNp linux-2.6.35.4/arch/powerpc/kernel/exceptions-64s.S linux-2.6.35.4/arch/powerpc/kernel/exceptions-64s.S
2464 --- linux-2.6.35.4/arch/powerpc/kernel/exceptions-64s.S 2010-08-26 19:47:12.000000000 -0400
2465 +++ linux-2.6.35.4/arch/powerpc/kernel/exceptions-64s.S 2010-09-17 20:12:09.000000000 -0400
2466 @@ -840,10 +840,10 @@ handle_page_fault:
2467 11: ld r4,_DAR(r1)
2468 ld r5,_DSISR(r1)
2469 addi r3,r1,STACK_FRAME_OVERHEAD
2470 + bl .save_nvgprs
2471 bl .do_page_fault
2472 cmpdi r3,0
2473 beq+ 13f
2474 - bl .save_nvgprs
2475 mr r5,r3
2476 addi r3,r1,STACK_FRAME_OVERHEAD
2477 lwz r4,_DAR(r1)
2478 diff -urNp linux-2.6.35.4/arch/powerpc/kernel/ibmebus.c linux-2.6.35.4/arch/powerpc/kernel/ibmebus.c
2479 --- linux-2.6.35.4/arch/powerpc/kernel/ibmebus.c 2010-08-26 19:47:12.000000000 -0400
2480 +++ linux-2.6.35.4/arch/powerpc/kernel/ibmebus.c 2010-09-17 20:12:09.000000000 -0400
2481 @@ -128,7 +128,7 @@ static int ibmebus_dma_supported(struct
2482 return 1;
2483 }
2484
2485 -static struct dma_map_ops ibmebus_dma_ops = {
2486 +static const struct dma_map_ops ibmebus_dma_ops = {
2487 .alloc_coherent = ibmebus_alloc_coherent,
2488 .free_coherent = ibmebus_free_coherent,
2489 .map_sg = ibmebus_map_sg,
2490 diff -urNp linux-2.6.35.4/arch/powerpc/kernel/kgdb.c linux-2.6.35.4/arch/powerpc/kernel/kgdb.c
2491 --- linux-2.6.35.4/arch/powerpc/kernel/kgdb.c 2010-08-26 19:47:12.000000000 -0400
2492 +++ linux-2.6.35.4/arch/powerpc/kernel/kgdb.c 2010-09-17 20:12:09.000000000 -0400
2493 @@ -128,7 +128,7 @@ static int kgdb_handle_breakpoint(struct
2494 if (kgdb_handle_exception(1, SIGTRAP, 0, regs) != 0)
2495 return 0;
2496
2497 - if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2498 + if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2499 regs->nip += 4;
2500
2501 return 1;
2502 @@ -360,7 +360,7 @@ int kgdb_arch_handle_exception(int vecto
2503 /*
2504 * Global data
2505 */
2506 -struct kgdb_arch arch_kgdb_ops = {
2507 +const struct kgdb_arch arch_kgdb_ops = {
2508 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
2509 };
2510
2511 diff -urNp linux-2.6.35.4/arch/powerpc/kernel/module_32.c linux-2.6.35.4/arch/powerpc/kernel/module_32.c
2512 --- linux-2.6.35.4/arch/powerpc/kernel/module_32.c 2010-08-26 19:47:12.000000000 -0400
2513 +++ linux-2.6.35.4/arch/powerpc/kernel/module_32.c 2010-09-17 20:12:09.000000000 -0400
2514 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2515 me->arch.core_plt_section = i;
2516 }
2517 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2518 - printk("Module doesn't contain .plt or .init.plt sections.\n");
2519 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2520 return -ENOEXEC;
2521 }
2522
2523 @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2524
2525 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2526 /* Init, or core PLT? */
2527 - if (location >= mod->module_core
2528 - && location < mod->module_core + mod->core_size)
2529 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2530 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2531 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2532 - else
2533 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2534 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2535 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2536 + else {
2537 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2538 + return ~0UL;
2539 + }
2540
2541 /* Find this entry, or if that fails, the next avail. entry */
2542 while (entry->jump[0]) {
2543 diff -urNp linux-2.6.35.4/arch/powerpc/kernel/module.c linux-2.6.35.4/arch/powerpc/kernel/module.c
2544 --- linux-2.6.35.4/arch/powerpc/kernel/module.c 2010-08-26 19:47:12.000000000 -0400
2545 +++ linux-2.6.35.4/arch/powerpc/kernel/module.c 2010-09-17 20:12:09.000000000 -0400
2546 @@ -31,11 +31,24 @@
2547
2548 LIST_HEAD(module_bug_list);
2549
2550 +#ifdef CONFIG_PAX_KERNEXEC
2551 void *module_alloc(unsigned long size)
2552 {
2553 if (size == 0)
2554 return NULL;
2555
2556 + return vmalloc(size);
2557 +}
2558 +
2559 +void *module_alloc_exec(unsigned long size)
2560 +#else
2561 +void *module_alloc(unsigned long size)
2562 +#endif
2563 +
2564 +{
2565 + if (size == 0)
2566 + return NULL;
2567 +
2568 return vmalloc_exec(size);
2569 }
2570
2571 @@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2572 vfree(module_region);
2573 }
2574
2575 +#ifdef CONFIG_PAX_KERNEXEC
2576 +void module_free_exec(struct module *mod, void *module_region)
2577 +{
2578 + module_free(mod, module_region);
2579 +}
2580 +#endif
2581 +
2582 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2583 const Elf_Shdr *sechdrs,
2584 const char *name)
2585 diff -urNp linux-2.6.35.4/arch/powerpc/kernel/pci-common.c linux-2.6.35.4/arch/powerpc/kernel/pci-common.c
2586 --- linux-2.6.35.4/arch/powerpc/kernel/pci-common.c 2010-08-26 19:47:12.000000000 -0400
2587 +++ linux-2.6.35.4/arch/powerpc/kernel/pci-common.c 2010-09-17 20:12:09.000000000 -0400
2588 @@ -51,14 +51,14 @@ resource_size_t isa_mem_base;
2589 unsigned int ppc_pci_flags = 0;
2590
2591
2592 -static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2593 +static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2594
2595 -void set_pci_dma_ops(struct dma_map_ops *dma_ops)
2596 +void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
2597 {
2598 pci_dma_ops = dma_ops;
2599 }
2600
2601 -struct dma_map_ops *get_pci_dma_ops(void)
2602 +const struct dma_map_ops *get_pci_dma_ops(void)
2603 {
2604 return pci_dma_ops;
2605 }
2606 diff -urNp linux-2.6.35.4/arch/powerpc/kernel/process.c linux-2.6.35.4/arch/powerpc/kernel/process.c
2607 --- linux-2.6.35.4/arch/powerpc/kernel/process.c 2010-08-26 19:47:12.000000000 -0400
2608 +++ linux-2.6.35.4/arch/powerpc/kernel/process.c 2010-09-17 20:12:09.000000000 -0400
2609 @@ -1215,51 +1215,3 @@ unsigned long arch_align_stack(unsigned
2610 sp -= get_random_int() & ~PAGE_MASK;
2611 return sp & ~0xf;
2612 }
2613 -
2614 -static inline unsigned long brk_rnd(void)
2615 -{
2616 - unsigned long rnd = 0;
2617 -
2618 - /* 8MB for 32bit, 1GB for 64bit */
2619 - if (is_32bit_task())
2620 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2621 - else
2622 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2623 -
2624 - return rnd << PAGE_SHIFT;
2625 -}
2626 -
2627 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2628 -{
2629 - unsigned long base = mm->brk;
2630 - unsigned long ret;
2631 -
2632 -#ifdef CONFIG_PPC_STD_MMU_64
2633 - /*
2634 - * If we are using 1TB segments and we are allowed to randomise
2635 - * the heap, we can put it above 1TB so it is backed by a 1TB
2636 - * segment. Otherwise the heap will be in the bottom 1TB
2637 - * which always uses 256MB segments and this may result in a
2638 - * performance penalty.
2639 - */
2640 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2641 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2642 -#endif
2643 -
2644 - ret = PAGE_ALIGN(base + brk_rnd());
2645 -
2646 - if (ret < mm->brk)
2647 - return mm->brk;
2648 -
2649 - return ret;
2650 -}
2651 -
2652 -unsigned long randomize_et_dyn(unsigned long base)
2653 -{
2654 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2655 -
2656 - if (ret < base)
2657 - return base;
2658 -
2659 - return ret;
2660 -}
2661 diff -urNp linux-2.6.35.4/arch/powerpc/kernel/signal_32.c linux-2.6.35.4/arch/powerpc/kernel/signal_32.c
2662 --- linux-2.6.35.4/arch/powerpc/kernel/signal_32.c 2010-08-26 19:47:12.000000000 -0400
2663 +++ linux-2.6.35.4/arch/powerpc/kernel/signal_32.c 2010-09-17 20:12:09.000000000 -0400
2664 @@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig
2665 /* Save user registers on the stack */
2666 frame = &rt_sf->uc.uc_mcontext;
2667 addr = frame;
2668 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2669 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2670 if (save_user_regs(regs, frame, 0, 1))
2671 goto badframe;
2672 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2673 diff -urNp linux-2.6.35.4/arch/powerpc/kernel/signal_64.c linux-2.6.35.4/arch/powerpc/kernel/signal_64.c
2674 --- linux-2.6.35.4/arch/powerpc/kernel/signal_64.c 2010-08-26 19:47:12.000000000 -0400
2675 +++ linux-2.6.35.4/arch/powerpc/kernel/signal_64.c 2010-09-17 20:12:09.000000000 -0400
2676 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct
2677 current->thread.fpscr.val = 0;
2678
2679 /* Set up to return from userspace. */
2680 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2681 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2682 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2683 } else {
2684 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2685 diff -urNp linux-2.6.35.4/arch/powerpc/kernel/vdso.c linux-2.6.35.4/arch/powerpc/kernel/vdso.c
2686 --- linux-2.6.35.4/arch/powerpc/kernel/vdso.c 2010-08-26 19:47:12.000000000 -0400
2687 +++ linux-2.6.35.4/arch/powerpc/kernel/vdso.c 2010-09-17 20:12:09.000000000 -0400
2688 @@ -36,6 +36,7 @@
2689 #include <asm/firmware.h>
2690 #include <asm/vdso.h>
2691 #include <asm/vdso_datapage.h>
2692 +#include <asm/mman.h>
2693
2694 #include "setup.h"
2695
2696 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2697 vdso_base = VDSO32_MBASE;
2698 #endif
2699
2700 - current->mm->context.vdso_base = 0;
2701 + current->mm->context.vdso_base = ~0UL;
2702
2703 /* vDSO has a problem and was disabled, just don't "enable" it for the
2704 * process
2705 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2706 vdso_base = get_unmapped_area(NULL, vdso_base,
2707 (vdso_pages << PAGE_SHIFT) +
2708 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2709 - 0, 0);
2710 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
2711 if (IS_ERR_VALUE(vdso_base)) {
2712 rc = vdso_base;
2713 goto fail_mmapsem;
2714 diff -urNp linux-2.6.35.4/arch/powerpc/kernel/vio.c linux-2.6.35.4/arch/powerpc/kernel/vio.c
2715 --- linux-2.6.35.4/arch/powerpc/kernel/vio.c 2010-08-26 19:47:12.000000000 -0400
2716 +++ linux-2.6.35.4/arch/powerpc/kernel/vio.c 2010-09-17 20:12:09.000000000 -0400
2717 @@ -602,11 +602,12 @@ static void vio_dma_iommu_unmap_sg(struc
2718 vio_cmo_dealloc(viodev, alloc_size);
2719 }
2720
2721 -struct dma_map_ops vio_dma_mapping_ops = {
2722 +static const struct dma_map_ops vio_dma_mapping_ops = {
2723 .alloc_coherent = vio_dma_iommu_alloc_coherent,
2724 .free_coherent = vio_dma_iommu_free_coherent,
2725 .map_sg = vio_dma_iommu_map_sg,
2726 .unmap_sg = vio_dma_iommu_unmap_sg,
2727 + .dma_supported = dma_iommu_dma_supported,
2728 .map_page = vio_dma_iommu_map_page,
2729 .unmap_page = vio_dma_iommu_unmap_page,
2730
2731 @@ -860,7 +861,6 @@ static void vio_cmo_bus_remove(struct vi
2732
2733 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
2734 {
2735 - vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
2736 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
2737 }
2738
2739 diff -urNp linux-2.6.35.4/arch/powerpc/lib/usercopy_64.c linux-2.6.35.4/arch/powerpc/lib/usercopy_64.c
2740 --- linux-2.6.35.4/arch/powerpc/lib/usercopy_64.c 2010-08-26 19:47:12.000000000 -0400
2741 +++ linux-2.6.35.4/arch/powerpc/lib/usercopy_64.c 2010-09-17 20:12:09.000000000 -0400
2742 @@ -9,22 +9,6 @@
2743 #include <linux/module.h>
2744 #include <asm/uaccess.h>
2745
2746 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2747 -{
2748 - if (likely(access_ok(VERIFY_READ, from, n)))
2749 - n = __copy_from_user(to, from, n);
2750 - else
2751 - memset(to, 0, n);
2752 - return n;
2753 -}
2754 -
2755 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2756 -{
2757 - if (likely(access_ok(VERIFY_WRITE, to, n)))
2758 - n = __copy_to_user(to, from, n);
2759 - return n;
2760 -}
2761 -
2762 unsigned long copy_in_user(void __user *to, const void __user *from,
2763 unsigned long n)
2764 {
2765 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2766 return n;
2767 }
2768
2769 -EXPORT_SYMBOL(copy_from_user);
2770 -EXPORT_SYMBOL(copy_to_user);
2771 EXPORT_SYMBOL(copy_in_user);
2772
2773 diff -urNp linux-2.6.35.4/arch/powerpc/mm/fault.c linux-2.6.35.4/arch/powerpc/mm/fault.c
2774 --- linux-2.6.35.4/arch/powerpc/mm/fault.c 2010-08-26 19:47:12.000000000 -0400
2775 +++ linux-2.6.35.4/arch/powerpc/mm/fault.c 2010-09-17 20:12:09.000000000 -0400
2776 @@ -30,6 +30,10 @@
2777 #include <linux/kprobes.h>
2778 #include <linux/kdebug.h>
2779 #include <linux/perf_event.h>
2780 +#include <linux/slab.h>
2781 +#include <linux/pagemap.h>
2782 +#include <linux/compiler.h>
2783 +#include <linux/unistd.h>
2784
2785 #include <asm/firmware.h>
2786 #include <asm/page.h>
2787 @@ -41,6 +45,7 @@
2788 #include <asm/tlbflush.h>
2789 #include <asm/siginfo.h>
2790 #include <mm/mmu_decl.h>
2791 +#include <asm/ptrace.h>
2792
2793 #ifdef CONFIG_KPROBES
2794 static inline int notify_page_fault(struct pt_regs *regs)
2795 @@ -64,6 +69,33 @@ static inline int notify_page_fault(stru
2796 }
2797 #endif
2798
2799 +#ifdef CONFIG_PAX_PAGEEXEC
2800 +/*
2801 + * PaX: decide what to do with offenders (regs->nip = fault address)
2802 + *
2803 + * returns 1 when task should be killed
2804 + */
2805 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2806 +{
2807 + return 1;
2808 +}
2809 +
2810 +void pax_report_insns(void *pc, void *sp)
2811 +{
2812 + unsigned long i;
2813 +
2814 + printk(KERN_ERR "PAX: bytes at PC: ");
2815 + for (i = 0; i < 5; i++) {
2816 + unsigned int c;
2817 + if (get_user(c, (unsigned int __user *)pc+i))
2818 + printk(KERN_CONT "???????? ");
2819 + else
2820 + printk(KERN_CONT "%08x ", c);
2821 + }
2822 + printk("\n");
2823 +}
2824 +#endif
2825 +
2826 /*
2827 * Check whether the instruction at regs->nip is a store using
2828 * an update addressing form which will update r1.
2829 @@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_re
2830 * indicate errors in DSISR but can validly be set in SRR1.
2831 */
2832 if (trap == 0x400)
2833 - error_code &= 0x48200000;
2834 + error_code &= 0x58200000;
2835 else
2836 is_write = error_code & DSISR_ISSTORE;
2837 #else
2838 @@ -257,7 +289,7 @@ good_area:
2839 * "undefined". Of those that can be set, this is the only
2840 * one which seems bad.
2841 */
2842 - if (error_code & 0x10000000)
2843 + if (error_code & DSISR_GUARDED)
2844 /* Guarded storage error. */
2845 goto bad_area;
2846 #endif /* CONFIG_8xx */
2847 @@ -272,7 +304,7 @@ good_area:
2848 * processors use the same I/D cache coherency mechanism
2849 * as embedded.
2850 */
2851 - if (error_code & DSISR_PROTFAULT)
2852 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2853 goto bad_area;
2854 #endif /* CONFIG_PPC_STD_MMU */
2855
2856 @@ -341,6 +373,23 @@ bad_area:
2857 bad_area_nosemaphore:
2858 /* User mode accesses cause a SIGSEGV */
2859 if (user_mode(regs)) {
2860 +
2861 +#ifdef CONFIG_PAX_PAGEEXEC
2862 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2863 +#ifdef CONFIG_PPC_STD_MMU
2864 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2865 +#else
2866 + if (is_exec && regs->nip == address) {
2867 +#endif
2868 + switch (pax_handle_fetch_fault(regs)) {
2869 + }
2870 +
2871 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2872 + do_group_exit(SIGKILL);
2873 + }
2874 + }
2875 +#endif
2876 +
2877 _exception(SIGSEGV, regs, code, address);
2878 return 0;
2879 }
2880 diff -urNp linux-2.6.35.4/arch/powerpc/mm/mmap_64.c linux-2.6.35.4/arch/powerpc/mm/mmap_64.c
2881 --- linux-2.6.35.4/arch/powerpc/mm/mmap_64.c 2010-08-26 19:47:12.000000000 -0400
2882 +++ linux-2.6.35.4/arch/powerpc/mm/mmap_64.c 2010-09-17 20:12:09.000000000 -0400
2883 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2884 */
2885 if (mmap_is_legacy()) {
2886 mm->mmap_base = TASK_UNMAPPED_BASE;
2887 +
2888 +#ifdef CONFIG_PAX_RANDMMAP
2889 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2890 + mm->mmap_base += mm->delta_mmap;
2891 +#endif
2892 +
2893 mm->get_unmapped_area = arch_get_unmapped_area;
2894 mm->unmap_area = arch_unmap_area;
2895 } else {
2896 mm->mmap_base = mmap_base();
2897 +
2898 +#ifdef CONFIG_PAX_RANDMMAP
2899 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2900 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2901 +#endif
2902 +
2903 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2904 mm->unmap_area = arch_unmap_area_topdown;
2905 }
2906 diff -urNp linux-2.6.35.4/arch/powerpc/mm/slice.c linux-2.6.35.4/arch/powerpc/mm/slice.c
2907 --- linux-2.6.35.4/arch/powerpc/mm/slice.c 2010-08-26 19:47:12.000000000 -0400
2908 +++ linux-2.6.35.4/arch/powerpc/mm/slice.c 2010-09-17 20:12:09.000000000 -0400
2909 @@ -98,10 +98,9 @@ static int slice_area_is_free(struct mm_
2910 if ((mm->task_size - len) < addr)
2911 return 0;
2912 vma = find_vma(mm, addr);
2913 - return (!vma || (addr + len) <= vma->vm_start);
2914 + return check_heap_stack_gap(vma, addr, len);
2915 }
2916
2917 -static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2918 {
2919 return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
2920 1ul << SLICE_LOW_SHIFT);
2921 @@ -256,7 +255,7 @@ full_search:
2922 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2923 continue;
2924 }
2925 - if (!vma || addr + len <= vma->vm_start) {
2926 + if (check_heap_stack_gap(vma, addr, len)) {
2927 /*
2928 * Remember the place where we stopped the search:
2929 */
2930 @@ -336,7 +335,7 @@ static unsigned long slice_find_area_top
2931 * return with success:
2932 */
2933 vma = find_vma(mm, addr);
2934 - if (!vma || (addr + len) <= vma->vm_start) {
2935 + if (check_heap_stack_gap(vma, addr, len)) {
2936 /* remember the address as a hint for next time */
2937 if (use_cache)
2938 mm->free_area_cache = addr;
2939 @@ -426,6 +425,11 @@ unsigned long slice_get_unmapped_area(un
2940 if (fixed && addr > (mm->task_size - len))
2941 return -EINVAL;
2942
2943 +#ifdef CONFIG_PAX_RANDMMAP
2944 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2945 + addr = 0;
2946 +#endif
2947 +
2948 /* If hint, make sure it matches our alignment restrictions */
2949 if (!fixed && addr) {
2950 addr = _ALIGN_UP(addr, 1ul << pshift);
2951 diff -urNp linux-2.6.35.4/arch/powerpc/platforms/52xx/lite5200_pm.c linux-2.6.35.4/arch/powerpc/platforms/52xx/lite5200_pm.c
2952 --- linux-2.6.35.4/arch/powerpc/platforms/52xx/lite5200_pm.c 2010-08-26 19:47:12.000000000 -0400
2953 +++ linux-2.6.35.4/arch/powerpc/platforms/52xx/lite5200_pm.c 2010-09-17 20:12:09.000000000 -0400
2954 @@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
2955 lite5200_pm_target_state = PM_SUSPEND_ON;
2956 }
2957
2958 -static struct platform_suspend_ops lite5200_pm_ops = {
2959 +static const struct platform_suspend_ops lite5200_pm_ops = {
2960 .valid = lite5200_pm_valid,
2961 .begin = lite5200_pm_begin,
2962 .prepare = lite5200_pm_prepare,
2963 diff -urNp linux-2.6.35.4/arch/powerpc/platforms/52xx/mpc52xx_pm.c linux-2.6.35.4/arch/powerpc/platforms/52xx/mpc52xx_pm.c
2964 --- linux-2.6.35.4/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2010-08-26 19:47:12.000000000 -0400
2965 +++ linux-2.6.35.4/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2010-09-17 20:12:09.000000000 -0400
2966 @@ -189,7 +189,7 @@ void mpc52xx_pm_finish(void)
2967 iounmap(mbar);
2968 }
2969
2970 -static struct platform_suspend_ops mpc52xx_pm_ops = {
2971 +static const struct platform_suspend_ops mpc52xx_pm_ops = {
2972 .valid = mpc52xx_pm_valid,
2973 .prepare = mpc52xx_pm_prepare,
2974 .enter = mpc52xx_pm_enter,
2975 diff -urNp linux-2.6.35.4/arch/powerpc/platforms/83xx/suspend.c linux-2.6.35.4/arch/powerpc/platforms/83xx/suspend.c
2976 --- linux-2.6.35.4/arch/powerpc/platforms/83xx/suspend.c 2010-08-26 19:47:12.000000000 -0400
2977 +++ linux-2.6.35.4/arch/powerpc/platforms/83xx/suspend.c 2010-09-17 20:12:09.000000000 -0400
2978 @@ -311,7 +311,7 @@ static int mpc83xx_is_pci_agent(void)
2979 return ret;
2980 }
2981
2982 -static struct platform_suspend_ops mpc83xx_suspend_ops = {
2983 +static const struct platform_suspend_ops mpc83xx_suspend_ops = {
2984 .valid = mpc83xx_suspend_valid,
2985 .begin = mpc83xx_suspend_begin,
2986 .enter = mpc83xx_suspend_enter,
2987 diff -urNp linux-2.6.35.4/arch/powerpc/platforms/cell/iommu.c linux-2.6.35.4/arch/powerpc/platforms/cell/iommu.c
2988 --- linux-2.6.35.4/arch/powerpc/platforms/cell/iommu.c 2010-08-26 19:47:12.000000000 -0400
2989 +++ linux-2.6.35.4/arch/powerpc/platforms/cell/iommu.c 2010-09-17 20:12:09.000000000 -0400
2990 @@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struc
2991
2992 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
2993
2994 -struct dma_map_ops dma_iommu_fixed_ops = {
2995 +const struct dma_map_ops dma_iommu_fixed_ops = {
2996 .alloc_coherent = dma_fixed_alloc_coherent,
2997 .free_coherent = dma_fixed_free_coherent,
2998 .map_sg = dma_fixed_map_sg,
2999 diff -urNp linux-2.6.35.4/arch/powerpc/platforms/ps3/system-bus.c linux-2.6.35.4/arch/powerpc/platforms/ps3/system-bus.c
3000 --- linux-2.6.35.4/arch/powerpc/platforms/ps3/system-bus.c 2010-08-26 19:47:12.000000000 -0400
3001 +++ linux-2.6.35.4/arch/powerpc/platforms/ps3/system-bus.c 2010-09-17 20:12:09.000000000 -0400
3002 @@ -695,7 +695,7 @@ static int ps3_dma_supported(struct devi
3003 return mask >= DMA_BIT_MASK(32);
3004 }
3005
3006 -static struct dma_map_ops ps3_sb_dma_ops = {
3007 +static const struct dma_map_ops ps3_sb_dma_ops = {
3008 .alloc_coherent = ps3_alloc_coherent,
3009 .free_coherent = ps3_free_coherent,
3010 .map_sg = ps3_sb_map_sg,
3011 @@ -705,7 +705,7 @@ static struct dma_map_ops ps3_sb_dma_ops
3012 .unmap_page = ps3_unmap_page,
3013 };
3014
3015 -static struct dma_map_ops ps3_ioc0_dma_ops = {
3016 +static const struct dma_map_ops ps3_ioc0_dma_ops = {
3017 .alloc_coherent = ps3_alloc_coherent,
3018 .free_coherent = ps3_free_coherent,
3019 .map_sg = ps3_ioc0_map_sg,
3020 diff -urNp linux-2.6.35.4/arch/powerpc/sysdev/fsl_pmc.c linux-2.6.35.4/arch/powerpc/sysdev/fsl_pmc.c
3021 --- linux-2.6.35.4/arch/powerpc/sysdev/fsl_pmc.c 2010-08-26 19:47:12.000000000 -0400
3022 +++ linux-2.6.35.4/arch/powerpc/sysdev/fsl_pmc.c 2010-09-17 20:12:09.000000000 -0400
3023 @@ -53,7 +53,7 @@ static int pmc_suspend_valid(suspend_sta
3024 return 1;
3025 }
3026
3027 -static struct platform_suspend_ops pmc_suspend_ops = {
3028 +static const struct platform_suspend_ops pmc_suspend_ops = {
3029 .valid = pmc_suspend_valid,
3030 .enter = pmc_suspend_enter,
3031 };
3032 diff -urNp linux-2.6.35.4/arch/s390/include/asm/compat.h linux-2.6.35.4/arch/s390/include/asm/compat.h
3033 --- linux-2.6.35.4/arch/s390/include/asm/compat.h 2010-08-26 19:47:12.000000000 -0400
3034 +++ linux-2.6.35.4/arch/s390/include/asm/compat.h 2010-09-17 20:12:37.000000000 -0400
3035 @@ -181,7 +181,7 @@ static inline int is_compat_task(void)
3036
3037 #endif
3038
3039 -static inline void __user *compat_alloc_user_space(long len)
3040 +static inline void __user *arch_compat_alloc_user_space(long len)
3041 {
3042 unsigned long stack;
3043
3044 diff -urNp linux-2.6.35.4/arch/s390/include/asm/elf.h linux-2.6.35.4/arch/s390/include/asm/elf.h
3045 --- linux-2.6.35.4/arch/s390/include/asm/elf.h 2010-08-26 19:47:12.000000000 -0400
3046 +++ linux-2.6.35.4/arch/s390/include/asm/elf.h 2010-09-17 20:12:09.000000000 -0400
3047 @@ -163,6 +163,13 @@ extern unsigned int vdso_enabled;
3048 that it will "exec", and that there is sufficient room for the brk. */
3049 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
3050
3051 +#ifdef CONFIG_PAX_ASLR
3052 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
3053 +
3054 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3055 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3056 +#endif
3057 +
3058 /* This yields a mask that user programs can use to figure out what
3059 instruction set this CPU supports. */
3060
3061 diff -urNp linux-2.6.35.4/arch/s390/include/asm/uaccess.h linux-2.6.35.4/arch/s390/include/asm/uaccess.h
3062 --- linux-2.6.35.4/arch/s390/include/asm/uaccess.h 2010-08-26 19:47:12.000000000 -0400
3063 +++ linux-2.6.35.4/arch/s390/include/asm/uaccess.h 2010-09-17 20:12:09.000000000 -0400
3064 @@ -234,6 +234,10 @@ static inline unsigned long __must_check
3065 copy_to_user(void __user *to, const void *from, unsigned long n)
3066 {
3067 might_fault();
3068 +
3069 + if ((long)n < 0)
3070 + return n;
3071 +
3072 if (access_ok(VERIFY_WRITE, to, n))
3073 n = __copy_to_user(to, from, n);
3074 return n;
3075 @@ -259,6 +263,9 @@ copy_to_user(void __user *to, const void
3076 static inline unsigned long __must_check
3077 __copy_from_user(void *to, const void __user *from, unsigned long n)
3078 {
3079 + if ((long)n < 0)
3080 + return n;
3081 +
3082 if (__builtin_constant_p(n) && (n <= 256))
3083 return uaccess.copy_from_user_small(n, from, to);
3084 else
3085 @@ -293,6 +300,10 @@ copy_from_user(void *to, const void __us
3086 unsigned int sz = __compiletime_object_size(to);
3087
3088 might_fault();
3089 +
3090 + if ((long)n < 0)
3091 + return n;
3092 +
3093 if (unlikely(sz != -1 && sz < n)) {
3094 copy_from_user_overflow();
3095 return n;
3096 diff -urNp linux-2.6.35.4/arch/s390/Kconfig linux-2.6.35.4/arch/s390/Kconfig
3097 --- linux-2.6.35.4/arch/s390/Kconfig 2010-08-26 19:47:12.000000000 -0400
3098 +++ linux-2.6.35.4/arch/s390/Kconfig 2010-09-17 20:12:09.000000000 -0400
3099 @@ -230,13 +230,12 @@ config AUDIT_ARCH
3100
3101 config S390_EXEC_PROTECT
3102 bool "Data execute protection"
3103 + default y
3104 help
3105 This option allows to enable a buffer overflow protection for user
3106 - space programs and it also selects the addressing mode option above.
3107 - The kernel parameter noexec=on will enable this feature and also
3108 - switch the addressing modes, default is disabled. Enabling this (via
3109 - kernel parameter) on machines earlier than IBM System z9-109 EC/BC
3110 - will reduce system performance.
3111 + space programs.
3112 + Enabling this on machines earlier than IBM System z9-109 EC/BC will
3113 + reduce system performance.
3114
3115 comment "Code generation options"
3116
3117 diff -urNp linux-2.6.35.4/arch/s390/kernel/module.c linux-2.6.35.4/arch/s390/kernel/module.c
3118 --- linux-2.6.35.4/arch/s390/kernel/module.c 2010-08-26 19:47:12.000000000 -0400
3119 +++ linux-2.6.35.4/arch/s390/kernel/module.c 2010-09-17 20:12:09.000000000 -0400
3120 @@ -168,11 +168,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
3121
3122 /* Increase core size by size of got & plt and set start
3123 offsets for got and plt. */
3124 - me->core_size = ALIGN(me->core_size, 4);
3125 - me->arch.got_offset = me->core_size;
3126 - me->core_size += me->arch.got_size;
3127 - me->arch.plt_offset = me->core_size;
3128 - me->core_size += me->arch.plt_size;
3129 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
3130 + me->arch.got_offset = me->core_size_rw;
3131 + me->core_size_rw += me->arch.got_size;
3132 + me->arch.plt_offset = me->core_size_rx;
3133 + me->core_size_rx += me->arch.plt_size;
3134 return 0;
3135 }
3136
3137 @@ -258,7 +258,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3138 if (info->got_initialized == 0) {
3139 Elf_Addr *gotent;
3140
3141 - gotent = me->module_core + me->arch.got_offset +
3142 + gotent = me->module_core_rw + me->arch.got_offset +
3143 info->got_offset;
3144 *gotent = val;
3145 info->got_initialized = 1;
3146 @@ -282,7 +282,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3147 else if (r_type == R_390_GOTENT ||
3148 r_type == R_390_GOTPLTENT)
3149 *(unsigned int *) loc =
3150 - (val + (Elf_Addr) me->module_core - loc) >> 1;
3151 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3152 else if (r_type == R_390_GOT64 ||
3153 r_type == R_390_GOTPLT64)
3154 *(unsigned long *) loc = val;
3155 @@ -296,7 +296,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3156 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3157 if (info->plt_initialized == 0) {
3158 unsigned int *ip;
3159 - ip = me->module_core + me->arch.plt_offset +
3160 + ip = me->module_core_rx + me->arch.plt_offset +
3161 info->plt_offset;
3162 #ifndef CONFIG_64BIT
3163 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3164 @@ -321,7 +321,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3165 val - loc + 0xffffUL < 0x1ffffeUL) ||
3166 (r_type == R_390_PLT32DBL &&
3167 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3168 - val = (Elf_Addr) me->module_core +
3169 + val = (Elf_Addr) me->module_core_rx +
3170 me->arch.plt_offset +
3171 info->plt_offset;
3172 val += rela->r_addend - loc;
3173 @@ -343,7 +343,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3174 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3175 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3176 val = val + rela->r_addend -
3177 - ((Elf_Addr) me->module_core + me->arch.got_offset);
3178 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3179 if (r_type == R_390_GOTOFF16)
3180 *(unsigned short *) loc = val;
3181 else if (r_type == R_390_GOTOFF32)
3182 @@ -353,7 +353,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3183 break;
3184 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3185 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3186 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
3187 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3188 rela->r_addend - loc;
3189 if (r_type == R_390_GOTPC)
3190 *(unsigned int *) loc = val;
3191 diff -urNp linux-2.6.35.4/arch/s390/kernel/setup.c linux-2.6.35.4/arch/s390/kernel/setup.c
3192 --- linux-2.6.35.4/arch/s390/kernel/setup.c 2010-08-26 19:47:12.000000000 -0400
3193 +++ linux-2.6.35.4/arch/s390/kernel/setup.c 2010-09-17 20:12:09.000000000 -0400
3194 @@ -281,7 +281,7 @@ static int __init early_parse_mem(char *
3195 }
3196 early_param("mem", early_parse_mem);
3197
3198 -unsigned int user_mode = HOME_SPACE_MODE;
3199 +unsigned int user_mode = SECONDARY_SPACE_MODE;
3200 EXPORT_SYMBOL_GPL(user_mode);
3201
3202 static int set_amode_and_uaccess(unsigned long user_amode,
3203 @@ -310,17 +310,6 @@ static int set_amode_and_uaccess(unsigne
3204 }
3205 }
3206
3207 -/*
3208 - * Switch kernel/user addressing modes?
3209 - */
3210 -static int __init early_parse_switch_amode(char *p)
3211 -{
3212 - if (user_mode != SECONDARY_SPACE_MODE)
3213 - user_mode = PRIMARY_SPACE_MODE;
3214 - return 0;
3215 -}
3216 -early_param("switch_amode", early_parse_switch_amode);
3217 -
3218 static int __init early_parse_user_mode(char *p)
3219 {
3220 if (p && strcmp(p, "primary") == 0)
3221 @@ -337,20 +326,6 @@ static int __init early_parse_user_mode(
3222 }
3223 early_param("user_mode", early_parse_user_mode);
3224
3225 -#ifdef CONFIG_S390_EXEC_PROTECT
3226 -/*
3227 - * Enable execute protection?
3228 - */
3229 -static int __init early_parse_noexec(char *p)
3230 -{
3231 - if (!strncmp(p, "off", 3))
3232 - return 0;
3233 - user_mode = SECONDARY_SPACE_MODE;
3234 - return 0;
3235 -}
3236 -early_param("noexec", early_parse_noexec);
3237 -#endif /* CONFIG_S390_EXEC_PROTECT */
3238 -
3239 static void setup_addressing_mode(void)
3240 {
3241 if (user_mode == SECONDARY_SPACE_MODE) {
3242 diff -urNp linux-2.6.35.4/arch/s390/mm/maccess.c linux-2.6.35.4/arch/s390/mm/maccess.c
3243 --- linux-2.6.35.4/arch/s390/mm/maccess.c 2010-08-26 19:47:12.000000000 -0400
3244 +++ linux-2.6.35.4/arch/s390/mm/maccess.c 2010-09-17 20:12:09.000000000 -0400
3245 @@ -45,7 +45,7 @@ static long probe_kernel_write_odd(void
3246 return rc ? rc : count;
3247 }
3248
3249 -long probe_kernel_write(void *dst, void *src, size_t size)
3250 +long probe_kernel_write(void *dst, const void *src, size_t size)
3251 {
3252 long copied = 0;
3253
3254 diff -urNp linux-2.6.35.4/arch/s390/mm/mmap.c linux-2.6.35.4/arch/s390/mm/mmap.c
3255 --- linux-2.6.35.4/arch/s390/mm/mmap.c 2010-08-26 19:47:12.000000000 -0400
3256 +++ linux-2.6.35.4/arch/s390/mm/mmap.c 2010-09-17 20:12:09.000000000 -0400
3257 @@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_str
3258 */
3259 if (mmap_is_legacy()) {
3260 mm->mmap_base = TASK_UNMAPPED_BASE;
3261 +
3262 +#ifdef CONFIG_PAX_RANDMMAP
3263 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3264 + mm->mmap_base += mm->delta_mmap;
3265 +#endif
3266 +
3267 mm->get_unmapped_area = arch_get_unmapped_area;
3268 mm->unmap_area = arch_unmap_area;
3269 } else {
3270 mm->mmap_base = mmap_base();
3271 +
3272 +#ifdef CONFIG_PAX_RANDMMAP
3273 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3274 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3275 +#endif
3276 +
3277 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3278 mm->unmap_area = arch_unmap_area_topdown;
3279 }
3280 @@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_str
3281 */
3282 if (mmap_is_legacy()) {
3283 mm->mmap_base = TASK_UNMAPPED_BASE;
3284 +
3285 +#ifdef CONFIG_PAX_RANDMMAP
3286 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3287 + mm->mmap_base += mm->delta_mmap;
3288 +#endif
3289 +
3290 mm->get_unmapped_area = s390_get_unmapped_area;
3291 mm->unmap_area = arch_unmap_area;
3292 } else {
3293 mm->mmap_base = mmap_base();
3294 +
3295 +#ifdef CONFIG_PAX_RANDMMAP
3296 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3297 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3298 +#endif
3299 +
3300 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3301 mm->unmap_area = arch_unmap_area_topdown;
3302 }
3303 diff -urNp linux-2.6.35.4/arch/sh/boards/mach-hp6xx/pm.c linux-2.6.35.4/arch/sh/boards/mach-hp6xx/pm.c
3304 --- linux-2.6.35.4/arch/sh/boards/mach-hp6xx/pm.c 2010-08-26 19:47:12.000000000 -0400
3305 +++ linux-2.6.35.4/arch/sh/boards/mach-hp6xx/pm.c 2010-09-17 20:12:09.000000000 -0400
3306 @@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_
3307 return 0;
3308 }
3309
3310 -static struct platform_suspend_ops hp6x0_pm_ops = {
3311 +static const struct platform_suspend_ops hp6x0_pm_ops = {
3312 .enter = hp6x0_pm_enter,
3313 .valid = suspend_valid_only_mem,
3314 };
3315 diff -urNp linux-2.6.35.4/arch/sh/include/asm/dma-mapping.h linux-2.6.35.4/arch/sh/include/asm/dma-mapping.h
3316 --- linux-2.6.35.4/arch/sh/include/asm/dma-mapping.h 2010-08-26 19:47:12.000000000 -0400
3317 +++ linux-2.6.35.4/arch/sh/include/asm/dma-mapping.h 2010-09-17 20:12:09.000000000 -0400
3318 @@ -1,10 +1,10 @@
3319 #ifndef __ASM_SH_DMA_MAPPING_H
3320 #define __ASM_SH_DMA_MAPPING_H
3321
3322 -extern struct dma_map_ops *dma_ops;
3323 +extern const struct dma_map_ops *dma_ops;
3324 extern void no_iommu_init(void);
3325
3326 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
3327 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
3328 {
3329 return dma_ops;
3330 }
3331 @@ -14,7 +14,7 @@ static inline struct dma_map_ops *get_dm
3332
3333 static inline int dma_supported(struct device *dev, u64 mask)
3334 {
3335 - struct dma_map_ops *ops = get_dma_ops(dev);
3336 + const struct dma_map_ops *ops = get_dma_ops(dev);
3337
3338 if (ops->dma_supported)
3339 return ops->dma_supported(dev, mask);
3340 @@ -24,7 +24,7 @@ static inline int dma_supported(struct d
3341
3342 static inline int dma_set_mask(struct device *dev, u64 mask)
3343 {
3344 - struct dma_map_ops *ops = get_dma_ops(dev);
3345 + const struct dma_map_ops *ops = get_dma_ops(dev);
3346
3347 if (!dev->dma_mask || !dma_supported(dev, mask))
3348 return -EIO;
3349 @@ -59,7 +59,7 @@ static inline int dma_get_cache_alignmen
3350
3351 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
3352 {
3353 - struct dma_map_ops *ops = get_dma_ops(dev);
3354 + const struct dma_map_ops *ops = get_dma_ops(dev);
3355
3356 if (ops->mapping_error)
3357 return ops->mapping_error(dev, dma_addr);
3358 @@ -70,7 +70,7 @@ static inline int dma_mapping_error(stru
3359 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
3360 dma_addr_t *dma_handle, gfp_t gfp)
3361 {
3362 - struct dma_map_ops *ops = get_dma_ops(dev);
3363 + const struct dma_map_ops *ops = get_dma_ops(dev);
3364 void *memory;
3365
3366 if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
3367 @@ -87,7 +87,7 @@ static inline void *dma_alloc_coherent(s
3368 static inline void dma_free_coherent(struct device *dev, size_t size,
3369 void *vaddr, dma_addr_t dma_handle)
3370 {
3371 - struct dma_map_ops *ops = get_dma_ops(dev);
3372 + const struct dma_map_ops *ops = get_dma_ops(dev);
3373
3374 if (dma_release_from_coherent(dev, get_order(size), vaddr))
3375 return;
3376 diff -urNp linux-2.6.35.4/arch/sh/kernel/cpu/shmobile/pm.c linux-2.6.35.4/arch/sh/kernel/cpu/shmobile/pm.c
3377 --- linux-2.6.35.4/arch/sh/kernel/cpu/shmobile/pm.c 2010-08-26 19:47:12.000000000 -0400
3378 +++ linux-2.6.35.4/arch/sh/kernel/cpu/shmobile/pm.c 2010-09-17 20:12:09.000000000 -0400
3379 @@ -141,7 +141,7 @@ static int sh_pm_enter(suspend_state_t s
3380 return 0;
3381 }
3382
3383 -static struct platform_suspend_ops sh_pm_ops = {
3384 +static const struct platform_suspend_ops sh_pm_ops = {
3385 .enter = sh_pm_enter,
3386 .valid = suspend_valid_only_mem,
3387 };
3388 diff -urNp linux-2.6.35.4/arch/sh/kernel/dma-nommu.c linux-2.6.35.4/arch/sh/kernel/dma-nommu.c
3389 --- linux-2.6.35.4/arch/sh/kernel/dma-nommu.c 2010-08-26 19:47:12.000000000 -0400
3390 +++ linux-2.6.35.4/arch/sh/kernel/dma-nommu.c 2010-09-17 20:12:09.000000000 -0400
3391 @@ -62,7 +62,7 @@ static void nommu_sync_sg(struct device
3392 }
3393 #endif
3394
3395 -struct dma_map_ops nommu_dma_ops = {
3396 +const struct dma_map_ops nommu_dma_ops = {
3397 .alloc_coherent = dma_generic_alloc_coherent,
3398 .free_coherent = dma_generic_free_coherent,
3399 .map_page = nommu_map_page,
3400 diff -urNp linux-2.6.35.4/arch/sh/kernel/kgdb.c linux-2.6.35.4/arch/sh/kernel/kgdb.c
3401 --- linux-2.6.35.4/arch/sh/kernel/kgdb.c 2010-08-26 19:47:12.000000000 -0400
3402 +++ linux-2.6.35.4/arch/sh/kernel/kgdb.c 2010-09-17 20:12:09.000000000 -0400
3403 @@ -319,7 +319,7 @@ void kgdb_arch_exit(void)
3404 unregister_die_notifier(&kgdb_notifier);
3405 }
3406
3407 -struct kgdb_arch arch_kgdb_ops = {
3408 +const struct kgdb_arch arch_kgdb_ops = {
3409 /* Breakpoint instruction: trapa #0x3c */
3410 #ifdef CONFIG_CPU_LITTLE_ENDIAN
3411 .gdb_bpt_instr = { 0x3c, 0xc3 },
3412 diff -urNp linux-2.6.35.4/arch/sh/mm/consistent.c linux-2.6.35.4/arch/sh/mm/consistent.c
3413 --- linux-2.6.35.4/arch/sh/mm/consistent.c 2010-08-26 19:47:12.000000000 -0400
3414 +++ linux-2.6.35.4/arch/sh/mm/consistent.c 2010-09-17 20:12:09.000000000 -0400
3415 @@ -22,7 +22,7 @@
3416
3417 #define PREALLOC_DMA_DEBUG_ENTRIES 4096
3418
3419 -struct dma_map_ops *dma_ops;
3420 +const struct dma_map_ops *dma_ops;
3421 EXPORT_SYMBOL(dma_ops);
3422
3423 static int __init dma_init(void)
3424 diff -urNp linux-2.6.35.4/arch/sh/mm/mmap.c linux-2.6.35.4/arch/sh/mm/mmap.c
3425 --- linux-2.6.35.4/arch/sh/mm/mmap.c 2010-08-26 19:47:12.000000000 -0400
3426 +++ linux-2.6.35.4/arch/sh/mm/mmap.c 2010-09-17 20:12:09.000000000 -0400
3427 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
3428 addr = PAGE_ALIGN(addr);
3429
3430 vma = find_vma(mm, addr);
3431 - if (TASK_SIZE - len >= addr &&
3432 - (!vma || addr + len <= vma->vm_start))
3433 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3434 return addr;
3435 }
3436
3437 @@ -106,7 +105,7 @@ full_search:
3438 }
3439 return -ENOMEM;
3440 }
3441 - if (likely(!vma || addr + len <= vma->vm_start)) {
3442 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3443 /*
3444 * Remember the place where we stopped the search:
3445 */
3446 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
3447 addr = PAGE_ALIGN(addr);
3448
3449 vma = find_vma(mm, addr);
3450 - if (TASK_SIZE - len >= addr &&
3451 - (!vma || addr + len <= vma->vm_start))
3452 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3453 return addr;
3454 }
3455
3456 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
3457 /* make sure it can fit in the remaining address space */
3458 if (likely(addr > len)) {
3459 vma = find_vma(mm, addr-len);
3460 - if (!vma || addr <= vma->vm_start) {
3461 + if (check_heap_stack_gap(vma, addr - len, len)) {
3462 /* remember the address as a hint for next time */
3463 return (mm->free_area_cache = addr-len);
3464 }
3465 @@ -199,7 +197,7 @@ arch_get_unmapped_area_topdown(struct fi
3466 * return with success:
3467 */
3468 vma = find_vma(mm, addr);
3469 - if (likely(!vma || addr+len <= vma->vm_start)) {
3470 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3471 /* remember the address as a hint for next time */
3472 return (mm->free_area_cache = addr);
3473 }
3474 diff -urNp linux-2.6.35.4/arch/sparc/include/asm/atomic_64.h linux-2.6.35.4/arch/sparc/include/asm/atomic_64.h
3475 --- linux-2.6.35.4/arch/sparc/include/asm/atomic_64.h 2010-08-26 19:47:12.000000000 -0400
3476 +++ linux-2.6.35.4/arch/sparc/include/asm/atomic_64.h 2010-09-17 20:12:09.000000000 -0400
3477 @@ -14,18 +14,40 @@
3478 #define ATOMIC64_INIT(i) { (i) }
3479
3480 #define atomic_read(v) (*(volatile int *)&(v)->counter)
3481 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3482 +{
3483 + return v->counter;
3484 +}
3485 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
3486 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3487 +{
3488 + return v->counter;
3489 +}
3490
3491 #define atomic_set(v, i) (((v)->counter) = i)
3492 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3493 +{
3494 + v->counter = i;
3495 +}
3496 #define atomic64_set(v, i) (((v)->counter) = i)
3497 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3498 +{
3499 + v->counter = i;
3500 +}
3501
3502 extern void atomic_add(int, atomic_t *);
3503 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3504 extern void atomic64_add(long, atomic64_t *);
3505 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3506 extern void atomic_sub(int, atomic_t *);
3507 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3508 extern void atomic64_sub(long, atomic64_t *);
3509 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3510
3511 extern int atomic_add_ret(int, atomic_t *);
3512 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3513 extern long atomic64_add_ret(long, atomic64_t *);
3514 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3515 extern int atomic_sub_ret(int, atomic_t *);
3516 extern long atomic64_sub_ret(long, atomic64_t *);
3517
3518 @@ -33,7 +55,15 @@ extern long atomic64_sub_ret(long, atomi
3519 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3520
3521 #define atomic_inc_return(v) atomic_add_ret(1, v)
3522 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3523 +{
3524 + return atomic_add_ret_unchecked(1, v);
3525 +}
3526 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3527 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3528 +{
3529 + return atomic64_add_ret_unchecked(1, v);
3530 +}
3531
3532 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3533 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3534 @@ -59,10 +89,26 @@ extern long atomic64_sub_ret(long, atomi
3535 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3536
3537 #define atomic_inc(v) atomic_add(1, v)
3538 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3539 +{
3540 + atomic_add_unchecked(1, v);
3541 +}
3542 #define atomic64_inc(v) atomic64_add(1, v)
3543 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3544 +{
3545 + atomic64_add_unchecked(1, v);
3546 +}
3547
3548 #define atomic_dec(v) atomic_sub(1, v)
3549 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3550 +{
3551 + atomic_sub_unchecked(1, v);
3552 +}
3553 #define atomic64_dec(v) atomic64_sub(1, v)
3554 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3555 +{
3556 + atomic64_sub_unchecked(1, v);
3557 +}
3558
3559 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3560 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3561 @@ -72,17 +118,28 @@ extern long atomic64_sub_ret(long, atomi
3562
3563 static inline int atomic_add_unless(atomic_t *v, int a, int u)
3564 {
3565 - int c, old;
3566 + int c, old, new;
3567 c = atomic_read(v);
3568 for (;;) {
3569 - if (unlikely(c == (u)))
3570 + if (unlikely(c == u))
3571 break;
3572 - old = atomic_cmpxchg((v), c, c + (a));
3573 +
3574 + asm volatile("addcc %2, %0, %0\n"
3575 +
3576 +#ifdef CONFIG_PAX_REFCOUNT
3577 + "tvs %%icc, 6\n"
3578 +#endif
3579 +
3580 + : "=r" (new)
3581 + : "0" (c), "ir" (a)
3582 + : "cc");
3583 +
3584 + old = atomic_cmpxchg(v, c, new);
3585 if (likely(old == c))
3586 break;
3587 c = old;
3588 }
3589 - return c != (u);
3590 + return c != u;
3591 }
3592
3593 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
3594 @@ -93,17 +150,28 @@ static inline int atomic_add_unless(atom
3595
3596 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3597 {
3598 - long c, old;
3599 + long c, old, new;
3600 c = atomic64_read(v);
3601 for (;;) {
3602 - if (unlikely(c == (u)))
3603 + if (unlikely(c == u))
3604 break;
3605 - old = atomic64_cmpxchg((v), c, c + (a));
3606 +
3607 + asm volatile("addcc %2, %0, %0\n"
3608 +
3609 +#ifdef CONFIG_PAX_REFCOUNT
3610 + "tvs %%xcc, 6\n"
3611 +#endif
3612 +
3613 + : "=r" (new)
3614 + : "0" (c), "ir" (a)
3615 + : "cc");
3616 +
3617 + old = atomic64_cmpxchg(v, c, new);
3618 if (likely(old == c))
3619 break;
3620 c = old;
3621 }
3622 - return c != (u);
3623 + return c != u;
3624 }
3625
3626 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3627 diff -urNp linux-2.6.35.4/arch/sparc/include/asm/compat.h linux-2.6.35.4/arch/sparc/include/asm/compat.h
3628 --- linux-2.6.35.4/arch/sparc/include/asm/compat.h 2010-08-26 19:47:12.000000000 -0400
3629 +++ linux-2.6.35.4/arch/sparc/include/asm/compat.h 2010-09-17 20:12:37.000000000 -0400
3630 @@ -167,7 +167,7 @@ static inline compat_uptr_t ptr_to_compa
3631 return (u32)(unsigned long)uptr;
3632 }
3633
3634 -static inline void __user *compat_alloc_user_space(long len)
3635 +static inline void __user *arch_compat_alloc_user_space(long len)
3636 {
3637 struct pt_regs *regs = current_thread_info()->kregs;
3638 unsigned long usp = regs->u_regs[UREG_I6];
3639 diff -urNp linux-2.6.35.4/arch/sparc/include/asm/dma-mapping.h linux-2.6.35.4/arch/sparc/include/asm/dma-mapping.h
3640 --- linux-2.6.35.4/arch/sparc/include/asm/dma-mapping.h 2010-08-26 19:47:12.000000000 -0400
3641 +++ linux-2.6.35.4/arch/sparc/include/asm/dma-mapping.h 2010-09-17 20:12:09.000000000 -0400
3642 @@ -13,10 +13,10 @@ extern int dma_supported(struct device *
3643 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
3644 #define dma_is_consistent(d, h) (1)
3645
3646 -extern struct dma_map_ops *dma_ops, pci32_dma_ops;
3647 +extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
3648 extern struct bus_type pci_bus_type;
3649
3650 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
3651 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
3652 {
3653 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
3654 if (dev->bus == &pci_bus_type)
3655 @@ -30,7 +30,7 @@ static inline struct dma_map_ops *get_dm
3656 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
3657 dma_addr_t *dma_handle, gfp_t flag)
3658 {
3659 - struct dma_map_ops *ops = get_dma_ops(dev);
3660 + const struct dma_map_ops *ops = get_dma_ops(dev);
3661 void *cpu_addr;
3662
3663 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
3664 @@ -41,7 +41,7 @@ static inline void *dma_alloc_coherent(s
3665 static inline void dma_free_coherent(struct device *dev, size_t size,
3666 void *cpu_addr, dma_addr_t dma_handle)
3667 {
3668 - struct dma_map_ops *ops = get_dma_ops(dev);
3669 + const struct dma_map_ops *ops = get_dma_ops(dev);
3670
3671 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
3672 ops->free_coherent(dev, size, cpu_addr, dma_handle);
3673 diff -urNp linux-2.6.35.4/arch/sparc/include/asm/elf_32.h linux-2.6.35.4/arch/sparc/include/asm/elf_32.h
3674 --- linux-2.6.35.4/arch/sparc/include/asm/elf_32.h 2010-08-26 19:47:12.000000000 -0400
3675 +++ linux-2.6.35.4/arch/sparc/include/asm/elf_32.h 2010-09-17 20:12:09.000000000 -0400
3676 @@ -114,6 +114,13 @@ typedef struct {
3677
3678 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3679
3680 +#ifdef CONFIG_PAX_ASLR
3681 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3682 +
3683 +#define PAX_DELTA_MMAP_LEN 16
3684 +#define PAX_DELTA_STACK_LEN 16
3685 +#endif
3686 +
3687 /* This yields a mask that user programs can use to figure out what
3688 instruction set this cpu supports. This can NOT be done in userspace
3689 on Sparc. */
3690 diff -urNp linux-2.6.35.4/arch/sparc/include/asm/elf_64.h linux-2.6.35.4/arch/sparc/include/asm/elf_64.h
3691 --- linux-2.6.35.4/arch/sparc/include/asm/elf_64.h 2010-08-26 19:47:12.000000000 -0400
3692 +++ linux-2.6.35.4/arch/sparc/include/asm/elf_64.h 2010-09-17 20:12:09.000000000 -0400
3693 @@ -162,6 +162,12 @@ typedef struct {
3694 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3695 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3696
3697 +#ifdef CONFIG_PAX_ASLR
3698 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3699 +
3700 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3701 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3702 +#endif
3703
3704 /* This yields a mask that user programs can use to figure out what
3705 instruction set this cpu supports. */
3706 diff -urNp linux-2.6.35.4/arch/sparc/include/asm/pgtable_32.h linux-2.6.35.4/arch/sparc/include/asm/pgtable_32.h
3707 --- linux-2.6.35.4/arch/sparc/include/asm/pgtable_32.h 2010-08-26 19:47:12.000000000 -0400
3708 +++ linux-2.6.35.4/arch/sparc/include/asm/pgtable_32.h 2010-09-17 20:12:09.000000000 -0400
3709 @@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3710 BTFIXUPDEF_INT(page_none)
3711 BTFIXUPDEF_INT(page_copy)
3712 BTFIXUPDEF_INT(page_readonly)
3713 +
3714 +#ifdef CONFIG_PAX_PAGEEXEC
3715 +BTFIXUPDEF_INT(page_shared_noexec)
3716 +BTFIXUPDEF_INT(page_copy_noexec)
3717 +BTFIXUPDEF_INT(page_readonly_noexec)
3718 +#endif
3719 +
3720 BTFIXUPDEF_INT(page_kernel)
3721
3722 #define PMD_SHIFT SUN4C_PMD_SHIFT
3723 @@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
3724 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3725 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3726
3727 +#ifdef CONFIG_PAX_PAGEEXEC
3728 +extern pgprot_t PAGE_SHARED_NOEXEC;
3729 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3730 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3731 +#else
3732 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3733 +# define PAGE_COPY_NOEXEC PAGE_COPY
3734 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3735 +#endif
3736 +
3737 extern unsigned long page_kernel;
3738
3739 #ifdef MODULE
3740 diff -urNp linux-2.6.35.4/arch/sparc/include/asm/pgtsrmmu.h linux-2.6.35.4/arch/sparc/include/asm/pgtsrmmu.h
3741 --- linux-2.6.35.4/arch/sparc/include/asm/pgtsrmmu.h 2010-08-26 19:47:12.000000000 -0400
3742 +++ linux-2.6.35.4/arch/sparc/include/asm/pgtsrmmu.h 2010-09-17 20:12:09.000000000 -0400
3743 @@ -115,6 +115,13 @@
3744 SRMMU_EXEC | SRMMU_REF)
3745 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3746 SRMMU_EXEC | SRMMU_REF)
3747 +
3748 +#ifdef CONFIG_PAX_PAGEEXEC
3749 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3750 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3751 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3752 +#endif
3753 +
3754 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3755 SRMMU_DIRTY | SRMMU_REF)
3756
3757 diff -urNp linux-2.6.35.4/arch/sparc/include/asm/spinlock_64.h linux-2.6.35.4/arch/sparc/include/asm/spinlock_64.h
3758 --- linux-2.6.35.4/arch/sparc/include/asm/spinlock_64.h 2010-08-26 19:47:12.000000000 -0400
3759 +++ linux-2.6.35.4/arch/sparc/include/asm/spinlock_64.h 2010-09-17 20:12:09.000000000 -0400
3760 @@ -99,7 +99,12 @@ static void inline arch_read_lock(arch_r
3761 __asm__ __volatile__ (
3762 "1: ldsw [%2], %0\n"
3763 " brlz,pn %0, 2f\n"
3764 -"4: add %0, 1, %1\n"
3765 +"4: addcc %0, 1, %1\n"
3766 +
3767 +#ifdef CONFIG_PAX_REFCOUNT
3768 +" tvs %%icc, 6\n"
3769 +#endif
3770 +
3771 " cas [%2], %0, %1\n"
3772 " cmp %0, %1\n"
3773 " bne,pn %%icc, 1b\n"
3774 @@ -112,7 +117,7 @@ static void inline arch_read_lock(arch_r
3775 " .previous"
3776 : "=&r" (tmp1), "=&r" (tmp2)
3777 : "r" (lock)
3778 - : "memory");
3779 + : "memory", "cc");
3780 }
3781
3782 static int inline arch_read_trylock(arch_rwlock_t *lock)
3783 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch
3784 "1: ldsw [%2], %0\n"
3785 " brlz,a,pn %0, 2f\n"
3786 " mov 0, %0\n"
3787 -" add %0, 1, %1\n"
3788 +" addcc %0, 1, %1\n"
3789 +
3790 +#ifdef CONFIG_PAX_REFCOUNT
3791 +" tvs %%icc, 6\n"
3792 +#endif
3793 +
3794 " cas [%2], %0, %1\n"
3795 " cmp %0, %1\n"
3796 " bne,pn %%icc, 1b\n"
3797 @@ -142,7 +152,12 @@ static void inline arch_read_unlock(arch
3798
3799 __asm__ __volatile__(
3800 "1: lduw [%2], %0\n"
3801 -" sub %0, 1, %1\n"
3802 +" subcc %0, 1, %1\n"
3803 +
3804 +#ifdef CONFIG_PAX_REFCOUNT
3805 +" tvs %%icc, 6\n"
3806 +#endif
3807 +
3808 " cas [%2], %0, %1\n"
3809 " cmp %0, %1\n"
3810 " bne,pn %%xcc, 1b\n"
3811 diff -urNp linux-2.6.35.4/arch/sparc/include/asm/uaccess_32.h linux-2.6.35.4/arch/sparc/include/asm/uaccess_32.h
3812 --- linux-2.6.35.4/arch/sparc/include/asm/uaccess_32.h 2010-08-26 19:47:12.000000000 -0400
3813 +++ linux-2.6.35.4/arch/sparc/include/asm/uaccess_32.h 2010-09-17 20:12:09.000000000 -0400
3814 @@ -249,14 +249,25 @@ extern unsigned long __copy_user(void __
3815
3816 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3817 {
3818 - if (n && __access_ok((unsigned long) to, n))
3819 + if ((long)n < 0)
3820 + return n;
3821 +
3822 + if (n && __access_ok((unsigned long) to, n)) {
3823 + if (!__builtin_constant_p(n))
3824 + check_object_size(from, n, true);
3825 return __copy_user(to, (__force void __user *) from, n);
3826 - else
3827 + } else
3828 return n;
3829 }
3830
3831 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3832 {
3833 + if ((long)n < 0)
3834 + return n;
3835 +
3836 + if (!__builtin_constant_p(n))
3837 + check_object_size(from, n, true);
3838 +
3839 return __copy_user(to, (__force void __user *) from, n);
3840 }
3841
3842 @@ -272,19 +283,27 @@ static inline unsigned long copy_from_us
3843 {
3844 int sz = __compiletime_object_size(to);
3845
3846 + if ((long)n < 0)
3847 + return n;
3848 +
3849 if (unlikely(sz != -1 && sz < n)) {
3850 copy_from_user_overflow();
3851 return n;
3852 }
3853
3854 - if (n && __access_ok((unsigned long) from, n))
3855 + if (n && __access_ok((unsigned long) from, n)) {
3856 + if (!__builtin_constant_p(n))
3857 + check_object_size(to, n, false);
3858 return __copy_user((__force void __user *) to, from, n);
3859 - else
3860 + } else
3861 return n;
3862 }
3863
3864 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3865 {
3866 + if ((long)n < 0)
3867 + return n;
3868 +
3869 return __copy_user((__force void __user *) to, from, n);
3870 }
3871
3872 diff -urNp linux-2.6.35.4/arch/sparc/include/asm/uaccess_64.h linux-2.6.35.4/arch/sparc/include/asm/uaccess_64.h
3873 --- linux-2.6.35.4/arch/sparc/include/asm/uaccess_64.h 2010-08-26 19:47:12.000000000 -0400
3874 +++ linux-2.6.35.4/arch/sparc/include/asm/uaccess_64.h 2010-09-17 20:12:09.000000000 -0400
3875 @@ -10,6 +10,7 @@
3876 #include <linux/compiler.h>
3877 #include <linux/string.h>
3878 #include <linux/thread_info.h>
3879 +#include <linux/kernel.h>
3880 #include <asm/asi.h>
3881 #include <asm/system.h>
3882 #include <asm/spitfire.h>
3883 @@ -224,6 +225,12 @@ copy_from_user(void *to, const void __us
3884 int sz = __compiletime_object_size(to);
3885 unsigned long ret = size;
3886
3887 + if ((long)size < 0 || size > INT_MAX)
3888 + return size;
3889 +
3890 + if (!__builtin_constant_p(size))
3891 + check_object_size(to, size, false);
3892 +
3893 if (likely(sz == -1 || sz >= size)) {
3894 ret = ___copy_from_user(to, from, size);
3895 if (unlikely(ret))
3896 @@ -243,8 +250,15 @@ extern unsigned long copy_to_user_fixup(
3897 static inline unsigned long __must_check
3898 copy_to_user(void __user *to, const void *from, unsigned long size)
3899 {
3900 - unsigned long ret = ___copy_to_user(to, from, size);
3901 + unsigned long ret;
3902 +
3903 + if ((long)size < 0 || size > INT_MAX)
3904 + return size;
3905 +
3906 + if (!__builtin_constant_p(size))
3907 + check_object_size(from, size, true);
3908
3909 + ret = ___copy_to_user(to, from, size);
3910 if (unlikely(ret))
3911 ret = copy_to_user_fixup(to, from, size);
3912 return ret;
3913 diff -urNp linux-2.6.35.4/arch/sparc/include/asm/uaccess.h linux-2.6.35.4/arch/sparc/include/asm/uaccess.h
3914 --- linux-2.6.35.4/arch/sparc/include/asm/uaccess.h 2010-08-26 19:47:12.000000000 -0400
3915 +++ linux-2.6.35.4/arch/sparc/include/asm/uaccess.h 2010-09-17 20:12:09.000000000 -0400
3916 @@ -1,5 +1,13 @@
3917 #ifndef ___ASM_SPARC_UACCESS_H
3918 #define ___ASM_SPARC_UACCESS_H
3919 +
3920 +#ifdef __KERNEL__
3921 +#ifndef __ASSEMBLY__
3922 +#include <linux/types.h>
3923 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3924 +#endif
3925 +#endif
3926 +
3927 #if defined(__sparc__) && defined(__arch64__)
3928 #include <asm/uaccess_64.h>
3929 #else
3930 diff -urNp linux-2.6.35.4/arch/sparc/kernel/iommu.c linux-2.6.35.4/arch/sparc/kernel/iommu.c
3931 --- linux-2.6.35.4/arch/sparc/kernel/iommu.c 2010-08-26 19:47:12.000000000 -0400
3932 +++ linux-2.6.35.4/arch/sparc/kernel/iommu.c 2010-09-17 20:12:09.000000000 -0400
3933 @@ -828,7 +828,7 @@ static void dma_4u_sync_sg_for_cpu(struc
3934 spin_unlock_irqrestore(&iommu->lock, flags);
3935 }
3936
3937 -static struct dma_map_ops sun4u_dma_ops = {
3938 +static const struct dma_map_ops sun4u_dma_ops = {
3939 .alloc_coherent = dma_4u_alloc_coherent,
3940 .free_coherent = dma_4u_free_coherent,
3941 .map_page = dma_4u_map_page,
3942 @@ -839,7 +839,7 @@ static struct dma_map_ops sun4u_dma_ops
3943 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
3944 };
3945
3946 -struct dma_map_ops *dma_ops = &sun4u_dma_ops;
3947 +const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
3948 EXPORT_SYMBOL(dma_ops);
3949
3950 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
3951 diff -urNp linux-2.6.35.4/arch/sparc/kernel/ioport.c linux-2.6.35.4/arch/sparc/kernel/ioport.c
3952 --- linux-2.6.35.4/arch/sparc/kernel/ioport.c 2010-08-26 19:47:12.000000000 -0400
3953 +++ linux-2.6.35.4/arch/sparc/kernel/ioport.c 2010-09-17 20:12:09.000000000 -0400
3954 @@ -397,7 +397,7 @@ static void sbus_sync_sg_for_device(stru
3955 BUG();
3956 }
3957
3958 -struct dma_map_ops sbus_dma_ops = {
3959 +const struct dma_map_ops sbus_dma_ops = {
3960 .alloc_coherent = sbus_alloc_coherent,
3961 .free_coherent = sbus_free_coherent,
3962 .map_page = sbus_map_page,
3963 @@ -408,7 +408,7 @@ struct dma_map_ops sbus_dma_ops = {
3964 .sync_sg_for_device = sbus_sync_sg_for_device,
3965 };
3966
3967 -struct dma_map_ops *dma_ops = &sbus_dma_ops;
3968 +const struct dma_map_ops *dma_ops = &sbus_dma_ops;
3969 EXPORT_SYMBOL(dma_ops);
3970
3971 static int __init sparc_register_ioport(void)
3972 @@ -645,7 +645,7 @@ static void pci32_sync_sg_for_device(str
3973 }
3974 }
3975
3976 -struct dma_map_ops pci32_dma_ops = {
3977 +const struct dma_map_ops pci32_dma_ops = {
3978 .alloc_coherent = pci32_alloc_coherent,
3979 .free_coherent = pci32_free_coherent,
3980 .map_page = pci32_map_page,
3981 diff -urNp linux-2.6.35.4/arch/sparc/kernel/kgdb_32.c linux-2.6.35.4/arch/sparc/kernel/kgdb_32.c
3982 --- linux-2.6.35.4/arch/sparc/kernel/kgdb_32.c 2010-08-26 19:47:12.000000000 -0400
3983 +++ linux-2.6.35.4/arch/sparc/kernel/kgdb_32.c 2010-09-17 20:12:09.000000000 -0400
3984 @@ -164,7 +164,7 @@ void kgdb_arch_set_pc(struct pt_regs *re
3985 regs->npc = regs->pc + 4;
3986 }
3987
3988 -struct kgdb_arch arch_kgdb_ops = {
3989 +const struct kgdb_arch arch_kgdb_ops = {
3990 /* Breakpoint instruction: ta 0x7d */
3991 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
3992 };
3993 diff -urNp linux-2.6.35.4/arch/sparc/kernel/kgdb_64.c linux-2.6.35.4/arch/sparc/kernel/kgdb_64.c
3994 --- linux-2.6.35.4/arch/sparc/kernel/kgdb_64.c 2010-08-26 19:47:12.000000000 -0400
3995 +++ linux-2.6.35.4/arch/sparc/kernel/kgdb_64.c 2010-09-17 20:12:09.000000000 -0400
3996 @@ -187,7 +187,7 @@ void kgdb_arch_set_pc(struct pt_regs *re
3997 regs->tnpc = regs->tpc + 4;
3998 }
3999
4000 -struct kgdb_arch arch_kgdb_ops = {
4001 +const struct kgdb_arch arch_kgdb_ops = {
4002 /* Breakpoint instruction: ta 0x72 */
4003 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
4004 };
4005 diff -urNp linux-2.6.35.4/arch/sparc/kernel/Makefile linux-2.6.35.4/arch/sparc/kernel/Makefile
4006 --- linux-2.6.35.4/arch/sparc/kernel/Makefile 2010-08-26 19:47:12.000000000 -0400
4007 +++ linux-2.6.35.4/arch/sparc/kernel/Makefile 2010-09-17 20:12:09.000000000 -0400
4008 @@ -3,7 +3,7 @@
4009 #
4010
4011 asflags-y := -ansi
4012 -ccflags-y := -Werror
4013 +#ccflags-y := -Werror
4014
4015 extra-y := head_$(BITS).o
4016 extra-y += init_task.o
4017 diff -urNp linux-2.6.35.4/arch/sparc/kernel/pci_sun4v.c linux-2.6.35.4/arch/sparc/kernel/pci_sun4v.c
4018 --- linux-2.6.35.4/arch/sparc/kernel/pci_sun4v.c 2010-08-26 19:47:12.000000000 -0400
4019 +++ linux-2.6.35.4/arch/sparc/kernel/pci_sun4v.c 2010-09-17 20:12:09.000000000 -0400
4020 @@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct devic
4021 spin_unlock_irqrestore(&iommu->lock, flags);
4022 }
4023
4024 -static struct dma_map_ops sun4v_dma_ops = {
4025 +static const struct dma_map_ops sun4v_dma_ops = {
4026 .alloc_coherent = dma_4v_alloc_coherent,
4027 .free_coherent = dma_4v_free_coherent,
4028 .map_page = dma_4v_map_page,
4029 diff -urNp linux-2.6.35.4/arch/sparc/kernel/sys_sparc_32.c linux-2.6.35.4/arch/sparc/kernel/sys_sparc_32.c
4030 --- linux-2.6.35.4/arch/sparc/kernel/sys_sparc_32.c 2010-08-26 19:47:12.000000000 -0400
4031 +++ linux-2.6.35.4/arch/sparc/kernel/sys_sparc_32.c 2010-09-17 20:12:09.000000000 -0400
4032 @@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(str
4033 if (ARCH_SUN4C && len > 0x20000000)
4034 return -ENOMEM;
4035 if (!addr)
4036 - addr = TASK_UNMAPPED_BASE;
4037 + addr = current->mm->mmap_base;
4038
4039 if (flags & MAP_SHARED)
4040 addr = COLOUR_ALIGN(addr);
4041 @@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
4042 }
4043 if (TASK_SIZE - PAGE_SIZE - len < addr)
4044 return -ENOMEM;
4045 - if (!vmm || addr + len <= vmm->vm_start)
4046 + if (check_heap_stack_gap(vmm, addr, len))
4047 return addr;
4048 addr = vmm->vm_end;
4049 if (flags & MAP_SHARED)
4050 diff -urNp linux-2.6.35.4/arch/sparc/kernel/sys_sparc_64.c linux-2.6.35.4/arch/sparc/kernel/sys_sparc_64.c
4051 --- linux-2.6.35.4/arch/sparc/kernel/sys_sparc_64.c 2010-08-26 19:47:12.000000000 -0400
4052 +++ linux-2.6.35.4/arch/sparc/kernel/sys_sparc_64.c 2010-09-17 20:12:09.000000000 -0400
4053 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str
4054 /* We do not accept a shared mapping if it would violate
4055 * cache aliasing constraints.
4056 */
4057 - if ((flags & MAP_SHARED) &&
4058 + if ((filp || (flags & MAP_SHARED)) &&
4059 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4060 return -EINVAL;
4061 return addr;
4062 @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str
4063 if (filp || (flags & MAP_SHARED))
4064 do_color_align = 1;
4065
4066 +#ifdef CONFIG_PAX_RANDMMAP
4067 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4068 +#endif
4069 +
4070 if (addr) {
4071 if (do_color_align)
4072 addr = COLOUR_ALIGN(addr, pgoff);
4073 @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(str
4074 addr = PAGE_ALIGN(addr);
4075
4076 vma = find_vma(mm, addr);
4077 - if (task_size - len >= addr &&
4078 - (!vma || addr + len <= vma->vm_start))
4079 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4080 return addr;
4081 }
4082
4083 if (len > mm->cached_hole_size) {
4084 - start_addr = addr = mm->free_area_cache;
4085 + start_addr = addr = mm->free_area_cache;
4086 } else {
4087 - start_addr = addr = TASK_UNMAPPED_BASE;
4088 + start_addr = addr = mm->mmap_base;
4089 mm->cached_hole_size = 0;
4090 }
4091
4092 @@ -174,14 +177,14 @@ full_search:
4093 vma = find_vma(mm, VA_EXCLUDE_END);
4094 }
4095 if (unlikely(task_size < addr)) {
4096 - if (start_addr != TASK_UNMAPPED_BASE) {
4097 - start_addr = addr = TASK_UNMAPPED_BASE;
4098 + if (start_addr != mm->mmap_base) {
4099 + start_addr = addr = mm->mmap_base;
4100 mm->cached_hole_size = 0;
4101 goto full_search;
4102 }
4103 return -ENOMEM;
4104 }
4105 - if (likely(!vma || addr + len <= vma->vm_start)) {
4106 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4107 /*
4108 * Remember the place where we stopped the search:
4109 */
4110 @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct fi
4111 /* We do not accept a shared mapping if it would violate
4112 * cache aliasing constraints.
4113 */
4114 - if ((flags & MAP_SHARED) &&
4115 + if ((filp || (flags & MAP_SHARED)) &&
4116 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4117 return -EINVAL;
4118 return addr;
4119 @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct fi
4120 addr = PAGE_ALIGN(addr);
4121
4122 vma = find_vma(mm, addr);
4123 - if (task_size - len >= addr &&
4124 - (!vma || addr + len <= vma->vm_start))
4125 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4126 return addr;
4127 }
4128
4129 @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct fi
4130 /* make sure it can fit in the remaining address space */
4131 if (likely(addr > len)) {
4132 vma = find_vma(mm, addr-len);
4133 - if (!vma || addr <= vma->vm_start) {
4134 + if (check_heap_stack_gap(vma, addr - len, len)) {
4135 /* remember the address as a hint for next time */
4136 return (mm->free_area_cache = addr-len);
4137 }
4138 @@ -278,7 +280,7 @@ arch_get_unmapped_area_topdown(struct fi
4139 * return with success:
4140 */
4141 vma = find_vma(mm, addr);
4142 - if (likely(!vma || addr+len <= vma->vm_start)) {
4143 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4144 /* remember the address as a hint for next time */
4145 return (mm->free_area_cache = addr);
4146 }
4147 @@ -385,6 +387,12 @@ void arch_pick_mmap_layout(struct mm_str
4148 gap == RLIM_INFINITY ||
4149 sysctl_legacy_va_layout) {
4150 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4151 +
4152 +#ifdef CONFIG_PAX_RANDMMAP
4153 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4154 + mm->mmap_base += mm->delta_mmap;
4155 +#endif
4156 +
4157 mm->get_unmapped_area = arch_get_unmapped_area;
4158 mm->unmap_area = arch_unmap_area;
4159 } else {
4160 @@ -397,6 +405,12 @@ void arch_pick_mmap_layout(struct mm_str
4161 gap = (task_size / 6 * 5);
4162
4163 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4164 +
4165 +#ifdef CONFIG_PAX_RANDMMAP
4166 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4167 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4168 +#endif
4169 +
4170 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4171 mm->unmap_area = arch_unmap_area_topdown;
4172 }
4173 diff -urNp linux-2.6.35.4/arch/sparc/kernel/traps_64.c linux-2.6.35.4/arch/sparc/kernel/traps_64.c
4174 --- linux-2.6.35.4/arch/sparc/kernel/traps_64.c 2010-08-26 19:47:12.000000000 -0400
4175 +++ linux-2.6.35.4/arch/sparc/kernel/traps_64.c 2010-09-17 20:12:09.000000000 -0400
4176 @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long
4177
4178 lvl -= 0x100;
4179 if (regs->tstate & TSTATE_PRIV) {
4180 +
4181 +#ifdef CONFIG_PAX_REFCOUNT
4182 + if (lvl == 6)
4183 + pax_report_refcount_overflow(regs);
4184 +#endif
4185 +
4186 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4187 die_if_kernel(buffer, regs);
4188 }
4189 @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long
4190 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4191 {
4192 char buffer[32];
4193 -
4194 +
4195 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4196 0, lvl, SIGTRAP) == NOTIFY_STOP)
4197 return;
4198
4199 +#ifdef CONFIG_PAX_REFCOUNT
4200 + if (lvl == 6)
4201 + pax_report_refcount_overflow(regs);
4202 +#endif
4203 +
4204 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4205
4206 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4207 diff -urNp linux-2.6.35.4/arch/sparc/lib/atomic_64.S linux-2.6.35.4/arch/sparc/lib/atomic_64.S
4208 --- linux-2.6.35.4/arch/sparc/lib/atomic_64.S 2010-08-26 19:47:12.000000000 -0400
4209 +++ linux-2.6.35.4/arch/sparc/lib/atomic_64.S 2010-09-17 20:12:37.000000000 -0400
4210 @@ -18,7 +18,12 @@
4211 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4212 BACKOFF_SETUP(%o2)
4213 1: lduw [%o1], %g1
4214 - add %g1, %o0, %g7
4215 + addcc %g1, %o0, %g7
4216 +
4217 +#ifdef CONFIG_PAX_REFCOUNT
4218 + tvs %icc, 6
4219 +#endif
4220 +
4221 cas [%o1], %g1, %g7
4222 cmp %g1, %g7
4223 bne,pn %icc, 2f
4224 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
4225 2: BACKOFF_SPIN(%o2, %o3, 1b)
4226 .size atomic_add, .-atomic_add
4227
4228 + .globl atomic_add_unchecked
4229 + .type atomic_add_unchecked,#function
4230 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4231 + BACKOFF_SETUP(%o2)
4232 +1: lduw [%o1], %g1
4233 + add %g1, %o0, %g7
4234 + cas [%o1], %g1, %g7
4235 + cmp %g1, %g7
4236 + bne,pn %icc, 2f
4237 + nop
4238 + retl
4239 + nop
4240 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4241 + .size atomic_add_unchecked, .-atomic_add_unchecked
4242 +
4243 .globl atomic_sub
4244 .type atomic_sub,#function
4245 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4246 BACKOFF_SETUP(%o2)
4247 1: lduw [%o1], %g1
4248 - sub %g1, %o0, %g7
4249 + subcc %g1, %o0, %g7
4250 +
4251 +#ifdef CONFIG_PAX_REFCOUNT
4252 + tvs %icc, 6
4253 +#endif
4254 +
4255 cas [%o1], %g1, %g7
4256 cmp %g1, %g7
4257 bne,pn %icc, 2f
4258 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
4259 2: BACKOFF_SPIN(%o2, %o3, 1b)
4260 .size atomic_sub, .-atomic_sub
4261
4262 + .globl atomic_sub_unchecked
4263 + .type atomic_sub_unchecked,#function
4264 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4265 + BACKOFF_SETUP(%o2)
4266 +1: lduw [%o1], %g1
4267 + sub %g1, %o0, %g7
4268 + cas [%o1], %g1, %g7
4269 + cmp %g1, %g7
4270 + bne,pn %icc, 2f
4271 + nop
4272 + retl
4273 + nop
4274 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4275 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
4276 +
4277 .globl atomic_add_ret
4278 .type atomic_add_ret,#function
4279 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4280 BACKOFF_SETUP(%o2)
4281 1: lduw [%o1], %g1
4282 - add %g1, %o0, %g7
4283 + addcc %g1, %o0, %g7
4284 +
4285 +#ifdef CONFIG_PAX_REFCOUNT
4286 + tvs %icc, 6
4287 +#endif
4288 +
4289 cas [%o1], %g1, %g7
4290 cmp %g1, %g7
4291 bne,pn %icc, 2f
4292 @@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1
4293 2: BACKOFF_SPIN(%o2, %o3, 1b)
4294 .size atomic_add_ret, .-atomic_add_ret
4295
4296 + .globl atomic_add_ret_unchecked
4297 + .type atomic_add_ret_unchecked,#function
4298 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4299 + BACKOFF_SETUP(%o2)
4300 +1: lduw [%o1], %g1
4301 + addcc %g1, %o0, %g7
4302 + cas [%o1], %g1, %g7
4303 + cmp %g1, %g7
4304 + bne,pn %icc, 2f
4305 + add %g7, %o0, %g7
4306 + sra %g7, 0, %o0
4307 + retl
4308 + nop
4309 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4310 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4311 +
4312 .globl atomic_sub_ret
4313 .type atomic_sub_ret,#function
4314 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4315 BACKOFF_SETUP(%o2)
4316 1: lduw [%o1], %g1
4317 - sub %g1, %o0, %g7
4318 + subcc %g1, %o0, %g7
4319 +
4320 +#ifdef CONFIG_PAX_REFCOUNT
4321 + tvs %icc, 6
4322 +#endif
4323 +
4324 cas [%o1], %g1, %g7
4325 cmp %g1, %g7
4326 bne,pn %icc, 2f
4327 @@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
4328 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4329 BACKOFF_SETUP(%o2)
4330 1: ldx [%o1], %g1
4331 - add %g1, %o0, %g7
4332 + addcc %g1, %o0, %g7
4333 +
4334 +#ifdef CONFIG_PAX_REFCOUNT
4335 + tvs %xcc, 6
4336 +#endif
4337 +
4338 casx [%o1], %g1, %g7
4339 cmp %g1, %g7
4340 bne,pn %xcc, 2f
4341 @@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 =
4342 2: BACKOFF_SPIN(%o2, %o3, 1b)
4343 .size atomic64_add, .-atomic64_add
4344
4345 + .globl atomic64_add_unchecked
4346 + .type atomic64_add_unchecked,#function
4347 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4348 + BACKOFF_SETUP(%o2)
4349 +1: ldx [%o1], %g1
4350 + addcc %g1, %o0, %g7
4351 + casx [%o1], %g1, %g7
4352 + cmp %g1, %g7
4353 + bne,pn %xcc, 2f
4354 + nop
4355 + retl
4356 + nop
4357 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4358 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
4359 +
4360 .globl atomic64_sub
4361 .type atomic64_sub,#function
4362 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4363 BACKOFF_SETUP(%o2)
4364 1: ldx [%o1], %g1
4365 - sub %g1, %o0, %g7
4366 + subcc %g1, %o0, %g7
4367 +
4368 +#ifdef CONFIG_PAX_REFCOUNT
4369 + tvs %xcc, 6
4370 +#endif
4371 +
4372 casx [%o1], %g1, %g7
4373 cmp %g1, %g7
4374 bne,pn %xcc, 2f
4375 @@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
4376 2: BACKOFF_SPIN(%o2, %o3, 1b)
4377 .size atomic64_sub, .-atomic64_sub
4378
4379 + .globl atomic64_sub_unchecked
4380 + .type atomic64_sub_unchecked,#function
4381 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4382 + BACKOFF_SETUP(%o2)
4383 +1: ldx [%o1], %g1
4384 + subcc %g1, %o0, %g7
4385 + casx [%o1], %g1, %g7
4386 + cmp %g1, %g7
4387 + bne,pn %xcc, 2f
4388 + nop
4389 + retl
4390 + nop
4391 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4392 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4393 +
4394 .globl atomic64_add_ret
4395 .type atomic64_add_ret,#function
4396 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4397 BACKOFF_SETUP(%o2)
4398 1: ldx [%o1], %g1
4399 - add %g1, %o0, %g7
4400 + addcc %g1, %o0, %g7
4401 +
4402 +#ifdef CONFIG_PAX_REFCOUNT
4403 + tvs %xcc, 6
4404 +#endif
4405 +
4406 casx [%o1], %g1, %g7
4407 cmp %g1, %g7
4408 bne,pn %xcc, 2f
4409 @@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4410 2: BACKOFF_SPIN(%o2, %o3, 1b)
4411 .size atomic64_add_ret, .-atomic64_add_ret
4412
4413 + .globl atomic64_add_ret_unchecked
4414 + .type atomic64_add_ret_unchecked,#function
4415 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4416 + BACKOFF_SETUP(%o2)
4417 +1: ldx [%o1], %g1
4418 + addcc %g1, %o0, %g7
4419 + casx [%o1], %g1, %g7
4420 + cmp %g1, %g7
4421 + bne,pn %xcc, 2f
4422 + add %g7, %o0, %g7
4423 + mov %g7, %o0
4424 + retl
4425 + nop
4426 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4427 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4428 +
4429 .globl atomic64_sub_ret
4430 .type atomic64_sub_ret,#function
4431 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4432 BACKOFF_SETUP(%o2)
4433 1: ldx [%o1], %g1
4434 - sub %g1, %o0, %g7
4435 + subcc %g1, %o0, %g7
4436 +
4437 +#ifdef CONFIG_PAX_REFCOUNT
4438 + tvs %xcc, 6
4439 +#endif
4440 +
4441 casx [%o1], %g1, %g7
4442 cmp %g1, %g7
4443 bne,pn %xcc, 2f
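[Illustration, not part of the patch: the addcc/subcc plus "tvs %icc, 6" pairs above make the stock atomics raise software trap 6 on signed overflow, which the traps_64.c hunk earlier routes to pax_report_refcount_overflow(), while the new *_unchecked variants keep plain wrap-around for counters where overflow is harmless. A rough standalone C model of that split follows; the GCC overflow builtin and abort() stand in for the trap and the kernel report, they are not the patch's mechanism.]

/* Checked vs. unchecked adds, with atomicity and the SPARC trap left out. */
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

typedef struct { int counter; } atomic_t;            /* overflow-checked  */
typedef struct { int counter; } atomic_unchecked_t;  /* may wrap silently */

static void atomic_add(int i, atomic_t *v)
{
    int newval;
    if (__builtin_add_overflow(v->counter, i, &newval))
        abort();                     /* model of the refcount overflow report */
    v->counter = newval;
}

static void atomic_add_unchecked(int i, atomic_unchecked_t *v)
{
    /* wrap-around is intended; unsigned arithmetic avoids signed-overflow UB */
    v->counter = (int)((unsigned int)v->counter + (unsigned int)i);
}

int main(void)
{
    atomic_unchecked_t stats = { INT_MAX };
    atomic_add_unchecked(1, &stats);
    printf("unchecked counter wrapped to %d\n", stats.counter);

    atomic_t ref = { INT_MAX };
    atomic_add(1, &ref);             /* aborts here in this model */
    return 0;
}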
4444 diff -urNp linux-2.6.35.4/arch/sparc/lib/ksyms.c linux-2.6.35.4/arch/sparc/lib/ksyms.c
4445 --- linux-2.6.35.4/arch/sparc/lib/ksyms.c 2010-08-26 19:47:12.000000000 -0400
4446 +++ linux-2.6.35.4/arch/sparc/lib/ksyms.c 2010-09-17 20:12:09.000000000 -0400
4447 @@ -142,12 +142,17 @@ EXPORT_SYMBOL(__downgrade_write);
4448
4449 /* Atomic counter implementation. */
4450 EXPORT_SYMBOL(atomic_add);
4451 +EXPORT_SYMBOL(atomic_add_unchecked);
4452 EXPORT_SYMBOL(atomic_add_ret);
4453 EXPORT_SYMBOL(atomic_sub);
4454 +EXPORT_SYMBOL(atomic_sub_unchecked);
4455 EXPORT_SYMBOL(atomic_sub_ret);
4456 EXPORT_SYMBOL(atomic64_add);
4457 +EXPORT_SYMBOL(atomic64_add_unchecked);
4458 EXPORT_SYMBOL(atomic64_add_ret);
4459 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4460 EXPORT_SYMBOL(atomic64_sub);
4461 +EXPORT_SYMBOL(atomic64_sub_unchecked);
4462 EXPORT_SYMBOL(atomic64_sub_ret);
4463
4464 /* Atomic bit operations. */
4465 diff -urNp linux-2.6.35.4/arch/sparc/lib/rwsem_64.S linux-2.6.35.4/arch/sparc/lib/rwsem_64.S
4466 --- linux-2.6.35.4/arch/sparc/lib/rwsem_64.S 2010-08-26 19:47:12.000000000 -0400
4467 +++ linux-2.6.35.4/arch/sparc/lib/rwsem_64.S 2010-09-17 20:12:09.000000000 -0400
4468 @@ -11,7 +11,12 @@
4469 .globl __down_read
4470 __down_read:
4471 1: lduw [%o0], %g1
4472 - add %g1, 1, %g7
4473 + addcc %g1, 1, %g7
4474 +
4475 +#ifdef CONFIG_PAX_REFCOUNT
4476 + tvs %icc, 6
4477 +#endif
4478 +
4479 cas [%o0], %g1, %g7
4480 cmp %g1, %g7
4481 bne,pn %icc, 1b
4482 @@ -33,7 +38,12 @@ __down_read:
4483 .globl __down_read_trylock
4484 __down_read_trylock:
4485 1: lduw [%o0], %g1
4486 - add %g1, 1, %g7
4487 + addcc %g1, 1, %g7
4488 +
4489 +#ifdef CONFIG_PAX_REFCOUNT
4490 + tvs %icc, 6
4491 +#endif
4492 +
4493 cmp %g7, 0
4494 bl,pn %icc, 2f
4495 mov 0, %o1
4496 @@ -51,7 +61,12 @@ __down_write:
4497 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
4498 1:
4499 lduw [%o0], %g3
4500 - add %g3, %g1, %g7
4501 + addcc %g3, %g1, %g7
4502 +
4503 +#ifdef CONFIG_PAX_REFCOUNT
4504 + tvs %icc, 6
4505 +#endif
4506 +
4507 cas [%o0], %g3, %g7
4508 cmp %g3, %g7
4509 bne,pn %icc, 1b
4510 @@ -77,7 +92,12 @@ __down_write_trylock:
4511 cmp %g3, 0
4512 bne,pn %icc, 2f
4513 mov 0, %o1
4514 - add %g3, %g1, %g7
4515 + addcc %g3, %g1, %g7
4516 +
4517 +#ifdef CONFIG_PAX_REFCOUNT
4518 + tvs %icc, 6
4519 +#endif
4520 +
4521 cas [%o0], %g3, %g7
4522 cmp %g3, %g7
4523 bne,pn %icc, 1b
4524 @@ -90,7 +110,12 @@ __down_write_trylock:
4525 __up_read:
4526 1:
4527 lduw [%o0], %g1
4528 - sub %g1, 1, %g7
4529 + subcc %g1, 1, %g7
4530 +
4531 +#ifdef CONFIG_PAX_REFCOUNT
4532 + tvs %icc, 6
4533 +#endif
4534 +
4535 cas [%o0], %g1, %g7
4536 cmp %g1, %g7
4537 bne,pn %icc, 1b
4538 @@ -118,7 +143,12 @@ __up_write:
4539 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
4540 1:
4541 lduw [%o0], %g3
4542 - sub %g3, %g1, %g7
4543 + subcc %g3, %g1, %g7
4544 +
4545 +#ifdef CONFIG_PAX_REFCOUNT
4546 + tvs %icc, 6
4547 +#endif
4548 +
4549 cas [%o0], %g3, %g7
4550 cmp %g3, %g7
4551 bne,pn %icc, 1b
4552 @@ -143,7 +173,12 @@ __downgrade_write:
4553 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
4554 1:
4555 lduw [%o0], %g3
4556 - sub %g3, %g1, %g7
4557 + subcc %g3, %g1, %g7
4558 +
4559 +#ifdef CONFIG_PAX_REFCOUNT
4560 + tvs %icc, 6
4561 +#endif
4562 +
4563 cas [%o0], %g3, %g7
4564 cmp %g3, %g7
4565 bne,pn %icc, 1b
4566 diff -urNp linux-2.6.35.4/arch/sparc/Makefile linux-2.6.35.4/arch/sparc/Makefile
4567 --- linux-2.6.35.4/arch/sparc/Makefile 2010-08-26 19:47:12.000000000 -0400
4568 +++ linux-2.6.35.4/arch/sparc/Makefile 2010-09-17 20:12:37.000000000 -0400
4569 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
4570 # Export what is needed by arch/sparc/boot/Makefile
4571 export VMLINUX_INIT VMLINUX_MAIN
4572 VMLINUX_INIT := $(head-y) $(init-y)
4573 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4574 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4575 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4576 VMLINUX_MAIN += $(drivers-y) $(net-y)
4577
4578 diff -urNp linux-2.6.35.4/arch/sparc/mm/fault_32.c linux-2.6.35.4/arch/sparc/mm/fault_32.c
4579 --- linux-2.6.35.4/arch/sparc/mm/fault_32.c 2010-08-26 19:47:12.000000000 -0400
4580 +++ linux-2.6.35.4/arch/sparc/mm/fault_32.c 2010-09-17 20:12:09.000000000 -0400
4581 @@ -22,6 +22,9 @@
4582 #include <linux/interrupt.h>
4583 #include <linux/module.h>
4584 #include <linux/kdebug.h>
4585 +#include <linux/slab.h>
4586 +#include <linux/pagemap.h>
4587 +#include <linux/compiler.h>
4588
4589 #include <asm/system.h>
4590 #include <asm/page.h>
4591 @@ -209,6 +212,268 @@ static unsigned long compute_si_addr(str
4592 return safe_compute_effective_address(regs, insn);
4593 }
4594
4595 +#ifdef CONFIG_PAX_PAGEEXEC
4596 +#ifdef CONFIG_PAX_DLRESOLVE
4597 +static void pax_emuplt_close(struct vm_area_struct *vma)
4598 +{
4599 + vma->vm_mm->call_dl_resolve = 0UL;
4600 +}
4601 +
4602 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4603 +{
4604 + unsigned int *kaddr;
4605 +
4606 + vmf->page = alloc_page(GFP_HIGHUSER);
4607 + if (!vmf->page)
4608 + return VM_FAULT_OOM;
4609 +
4610 + kaddr = kmap(vmf->page);
4611 + memset(kaddr, 0, PAGE_SIZE);
4612 + kaddr[0] = 0x9DE3BFA8U; /* save */
4613 + flush_dcache_page(vmf->page);
4614 + kunmap(vmf->page);
4615 + return VM_FAULT_MAJOR;
4616 +}
4617 +
4618 +static const struct vm_operations_struct pax_vm_ops = {
4619 + .close = pax_emuplt_close,
4620 + .fault = pax_emuplt_fault
4621 +};
4622 +
4623 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4624 +{
4625 + int ret;
4626 +
4627 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4628 + vma->vm_mm = current->mm;
4629 + vma->vm_start = addr;
4630 + vma->vm_end = addr + PAGE_SIZE;
4631 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4632 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4633 + vma->vm_ops = &pax_vm_ops;
4634 +
4635 + ret = insert_vm_struct(current->mm, vma);
4636 + if (ret)
4637 + return ret;
4638 +
4639 + ++current->mm->total_vm;
4640 + return 0;
4641 +}
4642 +#endif
4643 +
4644 +/*
4645 + * PaX: decide what to do with offenders (regs->pc = fault address)
4646 + *
4647 + * returns 1 when task should be killed
4648 + * 2 when patched PLT trampoline was detected
4649 + * 3 when unpatched PLT trampoline was detected
4650 + */
4651 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4652 +{
4653 +
4654 +#ifdef CONFIG_PAX_EMUPLT
4655 + int err;
4656 +
4657 + do { /* PaX: patched PLT emulation #1 */
4658 + unsigned int sethi1, sethi2, jmpl;
4659 +
4660 + err = get_user(sethi1, (unsigned int *)regs->pc);
4661 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4662 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4663 +
4664 + if (err)
4665 + break;
4666 +
4667 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4668 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4669 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4670 + {
4671 + unsigned int addr;
4672 +
4673 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4674 + addr = regs->u_regs[UREG_G1];
4675 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4676 + regs->pc = addr;
4677 + regs->npc = addr+4;
4678 + return 2;
4679 + }
4680 + } while (0);
4681 +
4682 + { /* PaX: patched PLT emulation #2 */
4683 + unsigned int ba;
4684 +
4685 + err = get_user(ba, (unsigned int *)regs->pc);
4686 +
4687 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4688 + unsigned int addr;
4689 +
4690 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4691 + regs->pc = addr;
4692 + regs->npc = addr+4;
4693 + return 2;
4694 + }
4695 + }
4696 +
4697 + do { /* PaX: patched PLT emulation #3 */
4698 + unsigned int sethi, jmpl, nop;
4699 +
4700 + err = get_user(sethi, (unsigned int *)regs->pc);
4701 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4702 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4703 +
4704 + if (err)
4705 + break;
4706 +
4707 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4708 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4709 + nop == 0x01000000U)
4710 + {
4711 + unsigned int addr;
4712 +
4713 + addr = (sethi & 0x003FFFFFU) << 10;
4714 + regs->u_regs[UREG_G1] = addr;
4715 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4716 + regs->pc = addr;
4717 + regs->npc = addr+4;
4718 + return 2;
4719 + }
4720 + } while (0);
4721 +
4722 + do { /* PaX: unpatched PLT emulation step 1 */
4723 + unsigned int sethi, ba, nop;
4724 +
4725 + err = get_user(sethi, (unsigned int *)regs->pc);
4726 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
4727 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4728 +
4729 + if (err)
4730 + break;
4731 +
4732 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4733 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4734 + nop == 0x01000000U)
4735 + {
4736 + unsigned int addr, save, call;
4737 +
4738 + if ((ba & 0xFFC00000U) == 0x30800000U)
4739 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4740 + else
4741 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4742 +
4743 + err = get_user(save, (unsigned int *)addr);
4744 + err |= get_user(call, (unsigned int *)(addr+4));
4745 + err |= get_user(nop, (unsigned int *)(addr+8));
4746 + if (err)
4747 + break;
4748 +
4749 +#ifdef CONFIG_PAX_DLRESOLVE
4750 + if (save == 0x9DE3BFA8U &&
4751 + (call & 0xC0000000U) == 0x40000000U &&
4752 + nop == 0x01000000U)
4753 + {
4754 + struct vm_area_struct *vma;
4755 + unsigned long call_dl_resolve;
4756 +
4757 + down_read(&current->mm->mmap_sem);
4758 + call_dl_resolve = current->mm->call_dl_resolve;
4759 + up_read(&current->mm->mmap_sem);
4760 + if (likely(call_dl_resolve))
4761 + goto emulate;
4762 +
4763 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4764 +
4765 + down_write(&current->mm->mmap_sem);
4766 + if (current->mm->call_dl_resolve) {
4767 + call_dl_resolve = current->mm->call_dl_resolve;
4768 + up_write(&current->mm->mmap_sem);
4769 + if (vma)
4770 + kmem_cache_free(vm_area_cachep, vma);
4771 + goto emulate;
4772 + }
4773 +
4774 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4775 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4776 + up_write(&current->mm->mmap_sem);
4777 + if (vma)
4778 + kmem_cache_free(vm_area_cachep, vma);
4779 + return 1;
4780 + }
4781 +
4782 + if (pax_insert_vma(vma, call_dl_resolve)) {
4783 + up_write(&current->mm->mmap_sem);
4784 + kmem_cache_free(vm_area_cachep, vma);
4785 + return 1;
4786 + }
4787 +
4788 + current->mm->call_dl_resolve = call_dl_resolve;
4789 + up_write(&current->mm->mmap_sem);
4790 +
4791 +emulate:
4792 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4793 + regs->pc = call_dl_resolve;
4794 + regs->npc = addr+4;
4795 + return 3;
4796 + }
4797 +#endif
4798 +
4799 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4800 + if ((save & 0xFFC00000U) == 0x05000000U &&
4801 + (call & 0xFFFFE000U) == 0x85C0A000U &&
4802 + nop == 0x01000000U)
4803 + {
4804 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4805 + regs->u_regs[UREG_G2] = addr + 4;
4806 + addr = (save & 0x003FFFFFU) << 10;
4807 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4808 + regs->pc = addr;
4809 + regs->npc = addr+4;
4810 + return 3;
4811 + }
4812 + }
4813 + } while (0);
4814 +
4815 + do { /* PaX: unpatched PLT emulation step 2 */
4816 + unsigned int save, call, nop;
4817 +
4818 + err = get_user(save, (unsigned int *)(regs->pc-4));
4819 + err |= get_user(call, (unsigned int *)regs->pc);
4820 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
4821 + if (err)
4822 + break;
4823 +
4824 + if (save == 0x9DE3BFA8U &&
4825 + (call & 0xC0000000U) == 0x40000000U &&
4826 + nop == 0x01000000U)
4827 + {
4828 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4829 +
4830 + regs->u_regs[UREG_RETPC] = regs->pc;
4831 + regs->pc = dl_resolve;
4832 + regs->npc = dl_resolve+4;
4833 + return 3;
4834 + }
4835 + } while (0);
4836 +#endif
4837 +
4838 + return 1;
4839 +}
4840 +
4841 +void pax_report_insns(void *pc, void *sp)
4842 +{
4843 + unsigned long i;
4844 +
4845 + printk(KERN_ERR "PAX: bytes at PC: ");
4846 + for (i = 0; i < 8; i++) {
4847 + unsigned int c;
4848 + if (get_user(c, (unsigned int *)pc+i))
4849 + printk(KERN_CONT "???????? ");
4850 + else
4851 + printk(KERN_CONT "%08x ", c);
4852 + }
4853 + printk("\n");
4854 +}
4855 +#endif
4856 +
4857 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4858 int text_fault)
4859 {
4860 @@ -282,6 +547,24 @@ good_area:
4861 if(!(vma->vm_flags & VM_WRITE))
4862 goto bad_area;
4863 } else {
4864 +
4865 +#ifdef CONFIG_PAX_PAGEEXEC
4866 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4867 + up_read(&mm->mmap_sem);
4868 + switch (pax_handle_fetch_fault(regs)) {
4869 +
4870 +#ifdef CONFIG_PAX_EMUPLT
4871 + case 2:
4872 + case 3:
4873 + return;
4874 +#endif
4875 +
4876 + }
4877 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4878 + do_group_exit(SIGKILL);
4879 + }
4880 +#endif
4881 +
4882 /* Allow reads even for write-only mappings */
4883 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4884 goto bad_area;
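[Illustration, not part of the patch: pax_handle_fetch_fault() above pattern-matches the instruction words at the faulting PC and, when it recognises a PLT stub, computes the real jump target instead of letting the fetch from a non-executable page kill the task (return code 1 kills, 2 and 3 mean a patched or unpatched trampoline was emulated). The address arithmetic of "patched PLT emulation #3" can be checked in isolation; the instruction words below are made up for the example.]

/* Userspace check of the sethi %hi(target),%g1 ; jmpl %g1+%lo(target),%g0
 * target computation used by the emulation above. */
#include <stdio.h>
#include <stdint.h>

static unsigned long plt3_target(uint32_t sethi, uint32_t jmpl)
{
    unsigned long addr = (sethi & 0x003FFFFFU) << 10;              /* %hi part */
    addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);  /* sign-extended simm13 */
    return (uint32_t)addr;                     /* 32-bit wrap, as on sparc32 */
}

int main(void)
{
    /* hypothetical stub: sethi %hi(0x12345400),%g1 ; jmpl %g1+0x2a8,%g0 */
    uint32_t sethi = 0x03000000U | (0x12345400U >> 10);
    uint32_t jmpl  = 0x81C06000U | 0x2A8U;

    printf("emulated jump target: 0x%08lx\n", plt3_target(sethi, jmpl));
    /* prints 0x123456a8, i.e. 0x12345400 + 0x2a8 */
    return 0;
}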
4885 diff -urNp linux-2.6.35.4/arch/sparc/mm/fault_64.c linux-2.6.35.4/arch/sparc/mm/fault_64.c
4886 --- linux-2.6.35.4/arch/sparc/mm/fault_64.c 2010-08-26 19:47:12.000000000 -0400
4887 +++ linux-2.6.35.4/arch/sparc/mm/fault_64.c 2010-09-17 20:12:09.000000000 -0400
4888 @@ -21,6 +21,9 @@
4889 #include <linux/kprobes.h>
4890 #include <linux/kdebug.h>
4891 #include <linux/percpu.h>
4892 +#include <linux/slab.h>
4893 +#include <linux/pagemap.h>
4894 +#include <linux/compiler.h>
4895
4896 #include <asm/page.h>
4897 #include <asm/pgtable.h>
4898 @@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32b
4899 show_regs(regs);
4900 }
4901
4902 +#ifdef CONFIG_PAX_PAGEEXEC
4903 +#ifdef CONFIG_PAX_DLRESOLVE
4904 +static void pax_emuplt_close(struct vm_area_struct *vma)
4905 +{
4906 + vma->vm_mm->call_dl_resolve = 0UL;
4907 +}
4908 +
4909 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4910 +{
4911 + unsigned int *kaddr;
4912 +
4913 + vmf->page = alloc_page(GFP_HIGHUSER);
4914 + if (!vmf->page)
4915 + return VM_FAULT_OOM;
4916 +
4917 + kaddr = kmap(vmf->page);
4918 + memset(kaddr, 0, PAGE_SIZE);
4919 + kaddr[0] = 0x9DE3BFA8U; /* save */
4920 + flush_dcache_page(vmf->page);
4921 + kunmap(vmf->page);
4922 + return VM_FAULT_MAJOR;
4923 +}
4924 +
4925 +static const struct vm_operations_struct pax_vm_ops = {
4926 + .close = pax_emuplt_close,
4927 + .fault = pax_emuplt_fault
4928 +};
4929 +
4930 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4931 +{
4932 + int ret;
4933 +
4934 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4935 + vma->vm_mm = current->mm;
4936 + vma->vm_start = addr;
4937 + vma->vm_end = addr + PAGE_SIZE;
4938 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4939 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4940 + vma->vm_ops = &pax_vm_ops;
4941 +
4942 + ret = insert_vm_struct(current->mm, vma);
4943 + if (ret)
4944 + return ret;
4945 +
4946 + ++current->mm->total_vm;
4947 + return 0;
4948 +}
4949 +#endif
4950 +
4951 +/*
4952 + * PaX: decide what to do with offenders (regs->tpc = fault address)
4953 + *
4954 + * returns 1 when task should be killed
4955 + * 2 when patched PLT trampoline was detected
4956 + * 3 when unpatched PLT trampoline was detected
4957 + */
4958 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4959 +{
4960 +
4961 +#ifdef CONFIG_PAX_EMUPLT
4962 + int err;
4963 +
4964 + do { /* PaX: patched PLT emulation #1 */
4965 + unsigned int sethi1, sethi2, jmpl;
4966 +
4967 + err = get_user(sethi1, (unsigned int *)regs->tpc);
4968 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4969 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4970 +
4971 + if (err)
4972 + break;
4973 +
4974 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4975 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4976 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4977 + {
4978 + unsigned long addr;
4979 +
4980 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4981 + addr = regs->u_regs[UREG_G1];
4982 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4983 +
4984 + if (test_thread_flag(TIF_32BIT))
4985 + addr &= 0xFFFFFFFFUL;
4986 +
4987 + regs->tpc = addr;
4988 + regs->tnpc = addr+4;
4989 + return 2;
4990 + }
4991 + } while (0);
4992 +
4993 + { /* PaX: patched PLT emulation #2 */
4994 + unsigned int ba;
4995 +
4996 + err = get_user(ba, (unsigned int *)regs->tpc);
4997 +
4998 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4999 + unsigned long addr;
5000 +
5001 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5002 +
5003 + if (test_thread_flag(TIF_32BIT))
5004 + addr &= 0xFFFFFFFFUL;
5005 +
5006 + regs->tpc = addr;
5007 + regs->tnpc = addr+4;
5008 + return 2;
5009 + }
5010 + }
5011 +
5012 + do { /* PaX: patched PLT emulation #3 */
5013 + unsigned int sethi, jmpl, nop;
5014 +
5015 + err = get_user(sethi, (unsigned int *)regs->tpc);
5016 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5017 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5018 +
5019 + if (err)
5020 + break;
5021 +
5022 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5023 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5024 + nop == 0x01000000U)
5025 + {
5026 + unsigned long addr;
5027 +
5028 + addr = (sethi & 0x003FFFFFU) << 10;
5029 + regs->u_regs[UREG_G1] = addr;
5030 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5031 +
5032 + if (test_thread_flag(TIF_32BIT))
5033 + addr &= 0xFFFFFFFFUL;
5034 +
5035 + regs->tpc = addr;
5036 + regs->tnpc = addr+4;
5037 + return 2;
5038 + }
5039 + } while (0);
5040 +
5041 + do { /* PaX: patched PLT emulation #4 */
5042 + unsigned int sethi, mov1, call, mov2;
5043 +
5044 + err = get_user(sethi, (unsigned int *)regs->tpc);
5045 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5046 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
5047 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5048 +
5049 + if (err)
5050 + break;
5051 +
5052 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5053 + mov1 == 0x8210000FU &&
5054 + (call & 0xC0000000U) == 0x40000000U &&
5055 + mov2 == 0x9E100001U)
5056 + {
5057 + unsigned long addr;
5058 +
5059 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5060 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5061 +
5062 + if (test_thread_flag(TIF_32BIT))
5063 + addr &= 0xFFFFFFFFUL;
5064 +
5065 + regs->tpc = addr;
5066 + regs->tnpc = addr+4;
5067 + return 2;
5068 + }
5069 + } while (0);
5070 +
5071 + do { /* PaX: patched PLT emulation #5 */
5072 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5073 +
5074 + err = get_user(sethi, (unsigned int *)regs->tpc);
5075 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5076 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5077 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5078 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5079 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5080 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5081 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5082 +
5083 + if (err)
5084 + break;
5085 +
5086 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5087 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5088 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5089 + (or1 & 0xFFFFE000U) == 0x82106000U &&
5090 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5091 + sllx == 0x83287020U &&
5092 + jmpl == 0x81C04005U &&
5093 + nop == 0x01000000U)
5094 + {
5095 + unsigned long addr;
5096 +
5097 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5098 + regs->u_regs[UREG_G1] <<= 32;
5099 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5100 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5101 + regs->tpc = addr;
5102 + regs->tnpc = addr+4;
5103 + return 2;
5104 + }
5105 + } while (0);
5106 +
5107 + do { /* PaX: patched PLT emulation #6 */
5108 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5109 +
5110 + err = get_user(sethi, (unsigned int *)regs->tpc);
5111 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5112 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5113 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5114 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
5115 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5116 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5117 +
5118 + if (err)
5119 + break;
5120 +
5121 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5122 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5123 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5124 + sllx == 0x83287020U &&
5125 + (or & 0xFFFFE000U) == 0x8A116000U &&
5126 + jmpl == 0x81C04005U &&
5127 + nop == 0x01000000U)
5128 + {
5129 + unsigned long addr;
5130 +
5131 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5132 + regs->u_regs[UREG_G1] <<= 32;
5133 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5134 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5135 + regs->tpc = addr;
5136 + regs->tnpc = addr+4;
5137 + return 2;
5138 + }
5139 + } while (0);
5140 +
5141 + do { /* PaX: unpatched PLT emulation step 1 */
5142 + unsigned int sethi, ba, nop;
5143 +
5144 + err = get_user(sethi, (unsigned int *)regs->tpc);
5145 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5146 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5147 +
5148 + if (err)
5149 + break;
5150 +
5151 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5152 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5153 + nop == 0x01000000U)
5154 + {
5155 + unsigned long addr;
5156 + unsigned int save, call;
5157 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5158 +
5159 + if ((ba & 0xFFC00000U) == 0x30800000U)
5160 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5161 + else
5162 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5163 +
5164 + if (test_thread_flag(TIF_32BIT))
5165 + addr &= 0xFFFFFFFFUL;
5166 +
5167 + err = get_user(save, (unsigned int *)addr);
5168 + err |= get_user(call, (unsigned int *)(addr+4));
5169 + err |= get_user(nop, (unsigned int *)(addr+8));
5170 + if (err)
5171 + break;
5172 +
5173 +#ifdef CONFIG_PAX_DLRESOLVE
5174 + if (save == 0x9DE3BFA8U &&
5175 + (call & 0xC0000000U) == 0x40000000U &&
5176 + nop == 0x01000000U)
5177 + {
5178 + struct vm_area_struct *vma;
5179 + unsigned long call_dl_resolve;
5180 +
5181 + down_read(&current->mm->mmap_sem);
5182 + call_dl_resolve = current->mm->call_dl_resolve;
5183 + up_read(&current->mm->mmap_sem);
5184 + if (likely(call_dl_resolve))
5185 + goto emulate;
5186 +
5187 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5188 +
5189 + down_write(&current->mm->mmap_sem);
5190 + if (current->mm->call_dl_resolve) {
5191 + call_dl_resolve = current->mm->call_dl_resolve;
5192 + up_write(&current->mm->mmap_sem);
5193 + if (vma)
5194 + kmem_cache_free(vm_area_cachep, vma);
5195 + goto emulate;
5196 + }
5197 +
5198 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5199 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5200 + up_write(&current->mm->mmap_sem);
5201 + if (vma)
5202 + kmem_cache_free(vm_area_cachep, vma);
5203 + return 1;
5204 + }
5205 +
5206 + if (pax_insert_vma(vma, call_dl_resolve)) {
5207 + up_write(&current->mm->mmap_sem);
5208 + kmem_cache_free(vm_area_cachep, vma);
5209 + return 1;
5210 + }
5211 +
5212 + current->mm->call_dl_resolve = call_dl_resolve;
5213 + up_write(&current->mm->mmap_sem);
5214 +
5215 +emulate:
5216 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5217 + regs->tpc = call_dl_resolve;
5218 + regs->tnpc = addr+4;
5219 + return 3;
5220 + }
5221 +#endif
5222 +
5223 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5224 + if ((save & 0xFFC00000U) == 0x05000000U &&
5225 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5226 + nop == 0x01000000U)
5227 + {
5228 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5229 + regs->u_regs[UREG_G2] = addr + 4;
5230 + addr = (save & 0x003FFFFFU) << 10;
5231 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5232 +
5233 + if (test_thread_flag(TIF_32BIT))
5234 + addr &= 0xFFFFFFFFUL;
5235 +
5236 + regs->tpc = addr;
5237 + regs->tnpc = addr+4;
5238 + return 3;
5239 + }
5240 +
5241 + /* PaX: 64-bit PLT stub */
5242 + err = get_user(sethi1, (unsigned int *)addr);
5243 + err |= get_user(sethi2, (unsigned int *)(addr+4));
5244 + err |= get_user(or1, (unsigned int *)(addr+8));
5245 + err |= get_user(or2, (unsigned int *)(addr+12));
5246 + err |= get_user(sllx, (unsigned int *)(addr+16));
5247 + err |= get_user(add, (unsigned int *)(addr+20));
5248 + err |= get_user(jmpl, (unsigned int *)(addr+24));
5249 + err |= get_user(nop, (unsigned int *)(addr+28));
5250 + if (err)
5251 + break;
5252 +
5253 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5254 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5255 + (or1 & 0xFFFFE000U) == 0x88112000U &&
5256 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5257 + sllx == 0x89293020U &&
5258 + add == 0x8A010005U &&
5259 + jmpl == 0x89C14000U &&
5260 + nop == 0x01000000U)
5261 + {
5262 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5263 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5264 + regs->u_regs[UREG_G4] <<= 32;
5265 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5266 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5267 + regs->u_regs[UREG_G4] = addr + 24;
5268 + addr = regs->u_regs[UREG_G5];
5269 + regs->tpc = addr;
5270 + regs->tnpc = addr+4;
5271 + return 3;
5272 + }
5273 + }
5274 + } while (0);
5275 +
5276 +#ifdef CONFIG_PAX_DLRESOLVE
5277 + do { /* PaX: unpatched PLT emulation step 2 */
5278 + unsigned int save, call, nop;
5279 +
5280 + err = get_user(save, (unsigned int *)(regs->tpc-4));
5281 + err |= get_user(call, (unsigned int *)regs->tpc);
5282 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5283 + if (err)
5284 + break;
5285 +
5286 + if (save == 0x9DE3BFA8U &&
5287 + (call & 0xC0000000U) == 0x40000000U &&
5288 + nop == 0x01000000U)
5289 + {
5290 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5291 +
5292 + if (test_thread_flag(TIF_32BIT))
5293 + dl_resolve &= 0xFFFFFFFFUL;
5294 +
5295 + regs->u_regs[UREG_RETPC] = regs->tpc;
5296 + regs->tpc = dl_resolve;
5297 + regs->tnpc = dl_resolve+4;
5298 + return 3;
5299 + }
5300 + } while (0);
5301 +#endif
5302 +
5303 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5304 + unsigned int sethi, ba, nop;
5305 +
5306 + err = get_user(sethi, (unsigned int *)regs->tpc);
5307 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5308 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5309 +
5310 + if (err)
5311 + break;
5312 +
5313 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5314 + (ba & 0xFFF00000U) == 0x30600000U &&
5315 + nop == 0x01000000U)
5316 + {
5317 + unsigned long addr;
5318 +
5319 + addr = (sethi & 0x003FFFFFU) << 10;
5320 + regs->u_regs[UREG_G1] = addr;
5321 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5322 +
5323 + if (test_thread_flag(TIF_32BIT))
5324 + addr &= 0xFFFFFFFFUL;
5325 +
5326 + regs->tpc = addr;
5327 + regs->tnpc = addr+4;
5328 + return 2;
5329 + }
5330 + } while (0);
5331 +
5332 +#endif
5333 +
5334 + return 1;
5335 +}
5336 +
5337 +void pax_report_insns(void *pc, void *sp)
5338 +{
5339 + unsigned long i;
5340 +
5341 + printk(KERN_ERR "PAX: bytes at PC: ");
5342 + for (i = 0; i < 8; i++) {
5343 + unsigned int c;
5344 + if (get_user(c, (unsigned int *)pc+i))
5345 + printk(KERN_CONT "???????? ");
5346 + else
5347 + printk(KERN_CONT "%08x ", c);
5348 + }
5349 + printk("\n");
5350 +}
5351 +#endif
5352 +
5353 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5354 {
5355 struct mm_struct *mm = current->mm;
5356 @@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fau
5357 if (!vma)
5358 goto bad_area;
5359
5360 +#ifdef CONFIG_PAX_PAGEEXEC
5361 + /* PaX: detect ITLB misses on non-exec pages */
5362 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5363 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5364 + {
5365 + if (address != regs->tpc)
5366 + goto good_area;
5367 +
5368 + up_read(&mm->mmap_sem);
5369 + switch (pax_handle_fetch_fault(regs)) {
5370 +
5371 +#ifdef CONFIG_PAX_EMUPLT
5372 + case 2:
5373 + case 3:
5374 + return;
5375 +#endif
5376 +
5377 + }
5378 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
5379 + do_group_exit(SIGKILL);
5380 + }
5381 +#endif
5382 +
5383 /* Pure DTLB misses do not tell us whether the fault causing
5384 * load/store/atomic was a write or not, it only says that there
5385 * was no match. So in such a case we (carefully) read the
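[Illustration, not part of the patch: the ((insn | ~MASK) ^ SIGN) + SIGN expressions repeated throughout the emulation code above are branch-free sign extension of SPARC immediate fields (13-bit simm13, 19- and 22-bit branch displacements). A tiny standalone check of the idiom:]

/* Generic form of the sign-extension trick used above. */
#include <stdio.h>
#include <stdint.h>

static unsigned long sext(uint32_t insn, unsigned int bits)
{
    unsigned long mask = (1UL << bits) - 1;    /* e.g. 0x3FFFFF for disp22 */
    unsigned long sign = 1UL << (bits - 1);
    return ((insn | ~mask) ^ sign) + sign;
}

int main(void)
{
    /* ba,a with disp22 = -4, i.e. a branch 16 bytes backwards */
    uint32_t ba = 0x30800000U | 0x3FFFFCU;
    long disp = (long)(sext(ba, 22) << 2);
    printf("disp22 = %ld bytes\n", disp);      /* prints -16 */
    return 0;
}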
5386 diff -urNp linux-2.6.35.4/arch/sparc/mm/hugetlbpage.c linux-2.6.35.4/arch/sparc/mm/hugetlbpage.c
5387 --- linux-2.6.35.4/arch/sparc/mm/hugetlbpage.c 2010-08-26 19:47:12.000000000 -0400
5388 +++ linux-2.6.35.4/arch/sparc/mm/hugetlbpage.c 2010-09-17 20:12:09.000000000 -0400
5389 @@ -68,7 +68,7 @@ full_search:
5390 }
5391 return -ENOMEM;
5392 }
5393 - if (likely(!vma || addr + len <= vma->vm_start)) {
5394 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5395 /*
5396 * Remember the place where we stopped the search:
5397 */
5398 @@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct
5399 /* make sure it can fit in the remaining address space */
5400 if (likely(addr > len)) {
5401 vma = find_vma(mm, addr-len);
5402 - if (!vma || addr <= vma->vm_start) {
5403 + if (check_heap_stack_gap(vma, addr - len, len)) {
5404 /* remember the address as a hint for next time */
5405 return (mm->free_area_cache = addr-len);
5406 }
5407 @@ -125,7 +125,7 @@ hugetlb_get_unmapped_area_topdown(struct
5408 * return with success:
5409 */
5410 vma = find_vma(mm, addr);
5411 - if (likely(!vma || addr+len <= vma->vm_start)) {
5412 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5413 /* remember the address as a hint for next time */
5414 return (mm->free_area_cache = addr);
5415 }
5416 @@ -182,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *f
5417 if (addr) {
5418 addr = ALIGN(addr, HPAGE_SIZE);
5419 vma = find_vma(mm, addr);
5420 - if (task_size - len >= addr &&
5421 - (!vma || addr + len <= vma->vm_start))
5422 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5423 return addr;
5424 }
5425 if (mm->get_unmapped_area == arch_get_unmapped_area)
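[Illustration, not part of the patch: check_heap_stack_gap(), defined in the common mm part of the patch, subsumes the old "!vma || addr + len <= vma->vm_start" tests replaced above and additionally keeps a guard gap below stack-style (grows-down) mappings. A simplified self-contained approximation follows; the fixed gap size and the cut-down struct are assumptions, not the patch's code.]

/* Approximate model of the gap check used in the hunks above. */
#include <stdio.h>
#include <stdbool.h>

#define HEAP_STACK_GAP (64UL * 1024)    /* assumed gap size, for illustration */
#define VM_GROWSDOWN   0x0100UL

struct vm_area_struct {
    unsigned long vm_start;
    unsigned long vm_end;
    unsigned long vm_flags;
};

static bool check_heap_stack_gap(const struct vm_area_struct *vma,
                                 unsigned long addr, unsigned long len)
{
    if (!vma)                                    /* nothing above: always fits */
        return true;
    if (addr + len > vma->vm_start)              /* overlaps the next mapping  */
        return false;
    if (vma->vm_flags & VM_GROWSDOWN)            /* keep room for stack growth */
        return addr + len + HEAP_STACK_GAP <= vma->vm_start;
    return true;
}

int main(void)
{
    struct vm_area_struct stack = { 0x7ffff000UL, 0x80000000UL, VM_GROWSDOWN };

    /* a 16 KiB request ending right below the stack is refused,
     * the same request 64 KiB lower is accepted */
    printf("%d %d\n",
           check_heap_stack_gap(&stack, 0x7fffb000UL, 0x4000UL),
           check_heap_stack_gap(&stack, 0x7ffeb000UL, 0x4000UL));
    return 0;
}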
5426 diff -urNp linux-2.6.35.4/arch/sparc/mm/init_32.c linux-2.6.35.4/arch/sparc/mm/init_32.c
5427 --- linux-2.6.35.4/arch/sparc/mm/init_32.c 2010-08-26 19:47:12.000000000 -0400
5428 +++ linux-2.6.35.4/arch/sparc/mm/init_32.c 2010-09-17 20:12:09.000000000 -0400
5429 @@ -318,6 +318,9 @@ extern void device_scan(void);
5430 pgprot_t PAGE_SHARED __read_mostly;
5431 EXPORT_SYMBOL(PAGE_SHARED);
5432
5433 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5434 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5435 +
5436 void __init paging_init(void)
5437 {
5438 switch(sparc_cpu_model) {
5439 @@ -346,17 +349,17 @@ void __init paging_init(void)
5440
5441 /* Initialize the protection map with non-constant, MMU dependent values. */
5442 protection_map[0] = PAGE_NONE;
5443 - protection_map[1] = PAGE_READONLY;
5444 - protection_map[2] = PAGE_COPY;
5445 - protection_map[3] = PAGE_COPY;
5446 + protection_map[1] = PAGE_READONLY_NOEXEC;
5447 + protection_map[2] = PAGE_COPY_NOEXEC;
5448 + protection_map[3] = PAGE_COPY_NOEXEC;
5449 protection_map[4] = PAGE_READONLY;
5450 protection_map[5] = PAGE_READONLY;
5451 protection_map[6] = PAGE_COPY;
5452 protection_map[7] = PAGE_COPY;
5453 protection_map[8] = PAGE_NONE;
5454 - protection_map[9] = PAGE_READONLY;
5455 - protection_map[10] = PAGE_SHARED;
5456 - protection_map[11] = PAGE_SHARED;
5457 + protection_map[9] = PAGE_READONLY_NOEXEC;
5458 + protection_map[10] = PAGE_SHARED_NOEXEC;
5459 + protection_map[11] = PAGE_SHARED_NOEXEC;
5460 protection_map[12] = PAGE_READONLY;
5461 protection_map[13] = PAGE_READONLY;
5462 protection_map[14] = PAGE_SHARED;
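[Illustration, not part of the patch: protection_map[] is indexed by the VM_READ (1), VM_WRITE (2) and VM_EXEC (4) bits plus VM_SHARED (8), so entries 1-3 and 9-11 are exactly the readable or writable but non-executable combinations that the hunk above switches to the *_NOEXEC protections when PAGEEXEC is enabled. A small table generator makes the indexing visible:]

/* Print which protection_map slots the hunk above turns into NOEXEC ones. */
#include <stdio.h>

#define P_READ   1
#define P_WRITE  2
#define P_EXEC   4
#define P_SHARED 8

int main(void)
{
    for (int i = 0; i < 16; i++)
        printf("protection_map[%2d]: %c%c%c %s%s\n", i,
               (i & P_READ)   ? 'r' : '-',
               (i & P_WRITE)  ? 'w' : '-',
               (i & P_EXEC)   ? 'x' : '-',
               (i & P_SHARED) ? "shared " : "private",
               (!(i & P_EXEC) && (i & (P_READ | P_WRITE)))
                   ? "  <- NOEXEC variant under PAGEEXEC" : "");
    return 0;
}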
5463 diff -urNp linux-2.6.35.4/arch/sparc/mm/Makefile linux-2.6.35.4/arch/sparc/mm/Makefile
5464 --- linux-2.6.35.4/arch/sparc/mm/Makefile 2010-08-26 19:47:12.000000000 -0400
5465 +++ linux-2.6.35.4/arch/sparc/mm/Makefile 2010-09-17 20:12:09.000000000 -0400
5466 @@ -2,7 +2,7 @@
5467 #
5468
5469 asflags-y := -ansi
5470 -ccflags-y := -Werror
5471 +#ccflags-y := -Werror
5472
5473 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5474 obj-y += fault_$(BITS).o
5475 diff -urNp linux-2.6.35.4/arch/sparc/mm/srmmu.c linux-2.6.35.4/arch/sparc/mm/srmmu.c
5476 --- linux-2.6.35.4/arch/sparc/mm/srmmu.c 2010-08-26 19:47:12.000000000 -0400
5477 +++ linux-2.6.35.4/arch/sparc/mm/srmmu.c 2010-09-17 20:12:09.000000000 -0400
5478 @@ -2198,6 +2198,13 @@ void __init ld_mmu_srmmu(void)
5479 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5480 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5481 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5482 +
5483 +#ifdef CONFIG_PAX_PAGEEXEC
5484 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5485 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5486 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5487 +#endif
5488 +
5489 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5490 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5491
5492 diff -urNp linux-2.6.35.4/arch/um/include/asm/kmap_types.h linux-2.6.35.4/arch/um/include/asm/kmap_types.h
5493 --- linux-2.6.35.4/arch/um/include/asm/kmap_types.h 2010-08-26 19:47:12.000000000 -0400
5494 +++ linux-2.6.35.4/arch/um/include/asm/kmap_types.h 2010-09-17 20:12:09.000000000 -0400
5495 @@ -23,6 +23,7 @@ enum km_type {
5496 KM_IRQ1,
5497 KM_SOFTIRQ0,
5498 KM_SOFTIRQ1,
5499 + KM_CLEARPAGE,
5500 KM_TYPE_NR
5501 };
5502
5503 diff -urNp linux-2.6.35.4/arch/um/include/asm/page.h linux-2.6.35.4/arch/um/include/asm/page.h
5504 --- linux-2.6.35.4/arch/um/include/asm/page.h 2010-08-26 19:47:12.000000000 -0400
5505 +++ linux-2.6.35.4/arch/um/include/asm/page.h 2010-09-17 20:12:09.000000000 -0400
5506 @@ -14,6 +14,9 @@
5507 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5508 #define PAGE_MASK (~(PAGE_SIZE-1))
5509
5510 +#define ktla_ktva(addr) (addr)
5511 +#define ktva_ktla(addr) (addr)
5512 +
5513 #ifndef __ASSEMBLY__
5514
5515 struct page;
5516 diff -urNp linux-2.6.35.4/arch/um/sys-i386/syscalls.c linux-2.6.35.4/arch/um/sys-i386/syscalls.c
5517 --- linux-2.6.35.4/arch/um/sys-i386/syscalls.c 2010-08-26 19:47:12.000000000 -0400
5518 +++ linux-2.6.35.4/arch/um/sys-i386/syscalls.c 2010-09-17 20:12:09.000000000 -0400
5519 @@ -11,6 +11,21 @@
5520 #include "asm/uaccess.h"
5521 #include "asm/unistd.h"
5522
5523 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5524 +{
5525 + unsigned long pax_task_size = TASK_SIZE;
5526 +
5527 +#ifdef CONFIG_PAX_SEGMEXEC
5528 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5529 + pax_task_size = SEGMEXEC_TASK_SIZE;
5530 +#endif
5531 +
5532 + if (len > pax_task_size || addr > pax_task_size - len)
5533 + return -EINVAL;
5534 +
5535 + return 0;
5536 +}
5537 +
5538 /*
5539 * The prototype on i386 is:
5540 *
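[Illustration, not part of the patch: the new i386_mmap_check() rejects requests that would end beyond the task size (shrunk to SEGMEXEC_TASK_SIZE when segmentation-based NX is active), and the test is written as "addr > pax_task_size - len" so it cannot wrap the way a naive "addr + len > limit" comparison could. A standalone version of the same bound check with example values:]

/* The overflow-safe mmap bound check from the hunk above. */
#include <stdio.h>

static int mmap_check(unsigned long addr, unsigned long len, unsigned long limit)
{
    if (len > limit || addr > limit - len)
        return -22;                      /* -EINVAL */
    return 0;
}

int main(void)
{
    unsigned long task_size = 0xC0000000UL;   /* 3 GiB, the usual i386 split */

    printf("%d\n", mmap_check(0xBFFFF000UL, 0x1000UL, task_size)); /* fits: 0 */
    printf("%d\n", mmap_check(0xBFFFF000UL, 0x2000UL, task_size)); /* ends past the limit */
    printf("%d\n", mmap_check(0xFFFFF000UL, 0x2000UL, task_size)); /* addr + len would wrap */
    return 0;
}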
5541 diff -urNp linux-2.6.35.4/arch/x86/boot/bitops.h linux-2.6.35.4/arch/x86/boot/bitops.h
5542 --- linux-2.6.35.4/arch/x86/boot/bitops.h 2010-08-26 19:47:12.000000000 -0400
5543 +++ linux-2.6.35.4/arch/x86/boot/bitops.h 2010-09-17 20:12:09.000000000 -0400
5544 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int
5545 u8 v;
5546 const u32 *p = (const u32 *)addr;
5547
5548 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5549 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5550 return v;
5551 }
5552
5553 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int
5554
5555 static inline void set_bit(int nr, void *addr)
5556 {
5557 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5558 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5559 }
5560
5561 #endif /* BOOT_BITOPS_H */
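[Illustration, not part of the patch: this and the following boot-code hunks add "volatile" to inline asm that has output operands; without it the compiler may treat two identical asm statements as the same pure expression and merge or drop them, which is unsafe when the asm reads changing hardware or system state. A toy x86 demo of the difference (compile with gcc -O2); rdtsc is used only because its result visibly changes between executions:]

/* Non-volatile asm may be merged by the optimizer; volatile asm may not. */
#include <stdio.h>

static unsigned int tsc_lo_plain(void)
{
    unsigned int lo, hi;
    asm("rdtsc" : "=a" (lo), "=d" (hi));           /* candidate for CSE */
    return lo;
}

static unsigned int tsc_lo_volatile(void)
{
    unsigned int lo, hi;
    asm volatile("rdtsc" : "=a" (lo), "=d" (hi));  /* always re-executed */
    return lo;
}

int main(void)
{
    unsigned int p1 = tsc_lo_plain(),    p2 = tsc_lo_plain();
    unsigned int v1 = tsc_lo_volatile(), v2 = tsc_lo_volatile();

    /* at -O2 the two plain calls are often folded into a single rdtsc,
     * so the first delta can print 0; the volatile pair never is */
    printf("plain delta: %u, volatile delta: %u\n", p2 - p1, v2 - v1);
    return 0;
}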
5562 diff -urNp linux-2.6.35.4/arch/x86/boot/boot.h linux-2.6.35.4/arch/x86/boot/boot.h
5563 --- linux-2.6.35.4/arch/x86/boot/boot.h 2010-08-26 19:47:12.000000000 -0400
5564 +++ linux-2.6.35.4/arch/x86/boot/boot.h 2010-09-17 20:12:09.000000000 -0400
5565 @@ -82,7 +82,7 @@ static inline void io_delay(void)
5566 static inline u16 ds(void)
5567 {
5568 u16 seg;
5569 - asm("movw %%ds,%0" : "=rm" (seg));
5570 + asm volatile("movw %%ds,%0" : "=rm" (seg));
5571 return seg;
5572 }
5573
5574 @@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t
5575 static inline int memcmp(const void *s1, const void *s2, size_t len)
5576 {
5577 u8 diff;
5578 - asm("repe; cmpsb; setnz %0"
5579 + asm volatile("repe; cmpsb; setnz %0"
5580 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5581 return diff;
5582 }
5583 diff -urNp linux-2.6.35.4/arch/x86/boot/compressed/head_32.S linux-2.6.35.4/arch/x86/boot/compressed/head_32.S
5584 --- linux-2.6.35.4/arch/x86/boot/compressed/head_32.S 2010-08-26 19:47:12.000000000 -0400
5585 +++ linux-2.6.35.4/arch/x86/boot/compressed/head_32.S 2010-09-17 20:12:09.000000000 -0400
5586 @@ -76,7 +76,7 @@ ENTRY(startup_32)
5587 notl %eax
5588 andl %eax, %ebx
5589 #else
5590 - movl $LOAD_PHYSICAL_ADDR, %ebx
5591 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5592 #endif
5593
5594 /* Target address to relocate to for decompression */
5595 @@ -149,7 +149,7 @@ relocated:
5596 * and where it was actually loaded.
5597 */
5598 movl %ebp, %ebx
5599 - subl $LOAD_PHYSICAL_ADDR, %ebx
5600 + subl $____LOAD_PHYSICAL_ADDR, %ebx
5601 jz 2f /* Nothing to be done if loaded at compiled addr. */
5602 /*
5603 * Process relocations.
5604 @@ -157,8 +157,7 @@ relocated:
5605
5606 1: subl $4, %edi
5607 movl (%edi), %ecx
5608 - testl %ecx, %ecx
5609 - jz 2f
5610 + jecxz 2f
5611 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5612 jmp 1b
5613 2:
5614 diff -urNp linux-2.6.35.4/arch/x86/boot/compressed/head_64.S linux-2.6.35.4/arch/x86/boot/compressed/head_64.S
5615 --- linux-2.6.35.4/arch/x86/boot/compressed/head_64.S 2010-08-26 19:47:12.000000000 -0400
5616 +++ linux-2.6.35.4/arch/x86/boot/compressed/head_64.S 2010-09-17 20:12:09.000000000 -0400
5617 @@ -91,7 +91,7 @@ ENTRY(startup_32)
5618 notl %eax
5619 andl %eax, %ebx
5620 #else
5621 - movl $LOAD_PHYSICAL_ADDR, %ebx
5622 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5623 #endif
5624
5625 /* Target address to relocate to for decompression */
5626 @@ -233,7 +233,7 @@ ENTRY(startup_64)
5627 notq %rax
5628 andq %rax, %rbp
5629 #else
5630 - movq $LOAD_PHYSICAL_ADDR, %rbp
5631 + movq $____LOAD_PHYSICAL_ADDR, %rbp
5632 #endif
5633
5634 /* Target address to relocate to for decompression */
5635 diff -urNp linux-2.6.35.4/arch/x86/boot/compressed/misc.c linux-2.6.35.4/arch/x86/boot/compressed/misc.c
5636 --- linux-2.6.35.4/arch/x86/boot/compressed/misc.c 2010-08-26 19:47:12.000000000 -0400
5637 +++ linux-2.6.35.4/arch/x86/boot/compressed/misc.c 2010-09-17 20:12:09.000000000 -0400
5638 @@ -285,7 +285,7 @@ static void parse_elf(void *output)
5639 case PT_LOAD:
5640 #ifdef CONFIG_RELOCATABLE
5641 dest = output;
5642 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5643 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5644 #else
5645 dest = (void *)(phdr->p_paddr);
5646 #endif
5647 @@ -332,7 +332,7 @@ asmlinkage void decompress_kernel(void *
5648 error("Destination address too large");
5649 #endif
5650 #ifndef CONFIG_RELOCATABLE
5651 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5652 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5653 error("Wrong destination address");
5654 #endif
5655
5656 diff -urNp linux-2.6.35.4/arch/x86/boot/compressed/mkpiggy.c linux-2.6.35.4/arch/x86/boot/compressed/mkpiggy.c
5657 --- linux-2.6.35.4/arch/x86/boot/compressed/mkpiggy.c 2010-08-26 19:47:12.000000000 -0400
5658 +++ linux-2.6.35.4/arch/x86/boot/compressed/mkpiggy.c 2010-09-17 20:12:09.000000000 -0400
5659 @@ -74,7 +74,7 @@ int main(int argc, char *argv[])
5660
5661 offs = (olen > ilen) ? olen - ilen : 0;
5662 offs += olen >> 12; /* Add 8 bytes for each 32K block */
5663 - offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
5664 + offs += 64*1024; /* Add 64K bytes slack */
5665 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
5666
5667 printf(".section \".rodata..compressed\",\"a\",@progbits\n");
5668 diff -urNp linux-2.6.35.4/arch/x86/boot/compressed/relocs.c linux-2.6.35.4/arch/x86/boot/compressed/relocs.c
5669 --- linux-2.6.35.4/arch/x86/boot/compressed/relocs.c 2010-08-26 19:47:12.000000000 -0400
5670 +++ linux-2.6.35.4/arch/x86/boot/compressed/relocs.c 2010-09-17 20:12:09.000000000 -0400
5671 @@ -13,8 +13,11 @@
5672
5673 static void die(char *fmt, ...);
5674
5675 +#include "../../../../include/generated/autoconf.h"
5676 +
5677 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5678 static Elf32_Ehdr ehdr;
5679 +static Elf32_Phdr *phdr;
5680 static unsigned long reloc_count, reloc_idx;
5681 static unsigned long *relocs;
5682
5683 @@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5684 }
5685 }
5686
5687 +static void read_phdrs(FILE *fp)
5688 +{
5689 + unsigned int i;
5690 +
5691 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5692 + if (!phdr) {
5693 + die("Unable to allocate %d program headers\n",
5694 + ehdr.e_phnum);
5695 + }
5696 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5697 + die("Seek to %d failed: %s\n",
5698 + ehdr.e_phoff, strerror(errno));
5699 + }
5700 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5701 + die("Cannot read ELF program headers: %s\n",
5702 + strerror(errno));
5703 + }
5704 + for(i = 0; i < ehdr.e_phnum; i++) {
5705 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5706 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5707 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5708 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5709 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5710 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5711 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5712 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5713 + }
5714 +
5715 +}
5716 +
5717 static void read_shdrs(FILE *fp)
5718 {
5719 - int i;
5720 + unsigned int i;
5721 Elf32_Shdr shdr;
5722
5723 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5724 @@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5725
5726 static void read_strtabs(FILE *fp)
5727 {
5728 - int i;
5729 + unsigned int i;
5730 for (i = 0; i < ehdr.e_shnum; i++) {
5731 struct section *sec = &secs[i];
5732 if (sec->shdr.sh_type != SHT_STRTAB) {
5733 @@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5734
5735 static void read_symtabs(FILE *fp)
5736 {
5737 - int i,j;
5738 + unsigned int i,j;
5739 for (i = 0; i < ehdr.e_shnum; i++) {
5740 struct section *sec = &secs[i];
5741 if (sec->shdr.sh_type != SHT_SYMTAB) {
5742 @@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5743
5744 static void read_relocs(FILE *fp)
5745 {
5746 - int i,j;
5747 + unsigned int i,j;
5748 + uint32_t base;
5749 +
5750 for (i = 0; i < ehdr.e_shnum; i++) {
5751 struct section *sec = &secs[i];
5752 if (sec->shdr.sh_type != SHT_REL) {
5753 @@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5754 die("Cannot read symbol table: %s\n",
5755 strerror(errno));
5756 }
5757 + base = 0;
5758 + for (j = 0; j < ehdr.e_phnum; j++) {
5759 + if (phdr[j].p_type != PT_LOAD )
5760 + continue;
5761 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5762 + continue;
5763 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5764 + break;
5765 + }
5766 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5767 Elf32_Rel *rel = &sec->reltab[j];
5768 - rel->r_offset = elf32_to_cpu(rel->r_offset);
5769 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5770 rel->r_info = elf32_to_cpu(rel->r_info);
5771 }
5772 }
5773 @@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5774
5775 static void print_absolute_symbols(void)
5776 {
5777 - int i;
5778 + unsigned int i;
5779 printf("Absolute symbols\n");
5780 printf(" Num: Value Size Type Bind Visibility Name\n");
5781 for (i = 0; i < ehdr.e_shnum; i++) {
5782 struct section *sec = &secs[i];
5783 char *sym_strtab;
5784 Elf32_Sym *sh_symtab;
5785 - int j;
5786 + unsigned int j;
5787
5788 if (sec->shdr.sh_type != SHT_SYMTAB) {
5789 continue;
5790 @@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5791
5792 static void print_absolute_relocs(void)
5793 {
5794 - int i, printed = 0;
5795 + unsigned int i, printed = 0;
5796
5797 for (i = 0; i < ehdr.e_shnum; i++) {
5798 struct section *sec = &secs[i];
5799 struct section *sec_applies, *sec_symtab;
5800 char *sym_strtab;
5801 Elf32_Sym *sh_symtab;
5802 - int j;
5803 + unsigned int j;
5804 if (sec->shdr.sh_type != SHT_REL) {
5805 continue;
5806 }
5807 @@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
5808
5809 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
5810 {
5811 - int i;
5812 + unsigned int i;
5813 /* Walk through the relocations */
5814 for (i = 0; i < ehdr.e_shnum; i++) {
5815 char *sym_strtab;
5816 Elf32_Sym *sh_symtab;
5817 struct section *sec_applies, *sec_symtab;
5818 - int j;
5819 + unsigned int j;
5820 struct section *sec = &secs[i];
5821
5822 if (sec->shdr.sh_type != SHT_REL) {
5823 @@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(El
5824 !is_rel_reloc(sym_name(sym_strtab, sym))) {
5825 continue;
5826 }
5827 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
5828 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
5829 + continue;
5830 +
5831 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
5832 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
5833 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
5834 + continue;
5835 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
5836 + continue;
5837 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
5838 + continue;
5839 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
5840 + continue;
5841 +#endif
5842 +
5843 switch (r_type) {
5844 case R_386_NONE:
5845 case R_386_PC32:
5846 @@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, co
5847
5848 static void emit_relocs(int as_text)
5849 {
5850 - int i;
5851 + unsigned int i;
5852 /* Count how many relocations I have and allocate space for them. */
5853 reloc_count = 0;
5854 walk_relocs(count_reloc);
5855 @@ -665,6 +725,7 @@ int main(int argc, char **argv)
5856 fname, strerror(errno));
5857 }
5858 read_ehdr(fp);
5859 + read_phdrs(fp);
5860 read_shdrs(fp);
5861 read_strtabs(fp);
5862 read_symtabs(fp);
5863 diff -urNp linux-2.6.35.4/arch/x86/boot/cpucheck.c linux-2.6.35.4/arch/x86/boot/cpucheck.c
5864 --- linux-2.6.35.4/arch/x86/boot/cpucheck.c 2010-08-26 19:47:12.000000000 -0400
5865 +++ linux-2.6.35.4/arch/x86/boot/cpucheck.c 2010-09-17 20:12:09.000000000 -0400
5866 @@ -74,7 +74,7 @@ static int has_fpu(void)
5867 u16 fcw = -1, fsw = -1;
5868 u32 cr0;
5869
5870 - asm("movl %%cr0,%0" : "=r" (cr0));
5871 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
5872 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
5873 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
5874 asm volatile("movl %0,%%cr0" : : "r" (cr0));
5875 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
5876 {
5877 u32 f0, f1;
5878
5879 - asm("pushfl ; "
5880 + asm volatile("pushfl ; "
5881 "pushfl ; "
5882 "popl %0 ; "
5883 "movl %0,%1 ; "
5884 @@ -115,7 +115,7 @@ static void get_flags(void)
5885 set_bit(X86_FEATURE_FPU, cpu.flags);
5886
5887 if (has_eflag(X86_EFLAGS_ID)) {
5888 - asm("cpuid"
5889 + asm volatile("cpuid"
5890 : "=a" (max_intel_level),
5891 "=b" (cpu_vendor[0]),
5892 "=d" (cpu_vendor[1]),
5893 @@ -124,7 +124,7 @@ static void get_flags(void)
5894
5895 if (max_intel_level >= 0x00000001 &&
5896 max_intel_level <= 0x0000ffff) {
5897 - asm("cpuid"
5898 + asm volatile("cpuid"
5899 : "=a" (tfms),
5900 "=c" (cpu.flags[4]),
5901 "=d" (cpu.flags[0])
5902 @@ -136,7 +136,7 @@ static void get_flags(void)
5903 cpu.model += ((tfms >> 16) & 0xf) << 4;
5904 }
5905
5906 - asm("cpuid"
5907 + asm volatile("cpuid"
5908 : "=a" (max_amd_level)
5909 : "a" (0x80000000)
5910 : "ebx", "ecx", "edx");
5911 @@ -144,7 +144,7 @@ static void get_flags(void)
5912 if (max_amd_level >= 0x80000001 &&
5913 max_amd_level <= 0x8000ffff) {
5914 u32 eax = 0x80000001;
5915 - asm("cpuid"
5916 + asm volatile("cpuid"
5917 : "+a" (eax),
5918 "=c" (cpu.flags[6]),
5919 "=d" (cpu.flags[1])
5920 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5921 u32 ecx = MSR_K7_HWCR;
5922 u32 eax, edx;
5923
5924 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5925 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5926 eax &= ~(1 << 15);
5927 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5928 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5929
5930 get_flags(); /* Make sure it really did something */
5931 err = check_flags();
5932 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5933 u32 ecx = MSR_VIA_FCR;
5934 u32 eax, edx;
5935
5936 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5937 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5938 eax |= (1<<1)|(1<<7);
5939 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5940 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5941
5942 set_bit(X86_FEATURE_CX8, cpu.flags);
5943 err = check_flags();
5944 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
5945 u32 eax, edx;
5946 u32 level = 1;
5947
5948 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5949 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5950 - asm("cpuid"
5951 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5952 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5953 + asm volatile("cpuid"
5954 : "+a" (level), "=d" (cpu.flags[0])
5955 : : "ecx", "ebx");
5956 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5957 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5958
5959 err = check_flags();
5960 }
5961 diff -urNp linux-2.6.35.4/arch/x86/boot/header.S linux-2.6.35.4/arch/x86/boot/header.S
5962 --- linux-2.6.35.4/arch/x86/boot/header.S 2010-08-26 19:47:12.000000000 -0400
5963 +++ linux-2.6.35.4/arch/x86/boot/header.S 2010-09-17 20:12:09.000000000 -0400
5964 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
5965 # single linked list of
5966 # struct setup_data
5967
5968 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
5969 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
5970
5971 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
5972 #define VO_INIT_SIZE (VO__end - VO__text)
5973 diff -urNp linux-2.6.35.4/arch/x86/boot/memory.c linux-2.6.35.4/arch/x86/boot/memory.c
5974 --- linux-2.6.35.4/arch/x86/boot/memory.c 2010-08-26 19:47:12.000000000 -0400
5975 +++ linux-2.6.35.4/arch/x86/boot/memory.c 2010-09-17 20:12:09.000000000 -0400
5976 @@ -19,7 +19,7 @@
5977
5978 static int detect_memory_e820(void)
5979 {
5980 - int count = 0;
5981 + unsigned int count = 0;
5982 struct biosregs ireg, oreg;
5983 struct e820entry *desc = boot_params.e820_map;
5984 static struct e820entry buf; /* static so it is zeroed */
5985 diff -urNp linux-2.6.35.4/arch/x86/boot/video.c linux-2.6.35.4/arch/x86/boot/video.c
5986 --- linux-2.6.35.4/arch/x86/boot/video.c 2010-08-26 19:47:12.000000000 -0400
5987 +++ linux-2.6.35.4/arch/x86/boot/video.c 2010-09-17 20:12:09.000000000 -0400
5988 @@ -96,7 +96,7 @@ static void store_mode_params(void)
5989 static unsigned int get_entry(void)
5990 {
5991 char entry_buf[4];
5992 - int i, len = 0;
5993 + unsigned int i, len = 0;
5994 int key;
5995 unsigned int v;
5996
5997 diff -urNp linux-2.6.35.4/arch/x86/boot/video-vesa.c linux-2.6.35.4/arch/x86/boot/video-vesa.c
5998 --- linux-2.6.35.4/arch/x86/boot/video-vesa.c 2010-08-26 19:47:12.000000000 -0400
5999 +++ linux-2.6.35.4/arch/x86/boot/video-vesa.c 2010-09-17 20:12:09.000000000 -0400
6000 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6001
6002 boot_params.screen_info.vesapm_seg = oreg.es;
6003 boot_params.screen_info.vesapm_off = oreg.di;
6004 + boot_params.screen_info.vesapm_size = oreg.cx;
6005 }
6006
6007 /*
6008 diff -urNp linux-2.6.35.4/arch/x86/ia32/ia32entry.S linux-2.6.35.4/arch/x86/ia32/ia32entry.S
6009 --- linux-2.6.35.4/arch/x86/ia32/ia32entry.S 2010-08-26 19:47:12.000000000 -0400
6010 +++ linux-2.6.35.4/arch/x86/ia32/ia32entry.S 2010-09-17 20:12:37.000000000 -0400
6011 @@ -13,6 +13,7 @@
6012 #include <asm/thread_info.h>
6013 #include <asm/segment.h>
6014 #include <asm/irqflags.h>
6015 +#include <asm/pgtable.h>
6016 #include <linux/linkage.h>
6017
6018 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6019 @@ -50,7 +51,12 @@
6020 /*
6021 * Reload arg registers from stack in case ptrace changed them.
6022 * We don't reload %eax because syscall_trace_enter() returned
6023 - * the value it wants us to use in the table lookup.
6024 + * the %rax value we should see. Instead, we just truncate that
6025 + * value to 32 bits again as we did on entry from user mode.
6026 + * If it's a new value set by user_regset during entry tracing,
6027 + * this matches the normal truncation of the user-mode value.
6028 + * If it's -1 to make us punt the syscall, then (u32)-1 is still
6029 + * an appropriately invalid value.
6030 */
6031 .macro LOAD_ARGS32 offset, _r9=0
6032 .if \_r9
6033 @@ -60,6 +66,7 @@
6034 movl \offset+48(%rsp),%edx
6035 movl \offset+56(%rsp),%esi
6036 movl \offset+64(%rsp),%edi
6037 + movl %eax,%eax /* zero extension */
6038 .endm
6039
6040 .macro CFI_STARTPROC32 simple
6041 @@ -114,6 +121,11 @@ ENTRY(ia32_sysenter_target)
6042 SWAPGS_UNSAFE_STACK
6043 movq PER_CPU_VAR(kernel_stack), %rsp
6044 addq $(KERNEL_STACK_OFFSET),%rsp
6045 +
6046 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6047 + call pax_enter_kernel_user
6048 +#endif
6049 +
6050 /*
6051 * No need to follow this irqs on/off section: the syscall
6052 * disabled irqs, here we enable it straight after entry:
6053 @@ -144,6 +156,12 @@ ENTRY(ia32_sysenter_target)
6054 SAVE_ARGS 0,0,1
6055 /* no need to do an access_ok check here because rbp has been
6056 32bit zero extended */
6057 +
6058 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6059 + mov $PAX_USER_SHADOW_BASE,%r10
6060 + add %r10,%rbp
6061 +#endif
6062 +
6063 1: movl (%rbp),%ebp
6064 .section __ex_table,"a"
6065 .quad 1b,ia32_badarg
6066 @@ -153,7 +171,7 @@ ENTRY(ia32_sysenter_target)
6067 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6068 CFI_REMEMBER_STATE
6069 jnz sysenter_tracesys
6070 - cmpl $(IA32_NR_syscalls-1),%eax
6071 + cmpq $(IA32_NR_syscalls-1),%rax
6072 ja ia32_badsys
6073 sysenter_do_call:
6074 IA32_ARG_FIXUP
6075 @@ -166,6 +184,11 @@ sysenter_dispatch:
6076 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6077 jnz sysexit_audit
6078 sysexit_from_sys_call:
6079 +
6080 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6081 + call pax_exit_kernel_user
6082 +#endif
6083 +
6084 andl $~TS_COMPAT,TI_status(%r10)
6085 /* clear IF, that popfq doesn't enable interrupts early */
6086 andl $~0x200,EFLAGS-R11(%rsp)
6087 @@ -195,7 +218,7 @@ sysexit_from_sys_call:
6088 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
6089 call audit_syscall_entry
6090 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
6091 - cmpl $(IA32_NR_syscalls-1),%eax
6092 + cmpq $(IA32_NR_syscalls-1),%rax
6093 ja ia32_badsys
6094 movl %ebx,%edi /* reload 1st syscall arg */
6095 movl RCX-ARGOFFSET(%rsp),%esi /* reload 2nd syscall arg */
6096 @@ -248,7 +271,7 @@ sysenter_tracesys:
6097 call syscall_trace_enter
6098 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6099 RESTORE_REST
6100 - cmpl $(IA32_NR_syscalls-1),%eax
6101 + cmpq $(IA32_NR_syscalls-1),%rax
6102 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
6103 jmp sysenter_do_call
6104 CFI_ENDPROC
6105 @@ -284,6 +307,11 @@ ENTRY(ia32_cstar_target)
6106 movl %esp,%r8d
6107 CFI_REGISTER rsp,r8
6108 movq PER_CPU_VAR(kernel_stack),%rsp
6109 +
6110 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6111 + call pax_enter_kernel_user
6112 +#endif
6113 +
6114 /*
6115 * No need to follow this irqs on/off section: the syscall
6116 * disabled irqs and here we enable it straight after entry:
6117 @@ -305,6 +333,12 @@ ENTRY(ia32_cstar_target)
6118 /* no need to do an access_ok check here because r8 has been
6119 32bit zero extended */
6120 /* hardware stack frame is complete now */
6121 +
6122 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6123 + mov $PAX_USER_SHADOW_BASE,%r10
6124 + add %r10,%r8
6125 +#endif
6126 +
6127 1: movl (%r8),%r9d
6128 .section __ex_table,"a"
6129 .quad 1b,ia32_badarg
6130 @@ -314,7 +348,7 @@ ENTRY(ia32_cstar_target)
6131 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6132 CFI_REMEMBER_STATE
6133 jnz cstar_tracesys
6134 - cmpl $IA32_NR_syscalls-1,%eax
6135 + cmpq $IA32_NR_syscalls-1,%rax
6136 ja ia32_badsys
6137 cstar_do_call:
6138 IA32_ARG_FIXUP 1
6139 @@ -327,6 +361,11 @@ cstar_dispatch:
6140 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6141 jnz sysretl_audit
6142 sysretl_from_sys_call:
6143 +
6144 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6145 + call pax_exit_kernel_user
6146 +#endif
6147 +
6148 andl $~TS_COMPAT,TI_status(%r10)
6149 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
6150 movl RIP-ARGOFFSET(%rsp),%ecx
6151 @@ -367,7 +406,7 @@ cstar_tracesys:
6152 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
6153 RESTORE_REST
6154 xchgl %ebp,%r9d
6155 - cmpl $(IA32_NR_syscalls-1),%eax
6156 + cmpq $(IA32_NR_syscalls-1),%rax
6157 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
6158 jmp cstar_do_call
6159 END(ia32_cstar_target)
6160 @@ -409,6 +448,11 @@ ENTRY(ia32_syscall)
6161 CFI_REL_OFFSET rip,RIP-RIP
6162 PARAVIRT_ADJUST_EXCEPTION_FRAME
6163 SWAPGS
6164 +
6165 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6166 + call pax_enter_kernel_user
6167 +#endif
6168 +
6169 /*
6170 * No need to follow this irqs on/off section: the syscall
6171 * disabled irqs and here we enable it straight after entry:
6172 @@ -425,7 +469,7 @@ ENTRY(ia32_syscall)
6173 orl $TS_COMPAT,TI_status(%r10)
6174 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
6175 jnz ia32_tracesys
6176 - cmpl $(IA32_NR_syscalls-1),%eax
6177 + cmpq $(IA32_NR_syscalls-1),%rax
6178 ja ia32_badsys
6179 ia32_do_call:
6180 IA32_ARG_FIXUP
6181 @@ -444,7 +488,7 @@ ia32_tracesys:
6182 call syscall_trace_enter
6183 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6184 RESTORE_REST
6185 - cmpl $(IA32_NR_syscalls-1),%eax
6186 + cmpq $(IA32_NR_syscalls-1),%rax
6187 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
6188 jmp ia32_do_call
6189 END(ia32_syscall)
6190 diff -urNp linux-2.6.35.4/arch/x86/ia32/ia32_signal.c linux-2.6.35.4/arch/x86/ia32/ia32_signal.c
6191 --- linux-2.6.35.4/arch/x86/ia32/ia32_signal.c 2010-08-26 19:47:12.000000000 -0400
6192 +++ linux-2.6.35.4/arch/x86/ia32/ia32_signal.c 2010-09-17 20:12:09.000000000 -0400
6193 @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
6194 sp -= frame_size;
6195 /* Align the stack pointer according to the i386 ABI,
6196 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6197 - sp = ((sp + 4) & -16ul) - 4;
6198 + sp = ((sp - 12) & -16ul) - 4;
6199 return (void __user *) sp;
6200 }
6201
6202 @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
6203 0xb8,
6204 __NR_ia32_rt_sigreturn,
6205 0x80cd,
6206 - 0,
6207 + 0
6208 };
6209
6210 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6211 diff -urNp linux-2.6.35.4/arch/x86/include/asm/alternative.h linux-2.6.35.4/arch/x86/include/asm/alternative.h
6212 --- linux-2.6.35.4/arch/x86/include/asm/alternative.h 2010-08-26 19:47:12.000000000 -0400
6213 +++ linux-2.6.35.4/arch/x86/include/asm/alternative.h 2010-09-17 20:12:09.000000000 -0400
6214 @@ -91,7 +91,7 @@ static inline int alternatives_text_rese
6215 " .byte 664f-663f\n" /* replacementlen */ \
6216 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
6217 ".previous\n" \
6218 - ".section .altinstr_replacement, \"ax\"\n" \
6219 + ".section .altinstr_replacement, \"a\"\n" \
6220 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
6221 ".previous"
6222
6223 diff -urNp linux-2.6.35.4/arch/x86/include/asm/apm.h linux-2.6.35.4/arch/x86/include/asm/apm.h
6224 --- linux-2.6.35.4/arch/x86/include/asm/apm.h 2010-08-26 19:47:12.000000000 -0400
6225 +++ linux-2.6.35.4/arch/x86/include/asm/apm.h 2010-09-17 20:12:09.000000000 -0400
6226 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
6227 __asm__ __volatile__(APM_DO_ZERO_SEGS
6228 "pushl %%edi\n\t"
6229 "pushl %%ebp\n\t"
6230 - "lcall *%%cs:apm_bios_entry\n\t"
6231 + "lcall *%%ss:apm_bios_entry\n\t"
6232 "setc %%al\n\t"
6233 "popl %%ebp\n\t"
6234 "popl %%edi\n\t"
6235 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
6236 __asm__ __volatile__(APM_DO_ZERO_SEGS
6237 "pushl %%edi\n\t"
6238 "pushl %%ebp\n\t"
6239 - "lcall *%%cs:apm_bios_entry\n\t"
6240 + "lcall *%%ss:apm_bios_entry\n\t"
6241 "setc %%bl\n\t"
6242 "popl %%ebp\n\t"
6243 "popl %%edi\n\t"
6244 diff -urNp linux-2.6.35.4/arch/x86/include/asm/asm.h linux-2.6.35.4/arch/x86/include/asm/asm.h
6245 --- linux-2.6.35.4/arch/x86/include/asm/asm.h 2010-08-26 19:47:12.000000000 -0400
6246 +++ linux-2.6.35.4/arch/x86/include/asm/asm.h 2010-09-17 20:12:09.000000000 -0400
6247 @@ -37,6 +37,12 @@
6248 #define _ASM_SI __ASM_REG(si)
6249 #define _ASM_DI __ASM_REG(di)
6250
6251 +#ifdef CONFIG_X86_32
6252 +#define _ASM_INTO "into"
6253 +#else
6254 +#define _ASM_INTO "int $4"
6255 +#endif
6256 +
6257 /* Exception table entry */
6258 #ifdef __ASSEMBLY__
6259 # define _ASM_EXTABLE(from,to) \
6260 diff -urNp linux-2.6.35.4/arch/x86/include/asm/atomic64_32.h linux-2.6.35.4/arch/x86/include/asm/atomic64_32.h
6261 --- linux-2.6.35.4/arch/x86/include/asm/atomic64_32.h 2010-08-26 19:47:12.000000000 -0400
6262 +++ linux-2.6.35.4/arch/x86/include/asm/atomic64_32.h 2010-09-17 20:12:09.000000000 -0400
6263 @@ -12,6 +12,14 @@ typedef struct {
6264 u64 __aligned(8) counter;
6265 } atomic64_t;
6266
6267 +#ifdef CONFIG_PAX_REFCOUNT
6268 +typedef struct {
6269 + u64 __aligned(8) counter;
6270 +} atomic64_unchecked_t;
6271 +#else
6272 +typedef atomic64_t atomic64_unchecked_t;
6273 +#endif
6274 +
6275 #define ATOMIC64_INIT(val) { (val) }
6276
6277 #ifdef CONFIG_X86_CMPXCHG64
6278 diff -urNp linux-2.6.35.4/arch/x86/include/asm/atomic64_64.h linux-2.6.35.4/arch/x86/include/asm/atomic64_64.h
6279 --- linux-2.6.35.4/arch/x86/include/asm/atomic64_64.h 2010-08-26 19:47:12.000000000 -0400
6280 +++ linux-2.6.35.4/arch/x86/include/asm/atomic64_64.h 2010-09-17 20:12:09.000000000 -0400
6281 @@ -22,6 +22,18 @@ static inline long atomic64_read(const a
6282 }
6283
6284 /**
6285 + * atomic64_read_unchecked - read atomic64 variable
6286 + * @v: pointer of type atomic64_unchecked_t
6287 + *
6288 + * Atomically reads the value of @v.
6289 + * Doesn't imply a read memory barrier.
6290 + */
6291 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6292 +{
6293 + return v->counter;
6294 +}
6295 +
6296 +/**
6297 * atomic64_set - set atomic64 variable
6298 * @v: pointer to type atomic64_t
6299 * @i: required value
6300 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64
6301 }
6302
6303 /**
6304 + * atomic64_set_unchecked - set atomic64 variable
6305 + * @v: pointer to type atomic64_unchecked_t
6306 + * @i: required value
6307 + *
6308 + * Atomically sets the value of @v to @i.
6309 + */
6310 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6311 +{
6312 + v->counter = i;
6313 +}
6314 +
6315 +/**
6316 * atomic64_add - add integer to atomic64 variable
6317 * @i: integer value to add
6318 * @v: pointer to type atomic64_t
6319 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64
6320 */
6321 static inline void atomic64_add(long i, atomic64_t *v)
6322 {
6323 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
6324 +
6325 +#ifdef CONFIG_PAX_REFCOUNT
6326 + "jno 0f\n"
6327 + LOCK_PREFIX "subq %1,%0\n"
6328 + "int $4\n0:\n"
6329 + _ASM_EXTABLE(0b, 0b)
6330 +#endif
6331 +
6332 + : "=m" (v->counter)
6333 + : "er" (i), "m" (v->counter));
6334 +}
6335 +
6336 +/**
6337 + * atomic64_add_unchecked - add integer to atomic64 variable
6338 + * @i: integer value to add
6339 + * @v: pointer to type atomic64_unchecked_t
6340 + *
6341 + * Atomically adds @i to @v.
6342 + */
6343 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6344 +{
6345 asm volatile(LOCK_PREFIX "addq %1,%0"
6346 : "=m" (v->counter)
6347 : "er" (i), "m" (v->counter));
6348 @@ -56,7 +102,15 @@ static inline void atomic64_add(long i,
6349 */
6350 static inline void atomic64_sub(long i, atomic64_t *v)
6351 {
6352 - asm volatile(LOCK_PREFIX "subq %1,%0"
6353 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
6354 +
6355 +#ifdef CONFIG_PAX_REFCOUNT
6356 + "jno 0f\n"
6357 + LOCK_PREFIX "addq %1,%0\n"
6358 + "int $4\n0:\n"
6359 + _ASM_EXTABLE(0b, 0b)
6360 +#endif
6361 +
6362 : "=m" (v->counter)
6363 : "er" (i), "m" (v->counter));
6364 }
6365 @@ -74,7 +128,16 @@ static inline int atomic64_sub_and_test(
6366 {
6367 unsigned char c;
6368
6369 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
6370 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
6371 +
6372 +#ifdef CONFIG_PAX_REFCOUNT
6373 + "jno 0f\n"
6374 + LOCK_PREFIX "addq %2,%0\n"
6375 + "int $4\n0:\n"
6376 + _ASM_EXTABLE(0b, 0b)
6377 +#endif
6378 +
6379 + "sete %1\n"
6380 : "=m" (v->counter), "=qm" (c)
6381 : "er" (i), "m" (v->counter) : "memory");
6382 return c;
6383 @@ -88,6 +151,31 @@ static inline int atomic64_sub_and_test(
6384 */
6385 static inline void atomic64_inc(atomic64_t *v)
6386 {
6387 + asm volatile(LOCK_PREFIX "incq %0\n"
6388 +
6389 +#ifdef CONFIG_PAX_REFCOUNT
6390 + "jno 0f\n"
6391 + "int $4\n0:\n"
6392 + ".pushsection .fixup,\"ax\"\n"
6393 + "1:\n"
6394 + LOCK_PREFIX "decq %0\n"
6395 + "jmp 0b\n"
6396 + ".popsection\n"
6397 + _ASM_EXTABLE(0b, 1b)
6398 +#endif
6399 +
6400 + : "=m" (v->counter)
6401 + : "m" (v->counter));
6402 +}
6403 +
6404 +/**
6405 + * atomic64_inc_unchecked - increment atomic64 variable
6406 + * @v: pointer to type atomic64_unchecked_t
6407 + *
6408 + * Atomically increments @v by 1.
6409 + */
6410 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
6411 +{
6412 asm volatile(LOCK_PREFIX "incq %0"
6413 : "=m" (v->counter)
6414 : "m" (v->counter));
6415 @@ -101,7 +189,32 @@ static inline void atomic64_inc(atomic64
6416 */
6417 static inline void atomic64_dec(atomic64_t *v)
6418 {
6419 - asm volatile(LOCK_PREFIX "decq %0"
6420 + asm volatile(LOCK_PREFIX "decq %0\n"
6421 +
6422 +#ifdef CONFIG_PAX_REFCOUNT
6423 + "jno 0f\n"
6424 + "int $4\n0:\n"
6425 + ".pushsection .fixup,\"ax\"\n"
6426 + "1: \n"
6427 + LOCK_PREFIX "incq %0\n"
6428 + "jmp 0b\n"
6429 + ".popsection\n"
6430 + _ASM_EXTABLE(0b, 1b)
6431 +#endif
6432 +
6433 + : "=m" (v->counter)
6434 + : "m" (v->counter));
6435 +}
6436 +
6437 +/**
6438 + * atomic64_dec_unchecked - decrement atomic64 variable
6439 + * @v: pointer to type atomic64_t
6440 + *
6441 + * Atomically decrements @v by 1.
6442 + */
6443 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
6444 +{
6445 + asm volatile(LOCK_PREFIX "decq %0\n"
6446 : "=m" (v->counter)
6447 : "m" (v->counter));
6448 }
6449 @@ -118,7 +231,20 @@ static inline int atomic64_dec_and_test(
6450 {
6451 unsigned char c;
6452
6453 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
6454 + asm volatile(LOCK_PREFIX "decq %0\n"
6455 +
6456 +#ifdef CONFIG_PAX_REFCOUNT
6457 + "jno 0f\n"
6458 + "int $4\n0:\n"
6459 + ".pushsection .fixup,\"ax\"\n"
6460 + "1: \n"
6461 + LOCK_PREFIX "incq %0\n"
6462 + "jmp 0b\n"
6463 + ".popsection\n"
6464 + _ASM_EXTABLE(0b, 1b)
6465 +#endif
6466 +
6467 + "sete %1\n"
6468 : "=m" (v->counter), "=qm" (c)
6469 : "m" (v->counter) : "memory");
6470 return c != 0;
6471 @@ -136,7 +262,20 @@ static inline int atomic64_inc_and_test(
6472 {
6473 unsigned char c;
6474
6475 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
6476 + asm volatile(LOCK_PREFIX "incq %0\n"
6477 +
6478 +#ifdef CONFIG_PAX_REFCOUNT
6479 + "jno 0f\n"
6480 + "int $4\n0:\n"
6481 + ".pushsection .fixup,\"ax\"\n"
6482 + "1: \n"
6483 + LOCK_PREFIX "decq %0\n"
6484 + "jmp 0b\n"
6485 + ".popsection\n"
6486 + _ASM_EXTABLE(0b, 1b)
6487 +#endif
6488 +
6489 + "sete %1\n"
6490 : "=m" (v->counter), "=qm" (c)
6491 : "m" (v->counter) : "memory");
6492 return c != 0;
6493 @@ -155,7 +294,16 @@ static inline int atomic64_add_negative(
6494 {
6495 unsigned char c;
6496
6497 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
6498 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
6499 +
6500 +#ifdef CONFIG_PAX_REFCOUNT
6501 + "jno 0f\n"
6502 + LOCK_PREFIX "subq %2,%0\n"
6503 + "int $4\n0:\n"
6504 + _ASM_EXTABLE(0b, 0b)
6505 +#endif
6506 +
6507 + "sets %1\n"
6508 : "=m" (v->counter), "=qm" (c)
6509 : "er" (i), "m" (v->counter) : "memory");
6510 return c;
6511 @@ -171,7 +319,31 @@ static inline int atomic64_add_negative(
6512 static inline long atomic64_add_return(long i, atomic64_t *v)
6513 {
6514 long __i = i;
6515 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
6516 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
6517 +
6518 +#ifdef CONFIG_PAX_REFCOUNT
6519 + "jno 0f\n"
6520 + "movq %0, %1\n"
6521 + "int $4\n0:\n"
6522 + _ASM_EXTABLE(0b, 0b)
6523 +#endif
6524 +
6525 + : "+r" (i), "+m" (v->counter)
6526 + : : "memory");
6527 + return i + __i;
6528 +}
6529 +
6530 +/**
6531 + * atomic64_add_return_unchecked - add and return
6532 + * @i: integer value to add
6533 + * @v: pointer to type atomic64_unchecked_t
6534 + *
6535 + * Atomically adds @i to @v and returns @i + @v
6536 + */
6537 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6538 +{
6539 + long __i = i;
6540 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
6541 : "+r" (i), "+m" (v->counter)
6542 : : "memory");
6543 return i + __i;
6544 @@ -183,6 +355,10 @@ static inline long atomic64_sub_return(l
6545 }
6546
6547 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
6548 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6549 +{
6550 + return atomic64_add_return_unchecked(1, v);
6551 +}
6552 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
6553
6554 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6555 @@ -206,17 +382,29 @@ static inline long atomic64_xchg(atomic6
6556 */
6557 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
6558 {
6559 - long c, old;
6560 + long c, old, new;
6561 c = atomic64_read(v);
6562 for (;;) {
6563 - if (unlikely(c == (u)))
6564 + if (unlikely(c == u))
6565 break;
6566 - old = atomic64_cmpxchg((v), c, c + (a));
6567 +
6568 + asm volatile("add %2,%0\n"
6569 +
6570 +#ifdef CONFIG_PAX_REFCOUNT
6571 + "jno 0f\n"
6572 + "int $4\n0:\n"
6573 + _ASM_EXTABLE(0b, 0b)
6574 +#endif
6575 +
6576 + : "=r" (new)
6577 + : "0" (c), "ir" (a));
6578 +
6579 + old = atomic64_cmpxchg(v, c, new);
6580 if (likely(old == c))
6581 break;
6582 c = old;
6583 }
6584 - return c != (u);
6585 + return c != u;
6586 }
6587
6588 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
6589 diff -urNp linux-2.6.35.4/arch/x86/include/asm/atomic.h linux-2.6.35.4/arch/x86/include/asm/atomic.h
6590 --- linux-2.6.35.4/arch/x86/include/asm/atomic.h 2010-08-26 19:47:12.000000000 -0400
6591 +++ linux-2.6.35.4/arch/x86/include/asm/atomic.h 2010-09-17 20:12:09.000000000 -0400
6592 @@ -26,6 +26,17 @@ static inline int atomic_read(const atom
6593 }
6594
6595 /**
6596 + * atomic_read_unchecked - read atomic variable
6597 + * @v: pointer of type atomic_unchecked_t
6598 + *
6599 + * Atomically reads the value of @v.
6600 + */
6601 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6602 +{
6603 + return v->counter;
6604 +}
6605 +
6606 +/**
6607 * atomic_set - set atomic variable
6608 * @v: pointer of type atomic_t
6609 * @i: required value
6610 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *
6611 }
6612
6613 /**
6614 + * atomic_set_unchecked - set atomic variable
6615 + * @v: pointer of type atomic_unchecked_t
6616 + * @i: required value
6617 + *
6618 + * Atomically sets the value of @v to @i.
6619 + */
6620 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6621 +{
6622 + v->counter = i;
6623 +}
6624 +
6625 +/**
6626 * atomic_add - add integer to atomic variable
6627 * @i: integer value to add
6628 * @v: pointer of type atomic_t
6629 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *
6630 */
6631 static inline void atomic_add(int i, atomic_t *v)
6632 {
6633 - asm volatile(LOCK_PREFIX "addl %1,%0"
6634 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6635 +
6636 +#ifdef CONFIG_PAX_REFCOUNT
6637 + "jno 0f\n"
6638 + LOCK_PREFIX "subl %1,%0\n"
6639 + _ASM_INTO "\n0:\n"
6640 + _ASM_EXTABLE(0b, 0b)
6641 +#endif
6642 +
6643 + : "+m" (v->counter)
6644 + : "ir" (i));
6645 +}
6646 +
6647 +/**
6648 + * atomic_add_unchecked - add integer to atomic variable
6649 + * @i: integer value to add
6650 + * @v: pointer of type atomic_unchecked_t
6651 + *
6652 + * Atomically adds @i to @v.
6653 + */
6654 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6655 +{
6656 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6657 : "+m" (v->counter)
6658 : "ir" (i));
6659 }
6660 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, ato
6661 */
6662 static inline void atomic_sub(int i, atomic_t *v)
6663 {
6664 - asm volatile(LOCK_PREFIX "subl %1,%0"
6665 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6666 +
6667 +#ifdef CONFIG_PAX_REFCOUNT
6668 + "jno 0f\n"
6669 + LOCK_PREFIX "addl %1,%0\n"
6670 + _ASM_INTO "\n0:\n"
6671 + _ASM_EXTABLE(0b, 0b)
6672 +#endif
6673 +
6674 + : "+m" (v->counter)
6675 + : "ir" (i));
6676 +}
6677 +
6678 +/**
6679 + * atomic_sub_unchecked - subtract integer from atomic variable
6680 + * @i: integer value to subtract
6681 + * @v: pointer of type atomic_t
6682 + *
6683 + * Atomically subtracts @i from @v.
6684 + */
6685 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6686 +{
6687 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6688 : "+m" (v->counter)
6689 : "ir" (i));
6690 }
6691 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(in
6692 {
6693 unsigned char c;
6694
6695 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6696 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
6697 +
6698 +#ifdef CONFIG_PAX_REFCOUNT
6699 + "jno 0f\n"
6700 + LOCK_PREFIX "addl %2,%0\n"
6701 + _ASM_INTO "\n0:\n"
6702 + _ASM_EXTABLE(0b, 0b)
6703 +#endif
6704 +
6705 + "sete %1\n"
6706 : "+m" (v->counter), "=qm" (c)
6707 : "ir" (i) : "memory");
6708 return c;
6709 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(in
6710 */
6711 static inline void atomic_inc(atomic_t *v)
6712 {
6713 - asm volatile(LOCK_PREFIX "incl %0"
6714 + asm volatile(LOCK_PREFIX "incl %0\n"
6715 +
6716 +#ifdef CONFIG_PAX_REFCOUNT
6717 + "jno 0f\n"
6718 + LOCK_PREFIX "decl %0\n"
6719 + _ASM_INTO "\n0:\n"
6720 + _ASM_EXTABLE(0b, 0b)
6721 +#endif
6722 +
6723 + : "+m" (v->counter));
6724 +}
6725 +
6726 +/**
6727 + * atomic_inc_unchecked - increment atomic variable
6728 + * @v: pointer of type atomic_unchecked_t
6729 + *
6730 + * Atomically increments @v by 1.
6731 + */
6732 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6733 +{
6734 + asm volatile(LOCK_PREFIX "incl %0\n"
6735 : "+m" (v->counter));
6736 }
6737
6738 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *
6739 */
6740 static inline void atomic_dec(atomic_t *v)
6741 {
6742 - asm volatile(LOCK_PREFIX "decl %0"
6743 + asm volatile(LOCK_PREFIX "decl %0\n"
6744 +
6745 +#ifdef CONFIG_PAX_REFCOUNT
6746 + "jno 0f\n"
6747 + LOCK_PREFIX "incl %0\n"
6748 + _ASM_INTO "\n0:\n"
6749 + _ASM_EXTABLE(0b, 0b)
6750 +#endif
6751 +
6752 + : "+m" (v->counter));
6753 +}
6754 +
6755 +/**
6756 + * atomic_dec_unchecked - decrement atomic variable
6757 + * @v: pointer of type atomic_t
6758 + *
6759 + * Atomically decrements @v by 1.
6760 + */
6761 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6762 +{
6763 + asm volatile(LOCK_PREFIX "decl %0\n"
6764 : "+m" (v->counter));
6765 }
6766
6767 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(at
6768 {
6769 unsigned char c;
6770
6771 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
6772 + asm volatile(LOCK_PREFIX "decl %0\n"
6773 +
6774 +#ifdef CONFIG_PAX_REFCOUNT
6775 + "jno 0f\n"
6776 + LOCK_PREFIX "incl %0\n"
6777 + _ASM_INTO "\n0:\n"
6778 + _ASM_EXTABLE(0b, 0b)
6779 +#endif
6780 +
6781 + "sete %1\n"
6782 : "+m" (v->counter), "=qm" (c)
6783 : : "memory");
6784 return c != 0;
6785 @@ -138,7 +263,16 @@ static inline int atomic_inc_and_test(at
6786 {
6787 unsigned char c;
6788
6789 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
6790 + asm volatile(LOCK_PREFIX "incl %0\n"
6791 +
6792 +#ifdef CONFIG_PAX_REFCOUNT
6793 + "jno 0f\n"
6794 + LOCK_PREFIX "decl %0\n"
6795 + _ASM_INTO "\n0:\n"
6796 + _ASM_EXTABLE(0b, 0b)
6797 +#endif
6798 +
6799 + "sete %1\n"
6800 : "+m" (v->counter), "=qm" (c)
6801 : : "memory");
6802 return c != 0;
6803 @@ -157,7 +291,16 @@ static inline int atomic_add_negative(in
6804 {
6805 unsigned char c;
6806
6807 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
6808 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
6809 +
6810 +#ifdef CONFIG_PAX_REFCOUNT
6811 + "jno 0f\n"
6812 + LOCK_PREFIX "subl %2,%0\n"
6813 + _ASM_INTO "\n0:\n"
6814 + _ASM_EXTABLE(0b, 0b)
6815 +#endif
6816 +
6817 + "sets %1\n"
6818 : "+m" (v->counter), "=qm" (c)
6819 : "ir" (i) : "memory");
6820 return c;
6821 @@ -180,6 +323,46 @@ static inline int atomic_add_return(int
6822 #endif
6823 /* Modern 486+ processor */
6824 __i = i;
6825 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
6826 +
6827 +#ifdef CONFIG_PAX_REFCOUNT
6828 + "jno 0f\n"
6829 + "movl %0, %1\n"
6830 + _ASM_INTO "\n0:\n"
6831 + _ASM_EXTABLE(0b, 0b)
6832 +#endif
6833 +
6834 + : "+r" (i), "+m" (v->counter)
6835 + : : "memory");
6836 + return i + __i;
6837 +
6838 +#ifdef CONFIG_M386
6839 +no_xadd: /* Legacy 386 processor */
6840 + local_irq_save(flags);
6841 + __i = atomic_read(v);
6842 + atomic_set(v, i + __i);
6843 + local_irq_restore(flags);
6844 + return i + __i;
6845 +#endif
6846 +}
6847 +
6848 +/**
6849 + * atomic_add_return_unchecked - add integer and return
6850 + * @v: pointer of type atomic_unchecked_t
6851 + * @i: integer value to add
6852 + *
6853 + * Atomically adds @i to @v and returns @i + @v
6854 + */
6855 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6856 +{
6857 + int __i;
6858 +#ifdef CONFIG_M386
6859 + unsigned long flags;
6860 + if (unlikely(boot_cpu_data.x86 <= 3))
6861 + goto no_xadd;
6862 +#endif
6863 + /* Modern 486+ processor */
6864 + __i = i;
6865 asm volatile(LOCK_PREFIX "xaddl %0, %1"
6866 : "+r" (i), "+m" (v->counter)
6867 : : "memory");
6868 @@ -208,6 +391,10 @@ static inline int atomic_sub_return(int
6869 }
6870
6871 #define atomic_inc_return(v) (atomic_add_return(1, v))
6872 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6873 +{
6874 + return atomic_add_return_unchecked(1, v);
6875 +}
6876 #define atomic_dec_return(v) (atomic_sub_return(1, v))
6877
6878 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6879 @@ -231,17 +418,29 @@ static inline int atomic_xchg(atomic_t *
6880 */
6881 static inline int atomic_add_unless(atomic_t *v, int a, int u)
6882 {
6883 - int c, old;
6884 + int c, old, new;
6885 c = atomic_read(v);
6886 for (;;) {
6887 - if (unlikely(c == (u)))
6888 + if (unlikely(c == u))
6889 break;
6890 - old = atomic_cmpxchg((v), c, c + (a));
6891 +
6892 + asm volatile("addl %2,%0\n"
6893 +
6894 +#ifdef CONFIG_PAX_REFCOUNT
6895 + "jno 0f\n"
6896 + _ASM_INTO "\n0:\n"
6897 + _ASM_EXTABLE(0b, 0b)
6898 +#endif
6899 +
6900 + : "=r" (new)
6901 + : "0" (c), "ir" (a));
6902 +
6903 + old = atomic_cmpxchg(v, c, new);
6904 if (likely(old == c))
6905 break;
6906 c = old;
6907 }
6908 - return c != (u);
6909 + return c != u;
6910 }
6911
6912 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
6913 diff -urNp linux-2.6.35.4/arch/x86/include/asm/boot.h linux-2.6.35.4/arch/x86/include/asm/boot.h
6914 --- linux-2.6.35.4/arch/x86/include/asm/boot.h 2010-08-26 19:47:12.000000000 -0400
6915 +++ linux-2.6.35.4/arch/x86/include/asm/boot.h 2010-09-17 20:12:09.000000000 -0400
6916 @@ -11,10 +11,15 @@
6917 #include <asm/pgtable_types.h>
6918
6919 /* Physical address where kernel should be loaded. */
6920 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6921 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6922 + (CONFIG_PHYSICAL_ALIGN - 1)) \
6923 & ~(CONFIG_PHYSICAL_ALIGN - 1))
6924
6925 +#ifndef __ASSEMBLY__
6926 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
6927 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
6928 +#endif
6929 +
6930 /* Minimum kernel alignment, as a power of two */
6931 #ifdef CONFIG_X86_64
6932 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
6933 diff -urNp linux-2.6.35.4/arch/x86/include/asm/cacheflush.h linux-2.6.35.4/arch/x86/include/asm/cacheflush.h
6934 --- linux-2.6.35.4/arch/x86/include/asm/cacheflush.h 2010-08-26 19:47:12.000000000 -0400
6935 +++ linux-2.6.35.4/arch/x86/include/asm/cacheflush.h 2010-09-17 20:12:09.000000000 -0400
6936 @@ -66,7 +66,7 @@ static inline unsigned long get_page_mem
6937 unsigned long pg_flags = pg->flags & _PGMT_MASK;
6938
6939 if (pg_flags == _PGMT_DEFAULT)
6940 - return -1;
6941 + return ~0UL;
6942 else if (pg_flags == _PGMT_WC)
6943 return _PAGE_CACHE_WC;
6944 else if (pg_flags == _PGMT_UC_MINUS)
6945 diff -urNp linux-2.6.35.4/arch/x86/include/asm/cache.h linux-2.6.35.4/arch/x86/include/asm/cache.h
6946 --- linux-2.6.35.4/arch/x86/include/asm/cache.h 2010-08-26 19:47:12.000000000 -0400
6947 +++ linux-2.6.35.4/arch/x86/include/asm/cache.h 2010-09-17 20:12:09.000000000 -0400
6948 @@ -8,6 +8,7 @@
6949 #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6950
6951 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
6952 +#define __read_only __attribute__((__section__(".data..read_only")))
6953
6954 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
6955 #define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
6956 diff -urNp linux-2.6.35.4/arch/x86/include/asm/checksum_32.h linux-2.6.35.4/arch/x86/include/asm/checksum_32.h
6957 --- linux-2.6.35.4/arch/x86/include/asm/checksum_32.h 2010-08-26 19:47:12.000000000 -0400
6958 +++ linux-2.6.35.4/arch/x86/include/asm/checksum_32.h 2010-09-17 20:12:09.000000000 -0400
6959 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
6960 int len, __wsum sum,
6961 int *src_err_ptr, int *dst_err_ptr);
6962
6963 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
6964 + int len, __wsum sum,
6965 + int *src_err_ptr, int *dst_err_ptr);
6966 +
6967 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
6968 + int len, __wsum sum,
6969 + int *src_err_ptr, int *dst_err_ptr);
6970 +
6971 /*
6972 * Note: when you get a NULL pointer exception here this means someone
6973 * passed in an incorrect kernel address to one of these functions.
6974 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
6975 int *err_ptr)
6976 {
6977 might_sleep();
6978 - return csum_partial_copy_generic((__force void *)src, dst,
6979 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
6980 len, sum, err_ptr, NULL);
6981 }
6982
6983 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
6984 {
6985 might_sleep();
6986 if (access_ok(VERIFY_WRITE, dst, len))
6987 - return csum_partial_copy_generic(src, (__force void *)dst,
6988 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
6989 len, sum, NULL, err_ptr);
6990
6991 if (len)
6992 diff -urNp linux-2.6.35.4/arch/x86/include/asm/compat.h linux-2.6.35.4/arch/x86/include/asm/compat.h
6993 --- linux-2.6.35.4/arch/x86/include/asm/compat.h 2010-08-26 19:47:12.000000000 -0400
6994 +++ linux-2.6.35.4/arch/x86/include/asm/compat.h 2010-09-17 20:12:37.000000000 -0400
6995 @@ -205,7 +205,7 @@ static inline compat_uptr_t ptr_to_compa
6996 return (u32)(unsigned long)uptr;
6997 }
6998
6999 -static inline void __user *compat_alloc_user_space(long len)
7000 +static inline void __user *arch_compat_alloc_user_space(long len)
7001 {
7002 struct pt_regs *regs = task_pt_regs(current);
7003 return (void __user *)regs->sp - len;
7004 diff -urNp linux-2.6.35.4/arch/x86/include/asm/cpufeature.h linux-2.6.35.4/arch/x86/include/asm/cpufeature.h
7005 --- linux-2.6.35.4/arch/x86/include/asm/cpufeature.h 2010-08-26 19:47:12.000000000 -0400
7006 +++ linux-2.6.35.4/arch/x86/include/asm/cpufeature.h 2010-09-17 20:12:09.000000000 -0400
7007 @@ -323,7 +323,7 @@ static __always_inline __pure bool __sta
7008 " .byte 4f - 3f\n" /* replacement len */
7009 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* padding */
7010 ".previous\n"
7011 - ".section .altinstr_replacement,\"ax\"\n"
7012 + ".section .altinstr_replacement,\"a\"\n"
7013 "3: movb $1,%0\n"
7014 "4:\n"
7015 ".previous\n"
7016 diff -urNp linux-2.6.35.4/arch/x86/include/asm/desc.h linux-2.6.35.4/arch/x86/include/asm/desc.h
7017 --- linux-2.6.35.4/arch/x86/include/asm/desc.h 2010-08-26 19:47:12.000000000 -0400
7018 +++ linux-2.6.35.4/arch/x86/include/asm/desc.h 2010-09-17 20:12:09.000000000 -0400
7019 @@ -4,6 +4,7 @@
7020 #include <asm/desc_defs.h>
7021 #include <asm/ldt.h>
7022 #include <asm/mmu.h>
7023 +#include <asm/pgtable.h>
7024 #include <linux/smp.h>
7025
7026 static inline void fill_ldt(struct desc_struct *desc,
7027 @@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_
7028 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
7029 desc->type = (info->read_exec_only ^ 1) << 1;
7030 desc->type |= info->contents << 2;
7031 + desc->type |= info->seg_not_present ^ 1;
7032 desc->s = 1;
7033 desc->dpl = 0x3;
7034 desc->p = info->seg_not_present ^ 1;
7035 @@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_
7036 }
7037
7038 extern struct desc_ptr idt_descr;
7039 -extern gate_desc idt_table[];
7040 -
7041 -struct gdt_page {
7042 - struct desc_struct gdt[GDT_ENTRIES];
7043 -} __attribute__((aligned(PAGE_SIZE)));
7044 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
7045 +extern gate_desc idt_table[256];
7046
7047 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
7048 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
7049 {
7050 - return per_cpu(gdt_page, cpu).gdt;
7051 + return cpu_gdt_table[cpu];
7052 }
7053
7054 #ifdef CONFIG_X86_64
7055 @@ -115,19 +113,24 @@ static inline void paravirt_free_ldt(str
7056 static inline void native_write_idt_entry(gate_desc *idt, int entry,
7057 const gate_desc *gate)
7058 {
7059 + pax_open_kernel();
7060 memcpy(&idt[entry], gate, sizeof(*gate));
7061 + pax_close_kernel();
7062 }
7063
7064 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
7065 const void *desc)
7066 {
7067 + pax_open_kernel();
7068 memcpy(&ldt[entry], desc, 8);
7069 + pax_close_kernel();
7070 }
7071
7072 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
7073 const void *desc, int type)
7074 {
7075 unsigned int size;
7076 +
7077 switch (type) {
7078 case DESC_TSS:
7079 size = sizeof(tss_desc);
7080 @@ -139,7 +142,10 @@ static inline void native_write_gdt_entr
7081 size = sizeof(struct desc_struct);
7082 break;
7083 }
7084 +
7085 + pax_open_kernel();
7086 memcpy(&gdt[entry], desc, size);
7087 + pax_close_kernel();
7088 }
7089
7090 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
7091 @@ -211,7 +217,9 @@ static inline void native_set_ldt(const
7092
7093 static inline void native_load_tr_desc(void)
7094 {
7095 + pax_open_kernel();
7096 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
7097 + pax_close_kernel();
7098 }
7099
7100 static inline void native_load_gdt(const struct desc_ptr *dtr)
7101 @@ -246,8 +254,10 @@ static inline void native_load_tls(struc
7102 unsigned int i;
7103 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
7104
7105 + pax_open_kernel();
7106 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
7107 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
7108 + pax_close_kernel();
7109 }
7110
7111 #define _LDT_empty(info) \
7112 @@ -309,7 +319,7 @@ static inline void set_desc_limit(struct
7113 desc->limit = (limit >> 16) & 0xf;
7114 }
7115
7116 -static inline void _set_gate(int gate, unsigned type, void *addr,
7117 +static inline void _set_gate(int gate, unsigned type, const void *addr,
7118 unsigned dpl, unsigned ist, unsigned seg)
7119 {
7120 gate_desc s;
7121 @@ -327,7 +337,7 @@ static inline void _set_gate(int gate, u
7122 * Pentium F0 0F bugfix can have resulted in the mapped
7123 * IDT being write-protected.
7124 */
7125 -static inline void set_intr_gate(unsigned int n, void *addr)
7126 +static inline void set_intr_gate(unsigned int n, const void *addr)
7127 {
7128 BUG_ON((unsigned)n > 0xFF);
7129 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
7130 @@ -356,19 +366,19 @@ static inline void alloc_intr_gate(unsig
7131 /*
7132 * This routine sets up an interrupt gate at directory privilege level 3.
7133 */
7134 -static inline void set_system_intr_gate(unsigned int n, void *addr)
7135 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
7136 {
7137 BUG_ON((unsigned)n > 0xFF);
7138 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
7139 }
7140
7141 -static inline void set_system_trap_gate(unsigned int n, void *addr)
7142 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
7143 {
7144 BUG_ON((unsigned)n > 0xFF);
7145 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
7146 }
7147
7148 -static inline void set_trap_gate(unsigned int n, void *addr)
7149 +static inline void set_trap_gate(unsigned int n, const void *addr)
7150 {
7151 BUG_ON((unsigned)n > 0xFF);
7152 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
7153 @@ -377,19 +387,31 @@ static inline void set_trap_gate(unsigne
7154 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
7155 {
7156 BUG_ON((unsigned)n > 0xFF);
7157 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
7158 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
7159 }
7160
7161 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
7162 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
7163 {
7164 BUG_ON((unsigned)n > 0xFF);
7165 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
7166 }
7167
7168 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
7169 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
7170 {
7171 BUG_ON((unsigned)n > 0xFF);
7172 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
7173 }
7174
7175 +#ifdef CONFIG_X86_32
7176 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
7177 +{
7178 + struct desc_struct d;
7179 +
7180 + if (likely(limit))
7181 + limit = (limit - 1UL) >> PAGE_SHIFT;
7182 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
7183 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
7184 +}
7185 +#endif
7186 +
7187 #endif /* _ASM_X86_DESC_H */
7188 diff -urNp linux-2.6.35.4/arch/x86/include/asm/device.h linux-2.6.35.4/arch/x86/include/asm/device.h
7189 --- linux-2.6.35.4/arch/x86/include/asm/device.h 2010-08-26 19:47:12.000000000 -0400
7190 +++ linux-2.6.35.4/arch/x86/include/asm/device.h 2010-09-17 20:12:09.000000000 -0400
7191 @@ -6,7 +6,7 @@ struct dev_archdata {
7192 void *acpi_handle;
7193 #endif
7194 #ifdef CONFIG_X86_64
7195 -struct dma_map_ops *dma_ops;
7196 + const struct dma_map_ops *dma_ops;
7197 #endif
7198 #if defined(CONFIG_DMAR) || defined(CONFIG_AMD_IOMMU)
7199 void *iommu; /* hook for IOMMU specific extension */
7200 diff -urNp linux-2.6.35.4/arch/x86/include/asm/dma-mapping.h linux-2.6.35.4/arch/x86/include/asm/dma-mapping.h
7201 --- linux-2.6.35.4/arch/x86/include/asm/dma-mapping.h 2010-08-26 19:47:12.000000000 -0400
7202 +++ linux-2.6.35.4/arch/x86/include/asm/dma-mapping.h 2010-09-17 20:12:09.000000000 -0400
7203 @@ -26,9 +26,9 @@ extern int iommu_merge;
7204 extern struct device x86_dma_fallback_dev;
7205 extern int panic_on_overflow;
7206
7207 -extern struct dma_map_ops *dma_ops;
7208 +extern const struct dma_map_ops *dma_ops;
7209
7210 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
7211 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
7212 {
7213 #ifdef CONFIG_X86_32
7214 return dma_ops;
7215 @@ -45,7 +45,7 @@ static inline struct dma_map_ops *get_dm
7216 /* Make sure we keep the same behaviour */
7217 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
7218 {
7219 - struct dma_map_ops *ops = get_dma_ops(dev);
7220 + const struct dma_map_ops *ops = get_dma_ops(dev);
7221 if (ops->mapping_error)
7222 return ops->mapping_error(dev, dma_addr);
7223
7224 @@ -123,7 +123,7 @@ static inline void *
7225 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
7226 gfp_t gfp)
7227 {
7228 - struct dma_map_ops *ops = get_dma_ops(dev);
7229 + const struct dma_map_ops *ops = get_dma_ops(dev);
7230 void *memory;
7231
7232 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
7233 @@ -150,7 +150,7 @@ dma_alloc_coherent(struct device *dev, s
7234 static inline void dma_free_coherent(struct device *dev, size_t size,
7235 void *vaddr, dma_addr_t bus)
7236 {
7237 - struct dma_map_ops *ops = get_dma_ops(dev);
7238 + const struct dma_map_ops *ops = get_dma_ops(dev);
7239
7240 WARN_ON(irqs_disabled()); /* for portability */
7241
7242 diff -urNp linux-2.6.35.4/arch/x86/include/asm/e820.h linux-2.6.35.4/arch/x86/include/asm/e820.h
7243 --- linux-2.6.35.4/arch/x86/include/asm/e820.h 2010-08-26 19:47:12.000000000 -0400
7244 +++ linux-2.6.35.4/arch/x86/include/asm/e820.h 2010-09-17 20:12:09.000000000 -0400
7245 @@ -69,7 +69,7 @@ struct e820map {
7246 #define ISA_START_ADDRESS 0xa0000
7247 #define ISA_END_ADDRESS 0x100000
7248
7249 -#define BIOS_BEGIN 0x000a0000
7250 +#define BIOS_BEGIN 0x000c0000
7251 #define BIOS_END 0x00100000
7252
7253 #ifdef __KERNEL__
7254 diff -urNp linux-2.6.35.4/arch/x86/include/asm/elf.h linux-2.6.35.4/arch/x86/include/asm/elf.h
7255 --- linux-2.6.35.4/arch/x86/include/asm/elf.h 2010-08-26 19:47:12.000000000 -0400
7256 +++ linux-2.6.35.4/arch/x86/include/asm/elf.h 2010-09-17 20:12:09.000000000 -0400
7257 @@ -237,7 +237,25 @@ extern int force_personality32;
7258 the loader. We need to make sure that it is out of the way of the program
7259 that it will "exec", and that there is sufficient room for the brk. */
7260
7261 +#ifdef CONFIG_PAX_SEGMEXEC
7262 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
7263 +#else
7264 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
7265 +#endif
7266 +
7267 +#ifdef CONFIG_PAX_ASLR
7268 +#ifdef CONFIG_X86_32
7269 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
7270 +
7271 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7272 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7273 +#else
7274 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
7275 +
7276 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7277 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7278 +#endif
7279 +#endif
7280
7281 /* This yields a mask that user programs can use to figure out what
7282 instruction set this CPU supports. This could be done in user space,
7283 @@ -291,8 +309,7 @@ do { \
7284 #define ARCH_DLINFO \
7285 do { \
7286 if (vdso_enabled) \
7287 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
7288 - (unsigned long)current->mm->context.vdso); \
7289 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);\
7290 } while (0)
7291
7292 #define AT_SYSINFO 32
7293 @@ -303,7 +320,7 @@ do { \
7294
7295 #endif /* !CONFIG_X86_32 */
7296
7297 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
7298 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
7299
7300 #define VDSO_ENTRY \
7301 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
7302 @@ -317,7 +334,4 @@ extern int arch_setup_additional_pages(s
7303 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
7304 #define compat_arch_setup_additional_pages syscall32_setup_pages
7305
7306 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7307 -#define arch_randomize_brk arch_randomize_brk
7308 -
7309 #endif /* _ASM_X86_ELF_H */
7310 diff -urNp linux-2.6.35.4/arch/x86/include/asm/futex.h linux-2.6.35.4/arch/x86/include/asm/futex.h
7311 --- linux-2.6.35.4/arch/x86/include/asm/futex.h 2010-08-26 19:47:12.000000000 -0400
7312 +++ linux-2.6.35.4/arch/x86/include/asm/futex.h 2010-09-17 20:12:09.000000000 -0400
7313 @@ -11,17 +11,54 @@
7314 #include <asm/processor.h>
7315 #include <asm/system.h>
7316
7317 +#ifdef CONFIG_X86_32
7318 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
7319 + asm volatile( \
7320 + "movw\t%w6, %%ds\n" \
7321 + "1:\t" insn "\n" \
7322 + "2:\tpushl\t%%ss\n" \
7323 + "\tpopl\t%%ds\n" \
7324 + "\t.section .fixup,\"ax\"\n" \
7325 + "3:\tmov\t%3, %1\n" \
7326 + "\tjmp\t2b\n" \
7327 + "\t.previous\n" \
7328 + _ASM_EXTABLE(1b, 3b) \
7329 + : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
7330 + : "i" (-EFAULT), "0" (oparg), "1" (0), "r" (__USER_DS))
7331 +
7332 +#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
7333 + asm volatile("movw\t%w7, %%es\n" \
7334 + "1:\tmovl\t%%es:%2, %0\n" \
7335 + "\tmovl\t%0, %3\n" \
7336 + "\t" insn "\n" \
7337 + "2:\t" LOCK_PREFIX "cmpxchgl %3, %%es:%2\n"\
7338 + "\tjnz\t1b\n" \
7339 + "3:\tpushl\t%%ss\n" \
7340 + "\tpopl\t%%es\n" \
7341 + "\t.section .fixup,\"ax\"\n" \
7342 + "4:\tmov\t%5, %1\n" \
7343 + "\tjmp\t3b\n" \
7344 + "\t.previous\n" \
7345 + _ASM_EXTABLE(1b, 4b) \
7346 + _ASM_EXTABLE(2b, 4b) \
7347 + : "=&a" (oldval), "=&r" (ret), \
7348 + "+m" (*uaddr), "=&r" (tem) \
7349 + : "r" (oparg), "i" (-EFAULT), "1" (0), "r" (__USER_DS))
7350 +#else
7351 +#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
7352 + typecheck(u32 *, uaddr); \
7353 asm volatile("1:\t" insn "\n" \
7354 "2:\t.section .fixup,\"ax\"\n" \
7355 "3:\tmov\t%3, %1\n" \
7356 "\tjmp\t2b\n" \
7357 "\t.previous\n" \
7358 _ASM_EXTABLE(1b, 3b) \
7359 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
7360 + : "=r" (oldval), "=r" (ret), \
7361 + "+m" (*(uaddr + PAX_USER_SHADOW_BASE / 4))\
7362 : "i" (-EFAULT), "0" (oparg), "1" (0))
7363
7364 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
7365 + typecheck(u32 *, uaddr); \
7366 asm volatile("1:\tmovl %2, %0\n" \
7367 "\tmovl\t%0, %3\n" \
7368 "\t" insn "\n" \
7369 @@ -34,10 +71,12 @@
7370 _ASM_EXTABLE(1b, 4b) \
7371 _ASM_EXTABLE(2b, 4b) \
7372 : "=&a" (oldval), "=&r" (ret), \
7373 - "+m" (*uaddr), "=&r" (tem) \
7374 + "+m" (*(uaddr + PAX_USER_SHADOW_BASE / 4)),\
7375 + "=&r" (tem) \
7376 : "r" (oparg), "i" (-EFAULT), "1" (0))
7377 +#endif
7378
7379 -static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
7380 +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
7381 {
7382 int op = (encoded_op >> 28) & 7;
7383 int cmp = (encoded_op >> 24) & 15;
7384 @@ -61,11 +100,20 @@ static inline int futex_atomic_op_inuser
7385
7386 switch (op) {
7387 case FUTEX_OP_SET:
7388 +#ifdef CONFIG_X86_32
7389 + __futex_atomic_op1("xchgl %0, %%ds:%2", ret, oldval, uaddr, oparg);
7390 +#else
7391 __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
7392 +#endif
7393 break;
7394 case FUTEX_OP_ADD:
7395 +#ifdef CONFIG_X86_32
7396 + __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %%ds:%2", ret, oldval,
7397 + uaddr, oparg);
7398 +#else
7399 __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
7400 uaddr, oparg);
7401 +#endif
7402 break;
7403 case FUTEX_OP_OR:
7404 __futex_atomic_op2("orl %4, %3", ret, oldval, uaddr, oparg);
7405 @@ -109,7 +157,7 @@ static inline int futex_atomic_op_inuser
7406 return ret;
7407 }
7408
7409 -static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
7410 +static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
7411 int newval)
7412 {
7413
7414 @@ -119,17 +167,31 @@ static inline int futex_atomic_cmpxchg_i
7415 return -ENOSYS;
7416 #endif
7417
7418 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
7419 + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
7420 return -EFAULT;
7421
7422 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
7423 - "2:\t.section .fixup, \"ax\"\n"
7424 + asm volatile(
7425 +#ifdef CONFIG_X86_32
7426 + "\tmovw %w5, %%ds\n"
7427 + "1:\t" LOCK_PREFIX "cmpxchgl %3, %%ds:%1\n"
7428 + "2:\tpushl %%ss\n"
7429 + "\tpopl %%ds\n"
7430 +#else
7431 + "1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
7432 + "2:\n"
7433 +#endif
7434 + "\t.section .fixup, \"ax\"\n"
7435 "3:\tmov %2, %0\n"
7436 "\tjmp 2b\n"
7437 "\t.previous\n"
7438 _ASM_EXTABLE(1b, 3b)
7439 +#ifdef CONFIG_X86_32
7440 : "=a" (oldval), "+m" (*uaddr)
7441 + : "i" (-EFAULT), "r" (newval), "0" (oldval), "r" (__USER_DS)
7442 +#else
7443 + : "=a" (oldval), "+m" (*(uaddr + PAX_USER_SHADOW_BASE / 4))
7444 : "i" (-EFAULT), "r" (newval), "0" (oldval)
7445 +#endif
7446 : "memory"
7447 );
7448
7449 diff -urNp linux-2.6.35.4/arch/x86/include/asm/i387.h linux-2.6.35.4/arch/x86/include/asm/i387.h
7450 --- linux-2.6.35.4/arch/x86/include/asm/i387.h 2010-08-26 19:47:12.000000000 -0400
7451 +++ linux-2.6.35.4/arch/x86/include/asm/i387.h 2010-09-17 20:12:09.000000000 -0400
7452 @@ -77,6 +77,11 @@ static inline int fxrstor_checking(struc
7453 {
7454 int err;
7455
7456 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7457 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7458 + fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
7459 +#endif
7460 +
7461 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
7462 "2:\n"
7463 ".section .fixup,\"ax\"\n"
7464 @@ -127,6 +132,11 @@ static inline int fxsave_user(struct i38
7465 {
7466 int err;
7467
7468 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7469 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7470 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
7471 +#endif
7472 +
7473 asm volatile("1: rex64/fxsave (%[fx])\n\t"
7474 "2:\n"
7475 ".section .fixup,\"ax\"\n"
7476 @@ -220,13 +230,8 @@ static inline int fxrstor_checking(struc
7477 }
7478
7479 /* We need a safe address that is cheap to find and that is already
7480 - in L1 during context switch. The best choices are unfortunately
7481 - different for UP and SMP */
7482 -#ifdef CONFIG_SMP
7483 -#define safe_address (__per_cpu_offset[0])
7484 -#else
7485 -#define safe_address (kstat_cpu(0).cpustat.user)
7486 -#endif
7487 + in L1 during context switch. */
7488 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
7489
7490 /*
7491 * These must be called with preempt disabled
7492 diff -urNp linux-2.6.35.4/arch/x86/include/asm/io.h linux-2.6.35.4/arch/x86/include/asm/io.h
7493 --- linux-2.6.35.4/arch/x86/include/asm/io.h 2010-08-26 19:47:12.000000000 -0400
7494 +++ linux-2.6.35.4/arch/x86/include/asm/io.h 2010-09-17 20:12:09.000000000 -0400
7495 @@ -213,6 +213,17 @@ extern void iounmap(volatile void __iome
7496
7497 #include <linux/vmalloc.h>
7498
7499 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
7500 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
7501 +{
7502 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1 << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7503 +}
7504 +
7505 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
7506 +{
7507 + return (pfn + (count >> PAGE_SHIFT)) < (1 << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7508 +}
7509 +
7510 /*
7511 * Convert a virtual cached pointer to an uncached pointer
7512 */
7513 diff -urNp linux-2.6.35.4/arch/x86/include/asm/iommu.h linux-2.6.35.4/arch/x86/include/asm/iommu.h
7514 --- linux-2.6.35.4/arch/x86/include/asm/iommu.h 2010-08-26 19:47:12.000000000 -0400
7515 +++ linux-2.6.35.4/arch/x86/include/asm/iommu.h 2010-09-17 20:12:09.000000000 -0400
7516 @@ -1,7 +1,7 @@
7517 #ifndef _ASM_X86_IOMMU_H
7518 #define _ASM_X86_IOMMU_H
7519
7520 -extern struct dma_map_ops nommu_dma_ops;
7521 +extern const struct dma_map_ops nommu_dma_ops;
7522 extern int force_iommu, no_iommu;
7523 extern int iommu_detected;
7524 extern int iommu_pass_through;
7525 diff -urNp linux-2.6.35.4/arch/x86/include/asm/irqflags.h linux-2.6.35.4/arch/x86/include/asm/irqflags.h
7526 --- linux-2.6.35.4/arch/x86/include/asm/irqflags.h 2010-08-26 19:47:12.000000000 -0400
7527 +++ linux-2.6.35.4/arch/x86/include/asm/irqflags.h 2010-09-17 20:12:09.000000000 -0400
7528 @@ -142,6 +142,11 @@ static inline unsigned long __raw_local_
7529 sti; \
7530 sysexit
7531
7532 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
7533 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
7534 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
7535 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
7536 +
7537 #else
7538 #define INTERRUPT_RETURN iret
7539 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
7540 diff -urNp linux-2.6.35.4/arch/x86/include/asm/kvm_host.h linux-2.6.35.4/arch/x86/include/asm/kvm_host.h
7541 --- linux-2.6.35.4/arch/x86/include/asm/kvm_host.h 2010-08-26 19:47:12.000000000 -0400
7542 +++ linux-2.6.35.4/arch/x86/include/asm/kvm_host.h 2010-09-17 20:12:09.000000000 -0400
7543 @@ -536,7 +536,7 @@ struct kvm_x86_ops {
7544 const struct trace_print_flags *exit_reasons_str;
7545 };
7546
7547 -extern struct kvm_x86_ops *kvm_x86_ops;
7548 +extern const struct kvm_x86_ops *kvm_x86_ops;
7549
7550 int kvm_mmu_module_init(void);
7551 void kvm_mmu_module_exit(void);
7552 diff -urNp linux-2.6.35.4/arch/x86/include/asm/local.h linux-2.6.35.4/arch/x86/include/asm/local.h
7553 --- linux-2.6.35.4/arch/x86/include/asm/local.h 2010-08-26 19:47:12.000000000 -0400
7554 +++ linux-2.6.35.4/arch/x86/include/asm/local.h 2010-09-17 20:12:09.000000000 -0400
7555 @@ -18,26 +18,90 @@ typedef struct {
7556
7557 static inline void local_inc(local_t *l)
7558 {
7559 - asm volatile(_ASM_INC "%0"
7560 + asm volatile(_ASM_INC "%0\n"
7561 +
7562 +#ifdef CONFIG_PAX_REFCOUNT
7563 +#ifdef CONFIG_X86_32
7564 + "into\n0:\n"
7565 +#else
7566 + "jno 0f\n"
7567 + "int $4\n0:\n"
7568 +#endif
7569 + ".pushsection .fixup,\"ax\"\n"
7570 + "1:\n"
7571 + _ASM_DEC "%0\n"
7572 + "jmp 0b\n"
7573 + ".popsection\n"
7574 + _ASM_EXTABLE(0b, 1b)
7575 +#endif
7576 +
7577 : "+m" (l->a.counter));
7578 }
7579
7580 static inline void local_dec(local_t *l)
7581 {
7582 - asm volatile(_ASM_DEC "%0"
7583 + asm volatile(_ASM_DEC "%0\n"
7584 +
7585 +#ifdef CONFIG_PAX_REFCOUNT
7586 +#ifdef CONFIG_X86_32
7587 + "into\n0:\n"
7588 +#else
7589 + "jno 0f\n"
7590 + "int $4\n0:\n"
7591 +#endif
7592 + ".pushsection .fixup,\"ax\"\n"
7593 + "1:\n"
7594 + _ASM_INC "%0\n"
7595 + "jmp 0b\n"
7596 + ".popsection\n"
7597 + _ASM_EXTABLE(0b, 1b)
7598 +#endif
7599 +
7600 : "+m" (l->a.counter));
7601 }
7602
7603 static inline void local_add(long i, local_t *l)
7604 {
7605 - asm volatile(_ASM_ADD "%1,%0"
7606 + asm volatile(_ASM_ADD "%1,%0\n"
7607 +
7608 +#ifdef CONFIG_PAX_REFCOUNT
7609 +#ifdef CONFIG_X86_32
7610 + "into\n0:\n"
7611 +#else
7612 + "jno 0f\n"
7613 + "int $4\n0:\n"
7614 +#endif
7615 + ".pushsection .fixup,\"ax\"\n"
7616 + "1:\n"
7617 + _ASM_SUB "%1,%0\n"
7618 + "jmp 0b\n"
7619 + ".popsection\n"
7620 + _ASM_EXTABLE(0b, 1b)
7621 +#endif
7622 +
7623 : "+m" (l->a.counter)
7624 : "ir" (i));
7625 }
7626
7627 static inline void local_sub(long i, local_t *l)
7628 {
7629 - asm volatile(_ASM_SUB "%1,%0"
7630 + asm volatile(_ASM_SUB "%1,%0\n"
7631 +
7632 +#ifdef CONFIG_PAX_REFCOUNT
7633 +#ifdef CONFIG_X86_32
7634 + "into\n0:\n"
7635 +#else
7636 + "jno 0f\n"
7637 + "int $4\n0:\n"
7638 +#endif
7639 + ".pushsection .fixup,\"ax\"\n"
7640 + "1:\n"
7641 + _ASM_ADD "%1,%0\n"
7642 + "jmp 0b\n"
7643 + ".popsection\n"
7644 + _ASM_EXTABLE(0b, 1b)
7645 +#endif
7646 +
7647 : "+m" (l->a.counter)
7648 : "ir" (i));
7649 }
7650 @@ -55,7 +119,24 @@ static inline int local_sub_and_test(lon
7651 {
7652 unsigned char c;
7653
7654 - asm volatile(_ASM_SUB "%2,%0; sete %1"
7655 + asm volatile(_ASM_SUB "%2,%0\n"
7656 +
7657 +#ifdef CONFIG_PAX_REFCOUNT
7658 +#ifdef CONFIG_X86_32
7659 + "into\n0:\n"
7660 +#else
7661 + "jno 0f\n"
7662 + "int $4\n0:\n"
7663 +#endif
7664 + ".pushsection .fixup,\"ax\"\n"
7665 + "1:\n"
7666 + _ASM_ADD "%2,%0\n"
7667 + "jmp 0b\n"
7668 + ".popsection\n"
7669 + _ASM_EXTABLE(0b, 1b)
7670 +#endif
7671 +
7672 + "sete %1\n"
7673 : "+m" (l->a.counter), "=qm" (c)
7674 : "ir" (i) : "memory");
7675 return c;
7676 @@ -73,7 +154,24 @@ static inline int local_dec_and_test(loc
7677 {
7678 unsigned char c;
7679
7680 - asm volatile(_ASM_DEC "%0; sete %1"
7681 + asm volatile(_ASM_DEC "%0\n"
7682 +
7683 +#ifdef CONFIG_PAX_REFCOUNT
7684 +#ifdef CONFIG_X86_32
7685 + "into\n0:\n"
7686 +#else
7687 + "jno 0f\n"
7688 + "int $4\n0:\n"
7689 +#endif
7690 + ".pushsection .fixup,\"ax\"\n"
7691 + "1:\n"
7692 + _ASM_INC "%0\n"
7693 + "jmp 0b\n"
7694 + ".popsection\n"
7695 + _ASM_EXTABLE(0b, 1b)
7696 +#endif
7697 +
7698 + "sete %1\n"
7699 : "+m" (l->a.counter), "=qm" (c)
7700 : : "memory");
7701 return c != 0;
7702 @@ -91,7 +189,24 @@ static inline int local_inc_and_test(loc
7703 {
7704 unsigned char c;
7705
7706 - asm volatile(_ASM_INC "%0; sete %1"
7707 + asm volatile(_ASM_INC "%0\n"
7708 +
7709 +#ifdef CONFIG_PAX_REFCOUNT
7710 +#ifdef CONFIG_X86_32
7711 + "into\n0:\n"
7712 +#else
7713 + "jno 0f\n"
7714 + "int $4\n0:\n"
7715 +#endif
7716 + ".pushsection .fixup,\"ax\"\n"
7717 + "1:\n"
7718 + _ASM_DEC "%0\n"
7719 + "jmp 0b\n"
7720 + ".popsection\n"
7721 + _ASM_EXTABLE(0b, 1b)
7722 +#endif
7723 +
7724 + "sete %1\n"
7725 : "+m" (l->a.counter), "=qm" (c)
7726 : : "memory");
7727 return c != 0;
7728 @@ -110,7 +225,24 @@ static inline int local_add_negative(lon
7729 {
7730 unsigned char c;
7731
7732 - asm volatile(_ASM_ADD "%2,%0; sets %1"
7733 + asm volatile(_ASM_ADD "%2,%0\n"
7734 +
7735 +#ifdef CONFIG_PAX_REFCOUNT
7736 +#ifdef CONFIG_X86_32
7737 + "into\n0:\n"
7738 +#else
7739 + "jno 0f\n"
7740 + "int $4\n0:\n"
7741 +#endif
7742 + ".pushsection .fixup,\"ax\"\n"
7743 + "1:\n"
7744 + _ASM_SUB "%2,%0\n"
7745 + "jmp 0b\n"
7746 + ".popsection\n"
7747 + _ASM_EXTABLE(0b, 1b)
7748 +#endif
7749 +
7750 + "sets %1\n"
7751 : "+m" (l->a.counter), "=qm" (c)
7752 : "ir" (i) : "memory");
7753 return c;
7754 @@ -133,7 +265,23 @@ static inline long local_add_return(long
7755 #endif
7756 /* Modern 486+ processor */
7757 __i = i;
7758 - asm volatile(_ASM_XADD "%0, %1;"
7759 + asm volatile(_ASM_XADD "%0, %1\n"
7760 +
7761 +#ifdef CONFIG_PAX_REFCOUNT
7762 +#ifdef CONFIG_X86_32
7763 + "into\n0:\n"
7764 +#else
7765 + "jno 0f\n"
7766 + "int $4\n0:\n"
7767 +#endif
7768 + ".pushsection .fixup,\"ax\"\n"
7769 + "1:\n"
7770 + _ASM_MOV "%0,%1\n"
7771 + "jmp 0b\n"
7772 + ".popsection\n"
7773 + _ASM_EXTABLE(0b, 1b)
7774 +#endif
7775 +
7776 : "+r" (i), "+m" (l->a.counter)
7777 : : "memory");
7778 return i + __i;
7779 diff -urNp linux-2.6.35.4/arch/x86/include/asm/mc146818rtc.h linux-2.6.35.4/arch/x86/include/asm/mc146818rtc.h
7780 --- linux-2.6.35.4/arch/x86/include/asm/mc146818rtc.h 2010-08-26 19:47:12.000000000 -0400
7781 +++ linux-2.6.35.4/arch/x86/include/asm/mc146818rtc.h 2010-09-17 20:12:09.000000000 -0400
7782 @@ -81,8 +81,8 @@ static inline unsigned char current_lock
7783 #else
7784 #define lock_cmos_prefix(reg) do {} while (0)
7785 #define lock_cmos_suffix(reg) do {} while (0)
7786 -#define lock_cmos(reg)
7787 -#define unlock_cmos()
7788 +#define lock_cmos(reg) do {} while (0)
7789 +#define unlock_cmos() do {} while (0)
7790 #define do_i_have_lock_cmos() 0
7791 #define current_lock_cmos_reg() 0
7792 #endif
7793 diff -urNp linux-2.6.35.4/arch/x86/include/asm/microcode.h linux-2.6.35.4/arch/x86/include/asm/microcode.h
7794 --- linux-2.6.35.4/arch/x86/include/asm/microcode.h 2010-08-26 19:47:12.000000000 -0400
7795 +++ linux-2.6.35.4/arch/x86/include/asm/microcode.h 2010-09-17 20:12:09.000000000 -0400
7796 @@ -12,13 +12,13 @@ struct device;
7797 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
7798
7799 struct microcode_ops {
7800 - enum ucode_state (*request_microcode_user) (int cpu,
7801 + enum ucode_state (* const request_microcode_user) (int cpu,
7802 const void __user *buf, size_t size);
7803
7804 - enum ucode_state (*request_microcode_fw) (int cpu,
7805 + enum ucode_state (* const request_microcode_fw) (int cpu,
7806 struct device *device);
7807
7808 - void (*microcode_fini_cpu) (int cpu);
7809 + void (* const microcode_fini_cpu) (int cpu);
7810
7811 /*
7812 * The generic 'microcode_core' part guarantees that
7813 @@ -38,18 +38,18 @@ struct ucode_cpu_info {
7814 extern struct ucode_cpu_info ucode_cpu_info[];
7815
7816 #ifdef CONFIG_MICROCODE_INTEL
7817 -extern struct microcode_ops * __init init_intel_microcode(void);
7818 +extern const struct microcode_ops * __init init_intel_microcode(void);
7819 #else
7820 -static inline struct microcode_ops * __init init_intel_microcode(void)
7821 +static inline const struct microcode_ops * __init init_intel_microcode(void)
7822 {
7823 return NULL;
7824 }
7825 #endif /* CONFIG_MICROCODE_INTEL */
7826
7827 #ifdef CONFIG_MICROCODE_AMD
7828 -extern struct microcode_ops * __init init_amd_microcode(void);
7829 +extern const struct microcode_ops * __init init_amd_microcode(void);
7830 #else
7831 -static inline struct microcode_ops * __init init_amd_microcode(void)
7832 +static inline const struct microcode_ops * __init init_amd_microcode(void)
7833 {
7834 return NULL;
7835 }
7836 diff -urNp linux-2.6.35.4/arch/x86/include/asm/mman.h linux-2.6.35.4/arch/x86/include/asm/mman.h
7837 --- linux-2.6.35.4/arch/x86/include/asm/mman.h 2010-08-26 19:47:12.000000000 -0400
7838 +++ linux-2.6.35.4/arch/x86/include/asm/mman.h 2010-09-17 20:12:09.000000000 -0400
7839 @@ -5,4 +5,14 @@
7840
7841 #include <asm-generic/mman.h>
7842
7843 +#ifdef __KERNEL__
7844 +#ifndef __ASSEMBLY__
7845 +#ifdef CONFIG_X86_32
7846 +#define arch_mmap_check i386_mmap_check
7847 +int i386_mmap_check(unsigned long addr, unsigned long len,
7848 + unsigned long flags);
7849 +#endif
7850 +#endif
7851 +#endif
7852 +
7853 #endif /* _ASM_X86_MMAN_H */
7854 diff -urNp linux-2.6.35.4/arch/x86/include/asm/mmu_context.h linux-2.6.35.4/arch/x86/include/asm/mmu_context.h
7855 --- linux-2.6.35.4/arch/x86/include/asm/mmu_context.h 2010-08-26 19:47:12.000000000 -0400
7856 +++ linux-2.6.35.4/arch/x86/include/asm/mmu_context.h 2010-09-17 20:12:09.000000000 -0400
7857 @@ -24,6 +24,21 @@ void destroy_context(struct mm_struct *m
7858
7859 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
7860 {
7861 +
7862 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7863 + unsigned int i;
7864 + pgd_t *pgd;
7865 +
7866 + pax_open_kernel();
7867 + pgd = get_cpu_pgd(smp_processor_id());
7868 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
7869 + if (paravirt_enabled())
7870 + set_pgd(pgd+i, native_make_pgd(0));
7871 + else
7872 + pgd[i] = native_make_pgd(0);
7873 + pax_close_kernel();
7874 +#endif
7875 +
7876 #ifdef CONFIG_SMP
7877 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
7878 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
7879 @@ -34,27 +49,70 @@ static inline void switch_mm(struct mm_s
7880 struct task_struct *tsk)
7881 {
7882 unsigned cpu = smp_processor_id();
7883 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
7884 + int tlbstate = TLBSTATE_OK;
7885 +#endif
7886
7887 if (likely(prev != next)) {
7888 /* stop flush ipis for the previous mm */
7889 cpumask_clear_cpu(cpu, mm_cpumask(prev));
7890 #ifdef CONFIG_SMP
7891 +#ifdef CONFIG_X86_32
7892 + tlbstate = percpu_read(cpu_tlbstate.state);
7893 +#endif
7894 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7895 percpu_write(cpu_tlbstate.active_mm, next);
7896 #endif
7897 cpumask_set_cpu(cpu, mm_cpumask(next));
7898
7899 /* Re-load page tables */
7900 +#ifdef CONFIG_PAX_PER_CPU_PGD
7901 + pax_open_kernel();
7902 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7903 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7904 + pax_close_kernel();
7905 + load_cr3(get_cpu_pgd(cpu));
7906 +#else
7907 load_cr3(next->pgd);
7908 +#endif
7909
7910 /*
7911 * load the LDT, if the LDT is different:
7912 */
7913 if (unlikely(prev->context.ldt != next->context.ldt))
7914 load_LDT_nolock(&next->context);
7915 - }
7916 +
7917 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7918 + if (!(__supported_pte_mask & _PAGE_NX)) {
7919 + smp_mb__before_clear_bit();
7920 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
7921 + smp_mb__after_clear_bit();
7922 + cpu_set(cpu, next->context.cpu_user_cs_mask);
7923 + }
7924 +#endif
7925 +
7926 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7927 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
7928 + prev->context.user_cs_limit != next->context.user_cs_limit))
7929 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7930 #ifdef CONFIG_SMP
7931 + else if (unlikely(tlbstate != TLBSTATE_OK))
7932 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7933 +#endif
7934 +#endif
7935 +
7936 + }
7937 else {
7938 +
7939 +#ifdef CONFIG_PAX_PER_CPU_PGD
7940 + pax_open_kernel();
7941 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7942 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7943 + pax_close_kernel();
7944 + load_cr3(get_cpu_pgd(cpu));
7945 +#endif
7946 +
7947 +#ifdef CONFIG_SMP
7948 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7949 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
7950
7951 @@ -63,11 +121,28 @@ static inline void switch_mm(struct mm_s
7952 * tlb flush IPI delivery. We must reload CR3
7953 * to make sure to use no freed page tables.
7954 */
7955 +
7956 +#ifndef CONFIG_PAX_PER_CPU_PGD
7957 load_cr3(next->pgd);
7958 +#endif
7959 +
7960 load_LDT_nolock(&next->context);
7961 +
7962 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
7963 + if (!(__supported_pte_mask & _PAGE_NX))
7964 + cpu_set(cpu, next->context.cpu_user_cs_mask);
7965 +#endif
7966 +
7967 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7968 +#ifdef CONFIG_PAX_PAGEEXEC
7969 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
7970 +#endif
7971 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7972 +#endif
7973 +
7974 }
7975 - }
7976 #endif
7977 + }
7978 }
7979
7980 #define activate_mm(prev, next) \
7981 diff -urNp linux-2.6.35.4/arch/x86/include/asm/mmu.h linux-2.6.35.4/arch/x86/include/asm/mmu.h
7982 --- linux-2.6.35.4/arch/x86/include/asm/mmu.h 2010-08-26 19:47:12.000000000 -0400
7983 +++ linux-2.6.35.4/arch/x86/include/asm/mmu.h 2010-09-17 20:12:09.000000000 -0400
7984 @@ -9,10 +9,23 @@
7985 * we put the segment information here.
7986 */
7987 typedef struct {
7988 - void *ldt;
7989 + struct desc_struct *ldt;
7990 int size;
7991 struct mutex lock;
7992 - void *vdso;
7993 + unsigned long vdso;
7994 +
7995 +#ifdef CONFIG_X86_32
7996 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
7997 + unsigned long user_cs_base;
7998 + unsigned long user_cs_limit;
7999 +
8000 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8001 + cpumask_t cpu_user_cs_mask;
8002 +#endif
8003 +
8004 +#endif
8005 +#endif
8006 +
8007 } mm_context_t;
8008
8009 #ifdef CONFIG_SMP
8010 diff -urNp linux-2.6.35.4/arch/x86/include/asm/module.h linux-2.6.35.4/arch/x86/include/asm/module.h
8011 --- linux-2.6.35.4/arch/x86/include/asm/module.h 2010-08-26 19:47:12.000000000 -0400
8012 +++ linux-2.6.35.4/arch/x86/include/asm/module.h 2010-09-17 20:12:37.000000000 -0400
8013 @@ -59,13 +59,31 @@
8014 #error unknown processor family
8015 #endif
8016
8017 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8018 +#define MODULE_PAX_UDEREF "UDEREF "
8019 +#else
8020 +#define MODULE_PAX_UDEREF ""
8021 +#endif
8022 +
8023 #ifdef CONFIG_X86_32
8024 # ifdef CONFIG_4KSTACKS
8025 # define MODULE_STACKSIZE "4KSTACKS "
8026 # else
8027 # define MODULE_STACKSIZE ""
8028 # endif
8029 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
8030 +# ifdef CONFIG_PAX_KERNEXEC
8031 +# define MODULE_PAX_KERNEXEC "KERNEXEC "
8032 +# else
8033 +# define MODULE_PAX_KERNEXEC ""
8034 +# endif
8035 +# ifdef CONFIG_GRKERNSEC
8036 +# define MODULE_GRSEC "GRSECURITY "
8037 +# else
8038 +# define MODULE_GRSEC ""
8039 +# endif
8040 +# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
8041 +#else
8042 +# define MODULE_ARCH_VERMAGIC MODULE_PAX_UDEREF
8043 #endif
8044
8045 #endif /* _ASM_X86_MODULE_H */
8046 diff -urNp linux-2.6.35.4/arch/x86/include/asm/page_32_types.h linux-2.6.35.4/arch/x86/include/asm/page_32_types.h
8047 --- linux-2.6.35.4/arch/x86/include/asm/page_32_types.h 2010-08-26 19:47:12.000000000 -0400
8048 +++ linux-2.6.35.4/arch/x86/include/asm/page_32_types.h 2010-09-17 20:12:09.000000000 -0400
8049 @@ -15,6 +15,10 @@
8050 */
8051 #define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
8052
8053 +#ifdef CONFIG_PAX_PAGEEXEC
8054 +#define CONFIG_ARCH_TRACK_EXEC_LIMIT 1
8055 +#endif
8056 +
8057 #ifdef CONFIG_4KSTACKS
8058 #define THREAD_ORDER 0
8059 #else
8060 diff -urNp linux-2.6.35.4/arch/x86/include/asm/paravirt.h linux-2.6.35.4/arch/x86/include/asm/paravirt.h
8061 --- linux-2.6.35.4/arch/x86/include/asm/paravirt.h 2010-08-26 19:47:12.000000000 -0400
8062 +++ linux-2.6.35.4/arch/x86/include/asm/paravirt.h 2010-09-17 20:12:09.000000000 -0400
8063 @@ -720,6 +720,21 @@ static inline void __set_fixmap(unsigned
8064 pv_mmu_ops.set_fixmap(idx, phys, flags);
8065 }
8066
8067 +#ifdef CONFIG_PAX_KERNEXEC
8068 +static inline unsigned long pax_open_kernel(void)
8069 +{
8070 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
8071 +}
8072 +
8073 +static inline unsigned long pax_close_kernel(void)
8074 +{
8075 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
8076 +}
8077 +#else
8078 +static inline unsigned long pax_open_kernel(void) { return 0; }
8079 +static inline unsigned long pax_close_kernel(void) { return 0; }
8080 +#endif
8081 +
8082 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
8083
8084 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
8085 @@ -936,7 +951,7 @@ extern void default_banner(void);
8086
8087 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
8088 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
8089 -#define PARA_INDIRECT(addr) *%cs:addr
8090 +#define PARA_INDIRECT(addr) *%ss:addr
8091 #endif
8092
8093 #define INTERRUPT_RETURN \
8094 @@ -1013,6 +1028,21 @@ extern void default_banner(void);
8095 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
8096 CLBR_NONE, \
8097 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
8098 +
8099 +#define GET_CR0_INTO_RDI \
8100 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
8101 + mov %rax,%rdi
8102 +
8103 +#define SET_RDI_INTO_CR0 \
8104 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
8105 +
8106 +#define GET_CR3_INTO_RDI \
8107 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
8108 + mov %rax,%rdi
8109 +
8110 +#define SET_RDI_INTO_CR3 \
8111 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
8112 +
8113 #endif /* CONFIG_X86_32 */
8114
8115 #endif /* __ASSEMBLY__ */
8116 diff -urNp linux-2.6.35.4/arch/x86/include/asm/paravirt_types.h linux-2.6.35.4/arch/x86/include/asm/paravirt_types.h
8117 --- linux-2.6.35.4/arch/x86/include/asm/paravirt_types.h 2010-08-26 19:47:12.000000000 -0400
8118 +++ linux-2.6.35.4/arch/x86/include/asm/paravirt_types.h 2010-09-17 20:12:09.000000000 -0400
8119 @@ -312,6 +312,12 @@ struct pv_mmu_ops {
8120 an mfn. We can tell which is which from the index. */
8121 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
8122 phys_addr_t phys, pgprot_t flags);
8123 +
8124 +#ifdef CONFIG_PAX_KERNEXEC
8125 + unsigned long (*pax_open_kernel)(void);
8126 + unsigned long (*pax_close_kernel)(void);
8127 +#endif
8128 +
8129 };
8130
8131 struct arch_spinlock;
8132 diff -urNp linux-2.6.35.4/arch/x86/include/asm/pci_x86.h linux-2.6.35.4/arch/x86/include/asm/pci_x86.h
8133 --- linux-2.6.35.4/arch/x86/include/asm/pci_x86.h 2010-08-26 19:47:12.000000000 -0400
8134 +++ linux-2.6.35.4/arch/x86/include/asm/pci_x86.h 2010-09-17 20:12:09.000000000 -0400
8135 @@ -91,16 +91,16 @@ extern int (*pcibios_enable_irq)(struct
8136 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
8137
8138 struct pci_raw_ops {
8139 - int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
8140 + int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
8141 int reg, int len, u32 *val);
8142 - int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
8143 + int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
8144 int reg, int len, u32 val);
8145 };
8146
8147 -extern struct pci_raw_ops *raw_pci_ops;
8148 -extern struct pci_raw_ops *raw_pci_ext_ops;
8149 +extern const struct pci_raw_ops *raw_pci_ops;
8150 +extern const struct pci_raw_ops *raw_pci_ext_ops;
8151
8152 -extern struct pci_raw_ops pci_direct_conf1;
8153 +extern const struct pci_raw_ops pci_direct_conf1;
8154 extern bool port_cf9_safe;
8155
8156 /* arch_initcall level */
8157 diff -urNp linux-2.6.35.4/arch/x86/include/asm/pgalloc.h linux-2.6.35.4/arch/x86/include/asm/pgalloc.h
8158 --- linux-2.6.35.4/arch/x86/include/asm/pgalloc.h 2010-08-26 19:47:12.000000000 -0400
8159 +++ linux-2.6.35.4/arch/x86/include/asm/pgalloc.h 2010-09-17 20:12:09.000000000 -0400
8160 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
8161 pmd_t *pmd, pte_t *pte)
8162 {
8163 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
8164 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
8165 +}
8166 +
8167 +static inline void pmd_populate_user(struct mm_struct *mm,
8168 + pmd_t *pmd, pte_t *pte)
8169 +{
8170 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
8171 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
8172 }
8173
8174 diff -urNp linux-2.6.35.4/arch/x86/include/asm/pgtable-2level.h linux-2.6.35.4/arch/x86/include/asm/pgtable-2level.h
8175 --- linux-2.6.35.4/arch/x86/include/asm/pgtable-2level.h 2010-08-26 19:47:12.000000000 -0400
8176 +++ linux-2.6.35.4/arch/x86/include/asm/pgtable-2level.h 2010-09-17 20:12:09.000000000 -0400
8177 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
8178
8179 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8180 {
8181 + pax_open_kernel();
8182 *pmdp = pmd;
8183 + pax_close_kernel();
8184 }
8185
8186 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
8187 diff -urNp linux-2.6.35.4/arch/x86/include/asm/pgtable_32.h linux-2.6.35.4/arch/x86/include/asm/pgtable_32.h
8188 --- linux-2.6.35.4/arch/x86/include/asm/pgtable_32.h 2010-08-26 19:47:12.000000000 -0400
8189 +++ linux-2.6.35.4/arch/x86/include/asm/pgtable_32.h 2010-09-17 20:12:09.000000000 -0400
8190 @@ -25,8 +25,6 @@
8191 struct mm_struct;
8192 struct vm_area_struct;
8193
8194 -extern pgd_t swapper_pg_dir[1024];
8195 -
8196 static inline void pgtable_cache_init(void) { }
8197 static inline void check_pgt_cache(void) { }
8198 void paging_init(void);
8199 @@ -47,6 +45,11 @@ extern void set_pmd_pfn(unsigned long, u
8200 # include <asm/pgtable-2level.h>
8201 #endif
8202
8203 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
8204 +#ifdef CONFIG_X86_PAE
8205 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
8206 +#endif
8207 +
8208 #if defined(CONFIG_HIGHPTE)
8209 #define __KM_PTE \
8210 (in_nmi() ? KM_NMI_PTE : \
8211 @@ -71,7 +74,9 @@ extern void set_pmd_pfn(unsigned long, u
8212 /* Clear a kernel PTE and flush it from the TLB */
8213 #define kpte_clear_flush(ptep, vaddr) \
8214 do { \
8215 + pax_open_kernel(); \
8216 pte_clear(&init_mm, (vaddr), (ptep)); \
8217 + pax_close_kernel(); \
8218 __flush_tlb_one((vaddr)); \
8219 } while (0)
8220
8221 @@ -83,6 +88,9 @@ do { \
8222
8223 #endif /* !__ASSEMBLY__ */
8224
8225 +#define HAVE_ARCH_UNMAPPED_AREA
8226 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
8227 +
8228 /*
8229 * kern_addr_valid() is (1) for FLATMEM and (0) for
8230 * SPARSEMEM and DISCONTIGMEM
8231 diff -urNp linux-2.6.35.4/arch/x86/include/asm/pgtable_32_types.h linux-2.6.35.4/arch/x86/include/asm/pgtable_32_types.h
8232 --- linux-2.6.35.4/arch/x86/include/asm/pgtable_32_types.h 2010-08-26 19:47:12.000000000 -0400
8233 +++ linux-2.6.35.4/arch/x86/include/asm/pgtable_32_types.h 2010-09-17 20:12:09.000000000 -0400
8234 @@ -8,7 +8,7 @@
8235 */
8236 #ifdef CONFIG_X86_PAE
8237 # include <asm/pgtable-3level_types.h>
8238 -# define PMD_SIZE (1UL << PMD_SHIFT)
8239 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
8240 # define PMD_MASK (~(PMD_SIZE - 1))
8241 #else
8242 # include <asm/pgtable-2level_types.h>
8243 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
8244 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
8245 #endif
8246
8247 +#ifdef CONFIG_PAX_KERNEXEC
8248 +#ifndef __ASSEMBLY__
8249 +extern unsigned char MODULES_EXEC_VADDR[];
8250 +extern unsigned char MODULES_EXEC_END[];
8251 +#endif
8252 +#include <asm/boot.h>
8253 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
8254 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
8255 +#else
8256 +#define ktla_ktva(addr) (addr)
8257 +#define ktva_ktla(addr) (addr)
8258 +#endif
8259 +
8260 #define MODULES_VADDR VMALLOC_START
8261 #define MODULES_END VMALLOC_END
8262 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
8263 diff -urNp linux-2.6.35.4/arch/x86/include/asm/pgtable-3level.h linux-2.6.35.4/arch/x86/include/asm/pgtable-3level.h
8264 --- linux-2.6.35.4/arch/x86/include/asm/pgtable-3level.h 2010-08-26 19:47:12.000000000 -0400
8265 +++ linux-2.6.35.4/arch/x86/include/asm/pgtable-3level.h 2010-09-17 20:12:09.000000000 -0400
8266 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
8267
8268 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8269 {
8270 + pax_open_kernel();
8271 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
8272 + pax_close_kernel();
8273 }
8274
8275 static inline void native_set_pud(pud_t *pudp, pud_t pud)
8276 {
8277 + pax_open_kernel();
8278 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
8279 + pax_close_kernel();
8280 }
8281
8282 /*
8283 diff -urNp linux-2.6.35.4/arch/x86/include/asm/pgtable_64.h linux-2.6.35.4/arch/x86/include/asm/pgtable_64.h
8284 --- linux-2.6.35.4/arch/x86/include/asm/pgtable_64.h 2010-08-26 19:47:12.000000000 -0400
8285 +++ linux-2.6.35.4/arch/x86/include/asm/pgtable_64.h 2010-09-17 20:12:09.000000000 -0400
8286 @@ -16,10 +16,13 @@
8287
8288 extern pud_t level3_kernel_pgt[512];
8289 extern pud_t level3_ident_pgt[512];
8290 +extern pud_t level3_vmalloc_pgt[512];
8291 +extern pud_t level3_vmemmap_pgt[512];
8292 +extern pud_t level2_vmemmap_pgt[512];
8293 extern pmd_t level2_kernel_pgt[512];
8294 extern pmd_t level2_fixmap_pgt[512];
8295 -extern pmd_t level2_ident_pgt[512];
8296 -extern pgd_t init_level4_pgt[];
8297 +extern pmd_t level2_ident_pgt[512*2];
8298 +extern pgd_t init_level4_pgt[512];
8299
8300 #define swapper_pg_dir init_level4_pgt
8301
8302 @@ -74,7 +77,9 @@ static inline pte_t native_ptep_get_and_
8303
8304 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8305 {
8306 + pax_open_kernel();
8307 *pmdp = pmd;
8308 + pax_close_kernel();
8309 }
8310
8311 static inline void native_pmd_clear(pmd_t *pmd)
8312 @@ -94,7 +99,9 @@ static inline void native_pud_clear(pud_
8313
8314 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
8315 {
8316 + pax_open_kernel();
8317 *pgdp = pgd;
8318 + pax_close_kernel();
8319 }
8320
8321 static inline void native_pgd_clear(pgd_t *pgd)
8322 diff -urNp linux-2.6.35.4/arch/x86/include/asm/pgtable_64_types.h linux-2.6.35.4/arch/x86/include/asm/pgtable_64_types.h
8323 --- linux-2.6.35.4/arch/x86/include/asm/pgtable_64_types.h 2010-08-26 19:47:12.000000000 -0400
8324 +++ linux-2.6.35.4/arch/x86/include/asm/pgtable_64_types.h 2010-09-17 20:12:09.000000000 -0400
8325 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
8326 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
8327 #define MODULES_END _AC(0xffffffffff000000, UL)
8328 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
8329 +#define MODULES_EXEC_VADDR MODULES_VADDR
8330 +#define MODULES_EXEC_END MODULES_END
8331 +
8332 +#define ktla_ktva(addr) (addr)
8333 +#define ktva_ktla(addr) (addr)
8334
8335 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
8336 diff -urNp linux-2.6.35.4/arch/x86/include/asm/pgtable.h linux-2.6.35.4/arch/x86/include/asm/pgtable.h
8337 --- linux-2.6.35.4/arch/x86/include/asm/pgtable.h 2010-08-26 19:47:12.000000000 -0400
8338 +++ linux-2.6.35.4/arch/x86/include/asm/pgtable.h 2010-09-17 20:12:09.000000000 -0400
8339 @@ -76,12 +76,51 @@ extern struct list_head pgd_list;
8340
8341 #define arch_end_context_switch(prev) do {} while(0)
8342
8343 +#define pax_open_kernel() native_pax_open_kernel()
8344 +#define pax_close_kernel() native_pax_close_kernel()
8345 #endif /* CONFIG_PARAVIRT */
8346
8347 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
8348 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
8349 +
8350 +#ifdef CONFIG_PAX_KERNEXEC
8351 +static inline unsigned long native_pax_open_kernel(void)
8352 +{
8353 + unsigned long cr0;
8354 +
8355 + preempt_disable();
8356 + barrier();
8357 + cr0 = read_cr0() ^ X86_CR0_WP;
8358 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
8359 + write_cr0(cr0);
8360 + return cr0 ^ X86_CR0_WP;
8361 +}
8362 +
8363 +static inline unsigned long native_pax_close_kernel(void)
8364 +{
8365 + unsigned long cr0;
8366 +
8367 + cr0 = read_cr0() ^ X86_CR0_WP;
8368 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
8369 + write_cr0(cr0);
8370 + barrier();
8371 + preempt_enable_no_resched();
8372 + return cr0 ^ X86_CR0_WP;
8373 +}
8374 +#else
8375 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
8376 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
8377 +#endif
8378 +
8379 /*
8380 * The following only work if pte_present() is true.
8381 * Undefined behaviour if not..
8382 */
8383 +static inline int pte_user(pte_t pte)
8384 +{
8385 + return pte_val(pte) & _PAGE_USER;
8386 +}
8387 +
8388 static inline int pte_dirty(pte_t pte)
8389 {
8390 return pte_flags(pte) & _PAGE_DIRTY;
8391 @@ -169,9 +208,29 @@ static inline pte_t pte_wrprotect(pte_t
8392 return pte_clear_flags(pte, _PAGE_RW);
8393 }
8394
8395 +static inline pte_t pte_mkread(pte_t pte)
8396 +{
8397 + return __pte(pte_val(pte) | _PAGE_USER);
8398 +}
8399 +
8400 static inline pte_t pte_mkexec(pte_t pte)
8401 {
8402 - return pte_clear_flags(pte, _PAGE_NX);
8403 +#ifdef CONFIG_X86_PAE
8404 + if (__supported_pte_mask & _PAGE_NX)
8405 + return pte_clear_flags(pte, _PAGE_NX);
8406 + else
8407 +#endif
8408 + return pte_set_flags(pte, _PAGE_USER);
8409 +}
8410 +
8411 +static inline pte_t pte_exprotect(pte_t pte)
8412 +{
8413 +#ifdef CONFIG_X86_PAE
8414 + if (__supported_pte_mask & _PAGE_NX)
8415 + return pte_set_flags(pte, _PAGE_NX);
8416 + else
8417 +#endif
8418 + return pte_clear_flags(pte, _PAGE_USER);
8419 }
8420
8421 static inline pte_t pte_mkdirty(pte_t pte)
8422 @@ -304,6 +363,15 @@ pte_t *populate_extra_pte(unsigned long
8423 #endif
8424
8425 #ifndef __ASSEMBLY__
8426 +
8427 +#ifdef CONFIG_PAX_PER_CPU_PGD
8428 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
8429 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
8430 +{
8431 + return cpu_pgd[cpu];
8432 +}
8433 +#endif
8434 +
8435 #include <linux/mm_types.h>
8436
8437 static inline int pte_none(pte_t pte)
8438 @@ -474,7 +542,7 @@ static inline pud_t *pud_offset(pgd_t *p
8439
8440 static inline int pgd_bad(pgd_t pgd)
8441 {
8442 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
8443 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
8444 }
8445
8446 static inline int pgd_none(pgd_t pgd)
8447 @@ -497,7 +565,12 @@ static inline int pgd_none(pgd_t pgd)
8448 * pgd_offset() returns a (pgd_t *)
8449 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
8450 */
8451 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
8452 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
8453 +
8454 +#ifdef CONFIG_PAX_PER_CPU_PGD
8455 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
8456 +#endif
8457 +
8458 /*
8459 * a shortcut which implies the use of the kernel's pgd, instead
8460 * of a process's
8461 @@ -508,6 +581,20 @@ static inline int pgd_none(pgd_t pgd)
8462 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
8463 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
8464
8465 +#ifdef CONFIG_X86_32
8466 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
8467 +#else
8468 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
8469 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
8470 +
8471 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8472 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
8473 +#else
8474 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
8475 +#endif
8476 +
8477 +#endif
8478 +
8479 #ifndef __ASSEMBLY__
8480
8481 extern int direct_gbpages;
8482 @@ -613,11 +700,23 @@ static inline void ptep_set_wrprotect(st
8483 * dst and src can be on the same page, but the range must not overlap,
8484 * and must not cross a page boundary.
8485 */
8486 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
8487 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
8488 {
8489 - memcpy(dst, src, count * sizeof(pgd_t));
8490 + pax_open_kernel();
8491 + while (count--)
8492 + *dst++ = *src++;
8493 + pax_close_kernel();
8494 }
8495
8496 +#ifdef CONFIG_PAX_PER_CPU_PGD
8497 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8498 +#endif
8499 +
8500 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8501 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8502 +#else
8503 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
8504 +#endif
8505
8506 #include <asm-generic/pgtable.h>
8507 #endif /* __ASSEMBLY__ */
8508 diff -urNp linux-2.6.35.4/arch/x86/include/asm/pgtable_types.h linux-2.6.35.4/arch/x86/include/asm/pgtable_types.h
8509 --- linux-2.6.35.4/arch/x86/include/asm/pgtable_types.h 2010-08-26 19:47:12.000000000 -0400
8510 +++ linux-2.6.35.4/arch/x86/include/asm/pgtable_types.h 2010-09-17 20:12:09.000000000 -0400
8511 @@ -16,12 +16,11 @@
8512 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
8513 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
8514 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
8515 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
8516 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
8517 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
8518 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
8519 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
8520 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
8521 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
8522 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
8523 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
8524
8525 /* If _PAGE_BIT_PRESENT is clear, we use these: */
8526 @@ -39,7 +38,6 @@
8527 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
8528 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
8529 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
8530 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
8531 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
8532 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
8533 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
8534 @@ -55,8 +53,10 @@
8535
8536 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
8537 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
8538 -#else
8539 +#elif defined(CONFIG_KMEMCHECK)
8540 #define _PAGE_NX (_AT(pteval_t, 0))
8541 +#else
8542 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
8543 #endif
8544
8545 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
8546 @@ -93,6 +93,9 @@
8547 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
8548 _PAGE_ACCESSED)
8549
8550 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
8551 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
8552 +
8553 #define __PAGE_KERNEL_EXEC \
8554 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
8555 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
8556 @@ -103,8 +106,8 @@
8557 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
8558 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
8559 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
8560 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
8561 -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
8562 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
8563 +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
8564 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
8565 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
8566 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
8567 @@ -163,8 +166,8 @@
8568 * bits are combined, this will alow user to access the high address mapped
8569 * VDSO in the presence of CONFIG_COMPAT_VDSO
8570 */
8571 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
8572 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
8573 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8574 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8575 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
8576 #endif
8577
8578 @@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t p
8579 {
8580 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
8581 }
8582 +#endif
8583
8584 +#if PAGETABLE_LEVELS == 3
8585 +#include <asm-generic/pgtable-nopud.h>
8586 +#endif
8587 +
8588 +#if PAGETABLE_LEVELS == 2
8589 +#include <asm-generic/pgtable-nopmd.h>
8590 +#endif
8591 +
8592 +#ifndef __ASSEMBLY__
8593 #if PAGETABLE_LEVELS > 3
8594 typedef struct { pudval_t pud; } pud_t;
8595
8596 @@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pu
8597 return pud.pud;
8598 }
8599 #else
8600 -#include <asm-generic/pgtable-nopud.h>
8601 -
8602 static inline pudval_t native_pud_val(pud_t pud)
8603 {
8604 return native_pgd_val(pud.pgd);
8605 @@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pm
8606 return pmd.pmd;
8607 }
8608 #else
8609 -#include <asm-generic/pgtable-nopmd.h>
8610 -
8611 static inline pmdval_t native_pmd_val(pmd_t pmd)
8612 {
8613 return native_pgd_val(pmd.pud.pgd);
8614 @@ -278,7 +287,6 @@ typedef struct page *pgtable_t;
8615
8616 extern pteval_t __supported_pte_mask;
8617 extern void set_nx(void);
8618 -extern int nx_enabled;
8619
8620 #define pgprot_writecombine pgprot_writecombine
8621 extern pgprot_t pgprot_writecombine(pgprot_t prot);
8622 diff -urNp linux-2.6.35.4/arch/x86/include/asm/processor.h linux-2.6.35.4/arch/x86/include/asm/processor.h
8623 --- linux-2.6.35.4/arch/x86/include/asm/processor.h 2010-08-26 19:47:12.000000000 -0400
8624 +++ linux-2.6.35.4/arch/x86/include/asm/processor.h 2010-09-17 20:12:09.000000000 -0400
8625 @@ -269,7 +269,7 @@ struct tss_struct {
8626
8627 } ____cacheline_aligned;
8628
8629 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
8630 +extern struct tss_struct init_tss[NR_CPUS];
8631
8632 /*
8633 * Save the original ist values for checking stack pointers during debugging
8634 @@ -884,8 +884,15 @@ static inline void spin_lock_prefetch(co
8635 */
8636 #define TASK_SIZE PAGE_OFFSET
8637 #define TASK_SIZE_MAX TASK_SIZE
8638 +
8639 +#ifdef CONFIG_PAX_SEGMEXEC
8640 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
8641 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
8642 +#else
8643 #define STACK_TOP TASK_SIZE
8644 -#define STACK_TOP_MAX STACK_TOP
8645 +#endif
8646 +
8647 +#define STACK_TOP_MAX TASK_SIZE
8648
8649 #define INIT_THREAD { \
8650 .sp0 = sizeof(init_stack) + (long)&init_stack, \
8651 @@ -902,7 +909,7 @@ static inline void spin_lock_prefetch(co
8652 */
8653 #define INIT_TSS { \
8654 .x86_tss = { \
8655 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
8656 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8657 .ss0 = __KERNEL_DS, \
8658 .ss1 = __KERNEL_CS, \
8659 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
8660 @@ -913,11 +920,7 @@ static inline void spin_lock_prefetch(co
8661 extern unsigned long thread_saved_pc(struct task_struct *tsk);
8662
8663 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
8664 -#define KSTK_TOP(info) \
8665 -({ \
8666 - unsigned long *__ptr = (unsigned long *)(info); \
8667 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
8668 -})
8669 +#define KSTK_TOP(info) ((info)->task.thread.sp0)
8670
8671 /*
8672 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
8673 @@ -932,7 +935,7 @@ extern unsigned long thread_saved_pc(str
8674 #define task_pt_regs(task) \
8675 ({ \
8676 struct pt_regs *__regs__; \
8677 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
8678 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
8679 __regs__ - 1; \
8680 })
8681
8682 @@ -942,13 +945,13 @@ extern unsigned long thread_saved_pc(str
8683 /*
8684 * User space process size. 47bits minus one guard page.
8685 */
8686 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
8687 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
8688
8689 /* This decides where the kernel will search for a free chunk of vm
8690 * space during mmap's.
8691 */
8692 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
8693 - 0xc0000000 : 0xFFFFe000)
8694 + 0xc0000000 : 0xFFFFf000)
8695
8696 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
8697 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
8698 @@ -985,6 +988,10 @@ extern void start_thread(struct pt_regs
8699 */
8700 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
8701
8702 +#ifdef CONFIG_PAX_SEGMEXEC
8703 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
8704 +#endif
8705 +
8706 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
8707
8708 /* Get/set a process' ability to use the timestamp counter instruction */
8709 diff -urNp linux-2.6.35.4/arch/x86/include/asm/ptrace.h linux-2.6.35.4/arch/x86/include/asm/ptrace.h
8710 --- linux-2.6.35.4/arch/x86/include/asm/ptrace.h 2010-08-26 19:47:12.000000000 -0400
8711 +++ linux-2.6.35.4/arch/x86/include/asm/ptrace.h 2010-09-17 20:12:09.000000000 -0400
8712 @@ -152,28 +152,29 @@ static inline unsigned long regs_return_
8713 }
8714
8715 /*
8716 - * user_mode_vm(regs) determines whether a register set came from user mode.
8717 + * user_mode(regs) determines whether a register set came from user mode.
8718 * This is true if V8086 mode was enabled OR if the register set was from
8719 * protected mode with RPL-3 CS value. This tricky test checks that with
8720 * one comparison. Many places in the kernel can bypass this full check
8721 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
8722 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
8723 + * be used.
8724 */
8725 -static inline int user_mode(struct pt_regs *regs)
8726 +static inline int user_mode_novm(struct pt_regs *regs)
8727 {
8728 #ifdef CONFIG_X86_32
8729 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
8730 #else
8731 - return !!(regs->cs & 3);
8732 + return !!(regs->cs & SEGMENT_RPL_MASK);
8733 #endif
8734 }
8735
8736 -static inline int user_mode_vm(struct pt_regs *regs)
8737 +static inline int user_mode(struct pt_regs *regs)
8738 {
8739 #ifdef CONFIG_X86_32
8740 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
8741 USER_RPL;
8742 #else
8743 - return user_mode(regs);
8744 + return user_mode_novm(regs);
8745 #endif
8746 }
8747
8748 diff -urNp linux-2.6.35.4/arch/x86/include/asm/reboot.h linux-2.6.35.4/arch/x86/include/asm/reboot.h
8749 --- linux-2.6.35.4/arch/x86/include/asm/reboot.h 2010-08-26 19:47:12.000000000 -0400
8750 +++ linux-2.6.35.4/arch/x86/include/asm/reboot.h 2010-09-17 20:12:09.000000000 -0400
8751 @@ -18,7 +18,7 @@ extern struct machine_ops machine_ops;
8752
8753 void native_machine_crash_shutdown(struct pt_regs *regs);
8754 void native_machine_shutdown(void);
8755 -void machine_real_restart(const unsigned char *code, int length);
8756 +void machine_real_restart(const unsigned char *code, unsigned int length);
8757
8758 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
8759 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
8760 diff -urNp linux-2.6.35.4/arch/x86/include/asm/rwsem.h linux-2.6.35.4/arch/x86/include/asm/rwsem.h
8761 --- linux-2.6.35.4/arch/x86/include/asm/rwsem.h 2010-08-26 19:47:12.000000000 -0400
8762 +++ linux-2.6.35.4/arch/x86/include/asm/rwsem.h 2010-09-17 20:12:09.000000000 -0400
8763 @@ -118,10 +118,26 @@ static inline void __down_read(struct rw
8764 {
8765 asm volatile("# beginning down_read\n\t"
8766 LOCK_PREFIX _ASM_INC "(%1)\n\t"
8767 +
8768 +#ifdef CONFIG_PAX_REFCOUNT
8769 +#ifdef CONFIG_X86_32
8770 + "into\n0:\n"
8771 +#else
8772 + "jno 0f\n"
8773 + "int $4\n0:\n"
8774 +#endif
8775 + ".pushsection .fixup,\"ax\"\n"
8776 + "1:\n"
8777 + LOCK_PREFIX _ASM_DEC "(%1)\n"
8778 + "jmp 0b\n"
8779 + ".popsection\n"
8780 + _ASM_EXTABLE(0b, 1b)
8781 +#endif
8782 +
8783 /* adds 0x00000001, returns the old value */
8784 - " jns 1f\n"
8785 + " jns 2f\n"
8786 " call call_rwsem_down_read_failed\n"
8787 - "1:\n\t"
8788 + "2:\n\t"
8789 "# ending down_read\n\t"
8790 : "+m" (sem->count)
8791 : "a" (sem)
8792 @@ -136,13 +152,29 @@ static inline int __down_read_trylock(st
8793 rwsem_count_t result, tmp;
8794 asm volatile("# beginning __down_read_trylock\n\t"
8795 " mov %0,%1\n\t"
8796 - "1:\n\t"
8797 + "2:\n\t"
8798 " mov %1,%2\n\t"
8799 " add %3,%2\n\t"
8800 - " jle 2f\n\t"
8801 +
8802 +#ifdef CONFIG_PAX_REFCOUNT
8803 +#ifdef CONFIG_X86_32
8804 + "into\n0:\n"
8805 +#else
8806 + "jno 0f\n"
8807 + "int $4\n0:\n"
8808 +#endif
8809 + ".pushsection .fixup,\"ax\"\n"
8810 + "1:\n"
8811 + "sub %3,%2\n"
8812 + "jmp 0b\n"
8813 + ".popsection\n"
8814 + _ASM_EXTABLE(0b, 1b)
8815 +#endif
8816 +
8817 + " jle 3f\n\t"
8818 LOCK_PREFIX " cmpxchg %2,%0\n\t"
8819 - " jnz 1b\n\t"
8820 - "2:\n\t"
8821 + " jnz 2b\n\t"
8822 + "3:\n\t"
8823 "# ending __down_read_trylock\n\t"
8824 : "+m" (sem->count), "=&a" (result), "=&r" (tmp)
8825 : "i" (RWSEM_ACTIVE_READ_BIAS)
8826 @@ -160,12 +192,28 @@ static inline void __down_write_nested(s
8827 tmp = RWSEM_ACTIVE_WRITE_BIAS;
8828 asm volatile("# beginning down_write\n\t"
8829 LOCK_PREFIX " xadd %1,(%2)\n\t"
8830 +
8831 +#ifdef CONFIG_PAX_REFCOUNT
8832 +#ifdef CONFIG_X86_32
8833 + "into\n0:\n"
8834 +#else
8835 + "jno 0f\n"
8836 + "int $4\n0:\n"
8837 +#endif
8838 + ".pushsection .fixup,\"ax\"\n"
8839 + "1:\n"
8840 + "mov %1,(%2)\n"
8841 + "jmp 0b\n"
8842 + ".popsection\n"
8843 + _ASM_EXTABLE(0b, 1b)
8844 +#endif
8845 +
8846 /* subtract 0x0000ffff, returns the old value */
8847 " test %1,%1\n\t"
8848 /* was the count 0 before? */
8849 - " jz 1f\n"
8850 + " jz 2f\n"
8851 " call call_rwsem_down_write_failed\n"
8852 - "1:\n"
8853 + "2:\n"
8854 "# ending down_write"
8855 : "+m" (sem->count), "=d" (tmp)
8856 : "a" (sem), "1" (tmp)
8857 @@ -198,10 +246,26 @@ static inline void __up_read(struct rw_s
8858 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
8859 asm volatile("# beginning __up_read\n\t"
8860 LOCK_PREFIX " xadd %1,(%2)\n\t"
8861 +
8862 +#ifdef CONFIG_PAX_REFCOUNT
8863 +#ifdef CONFIG_X86_32
8864 + "into\n0:\n"
8865 +#else
8866 + "jno 0f\n"
8867 + "int $4\n0:\n"
8868 +#endif
8869 + ".pushsection .fixup,\"ax\"\n"
8870 + "1:\n"
8871 + "mov %1,(%2)\n"
8872 + "jmp 0b\n"
8873 + ".popsection\n"
8874 + _ASM_EXTABLE(0b, 1b)
8875 +#endif
8876 +
8877 /* subtracts 1, returns the old value */
8878 - " jns 1f\n\t"
8879 + " jns 2f\n\t"
8880 " call call_rwsem_wake\n"
8881 - "1:\n"
8882 + "2:\n"
8883 "# ending __up_read\n"
8884 : "+m" (sem->count), "=d" (tmp)
8885 : "a" (sem), "1" (tmp)
8886 @@ -216,11 +280,27 @@ static inline void __up_write(struct rw_
8887 rwsem_count_t tmp;
8888 asm volatile("# beginning __up_write\n\t"
8889 LOCK_PREFIX " xadd %1,(%2)\n\t"
8890 +
8891 +#ifdef CONFIG_PAX_REFCOUNT
8892 +#ifdef CONFIG_X86_32
8893 + "into\n0:\n"
8894 +#else
8895 + "jno 0f\n"
8896 + "int $4\n0:\n"
8897 +#endif
8898 + ".pushsection .fixup,\"ax\"\n"
8899 + "1:\n"
8900 + "mov %1,(%2)\n"
8901 + "jmp 0b\n"
8902 + ".popsection\n"
8903 + _ASM_EXTABLE(0b, 1b)
8904 +#endif
8905 +
8906 /* tries to transition
8907 0xffff0001 -> 0x00000000 */
8908 - " jz 1f\n"
8909 + " jz 2f\n"
8910 " call call_rwsem_wake\n"
8911 - "1:\n\t"
8912 + "2:\n\t"
8913 "# ending __up_write\n"
8914 : "+m" (sem->count), "=d" (tmp)
8915 : "a" (sem), "1" (-RWSEM_ACTIVE_WRITE_BIAS)
8916 @@ -234,13 +314,29 @@ static inline void __downgrade_write(str
8917 {
8918 asm volatile("# beginning __downgrade_write\n\t"
8919 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
8920 +
8921 +#ifdef CONFIG_PAX_REFCOUNT
8922 +#ifdef CONFIG_X86_32
8923 + "into\n0:\n"
8924 +#else
8925 + "jno 0f\n"
8926 + "int $4\n0:\n"
8927 +#endif
8928 + ".pushsection .fixup,\"ax\"\n"
8929 + "1:\n"
8930 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
8931 + "jmp 0b\n"
8932 + ".popsection\n"
8933 + _ASM_EXTABLE(0b, 1b)
8934 +#endif
8935 +
8936 /*
8937 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
8938 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
8939 */
8940 - " jns 1f\n\t"
8941 + " jns 2f\n\t"
8942 " call call_rwsem_downgrade_wake\n"
8943 - "1:\n\t"
8944 + "2:\n\t"
8945 "# ending __downgrade_write\n"
8946 : "+m" (sem->count)
8947 : "a" (sem), "er" (-RWSEM_WAITING_BIAS)
8948 @@ -253,7 +349,23 @@ static inline void __downgrade_write(str
8949 static inline void rwsem_atomic_add(rwsem_count_t delta,
8950 struct rw_semaphore *sem)
8951 {
8952 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
8953 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
8954 +
8955 +#ifdef CONFIG_PAX_REFCOUNT
8956 +#ifdef CONFIG_X86_32
8957 + "into\n0:\n"
8958 +#else
8959 + "jno 0f\n"
8960 + "int $4\n0:\n"
8961 +#endif
8962 + ".pushsection .fixup,\"ax\"\n"
8963 + "1:\n"
8964 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
8965 + "jmp 0b\n"
8966 + ".popsection\n"
8967 + _ASM_EXTABLE(0b, 1b)
8968 +#endif
8969 +
8970 : "+m" (sem->count)
8971 : "er" (delta));
8972 }
8973 @@ -266,7 +378,23 @@ static inline rwsem_count_t rwsem_atomic
8974 {
8975 rwsem_count_t tmp = delta;
8976
8977 - asm volatile(LOCK_PREFIX "xadd %0,%1"
8978 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
8979 +
8980 +#ifdef CONFIG_PAX_REFCOUNT
8981 +#ifdef CONFIG_X86_32
8982 + "into\n0:\n"
8983 +#else
8984 + "jno 0f\n"
8985 + "int $4\n0:\n"
8986 +#endif
8987 + ".pushsection .fixup,\"ax\"\n"
8988 + "1:\n"
8989 + "mov %0,%1\n"
8990 + "jmp 0b\n"
8991 + ".popsection\n"
8992 + _ASM_EXTABLE(0b, 1b)
8993 +#endif
8994 +
8995 : "+r" (tmp), "+m" (sem->count)
8996 : : "memory");
8997
8998 diff -urNp linux-2.6.35.4/arch/x86/include/asm/segment.h linux-2.6.35.4/arch/x86/include/asm/segment.h
8999 --- linux-2.6.35.4/arch/x86/include/asm/segment.h 2010-08-26 19:47:12.000000000 -0400
9000 +++ linux-2.6.35.4/arch/x86/include/asm/segment.h 2010-09-17 20:12:09.000000000 -0400
9001 @@ -62,8 +62,8 @@
9002 * 26 - ESPFIX small SS
9003 * 27 - per-cpu [ offset to per-cpu data area ]
9004 * 28 - stack_canary-20 [ for stack protector ]
9005 - * 29 - unused
9006 - * 30 - unused
9007 + * 29 - PCI BIOS CS
9008 + * 30 - PCI BIOS DS
9009 * 31 - TSS for double fault handler
9010 */
9011 #define GDT_ENTRY_TLS_MIN 6
9012 @@ -77,6 +77,8 @@
9013
9014 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
9015
9016 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
9017 +
9018 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
9019
9020 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
9021 @@ -88,7 +90,7 @@
9022 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
9023 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
9024
9025 -#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
9026 +#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
9027 #ifdef CONFIG_SMP
9028 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
9029 #else
9030 @@ -102,6 +104,12 @@
9031 #define __KERNEL_STACK_CANARY 0
9032 #endif
9033
9034 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
9035 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
9036 +
9037 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
9038 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
9039 +
9040 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
9041
9042 /*
9043 @@ -139,7 +147,7 @@
9044 */
9045
9046 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
9047 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
9048 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
9049
9050
9051 #else
9052 @@ -163,6 +171,8 @@
9053 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
9054 #define __USER32_DS __USER_DS
9055
9056 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
9057 +
9058 #define GDT_ENTRY_TSS 8 /* needs two entries */
9059 #define GDT_ENTRY_LDT 10 /* needs two entries */
9060 #define GDT_ENTRY_TLS_MIN 12
9061 @@ -183,6 +193,7 @@
9062 #endif
9063
9064 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
9065 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
9066 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
9067 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
9068 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
9069 diff -urNp linux-2.6.35.4/arch/x86/include/asm/spinlock.h linux-2.6.35.4/arch/x86/include/asm/spinlock.h
9070 --- linux-2.6.35.4/arch/x86/include/asm/spinlock.h 2010-08-26 19:47:12.000000000 -0400
9071 +++ linux-2.6.35.4/arch/x86/include/asm/spinlock.h 2010-09-17 20:12:09.000000000 -0400
9072 @@ -249,18 +249,50 @@ static inline int arch_write_can_lock(ar
9073 static inline void arch_read_lock(arch_rwlock_t *rw)
9074 {
9075 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
9076 - "jns 1f\n"
9077 - "call __read_lock_failed\n\t"
9078 +
9079 +#ifdef CONFIG_PAX_REFCOUNT
9080 +#ifdef CONFIG_X86_32
9081 + "into\n0:\n"
9082 +#else
9083 + "jno 0f\n"
9084 + "int $4\n0:\n"
9085 +#endif
9086 + ".pushsection .fixup,\"ax\"\n"
9087 "1:\n"
9088 + LOCK_PREFIX " addl $1,(%0)\n"
9089 + "jmp 0b\n"
9090 + ".popsection\n"
9091 + _ASM_EXTABLE(0b, 1b)
9092 +#endif
9093 +
9094 + "jns 2f\n"
9095 + "call __read_lock_failed\n\t"
9096 + "2:\n"
9097 ::LOCK_PTR_REG (rw) : "memory");
9098 }
9099
9100 static inline void arch_write_lock(arch_rwlock_t *rw)
9101 {
9102 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
9103 - "jz 1f\n"
9104 - "call __write_lock_failed\n\t"
9105 +
9106 +#ifdef CONFIG_PAX_REFCOUNT
9107 +#ifdef CONFIG_X86_32
9108 + "into\n0:\n"
9109 +#else
9110 + "jno 0f\n"
9111 + "int $4\n0:\n"
9112 +#endif
9113 + ".pushsection .fixup,\"ax\"\n"
9114 "1:\n"
9115 + LOCK_PREFIX " addl %1,(%0)\n"
9116 + "jmp 0b\n"
9117 + ".popsection\n"
9118 + _ASM_EXTABLE(0b, 1b)
9119 +#endif
9120 +
9121 + "jz 2f\n"
9122 + "call __write_lock_failed\n\t"
9123 + "2:\n"
9124 ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
9125 }
9126
9127 @@ -286,12 +318,45 @@ static inline int arch_write_trylock(arc
9128
9129 static inline void arch_read_unlock(arch_rwlock_t *rw)
9130 {
9131 - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
9132 + asm volatile(LOCK_PREFIX "incl %0\n"
9133 +
9134 +#ifdef CONFIG_PAX_REFCOUNT
9135 +#ifdef CONFIG_X86_32
9136 + "into\n0:\n"
9137 +#else
9138 + "jno 0f\n"
9139 + "int $4\n0:\n"
9140 +#endif
9141 + ".pushsection .fixup,\"ax\"\n"
9142 + "1:\n"
9143 + LOCK_PREFIX "decl %0\n"
9144 + "jmp 0b\n"
9145 + ".popsection\n"
9146 + _ASM_EXTABLE(0b, 1b)
9147 +#endif
9148 +
9149 + :"+m" (rw->lock) : : "memory");
9150 }
9151
9152 static inline void arch_write_unlock(arch_rwlock_t *rw)
9153 {
9154 - asm volatile(LOCK_PREFIX "addl %1, %0"
9155 + asm volatile(LOCK_PREFIX "addl %1, %0\n"
9156 +
9157 +#ifdef CONFIG_PAX_REFCOUNT
9158 +#ifdef CONFIG_X86_32
9159 + "into\n0:\n"
9160 +#else
9161 + "jno 0f\n"
9162 + "int $4\n0:\n"
9163 +#endif
9164 + ".pushsection .fixup,\"ax\"\n"
9165 + "1:\n"
9166 + LOCK_PREFIX "subl %1,%0\n"
9167 + "jmp 0b\n"
9168 + ".popsection\n"
9169 + _ASM_EXTABLE(0b, 1b)
9170 +#endif
9171 +
9172 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
9173 }
9174
9175 diff -urNp linux-2.6.35.4/arch/x86/include/asm/system.h linux-2.6.35.4/arch/x86/include/asm/system.h
9176 --- linux-2.6.35.4/arch/x86/include/asm/system.h 2010-08-26 19:47:12.000000000 -0400
9177 +++ linux-2.6.35.4/arch/x86/include/asm/system.h 2010-09-17 20:12:09.000000000 -0400
9178 @@ -202,7 +202,7 @@ static inline unsigned long get_limit(un
9179 {
9180 unsigned long __limit;
9181 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
9182 - return __limit + 1;
9183 + return __limit;
9184 }
9185
9186 static inline void native_clts(void)
9187 @@ -342,7 +342,7 @@ void enable_hlt(void);
9188
9189 void cpu_idle_wait(void);
9190
9191 -extern unsigned long arch_align_stack(unsigned long sp);
9192 +#define arch_align_stack(x) ((x) & ~0xfUL)
9193 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
9194
9195 void default_idle(void);
9196 diff -urNp linux-2.6.35.4/arch/x86/include/asm/uaccess_32.h linux-2.6.35.4/arch/x86/include/asm/uaccess_32.h
9197 --- linux-2.6.35.4/arch/x86/include/asm/uaccess_32.h 2010-08-26 19:47:12.000000000 -0400
9198 +++ linux-2.6.35.4/arch/x86/include/asm/uaccess_32.h 2010-09-17 20:12:09.000000000 -0400
9199 @@ -44,6 +44,9 @@ unsigned long __must_check __copy_from_u
9200 static __always_inline unsigned long __must_check
9201 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
9202 {
9203 + if ((long)n < 0)
9204 + return n;
9205 +
9206 if (__builtin_constant_p(n)) {
9207 unsigned long ret;
9208
9209 @@ -62,6 +65,8 @@ __copy_to_user_inatomic(void __user *to,
9210 return ret;
9211 }
9212 }
9213 + if (!__builtin_constant_p(n))
9214 + check_object_size(from, n, true);
9215 return __copy_to_user_ll(to, from, n);
9216 }
9217
9218 @@ -89,6 +94,9 @@ __copy_to_user(void __user *to, const vo
9219 static __always_inline unsigned long
9220 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
9221 {
9222 + if ((long)n < 0)
9223 + return n;
9224 +
9225 /* Avoid zeroing the tail if the copy fails..
9226 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
9227 * but as the zeroing behaviour is only significant when n is not
9228 @@ -138,6 +146,10 @@ static __always_inline unsigned long
9229 __copy_from_user(void *to, const void __user *from, unsigned long n)
9230 {
9231 might_fault();
9232 +
9233 + if ((long)n < 0)
9234 + return n;
9235 +
9236 if (__builtin_constant_p(n)) {
9237 unsigned long ret;
9238
9239 @@ -153,6 +165,8 @@ __copy_from_user(void *to, const void __
9240 return ret;
9241 }
9242 }
9243 + if (!__builtin_constant_p(n))
9244 + check_object_size(to, n, false);
9245 return __copy_from_user_ll(to, from, n);
9246 }
9247
9248 @@ -160,6 +174,10 @@ static __always_inline unsigned long __c
9249 const void __user *from, unsigned long n)
9250 {
9251 might_fault();
9252 +
9253 + if ((long)n < 0)
9254 + return n;
9255 +
9256 if (__builtin_constant_p(n)) {
9257 unsigned long ret;
9258
9259 @@ -182,15 +200,19 @@ static __always_inline unsigned long
9260 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
9261 unsigned long n)
9262 {
9263 - return __copy_from_user_ll_nocache_nozero(to, from, n);
9264 -}
9265 + if ((long)n < 0)
9266 + return n;
9267
9268 -unsigned long __must_check copy_to_user(void __user *to,
9269 - const void *from, unsigned long n);
9270 -unsigned long __must_check _copy_from_user(void *to,
9271 - const void __user *from,
9272 - unsigned long n);
9273 + return __copy_from_user_ll_nocache_nozero(to, from, n);
9274 +}
9275
9276 +extern void copy_to_user_overflow(void)
9277 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9278 + __compiletime_error("copy_to_user() buffer size is not provably correct")
9279 +#else
9280 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
9281 +#endif
9282 +;
9283
9284 extern void copy_from_user_overflow(void)
9285 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9286 @@ -200,17 +222,61 @@ extern void copy_from_user_overflow(void
9287 #endif
9288 ;
9289
9290 -static inline unsigned long __must_check copy_from_user(void *to,
9291 - const void __user *from,
9292 - unsigned long n)
9293 +/**
9294 + * copy_to_user: - Copy a block of data into user space.
9295 + * @to: Destination address, in user space.
9296 + * @from: Source address, in kernel space.
9297 + * @n: Number of bytes to copy.
9298 + *
9299 + * Context: User context only. This function may sleep.
9300 + *
9301 + * Copy data from kernel space to user space.
9302 + *
9303 + * Returns number of bytes that could not be copied.
9304 + * On success, this will be zero.
9305 + */
9306 +static inline unsigned long __must_check
9307 +copy_to_user(void __user *to, const void *from, unsigned long n)
9308 +{
9309 + int sz = __compiletime_object_size(from);
9310 +
9311 + if (unlikely(sz != -1 && sz < n))
9312 + copy_to_user_overflow();
9313 + else if (access_ok(VERIFY_WRITE, to, n))
9314 + n = __copy_to_user(to, from, n);
9315 + return n;
9316 +}
9317 +
9318 +/**
9319 + * copy_from_user: - Copy a block of data from user space.
9320 + * @to: Destination address, in kernel space.
9321 + * @from: Source address, in user space.
9322 + * @n: Number of bytes to copy.
9323 + *
9324 + * Context: User context only. This function may sleep.
9325 + *
9326 + * Copy data from user space to kernel space.
9327 + *
9328 + * Returns number of bytes that could not be copied.
9329 + * On success, this will be zero.
9330 + *
9331 + * If some data could not be copied, this function will pad the copied
9332 + * data to the requested size using zero bytes.
9333 + */
9334 +static inline unsigned long __must_check
9335 +copy_from_user(void *to, const void __user *from, unsigned long n)
9336 {
9337 int sz = __compiletime_object_size(to);
9338
9339 - if (likely(sz == -1 || sz >= n))
9340 - n = _copy_from_user(to, from, n);
9341 - else
9342 + if (unlikely(sz != -1 && sz < n))
9343 copy_from_user_overflow();
9344 -
9345 + else if (access_ok(VERIFY_READ, from, n))
9346 + n = __copy_from_user(to, from, n);
9347 + else if ((long)n > 0) {
9348 + if (!__builtin_constant_p(n))
9349 + check_object_size(to, n, false);
9350 + memset(to, 0, n);
9351 + }
9352 return n;
9353 }
9354
9355 diff -urNp linux-2.6.35.4/arch/x86/include/asm/uaccess_64.h linux-2.6.35.4/arch/x86/include/asm/uaccess_64.h
9356 --- linux-2.6.35.4/arch/x86/include/asm/uaccess_64.h 2010-08-26 19:47:12.000000000 -0400
9357 +++ linux-2.6.35.4/arch/x86/include/asm/uaccess_64.h 2010-09-17 20:12:37.000000000 -0400
9358 @@ -11,6 +11,11 @@
9359 #include <asm/alternative.h>
9360 #include <asm/cpufeature.h>
9361 #include <asm/page.h>
9362 +#include <asm/pgtable.h>
9363 +
9364 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
9365 +
9366 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
9367
9368 /*
9369 * Copy To/From Userspace
9370 @@ -37,26 +42,26 @@ copy_user_generic(void *to, const void *
9371 return ret;
9372 }
9373
9374 -__must_check unsigned long
9375 -_copy_to_user(void __user *to, const void *from, unsigned len);
9376 -__must_check unsigned long
9377 -_copy_from_user(void *to, const void __user *from, unsigned len);
9378 +static __always_inline __must_check unsigned long
9379 +__copy_to_user(void __user *to, const void *from, unsigned len);
9380 +static __always_inline __must_check unsigned long
9381 +__copy_from_user(void *to, const void __user *from, unsigned len);
9382 __must_check unsigned long
9383 copy_in_user(void __user *to, const void __user *from, unsigned len);
9384
9385 static inline unsigned long __must_check copy_from_user(void *to,
9386 const void __user *from,
9387 - unsigned long n)
9388 + unsigned n)
9389 {
9390 - int sz = __compiletime_object_size(to);
9391 -
9392 might_fault();
9393 - if (likely(sz == -1 || sz >= n))
9394 - n = _copy_from_user(to, from, n);
9395 -#ifdef CONFIG_DEBUG_VM
9396 - else
9397 - WARN(1, "Buffer overflow detected!\n");
9398 -#endif
9399 +
9400 + if (access_ok(VERIFY_READ, from, n))
9401 + n = __copy_from_user(to, from, n);
9402 + else if ((int)n > 0) {
9403 + if (!__builtin_constant_p(n))
9404 + check_object_size(to, n, false);
9405 + memset(to, 0, n);
9406 + }
9407 return n;
9408 }
9409
9410 @@ -65,17 +70,35 @@ int copy_to_user(void __user *dst, const
9411 {
9412 might_fault();
9413
9414 - return _copy_to_user(dst, src, size);
9415 + if (access_ok(VERIFY_WRITE, dst, size))
9416 + size = __copy_to_user(dst, src, size);
9417 + return size;
9418 }
9419
9420 static __always_inline __must_check
9421 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
9422 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
9423 {
9424 - int ret = 0;
9425 + int sz = __compiletime_object_size(dst);
9426 + unsigned ret = 0;
9427
9428 might_fault();
9429 - if (!__builtin_constant_p(size))
9430 +
9431 + if ((int)size < 0)
9432 + return size;
9433 +
9434 + if (unlikely(sz != -1 && sz < size)) {
9435 +#ifdef CONFIG_DEBUG_VM
9436 + WARN(1, "Buffer overflow detected!\n");
9437 +#endif
9438 + return size;
9439 + }
9440 +
9441 + if (!__builtin_constant_p(size)) {
9442 + check_object_size(dst, size, false);
9443 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9444 + src += PAX_USER_SHADOW_BASE;
9445 return copy_user_generic(dst, (__force void *)src, size);
9446 + }
9447 switch (size) {
9448 case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
9449 ret, "b", "b", "=q", 1);
9450 @@ -108,18 +131,36 @@ int __copy_from_user(void *dst, const vo
9451 ret, "q", "", "=r", 8);
9452 return ret;
9453 default:
9454 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9455 + src += PAX_USER_SHADOW_BASE;
9456 return copy_user_generic(dst, (__force void *)src, size);
9457 }
9458 }
9459
9460 static __always_inline __must_check
9461 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
9462 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
9463 {
9464 - int ret = 0;
9465 + int sz = __compiletime_object_size(src);
9466 + unsigned ret = 0;
9467
9468 might_fault();
9469 - if (!__builtin_constant_p(size))
9470 +
9471 + if ((int)size < 0)
9472 + return size;
9473 +
9474 + if (unlikely(sz != -1 && sz < size)) {
9475 +#ifdef CONFIG_DEBUG_VM
9476 + WARN(1, "Buffer overflow detected!\n");
9477 +#endif
9478 + return size;
9479 + }
9480 +
9481 + if (!__builtin_constant_p(size)) {
9482 + check_object_size(src, size, true);
9483 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9484 + dst += PAX_USER_SHADOW_BASE;
9485 return copy_user_generic((__force void *)dst, src, size);
9486 + }
9487 switch (size) {
9488 case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
9489 ret, "b", "b", "iq", 1);
9490 @@ -152,19 +193,30 @@ int __copy_to_user(void __user *dst, con
9491 ret, "q", "", "er", 8);
9492 return ret;
9493 default:
9494 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9495 + dst += PAX_USER_SHADOW_BASE;
9496 return copy_user_generic((__force void *)dst, src, size);
9497 }
9498 }
9499
9500 static __always_inline __must_check
9501 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9502 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9503 {
9504 - int ret = 0;
9505 + unsigned ret = 0;
9506
9507 might_fault();
9508 - if (!__builtin_constant_p(size))
9509 +
9510 + if ((int)size < 0)
9511 + return size;
9512 +
9513 + if (!__builtin_constant_p(size)) {
9514 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9515 + src += PAX_USER_SHADOW_BASE;
9516 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9517 + dst += PAX_USER_SHADOW_BASE;
9518 return copy_user_generic((__force void *)dst,
9519 (__force void *)src, size);
9520 + }
9521 switch (size) {
9522 case 1: {
9523 u8 tmp;
9524 @@ -204,6 +256,10 @@ int __copy_in_user(void __user *dst, con
9525 return ret;
9526 }
9527 default:
9528 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9529 + src += PAX_USER_SHADOW_BASE;
9530 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9531 + dst += PAX_USER_SHADOW_BASE;
9532 return copy_user_generic((__force void *)dst,
9533 (__force void *)src, size);
9534 }
9535 @@ -222,33 +278,45 @@ __must_check unsigned long __clear_user(
9536 static __must_check __always_inline int
9537 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
9538 {
9539 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9540 + src += PAX_USER_SHADOW_BASE;
9541 return copy_user_generic(dst, (__force const void *)src, size);
9542 }
9543
9544 -static __must_check __always_inline int
9545 +static __must_check __always_inline unsigned long
9546 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
9547 {
9548 + if ((int)size < 0)
9549 + return size;
9550 +
9551 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9552 + dst += PAX_USER_SHADOW_BASE;
9553 return copy_user_generic((__force void *)dst, src, size);
9554 }
9555
9556 -extern long __copy_user_nocache(void *dst, const void __user *src,
9557 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
9558 unsigned size, int zerorest);
9559
9560 -static inline int
9561 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9562 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9563 {
9564 might_sleep();
9565 +
9566 + if ((int)size < 0)
9567 + return size;
9568 +
9569 return __copy_user_nocache(dst, src, size, 1);
9570 }
9571
9572 -static inline int
9573 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9574 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9575 unsigned size)
9576 {
9577 + if ((int)size < 0)
9578 + return size;
9579 +
9580 return __copy_user_nocache(dst, src, size, 0);
9581 }
9582
9583 -unsigned long
9584 +extern unsigned long
9585 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
9586
9587 #endif /* _ASM_X86_UACCESS_64_H */
9588 diff -urNp linux-2.6.35.4/arch/x86/include/asm/uaccess.h linux-2.6.35.4/arch/x86/include/asm/uaccess.h
9589 --- linux-2.6.35.4/arch/x86/include/asm/uaccess.h 2010-08-26 19:47:12.000000000 -0400
9590 +++ linux-2.6.35.4/arch/x86/include/asm/uaccess.h 2010-09-17 20:12:09.000000000 -0400
9591 @@ -8,12 +8,15 @@
9592 #include <linux/thread_info.h>
9593 #include <linux/prefetch.h>
9594 #include <linux/string.h>
9595 +#include <linux/sched.h>
9596 #include <asm/asm.h>
9597 #include <asm/page.h>
9598
9599 #define VERIFY_READ 0
9600 #define VERIFY_WRITE 1
9601
9602 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
9603 +
9604 /*
9605 * The fs value determines whether argument validity checking should be
9606 * performed or not. If get_fs() == USER_DS, checking is performed, with
9607 @@ -29,7 +32,12 @@
9608
9609 #define get_ds() (KERNEL_DS)
9610 #define get_fs() (current_thread_info()->addr_limit)
9611 +#ifdef CONFIG_X86_32
9612 +void __set_fs(mm_segment_t x, int cpu);
9613 +void set_fs(mm_segment_t x);
9614 +#else
9615 #define set_fs(x) (current_thread_info()->addr_limit = (x))
9616 +#endif
9617
9618 #define segment_eq(a, b) ((a).seg == (b).seg)
9619
9620 @@ -77,7 +85,33 @@
9621 * checks that the pointer is in the user space range - after calling
9622 * this function, memory access functions may still return -EFAULT.
9623 */
9624 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9625 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9626 +#define access_ok(type, addr, size) \
9627 +({ \
9628 + long __size = size; \
9629 + unsigned long __addr = (unsigned long)addr; \
9630 + unsigned long __addr_ao = __addr & PAGE_MASK; \
9631 + unsigned long __end_ao = __addr + __size - 1; \
9632 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
9633 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
9634 + while(__addr_ao <= __end_ao) { \
9635 + char __c_ao; \
9636 + __addr_ao += PAGE_SIZE; \
9637 + if (__size > PAGE_SIZE) \
9638 + cond_resched(); \
9639 + if (__get_user(__c_ao, (char __user *)__addr)) \
9640 + break; \
9641 + if (type != VERIFY_WRITE) { \
9642 + __addr = __addr_ao; \
9643 + continue; \
9644 + } \
9645 + if (__put_user(__c_ao, (char __user *)__addr)) \
9646 + break; \
9647 + __addr = __addr_ao; \
9648 + } \
9649 + } \
9650 + __ret_ao; \
9651 +})
9652
9653 /*
9654 * The exception table consists of pairs of addresses: the first is the
9655 @@ -183,13 +217,21 @@ extern int __get_user_bad(void);
9656 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
9657 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
9658
9659 -
9660 +#ifdef CONFIG_X86_32
9661 +#define _ASM_LOAD_USER_DS(ds) "movw %w" #ds ",%%ds\n"
9662 +#define _ASM_LOAD_KERNEL_DS "pushl %%ss; popl %%ds\n"
9663 +#else
9664 +#define _ASM_LOAD_USER_DS(ds)
9665 +#define _ASM_LOAD_KERNEL_DS
9666 +#endif
9667
9668 #ifdef CONFIG_X86_32
9669 #define __put_user_asm_u64(x, addr, err, errret) \
9670 - asm volatile("1: movl %%eax,0(%2)\n" \
9671 - "2: movl %%edx,4(%2)\n" \
9672 + asm volatile(_ASM_LOAD_USER_DS(5) \
9673 + "1: movl %%eax,%%ds:0(%2)\n" \
9674 + "2: movl %%edx,%%ds:4(%2)\n" \
9675 "3:\n" \
9676 + _ASM_LOAD_KERNEL_DS \
9677 ".section .fixup,\"ax\"\n" \
9678 "4: movl %3,%0\n" \
9679 " jmp 3b\n" \
9680 @@ -197,15 +239,18 @@ extern int __get_user_bad(void);
9681 _ASM_EXTABLE(1b, 4b) \
9682 _ASM_EXTABLE(2b, 4b) \
9683 : "=r" (err) \
9684 - : "A" (x), "r" (addr), "i" (errret), "0" (err))
9685 + : "A" (x), "r" (addr), "i" (errret), "0" (err), \
9686 + "r"(__USER_DS))
9687
9688 #define __put_user_asm_ex_u64(x, addr) \
9689 - asm volatile("1: movl %%eax,0(%1)\n" \
9690 - "2: movl %%edx,4(%1)\n" \
9691 + asm volatile(_ASM_LOAD_USER_DS(2) \
9692 + "1: movl %%eax,%%ds:0(%1)\n" \
9693 + "2: movl %%edx,%%ds:4(%1)\n" \
9694 "3:\n" \
9695 + _ASM_LOAD_KERNEL_DS \
9696 _ASM_EXTABLE(1b, 2b - 1b) \
9697 _ASM_EXTABLE(2b, 3b - 2b) \
9698 - : : "A" (x), "r" (addr))
9699 + : : "A" (x), "r" (addr), "r"(__USER_DS))
9700
9701 #define __put_user_x8(x, ptr, __ret_pu) \
9702 asm volatile("call __put_user_8" : "=a" (__ret_pu) \
9703 @@ -374,16 +419,18 @@ do { \
9704 } while (0)
9705
9706 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9707 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
9708 + asm volatile(_ASM_LOAD_USER_DS(5) \
9709 + "1: mov"itype" %%ds:%2,%"rtype"1\n" \
9710 "2:\n" \
9711 + _ASM_LOAD_KERNEL_DS \
9712 ".section .fixup,\"ax\"\n" \
9713 "3: mov %3,%0\n" \
9714 " xor"itype" %"rtype"1,%"rtype"1\n" \
9715 " jmp 2b\n" \
9716 ".previous\n" \
9717 _ASM_EXTABLE(1b, 3b) \
9718 - : "=r" (err), ltype(x) \
9719 - : "m" (__m(addr)), "i" (errret), "0" (err))
9720 + : "=r" (err), ltype (x) \
9721 + : "m" (__m(addr)), "i" (errret), "0" (err), "r"(__USER_DS))
9722
9723 #define __get_user_size_ex(x, ptr, size) \
9724 do { \
9725 @@ -407,10 +454,12 @@ do { \
9726 } while (0)
9727
9728 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
9729 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
9730 + asm volatile(_ASM_LOAD_USER_DS(2) \
9731 + "1: mov"itype" %%ds:%1,%"rtype"0\n" \
9732 "2:\n" \
9733 + _ASM_LOAD_KERNEL_DS \
9734 _ASM_EXTABLE(1b, 2b - 1b) \
9735 - : ltype(x) : "m" (__m(addr)))
9736 + : ltype(x) : "m" (__m(addr)), "r"(__USER_DS))
9737
9738 #define __put_user_nocheck(x, ptr, size) \
9739 ({ \
9740 @@ -424,13 +473,24 @@ do { \
9741 int __gu_err; \
9742 unsigned long __gu_val; \
9743 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
9744 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
9745 + (x) = (__typeof__(*(ptr)))__gu_val; \
9746 __gu_err; \
9747 })
9748
9749 /* FIXME: this hack is definitely wrong -AK */
9750 struct __large_struct { unsigned long buf[100]; };
9751 -#define __m(x) (*(struct __large_struct __user *)(x))
9752 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9753 +#define ____m(x) \
9754 +({ \
9755 + unsigned long ____x = (unsigned long)(x); \
9756 + if (____x < PAX_USER_SHADOW_BASE) \
9757 + ____x += PAX_USER_SHADOW_BASE; \
9758 + (void __user *)____x; \
9759 +})
9760 +#else
9761 +#define ____m(x) (x)
9762 +#endif
9763 +#define __m(x) (*(struct __large_struct __user *)____m(x))
9764
9765 /*
9766 * Tell gcc we read from memory instead of writing: this is because
9767 @@ -438,21 +498,26 @@ struct __large_struct { unsigned long bu
9768 * aliasing issues.
9769 */
9770 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9771 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
9772 + asm volatile(_ASM_LOAD_USER_DS(5) \
9773 + "1: mov"itype" %"rtype"1,%%ds:%2\n" \
9774 "2:\n" \
9775 + _ASM_LOAD_KERNEL_DS \
9776 ".section .fixup,\"ax\"\n" \
9777 "3: mov %3,%0\n" \
9778 " jmp 2b\n" \
9779 ".previous\n" \
9780 _ASM_EXTABLE(1b, 3b) \
9781 : "=r"(err) \
9782 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
9783 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err),\
9784 + "r"(__USER_DS))
9785
9786 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
9787 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
9788 + asm volatile(_ASM_LOAD_USER_DS(2) \
9789 + "1: mov"itype" %"rtype"0,%%ds:%1\n" \
9790 "2:\n" \
9791 + _ASM_LOAD_KERNEL_DS \
9792 _ASM_EXTABLE(1b, 2b - 1b) \
9793 - : : ltype(x), "m" (__m(addr)))
9794 + : : ltype(x), "m" (__m(addr)), "r"(__USER_DS))
9795
9796 /*
9797 * uaccess_try and catch
9798 @@ -530,7 +595,7 @@ struct __large_struct { unsigned long bu
9799 #define get_user_ex(x, ptr) do { \
9800 unsigned long __gue_val; \
9801 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
9802 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
9803 + (x) = (__typeof__(*(ptr)))__gue_val; \
9804 } while (0)
9805
9806 #ifdef CONFIG_X86_WP_WORKS_OK
9807 @@ -567,6 +632,7 @@ extern struct movsl_mask {
9808
9809 #define ARCH_HAS_NOCACHE_UACCESS 1
9810
9811 +#define ARCH_HAS_SORT_EXTABLE
9812 #ifdef CONFIG_X86_32
9813 # include "uaccess_32.h"
9814 #else
9815 diff -urNp linux-2.6.35.4/arch/x86/include/asm/vgtod.h linux-2.6.35.4/arch/x86/include/asm/vgtod.h
9816 --- linux-2.6.35.4/arch/x86/include/asm/vgtod.h 2010-08-26 19:47:12.000000000 -0400
9817 +++ linux-2.6.35.4/arch/x86/include/asm/vgtod.h 2010-09-17 20:12:09.000000000 -0400
9818 @@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
9819 int sysctl_enabled;
9820 struct timezone sys_tz;
9821 struct { /* extract of a clocksource struct */
9822 + char name[8];
9823 cycle_t (*vread)(void);
9824 cycle_t cycle_last;
9825 cycle_t mask;
9826 diff -urNp linux-2.6.35.4/arch/x86/include/asm/vmi.h linux-2.6.35.4/arch/x86/include/asm/vmi.h
9827 --- linux-2.6.35.4/arch/x86/include/asm/vmi.h 2010-08-26 19:47:12.000000000 -0400
9828 +++ linux-2.6.35.4/arch/x86/include/asm/vmi.h 2010-09-17 20:12:09.000000000 -0400
9829 @@ -191,6 +191,7 @@ struct vrom_header {
9830 u8 reserved[96]; /* Reserved for headers */
9831 char vmi_init[8]; /* VMI_Init jump point */
9832 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
9833 + char rom_data[8048]; /* rest of the option ROM */
9834 } __attribute__((packed));
9835
9836 struct pnp_header {
9837 diff -urNp linux-2.6.35.4/arch/x86/include/asm/vsyscall.h linux-2.6.35.4/arch/x86/include/asm/vsyscall.h
9838 --- linux-2.6.35.4/arch/x86/include/asm/vsyscall.h 2010-08-26 19:47:12.000000000 -0400
9839 +++ linux-2.6.35.4/arch/x86/include/asm/vsyscall.h 2010-09-17 20:12:09.000000000 -0400
9840 @@ -15,9 +15,10 @@ enum vsyscall_num {
9841
9842 #ifdef __KERNEL__
9843 #include <linux/seqlock.h>
9844 +#include <linux/getcpu.h>
9845 +#include <linux/time.h>
9846
9847 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
9848 -#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
9849
9850 /* Definitions for CONFIG_GENERIC_TIME definitions */
9851 #define __section_vsyscall_gtod_data __attribute__ \
9852 @@ -31,7 +32,6 @@ enum vsyscall_num {
9853 #define VGETCPU_LSL 2
9854
9855 extern int __vgetcpu_mode;
9856 -extern volatile unsigned long __jiffies;
9857
9858 /* kernel space (writeable) */
9859 extern int vgetcpu_mode;
9860 @@ -39,6 +39,9 @@ extern struct timezone sys_tz;
9861
9862 extern void map_vsyscall(void);
9863
9864 +extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
9865 +extern time_t vtime(time_t *t);
9866 +extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
9867 #endif /* __KERNEL__ */
9868
9869 #endif /* _ASM_X86_VSYSCALL_H */
9870 diff -urNp linux-2.6.35.4/arch/x86/include/asm/xsave.h linux-2.6.35.4/arch/x86/include/asm/xsave.h
9871 --- linux-2.6.35.4/arch/x86/include/asm/xsave.h 2010-08-26 19:47:12.000000000 -0400
9872 +++ linux-2.6.35.4/arch/x86/include/asm/xsave.h 2010-09-17 20:12:09.000000000 -0400
9873 @@ -59,6 +59,12 @@ static inline int fpu_xrstor_checking(st
9874 static inline int xsave_user(struct xsave_struct __user *buf)
9875 {
9876 int err;
9877 +
9878 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9879 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
9880 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
9881 +#endif
9882 +
9883 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
9884 "2:\n"
9885 ".section .fixup,\"ax\"\n"
9886 @@ -85,6 +91,11 @@ static inline int xrestore_user(struct x
9887 u32 lmask = mask;
9888 u32 hmask = mask >> 32;
9889
9890 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9891 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
9892 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
9893 +#endif
9894 +
9895 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
9896 "2:\n"
9897 ".section .fixup,\"ax\"\n"
9898 diff -urNp linux-2.6.35.4/arch/x86/Kconfig linux-2.6.35.4/arch/x86/Kconfig
9899 --- linux-2.6.35.4/arch/x86/Kconfig 2010-08-26 19:47:12.000000000 -0400
9900 +++ linux-2.6.35.4/arch/x86/Kconfig 2010-09-17 20:12:37.000000000 -0400
9901 @@ -1038,7 +1038,7 @@ choice
9902
9903 config NOHIGHMEM
9904 bool "off"
9905 - depends on !X86_NUMAQ
9906 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
9907 ---help---
9908 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
9909 However, the address space of 32-bit x86 processors is only 4
9910 @@ -1075,7 +1075,7 @@ config NOHIGHMEM
9911
9912 config HIGHMEM4G
9913 bool "4GB"
9914 - depends on !X86_NUMAQ
9915 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
9916 ---help---
9917 Select this if you have a 32-bit processor and between 1 and 4
9918 gigabytes of physical RAM.
9919 @@ -1129,7 +1129,7 @@ config PAGE_OFFSET
9920 hex
9921 default 0xB0000000 if VMSPLIT_3G_OPT
9922 default 0x80000000 if VMSPLIT_2G
9923 - default 0x78000000 if VMSPLIT_2G_OPT
9924 + default 0x70000000 if VMSPLIT_2G_OPT
9925 default 0x40000000 if VMSPLIT_1G
9926 default 0xC0000000
9927 depends on X86_32
9928 @@ -1461,7 +1461,7 @@ config ARCH_USES_PG_UNCACHED
9929
9930 config EFI
9931 bool "EFI runtime service support"
9932 - depends on ACPI
9933 + depends on ACPI && !PAX_KERNEXEC
9934 ---help---
9935 This enables the kernel to use EFI runtime services that are
9936 available (such as the EFI variable services).
9937 @@ -1548,6 +1548,7 @@ config KEXEC_JUMP
9938 config PHYSICAL_START
9939 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
9940 default "0x1000000"
9941 + range 0x400000 0x40000000
9942 ---help---
9943 This gives the physical address where the kernel is loaded.
9944
9945 @@ -1611,6 +1612,7 @@ config X86_NEED_RELOCS
9946 config PHYSICAL_ALIGN
9947 hex "Alignment value to which kernel should be aligned" if X86_32
9948 default "0x1000000"
9949 + range 0x400000 0x1000000 if PAX_KERNEXEC
9950 range 0x2000 0x1000000
9951 ---help---
9952 This value puts the alignment restrictions on physical address
9953 @@ -1642,9 +1644,10 @@ config HOTPLUG_CPU
9954 Say N if you want to disable CPU hotplug.
9955
9956 config COMPAT_VDSO
9957 - def_bool y
9958 + def_bool n
9959 prompt "Compat VDSO support"
9960 depends on X86_32 || IA32_EMULATION
9961 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
9962 ---help---
9963 Map the 32-bit VDSO to the predictable old-style address too.
9964
9965 diff -urNp linux-2.6.35.4/arch/x86/Kconfig.cpu linux-2.6.35.4/arch/x86/Kconfig.cpu
9966 --- linux-2.6.35.4/arch/x86/Kconfig.cpu 2010-08-26 19:47:12.000000000 -0400
9967 +++ linux-2.6.35.4/arch/x86/Kconfig.cpu 2010-09-17 20:12:09.000000000 -0400
9968 @@ -336,7 +336,7 @@ config X86_PPRO_FENCE
9969
9970 config X86_F00F_BUG
9971 def_bool y
9972 - depends on M586MMX || M586TSC || M586 || M486 || M386
9973 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
9974
9975 config X86_INVD_BUG
9976 def_bool y
9977 @@ -360,7 +360,7 @@ config X86_POPAD_OK
9978
9979 config X86_ALIGNMENT_16
9980 def_bool y
9981 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
9982 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
9983
9984 config X86_INTEL_USERCOPY
9985 def_bool y
9986 @@ -406,7 +406,7 @@ config X86_CMPXCHG64
9987 # generates cmov.
9988 config X86_CMOV
9989 def_bool y
9990 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
9991 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
9992
9993 config X86_MINIMUM_CPU_FAMILY
9994 int
9995 diff -urNp linux-2.6.35.4/arch/x86/Kconfig.debug linux-2.6.35.4/arch/x86/Kconfig.debug
9996 --- linux-2.6.35.4/arch/x86/Kconfig.debug 2010-08-26 19:47:12.000000000 -0400
9997 +++ linux-2.6.35.4/arch/x86/Kconfig.debug 2010-09-17 20:12:09.000000000 -0400
9998 @@ -97,7 +97,7 @@ config X86_PTDUMP
9999 config DEBUG_RODATA
10000 bool "Write protect kernel read-only data structures"
10001 default y
10002 - depends on DEBUG_KERNEL
10003 + depends on DEBUG_KERNEL && BROKEN
10004 ---help---
10005 Mark the kernel read-only data as write-protected in the pagetables,
10006 in order to catch accidental (and incorrect) writes to such const
10007 diff -urNp linux-2.6.35.4/arch/x86/kernel/acpi/boot.c linux-2.6.35.4/arch/x86/kernel/acpi/boot.c
10008 --- linux-2.6.35.4/arch/x86/kernel/acpi/boot.c 2010-08-26 19:47:12.000000000 -0400
10009 +++ linux-2.6.35.4/arch/x86/kernel/acpi/boot.c 2010-09-17 20:12:09.000000000 -0400
10010 @@ -1472,7 +1472,7 @@ static struct dmi_system_id __initdata a
10011 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6715b"),
10012 },
10013 },
10014 - {}
10015 + { NULL, NULL, {{0, {0}}}, NULL}
10016 };
10017
10018 /*
10019 diff -urNp linux-2.6.35.4/arch/x86/kernel/acpi/realmode/wakeup.S linux-2.6.35.4/arch/x86/kernel/acpi/realmode/wakeup.S
10020 --- linux-2.6.35.4/arch/x86/kernel/acpi/realmode/wakeup.S 2010-08-26 19:47:12.000000000 -0400
10021 +++ linux-2.6.35.4/arch/x86/kernel/acpi/realmode/wakeup.S 2010-09-17 20:12:09.000000000 -0400
10022 @@ -104,7 +104,7 @@ _start:
10023 movl %eax, %ecx
10024 orl %edx, %ecx
10025 jz 1f
10026 - movl $0xc0000080, %ecx
10027 + mov $MSR_EFER, %ecx
10028 wrmsr
10029 1:
10030
10031 diff -urNp linux-2.6.35.4/arch/x86/kernel/acpi/sleep.c linux-2.6.35.4/arch/x86/kernel/acpi/sleep.c
10032 --- linux-2.6.35.4/arch/x86/kernel/acpi/sleep.c 2010-08-26 19:47:12.000000000 -0400
10033 +++ linux-2.6.35.4/arch/x86/kernel/acpi/sleep.c 2010-09-17 20:12:09.000000000 -0400
10034 @@ -11,11 +11,12 @@
10035 #include <linux/cpumask.h>
10036 #include <asm/segment.h>
10037 #include <asm/desc.h>
10038 +#include <asm/e820.h>
10039
10040 #include "realmode/wakeup.h"
10041 #include "sleep.h"
10042
10043 -unsigned long acpi_wakeup_address;
10044 +unsigned long acpi_wakeup_address = 0x2000;
10045 unsigned long acpi_realmode_flags;
10046
10047 /* address in low memory of the wakeup routine. */
10048 @@ -96,8 +97,12 @@ int acpi_save_state_mem(void)
10049 header->trampoline_segment = setup_trampoline() >> 4;
10050 #ifdef CONFIG_SMP
10051 stack_start.sp = temp_stack + sizeof(temp_stack);
10052 +
10053 + pax_open_kernel();
10054 early_gdt_descr.address =
10055 (unsigned long)get_cpu_gdt_table(smp_processor_id());
10056 + pax_close_kernel();
10057 +
10058 initial_gs = per_cpu_offset(smp_processor_id());
10059 #endif
10060 initial_code = (unsigned long)wakeup_long64;
10061 diff -urNp linux-2.6.35.4/arch/x86/kernel/acpi/wakeup_32.S linux-2.6.35.4/arch/x86/kernel/acpi/wakeup_32.S
10062 --- linux-2.6.35.4/arch/x86/kernel/acpi/wakeup_32.S 2010-08-26 19:47:12.000000000 -0400
10063 +++ linux-2.6.35.4/arch/x86/kernel/acpi/wakeup_32.S 2010-09-17 20:12:09.000000000 -0400
10064 @@ -30,13 +30,11 @@ wakeup_pmode_return:
10065 # and restore the stack ... but you need gdt for this to work
10066 movl saved_context_esp, %esp
10067
10068 - movl %cs:saved_magic, %eax
10069 - cmpl $0x12345678, %eax
10070 + cmpl $0x12345678, saved_magic
10071 jne bogus_magic
10072
10073 # jump to place where we left off
10074 - movl saved_eip, %eax
10075 - jmp *%eax
10076 + jmp *(saved_eip)
10077
10078 bogus_magic:
10079 jmp bogus_magic
10080 diff -urNp linux-2.6.35.4/arch/x86/kernel/alternative.c linux-2.6.35.4/arch/x86/kernel/alternative.c
10081 --- linux-2.6.35.4/arch/x86/kernel/alternative.c 2010-08-26 19:47:12.000000000 -0400
10082 +++ linux-2.6.35.4/arch/x86/kernel/alternative.c 2010-09-17 20:12:09.000000000 -0400
10083 @@ -247,7 +247,7 @@ static void alternatives_smp_lock(const
10084 if (!*poff || ptr < text || ptr >= text_end)
10085 continue;
10086 /* turn DS segment override prefix into lock prefix */
10087 - if (*ptr == 0x3e)
10088 + if (*ktla_ktva(ptr) == 0x3e)
10089 text_poke(ptr, ((unsigned char []){0xf0}), 1);
10090 };
10091 mutex_unlock(&text_mutex);
10092 @@ -268,7 +268,7 @@ static void alternatives_smp_unlock(cons
10093 if (!*poff || ptr < text || ptr >= text_end)
10094 continue;
10095 /* turn lock prefix into DS segment override prefix */
10096 - if (*ptr == 0xf0)
10097 + if (*ktla_ktva(ptr) == 0xf0)
10098 text_poke(ptr, ((unsigned char []){0x3E}), 1);
10099 };
10100 mutex_unlock(&text_mutex);
10101 @@ -436,7 +436,7 @@ void __init_or_module apply_paravirt(str
10102
10103 BUG_ON(p->len > MAX_PATCH_LEN);
10104 /* prep the buffer with the original instructions */
10105 - memcpy(insnbuf, p->instr, p->len);
10106 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
10107 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
10108 (unsigned long)p->instr, p->len);
10109
10110 @@ -504,7 +504,7 @@ void __init alternative_instructions(voi
10111 if (smp_alt_once)
10112 free_init_pages("SMP alternatives",
10113 (unsigned long)__smp_locks,
10114 - (unsigned long)__smp_locks_end);
10115 + PAGE_ALIGN((unsigned long)__smp_locks_end));
10116
10117 restart_nmi();
10118 }
10119 @@ -521,13 +521,17 @@ void __init alternative_instructions(voi
10120 * instructions. And on the local CPU you need to be protected again NMI or MCE
10121 * handlers seeing an inconsistent instruction while you patch.
10122 */
10123 -static void *__init_or_module text_poke_early(void *addr, const void *opcode,
10124 +static void *__kprobes text_poke_early(void *addr, const void *opcode,
10125 size_t len)
10126 {
10127 unsigned long flags;
10128 local_irq_save(flags);
10129 - memcpy(addr, opcode, len);
10130 +
10131 + pax_open_kernel();
10132 + memcpy(ktla_ktva(addr), opcode, len);
10133 sync_core();
10134 + pax_close_kernel();
10135 +
10136 local_irq_restore(flags);
10137 /* Could also do a CLFLUSH here to speed up CPU recovery; but
10138 that causes hangs on some VIA CPUs. */
10139 @@ -549,36 +553,22 @@ static void *__init_or_module text_poke_
10140 */
10141 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
10142 {
10143 - unsigned long flags;
10144 - char *vaddr;
10145 + unsigned char *vaddr = ktla_ktva(addr);
10146 struct page *pages[2];
10147 - int i;
10148 + size_t i;
10149
10150 if (!core_kernel_text((unsigned long)addr)) {
10151 - pages[0] = vmalloc_to_page(addr);
10152 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
10153 + pages[0] = vmalloc_to_page(vaddr);
10154 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
10155 } else {
10156 - pages[0] = virt_to_page(addr);
10157 + pages[0] = virt_to_page(vaddr);
10158 WARN_ON(!PageReserved(pages[0]));
10159 - pages[1] = virt_to_page(addr + PAGE_SIZE);
10160 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
10161 }
10162 BUG_ON(!pages[0]);
10163 - local_irq_save(flags);
10164 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
10165 - if (pages[1])
10166 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
10167 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
10168 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
10169 - clear_fixmap(FIX_TEXT_POKE0);
10170 - if (pages[1])
10171 - clear_fixmap(FIX_TEXT_POKE1);
10172 - local_flush_tlb();
10173 - sync_core();
10174 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
10175 - that causes hangs on some VIA CPUs. */
10176 + text_poke_early(addr, opcode, len);
10177 for (i = 0; i < len; i++)
10178 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
10179 - local_irq_restore(flags);
10180 + BUG_ON(((char *)vaddr)[i] != ((char *)opcode)[i]);
10181 return addr;
10182 }
10183
10184 diff -urNp linux-2.6.35.4/arch/x86/kernel/amd_iommu.c linux-2.6.35.4/arch/x86/kernel/amd_iommu.c
10185 --- linux-2.6.35.4/arch/x86/kernel/amd_iommu.c 2010-08-26 19:47:12.000000000 -0400
10186 +++ linux-2.6.35.4/arch/x86/kernel/amd_iommu.c 2010-09-17 20:12:09.000000000 -0400
10187 @@ -2284,7 +2284,7 @@ static void prealloc_protection_domains(
10188 }
10189 }
10190
10191 -static struct dma_map_ops amd_iommu_dma_ops = {
10192 +static const struct dma_map_ops amd_iommu_dma_ops = {
10193 .alloc_coherent = alloc_coherent,
10194 .free_coherent = free_coherent,
10195 .map_page = map_page,
10196 diff -urNp linux-2.6.35.4/arch/x86/kernel/apic/io_apic.c linux-2.6.35.4/arch/x86/kernel/apic/io_apic.c
10197 --- linux-2.6.35.4/arch/x86/kernel/apic/io_apic.c 2010-08-26 19:47:12.000000000 -0400
10198 +++ linux-2.6.35.4/arch/x86/kernel/apic/io_apic.c 2010-09-17 20:12:09.000000000 -0400
10199 @@ -691,7 +691,7 @@ struct IO_APIC_route_entry **alloc_ioapi
10200 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
10201 GFP_ATOMIC);
10202 if (!ioapic_entries)
10203 - return 0;
10204 + return NULL;
10205
10206 for (apic = 0; apic < nr_ioapics; apic++) {
10207 ioapic_entries[apic] =
10208 @@ -708,7 +708,7 @@ nomem:
10209 kfree(ioapic_entries[apic]);
10210 kfree(ioapic_entries);
10211
10212 - return 0;
10213 + return NULL;
10214 }
10215
10216 /*
10217 @@ -1118,7 +1118,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
10218 }
10219 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
10220
10221 -void lock_vector_lock(void)
10222 +void lock_vector_lock(void) __acquires(vector_lock)
10223 {
10224 /* Used to the online set of cpus does not change
10225 * during assign_irq_vector.
10226 @@ -1126,7 +1126,7 @@ void lock_vector_lock(void)
10227 raw_spin_lock(&vector_lock);
10228 }
10229
10230 -void unlock_vector_lock(void)
10231 +void unlock_vector_lock(void) __releases(vector_lock)
10232 {
10233 raw_spin_unlock(&vector_lock);
10234 }
10235 diff -urNp linux-2.6.35.4/arch/x86/kernel/apm_32.c linux-2.6.35.4/arch/x86/kernel/apm_32.c
10236 --- linux-2.6.35.4/arch/x86/kernel/apm_32.c 2010-08-26 19:47:12.000000000 -0400
10237 +++ linux-2.6.35.4/arch/x86/kernel/apm_32.c 2010-09-17 20:12:09.000000000 -0400
10238 @@ -410,7 +410,7 @@ static DEFINE_MUTEX(apm_mutex);
10239 * This is for buggy BIOS's that refer to (real mode) segment 0x40
10240 * even though they are called in protected mode.
10241 */
10242 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
10243 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
10244 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
10245
10246 static const char driver_version[] = "1.16ac"; /* no spaces */
10247 @@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
10248 BUG_ON(cpu != 0);
10249 gdt = get_cpu_gdt_table(cpu);
10250 save_desc_40 = gdt[0x40 / 8];
10251 +
10252 + pax_open_kernel();
10253 gdt[0x40 / 8] = bad_bios_desc;
10254 + pax_close_kernel();
10255
10256 apm_irq_save(flags);
10257 APM_DO_SAVE_SEGS;
10258 @@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
10259 &call->esi);
10260 APM_DO_RESTORE_SEGS;
10261 apm_irq_restore(flags);
10262 +
10263 + pax_open_kernel();
10264 gdt[0x40 / 8] = save_desc_40;
10265 + pax_close_kernel();
10266 +
10267 put_cpu();
10268
10269 return call->eax & 0xff;
10270 @@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void
10271 BUG_ON(cpu != 0);
10272 gdt = get_cpu_gdt_table(cpu);
10273 save_desc_40 = gdt[0x40 / 8];
10274 +
10275 + pax_open_kernel();
10276 gdt[0x40 / 8] = bad_bios_desc;
10277 + pax_close_kernel();
10278
10279 apm_irq_save(flags);
10280 APM_DO_SAVE_SEGS;
10281 @@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void
10282 &call->eax);
10283 APM_DO_RESTORE_SEGS;
10284 apm_irq_restore(flags);
10285 +
10286 + pax_open_kernel();
10287 gdt[0x40 / 8] = save_desc_40;
10288 + pax_close_kernel();
10289 +
10290 put_cpu();
10291 return error;
10292 }
10293 @@ -975,7 +989,7 @@ recalc:
10294
10295 static void apm_power_off(void)
10296 {
10297 - unsigned char po_bios_call[] = {
10298 + const unsigned char po_bios_call[] = {
10299 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
10300 0x8e, 0xd0, /* movw ax,ss */
10301 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
10302 @@ -1931,7 +1945,10 @@ static const struct file_operations apm_
10303 static struct miscdevice apm_device = {
10304 APM_MINOR_DEV,
10305 "apm_bios",
10306 - &apm_bios_fops
10307 + &apm_bios_fops,
10308 + {NULL, NULL},
10309 + NULL,
10310 + NULL
10311 };
10312
10313
10314 @@ -2252,7 +2269,7 @@ static struct dmi_system_id __initdata a
10315 { DMI_MATCH(DMI_SYS_VENDOR, "IBM"), },
10316 },
10317
10318 - { }
10319 + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL}
10320 };
10321
10322 /*
10323 @@ -2355,12 +2372,15 @@ static int __init apm_init(void)
10324 * code to that CPU.
10325 */
10326 gdt = get_cpu_gdt_table(0);
10327 +
10328 + pax_open_kernel();
10329 set_desc_base(&gdt[APM_CS >> 3],
10330 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
10331 set_desc_base(&gdt[APM_CS_16 >> 3],
10332 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
10333 set_desc_base(&gdt[APM_DS >> 3],
10334 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
10335 + pax_close_kernel();
10336
10337 proc_create("apm", 0, NULL, &apm_file_ops);
10338
10339 diff -urNp linux-2.6.35.4/arch/x86/kernel/asm-offsets_32.c linux-2.6.35.4/arch/x86/kernel/asm-offsets_32.c
10340 --- linux-2.6.35.4/arch/x86/kernel/asm-offsets_32.c 2010-08-26 19:47:12.000000000 -0400
10341 +++ linux-2.6.35.4/arch/x86/kernel/asm-offsets_32.c 2010-09-17 20:12:09.000000000 -0400
10342 @@ -115,6 +115,11 @@ void foo(void)
10343 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
10344 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
10345 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
10346 +
10347 +#ifdef CONFIG_PAX_KERNEXEC
10348 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
10349 +#endif
10350 +
10351 #endif
10352
10353 #ifdef CONFIG_XEN
10354 diff -urNp linux-2.6.35.4/arch/x86/kernel/asm-offsets_64.c linux-2.6.35.4/arch/x86/kernel/asm-offsets_64.c
10355 --- linux-2.6.35.4/arch/x86/kernel/asm-offsets_64.c 2010-08-26 19:47:12.000000000 -0400
10356 +++ linux-2.6.35.4/arch/x86/kernel/asm-offsets_64.c 2010-09-17 20:12:09.000000000 -0400
10357 @@ -63,6 +63,18 @@ int main(void)
10358 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
10359 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
10360 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
10361 +
10362 +#ifdef CONFIG_PAX_KERNEXEC
10363 + OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
10364 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
10365 +#endif
10366 +
10367 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10368 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
10369 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
10370 + OFFSET(PV_MMU_set_pgd, pv_mmu_ops, set_pgd);
10371 +#endif
10372 +
10373 #endif
10374
10375
10376 @@ -115,6 +127,7 @@ int main(void)
10377 ENTRY(cr8);
10378 BLANK();
10379 #undef ENTRY
10380 + DEFINE(TSS_size, sizeof(struct tss_struct));
10381 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
10382 BLANK();
10383 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
10384 diff -urNp linux-2.6.35.4/arch/x86/kernel/cpu/common.c linux-2.6.35.4/arch/x86/kernel/cpu/common.c
10385 --- linux-2.6.35.4/arch/x86/kernel/cpu/common.c 2010-08-26 19:47:12.000000000 -0400
10386 +++ linux-2.6.35.4/arch/x86/kernel/cpu/common.c 2010-09-17 20:12:09.000000000 -0400
10387 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
10388
10389 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
10390
10391 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
10392 -#ifdef CONFIG_X86_64
10393 - /*
10394 - * We need valid kernel segments for data and code in long mode too
10395 - * IRET will check the segment types kkeil 2000/10/28
10396 - * Also sysret mandates a special GDT layout
10397 - *
10398 - * TLS descriptors are currently at a different place compared to i386.
10399 - * Hopefully nobody expects them at a fixed place (Wine?)
10400 - */
10401 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
10402 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
10403 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
10404 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
10405 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
10406 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
10407 -#else
10408 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
10409 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10410 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
10411 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
10412 - /*
10413 - * Segments used for calling PnP BIOS have byte granularity.
10414 - * They code segments and data segments have fixed 64k limits,
10415 - * the transfer segment sizes are set at run time.
10416 - */
10417 - /* 32-bit code */
10418 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10419 - /* 16-bit code */
10420 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10421 - /* 16-bit data */
10422 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
10423 - /* 16-bit data */
10424 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
10425 - /* 16-bit data */
10426 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
10427 - /*
10428 - * The APM segments have byte granularity and their bases
10429 - * are set at run time. All have 64k limits.
10430 - */
10431 - /* 32-bit code */
10432 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10433 - /* 16-bit code */
10434 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10435 - /* data */
10436 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
10437 -
10438 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10439 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10440 - GDT_STACK_CANARY_INIT
10441 -#endif
10442 -} };
10443 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
10444 -
10445 static int __init x86_xsave_setup(char *s)
10446 {
10447 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
10448 @@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
10449 {
10450 struct desc_ptr gdt_descr;
10451
10452 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
10453 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
10454 gdt_descr.size = GDT_SIZE - 1;
10455 load_gdt(&gdt_descr);
10456 /* Reload the per-cpu base */
10457 @@ -802,6 +748,10 @@ static void __cpuinit identify_cpu(struc
10458 /* Filter out anything that depends on CPUID levels we don't have */
10459 filter_cpuid_features(c, true);
10460
10461 +#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
10462 + setup_clear_cpu_cap(X86_FEATURE_SEP);
10463 +#endif
10464 +
10465 /* If the model name is still unset, do table lookup. */
10466 if (!c->x86_model_id[0]) {
10467 const char *p;
10468 @@ -1117,7 +1067,7 @@ void __cpuinit cpu_init(void)
10469 int i;
10470
10471 cpu = stack_smp_processor_id();
10472 - t = &per_cpu(init_tss, cpu);
10473 + t = init_tss + cpu;
10474 oist = &per_cpu(orig_ist, cpu);
10475
10476 #ifdef CONFIG_NUMA
10477 @@ -1143,7 +1093,7 @@ void __cpuinit cpu_init(void)
10478 switch_to_new_gdt(cpu);
10479 loadsegment(fs, 0);
10480
10481 - load_idt((const struct desc_ptr *)&idt_descr);
10482 + load_idt(&idt_descr);
10483
10484 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
10485 syscall_init();
10486 @@ -1205,7 +1155,7 @@ void __cpuinit cpu_init(void)
10487 {
10488 int cpu = smp_processor_id();
10489 struct task_struct *curr = current;
10490 - struct tss_struct *t = &per_cpu(init_tss, cpu);
10491 + struct tss_struct *t = init_tss + cpu;
10492 struct thread_struct *thread = &curr->thread;
10493
10494 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
10495 diff -urNp linux-2.6.35.4/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c linux-2.6.35.4/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
10496 --- linux-2.6.35.4/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 2010-08-26 19:47:12.000000000 -0400
10497 +++ linux-2.6.35.4/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 2010-09-17 20:12:09.000000000 -0400
10498 @@ -484,7 +484,7 @@ static const struct dmi_system_id sw_any
10499 DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
10500 },
10501 },
10502 - { }
10503 + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL }
10504 };
10505
10506 static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
10507 diff -urNp linux-2.6.35.4/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c linux-2.6.35.4/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
10508 --- linux-2.6.35.4/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 2010-08-26 19:47:12.000000000 -0400
10509 +++ linux-2.6.35.4/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 2010-09-17 20:12:09.000000000 -0400
10510 @@ -226,7 +226,7 @@ static struct cpu_model models[] =
10511 { &cpu_ids[CPU_MP4HT_D0], NULL, 0, NULL },
10512 { &cpu_ids[CPU_MP4HT_E0], NULL, 0, NULL },
10513
10514 - { NULL, }
10515 + { NULL, NULL, 0, NULL}
10516 };
10517 #undef _BANIAS
10518 #undef BANIAS
10519 diff -urNp linux-2.6.35.4/arch/x86/kernel/cpu/intel.c linux-2.6.35.4/arch/x86/kernel/cpu/intel.c
10520 --- linux-2.6.35.4/arch/x86/kernel/cpu/intel.c 2010-08-26 19:47:12.000000000 -0400
10521 +++ linux-2.6.35.4/arch/x86/kernel/cpu/intel.c 2010-09-17 20:12:09.000000000 -0400
10522 @@ -160,7 +160,7 @@ static void __cpuinit trap_init_f00f_bug
10523 * Update the IDT descriptor and reload the IDT so that
10524 * it uses the read-only mapped virtual address.
10525 */
10526 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
10527 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
10528 load_idt(&idt_descr);
10529 }
10530 #endif
10531 diff -urNp linux-2.6.35.4/arch/x86/kernel/cpu/Makefile linux-2.6.35.4/arch/x86/kernel/cpu/Makefile
10532 --- linux-2.6.35.4/arch/x86/kernel/cpu/Makefile 2010-08-26 19:47:12.000000000 -0400
10533 +++ linux-2.6.35.4/arch/x86/kernel/cpu/Makefile 2010-09-17 20:12:09.000000000 -0400
10534 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
10535 CFLAGS_REMOVE_perf_event.o = -pg
10536 endif
10537
10538 -# Make sure load_percpu_segment has no stackprotector
10539 -nostackp := $(call cc-option, -fno-stack-protector)
10540 -CFLAGS_common.o := $(nostackp)
10541 -
10542 obj-y := intel_cacheinfo.o addon_cpuid_features.o
10543 obj-y += proc.o capflags.o powerflags.o common.o
10544 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
10545 diff -urNp linux-2.6.35.4/arch/x86/kernel/cpu/mcheck/mce.c linux-2.6.35.4/arch/x86/kernel/cpu/mcheck/mce.c
10546 --- linux-2.6.35.4/arch/x86/kernel/cpu/mcheck/mce.c 2010-08-26 19:47:12.000000000 -0400
10547 +++ linux-2.6.35.4/arch/x86/kernel/cpu/mcheck/mce.c 2010-09-17 20:12:09.000000000 -0400
10548 @@ -219,7 +219,7 @@ static void print_mce(struct mce *m)
10549 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
10550 m->cs, m->ip);
10551
10552 - if (m->cs == __KERNEL_CS)
10553 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
10554 print_symbol("{%s}", m->ip);
10555 pr_cont("\n");
10556 }
10557 @@ -1471,14 +1471,14 @@ void __cpuinit mcheck_cpu_init(struct cp
10558 */
10559
10560 static DEFINE_SPINLOCK(mce_state_lock);
10561 -static int open_count; /* #times opened */
10562 +static atomic_t open_count; /* #times opened */
10563 static int open_exclu; /* already open exclusive? */
10564
10565 static int mce_open(struct inode *inode, struct file *file)
10566 {
10567 spin_lock(&mce_state_lock);
10568
10569 - if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
10570 + if (open_exclu || (atomic_read(&open_count) && (file->f_flags & O_EXCL))) {
10571 spin_unlock(&mce_state_lock);
10572
10573 return -EBUSY;
10574 @@ -1486,7 +1486,7 @@ static int mce_open(struct inode *inode,
10575
10576 if (file->f_flags & O_EXCL)
10577 open_exclu = 1;
10578 - open_count++;
10579 + atomic_inc(&open_count);
10580
10581 spin_unlock(&mce_state_lock);
10582
10583 @@ -1497,7 +1497,7 @@ static int mce_release(struct inode *ino
10584 {
10585 spin_lock(&mce_state_lock);
10586
10587 - open_count--;
10588 + atomic_dec(&open_count);
10589 open_exclu = 0;
10590
10591 spin_unlock(&mce_state_lock);
10592 @@ -1683,6 +1683,7 @@ static struct miscdevice mce_log_device
10593 MISC_MCELOG_MINOR,
10594 "mcelog",
10595 &mce_chrdev_ops,
10596 + {NULL, NULL}, NULL, NULL
10597 };
10598
10599 /*
10600 diff -urNp linux-2.6.35.4/arch/x86/kernel/cpu/mtrr/generic.c linux-2.6.35.4/arch/x86/kernel/cpu/mtrr/generic.c
10601 --- linux-2.6.35.4/arch/x86/kernel/cpu/mtrr/generic.c 2010-08-26 19:47:12.000000000 -0400
10602 +++ linux-2.6.35.4/arch/x86/kernel/cpu/mtrr/generic.c 2010-09-17 20:12:09.000000000 -0400
10603 @@ -28,7 +28,7 @@ static struct fixed_range_block fixed_ra
10604 { MSR_MTRRfix64K_00000, 1 }, /* one 64k MTRR */
10605 { MSR_MTRRfix16K_80000, 2 }, /* two 16k MTRRs */
10606 { MSR_MTRRfix4K_C0000, 8 }, /* eight 4k MTRRs */
10607 - {}
10608 + { 0, 0 }
10609 };
10610
10611 static unsigned long smp_changes_mask;
10612 diff -urNp linux-2.6.35.4/arch/x86/kernel/cpu/mtrr/main.c linux-2.6.35.4/arch/x86/kernel/cpu/mtrr/main.c
10613 --- linux-2.6.35.4/arch/x86/kernel/cpu/mtrr/main.c 2010-08-26 19:47:12.000000000 -0400
10614 +++ linux-2.6.35.4/arch/x86/kernel/cpu/mtrr/main.c 2010-09-17 20:12:09.000000000 -0400
10615 @@ -61,7 +61,7 @@ static DEFINE_MUTEX(mtrr_mutex);
10616 u64 size_or_mask, size_and_mask;
10617 static bool mtrr_aps_delayed_init;
10618
10619 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
10620 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
10621
10622 const struct mtrr_ops *mtrr_if;
10623
10624 diff -urNp linux-2.6.35.4/arch/x86/kernel/cpu/mtrr/mtrr.h linux-2.6.35.4/arch/x86/kernel/cpu/mtrr/mtrr.h
10625 --- linux-2.6.35.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2010-08-26 19:47:12.000000000 -0400
10626 +++ linux-2.6.35.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2010-09-17 20:12:09.000000000 -0400
10627 @@ -12,19 +12,19 @@
10628 extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
10629
10630 struct mtrr_ops {
10631 - u32 vendor;
10632 - u32 use_intel_if;
10633 - void (*set)(unsigned int reg, unsigned long base,
10634 + const u32 vendor;
10635 + const u32 use_intel_if;
10636 + void (* const set)(unsigned int reg, unsigned long base,
10637 unsigned long size, mtrr_type type);
10638 - void (*set_all)(void);
10639 + void (* const set_all)(void);
10640
10641 - void (*get)(unsigned int reg, unsigned long *base,
10642 + void (* const get)(unsigned int reg, unsigned long *base,
10643 unsigned long *size, mtrr_type *type);
10644 - int (*get_free_region)(unsigned long base, unsigned long size,
10645 + int (* const get_free_region)(unsigned long base, unsigned long size,
10646 int replace_reg);
10647 - int (*validate_add_page)(unsigned long base, unsigned long size,
10648 + int (* const validate_add_page)(unsigned long base, unsigned long size,
10649 unsigned int type);
10650 - int (*have_wrcomb)(void);
10651 + int (* const have_wrcomb)(void);
10652 };
10653
10654 extern int generic_get_free_region(unsigned long base, unsigned long size,
10655 diff -urNp linux-2.6.35.4/arch/x86/kernel/cpu/perfctr-watchdog.c linux-2.6.35.4/arch/x86/kernel/cpu/perfctr-watchdog.c
10656 --- linux-2.6.35.4/arch/x86/kernel/cpu/perfctr-watchdog.c 2010-08-26 19:47:12.000000000 -0400
10657 +++ linux-2.6.35.4/arch/x86/kernel/cpu/perfctr-watchdog.c 2010-09-17 20:12:09.000000000 -0400
10658 @@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
10659
10660 /* Interface defining a CPU specific perfctr watchdog */
10661 struct wd_ops {
10662 - int (*reserve)(void);
10663 - void (*unreserve)(void);
10664 - int (*setup)(unsigned nmi_hz);
10665 - void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
10666 - void (*stop)(void);
10667 + int (* const reserve)(void);
10668 + void (* const unreserve)(void);
10669 + int (* const setup)(unsigned nmi_hz);
10670 + void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
10671 + void (* const stop)(void);
10672 unsigned perfctr;
10673 unsigned evntsel;
10674 u64 checkbit;
10675 @@ -634,6 +634,7 @@ static const struct wd_ops p4_wd_ops = {
10676 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
10677 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
10678
10679 +/* cannot be const, see probe_nmi_watchdog */
10680 static struct wd_ops intel_arch_wd_ops;
10681
10682 static int setup_intel_arch_watchdog(unsigned nmi_hz)
10683 @@ -686,6 +687,7 @@ static int setup_intel_arch_watchdog(uns
10684 return 1;
10685 }
10686
10687 +/* cannot be const */
10688 static struct wd_ops intel_arch_wd_ops __read_mostly = {
10689 .reserve = single_msr_reserve,
10690 .unreserve = single_msr_unreserve,
10691 diff -urNp linux-2.6.35.4/arch/x86/kernel/cpu/perf_event.c linux-2.6.35.4/arch/x86/kernel/cpu/perf_event.c
10692 --- linux-2.6.35.4/arch/x86/kernel/cpu/perf_event.c 2010-08-26 19:47:12.000000000 -0400
10693 +++ linux-2.6.35.4/arch/x86/kernel/cpu/perf_event.c 2010-09-17 20:12:09.000000000 -0400
10694 @@ -1685,7 +1685,7 @@ perf_callchain_user(struct pt_regs *regs
10695 break;
10696
10697 callchain_store(entry, frame.return_address);
10698 - fp = frame.next_frame;
10699 + fp = (__force const void __user *)frame.next_frame;
10700 }
10701 }
10702
10703 diff -urNp linux-2.6.35.4/arch/x86/kernel/crash.c linux-2.6.35.4/arch/x86/kernel/crash.c
10704 --- linux-2.6.35.4/arch/x86/kernel/crash.c 2010-08-26 19:47:12.000000000 -0400
10705 +++ linux-2.6.35.4/arch/x86/kernel/crash.c 2010-09-17 20:12:09.000000000 -0400
10706 @@ -40,7 +40,7 @@ static void kdump_nmi_callback(int cpu,
10707 regs = args->regs;
10708
10709 #ifdef CONFIG_X86_32
10710 - if (!user_mode_vm(regs)) {
10711 + if (!user_mode(regs)) {
10712 crash_fixup_ss_esp(&fixed_regs, regs);
10713 regs = &fixed_regs;
10714 }
10715 diff -urNp linux-2.6.35.4/arch/x86/kernel/doublefault_32.c linux-2.6.35.4/arch/x86/kernel/doublefault_32.c
10716 --- linux-2.6.35.4/arch/x86/kernel/doublefault_32.c 2010-08-26 19:47:12.000000000 -0400
10717 +++ linux-2.6.35.4/arch/x86/kernel/doublefault_32.c 2010-09-17 20:12:09.000000000 -0400
10718 @@ -11,7 +11,7 @@
10719
10720 #define DOUBLEFAULT_STACKSIZE (1024)
10721 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
10722 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
10723 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
10724
10725 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
10726
10727 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
10728 unsigned long gdt, tss;
10729
10730 store_gdt(&gdt_desc);
10731 - gdt = gdt_desc.address;
10732 + gdt = (unsigned long)gdt_desc.address;
10733
10734 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
10735
10736 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
10737 /* 0x2 bit is always set */
10738 .flags = X86_EFLAGS_SF | 0x2,
10739 .sp = STACK_START,
10740 - .es = __USER_DS,
10741 + .es = __KERNEL_DS,
10742 .cs = __KERNEL_CS,
10743 .ss = __KERNEL_DS,
10744 - .ds = __USER_DS,
10745 + .ds = __KERNEL_DS,
10746 .fs = __KERNEL_PERCPU,
10747
10748 .__cr3 = __pa_nodebug(swapper_pg_dir),
10749 diff -urNp linux-2.6.35.4/arch/x86/kernel/dumpstack_32.c linux-2.6.35.4/arch/x86/kernel/dumpstack_32.c
10750 --- linux-2.6.35.4/arch/x86/kernel/dumpstack_32.c 2010-08-26 19:47:12.000000000 -0400
10751 +++ linux-2.6.35.4/arch/x86/kernel/dumpstack_32.c 2010-09-17 20:12:09.000000000 -0400
10752 @@ -107,11 +107,12 @@ void show_registers(struct pt_regs *regs
10753 * When in-kernel, we also print out the stack and code at the
10754 * time of the fault..
10755 */
10756 - if (!user_mode_vm(regs)) {
10757 + if (!user_mode(regs)) {
10758 unsigned int code_prologue = code_bytes * 43 / 64;
10759 unsigned int code_len = code_bytes;
10760 unsigned char c;
10761 u8 *ip;
10762 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
10763
10764 printk(KERN_EMERG "Stack:\n");
10765 show_stack_log_lvl(NULL, regs, &regs->sp,
10766 @@ -119,10 +120,10 @@ void show_registers(struct pt_regs *regs
10767
10768 printk(KERN_EMERG "Code: ");
10769
10770 - ip = (u8 *)regs->ip - code_prologue;
10771 + ip = (u8 *)regs->ip - code_prologue + cs_base;
10772 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
10773 /* try starting at IP */
10774 - ip = (u8 *)regs->ip;
10775 + ip = (u8 *)regs->ip + cs_base;
10776 code_len = code_len - code_prologue + 1;
10777 }
10778 for (i = 0; i < code_len; i++, ip++) {
10779 @@ -131,7 +132,7 @@ void show_registers(struct pt_regs *regs
10780 printk(" Bad EIP value.");
10781 break;
10782 }
10783 - if (ip == (u8 *)regs->ip)
10784 + if (ip == (u8 *)regs->ip + cs_base)
10785 printk("<%02x> ", c);
10786 else
10787 printk("%02x ", c);
10788 @@ -144,6 +145,7 @@ int is_valid_bugaddr(unsigned long ip)
10789 {
10790 unsigned short ud2;
10791
10792 + ip = ktla_ktva(ip);
10793 if (ip < PAGE_OFFSET)
10794 return 0;
10795 if (probe_kernel_address((unsigned short *)ip, ud2))
10796 diff -urNp linux-2.6.35.4/arch/x86/kernel/dumpstack.c linux-2.6.35.4/arch/x86/kernel/dumpstack.c
10797 --- linux-2.6.35.4/arch/x86/kernel/dumpstack.c 2010-08-26 19:47:12.000000000 -0400
10798 +++ linux-2.6.35.4/arch/x86/kernel/dumpstack.c 2010-09-17 20:12:09.000000000 -0400
10799 @@ -207,7 +207,7 @@ void dump_stack(void)
10800 #endif
10801
10802 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
10803 - current->pid, current->comm, print_tainted(),
10804 + task_pid_nr(current), current->comm, print_tainted(),
10805 init_utsname()->release,
10806 (int)strcspn(init_utsname()->version, " "),
10807 init_utsname()->version);
10808 @@ -263,7 +263,7 @@ void __kprobes oops_end(unsigned long fl
10809 panic("Fatal exception in interrupt");
10810 if (panic_on_oops)
10811 panic("Fatal exception");
10812 - do_exit(signr);
10813 + do_group_exit(signr);
10814 }
10815
10816 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
10817 @@ -290,7 +290,7 @@ int __kprobes __die(const char *str, str
10818
10819 show_registers(regs);
10820 #ifdef CONFIG_X86_32
10821 - if (user_mode_vm(regs)) {
10822 + if (user_mode(regs)) {
10823 sp = regs->sp;
10824 ss = regs->ss & 0xffff;
10825 } else {
10826 @@ -318,7 +318,7 @@ void die(const char *str, struct pt_regs
10827 unsigned long flags = oops_begin();
10828 int sig = SIGSEGV;
10829
10830 - if (!user_mode_vm(regs))
10831 + if (!user_mode(regs))
10832 report_bug(regs->ip, regs);
10833
10834 if (__die(str, regs, err))
10835 diff -urNp linux-2.6.35.4/arch/x86/kernel/efi_32.c linux-2.6.35.4/arch/x86/kernel/efi_32.c
10836 --- linux-2.6.35.4/arch/x86/kernel/efi_32.c 2010-08-26 19:47:12.000000000 -0400
10837 +++ linux-2.6.35.4/arch/x86/kernel/efi_32.c 2010-09-17 20:12:09.000000000 -0400
10838 @@ -38,70 +38,38 @@
10839 */
10840
10841 static unsigned long efi_rt_eflags;
10842 -static pgd_t efi_bak_pg_dir_pointer[2];
10843 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
10844
10845 -void efi_call_phys_prelog(void)
10846 +void __init efi_call_phys_prelog(void)
10847 {
10848 - unsigned long cr4;
10849 - unsigned long temp;
10850 struct desc_ptr gdt_descr;
10851
10852 local_irq_save(efi_rt_eflags);
10853
10854 - /*
10855 - * If I don't have PAE, I should just duplicate two entries in page
10856 - * directory. If I have PAE, I just need to duplicate one entry in
10857 - * page directory.
10858 - */
10859 - cr4 = read_cr4_safe();
10860
10861 - if (cr4 & X86_CR4_PAE) {
10862 - efi_bak_pg_dir_pointer[0].pgd =
10863 - swapper_pg_dir[pgd_index(0)].pgd;
10864 - swapper_pg_dir[0].pgd =
10865 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
10866 - } else {
10867 - efi_bak_pg_dir_pointer[0].pgd =
10868 - swapper_pg_dir[pgd_index(0)].pgd;
10869 - efi_bak_pg_dir_pointer[1].pgd =
10870 - swapper_pg_dir[pgd_index(0x400000)].pgd;
10871 - swapper_pg_dir[pgd_index(0)].pgd =
10872 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
10873 - temp = PAGE_OFFSET + 0x400000;
10874 - swapper_pg_dir[pgd_index(0x400000)].pgd =
10875 - swapper_pg_dir[pgd_index(temp)].pgd;
10876 - }
10877 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
10878 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
10879 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
10880
10881 /*
10882 * After the lock is released, the original page table is restored.
10883 */
10884 __flush_tlb_all();
10885
10886 - gdt_descr.address = __pa(get_cpu_gdt_table(0));
10887 + gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
10888 gdt_descr.size = GDT_SIZE - 1;
10889 load_gdt(&gdt_descr);
10890 }
10891
10892 -void efi_call_phys_epilog(void)
10893 +void __init efi_call_phys_epilog(void)
10894 {
10895 - unsigned long cr4;
10896 struct desc_ptr gdt_descr;
10897
10898 - gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
10899 + gdt_descr.address = get_cpu_gdt_table(0);
10900 gdt_descr.size = GDT_SIZE - 1;
10901 load_gdt(&gdt_descr);
10902
10903 - cr4 = read_cr4_safe();
10904 -
10905 - if (cr4 & X86_CR4_PAE) {
10906 - swapper_pg_dir[pgd_index(0)].pgd =
10907 - efi_bak_pg_dir_pointer[0].pgd;
10908 - } else {
10909 - swapper_pg_dir[pgd_index(0)].pgd =
10910 - efi_bak_pg_dir_pointer[0].pgd;
10911 - swapper_pg_dir[pgd_index(0x400000)].pgd =
10912 - efi_bak_pg_dir_pointer[1].pgd;
10913 - }
10914 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
10915
10916 /*
10917 * After the lock is released, the original page table is restored.
10918 diff -urNp linux-2.6.35.4/arch/x86/kernel/efi_stub_32.S linux-2.6.35.4/arch/x86/kernel/efi_stub_32.S
10919 --- linux-2.6.35.4/arch/x86/kernel/efi_stub_32.S 2010-08-26 19:47:12.000000000 -0400
10920 +++ linux-2.6.35.4/arch/x86/kernel/efi_stub_32.S 2010-09-17 20:12:09.000000000 -0400
10921 @@ -6,6 +6,7 @@
10922 */
10923
10924 #include <linux/linkage.h>
10925 +#include <linux/init.h>
10926 #include <asm/page_types.h>
10927
10928 /*
10929 @@ -20,7 +21,7 @@
10930 * service functions will comply with gcc calling convention, too.
10931 */
10932
10933 -.text
10934 +__INIT
10935 ENTRY(efi_call_phys)
10936 /*
10937 * 0. The function can only be called in Linux kernel. So CS has been
10938 @@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
10939 * The mapping of lower virtual memory has been created in prelog and
10940 * epilog.
10941 */
10942 - movl $1f, %edx
10943 - subl $__PAGE_OFFSET, %edx
10944 - jmp *%edx
10945 + jmp 1f-__PAGE_OFFSET
10946 1:
10947
10948 /*
10949 @@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
10950 * parameter 2, ..., param n. To make things easy, we save the return
10951 * address of efi_call_phys in a global variable.
10952 */
10953 - popl %edx
10954 - movl %edx, saved_return_addr
10955 - /* get the function pointer into ECX*/
10956 - popl %ecx
10957 - movl %ecx, efi_rt_function_ptr
10958 - movl $2f, %edx
10959 - subl $__PAGE_OFFSET, %edx
10960 - pushl %edx
10961 + popl (saved_return_addr)
10962 + popl (efi_rt_function_ptr)
10963
10964 /*
10965 * 3. Clear PG bit in %CR0.
10966 @@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
10967 /*
10968 * 5. Call the physical function.
10969 */
10970 - jmp *%ecx
10971 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
10972
10973 -2:
10974 /*
10975 * 6. After EFI runtime service returns, control will return to
10976 * following instruction. We'd better readjust stack pointer first.
10977 @@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
10978 movl %cr0, %edx
10979 orl $0x80000000, %edx
10980 movl %edx, %cr0
10981 - jmp 1f
10982 -1:
10983 +
10984 /*
10985 * 8. Now restore the virtual mode from flat mode by
10986 * adding EIP with PAGE_OFFSET.
10987 */
10988 - movl $1f, %edx
10989 - jmp *%edx
10990 + jmp 1f+__PAGE_OFFSET
10991 1:
10992
10993 /*
10994 * 9. Balance the stack. And because EAX contain the return value,
10995 * we'd better not clobber it.
10996 */
10997 - leal efi_rt_function_ptr, %edx
10998 - movl (%edx), %ecx
10999 - pushl %ecx
11000 + pushl (efi_rt_function_ptr)
11001
11002 /*
11003 - * 10. Push the saved return address onto the stack and return.
11004 + * 10. Return to the saved return address.
11005 */
11006 - leal saved_return_addr, %edx
11007 - movl (%edx), %ecx
11008 - pushl %ecx
11009 - ret
11010 + jmpl *(saved_return_addr)
11011 ENDPROC(efi_call_phys)
11012 .previous
11013
11014 -.data
11015 +__INITDATA
11016 saved_return_addr:
11017 .long 0
11018 efi_rt_function_ptr:
11019 diff -urNp linux-2.6.35.4/arch/x86/kernel/entry_32.S linux-2.6.35.4/arch/x86/kernel/entry_32.S
11020 --- linux-2.6.35.4/arch/x86/kernel/entry_32.S 2010-08-26 19:47:12.000000000 -0400
11021 +++ linux-2.6.35.4/arch/x86/kernel/entry_32.S 2010-09-17 20:12:09.000000000 -0400
11022 @@ -192,7 +192,67 @@
11023
11024 #endif /* CONFIG_X86_32_LAZY_GS */
11025
11026 -.macro SAVE_ALL
11027 +.macro PAX_EXIT_KERNEL
11028 +#ifdef CONFIG_PAX_KERNEXEC
11029 +#ifdef CONFIG_PARAVIRT
11030 + push %eax; push %ecx;
11031 +#endif
11032 + mov %cs, %esi
11033 + cmp $__KERNEXEC_KERNEL_CS, %esi
11034 + jnz 2f
11035 +#ifdef CONFIG_PARAVIRT
11036 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
11037 + mov %eax, %esi
11038 +#else
11039 + mov %cr0, %esi
11040 +#endif
11041 + btr $16, %esi
11042 + ljmp $__KERNEL_CS, $1f
11043 +1:
11044 +#ifdef CONFIG_PARAVIRT
11045 + mov %esi, %eax
11046 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
11047 +#else
11048 + mov %esi, %cr0
11049 +#endif
11050 +2:
11051 +#ifdef CONFIG_PARAVIRT
11052 + pop %ecx; pop %eax
11053 +#endif
11054 +#endif
11055 +.endm
11056 +
11057 +.macro PAX_ENTER_KERNEL
11058 +#ifdef CONFIG_PAX_KERNEXEC
11059 +#ifdef CONFIG_PARAVIRT
11060 + push %eax; push %ecx;
11061 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
11062 + mov %eax, %esi
11063 +#else
11064 + mov %cr0, %esi
11065 +#endif
11066 + bts $16, %esi
11067 + jnc 1f
11068 + mov %cs, %esi
11069 + cmp $__KERNEL_CS, %esi
11070 + jz 3f
11071 + ljmp $__KERNEL_CS, $3f
11072 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
11073 +2:
11074 +#ifdef CONFIG_PARAVIRT
11075 + mov %esi, %eax
11076 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11077 +#else
11078 + mov %esi, %cr0
11079 +#endif
11080 +3:
11081 +#ifdef CONFIG_PARAVIRT
11082 + pop %ecx; pop %eax
11083 +#endif
11084 +#endif
11085 +.endm
11086 +
11087 +.macro __SAVE_ALL _DS
11088 cld
11089 PUSH_GS
11090 pushl %fs
11091 @@ -225,7 +285,7 @@
11092 pushl %ebx
11093 CFI_ADJUST_CFA_OFFSET 4
11094 CFI_REL_OFFSET ebx, 0
11095 - movl $(__USER_DS), %edx
11096 + movl $\_DS, %edx
11097 movl %edx, %ds
11098 movl %edx, %es
11099 movl $(__KERNEL_PERCPU), %edx
11100 @@ -233,6 +293,15 @@
11101 SET_KERNEL_GS %edx
11102 .endm
11103
11104 +.macro SAVE_ALL
11105 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
11106 + __SAVE_ALL __KERNEL_DS
11107 + PAX_ENTER_KERNEL
11108 +#else
11109 + __SAVE_ALL __USER_DS
11110 +#endif
11111 +.endm
11112 +
11113 .macro RESTORE_INT_REGS
11114 popl %ebx
11115 CFI_ADJUST_CFA_OFFSET -4
11116 @@ -357,7 +426,15 @@ check_userspace:
11117 movb PT_CS(%esp), %al
11118 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
11119 cmpl $USER_RPL, %eax
11120 +
11121 +#ifdef CONFIG_PAX_KERNEXEC
11122 + jae resume_userspace
11123 +
11124 + PAX_EXIT_KERNEL
11125 + jmp resume_kernel
11126 +#else
11127 jb resume_kernel # not returning to v8086 or userspace
11128 +#endif
11129
11130 ENTRY(resume_userspace)
11131 LOCKDEP_SYS_EXIT
11132 @@ -423,10 +500,9 @@ sysenter_past_esp:
11133 /*CFI_REL_OFFSET cs, 0*/
11134 /*
11135 * Push current_thread_info()->sysenter_return to the stack.
11136 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
11137 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
11138 */
11139 - pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
11140 + GET_THREAD_INFO(%ebp)
11141 + pushl TI_sysenter_return(%ebp)
11142 CFI_ADJUST_CFA_OFFSET 4
11143 CFI_REL_OFFSET eip, 0
11144
11145 @@ -439,9 +515,19 @@ sysenter_past_esp:
11146 * Load the potential sixth argument from user stack.
11147 * Careful about security.
11148 */
11149 + movl PT_OLDESP(%esp),%ebp
11150 +
11151 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11152 + mov PT_OLDSS(%esp),%ds
11153 +1: movl %ds:(%ebp),%ebp
11154 + push %ss
11155 + pop %ds
11156 +#else
11157 cmpl $__PAGE_OFFSET-3,%ebp
11158 jae syscall_fault
11159 1: movl (%ebp),%ebp
11160 +#endif
11161 +
11162 movl %ebp,PT_EBP(%esp)
11163 .section __ex_table,"a"
11164 .align 4
11165 @@ -464,12 +550,23 @@ sysenter_do_call:
11166 testl $_TIF_ALLWORK_MASK, %ecx
11167 jne sysexit_audit
11168 sysenter_exit:
11169 +
11170 +#ifdef CONFIG_PAX_RANDKSTACK
11171 + pushl %eax
11172 + CFI_ADJUST_CFA_OFFSET 4
11173 + call pax_randomize_kstack
11174 + popl %eax
11175 + CFI_ADJUST_CFA_OFFSET -4
11176 +#endif
11177 +
11178 /* if something modifies registers it must also disable sysexit */
11179 movl PT_EIP(%esp), %edx
11180 movl PT_OLDESP(%esp), %ecx
11181 xorl %ebp,%ebp
11182 TRACE_IRQS_ON
11183 1: mov PT_FS(%esp), %fs
11184 +2: mov PT_DS(%esp), %ds
11185 +3: mov PT_ES(%esp), %es
11186 PTGS_TO_GS
11187 ENABLE_INTERRUPTS_SYSEXIT
11188
11189 @@ -513,11 +610,17 @@ sysexit_audit:
11190
11191 CFI_ENDPROC
11192 .pushsection .fixup,"ax"
11193 -2: movl $0,PT_FS(%esp)
11194 +4: movl $0,PT_FS(%esp)
11195 + jmp 1b
11196 +5: movl $0,PT_DS(%esp)
11197 + jmp 1b
11198 +6: movl $0,PT_ES(%esp)
11199 jmp 1b
11200 .section __ex_table,"a"
11201 .align 4
11202 - .long 1b,2b
11203 + .long 1b,4b
11204 + .long 2b,5b
11205 + .long 3b,6b
11206 .popsection
11207 PTGS_TO_GS_EX
11208 ENDPROC(ia32_sysenter_target)
11209 @@ -551,6 +654,10 @@ syscall_exit:
11210 testl $_TIF_ALLWORK_MASK, %ecx # current->work
11211 jne syscall_exit_work
11212
11213 +#ifdef CONFIG_PAX_RANDKSTACK
11214 + call pax_randomize_kstack
11215 +#endif
11216 +
11217 restore_all:
11218 TRACE_IRQS_IRET
11219 restore_all_notrace:
11220 @@ -615,7 +722,13 @@ ldt_ss:
11221 mov PT_OLDESP(%esp), %eax /* load userspace esp */
11222 mov %dx, %ax /* eax: new kernel esp */
11223 sub %eax, %edx /* offset (low word is 0) */
11224 - PER_CPU(gdt_page, %ebx)
11225 +#ifdef CONFIG_SMP
11226 + movl PER_CPU_VAR(cpu_number), %ebx
11227 + shll $PAGE_SHIFT_asm, %ebx
11228 + addl $cpu_gdt_table, %ebx
11229 +#else
11230 + movl $cpu_gdt_table, %ebx
11231 +#endif
11232 shr $16, %edx
11233 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
11234 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
11235 @@ -655,25 +768,19 @@ work_resched:
11236
11237 work_notifysig: # deal with pending signals and
11238 # notify-resume requests
11239 + movl %esp, %eax
11240 #ifdef CONFIG_VM86
11241 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
11242 - movl %esp, %eax
11243 - jne work_notifysig_v86 # returning to kernel-space or
11244 + jz 1f # returning to kernel-space or
11245 # vm86-space
11246 - xorl %edx, %edx
11247 - call do_notify_resume
11248 - jmp resume_userspace_sig
11249
11250 - ALIGN
11251 -work_notifysig_v86:
11252 pushl %ecx # save ti_flags for do_notify_resume
11253 CFI_ADJUST_CFA_OFFSET 4
11254 call save_v86_state # %eax contains pt_regs pointer
11255 popl %ecx
11256 CFI_ADJUST_CFA_OFFSET -4
11257 movl %eax, %esp
11258 -#else
11259 - movl %esp, %eax
11260 +1:
11261 #endif
11262 xorl %edx, %edx
11263 call do_notify_resume
11264 @@ -708,6 +815,10 @@ END(syscall_exit_work)
11265
11266 RING0_INT_FRAME # can't unwind into user space anyway
11267 syscall_fault:
11268 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11269 + push %ss
11270 + pop %ds
11271 +#endif
11272 GET_THREAD_INFO(%ebp)
11273 movl $-EFAULT,PT_EAX(%esp)
11274 jmp resume_userspace
11275 @@ -791,7 +902,13 @@ ptregs_clone:
11276 * normal stack and adjusts ESP with the matching offset.
11277 */
11278 /* fixup the stack */
11279 - PER_CPU(gdt_page, %ebx)
11280 +#ifdef CONFIG_SMP
11281 + movl PER_CPU_VAR(cpu_number), %ebx
11282 + shll $PAGE_SHIFT_asm, %ebx
11283 + addl $cpu_gdt_table, %ebx
11284 +#else
11285 + movl $cpu_gdt_table, %ebx
11286 +#endif
11287 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
11288 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
11289 shl $16, %eax
11290 @@ -1273,7 +1390,6 @@ return_to_handler:
11291 jmp *%ecx
11292 #endif
11293
11294 -.section .rodata,"a"
11295 #include "syscall_table_32.S"
11296
11297 syscall_table_size=(.-sys_call_table)
11298 @@ -1330,9 +1446,12 @@ error_code:
11299 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
11300 REG_TO_PTGS %ecx
11301 SET_KERNEL_GS %ecx
11302 - movl $(__USER_DS), %ecx
11303 + movl $(__KERNEL_DS), %ecx
11304 movl %ecx, %ds
11305 movl %ecx, %es
11306 +
11307 + PAX_ENTER_KERNEL
11308 +
11309 TRACE_IRQS_OFF
11310 movl %esp,%eax # pt_regs pointer
11311 call *%edi
11312 @@ -1426,6 +1545,9 @@ nmi_stack_correct:
11313 xorl %edx,%edx # zero error code
11314 movl %esp,%eax # pt_regs pointer
11315 call do_nmi
11316 +
11317 + PAX_EXIT_KERNEL
11318 +
11319 jmp restore_all_notrace
11320 CFI_ENDPROC
11321
11322 @@ -1466,6 +1588,9 @@ nmi_espfix_stack:
11323 FIXUP_ESPFIX_STACK # %eax == %esp
11324 xorl %edx,%edx # zero error code
11325 call do_nmi
11326 +
11327 + PAX_EXIT_KERNEL
11328 +
11329 RESTORE_REGS
11330 lss 12+4(%esp), %esp # back to espfix stack
11331 CFI_ADJUST_CFA_OFFSET -24
11332 diff -urNp linux-2.6.35.4/arch/x86/kernel/entry_64.S linux-2.6.35.4/arch/x86/kernel/entry_64.S
11333 --- linux-2.6.35.4/arch/x86/kernel/entry_64.S 2010-08-26 19:47:12.000000000 -0400
11334 +++ linux-2.6.35.4/arch/x86/kernel/entry_64.S 2010-09-17 20:12:09.000000000 -0400
11335 @@ -53,6 +53,7 @@
11336 #include <asm/paravirt.h>
11337 #include <asm/ftrace.h>
11338 #include <asm/percpu.h>
11339 +#include <asm/pgtable.h>
11340
11341 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
11342 #include <linux/elf-em.h>
11343 @@ -174,6 +175,189 @@ ENTRY(native_usergs_sysret64)
11344 ENDPROC(native_usergs_sysret64)
11345 #endif /* CONFIG_PARAVIRT */
11346
11347 + .macro ljmpq sel, off
11348 +#if defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
11349 + .byte 0x48; ljmp *1234f(%rip)
11350 + .pushsection .rodata
11351 + .align 16
11352 + 1234: .quad \off; .word \sel
11353 + .popsection
11354 +#else
11355 + push $\sel
11356 + push $\off
11357 + lretq
11358 +#endif
11359 + .endm
11360 +
11361 +ENTRY(pax_enter_kernel)
11362 +
11363 +#ifdef CONFIG_PAX_KERNEXEC
11364 + push %rdi
11365 +
11366 +#ifdef CONFIG_PARAVIRT
11367 + PV_SAVE_REGS(CLBR_RDI)
11368 +#endif
11369 +
11370 + GET_CR0_INTO_RDI
11371 + bts $16,%rdi
11372 + jnc 1f
11373 + mov %cs,%edi
11374 + cmp $__KERNEL_CS,%edi
11375 + jz 3f
11376 + ljmpq __KERNEL_CS,3f
11377 +1: ljmpq __KERNEXEC_KERNEL_CS,2f
11378 +2: SET_RDI_INTO_CR0
11379 +3:
11380 +
11381 +#ifdef CONFIG_PARAVIRT
11382 + PV_RESTORE_REGS(CLBR_RDI)
11383 +#endif
11384 +
11385 + pop %rdi
11386 +#endif
11387 +
11388 + retq
11389 +ENDPROC(pax_enter_kernel)
11390 +
11391 +ENTRY(pax_exit_kernel)
11392 +
11393 +#ifdef CONFIG_PAX_KERNEXEC
11394 + push %rdi
11395 +
11396 +#ifdef CONFIG_PARAVIRT
11397 + PV_SAVE_REGS(CLBR_RDI)
11398 +#endif
11399 +
11400 + mov %cs,%rdi
11401 + cmp $__KERNEXEC_KERNEL_CS,%edi
11402 + jnz 2f
11403 + GET_CR0_INTO_RDI
11404 + btr $16,%rdi
11405 + ljmpq __KERNEL_CS,1f
11406 +1: SET_RDI_INTO_CR0
11407 +2:
11408 +
11409 +#ifdef CONFIG_PARAVIRT
11410 + PV_RESTORE_REGS(CLBR_RDI);
11411 +#endif
11412 +
11413 + pop %rdi
11414 +#endif
11415 +
11416 + retq
11417 +ENDPROC(pax_exit_kernel)
11418 +
11419 +ENTRY(pax_enter_kernel_user)
11420 +
11421 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11422 + push %rdi
11423 + push %rbx
11424 +
11425 +#ifdef CONFIG_PARAVIRT
11426 + PV_SAVE_REGS(CLBR_RDI)
11427 +#endif
11428 +
11429 + GET_CR3_INTO_RDI
11430 + mov %rdi,%rbx
11431 + add $__START_KERNEL_map,%rbx
11432 + sub phys_base(%rip),%rbx
11433 +
11434 +#ifdef CONFIG_PARAVIRT
11435 + push %rdi
11436 + cmpl $0, pv_info+PARAVIRT_enabled
11437 + jz 1f
11438 + i = 0
11439 + .rept USER_PGD_PTRS
11440 + mov i*8(%rbx),%rsi
11441 + mov $0,%sil
11442 + lea i*8(%rbx),%rdi
11443 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
11444 + i = i + 1
11445 + .endr
11446 + jmp 2f
11447 +1:
11448 +#endif
11449 +
11450 + i = 0
11451 + .rept USER_PGD_PTRS
11452 + movb $0,i*8(%rbx)
11453 + i = i + 1
11454 + .endr
11455 +
11456 +#ifdef CONFIG_PARAVIRT
11457 +2: pop %rdi
11458 +#endif
11459 + SET_RDI_INTO_CR3
11460 +
11461 +#ifdef CONFIG_PAX_KERNEXEC
11462 + GET_CR0_INTO_RDI
11463 + bts $16,%rdi
11464 + SET_RDI_INTO_CR0
11465 +#endif
11466 +
11467 +#ifdef CONFIG_PARAVIRT
11468 + PV_RESTORE_REGS(CLBR_RDI)
11469 +#endif
11470 +
11471 + pop %rbx
11472 + pop %rdi
11473 +#endif
11474 +
11475 + retq
11476 +ENDPROC(pax_enter_kernel_user)
11477 +
11478 +ENTRY(pax_exit_kernel_user)
11479 +
11480 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11481 + push %rdi
11482 +
11483 +#ifdef CONFIG_PARAVIRT
11484 + push %rbx
11485 + PV_SAVE_REGS(CLBR_RDI)
11486 +#endif
11487 +
11488 +#ifdef CONFIG_PAX_KERNEXEC
11489 + GET_CR0_INTO_RDI
11490 + btr $16,%rdi
11491 + SET_RDI_INTO_CR0
11492 +#endif
11493 +
11494 + GET_CR3_INTO_RDI
11495 + add $__START_KERNEL_map,%rdi
11496 + sub phys_base(%rip),%rdi
11497 +
11498 +#ifdef CONFIG_PARAVIRT
11499 + cmpl $0, pv_info+PARAVIRT_enabled
11500 + jz 1f
11501 + mov %rdi,%rbx
11502 + i = 0
11503 + .rept USER_PGD_PTRS
11504 + mov i*8(%rbx),%rsi
11505 + mov $0x67,%sil
11506 + lea i*8(%rbx),%rdi
11507 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
11508 + i = i + 1
11509 + .endr
11510 + jmp 2f
11511 +1:
11512 +#endif
11513 +
11514 + i = 0
11515 + .rept USER_PGD_PTRS
11516 + movb $0x67,i*8(%rdi)
11517 + i = i + 1
11518 + .endr
11519 +
11520 +#ifdef CONFIG_PARAVIRT
11521 +2: PV_RESTORE_REGS(CLBR_RDI)
11522 + pop %rbx
11523 +#endif
11524 +
11525 + pop %rdi
11526 +#endif
11527 +
11528 + retq
11529 +ENDPROC(pax_exit_kernel_user)
11530
11531 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
11532 #ifdef CONFIG_TRACE_IRQFLAGS
11533 @@ -317,7 +501,7 @@ ENTRY(save_args)
11534 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
11535 movq_cfi rbp, 8 /* push %rbp */
11536 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
11537 - testl $3, CS(%rdi)
11538 + testb $3, CS(%rdi)
11539 je 1f
11540 SWAPGS
11541 /*
11542 @@ -409,7 +593,7 @@ ENTRY(ret_from_fork)
11543
11544 RESTORE_REST
11545
11546 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
11547 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
11548 je int_ret_from_sys_call
11549
11550 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
11551 @@ -468,6 +652,11 @@ ENTRY(system_call_after_swapgs)
11552
11553 movq %rsp,PER_CPU_VAR(old_rsp)
11554 movq PER_CPU_VAR(kernel_stack),%rsp
11555 +
11556 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11557 + call pax_enter_kernel_user
11558 +#endif
11559 +
11560 /*
11561 * No need to follow this irqs off/on section - it's straight
11562 * and short:
11563 @@ -502,6 +691,11 @@ sysret_check:
11564 andl %edi,%edx
11565 jnz sysret_careful
11566 CFI_REMEMBER_STATE
11567 +
11568 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11569 + call pax_exit_kernel_user
11570 +#endif
11571 +
11572 /*
11573 * sysretq will re-enable interrupts:
11574 */
11575 @@ -613,7 +807,7 @@ tracesys:
11576 GLOBAL(int_ret_from_sys_call)
11577 DISABLE_INTERRUPTS(CLBR_NONE)
11578 TRACE_IRQS_OFF
11579 - testl $3,CS-ARGOFFSET(%rsp)
11580 + testb $3,CS-ARGOFFSET(%rsp)
11581 je retint_restore_args
11582 movl $_TIF_ALLWORK_MASK,%edi
11583 /* edi: mask to check */
11584 @@ -800,6 +994,16 @@ END(interrupt)
11585 CFI_ADJUST_CFA_OFFSET 10*8
11586 call save_args
11587 PARTIAL_FRAME 0
11588 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11589 + testb $3, CS(%rdi)
11590 + jnz 1f
11591 + call pax_enter_kernel
11592 + jmp 2f
11593 +1: call pax_enter_kernel_user
11594 +2:
11595 +#else
11596 + call pax_enter_kernel
11597 +#endif
11598 call \func
11599 .endm
11600
11601 @@ -826,7 +1030,7 @@ ret_from_intr:
11602 CFI_ADJUST_CFA_OFFSET -8
11603 exit_intr:
11604 GET_THREAD_INFO(%rcx)
11605 - testl $3,CS-ARGOFFSET(%rsp)
11606 + testb $3,CS-ARGOFFSET(%rsp)
11607 je retint_kernel
11608
11609 /* Interrupt came from user space */
11610 @@ -848,12 +1052,18 @@ retint_swapgs: /* return to user-space
11611 * The iretq could re-enable interrupts:
11612 */
11613 DISABLE_INTERRUPTS(CLBR_ANY)
11614 +
11615 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11616 + call pax_exit_kernel_user
11617 +#endif
11618 +
11619 TRACE_IRQS_IRETQ
11620 SWAPGS
11621 jmp restore_args
11622
11623 retint_restore_args: /* return to kernel space */
11624 DISABLE_INTERRUPTS(CLBR_ANY)
11625 + call pax_exit_kernel
11626 /*
11627 * The iretq could re-enable interrupts:
11628 */
11629 @@ -1040,6 +1250,16 @@ ENTRY(\sym)
11630 CFI_ADJUST_CFA_OFFSET 15*8
11631 call error_entry
11632 DEFAULT_FRAME 0
11633 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11634 + testb $3, CS(%rsp)
11635 + jnz 1f
11636 + call pax_enter_kernel
11637 + jmp 2f
11638 +1: call pax_enter_kernel_user
11639 +2:
11640 +#else
11641 + call pax_enter_kernel
11642 +#endif
11643 movq %rsp,%rdi /* pt_regs pointer */
11644 xorl %esi,%esi /* no error code */
11645 call \do_sym
11646 @@ -1057,6 +1277,16 @@ ENTRY(\sym)
11647 subq $15*8, %rsp
11648 call save_paranoid
11649 TRACE_IRQS_OFF
11650 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11651 + testb $3, CS(%rsp)
11652 + jnz 1f
11653 + call pax_enter_kernel
11654 + jmp 2f
11655 +1: call pax_enter_kernel_user
11656 +2:
11657 +#else
11658 + call pax_enter_kernel
11659 +#endif
11660 movq %rsp,%rdi /* pt_regs pointer */
11661 xorl %esi,%esi /* no error code */
11662 call \do_sym
11663 @@ -1074,9 +1304,24 @@ ENTRY(\sym)
11664 subq $15*8, %rsp
11665 call save_paranoid
11666 TRACE_IRQS_OFF
11667 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11668 + testb $3, CS(%rsp)
11669 + jnz 1f
11670 + call pax_enter_kernel
11671 + jmp 2f
11672 +1: call pax_enter_kernel_user
11673 +2:
11674 +#else
11675 + call pax_enter_kernel
11676 +#endif
11677 movq %rsp,%rdi /* pt_regs pointer */
11678 xorl %esi,%esi /* no error code */
11679 - PER_CPU(init_tss, %r12)
11680 +#ifdef CONFIG_SMP
11681 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
11682 + lea init_tss(%r12), %r12
11683 +#else
11684 + lea init_tss(%rip), %r12
11685 +#endif
11686 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%r12)
11687 call \do_sym
11688 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%r12)
11689 @@ -1093,6 +1338,16 @@ ENTRY(\sym)
11690 CFI_ADJUST_CFA_OFFSET 15*8
11691 call error_entry
11692 DEFAULT_FRAME 0
11693 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11694 + testb $3, CS(%rsp)
11695 + jnz 1f
11696 + call pax_enter_kernel
11697 + jmp 2f
11698 +1: call pax_enter_kernel_user
11699 +2:
11700 +#else
11701 + call pax_enter_kernel
11702 +#endif
11703 movq %rsp,%rdi /* pt_regs pointer */
11704 movq ORIG_RAX(%rsp),%rsi /* get error code */
11705 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
11706 @@ -1112,6 +1367,16 @@ ENTRY(\sym)
11707 call save_paranoid
11708 DEFAULT_FRAME 0
11709 TRACE_IRQS_OFF
11710 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11711 + testb $3, CS(%rsp)
11712 + jnz 1f
11713 + call pax_enter_kernel
11714 + jmp 2f
11715 +1: call pax_enter_kernel_user
11716 +2:
11717 +#else
11718 + call pax_enter_kernel
11719 +#endif
11720 movq %rsp,%rdi /* pt_regs pointer */
11721 movq ORIG_RAX(%rsp),%rsi /* get error code */
11722 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
11723 @@ -1370,14 +1635,27 @@ ENTRY(paranoid_exit)
11724 TRACE_IRQS_OFF
11725 testl %ebx,%ebx /* swapgs needed? */
11726 jnz paranoid_restore
11727 - testl $3,CS(%rsp)
11728 + testb $3,CS(%rsp)
11729 jnz paranoid_userspace
11730 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11731 + call pax_exit_kernel
11732 + TRACE_IRQS_IRETQ 0
11733 + SWAPGS_UNSAFE_STACK
11734 + RESTORE_ALL 8
11735 + jmp irq_return
11736 +#endif
11737 paranoid_swapgs:
11738 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11739 + call pax_exit_kernel_user
11740 +#else
11741 + call pax_exit_kernel
11742 +#endif
11743 TRACE_IRQS_IRETQ 0
11744 SWAPGS_UNSAFE_STACK
11745 RESTORE_ALL 8
11746 jmp irq_return
11747 paranoid_restore:
11748 + call pax_exit_kernel
11749 TRACE_IRQS_IRETQ 0
11750 RESTORE_ALL 8
11751 jmp irq_return
11752 @@ -1435,7 +1713,7 @@ ENTRY(error_entry)
11753 movq_cfi r14, R14+8
11754 movq_cfi r15, R15+8
11755 xorl %ebx,%ebx
11756 - testl $3,CS+8(%rsp)
11757 + testb $3,CS+8(%rsp)
11758 je error_kernelspace
11759 error_swapgs:
11760 SWAPGS
11761 @@ -1499,6 +1777,16 @@ ENTRY(nmi)
11762 CFI_ADJUST_CFA_OFFSET 15*8
11763 call save_paranoid
11764 DEFAULT_FRAME 0
11765 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11766 + testb $3, CS(%rsp)
11767 + jnz 1f
11768 + call pax_enter_kernel
11769 + jmp 2f
11770 +1: call pax_enter_kernel_user
11771 +2:
11772 +#else
11773 + call pax_enter_kernel
11774 +#endif
11775 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
11776 movq %rsp,%rdi
11777 movq $-1,%rsi
11778 @@ -1509,11 +1797,12 @@ ENTRY(nmi)
11779 DISABLE_INTERRUPTS(CLBR_NONE)
11780 testl %ebx,%ebx /* swapgs needed? */
11781 jnz nmi_restore
11782 - testl $3,CS(%rsp)
11783 + testb $3,CS(%rsp)
11784 jnz nmi_userspace
11785 nmi_swapgs:
11786 SWAPGS_UNSAFE_STACK
11787 nmi_restore:
11788 + call pax_exit_kernel
11789 RESTORE_ALL 8
11790 jmp irq_return
11791 nmi_userspace:
11792 diff -urNp linux-2.6.35.4/arch/x86/kernel/ftrace.c linux-2.6.35.4/arch/x86/kernel/ftrace.c
11793 --- linux-2.6.35.4/arch/x86/kernel/ftrace.c 2010-08-26 19:47:12.000000000 -0400
11794 +++ linux-2.6.35.4/arch/x86/kernel/ftrace.c 2010-09-17 20:12:09.000000000 -0400
11795 @@ -174,7 +174,9 @@ void ftrace_nmi_enter(void)
11796
11797 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
11798 smp_rmb();
11799 + pax_open_kernel();
11800 ftrace_mod_code();
11801 + pax_close_kernel();
11802 atomic_inc(&nmi_update_count);
11803 }
11804 /* Must have previous changes seen before executions */
11805 @@ -260,7 +262,7 @@ do_ftrace_mod_code(unsigned long ip, voi
11806
11807
11808
11809 -static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
11810 +static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
11811
11812 static unsigned char *ftrace_nop_replace(void)
11813 {
11814 @@ -273,6 +275,8 @@ ftrace_modify_code(unsigned long ip, uns
11815 {
11816 unsigned char replaced[MCOUNT_INSN_SIZE];
11817
11818 + ip = ktla_ktva(ip);
11819 +
11820 /*
11821 * Note: Due to modules and __init, code can
11822 * disappear and change, we need to protect against faulting
11823 @@ -329,7 +333,7 @@ int ftrace_update_ftrace_func(ftrace_fun
11824 unsigned char old[MCOUNT_INSN_SIZE], *new;
11825 int ret;
11826
11827 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
11828 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
11829 new = ftrace_call_replace(ip, (unsigned long)func);
11830 ret = ftrace_modify_code(ip, old, new);
11831
11832 @@ -382,15 +386,15 @@ int __init ftrace_dyn_arch_init(void *da
11833 switch (faulted) {
11834 case 0:
11835 pr_info("converting mcount calls to 0f 1f 44 00 00\n");
11836 - memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
11837 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
11838 break;
11839 case 1:
11840 pr_info("converting mcount calls to 66 66 66 66 90\n");
11841 - memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
11842 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
11843 break;
11844 case 2:
11845 pr_info("converting mcount calls to jmp . + 5\n");
11846 - memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
11847 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
11848 break;
11849 }
11850
11851 @@ -411,6 +415,8 @@ static int ftrace_mod_jmp(unsigned long
11852 {
11853 unsigned char code[MCOUNT_INSN_SIZE];
11854
11855 + ip = ktla_ktva(ip);
11856 +
11857 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
11858 return -EFAULT;
11859
11860 diff -urNp linux-2.6.35.4/arch/x86/kernel/head32.c linux-2.6.35.4/arch/x86/kernel/head32.c
11861 --- linux-2.6.35.4/arch/x86/kernel/head32.c 2010-08-26 19:47:12.000000000 -0400
11862 +++ linux-2.6.35.4/arch/x86/kernel/head32.c 2010-09-17 20:12:09.000000000 -0400
11863 @@ -17,6 +17,7 @@
11864 #include <asm/apic.h>
11865 #include <asm/io_apic.h>
11866 #include <asm/bios_ebda.h>
11867 +#include <asm/boot.h>
11868
11869 static void __init i386_default_early_setup(void)
11870 {
11871 @@ -40,7 +41,7 @@ void __init i386_start_kernel(void)
11872 "EX TRAMPOLINE");
11873 #endif
11874
11875 - reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
11876 + reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
11877
11878 #ifdef CONFIG_BLK_DEV_INITRD
11879 /* Reserve INITRD */
11880 diff -urNp linux-2.6.35.4/arch/x86/kernel/head_32.S linux-2.6.35.4/arch/x86/kernel/head_32.S
11881 --- linux-2.6.35.4/arch/x86/kernel/head_32.S 2010-08-26 19:47:12.000000000 -0400
11882 +++ linux-2.6.35.4/arch/x86/kernel/head_32.S 2010-09-17 20:12:09.000000000 -0400
11883 @@ -25,6 +25,12 @@
11884 /* Physical address */
11885 #define pa(X) ((X) - __PAGE_OFFSET)
11886
11887 +#ifdef CONFIG_PAX_KERNEXEC
11888 +#define ta(X) (X)
11889 +#else
11890 +#define ta(X) ((X) - __PAGE_OFFSET)
11891 +#endif
11892 +
11893 /*
11894 * References to members of the new_cpu_data structure.
11895 */
11896 @@ -54,11 +60,7 @@
11897 * and small than max_low_pfn, otherwise will waste some page table entries
11898 */
11899
11900 -#if PTRS_PER_PMD > 1
11901 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
11902 -#else
11903 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
11904 -#endif
11905 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
11906
11907 /* Enough space to fit pagetables for the low memory linear map */
11908 MAPPING_BEYOND_END = \
11909 @@ -75,6 +77,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
11910 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
11911
11912 /*
11913 + * Real beginning of normal "text" segment
11914 + */
11915 +ENTRY(stext)
11916 +ENTRY(_stext)
11917 +
11918 +/*
11919 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
11920 * %esi points to the real-mode code as a 32-bit pointer.
11921 * CS and DS must be 4 GB flat segments, but we don't depend on
11922 @@ -82,6 +90,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
11923 * can.
11924 */
11925 __HEAD
11926 +
11927 +#ifdef CONFIG_PAX_KERNEXEC
11928 + jmp startup_32
11929 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
11930 +.fill PAGE_SIZE-5,1,0xcc
11931 +#endif
11932 +
11933 ENTRY(startup_32)
11934 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
11935 us to not reload segments */
11936 @@ -99,6 +114,55 @@ ENTRY(startup_32)
11937 movl %eax,%gs
11938 2:
11939
11940 +#ifdef CONFIG_SMP
11941 + movl $pa(cpu_gdt_table),%edi
11942 + movl $__per_cpu_load,%eax
11943 + movw %ax,__KERNEL_PERCPU + 2(%edi)
11944 + rorl $16,%eax
11945 + movb %al,__KERNEL_PERCPU + 4(%edi)
11946 + movb %ah,__KERNEL_PERCPU + 7(%edi)
11947 + movl $__per_cpu_end - 1,%eax
11948 + subl $__per_cpu_start,%eax
11949 + movw %ax,__KERNEL_PERCPU + 0(%edi)
11950 +#endif
11951 +
11952 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11953 + movl $NR_CPUS,%ecx
11954 + movl $pa(cpu_gdt_table),%edi
11955 +1:
11956 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
11957 + addl $PAGE_SIZE_asm,%edi
11958 + loop 1b
11959 +#endif
11960 +
11961 +#ifdef CONFIG_PAX_KERNEXEC
11962 + movl $pa(boot_gdt),%edi
11963 + movl $__LOAD_PHYSICAL_ADDR,%eax
11964 + movw %ax,__BOOT_CS + 2(%edi)
11965 + rorl $16,%eax
11966 + movb %al,__BOOT_CS + 4(%edi)
11967 + movb %ah,__BOOT_CS + 7(%edi)
11968 + rorl $16,%eax
11969 +
11970 + ljmp $(__BOOT_CS),$1f
11971 +1:
11972 +
11973 + movl $NR_CPUS,%ecx
11974 + movl $pa(cpu_gdt_table),%edi
11975 + addl $__PAGE_OFFSET,%eax
11976 +1:
11977 + movw %ax,__KERNEL_CS + 2(%edi)
11978 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
11979 + rorl $16,%eax
11980 + movb %al,__KERNEL_CS + 4(%edi)
11981 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
11982 + movb %ah,__KERNEL_CS + 7(%edi)
11983 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
11984 + rorl $16,%eax
11985 + addl $PAGE_SIZE_asm,%edi
11986 + loop 1b
11987 +#endif
11988 +
11989 /*
11990 * Clear BSS first so that there are no surprises...
11991 */
11992 @@ -142,9 +206,7 @@ ENTRY(startup_32)
11993 cmpl $num_subarch_entries, %eax
11994 jae bad_subarch
11995
11996 - movl pa(subarch_entries)(,%eax,4), %eax
11997 - subl $__PAGE_OFFSET, %eax
11998 - jmp *%eax
11999 + jmp *pa(subarch_entries)(,%eax,4)
12000
12001 bad_subarch:
12002 WEAK(lguest_entry)
12003 @@ -156,10 +218,10 @@ WEAK(xen_entry)
12004 __INITDATA
12005
12006 subarch_entries:
12007 - .long default_entry /* normal x86/PC */
12008 - .long lguest_entry /* lguest hypervisor */
12009 - .long xen_entry /* Xen hypervisor */
12010 - .long default_entry /* Moorestown MID */
12011 + .long ta(default_entry) /* normal x86/PC */
12012 + .long ta(lguest_entry) /* lguest hypervisor */
12013 + .long ta(xen_entry) /* Xen hypervisor */
12014 + .long ta(default_entry) /* Moorestown MID */
12015 num_subarch_entries = (. - subarch_entries) / 4
12016 .previous
12017 #endif /* CONFIG_PARAVIRT */
12018 @@ -220,8 +282,11 @@ default_entry:
12019 movl %eax, pa(max_pfn_mapped)
12020
12021 /* Do early initialization of the fixmap area */
12022 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
12023 - movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
12024 +#ifdef CONFIG_COMPAT_VDSO
12025 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
12026 +#else
12027 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
12028 +#endif
12029 #else /* Not PAE */
12030
12031 page_pde_offset = (__PAGE_OFFSET >> 20);
12032 @@ -251,8 +316,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12033 movl %eax, pa(max_pfn_mapped)
12034
12035 /* Do early initialization of the fixmap area */
12036 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
12037 - movl %eax,pa(swapper_pg_dir+0xffc)
12038 +#ifdef CONFIG_COMPAT_VDSO
12039 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
12040 +#else
12041 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
12042 +#endif
12043 #endif
12044 jmp 3f
12045 /*
12046 @@ -299,6 +367,7 @@ ENTRY(startup_32_smp)
12047 orl %edx,%eax
12048 movl %eax,%cr4
12049
12050 +#ifdef CONFIG_X86_PAE
12051 testb $X86_CR4_PAE, %al # check if PAE is enabled
12052 jz 6f
12053
12054 @@ -323,6 +392,9 @@ ENTRY(startup_32_smp)
12055 /* Make changes effective */
12056 wrmsr
12057
12058 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
12059 +#endif
12060 +
12061 6:
12062
12063 /*
12064 @@ -348,9 +420,7 @@ ENTRY(startup_32_smp)
12065
12066 #ifdef CONFIG_SMP
12067 cmpb $0, ready
12068 - jz 1f /* Initial CPU cleans BSS */
12069 - jmp checkCPUtype
12070 -1:
12071 + jnz checkCPUtype /* Initial CPU cleans BSS */
12072 #endif /* CONFIG_SMP */
12073
12074 /*
12075 @@ -428,7 +498,7 @@ is386: movl $2,%ecx # set MP
12076 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
12077 movl %eax,%ss # after changing gdt.
12078
12079 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
12080 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
12081 movl %eax,%ds
12082 movl %eax,%es
12083
12084 @@ -442,8 +512,11 @@ is386: movl $2,%ecx # set MP
12085 */
12086 cmpb $0,ready
12087 jne 1f
12088 - movl $gdt_page,%eax
12089 + movl $cpu_gdt_table,%eax
12090 movl $stack_canary,%ecx
12091 +#ifdef CONFIG_SMP
12092 + addl $__per_cpu_load,%ecx
12093 +#endif
12094 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
12095 shrl $16, %ecx
12096 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
12097 @@ -461,10 +534,6 @@ is386: movl $2,%ecx # set MP
12098 #ifdef CONFIG_SMP
12099 movb ready, %cl
12100 movb $1, ready
12101 - cmpb $0,%cl # the first CPU calls start_kernel
12102 - je 1f
12103 - movl (stack_start), %esp
12104 -1:
12105 #endif /* CONFIG_SMP */
12106 jmp *(initial_code)
12107
12108 @@ -550,22 +619,22 @@ early_page_fault:
12109 jmp early_fault
12110
12111 early_fault:
12112 - cld
12113 #ifdef CONFIG_PRINTK
12114 + cmpl $1,%ss:early_recursion_flag
12115 + je hlt_loop
12116 + incl %ss:early_recursion_flag
12117 + cld
12118 pusha
12119 movl $(__KERNEL_DS),%eax
12120 movl %eax,%ds
12121 movl %eax,%es
12122 - cmpl $2,early_recursion_flag
12123 - je hlt_loop
12124 - incl early_recursion_flag
12125 movl %cr2,%eax
12126 pushl %eax
12127 pushl %edx /* trapno */
12128 pushl $fault_msg
12129 call printk
12130 +; call dump_stack
12131 #endif
12132 - call dump_stack
12133 hlt_loop:
12134 hlt
12135 jmp hlt_loop
12136 @@ -573,8 +642,11 @@ hlt_loop:
12137 /* This is the default interrupt "handler" :-) */
12138 ALIGN
12139 ignore_int:
12140 - cld
12141 #ifdef CONFIG_PRINTK
12142 + cmpl $2,%ss:early_recursion_flag
12143 + je hlt_loop
12144 + incl %ss:early_recursion_flag
12145 + cld
12146 pushl %eax
12147 pushl %ecx
12148 pushl %edx
12149 @@ -583,9 +655,6 @@ ignore_int:
12150 movl $(__KERNEL_DS),%eax
12151 movl %eax,%ds
12152 movl %eax,%es
12153 - cmpl $2,early_recursion_flag
12154 - je hlt_loop
12155 - incl early_recursion_flag
12156 pushl 16(%esp)
12157 pushl 24(%esp)
12158 pushl 32(%esp)
12159 @@ -612,27 +681,38 @@ ENTRY(initial_code)
12160 /*
12161 * BSS section
12162 */
12163 -__PAGE_ALIGNED_BSS
12164 - .align PAGE_SIZE_asm
12165 #ifdef CONFIG_X86_PAE
12166 +.section .swapper_pg_pmd,"a",@progbits
12167 swapper_pg_pmd:
12168 .fill 1024*KPMDS,4,0
12169 #else
12170 +.section .swapper_pg_dir,"a",@progbits
12171 ENTRY(swapper_pg_dir)
12172 .fill 1024,4,0
12173 #endif
12174 +
12175 swapper_pg_fixmap:
12176 .fill 1024,4,0
12177 +
12178 +.section .empty_zero_page,"a",@progbits
12179 ENTRY(empty_zero_page)
12180 .fill 4096,1,0
12181
12182 /*
12183 + * The IDT has to be page-aligned to simplify the Pentium
12184 + * F0 0F bug workaround.. We have a special link segment
12185 + * for this.
12186 + */
12187 +.section .idt,"a",@progbits
12188 +ENTRY(idt_table)
12189 + .fill 256,8,0
12190 +
12191 +/*
12192 * This starts the data section.
12193 */
12194 #ifdef CONFIG_X86_PAE
12195 -__PAGE_ALIGNED_DATA
12196 - /* Page-aligned for the benefit of paravirt? */
12197 - .align PAGE_SIZE_asm
12198 +.section .swapper_pg_dir,"a",@progbits
12199 +
12200 ENTRY(swapper_pg_dir)
12201 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
12202 # if KPMDS == 3
12203 @@ -651,15 +731,24 @@ ENTRY(swapper_pg_dir)
12204 # error "Kernel PMDs should be 1, 2 or 3"
12205 # endif
12206 .align PAGE_SIZE_asm /* needs to be page-sized too */
12207 +
12208 +#ifdef CONFIG_PAX_PER_CPU_PGD
12209 +ENTRY(cpu_pgd)
12210 + .rept NR_CPUS
12211 + .fill 4,8,0
12212 + .endr
12213 +#endif
12214 +
12215 #endif
12216
12217 .data
12218 ENTRY(stack_start)
12219 - .long init_thread_union+THREAD_SIZE
12220 + .long init_thread_union+THREAD_SIZE-8
12221 .long __BOOT_DS
12222
12223 ready: .byte 0
12224
12225 +.section .rodata,"a",@progbits
12226 early_recursion_flag:
12227 .long 0
12228
12229 @@ -695,7 +784,7 @@ fault_msg:
12230 .word 0 # 32 bit align gdt_desc.address
12231 boot_gdt_descr:
12232 .word __BOOT_DS+7
12233 - .long boot_gdt - __PAGE_OFFSET
12234 + .long pa(boot_gdt)
12235
12236 .word 0 # 32-bit align idt_desc.address
12237 idt_descr:
12238 @@ -706,7 +795,7 @@ idt_descr:
12239 .word 0 # 32 bit align gdt_desc.address
12240 ENTRY(early_gdt_descr)
12241 .word GDT_ENTRIES*8-1
12242 - .long gdt_page /* Overwritten for secondary CPUs */
12243 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
12244
12245 /*
12246 * The boot_gdt must mirror the equivalent in setup.S and is
12247 @@ -715,5 +804,65 @@ ENTRY(early_gdt_descr)
12248 .align L1_CACHE_BYTES
12249 ENTRY(boot_gdt)
12250 .fill GDT_ENTRY_BOOT_CS,8,0
12251 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
12252 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
12253 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
12254 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
12255 +
12256 + .align PAGE_SIZE_asm
12257 +ENTRY(cpu_gdt_table)
12258 + .rept NR_CPUS
12259 + .quad 0x0000000000000000 /* NULL descriptor */
12260 + .quad 0x0000000000000000 /* 0x0b reserved */
12261 + .quad 0x0000000000000000 /* 0x13 reserved */
12262 + .quad 0x0000000000000000 /* 0x1b reserved */
12263 +
12264 +#ifdef CONFIG_PAX_KERNEXEC
12265 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
12266 +#else
12267 + .quad 0x0000000000000000 /* 0x20 unused */
12268 +#endif
12269 +
12270 + .quad 0x0000000000000000 /* 0x28 unused */
12271 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
12272 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
12273 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
12274 + .quad 0x0000000000000000 /* 0x4b reserved */
12275 + .quad 0x0000000000000000 /* 0x53 reserved */
12276 + .quad 0x0000000000000000 /* 0x5b reserved */
12277 +
12278 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
12279 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
12280 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
12281 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
12282 +
12283 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
12284 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
12285 +
12286 + /*
12287 + * Segments used for calling PnP BIOS have byte granularity.
12288 + * The code segments and data segments have fixed 64k limits,
12289 + * the transfer segment sizes are set at run time.
12290 + */
12291 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
12292 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
12293 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
12294 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
12295 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
12296 +
12297 + /*
12298 + * The APM segments have byte granularity and their bases
12299 + * are set at run time. All have 64k limits.
12300 + */
12301 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
12302 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
12303 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
12304 +
12305 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
12306 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
12307 + .quad 0x0040910000000018 /* 0xe0 - STACK_CANARY */
12308 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
12309 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
12310 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
12311 +
12312 + /* Be sure this is zeroed to avoid false validations in Xen */
12313 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
12314 + .endr
12315 diff -urNp linux-2.6.35.4/arch/x86/kernel/head_64.S linux-2.6.35.4/arch/x86/kernel/head_64.S
12316 --- linux-2.6.35.4/arch/x86/kernel/head_64.S 2010-08-26 19:47:12.000000000 -0400
12317 +++ linux-2.6.35.4/arch/x86/kernel/head_64.S 2010-09-17 20:12:09.000000000 -0400
12318 @@ -19,6 +19,7 @@
12319 #include <asm/cache.h>
12320 #include <asm/processor-flags.h>
12321 #include <asm/percpu.h>
12322 +#include <asm/cpufeature.h>
12323
12324 #ifdef CONFIG_PARAVIRT
12325 #include <asm/asm-offsets.h>
12326 @@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
12327 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
12328 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
12329 L3_START_KERNEL = pud_index(__START_KERNEL_map)
12330 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
12331 +L3_VMALLOC_START = pud_index(VMALLOC_START)
12332 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
12333 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
12334
12335 .text
12336 __HEAD
12337 @@ -85,35 +90,22 @@ startup_64:
12338 */
12339 addq %rbp, init_level4_pgt + 0(%rip)
12340 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
12341 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
12342 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
12343 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
12344
12345 addq %rbp, level3_ident_pgt + 0(%rip)
12346 +#ifndef CONFIG_XEN
12347 + addq %rbp, level3_ident_pgt + 8(%rip)
12348 +#endif
12349
12350 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
12351 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
12352 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
12353
12354 - addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
12355 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
12356 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
12357
12358 - /* Add an Identity mapping if I am above 1G */
12359 - leaq _text(%rip), %rdi
12360 - andq $PMD_PAGE_MASK, %rdi
12361 -
12362 - movq %rdi, %rax
12363 - shrq $PUD_SHIFT, %rax
12364 - andq $(PTRS_PER_PUD - 1), %rax
12365 - jz ident_complete
12366 -
12367 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
12368 - leaq level3_ident_pgt(%rip), %rbx
12369 - movq %rdx, 0(%rbx, %rax, 8)
12370 -
12371 - movq %rdi, %rax
12372 - shrq $PMD_SHIFT, %rax
12373 - andq $(PTRS_PER_PMD - 1), %rax
12374 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
12375 - leaq level2_spare_pgt(%rip), %rbx
12376 - movq %rdx, 0(%rbx, %rax, 8)
12377 -ident_complete:
12378 + addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
12379 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
12380
12381 /*
12382 * Fixup the kernel text+data virtual addresses. Note that
12383 @@ -161,8 +153,8 @@ ENTRY(secondary_startup_64)
12384 * after the boot processor executes this code.
12385 */
12386
12387 - /* Enable PAE mode and PGE */
12388 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
12389 + /* Enable PAE mode and PSE/PGE */
12390 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
12391 movq %rax, %cr4
12392
12393 /* Setup early boot stage 4 level pagetables. */
12394 @@ -184,9 +176,14 @@ ENTRY(secondary_startup_64)
12395 movl $MSR_EFER, %ecx
12396 rdmsr
12397 btsl $_EFER_SCE, %eax /* Enable System Call */
12398 - btl $20,%edi /* No Execute supported? */
12399 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
12400 jnc 1f
12401 btsl $_EFER_NX, %eax
12402 + leaq init_level4_pgt(%rip), %rdi
12403 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
12404 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
12405 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
12406 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
12407 1: wrmsr /* Make changes effective */
12408
12409 /* Setup cr0 */
12410 @@ -271,7 +268,7 @@ ENTRY(secondary_startup_64)
12411 bad_address:
12412 jmp bad_address
12413
12414 - .section ".init.text","ax"
12415 + __INIT
12416 #ifdef CONFIG_EARLY_PRINTK
12417 .globl early_idt_handlers
12418 early_idt_handlers:
12419 @@ -316,18 +313,23 @@ ENTRY(early_idt_handler)
12420 #endif /* EARLY_PRINTK */
12421 1: hlt
12422 jmp 1b
12423 + .previous
12424
12425 #ifdef CONFIG_EARLY_PRINTK
12426 + __INITDATA
12427 early_recursion_flag:
12428 .long 0
12429 + .previous
12430
12431 + .section .rodata,"a",@progbits
12432 early_idt_msg:
12433 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
12434 early_idt_ripmsg:
12435 .asciz "RIP %s\n"
12436 -#endif /* CONFIG_EARLY_PRINTK */
12437 .previous
12438 +#endif /* CONFIG_EARLY_PRINTK */
12439
12440 + .section .rodata,"a",@progbits
12441 #define NEXT_PAGE(name) \
12442 .balign PAGE_SIZE; \
12443 ENTRY(name)
12444 @@ -351,13 +353,36 @@ NEXT_PAGE(init_level4_pgt)
12445 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
12446 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
12447 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
12448 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
12449 + .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
12450 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
12451 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
12452 .org init_level4_pgt + L4_START_KERNEL*8, 0
12453 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
12454 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
12455
12456 +#ifdef CONFIG_PAX_PER_CPU_PGD
12457 +NEXT_PAGE(cpu_pgd)
12458 + .rept NR_CPUS
12459 + .fill 512,8,0
12460 + .endr
12461 +#endif
12462 +
12463 NEXT_PAGE(level3_ident_pgt)
12464 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
12465 +#ifdef CONFIG_XEN
12466 .fill 511,8,0
12467 +#else
12468 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
12469 + .fill 510,8,0
12470 +#endif
12471 +
12472 +NEXT_PAGE(level3_vmalloc_pgt)
12473 + .fill 512,8,0
12474 +
12475 +NEXT_PAGE(level3_vmemmap_pgt)
12476 + .fill L3_VMEMMAP_START,8,0
12477 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
12478
12479 NEXT_PAGE(level3_kernel_pgt)
12480 .fill L3_START_KERNEL,8,0
12481 @@ -365,20 +390,23 @@ NEXT_PAGE(level3_kernel_pgt)
12482 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
12483 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
12484
12485 +NEXT_PAGE(level2_vmemmap_pgt)
12486 + .fill 512,8,0
12487 +
12488 NEXT_PAGE(level2_fixmap_pgt)
12489 - .fill 506,8,0
12490 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
12491 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
12492 - .fill 5,8,0
12493 + .fill 507,8,0
12494 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
12495 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
12496 + .fill 4,8,0
12497
12498 -NEXT_PAGE(level1_fixmap_pgt)
12499 +NEXT_PAGE(level1_vsyscall_pgt)
12500 .fill 512,8,0
12501
12502 -NEXT_PAGE(level2_ident_pgt)
12503 - /* Since I easily can, map the first 1G.
12504 + /* Since I easily can, map the first 2G.
12505 * Don't set NX because code runs from these pages.
12506 */
12507 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
12508 +NEXT_PAGE(level2_ident_pgt)
12509 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
12510
12511 NEXT_PAGE(level2_kernel_pgt)
12512 /*
12513 @@ -391,33 +419,55 @@ NEXT_PAGE(level2_kernel_pgt)
12514 * If you want to increase this then increase MODULES_VADDR
12515 * too.)
12516 */
12517 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
12518 - KERNEL_IMAGE_SIZE/PMD_SIZE)
12519 -
12520 -NEXT_PAGE(level2_spare_pgt)
12521 - .fill 512, 8, 0
12522 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
12523
12524 #undef PMDS
12525 #undef NEXT_PAGE
12526
12527 - .data
12528 + .align PAGE_SIZE
12529 +ENTRY(cpu_gdt_table)
12530 + .rept NR_CPUS
12531 + .quad 0x0000000000000000 /* NULL descriptor */
12532 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
12533 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
12534 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
12535 + .quad 0x00cffb000000ffff /* __USER32_CS */
12536 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
12537 + .quad 0x00affb000000ffff /* __USER_CS */
12538 +
12539 +#ifdef CONFIG_PAX_KERNEXEC
12540 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
12541 +#else
12542 + .quad 0x0 /* unused */
12543 +#endif
12544 +
12545 + .quad 0,0 /* TSS */
12546 + .quad 0,0 /* LDT */
12547 + .quad 0,0,0 /* three TLS descriptors */
12548 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
12549 + /* asm/segment.h:GDT_ENTRIES must match this */
12550 +
12551 + /* zero the remaining page */
12552 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
12553 + .endr
12554 +
12555 .align 16
12556 .globl early_gdt_descr
12557 early_gdt_descr:
12558 .word GDT_ENTRIES*8-1
12559 early_gdt_descr_base:
12560 - .quad INIT_PER_CPU_VAR(gdt_page)
12561 + .quad cpu_gdt_table
12562
12563 ENTRY(phys_base)
12564 /* This must match the first entry in level2_kernel_pgt */
12565 .quad 0x0000000000000000
12566
12567 #include "../../x86/xen/xen-head.S"
12568 -
12569 - .section .bss, "aw", @nobits
12570 +
12571 + .section .rodata,"a",@progbits
12572 .align L1_CACHE_BYTES
12573 ENTRY(idt_table)
12574 - .skip IDT_ENTRIES * 16
12575 + .fill 512,8,0
12576
12577 __PAGE_ALIGNED_BSS
12578 .align PAGE_SIZE
12579 diff -urNp linux-2.6.35.4/arch/x86/kernel/i386_ksyms_32.c linux-2.6.35.4/arch/x86/kernel/i386_ksyms_32.c
12580 --- linux-2.6.35.4/arch/x86/kernel/i386_ksyms_32.c 2010-08-26 19:47:12.000000000 -0400
12581 +++ linux-2.6.35.4/arch/x86/kernel/i386_ksyms_32.c 2010-09-17 20:12:09.000000000 -0400
12582 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
12583 EXPORT_SYMBOL(cmpxchg8b_emu);
12584 #endif
12585
12586 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
12587 +
12588 /* Networking helper routines. */
12589 EXPORT_SYMBOL(csum_partial_copy_generic);
12590 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
12591 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
12592
12593 EXPORT_SYMBOL(__get_user_1);
12594 EXPORT_SYMBOL(__get_user_2);
12595 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
12596
12597 EXPORT_SYMBOL(csum_partial);
12598 EXPORT_SYMBOL(empty_zero_page);
12599 +
12600 +#ifdef CONFIG_PAX_KERNEXEC
12601 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
12602 +#endif
12603 diff -urNp linux-2.6.35.4/arch/x86/kernel/init_task.c linux-2.6.35.4/arch/x86/kernel/init_task.c
12604 --- linux-2.6.35.4/arch/x86/kernel/init_task.c 2010-08-26 19:47:12.000000000 -0400
12605 +++ linux-2.6.35.4/arch/x86/kernel/init_task.c 2010-09-17 20:12:09.000000000 -0400
12606 @@ -38,5 +38,5 @@ EXPORT_SYMBOL(init_task);
12607 * section. Since TSS's are completely CPU-local, we want them
12608 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
12609 */
12610 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
12611 -
12612 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
12613 +EXPORT_SYMBOL(init_tss);
12614 diff -urNp linux-2.6.35.4/arch/x86/kernel/ioport.c linux-2.6.35.4/arch/x86/kernel/ioport.c
12615 --- linux-2.6.35.4/arch/x86/kernel/ioport.c 2010-08-26 19:47:12.000000000 -0400
12616 +++ linux-2.6.35.4/arch/x86/kernel/ioport.c 2010-09-17 20:12:37.000000000 -0400
12617 @@ -6,6 +6,7 @@
12618 #include <linux/sched.h>
12619 #include <linux/kernel.h>
12620 #include <linux/capability.h>
12621 +#include <linux/security.h>
12622 #include <linux/errno.h>
12623 #include <linux/types.h>
12624 #include <linux/ioport.h>
12625 @@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long
12626
12627 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
12628 return -EINVAL;
12629 +#ifdef CONFIG_GRKERNSEC_IO
12630 + if (turn_on && grsec_disable_privio) {
12631 + gr_handle_ioperm();
12632 + return -EPERM;
12633 + }
12634 +#endif
12635 if (turn_on && !capable(CAP_SYS_RAWIO))
12636 return -EPERM;
12637
12638 @@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long
12639 * because the ->io_bitmap_max value must match the bitmap
12640 * contents:
12641 */
12642 - tss = &per_cpu(init_tss, get_cpu());
12643 + tss = init_tss + get_cpu();
12644
12645 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
12646
12647 @@ -112,6 +119,12 @@ long sys_iopl(unsigned int level, struct
12648 return -EINVAL;
12649 /* Trying to gain more privileges? */
12650 if (level > old) {
12651 +#ifdef CONFIG_GRKERNSEC_IO
12652 + if (grsec_disable_privio) {
12653 + gr_handle_iopl();
12654 + return -EPERM;
12655 + }
12656 +#endif
12657 if (!capable(CAP_SYS_RAWIO))
12658 return -EPERM;
12659 }
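The sys_ioperm()/sys_iopl() hunks above refuse raw port access when grsec_disable_privio is set, before the usual CAP_SYS_RAWIO check even runs. A minimal userspace sketch of the visible effect (not part of the patch): on a kernel built with CONFIG_GRKERNSEC_IO and privileged I/O disabled, both calls below are expected to fail with EPERM even for root.

/* Userspace probe, illustration only: expects EPERM under CONFIG_GRKERNSEC_IO
 * with privileged I/O disabled, even when run as root. */
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <sys/io.h>

int main(void)
{
        if (ioperm(0x80, 1, 1) < 0)     /* ask for one I/O port */
                printf("ioperm: %s\n", strerror(errno));
        if (iopl(3) < 0)                /* ask for full I/O privilege */
                printf("iopl: %s\n", strerror(errno));
        return 0;
}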
12660 diff -urNp linux-2.6.35.4/arch/x86/kernel/irq_32.c linux-2.6.35.4/arch/x86/kernel/irq_32.c
12661 --- linux-2.6.35.4/arch/x86/kernel/irq_32.c 2010-08-26 19:47:12.000000000 -0400
12662 +++ linux-2.6.35.4/arch/x86/kernel/irq_32.c 2010-09-17 20:12:09.000000000 -0400
12663 @@ -94,7 +94,7 @@ execute_on_irq_stack(int overflow, struc
12664 return 0;
12665
12666 /* build the stack frame on the IRQ stack */
12667 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
12668 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
12669 irqctx->tinfo.task = curctx->tinfo.task;
12670 irqctx->tinfo.previous_esp = current_stack_pointer;
12671
12672 @@ -175,7 +175,7 @@ asmlinkage void do_softirq(void)
12673 irqctx->tinfo.previous_esp = current_stack_pointer;
12674
12675 /* build the stack frame on the softirq stack */
12676 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
12677 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
12678
12679 call_on_stack(__do_softirq, isp);
12680 /*
12681 diff -urNp linux-2.6.35.4/arch/x86/kernel/kgdb.c linux-2.6.35.4/arch/x86/kernel/kgdb.c
12682 --- linux-2.6.35.4/arch/x86/kernel/kgdb.c 2010-08-26 19:47:12.000000000 -0400
12683 +++ linux-2.6.35.4/arch/x86/kernel/kgdb.c 2010-09-17 20:12:09.000000000 -0400
12684 @@ -77,7 +77,7 @@ void pt_regs_to_gdb_regs(unsigned long *
12685 gdb_regs[GDB_CS] = regs->cs;
12686 gdb_regs[GDB_FS] = 0xFFFF;
12687 gdb_regs[GDB_GS] = 0xFFFF;
12688 - if (user_mode_vm(regs)) {
12689 + if (user_mode(regs)) {
12690 gdb_regs[GDB_SS] = regs->ss;
12691 gdb_regs[GDB_SP] = regs->sp;
12692 } else {
12693 @@ -720,7 +720,7 @@ void kgdb_arch_set_pc(struct pt_regs *re
12694 regs->ip = ip;
12695 }
12696
12697 -struct kgdb_arch arch_kgdb_ops = {
12698 +const struct kgdb_arch arch_kgdb_ops = {
12699 /* Breakpoint instruction: */
12700 .gdb_bpt_instr = { 0xcc },
12701 .flags = KGDB_HW_BREAKPOINT,
12702 diff -urNp linux-2.6.35.4/arch/x86/kernel/kprobes.c linux-2.6.35.4/arch/x86/kernel/kprobes.c
12703 --- linux-2.6.35.4/arch/x86/kernel/kprobes.c 2010-08-26 19:47:12.000000000 -0400
12704 +++ linux-2.6.35.4/arch/x86/kernel/kprobes.c 2010-09-17 20:12:09.000000000 -0400
12705 @@ -114,9 +114,12 @@ static void __kprobes __synthesize_relat
12706 s32 raddr;
12707 } __attribute__((packed)) *insn;
12708
12709 - insn = (struct __arch_relative_insn *)from;
12710 + insn = (struct __arch_relative_insn *)(ktla_ktva(from));
12711 +
12712 + pax_open_kernel();
12713 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
12714 insn->op = op;
12715 + pax_close_kernel();
12716 }
12717
12718 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
12719 @@ -315,7 +318,9 @@ static int __kprobes __copy_instruction(
12720 }
12721 }
12722 insn_get_length(&insn);
12723 + pax_open_kernel();
12724 memcpy(dest, insn.kaddr, insn.length);
12725 + pax_close_kernel();
12726
12727 #ifdef CONFIG_X86_64
12728 if (insn_rip_relative(&insn)) {
12729 @@ -339,7 +344,9 @@ static int __kprobes __copy_instruction(
12730 (u8 *) dest;
12731 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
12732 disp = (u8 *) dest + insn_offset_displacement(&insn);
12733 + pax_open_kernel();
12734 *(s32 *) disp = (s32) newdisp;
12735 + pax_close_kernel();
12736 }
12737 #endif
12738 return insn.length;
12739 @@ -353,12 +360,12 @@ static void __kprobes arch_copy_kprobe(s
12740 */
12741 __copy_instruction(p->ainsn.insn, p->addr, 0);
12742
12743 - if (can_boost(p->addr))
12744 + if (can_boost(ktla_ktva(p->addr)))
12745 p->ainsn.boostable = 0;
12746 else
12747 p->ainsn.boostable = -1;
12748
12749 - p->opcode = *p->addr;
12750 + p->opcode = *(ktla_ktva(p->addr));
12751 }
12752
12753 int __kprobes arch_prepare_kprobe(struct kprobe *p)
12754 @@ -475,7 +482,7 @@ static void __kprobes setup_singlestep(s
12755 * nor set current_kprobe, because it doesn't use single
12756 * stepping.
12757 */
12758 - regs->ip = (unsigned long)p->ainsn.insn;
12759 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
12760 preempt_enable_no_resched();
12761 return;
12762 }
12763 @@ -494,7 +501,7 @@ static void __kprobes setup_singlestep(s
12764 if (p->opcode == BREAKPOINT_INSTRUCTION)
12765 regs->ip = (unsigned long)p->addr;
12766 else
12767 - regs->ip = (unsigned long)p->ainsn.insn;
12768 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
12769 }
12770
12771 /*
12772 @@ -573,7 +580,7 @@ static int __kprobes kprobe_handler(stru
12773 setup_singlestep(p, regs, kcb, 0);
12774 return 1;
12775 }
12776 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
12777 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
12778 /*
12779 * The breakpoint instruction was removed right
12780 * after we hit it. Another cpu has removed
12781 @@ -799,7 +806,7 @@ static void __kprobes resume_execution(s
12782 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
12783 {
12784 unsigned long *tos = stack_addr(regs);
12785 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
12786 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
12787 unsigned long orig_ip = (unsigned long)p->addr;
12788 kprobe_opcode_t *insn = p->ainsn.insn;
12789
12790 @@ -982,7 +989,7 @@ int __kprobes kprobe_exceptions_notify(s
12791 struct die_args *args = data;
12792 int ret = NOTIFY_DONE;
12793
12794 - if (args->regs && user_mode_vm(args->regs))
12795 + if (args->regs && user_mode(args->regs))
12796 return ret;
12797
12798 switch (val) {
12799 diff -urNp linux-2.6.35.4/arch/x86/kernel/ldt.c linux-2.6.35.4/arch/x86/kernel/ldt.c
12800 --- linux-2.6.35.4/arch/x86/kernel/ldt.c 2010-08-26 19:47:12.000000000 -0400
12801 +++ linux-2.6.35.4/arch/x86/kernel/ldt.c 2010-09-17 20:12:09.000000000 -0400
12802 @@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, i
12803 if (reload) {
12804 #ifdef CONFIG_SMP
12805 preempt_disable();
12806 - load_LDT(pc);
12807 + load_LDT_nolock(pc);
12808 if (!cpumask_equal(mm_cpumask(current->mm),
12809 cpumask_of(smp_processor_id())))
12810 smp_call_function(flush_ldt, current->mm, 1);
12811 preempt_enable();
12812 #else
12813 - load_LDT(pc);
12814 + load_LDT_nolock(pc);
12815 #endif
12816 }
12817 if (oldsize) {
12818 @@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t
12819 return err;
12820
12821 for (i = 0; i < old->size; i++)
12822 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
12823 + write_ldt_entry(new->ldt, i, old->ldt + i);
12824 return 0;
12825 }
12826
12827 @@ -116,6 +116,24 @@ int init_new_context(struct task_struct
12828 retval = copy_ldt(&mm->context, &old_mm->context);
12829 mutex_unlock(&old_mm->context.lock);
12830 }
12831 +
12832 + if (tsk == current) {
12833 + mm->context.vdso = ~0UL;
12834 +
12835 +#ifdef CONFIG_X86_32
12836 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
12837 + mm->context.user_cs_base = 0UL;
12838 + mm->context.user_cs_limit = ~0UL;
12839 +
12840 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
12841 + cpus_clear(mm->context.cpu_user_cs_mask);
12842 +#endif
12843 +
12844 +#endif
12845 +#endif
12846 +
12847 + }
12848 +
12849 return retval;
12850 }
12851
12852 @@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, u
12853 }
12854 }
12855
12856 +#ifdef CONFIG_PAX_SEGMEXEC
12857 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
12858 + error = -EINVAL;
12859 + goto out_unlock;
12860 + }
12861 +#endif
12862 +
12863 fill_ldt(&ldt, &ldt_info);
12864 if (oldmode)
12865 ldt.avl = 0;
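The write_ldt() hunk above makes the kernel reject LDT descriptors marked as code segments when the task runs with MF_PAX_SEGMEXEC. A hedged userspace illustration of exactly that request via modify_ldt(2): on a SEGMEXEC task the patched kernel is expected to return EINVAL, while an unpatched kernel normally accepts it.

/* Illustration only: installs a 32-bit code descriptor in the LDT.
 * Under MF_PAX_SEGMEXEC the hunk above turns this into -EINVAL. */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/ldt.h>

int main(void)
{
        struct user_desc ldt;

        memset(&ldt, 0, sizeof(ldt));
        ldt.entry_number   = 0;
        ldt.base_addr      = 0;
        ldt.limit          = 0xfffff;
        ldt.seg_32bit      = 1;
        ldt.contents       = MODIFY_LDT_CONTENTS_CODE; /* the case the patch rejects */
        ldt.limit_in_pages = 1;
        ldt.useable        = 1;

        if (syscall(SYS_modify_ldt, 1, &ldt, sizeof(ldt)) < 0)
                printf("modify_ldt: %s\n", strerror(errno));
        else
                puts("modify_ldt: code segment installed");
        return 0;
}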
12866 diff -urNp linux-2.6.35.4/arch/x86/kernel/machine_kexec_32.c linux-2.6.35.4/arch/x86/kernel/machine_kexec_32.c
12867 --- linux-2.6.35.4/arch/x86/kernel/machine_kexec_32.c 2010-08-26 19:47:12.000000000 -0400
12868 +++ linux-2.6.35.4/arch/x86/kernel/machine_kexec_32.c 2010-09-17 20:12:09.000000000 -0400
12869 @@ -27,7 +27,7 @@
12870 #include <asm/cacheflush.h>
12871 #include <asm/debugreg.h>
12872
12873 -static void set_idt(void *newidt, __u16 limit)
12874 +static void set_idt(struct desc_struct *newidt, __u16 limit)
12875 {
12876 struct desc_ptr curidt;
12877
12878 @@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16
12879 }
12880
12881
12882 -static void set_gdt(void *newgdt, __u16 limit)
12883 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
12884 {
12885 struct desc_ptr curgdt;
12886
12887 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
12888 }
12889
12890 control_page = page_address(image->control_code_page);
12891 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
12892 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
12893
12894 relocate_kernel_ptr = control_page;
12895 page_list[PA_CONTROL_PAGE] = __pa(control_page);
12896 diff -urNp linux-2.6.35.4/arch/x86/kernel/microcode_amd.c linux-2.6.35.4/arch/x86/kernel/microcode_amd.c
12897 --- linux-2.6.35.4/arch/x86/kernel/microcode_amd.c 2010-08-26 19:47:12.000000000 -0400
12898 +++ linux-2.6.35.4/arch/x86/kernel/microcode_amd.c 2010-09-17 20:12:09.000000000 -0400
12899 @@ -331,7 +331,7 @@ static void microcode_fini_cpu_amd(int c
12900 uci->mc = NULL;
12901 }
12902
12903 -static struct microcode_ops microcode_amd_ops = {
12904 +static const struct microcode_ops microcode_amd_ops = {
12905 .request_microcode_user = request_microcode_user,
12906 .request_microcode_fw = request_microcode_fw,
12907 .collect_cpu_info = collect_cpu_info_amd,
12908 @@ -339,7 +339,7 @@ static struct microcode_ops microcode_am
12909 .microcode_fini_cpu = microcode_fini_cpu_amd,
12910 };
12911
12912 -struct microcode_ops * __init init_amd_microcode(void)
12913 +const struct microcode_ops * __init init_amd_microcode(void)
12914 {
12915 return &microcode_amd_ops;
12916 }
12917 diff -urNp linux-2.6.35.4/arch/x86/kernel/microcode_core.c linux-2.6.35.4/arch/x86/kernel/microcode_core.c
12918 --- linux-2.6.35.4/arch/x86/kernel/microcode_core.c 2010-08-26 19:47:12.000000000 -0400
12919 +++ linux-2.6.35.4/arch/x86/kernel/microcode_core.c 2010-09-17 20:12:09.000000000 -0400
12920 @@ -92,7 +92,7 @@ MODULE_LICENSE("GPL");
12921
12922 #define MICROCODE_VERSION "2.00"
12923
12924 -static struct microcode_ops *microcode_ops;
12925 +static const struct microcode_ops *microcode_ops;
12926
12927 /*
12928 * Synchronization.
12929 diff -urNp linux-2.6.35.4/arch/x86/kernel/microcode_intel.c linux-2.6.35.4/arch/x86/kernel/microcode_intel.c
12930 --- linux-2.6.35.4/arch/x86/kernel/microcode_intel.c 2010-08-26 19:47:12.000000000 -0400
12931 +++ linux-2.6.35.4/arch/x86/kernel/microcode_intel.c 2010-09-17 20:12:09.000000000 -0400
12932 @@ -446,13 +446,13 @@ static enum ucode_state request_microcod
12933
12934 static int get_ucode_user(void *to, const void *from, size_t n)
12935 {
12936 - return copy_from_user(to, from, n);
12937 + return copy_from_user(to, (__force const void __user *)from, n);
12938 }
12939
12940 static enum ucode_state
12941 request_microcode_user(int cpu, const void __user *buf, size_t size)
12942 {
12943 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
12944 + return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
12945 }
12946
12947 static void microcode_fini_cpu(int cpu)
12948 @@ -463,7 +463,7 @@ static void microcode_fini_cpu(int cpu)
12949 uci->mc = NULL;
12950 }
12951
12952 -static struct microcode_ops microcode_intel_ops = {
12953 +static const struct microcode_ops microcode_intel_ops = {
12954 .request_microcode_user = request_microcode_user,
12955 .request_microcode_fw = request_microcode_fw,
12956 .collect_cpu_info = collect_cpu_info,
12957 @@ -471,7 +471,7 @@ static struct microcode_ops microcode_in
12958 .microcode_fini_cpu = microcode_fini_cpu,
12959 };
12960
12961 -struct microcode_ops * __init init_intel_microcode(void)
12962 +const struct microcode_ops * __init init_intel_microcode(void)
12963 {
12964 return &microcode_intel_ops;
12965 }
12966 diff -urNp linux-2.6.35.4/arch/x86/kernel/module.c linux-2.6.35.4/arch/x86/kernel/module.c
12967 --- linux-2.6.35.4/arch/x86/kernel/module.c 2010-08-26 19:47:12.000000000 -0400
12968 +++ linux-2.6.35.4/arch/x86/kernel/module.c 2010-09-17 20:12:09.000000000 -0400
12969 @@ -35,7 +35,7 @@
12970 #define DEBUGP(fmt...)
12971 #endif
12972
12973 -void *module_alloc(unsigned long size)
12974 +static void *__module_alloc(unsigned long size, pgprot_t prot)
12975 {
12976 struct vm_struct *area;
12977
12978 @@ -49,8 +49,18 @@ void *module_alloc(unsigned long size)
12979 if (!area)
12980 return NULL;
12981
12982 - return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
12983 - PAGE_KERNEL_EXEC);
12984 + return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
12985 +}
12986 +
12987 +void *module_alloc(unsigned long size)
12988 +{
12989 +
12990 +#ifdef CONFIG_PAX_KERNEXEC
12991 + return __module_alloc(size, PAGE_KERNEL);
12992 +#else
12993 + return __module_alloc(size, PAGE_KERNEL_EXEC);
12994 +#endif
12995 +
12996 }
12997
12998 /* Free memory returned from module_alloc */
12999 @@ -59,6 +69,40 @@ void module_free(struct module *mod, voi
13000 vfree(module_region);
13001 }
13002
13003 +#ifdef CONFIG_PAX_KERNEXEC
13004 +#ifdef CONFIG_X86_32
13005 +void *module_alloc_exec(unsigned long size)
13006 +{
13007 + struct vm_struct *area;
13008 +
13009 + if (size == 0)
13010 + return NULL;
13011 +
13012 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
13013 + return area ? area->addr : NULL;
13014 +}
13015 +EXPORT_SYMBOL(module_alloc_exec);
13016 +
13017 +void module_free_exec(struct module *mod, void *module_region)
13018 +{
13019 + vunmap(module_region);
13020 +}
13021 +EXPORT_SYMBOL(module_free_exec);
13022 +#else
13023 +void module_free_exec(struct module *mod, void *module_region)
13024 +{
13025 + module_free(mod, module_region);
13026 +}
13027 +EXPORT_SYMBOL(module_free_exec);
13028 +
13029 +void *module_alloc_exec(unsigned long size)
13030 +{
13031 + return __module_alloc(size, PAGE_KERNEL_RX);
13032 +}
13033 +EXPORT_SYMBOL(module_alloc_exec);
13034 +#endif
13035 +#endif
13036 +
13037 /* We don't need anything special. */
13038 int module_frob_arch_sections(Elf_Ehdr *hdr,
13039 Elf_Shdr *sechdrs,
13040 @@ -78,14 +122,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
13041 unsigned int i;
13042 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
13043 Elf32_Sym *sym;
13044 - uint32_t *location;
13045 + uint32_t *plocation, location;
13046
13047 DEBUGP("Applying relocate section %u to %u\n", relsec,
13048 sechdrs[relsec].sh_info);
13049 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
13050 /* This is where to make the change */
13051 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
13052 - + rel[i].r_offset;
13053 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
13054 + location = (uint32_t)plocation;
13055 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
13056 + plocation = ktla_ktva((void *)plocation);
13057 /* This is the symbol it is referring to. Note that all
13058 undefined symbols have been resolved. */
13059 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
13060 @@ -94,11 +140,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
13061 switch (ELF32_R_TYPE(rel[i].r_info)) {
13062 case R_386_32:
13063 /* We add the value into the location given */
13064 - *location += sym->st_value;
13065 + pax_open_kernel();
13066 + *plocation += sym->st_value;
13067 + pax_close_kernel();
13068 break;
13069 case R_386_PC32:
13070 /* Add the value, subtract its position */
13071 - *location += sym->st_value - (uint32_t)location;
13072 + pax_open_kernel();
13073 + *plocation += sym->st_value - location;
13074 + pax_close_kernel();
13075 break;
13076 default:
13077 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
13078 @@ -154,21 +204,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
13079 case R_X86_64_NONE:
13080 break;
13081 case R_X86_64_64:
13082 + pax_open_kernel();
13083 *(u64 *)loc = val;
13084 + pax_close_kernel();
13085 break;
13086 case R_X86_64_32:
13087 + pax_open_kernel();
13088 *(u32 *)loc = val;
13089 + pax_close_kernel();
13090 if (val != *(u32 *)loc)
13091 goto overflow;
13092 break;
13093 case R_X86_64_32S:
13094 + pax_open_kernel();
13095 *(s32 *)loc = val;
13096 + pax_close_kernel();
13097 if ((s64)val != *(s32 *)loc)
13098 goto overflow;
13099 break;
13100 case R_X86_64_PC32:
13101 val -= (u64)loc;
13102 + pax_open_kernel();
13103 *(u32 *)loc = val;
13104 + pax_close_kernel();
13105 +
13106 #if 0
13107 if ((s64)val != *(s32 *)loc)
13108 goto overflow;
13109 diff -urNp linux-2.6.35.4/arch/x86/kernel/paravirt.c linux-2.6.35.4/arch/x86/kernel/paravirt.c
13110 --- linux-2.6.35.4/arch/x86/kernel/paravirt.c 2010-08-26 19:47:12.000000000 -0400
13111 +++ linux-2.6.35.4/arch/x86/kernel/paravirt.c 2010-09-17 20:12:09.000000000 -0400
13112 @@ -122,7 +122,7 @@ unsigned paravirt_patch_jmp(void *insnbu
13113 * corresponding structure. */
13114 static void *get_call_destination(u8 type)
13115 {
13116 - struct paravirt_patch_template tmpl = {
13117 + const struct paravirt_patch_template tmpl = {
13118 .pv_init_ops = pv_init_ops,
13119 .pv_time_ops = pv_time_ops,
13120 .pv_cpu_ops = pv_cpu_ops,
13121 @@ -145,14 +145,14 @@ unsigned paravirt_patch_default(u8 type,
13122 if (opfunc == NULL)
13123 /* If there's no function, patch it with a ud2a (BUG) */
13124 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
13125 - else if (opfunc == _paravirt_nop)
13126 + else if (opfunc == (void *)_paravirt_nop)
13127 /* If the operation is a nop, then nop the callsite */
13128 ret = paravirt_patch_nop();
13129
13130 /* identity functions just return their single argument */
13131 - else if (opfunc == _paravirt_ident_32)
13132 + else if (opfunc == (void *)_paravirt_ident_32)
13133 ret = paravirt_patch_ident_32(insnbuf, len);
13134 - else if (opfunc == _paravirt_ident_64)
13135 + else if (opfunc == (void *)_paravirt_ident_64)
13136 ret = paravirt_patch_ident_64(insnbuf, len);
13137
13138 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
13139 @@ -178,7 +178,7 @@ unsigned paravirt_patch_insns(void *insn
13140 if (insn_len > len || start == NULL)
13141 insn_len = len;
13142 else
13143 - memcpy(insnbuf, start, insn_len);
13144 + memcpy(insnbuf, ktla_ktva(start), insn_len);
13145
13146 return insn_len;
13147 }
13148 @@ -294,22 +294,22 @@ void arch_flush_lazy_mmu_mode(void)
13149 preempt_enable();
13150 }
13151
13152 -struct pv_info pv_info = {
13153 +struct pv_info pv_info __read_only = {
13154 .name = "bare hardware",
13155 .paravirt_enabled = 0,
13156 .kernel_rpl = 0,
13157 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
13158 };
13159
13160 -struct pv_init_ops pv_init_ops = {
13161 +struct pv_init_ops pv_init_ops __read_only = {
13162 .patch = native_patch,
13163 };
13164
13165 -struct pv_time_ops pv_time_ops = {
13166 +struct pv_time_ops pv_time_ops __read_only = {
13167 .sched_clock = native_sched_clock,
13168 };
13169
13170 -struct pv_irq_ops pv_irq_ops = {
13171 +struct pv_irq_ops pv_irq_ops __read_only = {
13172 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
13173 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
13174 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
13175 @@ -321,7 +321,7 @@ struct pv_irq_ops pv_irq_ops = {
13176 #endif
13177 };
13178
13179 -struct pv_cpu_ops pv_cpu_ops = {
13180 +struct pv_cpu_ops pv_cpu_ops __read_only = {
13181 .cpuid = native_cpuid,
13182 .get_debugreg = native_get_debugreg,
13183 .set_debugreg = native_set_debugreg,
13184 @@ -382,7 +382,7 @@ struct pv_cpu_ops pv_cpu_ops = {
13185 .end_context_switch = paravirt_nop,
13186 };
13187
13188 -struct pv_apic_ops pv_apic_ops = {
13189 +struct pv_apic_ops pv_apic_ops __read_only = {
13190 #ifdef CONFIG_X86_LOCAL_APIC
13191 .startup_ipi_hook = paravirt_nop,
13192 #endif
13193 @@ -396,7 +396,7 @@ struct pv_apic_ops pv_apic_ops = {
13194 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
13195 #endif
13196
13197 -struct pv_mmu_ops pv_mmu_ops = {
13198 +struct pv_mmu_ops pv_mmu_ops __read_only = {
13199
13200 .read_cr2 = native_read_cr2,
13201 .write_cr2 = native_write_cr2,
13202 @@ -463,6 +463,12 @@ struct pv_mmu_ops pv_mmu_ops = {
13203 },
13204
13205 .set_fixmap = native_set_fixmap,
13206 +
13207 +#ifdef CONFIG_PAX_KERNEXEC
13208 + .pax_open_kernel = native_pax_open_kernel,
13209 + .pax_close_kernel = native_pax_close_kernel,
13210 +#endif
13211 +
13212 };
13213
13214 EXPORT_SYMBOL_GPL(pv_time_ops);
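The pv_*_ops structures above gain a __read_only annotation, presumably so the paravirt call targets end up in memory the kernel keeps write-protected under KERNEXEC. As a rough illustration of the underlying compiler mechanism, a section attribute; the section name below is an assumption made up for the example, not the one this patch defines.

/* Sketch of a section-attribute based read-only marker; ".data.demo_ro"
 * is a made-up section name, purely for illustration. */
#include <stdio.h>

#define demo_read_only __attribute__((section(".data.demo_ro")))

struct demo_ops {
        int (*op)(int);
};

static int identity(int x) { return x; }

static struct demo_ops demo demo_read_only = { .op = identity };

int main(void)
{
        printf("%d\n", demo.op(41) + 1);
        return 0;
}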
13215 diff -urNp linux-2.6.35.4/arch/x86/kernel/paravirt-spinlocks.c linux-2.6.35.4/arch/x86/kernel/paravirt-spinlocks.c
13216 --- linux-2.6.35.4/arch/x86/kernel/paravirt-spinlocks.c 2010-08-26 19:47:12.000000000 -0400
13217 +++ linux-2.6.35.4/arch/x86/kernel/paravirt-spinlocks.c 2010-09-17 20:12:09.000000000 -0400
13218 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t
13219 arch_spin_lock(lock);
13220 }
13221
13222 -struct pv_lock_ops pv_lock_ops = {
13223 +struct pv_lock_ops pv_lock_ops __read_only = {
13224 #ifdef CONFIG_SMP
13225 .spin_is_locked = __ticket_spin_is_locked,
13226 .spin_is_contended = __ticket_spin_is_contended,
13227 diff -urNp linux-2.6.35.4/arch/x86/kernel/pci-calgary_64.c linux-2.6.35.4/arch/x86/kernel/pci-calgary_64.c
13228 --- linux-2.6.35.4/arch/x86/kernel/pci-calgary_64.c 2010-08-26 19:47:12.000000000 -0400
13229 +++ linux-2.6.35.4/arch/x86/kernel/pci-calgary_64.c 2010-09-17 20:12:09.000000000 -0400
13230 @@ -475,7 +475,7 @@ static void calgary_free_coherent(struct
13231 free_pages((unsigned long)vaddr, get_order(size));
13232 }
13233
13234 -static struct dma_map_ops calgary_dma_ops = {
13235 +static const struct dma_map_ops calgary_dma_ops = {
13236 .alloc_coherent = calgary_alloc_coherent,
13237 .free_coherent = calgary_free_coherent,
13238 .map_sg = calgary_map_sg,
13239 diff -urNp linux-2.6.35.4/arch/x86/kernel/pci-dma.c linux-2.6.35.4/arch/x86/kernel/pci-dma.c
13240 --- linux-2.6.35.4/arch/x86/kernel/pci-dma.c 2010-08-26 19:47:12.000000000 -0400
13241 +++ linux-2.6.35.4/arch/x86/kernel/pci-dma.c 2010-09-17 20:12:09.000000000 -0400
13242 @@ -16,7 +16,7 @@
13243
13244 static int forbid_dac __read_mostly;
13245
13246 -struct dma_map_ops *dma_ops = &nommu_dma_ops;
13247 +const struct dma_map_ops *dma_ops = &nommu_dma_ops;
13248 EXPORT_SYMBOL(dma_ops);
13249
13250 static int iommu_sac_force __read_mostly;
13251 @@ -248,7 +248,7 @@ early_param("iommu", iommu_setup);
13252
13253 int dma_supported(struct device *dev, u64 mask)
13254 {
13255 - struct dma_map_ops *ops = get_dma_ops(dev);
13256 + const struct dma_map_ops *ops = get_dma_ops(dev);
13257
13258 #ifdef CONFIG_PCI
13259 if (mask > 0xffffffff && forbid_dac > 0) {
13260 diff -urNp linux-2.6.35.4/arch/x86/kernel/pci-gart_64.c linux-2.6.35.4/arch/x86/kernel/pci-gart_64.c
13261 --- linux-2.6.35.4/arch/x86/kernel/pci-gart_64.c 2010-08-26 19:47:12.000000000 -0400
13262 +++ linux-2.6.35.4/arch/x86/kernel/pci-gart_64.c 2010-09-17 20:12:09.000000000 -0400
13263 @@ -699,7 +699,7 @@ static __init int init_k8_gatt(struct ag
13264 return -1;
13265 }
13266
13267 -static struct dma_map_ops gart_dma_ops = {
13268 +static const struct dma_map_ops gart_dma_ops = {
13269 .map_sg = gart_map_sg,
13270 .unmap_sg = gart_unmap_sg,
13271 .map_page = gart_map_page,
13272 diff -urNp linux-2.6.35.4/arch/x86/kernel/pci-nommu.c linux-2.6.35.4/arch/x86/kernel/pci-nommu.c
13273 --- linux-2.6.35.4/arch/x86/kernel/pci-nommu.c 2010-08-26 19:47:12.000000000 -0400
13274 +++ linux-2.6.35.4/arch/x86/kernel/pci-nommu.c 2010-09-17 20:12:09.000000000 -0400
13275 @@ -95,7 +95,7 @@ static void nommu_sync_sg_for_device(str
13276 flush_write_buffers();
13277 }
13278
13279 -struct dma_map_ops nommu_dma_ops = {
13280 +const struct dma_map_ops nommu_dma_ops = {
13281 .alloc_coherent = dma_generic_alloc_coherent,
13282 .free_coherent = nommu_free_coherent,
13283 .map_sg = nommu_map_sg,
13284 diff -urNp linux-2.6.35.4/arch/x86/kernel/pci-swiotlb.c linux-2.6.35.4/arch/x86/kernel/pci-swiotlb.c
13285 --- linux-2.6.35.4/arch/x86/kernel/pci-swiotlb.c 2010-08-26 19:47:12.000000000 -0400
13286 +++ linux-2.6.35.4/arch/x86/kernel/pci-swiotlb.c 2010-09-17 20:12:09.000000000 -0400
13287 @@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(
13288 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
13289 }
13290
13291 -static struct dma_map_ops swiotlb_dma_ops = {
13292 +static const struct dma_map_ops swiotlb_dma_ops = {
13293 .mapping_error = swiotlb_dma_mapping_error,
13294 .alloc_coherent = x86_swiotlb_alloc_coherent,
13295 .free_coherent = swiotlb_free_coherent,
13296 diff -urNp linux-2.6.35.4/arch/x86/kernel/process_32.c linux-2.6.35.4/arch/x86/kernel/process_32.c
13297 --- linux-2.6.35.4/arch/x86/kernel/process_32.c 2010-08-26 19:47:12.000000000 -0400
13298 +++ linux-2.6.35.4/arch/x86/kernel/process_32.c 2010-09-17 20:12:09.000000000 -0400
13299 @@ -65,6 +65,7 @@ asmlinkage void ret_from_fork(void) __as
13300 unsigned long thread_saved_pc(struct task_struct *tsk)
13301 {
13302 return ((unsigned long *)tsk->thread.sp)[3];
13303 +//XXX return tsk->thread.eip;
13304 }
13305
13306 #ifndef CONFIG_SMP
13307 @@ -126,7 +127,7 @@ void __show_regs(struct pt_regs *regs, i
13308 unsigned long sp;
13309 unsigned short ss, gs;
13310
13311 - if (user_mode_vm(regs)) {
13312 + if (user_mode(regs)) {
13313 sp = regs->sp;
13314 ss = regs->ss & 0xffff;
13315 gs = get_user_gs(regs);
13316 @@ -196,7 +197,7 @@ int copy_thread(unsigned long clone_flag
13317 struct task_struct *tsk;
13318 int err;
13319
13320 - childregs = task_pt_regs(p);
13321 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
13322 *childregs = *regs;
13323 childregs->ax = 0;
13324 childregs->sp = sp;
13325 @@ -230,6 +231,7 @@ int copy_thread(unsigned long clone_flag
13326 * Set a new TLS for the child thread?
13327 */
13328 if (clone_flags & CLONE_SETTLS)
13329 +//XXX needs set_fs()?
13330 err = do_set_thread_area(p, -1,
13331 (struct user_desc __user *)childregs->si, 0);
13332
13333 @@ -293,7 +295,7 @@ __switch_to(struct task_struct *prev_p,
13334 struct thread_struct *prev = &prev_p->thread,
13335 *next = &next_p->thread;
13336 int cpu = smp_processor_id();
13337 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
13338 + struct tss_struct *tss = init_tss + cpu;
13339 bool preload_fpu;
13340
13341 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
13342 @@ -328,6 +330,11 @@ __switch_to(struct task_struct *prev_p,
13343 */
13344 lazy_save_gs(prev->gs);
13345
13346 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13347 + if (!segment_eq(task_thread_info(prev_p)->addr_limit, task_thread_info(next_p)->addr_limit))
13348 + __set_fs(task_thread_info(next_p)->addr_limit, cpu);
13349 +#endif
13350 +
13351 /*
13352 * Load the per-thread Thread-Local Storage descriptor.
13353 */
13354 @@ -404,3 +411,27 @@ unsigned long get_wchan(struct task_stru
13355 return 0;
13356 }
13357
13358 +#ifdef CONFIG_PAX_RANDKSTACK
13359 +asmlinkage void pax_randomize_kstack(void)
13360 +{
13361 + struct thread_struct *thread = &current->thread;
13362 + unsigned long time;
13363 +
13364 + if (!randomize_va_space)
13365 + return;
13366 +
13367 + rdtscl(time);
13368 +
13369 + /* P4 seems to return a 0 LSB, ignore it */
13370 +#ifdef CONFIG_MPENTIUM4
13371 + time &= 0x1EUL;
13372 + time <<= 2;
13373 +#else
13374 + time &= 0xFUL;
13375 + time <<= 3;
13376 +#endif
13377 +
13378 + thread->sp0 ^= time;
13379 + load_sp0(init_tss + smp_processor_id(), thread);
13380 +}
13381 +#endif
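pax_randomize_kstack() above XORs a few TSC-derived bits into the thread's sp0 whenever randomize_va_space is set: the generic case keeps 4 low TSC bits and shifts them left by 3, while the Pentium 4 variant skips the LSB (which reads as 0 there) and shifts by 2, so both produce 8-byte-aligned offsets below 128 bytes. The userspace sketch below only reproduces that masking to show how large the resulting offsets are; it is not kernel code.

/* Illustration of the masking above; requires an x86 compiler with
 * <x86intrin.h>. The kernel applies these masks to thread->sp0. */
#include <stdio.h>
#include <x86intrin.h>

int main(void)
{
        for (int i = 0; i < 4; i++) {
                unsigned long long t = __rdtsc();
                unsigned long generic = (t & 0xFUL)  << 3;  /* default mask     */
                unsigned long p4      = (t & 0x1EUL) << 2;  /* CONFIG_MPENTIUM4 */
                printf("offset: generic=%#lx p4=%#lx\n", generic, p4);
        }
        return 0;
}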
13382 diff -urNp linux-2.6.35.4/arch/x86/kernel/process_64.c linux-2.6.35.4/arch/x86/kernel/process_64.c
13383 --- linux-2.6.35.4/arch/x86/kernel/process_64.c 2010-08-26 19:47:12.000000000 -0400
13384 +++ linux-2.6.35.4/arch/x86/kernel/process_64.c 2010-09-17 20:12:09.000000000 -0400
13385 @@ -87,7 +87,7 @@ static void __exit_idle(void)
13386 void exit_idle(void)
13387 {
13388 /* idle loop has pid 0 */
13389 - if (current->pid)
13390 + if (task_pid_nr(current))
13391 return;
13392 __exit_idle();
13393 }
13394 @@ -375,7 +375,7 @@ __switch_to(struct task_struct *prev_p,
13395 struct thread_struct *prev = &prev_p->thread;
13396 struct thread_struct *next = &next_p->thread;
13397 int cpu = smp_processor_id();
13398 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
13399 + struct tss_struct *tss = init_tss + cpu;
13400 unsigned fsindex, gsindex;
13401 bool preload_fpu;
13402
13403 @@ -528,12 +528,11 @@ unsigned long get_wchan(struct task_stru
13404 if (!p || p == current || p->state == TASK_RUNNING)
13405 return 0;
13406 stack = (unsigned long)task_stack_page(p);
13407 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
13408 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-8-sizeof(u64))
13409 return 0;
13410 fp = *(u64 *)(p->thread.sp);
13411 do {
13412 - if (fp < (unsigned long)stack ||
13413 - fp >= (unsigned long)stack+THREAD_SIZE)
13414 + if (fp < stack || fp > stack+THREAD_SIZE-8-sizeof(u64))
13415 return 0;
13416 ip = *(u64 *)(fp+8);
13417 if (!in_sched_functions(ip))
13418 diff -urNp linux-2.6.35.4/arch/x86/kernel/process.c linux-2.6.35.4/arch/x86/kernel/process.c
13419 --- linux-2.6.35.4/arch/x86/kernel/process.c 2010-08-26 19:47:12.000000000 -0400
13420 +++ linux-2.6.35.4/arch/x86/kernel/process.c 2010-09-17 20:12:09.000000000 -0400
13421 @@ -73,7 +73,7 @@ void exit_thread(void)
13422 unsigned long *bp = t->io_bitmap_ptr;
13423
13424 if (bp) {
13425 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
13426 + struct tss_struct *tss = init_tss + get_cpu();
13427
13428 t->io_bitmap_ptr = NULL;
13429 clear_thread_flag(TIF_IO_BITMAP);
13430 @@ -107,7 +107,7 @@ void show_regs_common(void)
13431
13432 printk(KERN_CONT "\n");
13433 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s/%s\n",
13434 - current->pid, current->comm, print_tainted(),
13435 + task_pid_nr(current), current->comm, print_tainted(),
13436 init_utsname()->release,
13437 (int)strcspn(init_utsname()->version, " "),
13438 init_utsname()->version, board, product);
13439 @@ -117,6 +117,9 @@ void flush_thread(void)
13440 {
13441 struct task_struct *tsk = current;
13442
13443 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR)
13444 + loadsegment(gs, 0);
13445 +#endif
13446 flush_ptrace_hw_breakpoint(tsk);
13447 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
13448 /*
13449 @@ -279,8 +282,8 @@ int kernel_thread(int (*fn)(void *), voi
13450 regs.di = (unsigned long) arg;
13451
13452 #ifdef CONFIG_X86_32
13453 - regs.ds = __USER_DS;
13454 - regs.es = __USER_DS;
13455 + regs.ds = __KERNEL_DS;
13456 + regs.es = __KERNEL_DS;
13457 regs.fs = __KERNEL_PERCPU;
13458 regs.gs = __KERNEL_STACK_CANARY;
13459 #else
13460 @@ -689,17 +692,3 @@ static int __init idle_setup(char *str)
13461 return 0;
13462 }
13463 early_param("idle", idle_setup);
13464 -
13465 -unsigned long arch_align_stack(unsigned long sp)
13466 -{
13467 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
13468 - sp -= get_random_int() % 8192;
13469 - return sp & ~0xf;
13470 -}
13471 -
13472 -unsigned long arch_randomize_brk(struct mm_struct *mm)
13473 -{
13474 - unsigned long range_end = mm->brk + 0x02000000;
13475 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
13476 -}
13477 -
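The last hunk above drops arch_align_stack() and arch_randomize_brk() from process.c, presumably because the PaX randomization seen elsewhere in this patch takes over the corresponding stack and brk placement. A tiny userspace probe (not from the patch) makes that randomization visible: with full address space randomization enabled (randomize_va_space=2) the printed stack address and program break change from run to run.

/* Illustration only: run several times and compare the two addresses. */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int on_stack;

        printf("stack ~ %p   brk = %p\n", (void *)&on_stack, sbrk(0));
        return 0;
}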
13478 diff -urNp linux-2.6.35.4/arch/x86/kernel/ptrace.c linux-2.6.35.4/arch/x86/kernel/ptrace.c
13479 --- linux-2.6.35.4/arch/x86/kernel/ptrace.c 2010-08-26 19:47:12.000000000 -0400
13480 +++ linux-2.6.35.4/arch/x86/kernel/ptrace.c 2010-09-17 20:12:09.000000000 -0400
13481 @@ -804,7 +804,7 @@ static const struct user_regset_view use
13482 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
13483 {
13484 int ret;
13485 - unsigned long __user *datap = (unsigned long __user *)data;
13486 + unsigned long __user *datap = (__force unsigned long __user *)data;
13487
13488 switch (request) {
13489 /* read the word at location addr in the USER area. */
13490 @@ -891,14 +891,14 @@ long arch_ptrace(struct task_struct *chi
13491 if (addr < 0)
13492 return -EIO;
13493 ret = do_get_thread_area(child, addr,
13494 - (struct user_desc __user *) data);
13495 + (__force struct user_desc __user *) data);
13496 break;
13497
13498 case PTRACE_SET_THREAD_AREA:
13499 if (addr < 0)
13500 return -EIO;
13501 ret = do_set_thread_area(child, addr,
13502 - (struct user_desc __user *) data, 0);
13503 + (__force struct user_desc __user *) data, 0);
13504 break;
13505 #endif
13506
13507 @@ -1315,7 +1315,7 @@ static void fill_sigtrap_info(struct tas
13508 memset(info, 0, sizeof(*info));
13509 info->si_signo = SIGTRAP;
13510 info->si_code = si_code;
13511 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
13512 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
13513 }
13514
13515 void user_single_step_siginfo(struct task_struct *tsk,
13516 diff -urNp linux-2.6.35.4/arch/x86/kernel/reboot.c linux-2.6.35.4/arch/x86/kernel/reboot.c
13517 --- linux-2.6.35.4/arch/x86/kernel/reboot.c 2010-08-26 19:47:12.000000000 -0400
13518 +++ linux-2.6.35.4/arch/x86/kernel/reboot.c 2010-09-17 20:12:09.000000000 -0400
13519 @@ -33,7 +33,7 @@ void (*pm_power_off)(void);
13520 EXPORT_SYMBOL(pm_power_off);
13521
13522 static const struct desc_ptr no_idt = {};
13523 -static int reboot_mode;
13524 +static unsigned short reboot_mode;
13525 enum reboot_type reboot_type = BOOT_KBD;
13526 int reboot_force;
13527
13528 @@ -284,7 +284,7 @@ static struct dmi_system_id __initdata r
13529 DMI_MATCH(DMI_BOARD_NAME, "P4S800"),
13530 },
13531 },
13532 - { }
13533 + { NULL, NULL, {{0, {0}}}, NULL}
13534 };
13535
13536 static int __init reboot_init(void)
13537 @@ -300,12 +300,12 @@ core_initcall(reboot_init);
13538 controller to pulse the CPU reset line, which is more thorough, but
13539 doesn't work with at least one type of 486 motherboard. It is easy
13540 to stop this code working; hence the copious comments. */
13541 -static const unsigned long long
13542 -real_mode_gdt_entries [3] =
13543 +static struct desc_struct
13544 +real_mode_gdt_entries [3] __read_only =
13545 {
13546 - 0x0000000000000000ULL, /* Null descriptor */
13547 - 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
13548 - 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
13549 + GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
13550 + GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
13551 + GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
13552 };
13553
13554 static const struct desc_ptr
13555 @@ -354,7 +354,7 @@ static const unsigned char jump_to_bios
13556 * specified by the code and length parameters.
13557 * We assume that length will always be less than 100!
13558 */
13559 -void machine_real_restart(const unsigned char *code, int length)
13560 +void machine_real_restart(const unsigned char *code, unsigned int length)
13561 {
13562 local_irq_disable();
13563
13564 @@ -374,8 +374,8 @@ void machine_real_restart(const unsigned
13565 /* Remap the kernel at virtual address zero, as well as offset zero
13566 from the kernel segment. This assumes the kernel segment starts at
13567 virtual address PAGE_OFFSET. */
13568 - memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
13569 - sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
13570 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
13571 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
13572
13573 /*
13574 * Use `swapper_pg_dir' as our page directory.
13575 @@ -387,16 +387,15 @@ void machine_real_restart(const unsigned
13576 boot)". This seems like a fairly standard thing that gets set by
13577 REBOOT.COM programs, and the previous reset routine did this
13578 too. */
13579 - *((unsigned short *)0x472) = reboot_mode;
13580 + *(unsigned short *)(__va(0x472)) = reboot_mode;
13581
13582 /* For the switch to real mode, copy some code to low memory. It has
13583 to be in the first 64k because it is running in 16-bit mode, and it
13584 has to have the same physical and virtual address, because it turns
13585 off paging. Copy it near the end of the first page, out of the way
13586 of BIOS variables. */
13587 - memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
13588 - real_mode_switch, sizeof (real_mode_switch));
13589 - memcpy((void *)(0x1000 - 100), code, length);
13590 + memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
13591 + memcpy(__va(0x1000 - 100), code, length);
13592
13593 /* Set up the IDT for real mode. */
13594 load_idt(&real_mode_idt);
13595 diff -urNp linux-2.6.35.4/arch/x86/kernel/setup.c linux-2.6.35.4/arch/x86/kernel/setup.c
13596 --- linux-2.6.35.4/arch/x86/kernel/setup.c 2010-08-26 19:47:12.000000000 -0400
13597 +++ linux-2.6.35.4/arch/x86/kernel/setup.c 2010-09-17 20:12:09.000000000 -0400
13598 @@ -704,7 +704,7 @@ static void __init trim_bios_range(void)
13599 * area (640->1Mb) as ram even though it is not.
13600 * take them out.
13601 */
13602 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
13603 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
13604 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
13605 }
13606
13607 @@ -791,14 +791,14 @@ void __init setup_arch(char **cmdline_p)
13608
13609 if (!boot_params.hdr.root_flags)
13610 root_mountflags &= ~MS_RDONLY;
13611 - init_mm.start_code = (unsigned long) _text;
13612 - init_mm.end_code = (unsigned long) _etext;
13613 + init_mm.start_code = ktla_ktva((unsigned long) _text);
13614 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
13615 init_mm.end_data = (unsigned long) _edata;
13616 init_mm.brk = _brk_end;
13617
13618 - code_resource.start = virt_to_phys(_text);
13619 - code_resource.end = virt_to_phys(_etext)-1;
13620 - data_resource.start = virt_to_phys(_etext);
13621 + code_resource.start = virt_to_phys(ktla_ktva(_text));
13622 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
13623 + data_resource.start = virt_to_phys(_sdata);
13624 data_resource.end = virt_to_phys(_edata)-1;
13625 bss_resource.start = virt_to_phys(&__bss_start);
13626 bss_resource.end = virt_to_phys(&__bss_stop)-1;
13627 diff -urNp linux-2.6.35.4/arch/x86/kernel/setup_percpu.c linux-2.6.35.4/arch/x86/kernel/setup_percpu.c
13628 --- linux-2.6.35.4/arch/x86/kernel/setup_percpu.c 2010-08-26 19:47:12.000000000 -0400
13629 +++ linux-2.6.35.4/arch/x86/kernel/setup_percpu.c 2010-09-17 20:12:09.000000000 -0400
13630 @@ -21,19 +21,17 @@
13631 #include <asm/cpu.h>
13632 #include <asm/stackprotector.h>
13633
13634 +#ifdef CONFIG_SMP
13635 DEFINE_PER_CPU(int, cpu_number);
13636 EXPORT_PER_CPU_SYMBOL(cpu_number);
13637 +#endif
13638
13639 -#ifdef CONFIG_X86_64
13640 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
13641 -#else
13642 -#define BOOT_PERCPU_OFFSET 0
13643 -#endif
13644
13645 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
13646 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
13647
13648 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
13649 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
13650 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
13651 };
13652 EXPORT_SYMBOL(__per_cpu_offset);
13653 @@ -161,10 +159,10 @@ static inline void setup_percpu_segment(
13654 {
13655 #ifdef CONFIG_X86_32
13656 struct desc_struct gdt;
13657 + unsigned long base = per_cpu_offset(cpu);
13658
13659 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
13660 - 0x2 | DESCTYPE_S, 0x8);
13661 - gdt.s = 1;
13662 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
13663 + 0x83 | DESCTYPE_S, 0xC);
13664 write_gdt_entry(get_cpu_gdt_table(cpu),
13665 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
13666 #endif
13667 @@ -213,6 +211,11 @@ void __init setup_per_cpu_areas(void)
13668 /* alrighty, percpu areas up and running */
13669 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
13670 for_each_possible_cpu(cpu) {
13671 +#ifdef CONFIG_CC_STACKPROTECTOR
13672 +#ifdef CONFIG_X86_32
13673 + unsigned long canary = per_cpu(stack_canary, cpu);
13674 +#endif
13675 +#endif
13676 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
13677 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
13678 per_cpu(cpu_number, cpu) = cpu;
13679 @@ -249,6 +252,12 @@ void __init setup_per_cpu_areas(void)
13680 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
13681 #endif
13682 #endif
13683 +#ifdef CONFIG_CC_STACKPROTECTOR
13684 +#ifdef CONFIG_X86_32
13685 + if (cpu == boot_cpu_id)
13686 + per_cpu(stack_canary, cpu) = canary;
13687 +#endif
13688 +#endif
13689 /*
13690 * Up to this point, the boot CPU has been using .init.data
13691 * area. Reload any changed state for the boot CPU.
13692 diff -urNp linux-2.6.35.4/arch/x86/kernel/signal.c linux-2.6.35.4/arch/x86/kernel/signal.c
13693 --- linux-2.6.35.4/arch/x86/kernel/signal.c 2010-08-26 19:47:12.000000000 -0400
13694 +++ linux-2.6.35.4/arch/x86/kernel/signal.c 2010-09-17 20:12:09.000000000 -0400
13695 @@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsi
13696 * Align the stack pointer according to the i386 ABI,
13697 * i.e. so that on function entry ((sp + 4) & 15) == 0.
13698 */
13699 - sp = ((sp + 4) & -16ul) - 4;
13700 + sp = ((sp - 12) & -16ul) - 4;
13701 #else /* !CONFIG_X86_32 */
13702 sp = round_down(sp, 16) - 8;
13703 #endif
13704 @@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, str
13705 * Return an always-bogus address instead so we will die with SIGSEGV.
13706 */
13707 if (onsigstack && !likely(on_sig_stack(sp)))
13708 - return (void __user *)-1L;
13709 + return (__force void __user *)-1L;
13710
13711 /* save i387 state */
13712 if (used_math() && save_i387_xstate(*fpstate) < 0)
13713 - return (void __user *)-1L;
13714 + return (__force void __user *)-1L;
13715
13716 return (void __user *)sp;
13717 }
13718 @@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigactio
13719 }
13720
13721 if (current->mm->context.vdso)
13722 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
13723 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
13724 else
13725 - restorer = &frame->retcode;
13726 + restorer = (void __user *)&frame->retcode;
13727 if (ka->sa.sa_flags & SA_RESTORER)
13728 restorer = ka->sa.sa_restorer;
13729
13730 @@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigactio
13731 * reasons and because gdb uses it as a signature to notice
13732 * signal handler stack frames.
13733 */
13734 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
13735 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
13736
13737 if (err)
13738 return -EFAULT;
13739 @@ -378,7 +378,7 @@ static int __setup_rt_frame(int sig, str
13740 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
13741
13742 /* Set up to return from userspace. */
13743 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
13744 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
13745 if (ka->sa.sa_flags & SA_RESTORER)
13746 restorer = ka->sa.sa_restorer;
13747 put_user_ex(restorer, &frame->pretcode);
13748 @@ -390,7 +390,7 @@ static int __setup_rt_frame(int sig, str
13749 * reasons and because gdb uses it as a signature to notice
13750 * signal handler stack frames.
13751 */
13752 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
13753 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
13754 } put_user_catch(err);
13755
13756 if (err)
13757 @@ -780,7 +780,7 @@ static void do_signal(struct pt_regs *re
13758 * X86_32: vm86 regs switched out by assembly code before reaching
13759 * here, so testing against kernel CS suffices.
13760 */
13761 - if (!user_mode(regs))
13762 + if (!user_mode_novm(regs))
13763 return;
13764
13765 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
13766 diff -urNp linux-2.6.35.4/arch/x86/kernel/smpboot.c linux-2.6.35.4/arch/x86/kernel/smpboot.c
13767 --- linux-2.6.35.4/arch/x86/kernel/smpboot.c 2010-08-26 19:47:12.000000000 -0400
13768 +++ linux-2.6.35.4/arch/x86/kernel/smpboot.c 2010-09-17 20:12:09.000000000 -0400
13769 @@ -780,7 +780,11 @@ do_rest:
13770 (unsigned long)task_stack_page(c_idle.idle) -
13771 KERNEL_STACK_OFFSET + THREAD_SIZE;
13772 #endif
13773 +
13774 + pax_open_kernel();
13775 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
13776 + pax_close_kernel();
13777 +
13778 initial_code = (unsigned long)start_secondary;
13779 stack_start.sp = (void *) c_idle.idle->thread.sp;
13780
13781 @@ -920,6 +924,12 @@ int __cpuinit native_cpu_up(unsigned int
13782
13783 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
13784
13785 +#ifdef CONFIG_PAX_PER_CPU_PGD
13786 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
13787 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
13788 + KERNEL_PGD_PTRS);
13789 +#endif
13790 +
13791 #ifdef CONFIG_X86_32
13792 /* init low mem mapping */
13793 clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
13794 diff -urNp linux-2.6.35.4/arch/x86/kernel/step.c linux-2.6.35.4/arch/x86/kernel/step.c
13795 --- linux-2.6.35.4/arch/x86/kernel/step.c 2010-08-26 19:47:12.000000000 -0400
13796 +++ linux-2.6.35.4/arch/x86/kernel/step.c 2010-09-17 20:12:09.000000000 -0400
13797 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
13798 struct desc_struct *desc;
13799 unsigned long base;
13800
13801 - seg &= ~7UL;
13802 + seg >>= 3;
13803
13804 mutex_lock(&child->mm->context.lock);
13805 - if (unlikely((seg >> 3) >= child->mm->context.size))
13806 + if (unlikely(seg >= child->mm->context.size))
13807 addr = -1L; /* bogus selector, access would fault */
13808 else {
13809 desc = child->mm->context.ldt + seg;
13810 @@ -53,6 +53,9 @@ static int is_setting_trap_flag(struct t
13811 unsigned char opcode[15];
13812 unsigned long addr = convert_ip_to_linear(child, regs);
13813
13814 + if (addr == -EINVAL)
13815 + return 0;
13816 +
13817 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
13818 for (i = 0; i < copied; i++) {
13819 switch (opcode[i]) {
13820 @@ -74,7 +77,7 @@ static int is_setting_trap_flag(struct t
13821
13822 #ifdef CONFIG_X86_64
13823 case 0x40 ... 0x4f:
13824 - if (regs->cs != __USER_CS)
13825 + if ((regs->cs & 0xffff) != __USER_CS)
13826 /* 32-bit mode: register increment */
13827 return 0;
13828 /* 64-bit mode: REX prefix */
13829 diff -urNp linux-2.6.35.4/arch/x86/kernel/syscall_table_32.S linux-2.6.35.4/arch/x86/kernel/syscall_table_32.S
13830 --- linux-2.6.35.4/arch/x86/kernel/syscall_table_32.S 2010-08-26 19:47:12.000000000 -0400
13831 +++ linux-2.6.35.4/arch/x86/kernel/syscall_table_32.S 2010-09-17 20:12:09.000000000 -0400
13832 @@ -1,3 +1,4 @@
13833 +.section .rodata,"a",@progbits
13834 ENTRY(sys_call_table)
13835 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
13836 .long sys_exit
13837 diff -urNp linux-2.6.35.4/arch/x86/kernel/sys_i386_32.c linux-2.6.35.4/arch/x86/kernel/sys_i386_32.c
13838 --- linux-2.6.35.4/arch/x86/kernel/sys_i386_32.c 2010-08-26 19:47:12.000000000 -0400
13839 +++ linux-2.6.35.4/arch/x86/kernel/sys_i386_32.c 2010-09-17 20:12:09.000000000 -0400
13840 @@ -24,6 +24,224 @@
13841
13842 #include <asm/syscalls.h>
13843
13844 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
13845 +{
13846 + unsigned long pax_task_size = TASK_SIZE;
13847 +
13848 +#ifdef CONFIG_PAX_SEGMEXEC
13849 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
13850 + pax_task_size = SEGMEXEC_TASK_SIZE;
13851 +#endif
13852 +
13853 + if (len > pax_task_size || addr > pax_task_size - len)
13854 + return -EINVAL;
13855 +
13856 + return 0;
13857 +}
13858 +
13859 +unsigned long
13860 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
13861 + unsigned long len, unsigned long pgoff, unsigned long flags)
13862 +{
13863 + struct mm_struct *mm = current->mm;
13864 + struct vm_area_struct *vma;
13865 + unsigned long start_addr, pax_task_size = TASK_SIZE;
13866 +
13867 +#ifdef CONFIG_PAX_SEGMEXEC
13868 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
13869 + pax_task_size = SEGMEXEC_TASK_SIZE;
13870 +#endif
13871 +
13872 + if (len > pax_task_size)
13873 + return -ENOMEM;
13874 +
13875 + if (flags & MAP_FIXED)
13876 + return addr;
13877 +
13878 +#ifdef CONFIG_PAX_RANDMMAP
13879 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
13880 +#endif
13881 +
13882 + if (addr) {
13883 + addr = PAGE_ALIGN(addr);
13884 + if (pax_task_size - len >= addr) {
13885 + vma = find_vma(mm, addr);
13886 + if (check_heap_stack_gap(vma, addr, len))
13887 + return addr;
13888 + }
13889 + }
13890 + if (len > mm->cached_hole_size) {
13891 + start_addr = addr = mm->free_area_cache;
13892 + } else {
13893 + start_addr = addr = mm->mmap_base;
13894 + mm->cached_hole_size = 0;
13895 + }
13896 +
13897 +#ifdef CONFIG_PAX_PAGEEXEC
13898 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
13899 + start_addr = 0x00110000UL;
13900 +
13901 +#ifdef CONFIG_PAX_RANDMMAP
13902 + if (mm->pax_flags & MF_PAX_RANDMMAP)
13903 + start_addr += mm->delta_mmap & 0x03FFF000UL;
13904 +#endif
13905 +
13906 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
13907 + start_addr = addr = mm->mmap_base;
13908 + else
13909 + addr = start_addr;
13910 + }
13911 +#endif
13912 +
13913 +full_search:
13914 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
13915 + /* At this point: (!vma || addr < vma->vm_end). */
13916 + if (pax_task_size - len < addr) {
13917 + /*
13918 + * Start a new search - just in case we missed
13919 + * some holes.
13920 + */
13921 + if (start_addr != mm->mmap_base) {
13922 + start_addr = addr = mm->mmap_base;
13923 + mm->cached_hole_size = 0;
13924 + goto full_search;
13925 + }
13926 + return -ENOMEM;
13927 + }
13928 + if (check_heap_stack_gap(vma, addr, len))
13929 + break;
13930 + if (addr + mm->cached_hole_size < vma->vm_start)
13931 + mm->cached_hole_size = vma->vm_start - addr;
13932 + addr = vma->vm_end;
13933 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
13934 + start_addr = addr = mm->mmap_base;
13935 + mm->cached_hole_size = 0;
13936 + goto full_search;
13937 + }
13938 + }
13939 +
13940 + /*
13941 + * Remember the place where we stopped the search:
13942 + */
13943 + mm->free_area_cache = addr + len;
13944 + return addr;
13945 +}
13946 +
13947 +unsigned long
13948 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
13949 + const unsigned long len, const unsigned long pgoff,
13950 + const unsigned long flags)
13951 +{
13952 + struct vm_area_struct *vma;
13953 + struct mm_struct *mm = current->mm;
13954 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
13955 +
13956 +#ifdef CONFIG_PAX_SEGMEXEC
13957 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
13958 + pax_task_size = SEGMEXEC_TASK_SIZE;
13959 +#endif
13960 +
13961 + /* requested length too big for entire address space */
13962 + if (len > pax_task_size)
13963 + return -ENOMEM;
13964 +
13965 + if (flags & MAP_FIXED)
13966 + return addr;
13967 +
13968 +#ifdef CONFIG_PAX_PAGEEXEC
13969 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
13970 + goto bottomup;
13971 +#endif
13972 +
13973 +#ifdef CONFIG_PAX_RANDMMAP
13974 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
13975 +#endif
13976 +
13977 + /* requesting a specific address */
13978 + if (addr) {
13979 + addr = PAGE_ALIGN(addr);
13980 + if (pax_task_size - len >= addr) {
13981 + vma = find_vma(mm, addr);
13982 + if (check_heap_stack_gap(vma, addr, len))
13983 + return addr;
13984 + }
13985 + }
13986 +
13987 + /* check if free_area_cache is useful for us */
13988 + if (len <= mm->cached_hole_size) {
13989 + mm->cached_hole_size = 0;
13990 + mm->free_area_cache = mm->mmap_base;
13991 + }
13992 +
13993 + /* either no address requested or can't fit in requested address hole */
13994 + addr = mm->free_area_cache;
13995 +
13996 + /* make sure it can fit in the remaining address space */
13997 + if (addr > len) {
13998 + vma = find_vma(mm, addr-len);
13999 + if (check_heap_stack_gap(vma, addr - len, len))
14000 + /* remember the address as a hint for next time */
14001 + return (mm->free_area_cache = addr-len);
14002 + }
14003 +
14004 + if (mm->mmap_base < len)
14005 + goto bottomup;
14006 +
14007 + addr = mm->mmap_base-len;
14008 +
14009 + do {
14010 + /*
14011 + * Lookup failure means no vma is above this address,
14012 + * else if new region fits below vma->vm_start,
14013 + * return with success:
14014 + */
14015 + vma = find_vma(mm, addr);
14016 + if (check_heap_stack_gap(vma, addr, len))
14017 + /* remember the address as a hint for next time */
14018 + return (mm->free_area_cache = addr);
14019 +
14020 + /* remember the largest hole we saw so far */
14021 + if (addr + mm->cached_hole_size < vma->vm_start)
14022 + mm->cached_hole_size = vma->vm_start - addr;
14023 +
14024 + /* try just below the current vma->vm_start */
14025 + addr = vma->vm_start-len;
14026 + } while (len < vma->vm_start);
14027 +
14028 +bottomup:
14029 + /*
14030 + * A failed mmap() very likely causes application failure,
14031 + * so fall back to the bottom-up function here. This scenario
14032 + * can happen with large stack limits and large mmap()
14033 + * allocations.
14034 + */
14035 +
14036 +#ifdef CONFIG_PAX_SEGMEXEC
14037 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
14038 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
14039 + else
14040 +#endif
14041 +
14042 + mm->mmap_base = TASK_UNMAPPED_BASE;
14043 +
14044 +#ifdef CONFIG_PAX_RANDMMAP
14045 + if (mm->pax_flags & MF_PAX_RANDMMAP)
14046 + mm->mmap_base += mm->delta_mmap;
14047 +#endif
14048 +
14049 + mm->free_area_cache = mm->mmap_base;
14050 + mm->cached_hole_size = ~0UL;
14051 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
14052 + /*
14053 + * Restore the topdown base:
14054 + */
14055 + mm->mmap_base = base;
14056 + mm->free_area_cache = base;
14057 + mm->cached_hole_size = ~0UL;
14058 +
14059 + return addr;
14060 +}
14061 +
14062 /*
14063 * Do a system call from kernel instead of calling sys_execve so we
14064 * end up with proper pt_regs.
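Both allocators above replace the bare "!vma || addr + len <= vma->vm_start" test with check_heap_stack_gap(). The real helper is defined elsewhere in this patch; the following self-contained sketch only illustrates what such a gap check amounts to, with made-up names and an assumed 64KB guard kept below stack-style mappings.

/* Simplified sketch of a heap/stack gap check. The struct, the flag and the
 * 64KB guard size are illustrative assumptions, not the patch's actual
 * check_heap_stack_gap() implementation. */
#include <stdbool.h>
#include <stdio.h>

struct fake_vma {
        unsigned long vm_start;
        unsigned long vm_end;
        unsigned long vm_flags;
};

#define FAKE_VM_GROWSDOWN 0x1UL
#define GUARD_GAP         0x10000UL    /* assumed 64KB gap below a stack vma */

static bool gap_ok(const struct fake_vma *vma, unsigned long addr, unsigned long len)
{
        if (!vma)
                return true;                            /* nothing above: fits */
        if (addr + len > vma->vm_start)
                return false;                           /* overlaps next vma   */
        if (vma->vm_flags & FAKE_VM_GROWSDOWN)
                return addr + len + GUARD_GAP <= vma->vm_start;
        return true;
}

int main(void)
{
        struct fake_vma stack = { 0x7ffff000UL, 0x80000000UL, FAKE_VM_GROWSDOWN };

        printf("%d %d\n",
               gap_ok(&stack, 0x7fff0000UL, 0x1000),   /* too close to the stack */
               gap_ok(&stack, 0x70000000UL, 0x1000));  /* plenty of room         */
        return 0;
}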
14065 diff -urNp linux-2.6.35.4/arch/x86/kernel/sys_x86_64.c linux-2.6.35.4/arch/x86/kernel/sys_x86_64.c
14066 --- linux-2.6.35.4/arch/x86/kernel/sys_x86_64.c 2010-08-26 19:47:12.000000000 -0400
14067 +++ linux-2.6.35.4/arch/x86/kernel/sys_x86_64.c 2010-09-17 20:12:09.000000000 -0400
14068 @@ -32,8 +32,8 @@ out:
14069 return error;
14070 }
14071
14072 -static void find_start_end(unsigned long flags, unsigned long *begin,
14073 - unsigned long *end)
14074 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
14075 + unsigned long *begin, unsigned long *end)
14076 {
14077 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
14078 unsigned long new_begin;
14079 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long
14080 *begin = new_begin;
14081 }
14082 } else {
14083 - *begin = TASK_UNMAPPED_BASE;
14084 + *begin = mm->mmap_base;
14085 *end = TASK_SIZE;
14086 }
14087 }
14088 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
14089 if (flags & MAP_FIXED)
14090 return addr;
14091
14092 - find_start_end(flags, &begin, &end);
14093 + find_start_end(mm, flags, &begin, &end);
14094
14095 if (len > end)
14096 return -ENOMEM;
14097
14098 +#ifdef CONFIG_PAX_RANDMMAP
14099 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
14100 +#endif
14101 +
14102 if (addr) {
14103 addr = PAGE_ALIGN(addr);
14104 vma = find_vma(mm, addr);
14105 - if (end - len >= addr &&
14106 - (!vma || addr + len <= vma->vm_start))
14107 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
14108 return addr;
14109 }
14110 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
14111 @@ -106,7 +109,7 @@ full_search:
14112 }
14113 return -ENOMEM;
14114 }
14115 - if (!vma || addr + len <= vma->vm_start) {
14116 + if (check_heap_stack_gap(vma, addr, len)) {
14117 /*
14118 * Remember the place where we stopped the search:
14119 */
14120 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
14121 {
14122 struct vm_area_struct *vma;
14123 struct mm_struct *mm = current->mm;
14124 - unsigned long addr = addr0;
14125 + unsigned long base = mm->mmap_base, addr = addr0;
14126
14127 /* requested length too big for entire address space */
14128 if (len > TASK_SIZE)
14129 @@ -141,12 +144,15 @@ arch_get_unmapped_area_topdown(struct fi
14130 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
14131 goto bottomup;
14132
14133 +#ifdef CONFIG_PAX_RANDMMAP
14134 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
14135 +#endif
14136 +
14137 /* requesting a specific address */
14138 if (addr) {
14139 addr = PAGE_ALIGN(addr);
14140 vma = find_vma(mm, addr);
14141 - if (TASK_SIZE - len >= addr &&
14142 - (!vma || addr + len <= vma->vm_start))
14143 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
14144 return addr;
14145 }
14146
14147 @@ -162,7 +168,7 @@ arch_get_unmapped_area_topdown(struct fi
14148 /* make sure it can fit in the remaining address space */
14149 if (addr > len) {
14150 vma = find_vma(mm, addr-len);
14151 - if (!vma || addr <= vma->vm_start)
14152 + if (check_heap_stack_gap(vma, addr - len, len))
14153 /* remember the address as a hint for next time */
14154 return mm->free_area_cache = addr-len;
14155 }
14156 @@ -179,7 +185,7 @@ arch_get_unmapped_area_topdown(struct fi
14157 * return with success:
14158 */
14159 vma = find_vma(mm, addr);
14160 - if (!vma || addr+len <= vma->vm_start)
14161 + if (check_heap_stack_gap(vma, addr, len))
14162 /* remember the address as a hint for next time */
14163 return mm->free_area_cache = addr;
14164
14165 @@ -198,13 +204,21 @@ bottomup:
14166 * can happen with large stack limits and large mmap()
14167 * allocations.
14168 */
14169 + mm->mmap_base = TASK_UNMAPPED_BASE;
14170 +
14171 +#ifdef CONFIG_PAX_RANDMMAP
14172 + if (mm->pax_flags & MF_PAX_RANDMMAP)
14173 + mm->mmap_base += mm->delta_mmap;
14174 +#endif
14175 +
14176 + mm->free_area_cache = mm->mmap_base;
14177 mm->cached_hole_size = ~0UL;
14178 - mm->free_area_cache = TASK_UNMAPPED_BASE;
14179 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
14180 /*
14181 * Restore the topdown base:
14182 */
14183 - mm->free_area_cache = mm->mmap_base;
14184 + mm->mmap_base = base;
14185 + mm->free_area_cache = base;
14186 mm->cached_hole_size = ~0UL;
14187
14188 return addr;
14189 diff -urNp linux-2.6.35.4/arch/x86/kernel/time.c linux-2.6.35.4/arch/x86/kernel/time.c
14190 --- linux-2.6.35.4/arch/x86/kernel/time.c 2010-08-26 19:47:12.000000000 -0400
14191 +++ linux-2.6.35.4/arch/x86/kernel/time.c 2010-09-17 20:12:09.000000000 -0400
14192 @@ -26,17 +26,13 @@
14193 int timer_ack;
14194 #endif
14195
14196 -#ifdef CONFIG_X86_64
14197 -volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
14198 -#endif
14199 -
14200 unsigned long profile_pc(struct pt_regs *regs)
14201 {
14202 unsigned long pc = instruction_pointer(regs);
14203
14204 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
14205 + if (!user_mode(regs) && in_lock_functions(pc)) {
14206 #ifdef CONFIG_FRAME_POINTER
14207 - return *(unsigned long *)(regs->bp + sizeof(long));
14208 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
14209 #else
14210 unsigned long *sp =
14211 (unsigned long *)kernel_stack_pointer(regs);
14212 @@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
14213 * or above a saved flags. Eflags has bits 22-31 zero,
14214 * kernel addresses don't.
14215 */
14216 +
14217 +#ifdef CONFIG_PAX_KERNEXEC
14218 + return ktla_ktva(sp[0]);
14219 +#else
14220 if (sp[0] >> 22)
14221 return sp[0];
14222 if (sp[1] >> 22)
14223 return sp[1];
14224 #endif
14225 +
14226 +#endif
14227 }
14228 return pc;
14229 }
14230 diff -urNp linux-2.6.35.4/arch/x86/kernel/tls.c linux-2.6.35.4/arch/x86/kernel/tls.c
14231 --- linux-2.6.35.4/arch/x86/kernel/tls.c 2010-08-26 19:47:12.000000000 -0400
14232 +++ linux-2.6.35.4/arch/x86/kernel/tls.c 2010-09-17 20:12:09.000000000 -0400
14233 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
14234 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
14235 return -EINVAL;
14236
14237 +#ifdef CONFIG_PAX_SEGMEXEC
14238 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
14239 + return -EINVAL;
14240 +#endif
14241 +
14242 set_tls_desc(p, idx, &info, 1);
14243
14244 return 0;
14245 diff -urNp linux-2.6.35.4/arch/x86/kernel/trampoline_32.S linux-2.6.35.4/arch/x86/kernel/trampoline_32.S
14246 --- linux-2.6.35.4/arch/x86/kernel/trampoline_32.S 2010-08-26 19:47:12.000000000 -0400
14247 +++ linux-2.6.35.4/arch/x86/kernel/trampoline_32.S 2010-09-17 20:12:09.000000000 -0400
14248 @@ -32,6 +32,12 @@
14249 #include <asm/segment.h>
14250 #include <asm/page_types.h>
14251
14252 +#ifdef CONFIG_PAX_KERNEXEC
14253 +#define ta(X) (X)
14254 +#else
14255 +#define ta(X) ((X) - __PAGE_OFFSET)
14256 +#endif
14257 +
14258 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
14259 __CPUINITRODATA
14260 .code16
14261 @@ -60,7 +66,7 @@ r_base = .
14262 inc %ax # protected mode (PE) bit
14263 lmsw %ax # into protected mode
14264 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
14265 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
14266 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
14267
14268 # These need to be in the same 64K segment as the above;
14269 # hence we don't use the boot_gdt_descr defined in head.S
14270 diff -urNp linux-2.6.35.4/arch/x86/kernel/traps.c linux-2.6.35.4/arch/x86/kernel/traps.c
14271 --- linux-2.6.35.4/arch/x86/kernel/traps.c 2010-08-26 19:47:12.000000000 -0400
14272 +++ linux-2.6.35.4/arch/x86/kernel/traps.c 2010-09-17 20:12:09.000000000 -0400
14273 @@ -70,12 +70,6 @@ asmlinkage int system_call(void);
14274
14275 /* Do we ignore FPU interrupts ? */
14276 char ignore_fpu_irq;
14277 -
14278 -/*
14279 - * The IDT has to be page-aligned to simplify the Pentium
14280 - * F0 0F bug workaround.
14281 - */
14282 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
14283 #endif
14284
14285 DECLARE_BITMAP(used_vectors, NR_VECTORS);
14286 @@ -110,13 +104,13 @@ static inline void preempt_conditional_c
14287 }
14288
14289 static void __kprobes
14290 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
14291 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
14292 long error_code, siginfo_t *info)
14293 {
14294 struct task_struct *tsk = current;
14295
14296 #ifdef CONFIG_X86_32
14297 - if (regs->flags & X86_VM_MASK) {
14298 + if (v8086_mode(regs)) {
14299 /*
14300 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
14301 * On nmi (interrupt 2), do_trap should not be called.
14302 @@ -127,7 +121,7 @@ do_trap(int trapnr, int signr, char *str
14303 }
14304 #endif
14305
14306 - if (!user_mode(regs))
14307 + if (!user_mode_novm(regs))
14308 goto kernel_trap;
14309
14310 #ifdef CONFIG_X86_32
14311 @@ -150,7 +144,7 @@ trap_signal:
14312 printk_ratelimit()) {
14313 printk(KERN_INFO
14314 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
14315 - tsk->comm, tsk->pid, str,
14316 + tsk->comm, task_pid_nr(tsk), str,
14317 regs->ip, regs->sp, error_code);
14318 print_vma_addr(" in ", regs->ip);
14319 printk("\n");
14320 @@ -167,8 +161,20 @@ kernel_trap:
14321 if (!fixup_exception(regs)) {
14322 tsk->thread.error_code = error_code;
14323 tsk->thread.trap_no = trapnr;
14324 +
14325 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14326 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
14327 + str = "PAX: suspicious stack segment fault";
14328 +#endif
14329 +
14330 die(str, regs, error_code);
14331 }
14332 +
14333 +#ifdef CONFIG_PAX_REFCOUNT
14334 + if (trapnr == 4)
14335 + pax_report_refcount_overflow(regs);
14336 +#endif
14337 +
14338 return;
14339
14340 #ifdef CONFIG_X86_32
14341 @@ -257,14 +263,30 @@ do_general_protection(struct pt_regs *re
14342 conditional_sti(regs);
14343
14344 #ifdef CONFIG_X86_32
14345 - if (regs->flags & X86_VM_MASK)
14346 + if (v8086_mode(regs))
14347 goto gp_in_vm86;
14348 #endif
14349
14350 tsk = current;
14351 - if (!user_mode(regs))
14352 + if (!user_mode_novm(regs))
14353 goto gp_in_kernel;
14354
14355 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
14356 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
14357 + struct mm_struct *mm = tsk->mm;
14358 + unsigned long limit;
14359 +
14360 + down_write(&mm->mmap_sem);
14361 + limit = mm->context.user_cs_limit;
14362 + if (limit < TASK_SIZE) {
14363 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
14364 + up_write(&mm->mmap_sem);
14365 + return;
14366 + }
14367 + up_write(&mm->mmap_sem);
14368 + }
14369 +#endif
14370 +
14371 tsk->thread.error_code = error_code;
14372 tsk->thread.trap_no = 13;
14373
14374 @@ -297,6 +319,13 @@ gp_in_kernel:
14375 if (notify_die(DIE_GPF, "general protection fault", regs,
14376 error_code, 13, SIGSEGV) == NOTIFY_STOP)
14377 return;
14378 +
14379 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14380 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
14381 + die("PAX: suspicious general protection fault", regs, error_code);
14382 + else
14383 +#endif
14384 +
14385 die("general protection fault", regs, error_code);
14386 }
14387
14388 @@ -565,7 +594,7 @@ dotraplinkage void __kprobes do_debug(st
14389 /* It's safe to allow irq's after DR6 has been saved */
14390 preempt_conditional_sti(regs);
14391
14392 - if (regs->flags & X86_VM_MASK) {
14393 + if (v8086_mode(regs)) {
14394 handle_vm86_trap((struct kernel_vm86_regs *) regs,
14395 error_code, 1);
14396 return;
14397 @@ -578,7 +607,7 @@ dotraplinkage void __kprobes do_debug(st
14398 * We already checked v86 mode above, so we can check for kernel mode
14399 * by just checking the CPL of CS.
14400 */
14401 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
14402 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
14403 tsk->thread.debugreg6 &= ~DR_STEP;
14404 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
14405 regs->flags &= ~X86_EFLAGS_TF;
14406 @@ -607,7 +636,7 @@ void math_error(struct pt_regs *regs, in
14407 return;
14408 conditional_sti(regs);
14409
14410 - if (!user_mode_vm(regs))
14411 + if (!user_mode(regs))
14412 {
14413 if (!fixup_exception(regs)) {
14414 task->thread.error_code = error_code;
14415 diff -urNp linux-2.6.35.4/arch/x86/kernel/tsc.c linux-2.6.35.4/arch/x86/kernel/tsc.c
14416 --- linux-2.6.35.4/arch/x86/kernel/tsc.c 2010-08-26 19:47:12.000000000 -0400
14417 +++ linux-2.6.35.4/arch/x86/kernel/tsc.c 2010-09-17 20:12:09.000000000 -0400
14418 @@ -795,7 +795,7 @@ static struct dmi_system_id __initdata b
14419 DMI_MATCH(DMI_BOARD_NAME, "2635FA0"),
14420 },
14421 },
14422 - {}
14423 + { NULL, NULL, {{0, {0}}}, NULL}
14424 };
14425
14426 static void __init check_system_tsc_reliable(void)
14427 diff -urNp linux-2.6.35.4/arch/x86/kernel/vm86_32.c linux-2.6.35.4/arch/x86/kernel/vm86_32.c
14428 --- linux-2.6.35.4/arch/x86/kernel/vm86_32.c 2010-08-26 19:47:12.000000000 -0400
14429 +++ linux-2.6.35.4/arch/x86/kernel/vm86_32.c 2010-09-17 20:12:37.000000000 -0400
14430 @@ -41,6 +41,7 @@
14431 #include <linux/ptrace.h>
14432 #include <linux/audit.h>
14433 #include <linux/stddef.h>
14434 +#include <linux/grsecurity.h>
14435
14436 #include <asm/uaccess.h>
14437 #include <asm/io.h>
14438 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
14439 do_exit(SIGSEGV);
14440 }
14441
14442 - tss = &per_cpu(init_tss, get_cpu());
14443 + tss = init_tss + get_cpu();
14444 current->thread.sp0 = current->thread.saved_sp0;
14445 current->thread.sysenter_cs = __KERNEL_CS;
14446 load_sp0(tss, &current->thread);
14447 @@ -207,6 +208,13 @@ int sys_vm86old(struct vm86_struct __use
14448 struct task_struct *tsk;
14449 int tmp, ret = -EPERM;
14450
14451 +#ifdef CONFIG_GRKERNSEC_VM86
14452 + if (!capable(CAP_SYS_RAWIO)) {
14453 + gr_handle_vm86();
14454 + goto out;
14455 + }
14456 +#endif
14457 +
14458 tsk = current;
14459 if (tsk->thread.saved_sp0)
14460 goto out;
14461 @@ -237,6 +245,14 @@ int sys_vm86(unsigned long cmd, unsigned
14462 int tmp, ret;
14463 struct vm86plus_struct __user *v86;
14464
14465 +#ifdef CONFIG_GRKERNSEC_VM86
14466 + if (!capable(CAP_SYS_RAWIO)) {
14467 + gr_handle_vm86();
14468 + ret = -EPERM;
14469 + goto out;
14470 + }
14471 +#endif
14472 +
14473 tsk = current;
14474 switch (cmd) {
14475 case VM86_REQUEST_IRQ:
14476 @@ -323,7 +339,7 @@ static void do_sys_vm86(struct kernel_vm
14477 tsk->thread.saved_fs = info->regs32->fs;
14478 tsk->thread.saved_gs = get_user_gs(info->regs32);
14479
14480 - tss = &per_cpu(init_tss, get_cpu());
14481 + tss = init_tss + get_cpu();
14482 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
14483 if (cpu_has_sep)
14484 tsk->thread.sysenter_cs = 0;
14485 @@ -528,7 +544,7 @@ static void do_int(struct kernel_vm86_re
14486 goto cannot_handle;
14487 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
14488 goto cannot_handle;
14489 - intr_ptr = (unsigned long __user *) (i << 2);
14490 + intr_ptr = (__force unsigned long __user *) (i << 2);
14491 if (get_user(segoffs, intr_ptr))
14492 goto cannot_handle;
14493 if ((segoffs >> 16) == BIOSSEG)
14494 diff -urNp linux-2.6.35.4/arch/x86/kernel/vmi_32.c linux-2.6.35.4/arch/x86/kernel/vmi_32.c
14495 --- linux-2.6.35.4/arch/x86/kernel/vmi_32.c 2010-08-26 19:47:12.000000000 -0400
14496 +++ linux-2.6.35.4/arch/x86/kernel/vmi_32.c 2010-09-17 20:12:09.000000000 -0400
14497 @@ -46,12 +46,17 @@ typedef u32 __attribute__((regparm(1)))
14498 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
14499
14500 #define call_vrom_func(rom,func) \
14501 - (((VROMFUNC *)(rom->func))())
14502 + (((VROMFUNC *)(ktva_ktla(rom.func)))())
14503
14504 #define call_vrom_long_func(rom,func,arg) \
14505 - (((VROMLONGFUNC *)(rom->func)) (arg))
14506 +({\
14507 + u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
14508 + struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
14509 + __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
14510 + __reloc;\
14511 +})
14512
14513 -static struct vrom_header *vmi_rom;
14514 +static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
14515 static int disable_pge;
14516 static int disable_pse;
14517 static int disable_sep;
14518 @@ -78,10 +83,10 @@ static struct {
14519 void (*set_initial_ap_state)(int, int);
14520 void (*halt)(void);
14521 void (*set_lazy_mode)(int mode);
14522 -} vmi_ops;
14523 +} vmi_ops __read_only;
14524
14525 /* Cached VMI operations */
14526 -struct vmi_timer_ops vmi_timer_ops;
14527 +struct vmi_timer_ops vmi_timer_ops __read_only;
14528
14529 /*
14530 * VMI patching routines.
14531 @@ -96,7 +101,7 @@ struct vmi_timer_ops vmi_timer_ops;
14532 static inline void patch_offset(void *insnbuf,
14533 unsigned long ip, unsigned long dest)
14534 {
14535 - *(unsigned long *)(insnbuf+1) = dest-ip-5;
14536 + *(unsigned long *)(insnbuf+1) = dest-ip-5;
14537 }
14538
14539 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
14540 @@ -104,6 +109,7 @@ static unsigned patch_internal(int call,
14541 {
14542 u64 reloc;
14543 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
14544 +
14545 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
14546 switch(rel->type) {
14547 case VMI_RELOCATION_CALL_REL:
14548 @@ -382,13 +388,13 @@ static void vmi_set_pud(pud_t *pudp, pud
14549
14550 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
14551 {
14552 - const pte_t pte = { .pte = 0 };
14553 + const pte_t pte = __pte(0ULL);
14554 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
14555 }
14556
14557 static void vmi_pmd_clear(pmd_t *pmd)
14558 {
14559 - const pte_t pte = { .pte = 0 };
14560 + const pte_t pte = __pte(0ULL);
14561 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
14562 }
14563 #endif
14564 @@ -416,8 +422,8 @@ vmi_startup_ipi_hook(int phys_apicid, un
14565 ap.ss = __KERNEL_DS;
14566 ap.esp = (unsigned long) start_esp;
14567
14568 - ap.ds = __USER_DS;
14569 - ap.es = __USER_DS;
14570 + ap.ds = __KERNEL_DS;
14571 + ap.es = __KERNEL_DS;
14572 ap.fs = __KERNEL_PERCPU;
14573 ap.gs = __KERNEL_STACK_CANARY;
14574
14575 @@ -464,6 +470,18 @@ static void vmi_leave_lazy_mmu(void)
14576 paravirt_leave_lazy_mmu();
14577 }
14578
14579 +#ifdef CONFIG_PAX_KERNEXEC
14580 +static unsigned long vmi_pax_open_kernel(void)
14581 +{
14582 + return 0;
14583 +}
14584 +
14585 +static unsigned long vmi_pax_close_kernel(void)
14586 +{
14587 + return 0;
14588 +}
14589 +#endif
14590 +
14591 static inline int __init check_vmi_rom(struct vrom_header *rom)
14592 {
14593 struct pci_header *pci;
14594 @@ -476,6 +494,10 @@ static inline int __init check_vmi_rom(s
14595 return 0;
14596 if (rom->vrom_signature != VMI_SIGNATURE)
14597 return 0;
14598 + if (rom->rom_length * 512 > sizeof(*rom)) {
14599 + printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
14600 + return 0;
14601 + }
14602 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
14603 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
14604 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
14605 @@ -540,7 +562,7 @@ static inline int __init probe_vmi_rom(v
14606 struct vrom_header *romstart;
14607 romstart = (struct vrom_header *)isa_bus_to_virt(base);
14608 if (check_vmi_rom(romstart)) {
14609 - vmi_rom = romstart;
14610 + vmi_rom = *romstart;
14611 return 1;
14612 }
14613 }
14614 @@ -816,6 +838,11 @@ static inline int __init activate_vmi(vo
14615
14616 para_fill(pv_irq_ops.safe_halt, Halt);
14617
14618 +#ifdef CONFIG_PAX_KERNEXEC
14619 + pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
14620 + pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
14621 +#endif
14622 +
14623 /*
14624 * Alternative instruction rewriting doesn't happen soon enough
14625 * to convert VMI_IRET to a call instead of a jump; so we have
14626 @@ -833,16 +860,16 @@ static inline int __init activate_vmi(vo
14627
14628 void __init vmi_init(void)
14629 {
14630 - if (!vmi_rom)
14631 + if (!vmi_rom.rom_signature)
14632 probe_vmi_rom();
14633 else
14634 - check_vmi_rom(vmi_rom);
14635 + check_vmi_rom(&vmi_rom);
14636
14637 /* In case probing for or validating the ROM failed, basil */
14638 - if (!vmi_rom)
14639 + if (!vmi_rom.rom_signature)
14640 return;
14641
14642 - reserve_top_address(-vmi_rom->virtual_top);
14643 + reserve_top_address(-vmi_rom.virtual_top);
14644
14645 #ifdef CONFIG_X86_IO_APIC
14646 /* This is virtual hardware; timer routing is wired correctly */
14647 @@ -854,7 +881,7 @@ void __init vmi_activate(void)
14648 {
14649 unsigned long flags;
14650
14651 - if (!vmi_rom)
14652 + if (!vmi_rom.rom_signature)
14653 return;
14654
14655 local_irq_save(flags);
14656 diff -urNp linux-2.6.35.4/arch/x86/kernel/vmlinux.lds.S linux-2.6.35.4/arch/x86/kernel/vmlinux.lds.S
14657 --- linux-2.6.35.4/arch/x86/kernel/vmlinux.lds.S 2010-08-26 19:47:12.000000000 -0400
14658 +++ linux-2.6.35.4/arch/x86/kernel/vmlinux.lds.S 2010-09-17 20:12:09.000000000 -0400
14659 @@ -26,6 +26,13 @@
14660 #include <asm/page_types.h>
14661 #include <asm/cache.h>
14662 #include <asm/boot.h>
14663 +#include <asm/segment.h>
14664 +
14665 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14666 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
14667 +#else
14668 +#define __KERNEL_TEXT_OFFSET 0
14669 +#endif
14670
14671 #undef i386 /* in case the preprocessor is a 32bit one */
14672
14673 @@ -34,13 +41,13 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF
14674 #ifdef CONFIG_X86_32
14675 OUTPUT_ARCH(i386)
14676 ENTRY(phys_startup_32)
14677 -jiffies = jiffies_64;
14678 #else
14679 OUTPUT_ARCH(i386:x86-64)
14680 ENTRY(phys_startup_64)
14681 -jiffies_64 = jiffies;
14682 #endif
14683
14684 +jiffies = jiffies_64;
14685 +
14686 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
14687 /*
14688 * On 64-bit, align RODATA to 2MB so that even with CONFIG_DEBUG_RODATA
14689 @@ -69,31 +76,46 @@ jiffies_64 = jiffies;
14690
14691 PHDRS {
14692 text PT_LOAD FLAGS(5); /* R_E */
14693 - data PT_LOAD FLAGS(7); /* RWE */
14694 +#ifdef CONFIG_X86_32
14695 + module PT_LOAD FLAGS(5); /* R_E */
14696 +#endif
14697 +#ifdef CONFIG_XEN
14698 + rodata PT_LOAD FLAGS(5); /* R_E */
14699 +#else
14700 + rodata PT_LOAD FLAGS(4); /* R__ */
14701 +#endif
14702 + data PT_LOAD FLAGS(6); /* RW_ */
14703 #ifdef CONFIG_X86_64
14704 user PT_LOAD FLAGS(5); /* R_E */
14705 +#endif
14706 + init.begin PT_LOAD FLAGS(6); /* RW_ */
14707 #ifdef CONFIG_SMP
14708 percpu PT_LOAD FLAGS(6); /* RW_ */
14709 #endif
14710 + text.init PT_LOAD FLAGS(5); /* R_E */
14711 + text.exit PT_LOAD FLAGS(5); /* R_E */
14712 init PT_LOAD FLAGS(7); /* RWE */
14713 -#endif
14714 note PT_NOTE FLAGS(0); /* ___ */
14715 }
14716
14717 SECTIONS
14718 {
14719 #ifdef CONFIG_X86_32
14720 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
14721 - phys_startup_32 = startup_32 - LOAD_OFFSET;
14722 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
14723 #else
14724 - . = __START_KERNEL;
14725 - phys_startup_64 = startup_64 - LOAD_OFFSET;
14726 + . = __START_KERNEL;
14727 #endif
14728
14729 /* Text and read-only data */
14730 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
14731 - _text = .;
14732 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
14733 /* bootstrapping code */
14734 +#ifdef CONFIG_X86_32
14735 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
14736 +#else
14737 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
14738 +#endif
14739 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
14740 + _text = .;
14741 HEAD_TEXT
14742 #ifdef CONFIG_X86_32
14743 . = ALIGN(PAGE_SIZE);
14744 @@ -108,13 +130,50 @@ SECTIONS
14745 IRQENTRY_TEXT
14746 *(.fixup)
14747 *(.gnu.warning)
14748 - /* End of text section */
14749 - _etext = .;
14750 } :text = 0x9090
14751
14752 - NOTES :text :note
14753 + . += __KERNEL_TEXT_OFFSET;
14754 +
14755 +#ifdef CONFIG_X86_32
14756 + . = ALIGN(PAGE_SIZE);
14757 + .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
14758 + *(.vmi.rom)
14759 + } :module
14760 +
14761 + . = ALIGN(PAGE_SIZE);
14762 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
14763 +
14764 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
14765 + MODULES_EXEC_VADDR = .;
14766 + BYTE(0)
14767 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
14768 + . = ALIGN(HPAGE_SIZE);
14769 + MODULES_EXEC_END = . - 1;
14770 +#endif
14771 +
14772 + } :module
14773 +#endif
14774 +
14775 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
14776 + /* End of text section */
14777 + _etext = . - __KERNEL_TEXT_OFFSET;
14778 + }
14779 +
14780 +#ifdef CONFIG_X86_32
14781 + . = ALIGN(PAGE_SIZE);
14782 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
14783 + *(.idt)
14784 + . = ALIGN(PAGE_SIZE);
14785 + *(.empty_zero_page)
14786 + *(.swapper_pg_pmd)
14787 + *(.swapper_pg_dir)
14788 + } :rodata
14789 +#endif
14790 +
14791 + . = ALIGN(PAGE_SIZE);
14792 + NOTES :rodata :note
14793
14794 - EXCEPTION_TABLE(16) :text = 0x9090
14795 + EXCEPTION_TABLE(16) :rodata
14796
14797 X64_ALIGN_DEBUG_RODATA_BEGIN
14798 RO_DATA(PAGE_SIZE)
14799 @@ -122,16 +181,20 @@ SECTIONS
14800
14801 /* Data */
14802 .data : AT(ADDR(.data) - LOAD_OFFSET) {
14803 +
14804 +#ifdef CONFIG_PAX_KERNEXEC
14805 + . = ALIGN(HPAGE_SIZE);
14806 +#else
14807 + . = ALIGN(PAGE_SIZE);
14808 +#endif
14809 +
14810 /* Start of data section */
14811 _sdata = .;
14812
14813 /* init_task */
14814 INIT_TASK_DATA(THREAD_SIZE)
14815
14816 -#ifdef CONFIG_X86_32
14817 - /* 32 bit has nosave before _edata */
14818 NOSAVE_DATA
14819 -#endif
14820
14821 PAGE_ALIGNED_DATA(PAGE_SIZE)
14822
14823 @@ -194,12 +257,6 @@ SECTIONS
14824 }
14825 vgetcpu_mode = VVIRT(.vgetcpu_mode);
14826
14827 - . = ALIGN(L1_CACHE_BYTES);
14828 - .jiffies : AT(VLOAD(.jiffies)) {
14829 - *(.jiffies)
14830 - }
14831 - jiffies = VVIRT(.jiffies);
14832 -
14833 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
14834 *(.vsyscall_3)
14835 }
14836 @@ -215,12 +272,19 @@ SECTIONS
14837 #endif /* CONFIG_X86_64 */
14838
14839 /* Init code and data - will be freed after init */
14840 - . = ALIGN(PAGE_SIZE);
14841 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
14842 + BYTE(0)
14843 +
14844 +#ifdef CONFIG_PAX_KERNEXEC
14845 + . = ALIGN(HPAGE_SIZE);
14846 +#else
14847 + . = ALIGN(PAGE_SIZE);
14848 +#endif
14849 +
14850 __init_begin = .; /* paired with __init_end */
14851 - }
14852 + } :init.begin
14853
14854 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
14855 +#ifdef CONFIG_SMP
14856 /*
14857 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
14858 * output PHDR, so the next output section - .init.text - should
14859 @@ -229,12 +293,27 @@ SECTIONS
14860 PERCPU_VADDR(0, :percpu)
14861 #endif
14862
14863 - INIT_TEXT_SECTION(PAGE_SIZE)
14864 -#ifdef CONFIG_X86_64
14865 - :init
14866 -#endif
14867 + . = ALIGN(PAGE_SIZE);
14868 + init_begin = .;
14869 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
14870 + VMLINUX_SYMBOL(_sinittext) = .;
14871 + INIT_TEXT
14872 + VMLINUX_SYMBOL(_einittext) = .;
14873 + . = ALIGN(PAGE_SIZE);
14874 + } :text.init
14875 +
14876 + /*
14877 + * .exit.text is discard at runtime, not link time, to deal with
14878 + * references from .altinstructions and .eh_frame
14879 + */
14880 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
14881 + EXIT_TEXT
14882 + . = ALIGN(16);
14883 + } :text.exit
14884 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
14885
14886 - INIT_DATA_SECTION(16)
14887 + . = ALIGN(PAGE_SIZE);
14888 + INIT_DATA_SECTION(16) :init
14889
14890 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
14891 __x86_cpu_dev_start = .;
14892 @@ -260,19 +339,11 @@ SECTIONS
14893 *(.altinstr_replacement)
14894 }
14895
14896 - /*
14897 - * .exit.text is discard at runtime, not link time, to deal with
14898 - * references from .altinstructions and .eh_frame
14899 - */
14900 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
14901 - EXIT_TEXT
14902 - }
14903 -
14904 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
14905 EXIT_DATA
14906 }
14907
14908 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
14909 +#ifndef CONFIG_SMP
14910 PERCPU(PAGE_SIZE)
14911 #endif
14912
14913 @@ -291,16 +362,10 @@ SECTIONS
14914 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
14915 __smp_locks = .;
14916 *(.smp_locks)
14917 - . = ALIGN(PAGE_SIZE);
14918 __smp_locks_end = .;
14919 + . = ALIGN(PAGE_SIZE);
14920 }
14921
14922 -#ifdef CONFIG_X86_64
14923 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
14924 - NOSAVE_DATA
14925 - }
14926 -#endif
14927 -
14928 /* BSS */
14929 . = ALIGN(PAGE_SIZE);
14930 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
14931 @@ -316,6 +381,7 @@ SECTIONS
14932 __brk_base = .;
14933 . += 64 * 1024; /* 64k alignment slop space */
14934 *(.brk_reservation) /* areas brk users have reserved */
14935 + . = ALIGN(HPAGE_SIZE);
14936 __brk_limit = .;
14937 }
14938
14939 @@ -342,13 +408,12 @@ SECTIONS
14940 * for the boot processor.
14941 */
14942 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
14943 -INIT_PER_CPU(gdt_page);
14944 INIT_PER_CPU(irq_stack_union);
14945
14946 /*
14947 * Build-time check on the image size:
14948 */
14949 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
14950 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
14951 "kernel image bigger than KERNEL_IMAGE_SIZE");
14952
14953 #ifdef CONFIG_SMP
14954 diff -urNp linux-2.6.35.4/arch/x86/kernel/vsyscall_64.c linux-2.6.35.4/arch/x86/kernel/vsyscall_64.c
14955 --- linux-2.6.35.4/arch/x86/kernel/vsyscall_64.c 2010-08-26 19:47:12.000000000 -0400
14956 +++ linux-2.6.35.4/arch/x86/kernel/vsyscall_64.c 2010-09-17 20:12:09.000000000 -0400
14957 @@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wa
14958
14959 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
14960 /* copy vsyscall data */
14961 + strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
14962 vsyscall_gtod_data.clock.vread = clock->vread;
14963 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
14964 vsyscall_gtod_data.clock.mask = clock->mask;
14965 @@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, s
14966 We do this here because otherwise user space would do it on
14967 its own in a likely inferior way (no access to jiffies).
14968 If you don't like it pass NULL. */
14969 - if (tcache && tcache->blob[0] == (j = __jiffies)) {
14970 + if (tcache && tcache->blob[0] == (j = jiffies)) {
14971 p = tcache->blob[1];
14972 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
14973 /* Load per CPU data from RDTSCP */
14974 diff -urNp linux-2.6.35.4/arch/x86/kernel/x8664_ksyms_64.c linux-2.6.35.4/arch/x86/kernel/x8664_ksyms_64.c
14975 --- linux-2.6.35.4/arch/x86/kernel/x8664_ksyms_64.c 2010-08-26 19:47:12.000000000 -0400
14976 +++ linux-2.6.35.4/arch/x86/kernel/x8664_ksyms_64.c 2010-09-17 20:12:09.000000000 -0400
14977 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
14978 EXPORT_SYMBOL(copy_user_generic_string);
14979 EXPORT_SYMBOL(copy_user_generic_unrolled);
14980 EXPORT_SYMBOL(__copy_user_nocache);
14981 -EXPORT_SYMBOL(_copy_from_user);
14982 -EXPORT_SYMBOL(_copy_to_user);
14983
14984 EXPORT_SYMBOL(copy_page);
14985 EXPORT_SYMBOL(clear_page);
14986 diff -urNp linux-2.6.35.4/arch/x86/kernel/xsave.c linux-2.6.35.4/arch/x86/kernel/xsave.c
14987 --- linux-2.6.35.4/arch/x86/kernel/xsave.c 2010-08-26 19:47:12.000000000 -0400
14988 +++ linux-2.6.35.4/arch/x86/kernel/xsave.c 2010-09-17 20:12:09.000000000 -0400
14989 @@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_
14990 fx_sw_user->xstate_size > fx_sw_user->extended_size)
14991 return -1;
14992
14993 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
14994 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
14995 fx_sw_user->extended_size -
14996 FP_XSTATE_MAGIC2_SIZE));
14997 /*
14998 @@ -196,7 +196,7 @@ fx_only:
14999 * the other extended state.
15000 */
15001 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
15002 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
15003 + return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
15004 }
15005
15006 /*
15007 @@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf
15008 if (use_xsave())
15009 err = restore_user_xstate(buf);
15010 else
15011 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
15012 + err = fxrstor_checking((struct i387_fxsave_struct __user *)
15013 buf);
15014 if (unlikely(err)) {
15015 /*
15016 diff -urNp linux-2.6.35.4/arch/x86/kvm/emulate.c linux-2.6.35.4/arch/x86/kvm/emulate.c
15017 --- linux-2.6.35.4/arch/x86/kvm/emulate.c 2010-08-26 19:47:12.000000000 -0400
15018 +++ linux-2.6.35.4/arch/x86/kvm/emulate.c 2010-09-17 20:12:09.000000000 -0400
15019 @@ -88,11 +88,11 @@
15020 #define Src2CL (1<<29)
15021 #define Src2ImmByte (2<<29)
15022 #define Src2One (3<<29)
15023 -#define Src2Imm16 (4<<29)
15024 -#define Src2Mem16 (5<<29) /* Used for Ep encoding. First argument has to be
15025 +#define Src2Imm16 (4U<<29)
15026 +#define Src2Mem16 (5U<<29) /* Used for Ep encoding. First argument has to be
15027 in memory and second argument is located
15028 immediately after the first one in memory. */
15029 -#define Src2Mask (7<<29)
15030 +#define Src2Mask (7U<<29)
15031
15032 enum {
15033 Group1_80, Group1_81, Group1_82, Group1_83,
15034 @@ -446,6 +446,7 @@ static u32 group2_table[] = {
15035
15036 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
15037 do { \
15038 + unsigned long _tmp; \
15039 __asm__ __volatile__ ( \
15040 _PRE_EFLAGS("0", "4", "2") \
15041 _op _suffix " %"_x"3,%1; " \
15042 @@ -459,8 +460,6 @@ static u32 group2_table[] = {
15043 /* Raw emulation: instruction has two explicit operands. */
15044 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
15045 do { \
15046 - unsigned long _tmp; \
15047 - \
15048 switch ((_dst).bytes) { \
15049 case 2: \
15050 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
15051 @@ -476,7 +475,6 @@ static u32 group2_table[] = {
15052
15053 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
15054 do { \
15055 - unsigned long _tmp; \
15056 switch ((_dst).bytes) { \
15057 case 1: \
15058 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
15059 diff -urNp linux-2.6.35.4/arch/x86/kvm/lapic.c linux-2.6.35.4/arch/x86/kvm/lapic.c
15060 --- linux-2.6.35.4/arch/x86/kvm/lapic.c 2010-08-26 19:47:12.000000000 -0400
15061 +++ linux-2.6.35.4/arch/x86/kvm/lapic.c 2010-09-17 20:12:09.000000000 -0400
15062 @@ -52,7 +52,7 @@
15063 #define APIC_BUS_CYCLE_NS 1
15064
15065 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
15066 -#define apic_debug(fmt, arg...)
15067 +#define apic_debug(fmt, arg...) do {} while (0)
15068
15069 #define APIC_LVT_NUM 6
15070 /* 14 is the version for Xeon and Pentium 8.4.8*/
15071 diff -urNp linux-2.6.35.4/arch/x86/kvm/svm.c linux-2.6.35.4/arch/x86/kvm/svm.c
15072 --- linux-2.6.35.4/arch/x86/kvm/svm.c 2010-08-26 19:47:12.000000000 -0400
15073 +++ linux-2.6.35.4/arch/x86/kvm/svm.c 2010-09-17 20:12:09.000000000 -0400
15074 @@ -2796,7 +2796,11 @@ static void reload_tss(struct kvm_vcpu *
15075 int cpu = raw_smp_processor_id();
15076
15077 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
15078 +
15079 + pax_open_kernel();
15080 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
15081 + pax_close_kernel();
15082 +
15083 load_TR_desc();
15084 }
15085
15086 @@ -3337,7 +3341,7 @@ static void svm_fpu_deactivate(struct kv
15087 update_cr0_intercept(svm);
15088 }
15089
15090 -static struct kvm_x86_ops svm_x86_ops = {
15091 +static const struct kvm_x86_ops svm_x86_ops = {
15092 .cpu_has_kvm_support = has_svm,
15093 .disabled_by_bios = is_disabled,
15094 .hardware_setup = svm_hardware_setup,
15095 diff -urNp linux-2.6.35.4/arch/x86/kvm/vmx.c linux-2.6.35.4/arch/x86/kvm/vmx.c
15096 --- linux-2.6.35.4/arch/x86/kvm/vmx.c 2010-08-26 19:47:12.000000000 -0400
15097 +++ linux-2.6.35.4/arch/x86/kvm/vmx.c 2010-09-17 20:12:09.000000000 -0400
15098 @@ -653,7 +653,11 @@ static void reload_tss(void)
15099
15100 native_store_gdt(&gdt);
15101 descs = (void *)gdt.address;
15102 +
15103 + pax_open_kernel();
15104 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
15105 + pax_close_kernel();
15106 +
15107 load_TR_desc();
15108 }
15109
15110 @@ -1550,8 +1554,11 @@ static __init int hardware_setup(void)
15111 if (!cpu_has_vmx_flexpriority())
15112 flexpriority_enabled = 0;
15113
15114 - if (!cpu_has_vmx_tpr_shadow())
15115 - kvm_x86_ops->update_cr8_intercept = NULL;
15116 + if (!cpu_has_vmx_tpr_shadow()) {
15117 + pax_open_kernel();
15118 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
15119 + pax_close_kernel();
15120 + }
15121
15122 if (enable_ept && !cpu_has_vmx_ept_2m_page())
15123 kvm_disable_largepages();
15124 @@ -2533,7 +2540,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
15125 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
15126
15127 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
15128 - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
15129 + vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
15130 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
15131 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
15132 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
15133 @@ -3909,6 +3916,12 @@ static void vmx_vcpu_run(struct kvm_vcpu
15134 "jmp .Lkvm_vmx_return \n\t"
15135 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
15136 ".Lkvm_vmx_return: "
15137 +
15138 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15139 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
15140 + ".Lkvm_vmx_return2: "
15141 +#endif
15142 +
15143 /* Save guest registers, load host registers, keep flags */
15144 "xchg %0, (%%"R"sp) \n\t"
15145 "mov %%"R"ax, %c[rax](%0) \n\t"
15146 @@ -3955,8 +3968,13 @@ static void vmx_vcpu_run(struct kvm_vcpu
15147 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
15148 #endif
15149 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
15150 +
15151 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15152 + ,[cs]"i"(__KERNEL_CS)
15153 +#endif
15154 +
15155 : "cc", "memory"
15156 - , R"bx", R"di", R"si"
15157 + , R"ax", R"bx", R"di", R"si"
15158 #ifdef CONFIG_X86_64
15159 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
15160 #endif
15161 @@ -3970,7 +3988,7 @@ static void vmx_vcpu_run(struct kvm_vcpu
15162 if (vmx->rmode.irq.pending)
15163 fixup_rmode_irq(vmx);
15164
15165 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
15166 + asm("mov %0, %%ds; mov %0, %%es" : : "r"(__KERNEL_DS));
15167 vmx->launched = 1;
15168
15169 vmx_complete_interrupts(vmx);
15170 @@ -4191,7 +4209,7 @@ static void vmx_set_supported_cpuid(u32
15171 {
15172 }
15173
15174 -static struct kvm_x86_ops vmx_x86_ops = {
15175 +static const struct kvm_x86_ops vmx_x86_ops = {
15176 .cpu_has_kvm_support = cpu_has_kvm_support,
15177 .disabled_by_bios = vmx_disabled_by_bios,
15178 .hardware_setup = hardware_setup,
15179 diff -urNp linux-2.6.35.4/arch/x86/kvm/x86.c linux-2.6.35.4/arch/x86/kvm/x86.c
15180 --- linux-2.6.35.4/arch/x86/kvm/x86.c 2010-08-26 19:47:12.000000000 -0400
15181 +++ linux-2.6.35.4/arch/x86/kvm/x86.c 2010-09-17 20:12:09.000000000 -0400
15182 @@ -86,7 +86,7 @@ static void update_cr8_intercept(struct
15183 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
15184 struct kvm_cpuid_entry2 __user *entries);
15185
15186 -struct kvm_x86_ops *kvm_x86_ops;
15187 +const struct kvm_x86_ops *kvm_x86_ops;
15188 EXPORT_SYMBOL_GPL(kvm_x86_ops);
15189
15190 int ignore_msrs = 0;
15191 @@ -112,38 +112,38 @@ static struct kvm_shared_msrs_global __r
15192 static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs);
15193
15194 struct kvm_stats_debugfs_item debugfs_entries[] = {
15195 - { "pf_fixed", VCPU_STAT(pf_fixed) },
15196 - { "pf_guest", VCPU_STAT(pf_guest) },
15197 - { "tlb_flush", VCPU_STAT(tlb_flush) },
15198 - { "invlpg", VCPU_STAT(invlpg) },
15199 - { "exits", VCPU_STAT(exits) },
15200 - { "io_exits", VCPU_STAT(io_exits) },
15201 - { "mmio_exits", VCPU_STAT(mmio_exits) },
15202 - { "signal_exits", VCPU_STAT(signal_exits) },
15203 - { "irq_window", VCPU_STAT(irq_window_exits) },
15204 - { "nmi_window", VCPU_STAT(nmi_window_exits) },
15205 - { "halt_exits", VCPU_STAT(halt_exits) },
15206 - { "halt_wakeup", VCPU_STAT(halt_wakeup) },
15207 - { "hypercalls", VCPU_STAT(hypercalls) },
15208 - { "request_irq", VCPU_STAT(request_irq_exits) },
15209 - { "irq_exits", VCPU_STAT(irq_exits) },
15210 - { "host_state_reload", VCPU_STAT(host_state_reload) },
15211 - { "efer_reload", VCPU_STAT(efer_reload) },
15212 - { "fpu_reload", VCPU_STAT(fpu_reload) },
15213 - { "insn_emulation", VCPU_STAT(insn_emulation) },
15214 - { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
15215 - { "irq_injections", VCPU_STAT(irq_injections) },
15216 - { "nmi_injections", VCPU_STAT(nmi_injections) },
15217 - { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
15218 - { "mmu_pte_write", VM_STAT(mmu_pte_write) },
15219 - { "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
15220 - { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
15221 - { "mmu_flooded", VM_STAT(mmu_flooded) },
15222 - { "mmu_recycled", VM_STAT(mmu_recycled) },
15223 - { "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
15224 - { "mmu_unsync", VM_STAT(mmu_unsync) },
15225 - { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
15226 - { "largepages", VM_STAT(lpages) },
15227 + { "pf_fixed", VCPU_STAT(pf_fixed), NULL },
15228 + { "pf_guest", VCPU_STAT(pf_guest), NULL },
15229 + { "tlb_flush", VCPU_STAT(tlb_flush), NULL },
15230 + { "invlpg", VCPU_STAT(invlpg), NULL },
15231 + { "exits", VCPU_STAT(exits), NULL },
15232 + { "io_exits", VCPU_STAT(io_exits), NULL },
15233 + { "mmio_exits", VCPU_STAT(mmio_exits), NULL },
15234 + { "signal_exits", VCPU_STAT(signal_exits), NULL },
15235 + { "irq_window", VCPU_STAT(irq_window_exits), NULL },
15236 + { "nmi_window", VCPU_STAT(nmi_window_exits), NULL },
15237 + { "halt_exits", VCPU_STAT(halt_exits), NULL },
15238 + { "halt_wakeup", VCPU_STAT(halt_wakeup), NULL },
15239 + { "hypercalls", VCPU_STAT(hypercalls), NULL },
15240 + { "request_irq", VCPU_STAT(request_irq_exits), NULL },
15241 + { "irq_exits", VCPU_STAT(irq_exits), NULL },
15242 + { "host_state_reload", VCPU_STAT(host_state_reload), NULL },
15243 + { "efer_reload", VCPU_STAT(efer_reload), NULL },
15244 + { "fpu_reload", VCPU_STAT(fpu_reload), NULL },
15245 + { "insn_emulation", VCPU_STAT(insn_emulation), NULL },
15246 + { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail), NULL },
15247 + { "irq_injections", VCPU_STAT(irq_injections), NULL },
15248 + { "nmi_injections", VCPU_STAT(nmi_injections), NULL },
15249 + { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped), NULL },
15250 + { "mmu_pte_write", VM_STAT(mmu_pte_write), NULL },
15251 + { "mmu_pte_updated", VM_STAT(mmu_pte_updated), NULL },
15252 + { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped), NULL },
15253 + { "mmu_flooded", VM_STAT(mmu_flooded), NULL },
15254 + { "mmu_recycled", VM_STAT(mmu_recycled), NULL },
15255 + { "mmu_cache_miss", VM_STAT(mmu_cache_miss), NULL },
15256 + { "mmu_unsync", VM_STAT(mmu_unsync), NULL },
15257 + { "remote_tlb_flush", VM_STAT(remote_tlb_flush), NULL },
15258 + { "largepages", VM_STAT(lpages), NULL },
15259 { NULL }
15260 };
15261
15262 @@ -1672,6 +1672,8 @@ long kvm_arch_dev_ioctl(struct file *fil
15263 if (n < msr_list.nmsrs)
15264 goto out;
15265 r = -EFAULT;
15266 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
15267 + goto out;
15268 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
15269 num_msrs_to_save * sizeof(u32)))
15270 goto out;
15271 @@ -2103,7 +2105,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
15272 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
15273 struct kvm_interrupt *irq)
15274 {
15275 - if (irq->irq < 0 || irq->irq >= 256)
15276 + if (irq->irq >= 256)
15277 return -EINVAL;
15278 if (irqchip_in_kernel(vcpu->kvm))
15279 return -ENXIO;
15280 @@ -4070,10 +4072,10 @@ void kvm_after_handle_nmi(struct kvm_vcp
15281 }
15282 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
15283
15284 -int kvm_arch_init(void *opaque)
15285 +int kvm_arch_init(const void *opaque)
15286 {
15287 int r;
15288 - struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
15289 + const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
15290
15291 if (kvm_x86_ops) {
15292 printk(KERN_ERR "kvm: already loaded the other module\n");
15293 diff -urNp linux-2.6.35.4/arch/x86/lib/checksum_32.S linux-2.6.35.4/arch/x86/lib/checksum_32.S
15294 --- linux-2.6.35.4/arch/x86/lib/checksum_32.S 2010-08-26 19:47:12.000000000 -0400
15295 +++ linux-2.6.35.4/arch/x86/lib/checksum_32.S 2010-09-17 20:12:09.000000000 -0400
15296 @@ -28,7 +28,8 @@
15297 #include <linux/linkage.h>
15298 #include <asm/dwarf2.h>
15299 #include <asm/errno.h>
15300 -
15301 +#include <asm/segment.h>
15302 +
15303 /*
15304 * computes a partial checksum, e.g. for TCP/UDP fragments
15305 */
15306 @@ -304,9 +305,22 @@ unsigned int csum_partial_copy_generic (
15307
15308 #define ARGBASE 16
15309 #define FP 12
15310 -
15311 -ENTRY(csum_partial_copy_generic)
15312 +
15313 +ENTRY(csum_partial_copy_generic_to_user)
15314 CFI_STARTPROC
15315 + pushl $(__USER_DS)
15316 + CFI_ADJUST_CFA_OFFSET 4
15317 + popl %es
15318 + CFI_ADJUST_CFA_OFFSET -4
15319 + jmp csum_partial_copy_generic
15320 +
15321 +ENTRY(csum_partial_copy_generic_from_user)
15322 + pushl $(__USER_DS)
15323 + CFI_ADJUST_CFA_OFFSET 4
15324 + popl %ds
15325 + CFI_ADJUST_CFA_OFFSET -4
15326 +
15327 +ENTRY(csum_partial_copy_generic)
15328 subl $4,%esp
15329 CFI_ADJUST_CFA_OFFSET 4
15330 pushl %edi
15331 @@ -331,7 +345,7 @@ ENTRY(csum_partial_copy_generic)
15332 jmp 4f
15333 SRC(1: movw (%esi), %bx )
15334 addl $2, %esi
15335 -DST( movw %bx, (%edi) )
15336 +DST( movw %bx, %es:(%edi) )
15337 addl $2, %edi
15338 addw %bx, %ax
15339 adcl $0, %eax
15340 @@ -343,30 +357,30 @@ DST( movw %bx, (%edi) )
15341 SRC(1: movl (%esi), %ebx )
15342 SRC( movl 4(%esi), %edx )
15343 adcl %ebx, %eax
15344 -DST( movl %ebx, (%edi) )
15345 +DST( movl %ebx, %es:(%edi) )
15346 adcl %edx, %eax
15347 -DST( movl %edx, 4(%edi) )
15348 +DST( movl %edx, %es:4(%edi) )
15349
15350 SRC( movl 8(%esi), %ebx )
15351 SRC( movl 12(%esi), %edx )
15352 adcl %ebx, %eax
15353 -DST( movl %ebx, 8(%edi) )
15354 +DST( movl %ebx, %es:8(%edi) )
15355 adcl %edx, %eax
15356 -DST( movl %edx, 12(%edi) )
15357 +DST( movl %edx, %es:12(%edi) )
15358
15359 SRC( movl 16(%esi), %ebx )
15360 SRC( movl 20(%esi), %edx )
15361 adcl %ebx, %eax
15362 -DST( movl %ebx, 16(%edi) )
15363 +DST( movl %ebx, %es:16(%edi) )
15364 adcl %edx, %eax
15365 -DST( movl %edx, 20(%edi) )
15366 +DST( movl %edx, %es:20(%edi) )
15367
15368 SRC( movl 24(%esi), %ebx )
15369 SRC( movl 28(%esi), %edx )
15370 adcl %ebx, %eax
15371 -DST( movl %ebx, 24(%edi) )
15372 +DST( movl %ebx, %es:24(%edi) )
15373 adcl %edx, %eax
15374 -DST( movl %edx, 28(%edi) )
15375 +DST( movl %edx, %es:28(%edi) )
15376
15377 lea 32(%esi), %esi
15378 lea 32(%edi), %edi
15379 @@ -380,7 +394,7 @@ DST( movl %edx, 28(%edi) )
15380 shrl $2, %edx # This clears CF
15381 SRC(3: movl (%esi), %ebx )
15382 adcl %ebx, %eax
15383 -DST( movl %ebx, (%edi) )
15384 +DST( movl %ebx, %es:(%edi) )
15385 lea 4(%esi), %esi
15386 lea 4(%edi), %edi
15387 dec %edx
15388 @@ -392,12 +406,12 @@ DST( movl %ebx, (%edi) )
15389 jb 5f
15390 SRC( movw (%esi), %cx )
15391 leal 2(%esi), %esi
15392 -DST( movw %cx, (%edi) )
15393 +DST( movw %cx, %es:(%edi) )
15394 leal 2(%edi), %edi
15395 je 6f
15396 shll $16,%ecx
15397 SRC(5: movb (%esi), %cl )
15398 -DST( movb %cl, (%edi) )
15399 +DST( movb %cl, %es:(%edi) )
15400 6: addl %ecx, %eax
15401 adcl $0, %eax
15402 7:
15403 @@ -408,7 +422,7 @@ DST( movb %cl, (%edi) )
15404
15405 6001:
15406 movl ARGBASE+20(%esp), %ebx # src_err_ptr
15407 - movl $-EFAULT, (%ebx)
15408 + movl $-EFAULT, %ss:(%ebx)
15409
15410 # zero the complete destination - computing the rest
15411 # is too much work
15412 @@ -421,11 +435,19 @@ DST( movb %cl, (%edi) )
15413
15414 6002:
15415 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
15416 - movl $-EFAULT,(%ebx)
15417 + movl $-EFAULT,%ss:(%ebx)
15418 jmp 5000b
15419
15420 .previous
15421
15422 + pushl %ss
15423 + CFI_ADJUST_CFA_OFFSET 4
15424 + popl %ds
15425 + CFI_ADJUST_CFA_OFFSET -4
15426 + pushl %ss
15427 + CFI_ADJUST_CFA_OFFSET 4
15428 + popl %es
15429 + CFI_ADJUST_CFA_OFFSET -4
15430 popl %ebx
15431 CFI_ADJUST_CFA_OFFSET -4
15432 CFI_RESTORE ebx
15433 @@ -439,26 +461,41 @@ DST( movb %cl, (%edi) )
15434 CFI_ADJUST_CFA_OFFSET -4
15435 ret
15436 CFI_ENDPROC
15437 -ENDPROC(csum_partial_copy_generic)
15438 +ENDPROC(csum_partial_copy_generic_to_user)
15439
15440 #else
15441
15442 /* Version for PentiumII/PPro */
15443
15444 #define ROUND1(x) \
15445 + nop; nop; nop; \
15446 SRC(movl x(%esi), %ebx ) ; \
15447 addl %ebx, %eax ; \
15448 - DST(movl %ebx, x(%edi) ) ;
15449 + DST(movl %ebx, %es:x(%edi)) ;
15450
15451 #define ROUND(x) \
15452 + nop; nop; nop; \
15453 SRC(movl x(%esi), %ebx ) ; \
15454 adcl %ebx, %eax ; \
15455 - DST(movl %ebx, x(%edi) ) ;
15456 + DST(movl %ebx, %es:x(%edi)) ;
15457
15458 #define ARGBASE 12
15459 -
15460 -ENTRY(csum_partial_copy_generic)
15461 +
15462 +ENTRY(csum_partial_copy_generic_to_user)
15463 CFI_STARTPROC
15464 + pushl $(__USER_DS)
15465 + CFI_ADJUST_CFA_OFFSET 4
15466 + popl %es
15467 + CFI_ADJUST_CFA_OFFSET -4
15468 + jmp csum_partial_copy_generic
15469 +
15470 +ENTRY(csum_partial_copy_generic_from_user)
15471 + pushl $(__USER_DS)
15472 + CFI_ADJUST_CFA_OFFSET 4
15473 + popl %ds
15474 + CFI_ADJUST_CFA_OFFSET -4
15475 +
15476 +ENTRY(csum_partial_copy_generic)
15477 pushl %ebx
15478 CFI_ADJUST_CFA_OFFSET 4
15479 CFI_REL_OFFSET ebx, 0
15480 @@ -482,7 +519,7 @@ ENTRY(csum_partial_copy_generic)
15481 subl %ebx, %edi
15482 lea -1(%esi),%edx
15483 andl $-32,%edx
15484 - lea 3f(%ebx,%ebx), %ebx
15485 + lea 3f(%ebx,%ebx,2), %ebx
15486 testl %esi, %esi
15487 jmp *%ebx
15488 1: addl $64,%esi
15489 @@ -503,19 +540,19 @@ ENTRY(csum_partial_copy_generic)
15490 jb 5f
15491 SRC( movw (%esi), %dx )
15492 leal 2(%esi), %esi
15493 -DST( movw %dx, (%edi) )
15494 +DST( movw %dx, %es:(%edi) )
15495 leal 2(%edi), %edi
15496 je 6f
15497 shll $16,%edx
15498 5:
15499 SRC( movb (%esi), %dl )
15500 -DST( movb %dl, (%edi) )
15501 +DST( movb %dl, %es:(%edi) )
15502 6: addl %edx, %eax
15503 adcl $0, %eax
15504 7:
15505 .section .fixup, "ax"
15506 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
15507 - movl $-EFAULT, (%ebx)
15508 + movl $-EFAULT, %ss:(%ebx)
15509 # zero the complete destination (computing the rest is too much work)
15510 movl ARGBASE+8(%esp),%edi # dst
15511 movl ARGBASE+12(%esp),%ecx # len
15512 @@ -523,10 +560,18 @@ DST( movb %dl, (%edi) )
15513 rep; stosb
15514 jmp 7b
15515 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
15516 - movl $-EFAULT, (%ebx)
15517 + movl $-EFAULT, %ss:(%ebx)
15518 jmp 7b
15519 .previous
15520
15521 + pushl %ss
15522 + CFI_ADJUST_CFA_OFFSET 4
15523 + popl %ds
15524 + CFI_ADJUST_CFA_OFFSET -4
15525 + pushl %ss
15526 + CFI_ADJUST_CFA_OFFSET 4
15527 + popl %es
15528 + CFI_ADJUST_CFA_OFFSET -4
15529 popl %esi
15530 CFI_ADJUST_CFA_OFFSET -4
15531 CFI_RESTORE esi
15532 @@ -538,7 +583,7 @@ DST( movb %dl, (%edi) )
15533 CFI_RESTORE ebx
15534 ret
15535 CFI_ENDPROC
15536 -ENDPROC(csum_partial_copy_generic)
15537 +ENDPROC(csum_partial_copy_generic_to_user)
15538
15539 #undef ROUND
15540 #undef ROUND1
15541 diff -urNp linux-2.6.35.4/arch/x86/lib/clear_page_64.S linux-2.6.35.4/arch/x86/lib/clear_page_64.S
15542 --- linux-2.6.35.4/arch/x86/lib/clear_page_64.S 2010-08-26 19:47:12.000000000 -0400
15543 +++ linux-2.6.35.4/arch/x86/lib/clear_page_64.S 2010-09-17 20:12:09.000000000 -0400
15544 @@ -43,7 +43,7 @@ ENDPROC(clear_page)
15545
15546 #include <asm/cpufeature.h>
15547
15548 - .section .altinstr_replacement,"ax"
15549 + .section .altinstr_replacement,"a"
15550 1: .byte 0xeb /* jmp <disp8> */
15551 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
15552 2:
15553 diff -urNp linux-2.6.35.4/arch/x86/lib/copy_page_64.S linux-2.6.35.4/arch/x86/lib/copy_page_64.S
15554 --- linux-2.6.35.4/arch/x86/lib/copy_page_64.S 2010-08-26 19:47:12.000000000 -0400
15555 +++ linux-2.6.35.4/arch/x86/lib/copy_page_64.S 2010-09-17 20:12:09.000000000 -0400
15556 @@ -104,7 +104,7 @@ ENDPROC(copy_page)
15557
15558 #include <asm/cpufeature.h>
15559
15560 - .section .altinstr_replacement,"ax"
15561 + .section .altinstr_replacement,"a"
15562 1: .byte 0xeb /* jmp <disp8> */
15563 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
15564 2:
15565 diff -urNp linux-2.6.35.4/arch/x86/lib/copy_user_64.S linux-2.6.35.4/arch/x86/lib/copy_user_64.S
15566 --- linux-2.6.35.4/arch/x86/lib/copy_user_64.S 2010-08-26 19:47:12.000000000 -0400
15567 +++ linux-2.6.35.4/arch/x86/lib/copy_user_64.S 2010-09-17 20:12:09.000000000 -0400
15568 @@ -15,13 +15,14 @@
15569 #include <asm/asm-offsets.h>
15570 #include <asm/thread_info.h>
15571 #include <asm/cpufeature.h>
15572 +#include <asm/pgtable.h>
15573
15574 .macro ALTERNATIVE_JUMP feature,orig,alt
15575 0:
15576 .byte 0xe9 /* 32bit jump */
15577 .long \orig-1f /* by default jump to orig */
15578 1:
15579 - .section .altinstr_replacement,"ax"
15580 + .section .altinstr_replacement,"a"
15581 2: .byte 0xe9 /* near jump with 32bit immediate */
15582 .long \alt-1b /* offset */ /* or alternatively to alt */
15583 .previous
15584 @@ -64,37 +65,13 @@
15585 #endif
15586 .endm
15587
15588 -/* Standard copy_to_user with segment limit checking */
15589 -ENTRY(_copy_to_user)
15590 - CFI_STARTPROC
15591 - GET_THREAD_INFO(%rax)
15592 - movq %rdi,%rcx
15593 - addq %rdx,%rcx
15594 - jc bad_to_user
15595 - cmpq TI_addr_limit(%rax),%rcx
15596 - jae bad_to_user
15597 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
15598 - CFI_ENDPROC
15599 -ENDPROC(_copy_to_user)
15600 -
15601 -/* Standard copy_from_user with segment limit checking */
15602 -ENTRY(_copy_from_user)
15603 - CFI_STARTPROC
15604 - GET_THREAD_INFO(%rax)
15605 - movq %rsi,%rcx
15606 - addq %rdx,%rcx
15607 - jc bad_from_user
15608 - cmpq TI_addr_limit(%rax),%rcx
15609 - jae bad_from_user
15610 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
15611 - CFI_ENDPROC
15612 -ENDPROC(_copy_from_user)
15613 -
15614 .section .fixup,"ax"
15615 /* must zero dest */
15616 ENTRY(bad_from_user)
15617 bad_from_user:
15618 CFI_STARTPROC
15619 + testl %edx,%edx
15620 + js bad_to_user
15621 movl %edx,%ecx
15622 xorl %eax,%eax
15623 rep
15624 diff -urNp linux-2.6.35.4/arch/x86/lib/copy_user_nocache_64.S linux-2.6.35.4/arch/x86/lib/copy_user_nocache_64.S
15625 --- linux-2.6.35.4/arch/x86/lib/copy_user_nocache_64.S 2010-08-26 19:47:12.000000000 -0400
15626 +++ linux-2.6.35.4/arch/x86/lib/copy_user_nocache_64.S 2010-09-17 20:12:09.000000000 -0400
15627 @@ -14,6 +14,7 @@
15628 #include <asm/current.h>
15629 #include <asm/asm-offsets.h>
15630 #include <asm/thread_info.h>
15631 +#include <asm/pgtable.h>
15632
15633 .macro ALIGN_DESTINATION
15634 #ifdef FIX_ALIGNMENT
15635 @@ -50,6 +51,15 @@
15636 */
15637 ENTRY(__copy_user_nocache)
15638 CFI_STARTPROC
15639 +
15640 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15641 + mov $PAX_USER_SHADOW_BASE,%rcx
15642 + cmp %rcx,%rsi
15643 + jae 1f
15644 + add %rcx,%rsi
15645 +1:
15646 +#endif
15647 +
15648 cmpl $8,%edx
15649 jb 20f /* less then 8 bytes, go to byte copy loop */
15650 ALIGN_DESTINATION
15651 diff -urNp linux-2.6.35.4/arch/x86/lib/csum-wrappers_64.c linux-2.6.35.4/arch/x86/lib/csum-wrappers_64.c
15652 --- linux-2.6.35.4/arch/x86/lib/csum-wrappers_64.c 2010-08-26 19:47:12.000000000 -0400
15653 +++ linux-2.6.35.4/arch/x86/lib/csum-wrappers_64.c 2010-09-17 20:12:09.000000000 -0400
15654 @@ -52,6 +52,8 @@ csum_partial_copy_from_user(const void _
15655 len -= 2;
15656 }
15657 }
15658 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
15659 + src += PAX_USER_SHADOW_BASE;
15660 isum = csum_partial_copy_generic((__force const void *)src,
15661 dst, len, isum, errp, NULL);
15662 if (unlikely(*errp))
15663 @@ -105,6 +107,8 @@ csum_partial_copy_to_user(const void *sr
15664 }
15665
15666 *errp = 0;
15667 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
15668 + dst += PAX_USER_SHADOW_BASE;
15669 return csum_partial_copy_generic(src, (void __force *)dst,
15670 len, isum, NULL, errp);
15671 }
15672 diff -urNp linux-2.6.35.4/arch/x86/lib/getuser.S linux-2.6.35.4/arch/x86/lib/getuser.S
15673 --- linux-2.6.35.4/arch/x86/lib/getuser.S 2010-08-26 19:47:12.000000000 -0400
15674 +++ linux-2.6.35.4/arch/x86/lib/getuser.S 2010-09-17 20:12:09.000000000 -0400
15675 @@ -33,14 +33,38 @@
15676 #include <asm/asm-offsets.h>
15677 #include <asm/thread_info.h>
15678 #include <asm/asm.h>
15679 +#include <asm/segment.h>
15680 +#include <asm/pgtable.h>
15681
15682 .text
15683 ENTRY(__get_user_1)
15684 CFI_STARTPROC
15685 +
15686 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15687 + pushl $(__USER_DS)
15688 + popl %ds
15689 +#else
15690 GET_THREAD_INFO(%_ASM_DX)
15691 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
15692 jae bad_get_user
15693 +
15694 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15695 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
15696 + cmp %_ASM_DX,%_ASM_AX
15697 + jae 1234f
15698 + add %_ASM_DX,%_ASM_AX
15699 +1234:
15700 +#endif
15701 +
15702 +#endif
15703 +
15704 1: movzb (%_ASM_AX),%edx
15705 +
15706 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15707 + pushl %ss
15708 + pop %ds
15709 +#endif
15710 +
15711 xor %eax,%eax
15712 ret
15713 CFI_ENDPROC
15714 @@ -49,11 +73,33 @@ ENDPROC(__get_user_1)
15715 ENTRY(__get_user_2)
15716 CFI_STARTPROC
15717 add $1,%_ASM_AX
15718 +
15719 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15720 + pushl $(__USER_DS)
15721 + popl %ds
15722 +#else
15723 jc bad_get_user
15724 GET_THREAD_INFO(%_ASM_DX)
15725 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
15726 jae bad_get_user
15727 +
15728 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15729 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
15730 + cmp %_ASM_DX,%_ASM_AX
15731 + jae 1234f
15732 + add %_ASM_DX,%_ASM_AX
15733 +1234:
15734 +#endif
15735 +
15736 +#endif
15737 +
15738 2: movzwl -1(%_ASM_AX),%edx
15739 +
15740 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15741 + pushl %ss
15742 + pop %ds
15743 +#endif
15744 +
15745 xor %eax,%eax
15746 ret
15747 CFI_ENDPROC
15748 @@ -62,11 +108,33 @@ ENDPROC(__get_user_2)
15749 ENTRY(__get_user_4)
15750 CFI_STARTPROC
15751 add $3,%_ASM_AX
15752 +
15753 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15754 + pushl $(__USER_DS)
15755 + popl %ds
15756 +#else
15757 jc bad_get_user
15758 GET_THREAD_INFO(%_ASM_DX)
15759 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
15760 jae bad_get_user
15761 +
15762 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15763 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
15764 + cmp %_ASM_DX,%_ASM_AX
15765 + jae 1234f
15766 + add %_ASM_DX,%_ASM_AX
15767 +1234:
15768 +#endif
15769 +
15770 +#endif
15771 +
15772 3: mov -3(%_ASM_AX),%edx
15773 +
15774 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15775 + pushl %ss
15776 + pop %ds
15777 +#endif
15778 +
15779 xor %eax,%eax
15780 ret
15781 CFI_ENDPROC
15782 @@ -80,6 +148,15 @@ ENTRY(__get_user_8)
15783 GET_THREAD_INFO(%_ASM_DX)
15784 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
15785 jae bad_get_user
15786 +
15787 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15788 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
15789 + cmp %_ASM_DX,%_ASM_AX
15790 + jae 1234f
15791 + add %_ASM_DX,%_ASM_AX
15792 +1234:
15793 +#endif
15794 +
15795 4: movq -7(%_ASM_AX),%_ASM_DX
15796 xor %eax,%eax
15797 ret
15798 @@ -89,6 +166,12 @@ ENDPROC(__get_user_8)
15799
15800 bad_get_user:
15801 CFI_STARTPROC
15802 +
15803 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15804 + pushl %ss
15805 + pop %ds
15806 +#endif
15807 +
15808 xor %edx,%edx
15809 mov $(-EFAULT),%_ASM_AX
15810 ret
15811 diff -urNp linux-2.6.35.4/arch/x86/lib/insn.c linux-2.6.35.4/arch/x86/lib/insn.c
15812 --- linux-2.6.35.4/arch/x86/lib/insn.c 2010-08-26 19:47:12.000000000 -0400
15813 +++ linux-2.6.35.4/arch/x86/lib/insn.c 2010-09-17 20:12:09.000000000 -0400
15814 @@ -21,6 +21,7 @@
15815 #include <linux/string.h>
15816 #include <asm/inat.h>
15817 #include <asm/insn.h>
15818 +#include <asm/pgtable_types.h>
15819
15820 #define get_next(t, insn) \
15821 ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
15822 @@ -40,8 +41,8 @@
15823 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
15824 {
15825 memset(insn, 0, sizeof(*insn));
15826 - insn->kaddr = kaddr;
15827 - insn->next_byte = kaddr;
15828 + insn->kaddr = ktla_ktva(kaddr);
15829 + insn->next_byte = ktla_ktva(kaddr);
15830 insn->x86_64 = x86_64 ? 1 : 0;
15831 insn->opnd_bytes = 4;
15832 if (x86_64)
15833 diff -urNp linux-2.6.35.4/arch/x86/lib/mmx_32.c linux-2.6.35.4/arch/x86/lib/mmx_32.c
15834 --- linux-2.6.35.4/arch/x86/lib/mmx_32.c 2010-08-26 19:47:12.000000000 -0400
15835 +++ linux-2.6.35.4/arch/x86/lib/mmx_32.c 2010-09-17 20:12:09.000000000 -0400
15836 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
15837 {
15838 void *p;
15839 int i;
15840 + unsigned long cr0;
15841
15842 if (unlikely(in_interrupt()))
15843 return __memcpy(to, from, len);
15844 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
15845 kernel_fpu_begin();
15846
15847 __asm__ __volatile__ (
15848 - "1: prefetch (%0)\n" /* This set is 28 bytes */
15849 - " prefetch 64(%0)\n"
15850 - " prefetch 128(%0)\n"
15851 - " prefetch 192(%0)\n"
15852 - " prefetch 256(%0)\n"
15853 + "1: prefetch (%1)\n" /* This set is 28 bytes */
15854 + " prefetch 64(%1)\n"
15855 + " prefetch 128(%1)\n"
15856 + " prefetch 192(%1)\n"
15857 + " prefetch 256(%1)\n"
15858 "2: \n"
15859 ".section .fixup, \"ax\"\n"
15860 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
15861 + "3: \n"
15862 +
15863 +#ifdef CONFIG_PAX_KERNEXEC
15864 + " movl %%cr0, %0\n"
15865 + " movl %0, %%eax\n"
15866 + " andl $0xFFFEFFFF, %%eax\n"
15867 + " movl %%eax, %%cr0\n"
15868 +#endif
15869 +
15870 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
15871 +
15872 +#ifdef CONFIG_PAX_KERNEXEC
15873 + " movl %0, %%cr0\n"
15874 +#endif
15875 +
15876 " jmp 2b\n"
15877 ".previous\n"
15878 _ASM_EXTABLE(1b, 3b)
15879 - : : "r" (from));
15880 + : "=&r" (cr0) : "r" (from) : "ax");
15881
15882 for ( ; i > 5; i--) {
15883 __asm__ __volatile__ (
15884 - "1: prefetch 320(%0)\n"
15885 - "2: movq (%0), %%mm0\n"
15886 - " movq 8(%0), %%mm1\n"
15887 - " movq 16(%0), %%mm2\n"
15888 - " movq 24(%0), %%mm3\n"
15889 - " movq %%mm0, (%1)\n"
15890 - " movq %%mm1, 8(%1)\n"
15891 - " movq %%mm2, 16(%1)\n"
15892 - " movq %%mm3, 24(%1)\n"
15893 - " movq 32(%0), %%mm0\n"
15894 - " movq 40(%0), %%mm1\n"
15895 - " movq 48(%0), %%mm2\n"
15896 - " movq 56(%0), %%mm3\n"
15897 - " movq %%mm0, 32(%1)\n"
15898 - " movq %%mm1, 40(%1)\n"
15899 - " movq %%mm2, 48(%1)\n"
15900 - " movq %%mm3, 56(%1)\n"
15901 + "1: prefetch 320(%1)\n"
15902 + "2: movq (%1), %%mm0\n"
15903 + " movq 8(%1), %%mm1\n"
15904 + " movq 16(%1), %%mm2\n"
15905 + " movq 24(%1), %%mm3\n"
15906 + " movq %%mm0, (%2)\n"
15907 + " movq %%mm1, 8(%2)\n"
15908 + " movq %%mm2, 16(%2)\n"
15909 + " movq %%mm3, 24(%2)\n"
15910 + " movq 32(%1), %%mm0\n"
15911 + " movq 40(%1), %%mm1\n"
15912 + " movq 48(%1), %%mm2\n"
15913 + " movq 56(%1), %%mm3\n"
15914 + " movq %%mm0, 32(%2)\n"
15915 + " movq %%mm1, 40(%2)\n"
15916 + " movq %%mm2, 48(%2)\n"
15917 + " movq %%mm3, 56(%2)\n"
15918 ".section .fixup, \"ax\"\n"
15919 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
15920 + "3:\n"
15921 +
15922 +#ifdef CONFIG_PAX_KERNEXEC
15923 + " movl %%cr0, %0\n"
15924 + " movl %0, %%eax\n"
15925 + " andl $0xFFFEFFFF, %%eax\n"
15926 + " movl %%eax, %%cr0\n"
15927 +#endif
15928 +
15929 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
15930 +
15931 +#ifdef CONFIG_PAX_KERNEXEC
15932 + " movl %0, %%cr0\n"
15933 +#endif
15934 +
15935 " jmp 2b\n"
15936 ".previous\n"
15937 _ASM_EXTABLE(1b, 3b)
15938 - : : "r" (from), "r" (to) : "memory");
15939 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
15940
15941 from += 64;
15942 to += 64;
15943 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
15944 static void fast_copy_page(void *to, void *from)
15945 {
15946 int i;
15947 + unsigned long cr0;
15948
15949 kernel_fpu_begin();
15950
15951 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
15952 * but that is for later. -AV
15953 */
15954 __asm__ __volatile__(
15955 - "1: prefetch (%0)\n"
15956 - " prefetch 64(%0)\n"
15957 - " prefetch 128(%0)\n"
15958 - " prefetch 192(%0)\n"
15959 - " prefetch 256(%0)\n"
15960 + "1: prefetch (%1)\n"
15961 + " prefetch 64(%1)\n"
15962 + " prefetch 128(%1)\n"
15963 + " prefetch 192(%1)\n"
15964 + " prefetch 256(%1)\n"
15965 "2: \n"
15966 ".section .fixup, \"ax\"\n"
15967 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
15968 + "3: \n"
15969 +
15970 +#ifdef CONFIG_PAX_KERNEXEC
15971 + " movl %%cr0, %0\n"
15972 + " movl %0, %%eax\n"
15973 + " andl $0xFFFEFFFF, %%eax\n"
15974 + " movl %%eax, %%cr0\n"
15975 +#endif
15976 +
15977 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
15978 +
15979 +#ifdef CONFIG_PAX_KERNEXEC
15980 + " movl %0, %%cr0\n"
15981 +#endif
15982 +
15983 " jmp 2b\n"
15984 ".previous\n"
15985 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
15986 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
15987
15988 for (i = 0; i < (4096-320)/64; i++) {
15989 __asm__ __volatile__ (
15990 - "1: prefetch 320(%0)\n"
15991 - "2: movq (%0), %%mm0\n"
15992 - " movntq %%mm0, (%1)\n"
15993 - " movq 8(%0), %%mm1\n"
15994 - " movntq %%mm1, 8(%1)\n"
15995 - " movq 16(%0), %%mm2\n"
15996 - " movntq %%mm2, 16(%1)\n"
15997 - " movq 24(%0), %%mm3\n"
15998 - " movntq %%mm3, 24(%1)\n"
15999 - " movq 32(%0), %%mm4\n"
16000 - " movntq %%mm4, 32(%1)\n"
16001 - " movq 40(%0), %%mm5\n"
16002 - " movntq %%mm5, 40(%1)\n"
16003 - " movq 48(%0), %%mm6\n"
16004 - " movntq %%mm6, 48(%1)\n"
16005 - " movq 56(%0), %%mm7\n"
16006 - " movntq %%mm7, 56(%1)\n"
16007 + "1: prefetch 320(%1)\n"
16008 + "2: movq (%1), %%mm0\n"
16009 + " movntq %%mm0, (%2)\n"
16010 + " movq 8(%1), %%mm1\n"
16011 + " movntq %%mm1, 8(%2)\n"
16012 + " movq 16(%1), %%mm2\n"
16013 + " movntq %%mm2, 16(%2)\n"
16014 + " movq 24(%1), %%mm3\n"
16015 + " movntq %%mm3, 24(%2)\n"
16016 + " movq 32(%1), %%mm4\n"
16017 + " movntq %%mm4, 32(%2)\n"
16018 + " movq 40(%1), %%mm5\n"
16019 + " movntq %%mm5, 40(%2)\n"
16020 + " movq 48(%1), %%mm6\n"
16021 + " movntq %%mm6, 48(%2)\n"
16022 + " movq 56(%1), %%mm7\n"
16023 + " movntq %%mm7, 56(%2)\n"
16024 ".section .fixup, \"ax\"\n"
16025 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
16026 + "3:\n"
16027 +
16028 +#ifdef CONFIG_PAX_KERNEXEC
16029 + " movl %%cr0, %0\n"
16030 + " movl %0, %%eax\n"
16031 + " andl $0xFFFEFFFF, %%eax\n"
16032 + " movl %%eax, %%cr0\n"
16033 +#endif
16034 +
16035 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
16036 +
16037 +#ifdef CONFIG_PAX_KERNEXEC
16038 + " movl %0, %%cr0\n"
16039 +#endif
16040 +
16041 " jmp 2b\n"
16042 ".previous\n"
16043 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
16044 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
16045
16046 from += 64;
16047 to += 64;
16048 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
16049 static void fast_copy_page(void *to, void *from)
16050 {
16051 int i;
16052 + unsigned long cr0;
16053
16054 kernel_fpu_begin();
16055
16056 __asm__ __volatile__ (
16057 - "1: prefetch (%0)\n"
16058 - " prefetch 64(%0)\n"
16059 - " prefetch 128(%0)\n"
16060 - " prefetch 192(%0)\n"
16061 - " prefetch 256(%0)\n"
16062 + "1: prefetch (%1)\n"
16063 + " prefetch 64(%1)\n"
16064 + " prefetch 128(%1)\n"
16065 + " prefetch 192(%1)\n"
16066 + " prefetch 256(%1)\n"
16067 "2: \n"
16068 ".section .fixup, \"ax\"\n"
16069 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
16070 + "3: \n"
16071 +
16072 +#ifdef CONFIG_PAX_KERNEXEC
16073 + " movl %%cr0, %0\n"
16074 + " movl %0, %%eax\n"
16075 + " andl $0xFFFEFFFF, %%eax\n"
16076 + " movl %%eax, %%cr0\n"
16077 +#endif
16078 +
16079 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
16080 +
16081 +#ifdef CONFIG_PAX_KERNEXEC
16082 + " movl %0, %%cr0\n"
16083 +#endif
16084 +
16085 " jmp 2b\n"
16086 ".previous\n"
16087 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
16088 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
16089
16090 for (i = 0; i < 4096/64; i++) {
16091 __asm__ __volatile__ (
16092 - "1: prefetch 320(%0)\n"
16093 - "2: movq (%0), %%mm0\n"
16094 - " movq 8(%0), %%mm1\n"
16095 - " movq 16(%0), %%mm2\n"
16096 - " movq 24(%0), %%mm3\n"
16097 - " movq %%mm0, (%1)\n"
16098 - " movq %%mm1, 8(%1)\n"
16099 - " movq %%mm2, 16(%1)\n"
16100 - " movq %%mm3, 24(%1)\n"
16101 - " movq 32(%0), %%mm0\n"
16102 - " movq 40(%0), %%mm1\n"
16103 - " movq 48(%0), %%mm2\n"
16104 - " movq 56(%0), %%mm3\n"
16105 - " movq %%mm0, 32(%1)\n"
16106 - " movq %%mm1, 40(%1)\n"
16107 - " movq %%mm2, 48(%1)\n"
16108 - " movq %%mm3, 56(%1)\n"
16109 + "1: prefetch 320(%1)\n"
16110 + "2: movq (%1), %%mm0\n"
16111 + " movq 8(%1), %%mm1\n"
16112 + " movq 16(%1), %%mm2\n"
16113 + " movq 24(%1), %%mm3\n"
16114 + " movq %%mm0, (%2)\n"
16115 + " movq %%mm1, 8(%2)\n"
16116 + " movq %%mm2, 16(%2)\n"
16117 + " movq %%mm3, 24(%2)\n"
16118 + " movq 32(%1), %%mm0\n"
16119 + " movq 40(%1), %%mm1\n"
16120 + " movq 48(%1), %%mm2\n"
16121 + " movq 56(%1), %%mm3\n"
16122 + " movq %%mm0, 32(%2)\n"
16123 + " movq %%mm1, 40(%2)\n"
16124 + " movq %%mm2, 48(%2)\n"
16125 + " movq %%mm3, 56(%2)\n"
16126 ".section .fixup, \"ax\"\n"
16127 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
16128 + "3:\n"
16129 +
16130 +#ifdef CONFIG_PAX_KERNEXEC
16131 + " movl %%cr0, %0\n"
16132 + " movl %0, %%eax\n"
16133 + " andl $0xFFFEFFFF, %%eax\n"
16134 + " movl %%eax, %%cr0\n"
16135 +#endif
16136 +
16137 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
16138 +
16139 +#ifdef CONFIG_PAX_KERNEXEC
16140 + " movl %0, %%cr0\n"
16141 +#endif
16142 +
16143 " jmp 2b\n"
16144 ".previous\n"
16145 _ASM_EXTABLE(1b, 3b)
16146 - : : "r" (from), "r" (to) : "memory");
16147 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
16148
16149 from += 64;
16150 to += 64;
16151 diff -urNp linux-2.6.35.4/arch/x86/lib/putuser.S linux-2.6.35.4/arch/x86/lib/putuser.S
16152 --- linux-2.6.35.4/arch/x86/lib/putuser.S 2010-08-26 19:47:12.000000000 -0400
16153 +++ linux-2.6.35.4/arch/x86/lib/putuser.S 2010-09-17 20:12:09.000000000 -0400
16154 @@ -15,7 +15,8 @@
16155 #include <asm/thread_info.h>
16156 #include <asm/errno.h>
16157 #include <asm/asm.h>
16158 -
16159 +#include <asm/segment.h>
16160 +#include <asm/pgtable.h>
16161
16162 /*
16163 * __put_user_X
16164 @@ -29,59 +30,162 @@
16165 * as they get called from within inline assembly.
16166 */
16167
16168 -#define ENTER CFI_STARTPROC ; \
16169 - GET_THREAD_INFO(%_ASM_BX)
16170 +#define ENTER CFI_STARTPROC
16171 #define EXIT ret ; \
16172 CFI_ENDPROC
16173
16174 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16175 +#define _DEST %_ASM_CX,%_ASM_BX
16176 +#else
16177 +#define _DEST %_ASM_CX
16178 +#endif
16179 +
16180 .text
16181 ENTRY(__put_user_1)
16182 ENTER
16183 +
16184 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16185 + pushl $(__USER_DS)
16186 + popl %ds
16187 +#else
16188 + GET_THREAD_INFO(%_ASM_BX)
16189 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
16190 jae bad_put_user
16191 -1: movb %al,(%_ASM_CX)
16192 +
16193 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16194 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
16195 + cmp %_ASM_BX,%_ASM_CX
16196 + jb 1234f
16197 + xor %ebx,%ebx
16198 +1234:
16199 +#endif
16200 +
16201 +#endif
16202 +
16203 +1: movb %al,(_DEST)
16204 +
16205 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16206 + pushl %ss
16207 + popl %ds
16208 +#endif
16209 +
16210 xor %eax,%eax
16211 EXIT
16212 ENDPROC(__put_user_1)
16213
16214 ENTRY(__put_user_2)
16215 ENTER
16216 +
16217 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16218 + pushl $(__USER_DS)
16219 + popl %ds
16220 +#else
16221 + GET_THREAD_INFO(%_ASM_BX)
16222 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
16223 sub $1,%_ASM_BX
16224 cmp %_ASM_BX,%_ASM_CX
16225 jae bad_put_user
16226 -2: movw %ax,(%_ASM_CX)
16227 +
16228 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16229 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
16230 + cmp %_ASM_BX,%_ASM_CX
16231 + jb 1234f
16232 + xor %ebx,%ebx
16233 +1234:
16234 +#endif
16235 +
16236 +#endif
16237 +
16238 +2: movw %ax,(_DEST)
16239 +
16240 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16241 + pushl %ss
16242 + popl %ds
16243 +#endif
16244 +
16245 xor %eax,%eax
16246 EXIT
16247 ENDPROC(__put_user_2)
16248
16249 ENTRY(__put_user_4)
16250 ENTER
16251 +
16252 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16253 + pushl $(__USER_DS)
16254 + popl %ds
16255 +#else
16256 + GET_THREAD_INFO(%_ASM_BX)
16257 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
16258 sub $3,%_ASM_BX
16259 cmp %_ASM_BX,%_ASM_CX
16260 jae bad_put_user
16261 -3: movl %eax,(%_ASM_CX)
16262 +
16263 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16264 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
16265 + cmp %_ASM_BX,%_ASM_CX
16266 + jb 1234f
16267 + xor %ebx,%ebx
16268 +1234:
16269 +#endif
16270 +
16271 +#endif
16272 +
16273 +3: movl %eax,(_DEST)
16274 +
16275 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16276 + pushl %ss
16277 + popl %ds
16278 +#endif
16279 +
16280 xor %eax,%eax
16281 EXIT
16282 ENDPROC(__put_user_4)
16283
16284 ENTRY(__put_user_8)
16285 ENTER
16286 +
16287 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16288 + pushl $(__USER_DS)
16289 + popl %ds
16290 +#else
16291 + GET_THREAD_INFO(%_ASM_BX)
16292 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
16293 sub $7,%_ASM_BX
16294 cmp %_ASM_BX,%_ASM_CX
16295 jae bad_put_user
16296 -4: mov %_ASM_AX,(%_ASM_CX)
16297 +
16298 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16299 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
16300 + cmp %_ASM_BX,%_ASM_CX
16301 + jb 1234f
16302 + xor %ebx,%ebx
16303 +1234:
16304 +#endif
16305 +
16306 +#endif
16307 +
16308 +4: mov %_ASM_AX,(_DEST)
16309 #ifdef CONFIG_X86_32
16310 -5: movl %edx,4(%_ASM_CX)
16311 +5: movl %edx,4(_DEST)
16312 #endif
16313 +
16314 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16315 + pushl %ss
16316 + popl %ds
16317 +#endif
16318 +
16319 xor %eax,%eax
16320 EXIT
16321 ENDPROC(__put_user_8)
16322
16323 bad_put_user:
16324 CFI_STARTPROC
16325 +
16326 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16327 + pushl %ss
16328 + popl %ds
16329 +#endif
16330 +
16331 movl $-EFAULT,%eax
16332 EXIT
16333 END(bad_put_user)
16334 diff -urNp linux-2.6.35.4/arch/x86/lib/usercopy_32.c linux-2.6.35.4/arch/x86/lib/usercopy_32.c
16335 --- linux-2.6.35.4/arch/x86/lib/usercopy_32.c 2010-08-26 19:47:12.000000000 -0400
16336 +++ linux-2.6.35.4/arch/x86/lib/usercopy_32.c 2010-09-17 20:12:09.000000000 -0400
16337 @@ -36,31 +36,38 @@ static inline int __movsl_is_ok(unsigned
16338 * Copy a null terminated string from userspace.
16339 */
16340
16341 -#define __do_strncpy_from_user(dst, src, count, res) \
16342 -do { \
16343 - int __d0, __d1, __d2; \
16344 - might_fault(); \
16345 - __asm__ __volatile__( \
16346 - " testl %1,%1\n" \
16347 - " jz 2f\n" \
16348 - "0: lodsb\n" \
16349 - " stosb\n" \
16350 - " testb %%al,%%al\n" \
16351 - " jz 1f\n" \
16352 - " decl %1\n" \
16353 - " jnz 0b\n" \
16354 - "1: subl %1,%0\n" \
16355 - "2:\n" \
16356 - ".section .fixup,\"ax\"\n" \
16357 - "3: movl %5,%0\n" \
16358 - " jmp 2b\n" \
16359 - ".previous\n" \
16360 - _ASM_EXTABLE(0b,3b) \
16361 - : "=&d"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1), \
16362 - "=&D" (__d2) \
16363 - : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
16364 - : "memory"); \
16365 -} while (0)
16366 +static long __do_strncpy_from_user(char *dst, const char __user *src, long count)
16367 +{
16368 + int __d0, __d1, __d2;
16369 + long res = -EFAULT;
16370 +
16371 + might_fault();
16372 + __asm__ __volatile__(
16373 + " movw %w10,%%ds\n"
16374 + " testl %1,%1\n"
16375 + " jz 2f\n"
16376 + "0: lodsb\n"
16377 + " stosb\n"
16378 + " testb %%al,%%al\n"
16379 + " jz 1f\n"
16380 + " decl %1\n"
16381 + " jnz 0b\n"
16382 + "1: subl %1,%0\n"
16383 + "2:\n"
16384 + " pushl %%ss\n"
16385 + " popl %%ds\n"
16386 + ".section .fixup,\"ax\"\n"
16387 + "3: movl %5,%0\n"
16388 + " jmp 2b\n"
16389 + ".previous\n"
16390 + _ASM_EXTABLE(0b,3b)
16391 + : "=&d"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1),
16392 + "=&D" (__d2)
16393 + : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst),
16394 + "r"(__USER_DS)
16395 + : "memory");
16396 + return res;
16397 +}
16398
16399 /**
16400 * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
16401 @@ -85,9 +92,7 @@ do { \
16402 long
16403 __strncpy_from_user(char *dst, const char __user *src, long count)
16404 {
16405 - long res;
16406 - __do_strncpy_from_user(dst, src, count, res);
16407 - return res;
16408 + return __do_strncpy_from_user(dst, src, count);
16409 }
16410 EXPORT_SYMBOL(__strncpy_from_user);
16411
16412 @@ -114,7 +119,7 @@ strncpy_from_user(char *dst, const char
16413 {
16414 long res = -EFAULT;
16415 if (access_ok(VERIFY_READ, src, 1))
16416 - __do_strncpy_from_user(dst, src, count, res);
16417 + res = __do_strncpy_from_user(dst, src, count);
16418 return res;
16419 }
16420 EXPORT_SYMBOL(strncpy_from_user);
16421 @@ -123,24 +128,30 @@ EXPORT_SYMBOL(strncpy_from_user);
16422 * Zero Userspace
16423 */
16424
16425 -#define __do_clear_user(addr,size) \
16426 -do { \
16427 - int __d0; \
16428 - might_fault(); \
16429 - __asm__ __volatile__( \
16430 - "0: rep; stosl\n" \
16431 - " movl %2,%0\n" \
16432 - "1: rep; stosb\n" \
16433 - "2:\n" \
16434 - ".section .fixup,\"ax\"\n" \
16435 - "3: lea 0(%2,%0,4),%0\n" \
16436 - " jmp 2b\n" \
16437 - ".previous\n" \
16438 - _ASM_EXTABLE(0b,3b) \
16439 - _ASM_EXTABLE(1b,2b) \
16440 - : "=&c"(size), "=&D" (__d0) \
16441 - : "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0)); \
16442 -} while (0)
16443 +static unsigned long __do_clear_user(void __user *addr, unsigned long size)
16444 +{
16445 + int __d0;
16446 +
16447 + might_fault();
16448 + __asm__ __volatile__(
16449 + " movw %w6,%%es\n"
16450 + "0: rep; stosl\n"
16451 + " movl %2,%0\n"
16452 + "1: rep; stosb\n"
16453 + "2:\n"
16454 + " pushl %%ss\n"
16455 + " popl %%es\n"
16456 + ".section .fixup,\"ax\"\n"
16457 + "3: lea 0(%2,%0,4),%0\n"
16458 + " jmp 2b\n"
16459 + ".previous\n"
16460 + _ASM_EXTABLE(0b,3b)
16461 + _ASM_EXTABLE(1b,2b)
16462 + : "=&c"(size), "=&D" (__d0)
16463 + : "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0),
16464 + "r"(__USER_DS));
16465 + return size;
16466 +}
16467
16468 /**
16469 * clear_user: - Zero a block of memory in user space.
16470 @@ -157,7 +168,7 @@ clear_user(void __user *to, unsigned lon
16471 {
16472 might_fault();
16473 if (access_ok(VERIFY_WRITE, to, n))
16474 - __do_clear_user(to, n);
16475 + n = __do_clear_user(to, n);
16476 return n;
16477 }
16478 EXPORT_SYMBOL(clear_user);
16479 @@ -176,8 +187,7 @@ EXPORT_SYMBOL(clear_user);
16480 unsigned long
16481 __clear_user(void __user *to, unsigned long n)
16482 {
16483 - __do_clear_user(to, n);
16484 - return n;
16485 + return __do_clear_user(to, n);
16486 }
16487 EXPORT_SYMBOL(__clear_user);
16488
16489 @@ -200,14 +210,17 @@ long strnlen_user(const char __user *s,
16490 might_fault();
16491
16492 __asm__ __volatile__(
16493 + " movw %w8,%%es\n"
16494 " testl %0, %0\n"
16495 " jz 3f\n"
16496 - " andl %0,%%ecx\n"
16497 + " movl %0,%%ecx\n"
16498 "0: repne; scasb\n"
16499 " setne %%al\n"
16500 " subl %%ecx,%0\n"
16501 " addl %0,%%eax\n"
16502 "1:\n"
16503 + " pushl %%ss\n"
16504 + " popl %%es\n"
16505 ".section .fixup,\"ax\"\n"
16506 "2: xorl %%eax,%%eax\n"
16507 " jmp 1b\n"
16508 @@ -219,7 +232,7 @@ long strnlen_user(const char __user *s,
16509 " .long 0b,2b\n"
16510 ".previous"
16511 :"=&r" (n), "=&D" (s), "=&a" (res), "=&c" (tmp)
16512 - :"0" (n), "1" (s), "2" (0), "3" (mask)
16513 + :"0" (n), "1" (s), "2" (0), "3" (mask), "r" (__USER_DS)
16514 :"cc");
16515 return res & mask;
16516 }
16517 @@ -227,10 +240,121 @@ EXPORT_SYMBOL(strnlen_user);
16518
16519 #ifdef CONFIG_X86_INTEL_USERCOPY
16520 static unsigned long
16521 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
16522 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
16523 +{
16524 + int d0, d1;
16525 + __asm__ __volatile__(
16526 + " movw %w6, %%es\n"
16527 + " .align 2,0x90\n"
16528 + "1: movl 32(%4), %%eax\n"
16529 + " cmpl $67, %0\n"
16530 + " jbe 3f\n"
16531 + "2: movl 64(%4), %%eax\n"
16532 + " .align 2,0x90\n"
16533 + "3: movl 0(%4), %%eax\n"
16534 + "4: movl 4(%4), %%edx\n"
16535 + "5: movl %%eax, %%es:0(%3)\n"
16536 + "6: movl %%edx, %%es:4(%3)\n"
16537 + "7: movl 8(%4), %%eax\n"
16538 + "8: movl 12(%4),%%edx\n"
16539 + "9: movl %%eax, %%es:8(%3)\n"
16540 + "10: movl %%edx, %%es:12(%3)\n"
16541 + "11: movl 16(%4), %%eax\n"
16542 + "12: movl 20(%4), %%edx\n"
16543 + "13: movl %%eax, %%es:16(%3)\n"
16544 + "14: movl %%edx, %%es:20(%3)\n"
16545 + "15: movl 24(%4), %%eax\n"
16546 + "16: movl 28(%4), %%edx\n"
16547 + "17: movl %%eax, %%es:24(%3)\n"
16548 + "18: movl %%edx, %%es:28(%3)\n"
16549 + "19: movl 32(%4), %%eax\n"
16550 + "20: movl 36(%4), %%edx\n"
16551 + "21: movl %%eax, %%es:32(%3)\n"
16552 + "22: movl %%edx, %%es:36(%3)\n"
16553 + "23: movl 40(%4), %%eax\n"
16554 + "24: movl 44(%4), %%edx\n"
16555 + "25: movl %%eax, %%es:40(%3)\n"
16556 + "26: movl %%edx, %%es:44(%3)\n"
16557 + "27: movl 48(%4), %%eax\n"
16558 + "28: movl 52(%4), %%edx\n"
16559 + "29: movl %%eax, %%es:48(%3)\n"
16560 + "30: movl %%edx, %%es:52(%3)\n"
16561 + "31: movl 56(%4), %%eax\n"
16562 + "32: movl 60(%4), %%edx\n"
16563 + "33: movl %%eax, %%es:56(%3)\n"
16564 + "34: movl %%edx, %%es:60(%3)\n"
16565 + " addl $-64, %0\n"
16566 + " addl $64, %4\n"
16567 + " addl $64, %3\n"
16568 + " cmpl $63, %0\n"
16569 + " ja 1b\n"
16570 + "35: movl %0, %%eax\n"
16571 + " shrl $2, %0\n"
16572 + " andl $3, %%eax\n"
16573 + " cld\n"
16574 + "99: rep; movsl\n"
16575 + "36: movl %%eax, %0\n"
16576 + "37: rep; movsb\n"
16577 + "100:\n"
16578 + " pushl %%ss\n"
16579 + " popl %%es\n"
16580 + ".section .fixup,\"ax\"\n"
16581 + "101: lea 0(%%eax,%0,4),%0\n"
16582 + " jmp 100b\n"
16583 + ".previous\n"
16584 + ".section __ex_table,\"a\"\n"
16585 + " .align 4\n"
16586 + " .long 1b,100b\n"
16587 + " .long 2b,100b\n"
16588 + " .long 3b,100b\n"
16589 + " .long 4b,100b\n"
16590 + " .long 5b,100b\n"
16591 + " .long 6b,100b\n"
16592 + " .long 7b,100b\n"
16593 + " .long 8b,100b\n"
16594 + " .long 9b,100b\n"
16595 + " .long 10b,100b\n"
16596 + " .long 11b,100b\n"
16597 + " .long 12b,100b\n"
16598 + " .long 13b,100b\n"
16599 + " .long 14b,100b\n"
16600 + " .long 15b,100b\n"
16601 + " .long 16b,100b\n"
16602 + " .long 17b,100b\n"
16603 + " .long 18b,100b\n"
16604 + " .long 19b,100b\n"
16605 + " .long 20b,100b\n"
16606 + " .long 21b,100b\n"
16607 + " .long 22b,100b\n"
16608 + " .long 23b,100b\n"
16609 + " .long 24b,100b\n"
16610 + " .long 25b,100b\n"
16611 + " .long 26b,100b\n"
16612 + " .long 27b,100b\n"
16613 + " .long 28b,100b\n"
16614 + " .long 29b,100b\n"
16615 + " .long 30b,100b\n"
16616 + " .long 31b,100b\n"
16617 + " .long 32b,100b\n"
16618 + " .long 33b,100b\n"
16619 + " .long 34b,100b\n"
16620 + " .long 35b,100b\n"
16621 + " .long 36b,100b\n"
16622 + " .long 37b,100b\n"
16623 + " .long 99b,101b\n"
16624 + ".previous"
16625 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
16626 + : "1"(to), "2"(from), "0"(size), "r"(__USER_DS)
16627 + : "eax", "edx", "memory");
16628 + return size;
16629 +}
16630 +
16631 +static unsigned long
16632 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
16633 {
16634 int d0, d1;
16635 __asm__ __volatile__(
16636 + " movw %w6, %%ds\n"
16637 " .align 2,0x90\n"
16638 "1: movl 32(%4), %%eax\n"
16639 " cmpl $67, %0\n"
16640 @@ -239,36 +363,36 @@ __copy_user_intel(void __user *to, const
16641 " .align 2,0x90\n"
16642 "3: movl 0(%4), %%eax\n"
16643 "4: movl 4(%4), %%edx\n"
16644 - "5: movl %%eax, 0(%3)\n"
16645 - "6: movl %%edx, 4(%3)\n"
16646 + "5: movl %%eax, %%es:0(%3)\n"
16647 + "6: movl %%edx, %%es:4(%3)\n"
16648 "7: movl 8(%4), %%eax\n"
16649 "8: movl 12(%4),%%edx\n"
16650 - "9: movl %%eax, 8(%3)\n"
16651 - "10: movl %%edx, 12(%3)\n"
16652 + "9: movl %%eax, %%es:8(%3)\n"
16653 + "10: movl %%edx, %%es:12(%3)\n"
16654 "11: movl 16(%4), %%eax\n"
16655 "12: movl 20(%4), %%edx\n"
16656 - "13: movl %%eax, 16(%3)\n"
16657 - "14: movl %%edx, 20(%3)\n"
16658 + "13: movl %%eax, %%es:16(%3)\n"
16659 + "14: movl %%edx, %%es:20(%3)\n"
16660 "15: movl 24(%4), %%eax\n"
16661 "16: movl 28(%4), %%edx\n"
16662 - "17: movl %%eax, 24(%3)\n"
16663 - "18: movl %%edx, 28(%3)\n"
16664 + "17: movl %%eax, %%es:24(%3)\n"
16665 + "18: movl %%edx, %%es:28(%3)\n"
16666 "19: movl 32(%4), %%eax\n"
16667 "20: movl 36(%4), %%edx\n"
16668 - "21: movl %%eax, 32(%3)\n"
16669 - "22: movl %%edx, 36(%3)\n"
16670 + "21: movl %%eax, %%es:32(%3)\n"
16671 + "22: movl %%edx, %%es:36(%3)\n"
16672 "23: movl 40(%4), %%eax\n"
16673 "24: movl 44(%4), %%edx\n"
16674 - "25: movl %%eax, 40(%3)\n"
16675 - "26: movl %%edx, 44(%3)\n"
16676 + "25: movl %%eax, %%es:40(%3)\n"
16677 + "26: movl %%edx, %%es:44(%3)\n"
16678 "27: movl 48(%4), %%eax\n"
16679 "28: movl 52(%4), %%edx\n"
16680 - "29: movl %%eax, 48(%3)\n"
16681 - "30: movl %%edx, 52(%3)\n"
16682 + "29: movl %%eax, %%es:48(%3)\n"
16683 + "30: movl %%edx, %%es:52(%3)\n"
16684 "31: movl 56(%4), %%eax\n"
16685 "32: movl 60(%4), %%edx\n"
16686 - "33: movl %%eax, 56(%3)\n"
16687 - "34: movl %%edx, 60(%3)\n"
16688 + "33: movl %%eax, %%es:56(%3)\n"
16689 + "34: movl %%edx, %%es:60(%3)\n"
16690 " addl $-64, %0\n"
16691 " addl $64, %4\n"
16692 " addl $64, %3\n"
16693 @@ -282,6 +406,8 @@ __copy_user_intel(void __user *to, const
16694 "36: movl %%eax, %0\n"
16695 "37: rep; movsb\n"
16696 "100:\n"
16697 + " pushl %%ss\n"
16698 + " popl %%ds\n"
16699 ".section .fixup,\"ax\"\n"
16700 "101: lea 0(%%eax,%0,4),%0\n"
16701 " jmp 100b\n"
16702 @@ -328,7 +454,7 @@ __copy_user_intel(void __user *to, const
16703 " .long 99b,101b\n"
16704 ".previous"
16705 : "=&c"(size), "=&D" (d0), "=&S" (d1)
16706 - : "1"(to), "2"(from), "0"(size)
16707 + : "1"(to), "2"(from), "0"(size), "r"(__USER_DS)
16708 : "eax", "edx", "memory");
16709 return size;
16710 }
16711 @@ -338,6 +464,7 @@ __copy_user_zeroing_intel(void *to, cons
16712 {
16713 int d0, d1;
16714 __asm__ __volatile__(
16715 + " movw %w6, %%ds\n"
16716 " .align 2,0x90\n"
16717 "0: movl 32(%4), %%eax\n"
16718 " cmpl $67, %0\n"
16719 @@ -346,36 +473,36 @@ __copy_user_zeroing_intel(void *to, cons
16720 " .align 2,0x90\n"
16721 "2: movl 0(%4), %%eax\n"
16722 "21: movl 4(%4), %%edx\n"
16723 - " movl %%eax, 0(%3)\n"
16724 - " movl %%edx, 4(%3)\n"
16725 + " movl %%eax, %%es:0(%3)\n"
16726 + " movl %%edx, %%es:4(%3)\n"
16727 "3: movl 8(%4), %%eax\n"
16728 "31: movl 12(%4),%%edx\n"
16729 - " movl %%eax, 8(%3)\n"
16730 - " movl %%edx, 12(%3)\n"
16731 + " movl %%eax, %%es:8(%3)\n"
16732 + " movl %%edx, %%es:12(%3)\n"
16733 "4: movl 16(%4), %%eax\n"
16734 "41: movl 20(%4), %%edx\n"
16735 - " movl %%eax, 16(%3)\n"
16736 - " movl %%edx, 20(%3)\n"
16737 + " movl %%eax, %%es:16(%3)\n"
16738 + " movl %%edx, %%es:20(%3)\n"
16739 "10: movl 24(%4), %%eax\n"
16740 "51: movl 28(%4), %%edx\n"
16741 - " movl %%eax, 24(%3)\n"
16742 - " movl %%edx, 28(%3)\n"
16743 + " movl %%eax, %%es:24(%3)\n"
16744 + " movl %%edx, %%es:28(%3)\n"
16745 "11: movl 32(%4), %%eax\n"
16746 "61: movl 36(%4), %%edx\n"
16747 - " movl %%eax, 32(%3)\n"
16748 - " movl %%edx, 36(%3)\n"
16749 + " movl %%eax, %%es:32(%3)\n"
16750 + " movl %%edx, %%es:36(%3)\n"
16751 "12: movl 40(%4), %%eax\n"
16752 "71: movl 44(%4), %%edx\n"
16753 - " movl %%eax, 40(%3)\n"
16754 - " movl %%edx, 44(%3)\n"
16755 + " movl %%eax, %%es:40(%3)\n"
16756 + " movl %%edx, %%es:44(%3)\n"
16757 "13: movl 48(%4), %%eax\n"
16758 "81: movl 52(%4), %%edx\n"
16759 - " movl %%eax, 48(%3)\n"
16760 - " movl %%edx, 52(%3)\n"
16761 + " movl %%eax, %%es:48(%3)\n"
16762 + " movl %%edx, %%es:52(%3)\n"
16763 "14: movl 56(%4), %%eax\n"
16764 "91: movl 60(%4), %%edx\n"
16765 - " movl %%eax, 56(%3)\n"
16766 - " movl %%edx, 60(%3)\n"
16767 + " movl %%eax, %%es:56(%3)\n"
16768 + " movl %%edx, %%es:60(%3)\n"
16769 " addl $-64, %0\n"
16770 " addl $64, %4\n"
16771 " addl $64, %3\n"
16772 @@ -389,6 +516,8 @@ __copy_user_zeroing_intel(void *to, cons
16773 " movl %%eax,%0\n"
16774 "7: rep; movsb\n"
16775 "8:\n"
16776 + " pushl %%ss\n"
16777 + " popl %%ds\n"
16778 ".section .fixup,\"ax\"\n"
16779 "9: lea 0(%%eax,%0,4),%0\n"
16780 "16: pushl %0\n"
16781 @@ -423,7 +552,7 @@ __copy_user_zeroing_intel(void *to, cons
16782 " .long 7b,16b\n"
16783 ".previous"
16784 : "=&c"(size), "=&D" (d0), "=&S" (d1)
16785 - : "1"(to), "2"(from), "0"(size)
16786 + : "1"(to), "2"(from), "0"(size), "r"(__USER_DS)
16787 : "eax", "edx", "memory");
16788 return size;
16789 }
16790 @@ -439,6 +568,7 @@ static unsigned long __copy_user_zeroing
16791 int d0, d1;
16792
16793 __asm__ __volatile__(
16794 + " movw %w6, %%ds\n"
16795 " .align 2,0x90\n"
16796 "0: movl 32(%4), %%eax\n"
16797 " cmpl $67, %0\n"
16798 @@ -447,36 +577,36 @@ static unsigned long __copy_user_zeroing
16799 " .align 2,0x90\n"
16800 "2: movl 0(%4), %%eax\n"
16801 "21: movl 4(%4), %%edx\n"
16802 - " movnti %%eax, 0(%3)\n"
16803 - " movnti %%edx, 4(%3)\n"
16804 + " movnti %%eax, %%es:0(%3)\n"
16805 + " movnti %%edx, %%es:4(%3)\n"
16806 "3: movl 8(%4), %%eax\n"
16807 "31: movl 12(%4),%%edx\n"
16808 - " movnti %%eax, 8(%3)\n"
16809 - " movnti %%edx, 12(%3)\n"
16810 + " movnti %%eax, %%es:8(%3)\n"
16811 + " movnti %%edx, %%es:12(%3)\n"
16812 "4: movl 16(%4), %%eax\n"
16813 "41: movl 20(%4), %%edx\n"
16814 - " movnti %%eax, 16(%3)\n"
16815 - " movnti %%edx, 20(%3)\n"
16816 + " movnti %%eax, %%es:16(%3)\n"
16817 + " movnti %%edx, %%es:20(%3)\n"
16818 "10: movl 24(%4), %%eax\n"
16819 "51: movl 28(%4), %%edx\n"
16820 - " movnti %%eax, 24(%3)\n"
16821 - " movnti %%edx, 28(%3)\n"
16822 + " movnti %%eax, %%es:24(%3)\n"
16823 + " movnti %%edx, %%es:28(%3)\n"
16824 "11: movl 32(%4), %%eax\n"
16825 "61: movl 36(%4), %%edx\n"
16826 - " movnti %%eax, 32(%3)\n"
16827 - " movnti %%edx, 36(%3)\n"
16828 + " movnti %%eax, %%es:32(%3)\n"
16829 + " movnti %%edx, %%es:36(%3)\n"
16830 "12: movl 40(%4), %%eax\n"
16831 "71: movl 44(%4), %%edx\n"
16832 - " movnti %%eax, 40(%3)\n"
16833 - " movnti %%edx, 44(%3)\n"
16834 + " movnti %%eax, %%es:40(%3)\n"
16835 + " movnti %%edx, %%es:44(%3)\n"
16836 "13: movl 48(%4), %%eax\n"
16837 "81: movl 52(%4), %%edx\n"
16838 - " movnti %%eax, 48(%3)\n"
16839 - " movnti %%edx, 52(%3)\n"
16840 + " movnti %%eax, %%es:48(%3)\n"
16841 + " movnti %%edx, %%es:52(%3)\n"
16842 "14: movl 56(%4), %%eax\n"
16843 "91: movl 60(%4), %%edx\n"
16844 - " movnti %%eax, 56(%3)\n"
16845 - " movnti %%edx, 60(%3)\n"
16846 + " movnti %%eax, %%es:56(%3)\n"
16847 + " movnti %%edx, %%es:60(%3)\n"
16848 " addl $-64, %0\n"
16849 " addl $64, %4\n"
16850 " addl $64, %3\n"
16851 @@ -491,6 +621,8 @@ static unsigned long __copy_user_zeroing
16852 " movl %%eax,%0\n"
16853 "7: rep; movsb\n"
16854 "8:\n"
16855 + " pushl %%ss\n"
16856 + " popl %%ds\n"
16857 ".section .fixup,\"ax\"\n"
16858 "9: lea 0(%%eax,%0,4),%0\n"
16859 "16: pushl %0\n"
16860 @@ -525,7 +657,7 @@ static unsigned long __copy_user_zeroing
16861 " .long 7b,16b\n"
16862 ".previous"
16863 : "=&c"(size), "=&D" (d0), "=&S" (d1)
16864 - : "1"(to), "2"(from), "0"(size)
16865 + : "1"(to), "2"(from), "0"(size), "r"(__USER_DS)
16866 : "eax", "edx", "memory");
16867 return size;
16868 }
16869 @@ -536,6 +668,7 @@ static unsigned long __copy_user_intel_n
16870 int d0, d1;
16871
16872 __asm__ __volatile__(
16873 + " movw %w6, %%ds\n"
16874 " .align 2,0x90\n"
16875 "0: movl 32(%4), %%eax\n"
16876 " cmpl $67, %0\n"
16877 @@ -544,36 +677,36 @@ static unsigned long __copy_user_intel_n
16878 " .align 2,0x90\n"
16879 "2: movl 0(%4), %%eax\n"
16880 "21: movl 4(%4), %%edx\n"
16881 - " movnti %%eax, 0(%3)\n"
16882 - " movnti %%edx, 4(%3)\n"
16883 + " movnti %%eax, %%es:0(%3)\n"
16884 + " movnti %%edx, %%es:4(%3)\n"
16885 "3: movl 8(%4), %%eax\n"
16886 "31: movl 12(%4),%%edx\n"
16887 - " movnti %%eax, 8(%3)\n"
16888 - " movnti %%edx, 12(%3)\n"
16889 + " movnti %%eax, %%es:8(%3)\n"
16890 + " movnti %%edx, %%es:12(%3)\n"
16891 "4: movl 16(%4), %%eax\n"
16892 "41: movl 20(%4), %%edx\n"
16893 - " movnti %%eax, 16(%3)\n"
16894 - " movnti %%edx, 20(%3)\n"
16895 + " movnti %%eax, %%es:16(%3)\n"
16896 + " movnti %%edx, %%es:20(%3)\n"
16897 "10: movl 24(%4), %%eax\n"
16898 "51: movl 28(%4), %%edx\n"
16899 - " movnti %%eax, 24(%3)\n"
16900 - " movnti %%edx, 28(%3)\n"
16901 + " movnti %%eax, %%es:24(%3)\n"
16902 + " movnti %%edx, %%es:28(%3)\n"
16903 "11: movl 32(%4), %%eax\n"
16904 "61: movl 36(%4), %%edx\n"
16905 - " movnti %%eax, 32(%3)\n"
16906 - " movnti %%edx, 36(%3)\n"
16907 + " movnti %%eax, %%es:32(%3)\n"
16908 + " movnti %%edx, %%es:36(%3)\n"
16909 "12: movl 40(%4), %%eax\n"
16910 "71: movl 44(%4), %%edx\n"
16911 - " movnti %%eax, 40(%3)\n"
16912 - " movnti %%edx, 44(%3)\n"
16913 + " movnti %%eax, %%es:40(%3)\n"
16914 + " movnti %%edx, %%es:44(%3)\n"
16915 "13: movl 48(%4), %%eax\n"
16916 "81: movl 52(%4), %%edx\n"
16917 - " movnti %%eax, 48(%3)\n"
16918 - " movnti %%edx, 52(%3)\n"
16919 + " movnti %%eax, %%es:48(%3)\n"
16920 + " movnti %%edx, %%es:52(%3)\n"
16921 "14: movl 56(%4), %%eax\n"
16922 "91: movl 60(%4), %%edx\n"
16923 - " movnti %%eax, 56(%3)\n"
16924 - " movnti %%edx, 60(%3)\n"
16925 + " movnti %%eax, %%es:56(%3)\n"
16926 + " movnti %%edx, %%es:60(%3)\n"
16927 " addl $-64, %0\n"
16928 " addl $64, %4\n"
16929 " addl $64, %3\n"
16930 @@ -588,6 +721,8 @@ static unsigned long __copy_user_intel_n
16931 " movl %%eax,%0\n"
16932 "7: rep; movsb\n"
16933 "8:\n"
16934 + " pushl %%ss\n"
16935 + " popl %%ds\n"
16936 ".section .fixup,\"ax\"\n"
16937 "9: lea 0(%%eax,%0,4),%0\n"
16938 "16: jmp 8b\n"
16939 @@ -616,7 +751,7 @@ static unsigned long __copy_user_intel_n
16940 " .long 7b,16b\n"
16941 ".previous"
16942 : "=&c"(size), "=&D" (d0), "=&S" (d1)
16943 - : "1"(to), "2"(from), "0"(size)
16944 + : "1"(to), "2"(from), "0"(size), "r"(__USER_DS)
16945 : "eax", "edx", "memory");
16946 return size;
16947 }
16948 @@ -629,90 +764,146 @@ static unsigned long __copy_user_intel_n
16949 */
16950 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
16951 unsigned long size);
16952 -unsigned long __copy_user_intel(void __user *to, const void *from,
16953 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
16954 + unsigned long size);
16955 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
16956 unsigned long size);
16957 unsigned long __copy_user_zeroing_intel_nocache(void *to,
16958 const void __user *from, unsigned long size);
16959 #endif /* CONFIG_X86_INTEL_USERCOPY */
16960
16961 /* Generic arbitrary sized copy. */
16962 -#define __copy_user(to, from, size) \
16963 -do { \
16964 - int __d0, __d1, __d2; \
16965 - __asm__ __volatile__( \
16966 - " cmp $7,%0\n" \
16967 - " jbe 1f\n" \
16968 - " movl %1,%0\n" \
16969 - " negl %0\n" \
16970 - " andl $7,%0\n" \
16971 - " subl %0,%3\n" \
16972 - "4: rep; movsb\n" \
16973 - " movl %3,%0\n" \
16974 - " shrl $2,%0\n" \
16975 - " andl $3,%3\n" \
16976 - " .align 2,0x90\n" \
16977 - "0: rep; movsl\n" \
16978 - " movl %3,%0\n" \
16979 - "1: rep; movsb\n" \
16980 - "2:\n" \
16981 - ".section .fixup,\"ax\"\n" \
16982 - "5: addl %3,%0\n" \
16983 - " jmp 2b\n" \
16984 - "3: lea 0(%3,%0,4),%0\n" \
16985 - " jmp 2b\n" \
16986 - ".previous\n" \
16987 - ".section __ex_table,\"a\"\n" \
16988 - " .align 4\n" \
16989 - " .long 4b,5b\n" \
16990 - " .long 0b,3b\n" \
16991 - " .long 1b,2b\n" \
16992 - ".previous" \
16993 - : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
16994 - : "3"(size), "0"(size), "1"(to), "2"(from) \
16995 - : "memory"); \
16996 -} while (0)
16997 -
16998 -#define __copy_user_zeroing(to, from, size) \
16999 -do { \
17000 - int __d0, __d1, __d2; \
17001 - __asm__ __volatile__( \
17002 - " cmp $7,%0\n" \
17003 - " jbe 1f\n" \
17004 - " movl %1,%0\n" \
17005 - " negl %0\n" \
17006 - " andl $7,%0\n" \
17007 - " subl %0,%3\n" \
17008 - "4: rep; movsb\n" \
17009 - " movl %3,%0\n" \
17010 - " shrl $2,%0\n" \
17011 - " andl $3,%3\n" \
17012 - " .align 2,0x90\n" \
17013 - "0: rep; movsl\n" \
17014 - " movl %3,%0\n" \
17015 - "1: rep; movsb\n" \
17016 - "2:\n" \
17017 - ".section .fixup,\"ax\"\n" \
17018 - "5: addl %3,%0\n" \
17019 - " jmp 6f\n" \
17020 - "3: lea 0(%3,%0,4),%0\n" \
17021 - "6: pushl %0\n" \
17022 - " pushl %%eax\n" \
17023 - " xorl %%eax,%%eax\n" \
17024 - " rep; stosb\n" \
17025 - " popl %%eax\n" \
17026 - " popl %0\n" \
17027 - " jmp 2b\n" \
17028 - ".previous\n" \
17029 - ".section __ex_table,\"a\"\n" \
17030 - " .align 4\n" \
17031 - " .long 4b,5b\n" \
17032 - " .long 0b,3b\n" \
17033 - " .long 1b,6b\n" \
17034 - ".previous" \
17035 - : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
17036 - : "3"(size), "0"(size), "1"(to), "2"(from) \
17037 - : "memory"); \
17038 -} while (0)
17039 +static unsigned long
17040 +__generic_copy_to_user(void __user *to, const void *from, unsigned long size)
17041 +{
17042 + int __d0, __d1, __d2;
17043 +
17044 + __asm__ __volatile__(
17045 + " movw %w8,%%es\n"
17046 + " cmp $7,%0\n"
17047 + " jbe 1f\n"
17048 + " movl %1,%0\n"
17049 + " negl %0\n"
17050 + " andl $7,%0\n"
17051 + " subl %0,%3\n"
17052 + "4: rep; movsb\n"
17053 + " movl %3,%0\n"
17054 + " shrl $2,%0\n"
17055 + " andl $3,%3\n"
17056 + " .align 2,0x90\n"
17057 + "0: rep; movsl\n"
17058 + " movl %3,%0\n"
17059 + "1: rep; movsb\n"
17060 + "2:\n"
17061 + " pushl %%ss\n"
17062 + " popl %%es\n"
17063 + ".section .fixup,\"ax\"\n"
17064 + "5: addl %3,%0\n"
17065 + " jmp 2b\n"
17066 + "3: lea 0(%3,%0,4),%0\n"
17067 + " jmp 2b\n"
17068 + ".previous\n"
17069 + ".section __ex_table,\"a\"\n"
17070 + " .align 4\n"
17071 + " .long 4b,5b\n"
17072 + " .long 0b,3b\n"
17073 + " .long 1b,2b\n"
17074 + ".previous"
17075 + : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)
17076 + : "3"(size), "0"(size), "1"(to), "2"(from), "r"(__USER_DS)
17077 + : "memory");
17078 + return size;
17079 +}
17080 +
17081 +static unsigned long
17082 +__generic_copy_from_user(void *to, const void __user *from, unsigned long size)
17083 +{
17084 + int __d0, __d1, __d2;
17085 +
17086 + __asm__ __volatile__(
17087 + " movw %w8,%%ds\n"
17088 + " cmp $7,%0\n"
17089 + " jbe 1f\n"
17090 + " movl %1,%0\n"
17091 + " negl %0\n"
17092 + " andl $7,%0\n"
17093 + " subl %0,%3\n"
17094 + "4: rep; movsb\n"
17095 + " movl %3,%0\n"
17096 + " shrl $2,%0\n"
17097 + " andl $3,%3\n"
17098 + " .align 2,0x90\n"
17099 + "0: rep; movsl\n"
17100 + " movl %3,%0\n"
17101 + "1: rep; movsb\n"
17102 + "2:\n"
17103 + " pushl %%ss\n"
17104 + " popl %%ds\n"
17105 + ".section .fixup,\"ax\"\n"
17106 + "5: addl %3,%0\n"
17107 + " jmp 2b\n"
17108 + "3: lea 0(%3,%0,4),%0\n"
17109 + " jmp 2b\n"
17110 + ".previous\n"
17111 + ".section __ex_table,\"a\"\n"
17112 + " .align 4\n"
17113 + " .long 4b,5b\n"
17114 + " .long 0b,3b\n"
17115 + " .long 1b,2b\n"
17116 + ".previous"
17117 + : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)
17118 + : "3"(size), "0"(size), "1"(to), "2"(from), "r"(__USER_DS)
17119 + : "memory");
17120 + return size;
17121 +}
17122 +
17123 +static unsigned long
17124 +__copy_user_zeroing(void *to, const void __user *from, unsigned long size)
17125 +{
17126 + int __d0, __d1, __d2;
17127 +
17128 + __asm__ __volatile__(
17129 + " movw %w8,%%ds\n"
17130 + " cmp $7,%0\n"
17131 + " jbe 1f\n"
17132 + " movl %1,%0\n"
17133 + " negl %0\n"
17134 + " andl $7,%0\n"
17135 + " subl %0,%3\n"
17136 + "4: rep; movsb\n"
17137 + " movl %3,%0\n"
17138 + " shrl $2,%0\n"
17139 + " andl $3,%3\n"
17140 + " .align 2,0x90\n"
17141 + "0: rep; movsl\n"
17142 + " movl %3,%0\n"
17143 + "1: rep; movsb\n"
17144 + "2:\n"
17145 + " pushl %%ss\n"
17146 + " popl %%ds\n"
17147 + ".section .fixup,\"ax\"\n"
17148 + "5: addl %3,%0\n"
17149 + " jmp 6f\n"
17150 + "3: lea 0(%3,%0,4),%0\n"
17151 + "6: pushl %0\n"
17152 + " pushl %%eax\n"
17153 + " xorl %%eax,%%eax\n"
17154 + " rep; stosb\n"
17155 + " popl %%eax\n"
17156 + " popl %0\n"
17157 + " jmp 2b\n"
17158 + ".previous\n"
17159 + ".section __ex_table,\"a\"\n"
17160 + " .align 4\n"
17161 + " .long 4b,5b\n"
17162 + " .long 0b,3b\n"
17163 + " .long 1b,6b\n"
17164 + ".previous"
17165 + : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2)
17166 + : "3"(size), "0"(size), "1"(to), "2"(from), "r"(__USER_DS)
17167 + : "memory");
17168 + return size;
17169 +}
17170
17171 unsigned long __copy_to_user_ll(void __user *to, const void *from,
17172 unsigned long n)
17173 @@ -775,9 +966,9 @@ survive:
17174 }
17175 #endif
17176 if (movsl_is_ok(to, from, n))
17177 - __copy_user(to, from, n);
17178 + n = __generic_copy_to_user(to, from, n);
17179 else
17180 - n = __copy_user_intel(to, from, n);
17181 + n = __generic_copy_to_user_intel(to, from, n);
17182 return n;
17183 }
17184 EXPORT_SYMBOL(__copy_to_user_ll);
17185 @@ -786,7 +977,7 @@ unsigned long __copy_from_user_ll(void *
17186 unsigned long n)
17187 {
17188 if (movsl_is_ok(to, from, n))
17189 - __copy_user_zeroing(to, from, n);
17190 + n = __copy_user_zeroing(to, from, n);
17191 else
17192 n = __copy_user_zeroing_intel(to, from, n);
17193 return n;
17194 @@ -797,10 +988,9 @@ unsigned long __copy_from_user_ll_nozero
17195 unsigned long n)
17196 {
17197 if (movsl_is_ok(to, from, n))
17198 - __copy_user(to, from, n);
17199 + n = __generic_copy_from_user(to, from, n);
17200 else
17201 - n = __copy_user_intel((void __user *)to,
17202 - (const void *)from, n);
17203 + n = __generic_copy_from_user_intel(to, from, n);
17204 return n;
17205 }
17206 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
17207 @@ -812,9 +1002,9 @@ unsigned long __copy_from_user_ll_nocach
17208 if (n > 64 && cpu_has_xmm2)
17209 n = __copy_user_zeroing_intel_nocache(to, from, n);
17210 else
17211 - __copy_user_zeroing(to, from, n);
17212 + n = __copy_user_zeroing(to, from, n);
17213 #else
17214 - __copy_user_zeroing(to, from, n);
17215 + n = __copy_user_zeroing(to, from, n);
17216 #endif
17217 return n;
17218 }
17219 @@ -827,65 +1017,53 @@ unsigned long __copy_from_user_ll_nocach
17220 if (n > 64 && cpu_has_xmm2)
17221 n = __copy_user_intel_nocache(to, from, n);
17222 else
17223 - __copy_user(to, from, n);
17224 + n = __generic_copy_from_user(to, from, n);
17225 #else
17226 - __copy_user(to, from, n);
17227 + n = __generic_copy_from_user(to, from, n);
17228 #endif
17229 return n;
17230 }
17231 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
17232
17233 -/**
17234 - * copy_to_user: - Copy a block of data into user space.
17235 - * @to: Destination address, in user space.
17236 - * @from: Source address, in kernel space.
17237 - * @n: Number of bytes to copy.
17238 - *
17239 - * Context: User context only. This function may sleep.
17240 - *
17241 - * Copy data from kernel space to user space.
17242 - *
17243 - * Returns number of bytes that could not be copied.
17244 - * On success, this will be zero.
17245 - */
17246 -unsigned long
17247 -copy_to_user(void __user *to, const void *from, unsigned long n)
17248 +void copy_from_user_overflow(void)
17249 {
17250 - if (access_ok(VERIFY_WRITE, to, n))
17251 - n = __copy_to_user(to, from, n);
17252 - return n;
17253 + WARN(1, "Buffer overflow detected!\n");
17254 }
17255 -EXPORT_SYMBOL(copy_to_user);
17256 +EXPORT_SYMBOL(copy_from_user_overflow);
17257
17258 -/**
17259 - * copy_from_user: - Copy a block of data from user space.
17260 - * @to: Destination address, in kernel space.
17261 - * @from: Source address, in user space.
17262 - * @n: Number of bytes to copy.
17263 - *
17264 - * Context: User context only. This function may sleep.
17265 - *
17266 - * Copy data from user space to kernel space.
17267 - *
17268 - * Returns number of bytes that could not be copied.
17269 - * On success, this will be zero.
17270 - *
17271 - * If some data could not be copied, this function will pad the copied
17272 - * data to the requested size using zero bytes.
17273 - */
17274 -unsigned long
17275 -_copy_from_user(void *to, const void __user *from, unsigned long n)
17276 +void copy_to_user_overflow(void)
17277 {
17278 - if (access_ok(VERIFY_READ, from, n))
17279 - n = __copy_from_user(to, from, n);
17280 - else
17281 - memset(to, 0, n);
17282 - return n;
17283 + WARN(1, "Buffer overflow detected!\n");
17284 }
17285 -EXPORT_SYMBOL(_copy_from_user);
17286 +EXPORT_SYMBOL(copy_to_user_overflow);
17287
17288 -void copy_from_user_overflow(void)
17289 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17290 +void __set_fs(mm_segment_t x, int cpu)
17291 {
17292 - WARN(1, "Buffer overflow detected!\n");
17293 + unsigned long limit = x.seg;
17294 + struct desc_struct d;
17295 +
17296 + current_thread_info()->addr_limit = x;
17297 + if (unlikely(paravirt_enabled()))
17298 + return;
17299 +
17300 + if (likely(limit))
17301 + limit = (limit - 1UL) >> PAGE_SHIFT;
17302 + pack_descriptor(&d, 0UL, limit, 0xF3, 0xC);
17303 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_DS, &d, DESCTYPE_S);
17304 }
17305 -EXPORT_SYMBOL(copy_from_user_overflow);
17306 +
17307 +void set_fs(mm_segment_t x)
17308 +{
17309 + __set_fs(x, get_cpu());
17310 + put_cpu();
17311 +}
17312 +EXPORT_SYMBOL(copy_from_user);
17313 +#else
17314 +void set_fs(mm_segment_t x)
17315 +{
17316 + current_thread_info()->addr_limit = x;
17317 +}
17318 +#endif
17319 +
17320 +EXPORT_SYMBOL(set_fs);
17321 diff -urNp linux-2.6.35.4/arch/x86/lib/usercopy_64.c linux-2.6.35.4/arch/x86/lib/usercopy_64.c
17322 --- linux-2.6.35.4/arch/x86/lib/usercopy_64.c 2010-08-26 19:47:12.000000000 -0400
17323 +++ linux-2.6.35.4/arch/x86/lib/usercopy_64.c 2010-09-17 20:12:09.000000000 -0400
17324 @@ -42,6 +42,8 @@ long
17325 __strncpy_from_user(char *dst, const char __user *src, long count)
17326 {
17327 long res;
17328 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
17329 + src += PAX_USER_SHADOW_BASE;
17330 __do_strncpy_from_user(dst, src, count, res);
17331 return res;
17332 }
17333 @@ -65,6 +67,8 @@ unsigned long __clear_user(void __user *
17334 {
17335 long __d0;
17336 might_fault();
17337 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
17338 + addr += PAX_USER_SHADOW_BASE;
17339 /* no memory constraint because it doesn't change any memory gcc knows
17340 about */
17341 asm volatile(
17342 @@ -151,10 +155,14 @@ EXPORT_SYMBOL(strlen_user);
17343
17344 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
17345 {
17346 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
17347 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
17348 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
17349 + to += PAX_USER_SHADOW_BASE;
17350 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
17351 + from += PAX_USER_SHADOW_BASE;
17352 return copy_user_generic((__force void *)to, (__force void *)from, len);
17353 - }
17354 - return len;
17355 + }
17356 + return len;
17357 }
17358 EXPORT_SYMBOL(copy_in_user);
17359
17360 diff -urNp linux-2.6.35.4/arch/x86/Makefile linux-2.6.35.4/arch/x86/Makefile
17361 --- linux-2.6.35.4/arch/x86/Makefile 2010-08-26 19:47:12.000000000 -0400
17362 +++ linux-2.6.35.4/arch/x86/Makefile 2010-09-17 20:12:09.000000000 -0400
17363 @@ -191,3 +191,12 @@ define archhelp
17364 echo ' FDARGS="..." arguments for the booted kernel'
17365 echo ' FDINITRD=file initrd for the booted kernel'
17366 endef
17367 +
17368 +define OLD_LD
17369 +
17370 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
17371 +*** Please upgrade your binutils to 2.18 or newer
17372 +endef
17373 +
17374 +archprepare:
17375 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
17376 diff -urNp linux-2.6.35.4/arch/x86/mm/extable.c linux-2.6.35.4/arch/x86/mm/extable.c
17377 --- linux-2.6.35.4/arch/x86/mm/extable.c 2010-08-26 19:47:12.000000000 -0400
17378 +++ linux-2.6.35.4/arch/x86/mm/extable.c 2010-09-17 20:12:09.000000000 -0400
17379 @@ -1,14 +1,71 @@
17380 #include <linux/module.h>
17381 #include <linux/spinlock.h>
17382 +#include <linux/sort.h>
17383 #include <asm/uaccess.h>
17384 +#include <asm/pgtable.h>
17385
17386 +/*
17387 + * The exception table needs to be sorted so that the binary
17388 + * search that we use to find entries in it works properly.
17389 + * This is used both for the kernel exception table and for
17390 + * the exception tables of modules that get loaded.
17391 + */
17392 +static int cmp_ex(const void *a, const void *b)
17393 +{
17394 + const struct exception_table_entry *x = a, *y = b;
17395 +
17396 + /* avoid overflow */
17397 + if (x->insn > y->insn)
17398 + return 1;
17399 + if (x->insn < y->insn)
17400 + return -1;
17401 + return 0;
17402 +}
17403 +
17404 +static void swap_ex(void *a, void *b, int size)
17405 +{
17406 + struct exception_table_entry t, *x = a, *y = b;
17407 +
17408 + t = *x;
17409 +
17410 + pax_open_kernel();
17411 + *x = *y;
17412 + *y = t;
17413 + pax_close_kernel();
17414 +}
17415 +
17416 +void sort_extable(struct exception_table_entry *start,
17417 + struct exception_table_entry *finish)
17418 +{
17419 + sort(start, finish - start, sizeof(struct exception_table_entry),
17420 + cmp_ex, swap_ex);
17421 +}
17422 +
17423 +#ifdef CONFIG_MODULES
17424 +/*
17425 + * If the exception table is sorted, any referring to the module init
17426 + * will be at the beginning or the end.
17427 + */
17428 +void trim_init_extable(struct module *m)
17429 +{
17430 + /*trim the beginning*/
17431 + while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
17432 + m->extable++;
17433 + m->num_exentries--;
17434 + }
17435 + /*trim the end*/
17436 + while (m->num_exentries &&
17437 + within_module_init(m->extable[m->num_exentries-1].insn, m))
17438 + m->num_exentries--;
17439 +}
17440 +#endif /* CONFIG_MODULES */
17441
17442 int fixup_exception(struct pt_regs *regs)
17443 {
17444 const struct exception_table_entry *fixup;
17445
17446 #ifdef CONFIG_PNPBIOS
17447 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
17448 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
17449 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
17450 extern u32 pnp_bios_is_utter_crap;
17451 pnp_bios_is_utter_crap = 1;
17452 diff -urNp linux-2.6.35.4/arch/x86/mm/fault.c linux-2.6.35.4/arch/x86/mm/fault.c
17453 --- linux-2.6.35.4/arch/x86/mm/fault.c 2010-08-26 19:47:12.000000000 -0400
17454 +++ linux-2.6.35.4/arch/x86/mm/fault.c 2010-09-17 20:12:37.000000000 -0400
17455 @@ -11,10 +11,19 @@
17456 #include <linux/kprobes.h> /* __kprobes, ... */
17457 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
17458 #include <linux/perf_event.h> /* perf_sw_event */
17459 +#include <linux/unistd.h>
17460 +#include <linux/compiler.h>
17461
17462 #include <asm/traps.h> /* dotraplinkage, ... */
17463 #include <asm/pgalloc.h> /* pgd_*(), ... */
17464 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
17465 +#include <asm/vsyscall.h>
17466 +#include <asm/tlbflush.h>
17467 +
17468 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17469 +#include <asm/stacktrace.h>
17470 +#include "../kernel/dumpstack.h"
17471 +#endif
17472
17473 /*
17474 * Page fault error code bits:
17475 @@ -52,7 +61,7 @@ static inline int __kprobes notify_page_
17476 int ret = 0;
17477
17478 /* kprobe_running() needs smp_processor_id() */
17479 - if (kprobes_built_in() && !user_mode_vm(regs)) {
17480 + if (kprobes_built_in() && !user_mode(regs)) {
17481 preempt_disable();
17482 if (kprobe_running() && kprobe_fault_handler(regs, 14))
17483 ret = 1;
17484 @@ -173,6 +182,30 @@ force_sig_info_fault(int si_signo, int s
17485 force_sig_info(si_signo, &info, tsk);
17486 }
17487
17488 +#ifdef CONFIG_PAX_EMUTRAMP
17489 +static int pax_handle_fetch_fault(struct pt_regs *regs);
17490 +#endif
17491 +
17492 +#ifdef CONFIG_PAX_PAGEEXEC
17493 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
17494 +{
17495 + pgd_t *pgd;
17496 + pud_t *pud;
17497 + pmd_t *pmd;
17498 +
17499 + pgd = pgd_offset(mm, address);
17500 + if (!pgd_present(*pgd))
17501 + return NULL;
17502 + pud = pud_offset(pgd, address);
17503 + if (!pud_present(*pud))
17504 + return NULL;
17505 + pmd = pmd_offset(pud, address);
17506 + if (!pmd_present(*pmd))
17507 + return NULL;
17508 + return pmd;
17509 +}
17510 +#endif
17511 +
17512 DEFINE_SPINLOCK(pgd_lock);
17513 LIST_HEAD(pgd_list);
17514
17515 @@ -225,11 +258,24 @@ void vmalloc_sync_all(void)
17516 address += PMD_SIZE) {
17517
17518 unsigned long flags;
17519 +
17520 +#ifdef CONFIG_PAX_PER_CPU_PGD
17521 + unsigned long cpu;
17522 +#else
17523 struct page *page;
17524 +#endif
17525
17526 spin_lock_irqsave(&pgd_lock, flags);
17527 +
17528 +#ifdef CONFIG_PAX_PER_CPU_PGD
17529 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
17530 + pgd_t *pgd = get_cpu_pgd(cpu);
17531 +#else
17532 list_for_each_entry(page, &pgd_list, lru) {
17533 - if (!vmalloc_sync_one(page_address(page), address))
17534 + pgd_t *pgd = page_address(page);
17535 +#endif
17536 +
17537 + if (!vmalloc_sync_one(pgd, address))
17538 break;
17539 }
17540 spin_unlock_irqrestore(&pgd_lock, flags);
17541 @@ -259,6 +305,11 @@ static noinline __kprobes int vmalloc_fa
17542 * an interrupt in the middle of a task switch..
17543 */
17544 pgd_paddr = read_cr3();
17545 +
17546 +#ifdef CONFIG_PAX_PER_CPU_PGD
17547 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
17548 +#endif
17549 +
17550 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
17551 if (!pmd_k)
17552 return -1;
17553 @@ -333,15 +384,27 @@ void vmalloc_sync_all(void)
17554
17555 const pgd_t *pgd_ref = pgd_offset_k(address);
17556 unsigned long flags;
17557 +
17558 +#ifdef CONFIG_PAX_PER_CPU_PGD
17559 + unsigned long cpu;
17560 +#else
17561 struct page *page;
17562 +#endif
17563
17564 if (pgd_none(*pgd_ref))
17565 continue;
17566
17567 spin_lock_irqsave(&pgd_lock, flags);
17568 +
17569 +#ifdef CONFIG_PAX_PER_CPU_PGD
17570 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
17571 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
17572 +#else
17573 list_for_each_entry(page, &pgd_list, lru) {
17574 pgd_t *pgd;
17575 pgd = (pgd_t *)page_address(page) + pgd_index(address);
17576 +#endif
17577 +
17578 if (pgd_none(*pgd))
17579 set_pgd(pgd, *pgd_ref);
17580 else
17581 @@ -374,7 +437,14 @@ static noinline __kprobes int vmalloc_fa
17582 * happen within a race in page table update. In the later
17583 * case just flush:
17584 */
17585 +
17586 +#ifdef CONFIG_PAX_PER_CPU_PGD
17587 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
17588 + pgd = pgd_offset_cpu(smp_processor_id(), address);
17589 +#else
17590 pgd = pgd_offset(current->active_mm, address);
17591 +#endif
17592 +
17593 pgd_ref = pgd_offset_k(address);
17594 if (pgd_none(*pgd_ref))
17595 return -1;
17596 @@ -536,7 +606,7 @@ static int is_errata93(struct pt_regs *r
17597 static int is_errata100(struct pt_regs *regs, unsigned long address)
17598 {
17599 #ifdef CONFIG_X86_64
17600 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
17601 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
17602 return 1;
17603 #endif
17604 return 0;
17605 @@ -563,7 +633,7 @@ static int is_f00f_bug(struct pt_regs *r
17606 }
17607
17608 static const char nx_warning[] = KERN_CRIT
17609 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
17610 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
17611
17612 static void
17613 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
17614 @@ -572,15 +642,26 @@ show_fault_oops(struct pt_regs *regs, un
17615 if (!oops_may_print())
17616 return;
17617
17618 - if (error_code & PF_INSTR) {
17619 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
17620 unsigned int level;
17621
17622 pte_t *pte = lookup_address(address, &level);
17623
17624 if (pte && pte_present(*pte) && !pte_exec(*pte))
17625 - printk(nx_warning, current_uid());
17626 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
17627 }
17628
17629 +#ifdef CONFIG_PAX_KERNEXEC
17630 + if (init_mm.start_code <= address && address < init_mm.end_code) {
17631 + if (current->signal->curr_ip)
17632 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
17633 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
17634 + else
17635 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
17636 + current->comm, task_pid_nr(current), current_uid(), current_euid());
17637 + }
17638 +#endif
17639 +
17640 printk(KERN_ALERT "BUG: unable to handle kernel ");
17641 if (address < PAGE_SIZE)
17642 printk(KERN_CONT "NULL pointer dereference");
17643 @@ -705,6 +786,68 @@ __bad_area_nosemaphore(struct pt_regs *r
17644 unsigned long address, int si_code)
17645 {
17646 struct task_struct *tsk = current;
17647 + struct mm_struct *mm = tsk->mm;
17648 +
17649 +#ifdef CONFIG_X86_64
17650 + if (mm && (error_code & PF_INSTR)) {
17651 + if (regs->ip == (unsigned long)vgettimeofday) {
17652 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
17653 + return;
17654 + } else if (regs->ip == (unsigned long)vtime) {
17655 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
17656 + return;
17657 + } else if (regs->ip == (unsigned long)vgetcpu) {
17658 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
17659 + return;
17660 + }
17661 + }
17662 +#endif
17663 +
17664 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
17665 + if (mm && (error_code & PF_USER)) {
17666 + unsigned long ip = regs->ip;
17667 +
17668 + if (v8086_mode(regs))
17669 + ip = ((regs->cs & 0xffff) << 4) + (regs->ip & 0xffff);
17670 +
17671 + /*
17672 + * It's possible to have interrupts off here:
17673 + */
17674 + local_irq_enable();
17675 +
17676 +#ifdef CONFIG_PAX_PAGEEXEC
17677 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
17678 + (((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && regs->ip == address))) {
17679 +
17680 +#ifdef CONFIG_PAX_EMUTRAMP
17681 + switch (pax_handle_fetch_fault(regs)) {
17682 + case 2:
17683 + return;
17684 + }
17685 +#endif
17686 +
17687 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
17688 + do_group_exit(SIGKILL);
17689 + }
17690 +#endif
17691 +
17692 +#ifdef CONFIG_PAX_SEGMEXEC
17693 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (regs->ip + SEGMEXEC_TASK_SIZE == address)) {
17694 +
17695 +#ifdef CONFIG_PAX_EMUTRAMP
17696 + switch (pax_handle_fetch_fault(regs)) {
17697 + case 2:
17698 + return;
17699 + }
17700 +#endif
17701 +
17702 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
17703 + do_group_exit(SIGKILL);
17704 + }
17705 +#endif
17706 +
17707 + }
17708 +#endif
17709
17710 /* User mode accesses just cause a SIGSEGV */
17711 if (error_code & PF_USER) {
17712 @@ -851,6 +994,106 @@ static int spurious_fault_check(unsigned
17713 return 1;
17714 }
17715
17716 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17717 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
17718 +{
17719 + pte_t *pte;
17720 + pmd_t *pmd;
17721 + spinlock_t *ptl;
17722 + unsigned char pte_mask;
17723 +
17724 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
17725 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
17726 + return 0;
17727 +
17728 + /* PaX: it's our fault, let's handle it if we can */
17729 +
17730 + /* PaX: take a look at read faults before acquiring any locks */
17731 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
17732 + /* instruction fetch attempt from a protected page in user mode */
17733 + up_read(&mm->mmap_sem);
17734 +
17735 +#ifdef CONFIG_PAX_EMUTRAMP
17736 + switch (pax_handle_fetch_fault(regs)) {
17737 + case 2:
17738 + return 1;
17739 + }
17740 +#endif
17741 +
17742 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
17743 + do_group_exit(SIGKILL);
17744 + }
17745 +
17746 + pmd = pax_get_pmd(mm, address);
17747 + if (unlikely(!pmd))
17748 + return 0;
17749 +
17750 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
17751 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
17752 + pte_unmap_unlock(pte, ptl);
17753 + return 0;
17754 + }
17755 +
17756 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
17757 + /* write attempt to a protected page in user mode */
17758 + pte_unmap_unlock(pte, ptl);
17759 + return 0;
17760 + }
17761 +
17762 +#ifdef CONFIG_SMP
17763 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
17764 +#else
17765 + if (likely(address > get_limit(regs->cs)))
17766 +#endif
17767 + {
17768 + set_pte(pte, pte_mkread(*pte));
17769 + __flush_tlb_one(address);
17770 + pte_unmap_unlock(pte, ptl);
17771 + up_read(&mm->mmap_sem);
17772 + return 1;
17773 + }
17774 +
17775 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
17776 +
17777 + /*
17778 + * PaX: fill DTLB with user rights and retry
17779 + */
17780 + __asm__ __volatile__ (
17781 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17782 + "movw %w4,%%es\n"
17783 +#endif
17784 + "orb %2,(%1)\n"
17785 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
17786 +/*
17787 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
17788 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
17789 + * page fault when examined during a TLB load attempt. this is true not only
17790 + * for PTEs holding a non-present entry but also present entries that will
17791 + * raise a page fault (such as those set up by PaX, or the copy-on-write
17792 + * mechanism). in effect it means that we do *not* need to flush the TLBs
17793 + * for our target pages since their PTEs are simply not in the TLBs at all.
17794 +
17795 + * the best thing in omitting it is that we gain around 15-20% speed in the
17796 + * fast path of the page fault handler and can get rid of tracing since we
17797 + * can no longer flush unintended entries.
17798 + */
17799 + "invlpg (%0)\n"
17800 +#endif
17801 + "testb $0,%%es:(%0)\n"
17802 + "xorb %3,(%1)\n"
17803 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17804 + "pushl %%ss\n"
17805 + "popl %%es\n"
17806 +#endif
17807 + :
17808 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER), "r" (__USER_DS)
17809 + : "memory", "cc");
17810 + pte_unmap_unlock(pte, ptl);
17811 + up_read(&mm->mmap_sem);
17812 + return 1;
17813 +}
17814 +#endif
17815 +
17816 /*
17817 * Handle a spurious fault caused by a stale TLB entry.
17818 *
17819 @@ -917,6 +1160,9 @@ int show_unhandled_signals = 1;
17820 static inline int
17821 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
17822 {
17823 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
17824 + return 1;
17825 +
17826 if (write) {
17827 /* write, present and write, not present: */
17828 if (unlikely(!(vma->vm_flags & VM_WRITE)))
17829 @@ -950,17 +1196,31 @@ do_page_fault(struct pt_regs *regs, unsi
17830 {
17831 struct vm_area_struct *vma;
17832 struct task_struct *tsk;
17833 - unsigned long address;
17834 struct mm_struct *mm;
17835 int write;
17836 int fault;
17837
17838 + /* Get the faulting address: */
17839 + unsigned long address = read_cr2();
17840 +
17841 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17842 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
17843 + if (!search_exception_tables(regs->ip)) {
17844 + bad_area_nosemaphore(regs, error_code, address);
17845 + return;
17846 + }
17847 + if (address < PAX_USER_SHADOW_BASE) {
17848 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
17849 + printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
17850 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
17851 + } else
17852 + address -= PAX_USER_SHADOW_BASE;
17853 + }
17854 +#endif
17855 +
17856 tsk = current;
17857 mm = tsk->mm;
17858
17859 - /* Get the faulting address: */
17860 - address = read_cr2();
17861 -
17862 /*
17863 * Detect and handle instructions that would cause a page fault for
17864 * both a tracked kernel page and a userspace page.
17865 @@ -1020,7 +1280,7 @@ do_page_fault(struct pt_regs *regs, unsi
17866 * User-mode registers count as a user access even for any
17867 * potential system fault or CPU buglet:
17868 */
17869 - if (user_mode_vm(regs)) {
17870 + if (user_mode(regs)) {
17871 local_irq_enable();
17872 error_code |= PF_USER;
17873 } else {
17874 @@ -1074,6 +1334,11 @@ do_page_fault(struct pt_regs *regs, unsi
17875 might_sleep();
17876 }
17877
17878 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17879 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
17880 + return;
17881 +#endif
17882 +
17883 vma = find_vma(mm, address);
17884 if (unlikely(!vma)) {
17885 bad_area(regs, error_code, address);
17886 @@ -1085,18 +1350,24 @@ do_page_fault(struct pt_regs *regs, unsi
17887 bad_area(regs, error_code, address);
17888 return;
17889 }
17890 - if (error_code & PF_USER) {
17891 - /*
17892 - * Accessing the stack below %sp is always a bug.
17893 - * The large cushion allows instructions like enter
17894 - * and pusha to work. ("enter $65535, $31" pushes
17895 - * 32 pointers and then decrements %sp by 65535.)
17896 - */
17897 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
17898 - bad_area(regs, error_code, address);
17899 - return;
17900 - }
17901 + /*
17902 + * Accessing the stack below %sp is always a bug.
17903 + * The large cushion allows instructions like enter
17904 + * and pusha to work. ("enter $65535, $31" pushes
17905 + * 32 pointers and then decrements %sp by 65535.)
17906 + */
17907 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
17908 + bad_area(regs, error_code, address);
17909 + return;
17910 }
17911 +
17912 +#ifdef CONFIG_PAX_SEGMEXEC
17913 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
17914 + bad_area(regs, error_code, address);
17915 + return;
17916 + }
17917 +#endif
17918 +
17919 if (unlikely(expand_stack(vma, address))) {
17920 bad_area(regs, error_code, address);
17921 return;
17922 @@ -1140,3 +1411,199 @@ good_area:
17923
17924 up_read(&mm->mmap_sem);
17925 }
17926 +
17927 +#ifdef CONFIG_PAX_EMUTRAMP
17928 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
17929 +{
17930 + int err;
17931 +
17932 + do { /* PaX: gcc trampoline emulation #1 */
17933 + unsigned char mov1, mov2;
17934 + unsigned short jmp;
17935 + unsigned int addr1, addr2;
17936 +
17937 +#ifdef CONFIG_X86_64
17938 + if ((regs->ip + 11) >> 32)
17939 + break;
17940 +#endif
17941 +
17942 + err = get_user(mov1, (unsigned char __user *)regs->ip);
17943 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
17944 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
17945 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
17946 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
17947 +
17948 + if (err)
17949 + break;
17950 +
17951 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
17952 + regs->cx = addr1;
17953 + regs->ax = addr2;
17954 + regs->ip = addr2;
17955 + return 2;
17956 + }
17957 + } while (0);
17958 +
17959 + do { /* PaX: gcc trampoline emulation #2 */
17960 + unsigned char mov, jmp;
17961 + unsigned int addr1, addr2;
17962 +
17963 +#ifdef CONFIG_X86_64
17964 + if ((regs->ip + 9) >> 32)
17965 + break;
17966 +#endif
17967 +
17968 + err = get_user(mov, (unsigned char __user *)regs->ip);
17969 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
17970 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
17971 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
17972 +
17973 + if (err)
17974 + break;
17975 +
17976 + if (mov == 0xB9 && jmp == 0xE9) {
17977 + regs->cx = addr1;
17978 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
17979 + return 2;
17980 + }
17981 + } while (0);
17982 +
17983 + return 1; /* PaX in action */
17984 +}
17985 +
17986 +#ifdef CONFIG_X86_64
17987 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
17988 +{
17989 + int err;
17990 +
17991 + do { /* PaX: gcc trampoline emulation #1 */
17992 + unsigned short mov1, mov2, jmp1;
17993 + unsigned char jmp2;
17994 + unsigned int addr1;
17995 + unsigned long addr2;
17996 +
17997 + err = get_user(mov1, (unsigned short __user *)regs->ip);
17998 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
17999 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
18000 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
18001 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
18002 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
18003 +
18004 + if (err)
18005 + break;
18006 +
18007 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
18008 + regs->r11 = addr1;
18009 + regs->r10 = addr2;
18010 + regs->ip = addr1;
18011 + return 2;
18012 + }
18013 + } while (0);
18014 +
18015 + do { /* PaX: gcc trampoline emulation #2 */
18016 + unsigned short mov1, mov2, jmp1;
18017 + unsigned char jmp2;
18018 + unsigned long addr1, addr2;
18019 +
18020 + err = get_user(mov1, (unsigned short __user *)regs->ip);
18021 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
18022 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
18023 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
18024 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
18025 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
18026 +
18027 + if (err)
18028 + break;
18029 +
18030 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
18031 + regs->r11 = addr1;
18032 + regs->r10 = addr2;
18033 + regs->ip = addr1;
18034 + return 2;
18035 + }
18036 + } while (0);
18037 +
18038 + return 1; /* PaX in action */
18039 +}
18040 +#endif
18041 +
18042 +/*
18043 + * PaX: decide what to do with offenders (regs->ip = fault address)
18044 + *
18045 + * returns 1 when task should be killed
18046 + * 2 when gcc trampoline was detected
18047 + */
18048 +static int pax_handle_fetch_fault(struct pt_regs *regs)
18049 +{
18050 + if (v8086_mode(regs))
18051 + return 1;
18052 +
18053 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
18054 + return 1;
18055 +
18056 +#ifdef CONFIG_X86_32
18057 + return pax_handle_fetch_fault_32(regs);
18058 +#else
18059 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
18060 + return pax_handle_fetch_fault_32(regs);
18061 + else
18062 + return pax_handle_fetch_fault_64(regs);
18063 +#endif
18064 +}
18065 +#endif
18066 +
18067 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18068 +void pax_report_insns(void *pc, void *sp)
18069 +{
18070 + long i;
18071 +
18072 + printk(KERN_ERR "PAX: bytes at PC: ");
18073 + for (i = 0; i < 20; i++) {
18074 + unsigned char c;
18075 + if (get_user(c, (__force unsigned char __user *)pc+i))
18076 + printk(KERN_CONT "?? ");
18077 + else
18078 + printk(KERN_CONT "%02x ", c);
18079 + }
18080 + printk("\n");
18081 +
18082 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
18083 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
18084 + unsigned long c;
18085 + if (get_user(c, (__force unsigned long __user *)sp+i))
18086 +#ifdef CONFIG_X86_32
18087 + printk(KERN_CONT "???????? ");
18088 +#else
18089 + printk(KERN_CONT "???????????????? ");
18090 +#endif
18091 + else
18092 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
18093 + }
18094 + printk("\n");
18095 +}
18096 +#endif
18097 +
18098 +/**
18099 + * probe_kernel_write(): safely attempt to write to a location
18100 + * @dst: address to write to
18101 + * @src: pointer to the data that shall be written
18102 + * @size: size of the data chunk
18103 + *
18104 + * Safely write to address @dst from the buffer at @src. If a kernel fault
18105 + * happens, handle that and return -EFAULT.
18106 + */
18107 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
18108 +{
18109 + long ret;
18110 + mm_segment_t old_fs = get_fs();
18111 +
18112 + set_fs(KERNEL_DS);
18113 + pagefault_disable();
18114 + pax_open_kernel();
18115 + ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
18116 + pax_close_kernel();
18117 + pagefault_enable();
18118 + set_fs(old_fs);
18119 +
18120 + return ret ? -EFAULT : 0;
18121 +}
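
For reference, a minimal userspace C sketch (not part of the patch) of the byte sequence that the EMUTRAMP code added in the fault.c hunk above emulates: gcc's i386 nested-function trampoline "mov $chain,%ecx; mov $target,%eax; jmp *%eax". The opcodes and offsets mirror pax_handle_fetch_fault_32() emulation #1; the buffer contents are made-up example values.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* layout: b9 <imm32> = mov $imm,%ecx ; b8 <imm32> = mov $imm,%eax ; ff e0 = jmp *%eax */
static int is_gcc_trampoline_32(const uint8_t *p, uint32_t *chain, uint32_t *target)
{
        if (p[0] != 0xB9 || p[5] != 0xB8 || p[10] != 0xFF || p[11] != 0xE0)
                return 0;
        memcpy(chain,  p + 1, 4);       /* value the trampoline loads into %ecx */
        memcpy(target, p + 6, 4);       /* address it loads into %eax and jumps to */
        return 1;
}

int main(void)
{
        const uint8_t tramp[12] = { 0xB9, 0x78, 0x56, 0x34, 0x12,
                                    0xB8, 0xF0, 0xDE, 0xBC, 0x9A,
                                    0xFF, 0xE0 };
        uint32_t chain, target;

        if (is_gcc_trampoline_32(tramp, &chain, &target))
                printf("trampoline: ecx=%#010x, jump to %#010x\n", chain, target);
        return 0;
}
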
18122 diff -urNp linux-2.6.35.4/arch/x86/mm/gup.c linux-2.6.35.4/arch/x86/mm/gup.c
18123 --- linux-2.6.35.4/arch/x86/mm/gup.c 2010-08-26 19:47:12.000000000 -0400
18124 +++ linux-2.6.35.4/arch/x86/mm/gup.c 2010-09-17 20:12:09.000000000 -0400
18125 @@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long
18126 addr = start;
18127 len = (unsigned long) nr_pages << PAGE_SHIFT;
18128 end = start + len;
18129 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
18130 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
18131 (void __user *)start, len)))
18132 return 0;
18133
18134 diff -urNp linux-2.6.35.4/arch/x86/mm/highmem_32.c linux-2.6.35.4/arch/x86/mm/highmem_32.c
18135 --- linux-2.6.35.4/arch/x86/mm/highmem_32.c 2010-08-26 19:47:12.000000000 -0400
18136 +++ linux-2.6.35.4/arch/x86/mm/highmem_32.c 2010-09-17 20:12:09.000000000 -0400
18137 @@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page
18138 idx = type + KM_TYPE_NR*smp_processor_id();
18139 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
18140 BUG_ON(!pte_none(*(kmap_pte-idx)));
18141 +
18142 + pax_open_kernel();
18143 set_pte(kmap_pte-idx, mk_pte(page, prot));
18144 + pax_close_kernel();
18145
18146 return (void *)vaddr;
18147 }
18148 diff -urNp linux-2.6.35.4/arch/x86/mm/hugetlbpage.c linux-2.6.35.4/arch/x86/mm/hugetlbpage.c
18149 --- linux-2.6.35.4/arch/x86/mm/hugetlbpage.c 2010-08-26 19:47:12.000000000 -0400
18150 +++ linux-2.6.35.4/arch/x86/mm/hugetlbpage.c 2010-09-17 20:12:09.000000000 -0400
18151 @@ -266,13 +266,18 @@ static unsigned long hugetlb_get_unmappe
18152 struct hstate *h = hstate_file(file);
18153 struct mm_struct *mm = current->mm;
18154 struct vm_area_struct *vma;
18155 - unsigned long start_addr;
18156 + unsigned long start_addr, pax_task_size = TASK_SIZE;
18157 +
18158 +#ifdef CONFIG_PAX_SEGMEXEC
18159 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
18160 + pax_task_size = SEGMEXEC_TASK_SIZE;
18161 +#endif
18162
18163 if (len > mm->cached_hole_size) {
18164 - start_addr = mm->free_area_cache;
18165 + start_addr = mm->free_area_cache;
18166 } else {
18167 - start_addr = TASK_UNMAPPED_BASE;
18168 - mm->cached_hole_size = 0;
18169 + start_addr = mm->mmap_base;
18170 + mm->cached_hole_size = 0;
18171 }
18172
18173 full_search:
18174 @@ -280,26 +285,27 @@ full_search:
18175
18176 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
18177 /* At this point: (!vma || addr < vma->vm_end). */
18178 - if (TASK_SIZE - len < addr) {
18179 + if (pax_task_size - len < addr) {
18180 /*
18181 * Start a new search - just in case we missed
18182 * some holes.
18183 */
18184 - if (start_addr != TASK_UNMAPPED_BASE) {
18185 - start_addr = TASK_UNMAPPED_BASE;
18186 + if (start_addr != mm->mmap_base) {
18187 + start_addr = mm->mmap_base;
18188 mm->cached_hole_size = 0;
18189 goto full_search;
18190 }
18191 return -ENOMEM;
18192 }
18193 - if (!vma || addr + len <= vma->vm_start) {
18194 - mm->free_area_cache = addr + len;
18195 - return addr;
18196 - }
18197 + if (check_heap_stack_gap(vma, addr, len))
18198 + break;
18199 if (addr + mm->cached_hole_size < vma->vm_start)
18200 mm->cached_hole_size = vma->vm_start - addr;
18201 addr = ALIGN(vma->vm_end, huge_page_size(h));
18202 }
18203 +
18204 + mm->free_area_cache = addr + len;
18205 + return addr;
18206 }
18207
18208 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
18209 @@ -308,10 +314,9 @@ static unsigned long hugetlb_get_unmappe
18210 {
18211 struct hstate *h = hstate_file(file);
18212 struct mm_struct *mm = current->mm;
18213 - struct vm_area_struct *vma, *prev_vma;
18214 - unsigned long base = mm->mmap_base, addr = addr0;
18215 + struct vm_area_struct *vma;
18216 + unsigned long base = mm->mmap_base, addr;
18217 unsigned long largest_hole = mm->cached_hole_size;
18218 - int first_time = 1;
18219
18220 /* don't allow allocations above current base */
18221 if (mm->free_area_cache > base)
18222 @@ -321,7 +326,7 @@ static unsigned long hugetlb_get_unmappe
18223 largest_hole = 0;
18224 mm->free_area_cache = base;
18225 }
18226 -try_again:
18227 +
18228 /* make sure it can fit in the remaining address space */
18229 if (mm->free_area_cache < len)
18230 goto fail;
18231 @@ -329,33 +334,27 @@ try_again:
18232 /* either no address requested or cant fit in requested address hole */
18233 addr = (mm->free_area_cache - len) & huge_page_mask(h);
18234 do {
18235 + vma = find_vma(mm, addr);
18236 /*
18237 * Lookup failure means no vma is above this address,
18238 * i.e. return with success:
18239 - */
18240 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
18241 - return addr;
18242 -
18243 - /*
18244 * new region fits between prev_vma->vm_end and
18245 * vma->vm_start, use it:
18246 */
18247 - if (addr + len <= vma->vm_start &&
18248 - (!prev_vma || (addr >= prev_vma->vm_end))) {
18249 + if (check_heap_stack_gap(vma, addr, len)) {
18250 /* remember the address as a hint for next time */
18251 - mm->cached_hole_size = largest_hole;
18252 - return (mm->free_area_cache = addr);
18253 - } else {
18254 - /* pull free_area_cache down to the first hole */
18255 - if (mm->free_area_cache == vma->vm_end) {
18256 - mm->free_area_cache = vma->vm_start;
18257 - mm->cached_hole_size = largest_hole;
18258 - }
18259 + mm->cached_hole_size = largest_hole;
18260 + return (mm->free_area_cache = addr);
18261 + }
18262 + /* pull free_area_cache down to the first hole */
18263 + if (mm->free_area_cache == vma->vm_end) {
18264 + mm->free_area_cache = vma->vm_start;
18265 + mm->cached_hole_size = largest_hole;
18266 }
18267
18268 /* remember the largest hole we saw so far */
18269 if (addr + largest_hole < vma->vm_start)
18270 - largest_hole = vma->vm_start - addr;
18271 + largest_hole = vma->vm_start - addr;
18272
18273 /* try just below the current vma->vm_start */
18274 addr = (vma->vm_start - len) & huge_page_mask(h);
18275 @@ -363,22 +362,26 @@ try_again:
18276
18277 fail:
18278 /*
18279 - * if hint left us with no space for the requested
18280 - * mapping then try again:
18281 - */
18282 - if (first_time) {
18283 - mm->free_area_cache = base;
18284 - largest_hole = 0;
18285 - first_time = 0;
18286 - goto try_again;
18287 - }
18288 - /*
18289 * A failed mmap() very likely causes application failure,
18290 * so fall back to the bottom-up function here. This scenario
18291 * can happen with large stack limits and large mmap()
18292 * allocations.
18293 */
18294 - mm->free_area_cache = TASK_UNMAPPED_BASE;
18295 +
18296 +#ifdef CONFIG_PAX_SEGMEXEC
18297 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
18298 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
18299 + else
18300 +#endif
18301 +
18302 + mm->mmap_base = TASK_UNMAPPED_BASE;
18303 +
18304 +#ifdef CONFIG_PAX_RANDMMAP
18305 + if (mm->pax_flags & MF_PAX_RANDMMAP)
18306 + mm->mmap_base += mm->delta_mmap;
18307 +#endif
18308 +
18309 + mm->free_area_cache = mm->mmap_base;
18310 mm->cached_hole_size = ~0UL;
18311 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
18312 len, pgoff, flags);
18313 @@ -386,6 +389,7 @@ fail:
18314 /*
18315 * Restore the topdown base:
18316 */
18317 + mm->mmap_base = base;
18318 mm->free_area_cache = base;
18319 mm->cached_hole_size = ~0UL;
18320
18321 @@ -399,10 +403,17 @@ hugetlb_get_unmapped_area(struct file *f
18322 struct hstate *h = hstate_file(file);
18323 struct mm_struct *mm = current->mm;
18324 struct vm_area_struct *vma;
18325 + unsigned long pax_task_size = TASK_SIZE;
18326
18327 if (len & ~huge_page_mask(h))
18328 return -EINVAL;
18329 - if (len > TASK_SIZE)
18330 +
18331 +#ifdef CONFIG_PAX_SEGMEXEC
18332 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
18333 + pax_task_size = SEGMEXEC_TASK_SIZE;
18334 +#endif
18335 +
18336 + if (len > pax_task_size)
18337 return -ENOMEM;
18338
18339 if (flags & MAP_FIXED) {
18340 @@ -414,8 +425,7 @@ hugetlb_get_unmapped_area(struct file *f
18341 if (addr) {
18342 addr = ALIGN(addr, huge_page_size(h));
18343 vma = find_vma(mm, addr);
18344 - if (TASK_SIZE - len >= addr &&
18345 - (!vma || addr + len <= vma->vm_start))
18346 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
18347 return addr;
18348 }
18349 if (mm->get_unmapped_area == arch_get_unmapped_area)
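
A minimal userspace sketch (not part of the patch) of the top-down search shape used in hugetlb_get_unmapped_area_topdown() above: walk candidate addresses downward and accept the first hole that fits. check_heap_stack_gap() is simplified here to a plain "candidate ends at or before the next mapping" test; the real PaX helper also enforces a guard gap below stack mappings.

#include <stdio.h>
#include <stddef.h>

struct vma { unsigned long start, end; };       /* sorted by start, end exclusive */

static const struct vma *find_vma(const struct vma *v, size_t n, unsigned long addr)
{
        for (size_t i = 0; i < n; i++)
                if (addr < v[i].end)            /* first mapping ending above addr */
                        return &v[i];
        return NULL;
}

static unsigned long topdown_fit(const struct vma *v, size_t n, unsigned long base,
                                 unsigned long len, unsigned long mask)
{
        unsigned long addr = (base - len) & mask;

        while (addr >= len) {
                const struct vma *vma = find_vma(v, n, addr);
                if (!vma || addr + len <= vma->start)
                        return addr;                    /* hole found */
                addr = (vma->start - len) & mask;       /* retry just below this mapping */
        }
        return 0;                                       /* no fit */
}

int main(void)
{
        const struct vma vmas[] = { { 0x60000000UL, 0x60200000UL },
                                    { 0x7f000000UL, 0x7fe00000UL } };

        /* 4 MiB request, 2 MiB aligned, searching down from 2 GiB */
        printf("%#lx\n", topdown_fit(vmas, 2, 0x80000000UL, 0x400000UL, ~(0x200000UL - 1)));
        return 0;
}
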
18350 diff -urNp linux-2.6.35.4/arch/x86/mm/init_32.c linux-2.6.35.4/arch/x86/mm/init_32.c
18351 --- linux-2.6.35.4/arch/x86/mm/init_32.c 2010-08-26 19:47:12.000000000 -0400
18352 +++ linux-2.6.35.4/arch/x86/mm/init_32.c 2010-09-17 20:12:09.000000000 -0400
18353 @@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
18354 }
18355
18356 /*
18357 - * Creates a middle page table and puts a pointer to it in the
18358 - * given global directory entry. This only returns the gd entry
18359 - * in non-PAE compilation mode, since the middle layer is folded.
18360 - */
18361 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
18362 -{
18363 - pud_t *pud;
18364 - pmd_t *pmd_table;
18365 -
18366 -#ifdef CONFIG_X86_PAE
18367 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
18368 - if (after_bootmem)
18369 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
18370 - else
18371 - pmd_table = (pmd_t *)alloc_low_page();
18372 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
18373 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
18374 - pud = pud_offset(pgd, 0);
18375 - BUG_ON(pmd_table != pmd_offset(pud, 0));
18376 -
18377 - return pmd_table;
18378 - }
18379 -#endif
18380 - pud = pud_offset(pgd, 0);
18381 - pmd_table = pmd_offset(pud, 0);
18382 -
18383 - return pmd_table;
18384 -}
18385 -
18386 -/*
18387 * Create a page table and place a pointer to it in a middle page
18388 * directory entry:
18389 */
18390 @@ -121,13 +91,28 @@ static pte_t * __init one_page_table_ini
18391 page_table = (pte_t *)alloc_low_page();
18392
18393 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
18394 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18395 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
18396 +#else
18397 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
18398 +#endif
18399 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
18400 }
18401
18402 return pte_offset_kernel(pmd, 0);
18403 }
18404
18405 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
18406 +{
18407 + pud_t *pud;
18408 + pmd_t *pmd_table;
18409 +
18410 + pud = pud_offset(pgd, 0);
18411 + pmd_table = pmd_offset(pud, 0);
18412 +
18413 + return pmd_table;
18414 +}
18415 +
18416 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
18417 {
18418 int pgd_idx = pgd_index(vaddr);
18419 @@ -201,6 +186,7 @@ page_table_range_init(unsigned long star
18420 int pgd_idx, pmd_idx;
18421 unsigned long vaddr;
18422 pgd_t *pgd;
18423 + pud_t *pud;
18424 pmd_t *pmd;
18425 pte_t *pte = NULL;
18426
18427 @@ -210,8 +196,13 @@ page_table_range_init(unsigned long star
18428 pgd = pgd_base + pgd_idx;
18429
18430 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
18431 - pmd = one_md_table_init(pgd);
18432 - pmd = pmd + pmd_index(vaddr);
18433 + pud = pud_offset(pgd, vaddr);
18434 + pmd = pmd_offset(pud, vaddr);
18435 +
18436 +#ifdef CONFIG_X86_PAE
18437 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
18438 +#endif
18439 +
18440 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
18441 pmd++, pmd_idx++) {
18442 pte = page_table_kmap_check(one_page_table_init(pmd),
18443 @@ -223,11 +214,20 @@ page_table_range_init(unsigned long star
18444 }
18445 }
18446
18447 -static inline int is_kernel_text(unsigned long addr)
18448 +static inline int is_kernel_text(unsigned long start, unsigned long end)
18449 {
18450 - if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
18451 - return 1;
18452 - return 0;
18453 + if ((start > ktla_ktva((unsigned long)_etext) ||
18454 + end <= ktla_ktva((unsigned long)_stext)) &&
18455 + (start > ktla_ktva((unsigned long)_einittext) ||
18456 + end <= ktla_ktva((unsigned long)_sinittext)) &&
18457 +
18458 +#ifdef CONFIG_ACPI_SLEEP
18459 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
18460 +#endif
18461 +
18462 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
18463 + return 0;
18464 + return 1;
18465 }
18466
18467 /*
18468 @@ -244,9 +244,10 @@ kernel_physical_mapping_init(unsigned lo
18469 unsigned long last_map_addr = end;
18470 unsigned long start_pfn, end_pfn;
18471 pgd_t *pgd_base = swapper_pg_dir;
18472 - int pgd_idx, pmd_idx, pte_ofs;
18473 + unsigned int pgd_idx, pmd_idx, pte_ofs;
18474 unsigned long pfn;
18475 pgd_t *pgd;
18476 + pud_t *pud;
18477 pmd_t *pmd;
18478 pte_t *pte;
18479 unsigned pages_2m, pages_4k;
18480 @@ -279,8 +280,13 @@ repeat:
18481 pfn = start_pfn;
18482 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
18483 pgd = pgd_base + pgd_idx;
18484 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
18485 - pmd = one_md_table_init(pgd);
18486 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
18487 + pud = pud_offset(pgd, 0);
18488 + pmd = pmd_offset(pud, 0);
18489 +
18490 +#ifdef CONFIG_X86_PAE
18491 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
18492 +#endif
18493
18494 if (pfn >= end_pfn)
18495 continue;
18496 @@ -292,14 +298,13 @@ repeat:
18497 #endif
18498 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
18499 pmd++, pmd_idx++) {
18500 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
18501 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
18502
18503 /*
18504 * Map with big pages if possible, otherwise
18505 * create normal page tables:
18506 */
18507 if (use_pse) {
18508 - unsigned int addr2;
18509 pgprot_t prot = PAGE_KERNEL_LARGE;
18510 /*
18511 * first pass will use the same initial
18512 @@ -309,11 +314,7 @@ repeat:
18513 __pgprot(PTE_IDENT_ATTR |
18514 _PAGE_PSE);
18515
18516 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
18517 - PAGE_OFFSET + PAGE_SIZE-1;
18518 -
18519 - if (is_kernel_text(addr) ||
18520 - is_kernel_text(addr2))
18521 + if (is_kernel_text(address, address + PMD_SIZE))
18522 prot = PAGE_KERNEL_LARGE_EXEC;
18523
18524 pages_2m++;
18525 @@ -330,7 +331,7 @@ repeat:
18526 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
18527 pte += pte_ofs;
18528 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
18529 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
18530 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
18531 pgprot_t prot = PAGE_KERNEL;
18532 /*
18533 * first pass will use the same initial
18534 @@ -338,7 +339,7 @@ repeat:
18535 */
18536 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
18537
18538 - if (is_kernel_text(addr))
18539 + if (is_kernel_text(address, address + PAGE_SIZE))
18540 prot = PAGE_KERNEL_EXEC;
18541
18542 pages_4k++;
18543 @@ -491,7 +492,7 @@ void __init native_pagetable_setup_start
18544
18545 pud = pud_offset(pgd, va);
18546 pmd = pmd_offset(pud, va);
18547 - if (!pmd_present(*pmd))
18548 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
18549 break;
18550
18551 pte = pte_offset_kernel(pmd, va);
18552 @@ -543,9 +544,7 @@ void __init early_ioremap_page_table_ran
18553
18554 static void __init pagetable_init(void)
18555 {
18556 - pgd_t *pgd_base = swapper_pg_dir;
18557 -
18558 - permanent_kmaps_init(pgd_base);
18559 + permanent_kmaps_init(swapper_pg_dir);
18560 }
18561
18562 #ifdef CONFIG_ACPI_SLEEP
18563 @@ -553,12 +552,12 @@ static void __init pagetable_init(void)
18564 * ACPI suspend needs this for resume, because things like the intel-agp
18565 * driver might have split up a kernel 4MB mapping.
18566 */
18567 -char swsusp_pg_dir[PAGE_SIZE]
18568 +pgd_t swsusp_pg_dir[PTRS_PER_PGD]
18569 __attribute__ ((aligned(PAGE_SIZE)));
18570
18571 static inline void save_pg_dir(void)
18572 {
18573 - memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
18574 + clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
18575 }
18576 #else /* !CONFIG_ACPI_SLEEP */
18577 static inline void save_pg_dir(void)
18578 @@ -590,7 +589,7 @@ void zap_low_mappings(bool early)
18579 flush_tlb_all();
18580 }
18581
18582 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
18583 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
18584 EXPORT_SYMBOL_GPL(__supported_pte_mask);
18585
18586 /* user-defined highmem size */
18587 @@ -781,7 +780,7 @@ void __init setup_bootmem_allocator(void
18588 * Initialize the boot-time allocator (with low memory only):
18589 */
18590 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
18591 - bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
18592 + bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
18593 PAGE_SIZE);
18594 if (bootmap == -1L)
18595 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
18596 @@ -871,6 +870,12 @@ void __init mem_init(void)
18597
18598 pci_iommu_alloc();
18599
18600 +#ifdef CONFIG_PAX_PER_CPU_PGD
18601 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
18602 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
18603 + KERNEL_PGD_PTRS);
18604 +#endif
18605 +
18606 #ifdef CONFIG_FLATMEM
18607 BUG_ON(!mem_map);
18608 #endif
18609 @@ -888,7 +893,7 @@ void __init mem_init(void)
18610 set_highmem_pages_init();
18611
18612 codesize = (unsigned long) &_etext - (unsigned long) &_text;
18613 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
18614 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
18615 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
18616
18617 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
18618 @@ -929,10 +934,10 @@ void __init mem_init(void)
18619 ((unsigned long)&__init_end -
18620 (unsigned long)&__init_begin) >> 10,
18621
18622 - (unsigned long)&_etext, (unsigned long)&_edata,
18623 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
18624 + (unsigned long)&_sdata, (unsigned long)&_edata,
18625 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
18626
18627 - (unsigned long)&_text, (unsigned long)&_etext,
18628 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
18629 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
18630
18631 /*
18632 @@ -1013,6 +1018,7 @@ void set_kernel_text_rw(void)
18633 if (!kernel_set_to_readonly)
18634 return;
18635
18636 + start = ktla_ktva(start);
18637 pr_debug("Set kernel text: %lx - %lx for read write\n",
18638 start, start+size);
18639
18640 @@ -1027,6 +1033,7 @@ void set_kernel_text_ro(void)
18641 if (!kernel_set_to_readonly)
18642 return;
18643
18644 + start = ktla_ktva(start);
18645 pr_debug("Set kernel text: %lx - %lx for read only\n",
18646 start, start+size);
18647
18648 @@ -1038,6 +1045,7 @@ void mark_rodata_ro(void)
18649 unsigned long start = PFN_ALIGN(_text);
18650 unsigned long size = PFN_ALIGN(_etext) - start;
18651
18652 + start = ktla_ktva(start);
18653 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
18654 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
18655 size >> 10);
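
The rewritten is_kernel_text() above now takes a [start, end) range and reports whether it overlaps the kernel text, init text, ACPI wakeup or low BIOS regions. A minimal sketch (not part of the patch) of the overlap test it applies to each region, with made-up example addresses:

#include <stdio.h>

/* a range [start, end) touches [lo, hi) unless it lies entirely above or below it;
 * the comparison operators mirror the ones used in the patched is_kernel_text() */
static int overlaps(unsigned long start, unsigned long end, unsigned long lo, unsigned long hi)
{
        return !(start > hi || end <= lo);
}

int main(void)
{
        /* example values only: pretend the protected region is [0xc1000000, 0xc1600000) */
        printf("%d\n", overlaps(0xc15ff000UL, 0xc1800000UL, 0xc1000000UL, 0xc1600000UL)); /* 1 */
        printf("%d\n", overlaps(0xc1700000UL, 0xc1800000UL, 0xc1000000UL, 0xc1600000UL)); /* 0 */
        return 0;
}
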
18656 diff -urNp linux-2.6.35.4/arch/x86/mm/init_64.c linux-2.6.35.4/arch/x86/mm/init_64.c
18657 --- linux-2.6.35.4/arch/x86/mm/init_64.c 2010-08-26 19:47:12.000000000 -0400
18658 +++ linux-2.6.35.4/arch/x86/mm/init_64.c 2010-09-17 20:12:09.000000000 -0400
18659 @@ -50,7 +50,6 @@
18660 #include <asm/numa.h>
18661 #include <asm/cacheflush.h>
18662 #include <asm/init.h>
18663 -#include <linux/bootmem.h>
18664
18665 static unsigned long dma_reserve __initdata;
18666
18667 @@ -74,7 +73,7 @@ early_param("gbpages", parse_direct_gbpa
18668 * around without checking the pgd every time.
18669 */
18670
18671 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
18672 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
18673 EXPORT_SYMBOL_GPL(__supported_pte_mask);
18674
18675 int force_personality32;
18676 @@ -165,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
18677 pmd = fill_pmd(pud, vaddr);
18678 pte = fill_pte(pmd, vaddr);
18679
18680 + pax_open_kernel();
18681 set_pte(pte, new_pte);
18682 + pax_close_kernel();
18683
18684 /*
18685 * It's enough to flush this one mapping.
18686 @@ -224,14 +225,12 @@ static void __init __init_extra_mapping(
18687 pgd = pgd_offset_k((unsigned long)__va(phys));
18688 if (pgd_none(*pgd)) {
18689 pud = (pud_t *) spp_getpage();
18690 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
18691 - _PAGE_USER));
18692 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
18693 }
18694 pud = pud_offset(pgd, (unsigned long)__va(phys));
18695 if (pud_none(*pud)) {
18696 pmd = (pmd_t *) spp_getpage();
18697 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
18698 - _PAGE_USER));
18699 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
18700 }
18701 pmd = pmd_offset(pud, phys);
18702 BUG_ON(!pmd_none(*pmd));
18703 @@ -680,6 +679,12 @@ void __init mem_init(void)
18704
18705 pci_iommu_alloc();
18706
18707 +#ifdef CONFIG_PAX_PER_CPU_PGD
18708 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
18709 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
18710 + KERNEL_PGD_PTRS);
18711 +#endif
18712 +
18713 /* clear_bss() already clear the empty_zero_page */
18714
18715 reservedpages = 0;
18716 @@ -886,8 +891,8 @@ int kern_addr_valid(unsigned long addr)
18717 static struct vm_area_struct gate_vma = {
18718 .vm_start = VSYSCALL_START,
18719 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
18720 - .vm_page_prot = PAGE_READONLY_EXEC,
18721 - .vm_flags = VM_READ | VM_EXEC
18722 + .vm_page_prot = PAGE_READONLY,
18723 + .vm_flags = VM_READ
18724 };
18725
18726 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
18727 @@ -921,7 +926,7 @@ int in_gate_area_no_task(unsigned long a
18728
18729 const char *arch_vma_name(struct vm_area_struct *vma)
18730 {
18731 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
18732 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
18733 return "[vdso]";
18734 if (vma == &gate_vma)
18735 return "[vsyscall]";
18736 diff -urNp linux-2.6.35.4/arch/x86/mm/init.c linux-2.6.35.4/arch/x86/mm/init.c
18737 --- linux-2.6.35.4/arch/x86/mm/init.c 2010-08-26 19:47:12.000000000 -0400
18738 +++ linux-2.6.35.4/arch/x86/mm/init.c 2010-09-17 20:12:09.000000000 -0400
18739 @@ -70,11 +70,7 @@ static void __init find_early_table_spac
18740 * cause a hotspot and fill up ZONE_DMA. The page tables
18741 * need roughly 0.5KB per GB.
18742 */
18743 -#ifdef CONFIG_X86_32
18744 - start = 0x7000;
18745 -#else
18746 - start = 0x8000;
18747 -#endif
18748 + start = 0x100000;
18749 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
18750 tables, PAGE_SIZE);
18751 if (e820_table_start == -1UL)
18752 @@ -321,7 +317,13 @@ unsigned long __init_refok init_memory_m
18753 */
18754 int devmem_is_allowed(unsigned long pagenr)
18755 {
18756 - if (pagenr <= 256)
18757 + if (!pagenr)
18758 + return 1;
18759 +#ifdef CONFIG_VM86
18760 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
18761 + return 1;
18762 +#endif
18763 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
18764 return 1;
18765 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
18766 return 0;
18767 @@ -380,6 +382,88 @@ void free_init_pages(char *what, unsigne
18768
18769 void free_initmem(void)
18770 {
18771 +
18772 +#ifdef CONFIG_PAX_KERNEXEC
18773 +#ifdef CONFIG_X86_32
18774 + /* PaX: limit KERNEL_CS to actual size */
18775 + unsigned long addr, limit;
18776 + struct desc_struct d;
18777 + int cpu;
18778 +
18779 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
18780 + limit = (limit - 1UL) >> PAGE_SHIFT;
18781 +
18782 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
18783 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
18784 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
18785 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
18786 + }
18787 +
18788 + /* PaX: make KERNEL_CS read-only */
18789 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
18790 + if (!paravirt_enabled())
18791 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
18792 +/*
18793 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
18794 + pgd = pgd_offset_k(addr);
18795 + pud = pud_offset(pgd, addr);
18796 + pmd = pmd_offset(pud, addr);
18797 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
18798 + }
18799 +*/
18800 +#ifdef CONFIG_X86_PAE
18801 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
18802 +/*
18803 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
18804 + pgd = pgd_offset_k(addr);
18805 + pud = pud_offset(pgd, addr);
18806 + pmd = pmd_offset(pud, addr);
18807 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
18808 + }
18809 +*/
18810 +#endif
18811 +
18812 +#ifdef CONFIG_MODULES
18813 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
18814 +#endif
18815 +
18816 +#else
18817 + pgd_t *pgd;
18818 + pud_t *pud;
18819 + pmd_t *pmd;
18820 + unsigned long addr, end;
18821 +
18822 + /* PaX: make kernel code/rodata read-only, rest non-executable */
18823 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
18824 + pgd = pgd_offset_k(addr);
18825 + pud = pud_offset(pgd, addr);
18826 + pmd = pmd_offset(pud, addr);
18827 + if (!pmd_present(*pmd))
18828 + continue;
18829 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
18830 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
18831 + else
18832 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
18833 + }
18834 +
18835 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
18836 + end = addr + KERNEL_IMAGE_SIZE;
18837 + for (; addr < end; addr += PMD_SIZE) {
18838 + pgd = pgd_offset_k(addr);
18839 + pud = pud_offset(pgd, addr);
18840 + pmd = pmd_offset(pud, addr);
18841 + if (!pmd_present(*pmd))
18842 + continue;
18843 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
18844 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
18845 + else
18846 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
18847 + }
18848 +#endif
18849 +
18850 + flush_tlb_all();
18851 +#endif
18852 +
18853 free_init_pages("unused kernel memory",
18854 (unsigned long)(&__init_begin),
18855 (unsigned long)(&__init_end));
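
A small userspace sketch (not part of the patch) of the unconditional allow-list in the patched devmem_is_allowed() above, assuming the usual x86 values ISA_START_ADDRESS = 0xa0000 and ISA_END_ADDRESS = 0x100000; pages outside these ranges still go through the iomem_is_exclusive()/page_is_ram() checks in the real function.

#include <stdio.h>

#define PAGE_SHIFT      12
#define ISA_START_PFN   (0xa0000UL  >> PAGE_SHIFT)      /* 0xa0  */
#define ISA_END_PFN     (0x100000UL >> PAGE_SHIFT)      /* 0x100 */
#define CONFIG_VM86     1                               /* example configuration */

static int devmem_pfn_always_allowed(unsigned long pagenr)
{
        if (!pagenr)                                    /* page 0: real-mode IVT/BDA */
                return 1;
#if CONFIG_VM86
        if (pagenr < ISA_START_PFN)                     /* low RAM needed by vm86 */
                return 1;
#endif
        return ISA_START_PFN <= pagenr && pagenr < ISA_END_PFN; /* legacy ISA hole */
}

int main(void)
{
        printf("%d %d %d\n",
               devmem_pfn_always_allowed(0),            /* 1 */
               devmem_pfn_always_allowed(0xb8),         /* 1: VGA text buffer page */
               devmem_pfn_always_allowed(0x200));       /* 0: ordinary RAM at 2 MiB */
        return 0;
}
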
18856 diff -urNp linux-2.6.35.4/arch/x86/mm/iomap_32.c linux-2.6.35.4/arch/x86/mm/iomap_32.c
18857 --- linux-2.6.35.4/arch/x86/mm/iomap_32.c 2010-08-26 19:47:12.000000000 -0400
18858 +++ linux-2.6.35.4/arch/x86/mm/iomap_32.c 2010-09-17 20:12:09.000000000 -0400
18859 @@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long
18860 debug_kmap_atomic(type);
18861 idx = type + KM_TYPE_NR * smp_processor_id();
18862 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
18863 +
18864 + pax_open_kernel();
18865 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
18866 + pax_close_kernel();
18867 +
18868 arch_flush_lazy_mmu_mode();
18869
18870 return (void *)vaddr;
18871 diff -urNp linux-2.6.35.4/arch/x86/mm/ioremap.c linux-2.6.35.4/arch/x86/mm/ioremap.c
18872 --- linux-2.6.35.4/arch/x86/mm/ioremap.c 2010-08-26 19:47:12.000000000 -0400
18873 +++ linux-2.6.35.4/arch/x86/mm/ioremap.c 2010-09-17 20:12:09.000000000 -0400
18874 @@ -100,13 +100,10 @@ static void __iomem *__ioremap_caller(re
18875 /*
18876 * Don't allow anybody to remap normal RAM that we're using..
18877 */
18878 - for (pfn = phys_addr >> PAGE_SHIFT;
18879 - (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
18880 - pfn++) {
18881 -
18882 + for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
18883 int is_ram = page_is_ram(pfn);
18884
18885 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
18886 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
18887 return NULL;
18888 WARN_ON_ONCE(is_ram);
18889 }
18890 @@ -346,7 +343,7 @@ static int __init early_ioremap_debug_se
18891 early_param("early_ioremap_debug", early_ioremap_debug_setup);
18892
18893 static __initdata int after_paging_init;
18894 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
18895 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
18896
18897 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
18898 {
18899 @@ -378,8 +375,7 @@ void __init early_ioremap_init(void)
18900 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
18901
18902 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
18903 - memset(bm_pte, 0, sizeof(bm_pte));
18904 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
18905 + pmd_populate_user(&init_mm, pmd, bm_pte);
18906
18907 /*
18908 * The boot-ioremap range spans multiple pmds, for which
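
The ioremap loop above casts pfn to resource_size_t before shifting. A minimal sketch (not part of the patch) of the truncation that cast avoids on a 32-bit kernel with PAE, where pfn << PAGE_SHIFT computed in 32 bits wraps for physical addresses at or above 4 GiB:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

#define PAGE_SHIFT 12

int main(void)
{
        uint32_t pfn = 0x100000;        /* page frame at physical address 4 GiB */

        uint32_t truncated = pfn << PAGE_SHIFT;                 /* wraps to 0 */
        uint64_t widened   = (uint64_t)pfn << PAGE_SHIFT;       /* 0x100000000 */

        printf("32-bit shift: 0x%" PRIx32 "\n", truncated);
        printf("64-bit shift: 0x%" PRIx64 "\n", widened);
        return 0;
}
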
18909 diff -urNp linux-2.6.35.4/arch/x86/mm/kmemcheck/kmemcheck.c linux-2.6.35.4/arch/x86/mm/kmemcheck/kmemcheck.c
18910 --- linux-2.6.35.4/arch/x86/mm/kmemcheck/kmemcheck.c 2010-08-26 19:47:12.000000000 -0400
18911 +++ linux-2.6.35.4/arch/x86/mm/kmemcheck/kmemcheck.c 2010-09-17 20:12:09.000000000 -0400
18912 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
18913 * memory (e.g. tracked pages)? For now, we need this to avoid
18914 * invoking kmemcheck for PnP BIOS calls.
18915 */
18916 - if (regs->flags & X86_VM_MASK)
18917 + if (v8086_mode(regs))
18918 return false;
18919 - if (regs->cs != __KERNEL_CS)
18920 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
18921 return false;
18922
18923 pte = kmemcheck_pte_lookup(address);
18924 diff -urNp linux-2.6.35.4/arch/x86/mm/mmap.c linux-2.6.35.4/arch/x86/mm/mmap.c
18925 --- linux-2.6.35.4/arch/x86/mm/mmap.c 2010-08-26 19:47:12.000000000 -0400
18926 +++ linux-2.6.35.4/arch/x86/mm/mmap.c 2010-09-17 20:12:09.000000000 -0400
18927 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
18928 * Leave an at least ~128 MB hole with possible stack randomization.
18929 */
18930 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
18931 -#define MAX_GAP (TASK_SIZE/6*5)
18932 +#define MAX_GAP (pax_task_size/6*5)
18933
18934 /*
18935 * True on X86_32 or when emulating IA32 on X86_64
18936 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
18937 return rnd << PAGE_SHIFT;
18938 }
18939
18940 -static unsigned long mmap_base(void)
18941 +static unsigned long mmap_base(struct mm_struct *mm)
18942 {
18943 unsigned long gap = rlimit(RLIMIT_STACK);
18944 + unsigned long pax_task_size = TASK_SIZE;
18945 +
18946 +#ifdef CONFIG_PAX_SEGMEXEC
18947 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
18948 + pax_task_size = SEGMEXEC_TASK_SIZE;
18949 +#endif
18950
18951 if (gap < MIN_GAP)
18952 gap = MIN_GAP;
18953 else if (gap > MAX_GAP)
18954 gap = MAX_GAP;
18955
18956 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
18957 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
18958 }
18959
18960 /*
18961 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
18962 * does, but not when emulating X86_32
18963 */
18964 -static unsigned long mmap_legacy_base(void)
18965 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
18966 {
18967 - if (mmap_is_ia32())
18968 + if (mmap_is_ia32()) {
18969 +
18970 +#ifdef CONFIG_PAX_SEGMEXEC
18971 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
18972 + return SEGMEXEC_TASK_UNMAPPED_BASE;
18973 + else
18974 +#endif
18975 +
18976 return TASK_UNMAPPED_BASE;
18977 - else
18978 + } else
18979 return TASK_UNMAPPED_BASE + mmap_rnd();
18980 }
18981
18982 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
18983 void arch_pick_mmap_layout(struct mm_struct *mm)
18984 {
18985 if (mmap_is_legacy()) {
18986 - mm->mmap_base = mmap_legacy_base();
18987 + mm->mmap_base = mmap_legacy_base(mm);
18988 +
18989 +#ifdef CONFIG_PAX_RANDMMAP
18990 + if (mm->pax_flags & MF_PAX_RANDMMAP)
18991 + mm->mmap_base += mm->delta_mmap;
18992 +#endif
18993 +
18994 mm->get_unmapped_area = arch_get_unmapped_area;
18995 mm->unmap_area = arch_unmap_area;
18996 } else {
18997 - mm->mmap_base = mmap_base();
18998 + mm->mmap_base = mmap_base(mm);
18999 +
19000 +#ifdef CONFIG_PAX_RANDMMAP
19001 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19002 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
19003 +#endif
19004 +
19005 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
19006 mm->unmap_area = arch_unmap_area_topdown;
19007 }
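
A minimal userspace sketch (not part of the patch) of the gap clamping performed by the patched mmap_base() above. MIN_GAP is simplified to a flat 128 MiB (the real macro also adds stack_maxrandom_size()) and the numbers are examples for a 3 GiB task size.

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

static unsigned long mmap_base(unsigned long task_size, unsigned long stack_rlimit,
                               unsigned long rnd)
{
        unsigned long min_gap = 128UL << 20;            /* simplified MIN_GAP */
        unsigned long max_gap = task_size / 6 * 5;      /* MAX_GAP */
        unsigned long gap = stack_rlimit;

        if (gap < min_gap)
                gap = min_gap;
        else if (gap > max_gap)
                gap = max_gap;

        return PAGE_ALIGN(task_size - gap - rnd);
}

int main(void)
{
        /* an 8 MiB stack rlimit is below MIN_GAP, so the 128 MiB floor applies */
        printf("%#lx\n", mmap_base(3UL << 30, 8UL << 20, 0x123000UL));
        return 0;
}
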
19008 diff -urNp linux-2.6.35.4/arch/x86/mm/numa_32.c linux-2.6.35.4/arch/x86/mm/numa_32.c
19009 --- linux-2.6.35.4/arch/x86/mm/numa_32.c 2010-08-26 19:47:12.000000000 -0400
19010 +++ linux-2.6.35.4/arch/x86/mm/numa_32.c 2010-09-17 20:12:09.000000000 -0400
19011 @@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int
19012 }
19013 #endif
19014
19015 -extern unsigned long find_max_low_pfn(void);
19016 extern unsigned long highend_pfn, highstart_pfn;
19017
19018 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
19019 diff -urNp linux-2.6.35.4/arch/x86/mm/pageattr.c linux-2.6.35.4/arch/x86/mm/pageattr.c
19020 --- linux-2.6.35.4/arch/x86/mm/pageattr.c 2010-08-26 19:47:12.000000000 -0400
19021 +++ linux-2.6.35.4/arch/x86/mm/pageattr.c 2010-09-17 20:12:09.000000000 -0400
19022 @@ -261,16 +261,17 @@ static inline pgprot_t static_protection
19023 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
19024 */
19025 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
19026 - pgprot_val(forbidden) |= _PAGE_NX;
19027 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
19028
19029 /*
19030 * The kernel text needs to be executable for obvious reasons
19031 * Does not cover __inittext since that is gone later on. On
19032 * 64bit we do not enforce !NX on the low mapping
19033 */
19034 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
19035 - pgprot_val(forbidden) |= _PAGE_NX;
19036 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
19037 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
19038
19039 +#ifdef CONFIG_DEBUG_RODATA
19040 /*
19041 * The .rodata section needs to be read-only. Using the pfn
19042 * catches all aliases.
19043 @@ -278,6 +279,7 @@ static inline pgprot_t static_protection
19044 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
19045 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
19046 pgprot_val(forbidden) |= _PAGE_RW;
19047 +#endif
19048
19049 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
19050 /*
19051 @@ -316,6 +318,13 @@ static inline pgprot_t static_protection
19052 }
19053 #endif
19054
19055 +#ifdef CONFIG_PAX_KERNEXEC
19056 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
19057 + pgprot_val(forbidden) |= _PAGE_RW;
19058 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
19059 + }
19060 +#endif
19061 +
19062 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
19063
19064 return prot;
19065 @@ -368,23 +377,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
19066 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
19067 {
19068 /* change init_mm */
19069 + pax_open_kernel();
19070 set_pte_atomic(kpte, pte);
19071 +
19072 #ifdef CONFIG_X86_32
19073 if (!SHARED_KERNEL_PMD) {
19074 +
19075 +#ifdef CONFIG_PAX_PER_CPU_PGD
19076 + unsigned long cpu;
19077 +#else
19078 struct page *page;
19079 +#endif
19080
19081 +#ifdef CONFIG_PAX_PER_CPU_PGD
19082 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19083 + pgd_t *pgd = get_cpu_pgd(cpu);
19084 +#else
19085 list_for_each_entry(page, &pgd_list, lru) {
19086 - pgd_t *pgd;
19087 + pgd_t *pgd = (pgd_t *)page_address(page);
19088 +#endif
19089 +
19090 pud_t *pud;
19091 pmd_t *pmd;
19092
19093 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
19094 + pgd += pgd_index(address);
19095 pud = pud_offset(pgd, address);
19096 pmd = pmd_offset(pud, address);
19097 set_pte_atomic((pte_t *)pmd, pte);
19098 }
19099 }
19100 #endif
19101 + pax_close_kernel();
19102 }
19103
19104 static int
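
A simplified sketch (not part of the patch) of how static_protections() above builds a "forbidden" bit mask and strips it from the requested protections, including the `_PAGE_NX & __supported_pte_mask` idiom used throughout this patch so the NX bit is only enforced where the CPU supports it. Bit values follow the usual x86 PTE layout; the real KERNEXEC check operates on page frame numbers rather than virtual addresses.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

#define _PAGE_RW   (1ULL << 1)
#define _PAGE_NX   (1ULL << 63)

static int within(unsigned long addr, unsigned long start, unsigned long end)
{
        return addr >= start && addr < end;
}

static uint64_t static_protections(uint64_t prot, unsigned long addr,
                                   unsigned long text_start, unsigned long text_end,
                                   uint64_t supported_pte_mask)
{
        uint64_t forbidden = 0;

        if (within(addr, text_start, text_end)) {
                forbidden |= _PAGE_RW;                          /* keep kernel text read-only */
                forbidden |= _PAGE_NX & supported_pte_mask;     /* and executable, if NX exists */
        }
        return prot & ~forbidden;
}

int main(void)
{
        uint64_t req = _PAGE_RW | _PAGE_NX;     /* request writable, non-executable */

        /* example addresses: a page inside a pretend kernel text range loses both bits */
        printf("%#" PRIx64 "\n",
               static_protections(req, 0xc1004000UL, 0xc1000000UL, 0xc1600000UL, ~0ULL));
        return 0;
}
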
19105 diff -urNp linux-2.6.35.4/arch/x86/mm/pageattr-test.c linux-2.6.35.4/arch/x86/mm/pageattr-test.c
19106 --- linux-2.6.35.4/arch/x86/mm/pageattr-test.c 2010-08-26 19:47:12.000000000 -0400
19107 +++ linux-2.6.35.4/arch/x86/mm/pageattr-test.c 2010-09-17 20:12:09.000000000 -0400
19108 @@ -36,7 +36,7 @@ enum {
19109
19110 static int pte_testbit(pte_t pte)
19111 {
19112 - return pte_flags(pte) & _PAGE_UNUSED1;
19113 + return pte_flags(pte) & _PAGE_CPA_TEST;
19114 }
19115
19116 struct split_state {
19117 diff -urNp linux-2.6.35.4/arch/x86/mm/pat.c linux-2.6.35.4/arch/x86/mm/pat.c
19118 --- linux-2.6.35.4/arch/x86/mm/pat.c 2010-08-26 19:47:12.000000000 -0400
19119 +++ linux-2.6.35.4/arch/x86/mm/pat.c 2010-09-17 20:12:09.000000000 -0400
19120 @@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
19121
19122 if (!entry) {
19123 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
19124 - current->comm, current->pid, start, end);
19125 + current->comm, task_pid_nr(current), start, end);
19126 return -EINVAL;
19127 }
19128
19129 @@ -492,8 +492,8 @@ static inline int range_is_allowed(unsig
19130 while (cursor < to) {
19131 if (!devmem_is_allowed(pfn)) {
19132 printk(KERN_INFO
19133 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
19134 - current->comm, from, to);
19135 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
19136 + current->comm, from, to, cursor);
19137 return 0;
19138 }
19139 cursor += PAGE_SIZE;
19140 @@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, un
19141 printk(KERN_INFO
19142 "%s:%d ioremap_change_attr failed %s "
19143 "for %Lx-%Lx\n",
19144 - current->comm, current->pid,
19145 + current->comm, task_pid_nr(current),
19146 cattr_name(flags),
19147 base, (unsigned long long)(base + size));
19148 return -EINVAL;
19149 @@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr,
19150 if (want_flags != flags) {
19151 printk(KERN_WARNING
19152 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
19153 - current->comm, current->pid,
19154 + current->comm, task_pid_nr(current),
19155 cattr_name(want_flags),
19156 (unsigned long long)paddr,
19157 (unsigned long long)(paddr + size),
19158 @@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr,
19159 free_memtype(paddr, paddr + size);
19160 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
19161 " for %Lx-%Lx, got %s\n",
19162 - current->comm, current->pid,
19163 + current->comm, task_pid_nr(current),
19164 cattr_name(want_flags),
19165 (unsigned long long)paddr,
19166 (unsigned long long)(paddr + size),
19167 diff -urNp linux-2.6.35.4/arch/x86/mm/pgtable_32.c linux-2.6.35.4/arch/x86/mm/pgtable_32.c
19168 --- linux-2.6.35.4/arch/x86/mm/pgtable_32.c 2010-08-26 19:47:12.000000000 -0400
19169 +++ linux-2.6.35.4/arch/x86/mm/pgtable_32.c 2010-09-17 20:12:09.000000000 -0400
19170 @@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr,
19171 return;
19172 }
19173 pte = pte_offset_kernel(pmd, vaddr);
19174 +
19175 + pax_open_kernel();
19176 if (pte_val(pteval))
19177 set_pte_at(&init_mm, vaddr, pte, pteval);
19178 else
19179 pte_clear(&init_mm, vaddr, pte);
19180 + pax_close_kernel();
19181
19182 /*
19183 * It's enough to flush this one mapping.
19184 diff -urNp linux-2.6.35.4/arch/x86/mm/pgtable.c linux-2.6.35.4/arch/x86/mm/pgtable.c
19185 --- linux-2.6.35.4/arch/x86/mm/pgtable.c 2010-08-26 19:47:12.000000000 -0400
19186 +++ linux-2.6.35.4/arch/x86/mm/pgtable.c 2010-09-17 20:12:09.000000000 -0400
19187 @@ -84,8 +84,59 @@ static inline void pgd_list_del(pgd_t *p
19188 list_del(&page->lru);
19189 }
19190
19191 -#define UNSHARED_PTRS_PER_PGD \
19192 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
19193 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19194 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
19195 +
19196 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
19197 +{
19198 + while (count--)
19199 + *dst++ = __pgd((pgd_val(*src++) | _PAGE_NX) & ~_PAGE_USER);
19200 +
19201 +}
19202 +#endif
19203 +
19204 +#ifdef CONFIG_PAX_PER_CPU_PGD
19205 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
19206 +{
19207 + while (count--)
19208 +
19209 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19210 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
19211 +#else
19212 + *dst++ = *src++;
19213 +#endif
19214 +
19215 +}
19216 +#endif
19217 +
19218 +#ifdef CONFIG_PAX_PER_CPU_PGD
19219 +static inline void pgd_ctor(pgd_t *pgd) {}
19220 +static inline void pgd_dtor(pgd_t *pgd) {}
19221 +#ifdef CONFIG_X86_64
19222 +#define pxd_t pud_t
19223 +#define pyd_t pgd_t
19224 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
19225 +#define pxd_free(mm, pud) pud_free((mm), (pud))
19226 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
19227 +#define pyd_offset(mm ,address) pgd_offset((mm), (address))
19228 +#define PYD_SIZE PGDIR_SIZE
19229 +#else
19230 +#define pxd_t pmd_t
19231 +#define pyd_t pud_t
19232 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
19233 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
19234 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
19235 +#define pyd_offset(mm ,address) pud_offset((mm), (address))
19236 +#define PYD_SIZE PUD_SIZE
19237 +#endif
19238 +#else
19239 +#define pxd_t pmd_t
19240 +#define pyd_t pud_t
19241 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
19242 +#define pxd_free(mm, pmd) pmd_free((mm), (pmd))
19243 +#define pyd_populate(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
19244 +#define pyd_offset(mm ,address) pud_offset((mm), (address))
19245 +#define PYD_SIZE PUD_SIZE
19246
19247 static void pgd_ctor(pgd_t *pgd)
19248 {
19249 @@ -120,6 +171,7 @@ static void pgd_dtor(pgd_t *pgd)
19250 pgd_list_del(pgd);
19251 spin_unlock_irqrestore(&pgd_lock, flags);
19252 }
19253 +#endif
19254
19255 /*
19256 * List of all pgd's needed for non-PAE so it can invalidate entries
19257 @@ -132,7 +184,7 @@ static void pgd_dtor(pgd_t *pgd)
19258 * -- wli
19259 */
19260
19261 -#ifdef CONFIG_X86_PAE
19262 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
19263 /*
19264 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
19265 * updating the top-level pagetable entries to guarantee the
19266 @@ -144,7 +196,7 @@ static void pgd_dtor(pgd_t *pgd)
19267 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
19268 * and initialize the kernel pmds here.
19269 */
19270 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
19271 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
19272
19273 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
19274 {
19275 @@ -163,36 +215,38 @@ void pud_populate(struct mm_struct *mm,
19276 if (mm == current->active_mm)
19277 write_cr3(read_cr3());
19278 }
19279 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
19280 +#define PREALLOCATED_PXDS USER_PGD_PTRS
19281 #else /* !CONFIG_X86_PAE */
19282
19283 /* No need to prepopulate any pagetable entries in non-PAE modes. */
19284 -#define PREALLOCATED_PMDS 0
19285 +#define PREALLOCATED_PXDS 0
19286
19287 #endif /* CONFIG_X86_PAE */
19288
19289 -static void free_pmds(pmd_t *pmds[])
19290 +static void free_pxds(pxd_t *pxds[])
19291 {
19292 int i;
19293
19294 - for(i = 0; i < PREALLOCATED_PMDS; i++)
19295 - if (pmds[i])
19296 - free_page((unsigned long)pmds[i]);
19297 + for(i = 0; i < PREALLOCATED_PXDS; i++)
19298 + if (pxds[i])
19299 + free_page((unsigned long)pxds[i]);
19300 }
19301
19302 -static int preallocate_pmds(pmd_t *pmds[])
19303 +static int preallocate_pxds(pxd_t *pxds[])
19304 {
19305 int i;
19306 bool failed = false;
19307
19308 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
19309 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
19310 - if (pmd == NULL)
19311 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
19312 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
19313 + if (pxd == NULL)
19314 failed = true;
19315 - pmds[i] = pmd;
19316 + pxds[i] = pxd;
19317 }
19318
19319 if (failed) {
19320 - free_pmds(pmds);
19321 + free_pxds(pxds);
19322 return -ENOMEM;
19323 }
19324
19325 @@ -205,51 +259,56 @@ static int preallocate_pmds(pmd_t *pmds[
19326 * preallocate which never got a corresponding vma will need to be
19327 * freed manually.
19328 */
19329 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
19330 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
19331 {
19332 int i;
19333
19334 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
19335 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
19336 pgd_t pgd = pgdp[i];
19337
19338 if (pgd_val(pgd) != 0) {
19339 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
19340 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
19341
19342 - pgdp[i] = native_make_pgd(0);
19343 + set_pgd(pgdp + i, native_make_pgd(0));
19344
19345 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
19346 - pmd_free(mm, pmd);
19347 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
19348 + pxd_free(mm, pxd);
19349 }
19350 }
19351 }
19352
19353 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
19354 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
19355 {
19356 - pud_t *pud;
19357 + pyd_t *pyd;
19358 unsigned long addr;
19359 int i;
19360
19361 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
19362 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
19363 return;
19364
19365 - pud = pud_offset(pgd, 0);
19366 +#ifdef CONFIG_X86_64
19367 + pyd = pyd_offset(mm, 0L);
19368 +#else
19369 + pyd = pyd_offset(pgd, 0L);
19370 +#endif
19371
19372 - for (addr = i = 0; i < PREALLOCATED_PMDS;
19373 - i++, pud++, addr += PUD_SIZE) {
19374 - pmd_t *pmd = pmds[i];
19375 + for (addr = i = 0; i < PREALLOCATED_PXDS;
19376 + i++, pyd++, addr += PYD_SIZE) {
19377 + pxd_t *pxd = pxds[i];
19378
19379 if (i >= KERNEL_PGD_BOUNDARY)
19380 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
19381 - sizeof(pmd_t) * PTRS_PER_PMD);
19382 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
19383 + sizeof(pxd_t) * PTRS_PER_PMD);
19384
19385 - pud_populate(mm, pud, pmd);
19386 + pyd_populate(mm, pyd, pxd);
19387 }
19388 }
19389
19390 pgd_t *pgd_alloc(struct mm_struct *mm)
19391 {
19392 pgd_t *pgd;
19393 - pmd_t *pmds[PREALLOCATED_PMDS];
19394 + pxd_t *pxds[PREALLOCATED_PXDS];
19395 +
19396 unsigned long flags;
19397
19398 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
19399 @@ -259,11 +318,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
19400
19401 mm->pgd = pgd;
19402
19403 - if (preallocate_pmds(pmds) != 0)
19404 + if (preallocate_pxds(pxds) != 0)
19405 goto out_free_pgd;
19406
19407 if (paravirt_pgd_alloc(mm) != 0)
19408 - goto out_free_pmds;
19409 + goto out_free_pxds;
19410
19411 /*
19412 * Make sure that pre-populating the pmds is atomic with
19413 @@ -273,14 +332,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
19414 spin_lock_irqsave(&pgd_lock, flags);
19415
19416 pgd_ctor(pgd);
19417 - pgd_prepopulate_pmd(mm, pgd, pmds);
19418 + pgd_prepopulate_pxd(mm, pgd, pxds);
19419
19420 spin_unlock_irqrestore(&pgd_lock, flags);
19421
19422 return pgd;
19423
19424 -out_free_pmds:
19425 - free_pmds(pmds);
19426 +out_free_pxds:
19427 + free_pxds(pxds);
19428 out_free_pgd:
19429 free_page((unsigned long)pgd);
19430 out:
19431 @@ -289,7 +348,7 @@ out:
19432
19433 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
19434 {
19435 - pgd_mop_up_pmds(mm, pgd);
19436 + pgd_mop_up_pxds(mm, pgd);
19437 pgd_dtor(pgd);
19438 paravirt_pgd_free(mm, pgd);
19439 free_page((unsigned long)pgd);
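
__shadow_user_pgds() and __clone_user_pgds() above carry the UDEREF / per-CPU-PGD copying: the shadow variant propagates user PGD entries with _PAGE_NX forced on and _PAGE_USER stripped, while the clone variant filters entries through clone_pgd_mask (which defaults to ~_PAGE_PRESENT in the patch). A stand-alone sketch of the two loops is below; plain unsigned longs stand in for pgd_t and the bit values are made up for illustration.

        #include <stdio.h>

        #define FAKE_PAGE_NX   0x8000000000000000UL
        #define FAKE_PAGE_USER 0x004UL

        /* shadow copy: user entries become kernel-only and non-executable */
        static void shadow_user_pgds(unsigned long *dst, const unsigned long *src, int count)
        {
                while (count--)
                        *dst++ = (*src++ | FAKE_PAGE_NX) & ~FAKE_PAGE_USER;
        }

        /* clone copy: entries are filtered through a global mask */
        static void clone_user_pgds(unsigned long *dst, const unsigned long *src, int count,
                                    unsigned long clone_pgd_mask)
        {
                while (count--)
                        *dst++ = *src++ & clone_pgd_mask;
        }

        int main(void)
        {
                unsigned long src[2] = { 0x1005UL, 0x2005UL };  /* PRESENT|USER-ish values */
                unsigned long a[2], b[2];

                shadow_user_pgds(a, src, 2);
                clone_user_pgds(b, src, 2, ~0x1UL);             /* e.g. mask off a PRESENT bit */
                printf("%#lx %#lx\n", a[0], b[0]);
                return 0;
        }
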
19440 diff -urNp linux-2.6.35.4/arch/x86/mm/setup_nx.c linux-2.6.35.4/arch/x86/mm/setup_nx.c
19441 --- linux-2.6.35.4/arch/x86/mm/setup_nx.c 2010-08-26 19:47:12.000000000 -0400
19442 +++ linux-2.6.35.4/arch/x86/mm/setup_nx.c 2010-09-17 20:12:09.000000000 -0400
19443 @@ -5,8 +5,10 @@
19444 #include <asm/pgtable.h>
19445 #include <asm/proto.h>
19446
19447 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
19448 static int disable_nx __cpuinitdata;
19449
19450 +#ifndef CONFIG_PAX_PAGEEXEC
19451 /*
19452 * noexec = on|off
19453 *
19454 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str
19455 return 0;
19456 }
19457 early_param("noexec", noexec_setup);
19458 +#endif
19459 +
19460 +#endif
19461
19462 void __cpuinit x86_configure_nx(void)
19463 {
19464 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
19465 if (cpu_has_nx && !disable_nx)
19466 __supported_pte_mask |= _PAGE_NX;
19467 else
19468 +#endif
19469 __supported_pte_mask &= ~_PAGE_NX;
19470 }
19471
19472 diff -urNp linux-2.6.35.4/arch/x86/mm/tlb.c linux-2.6.35.4/arch/x86/mm/tlb.c
19473 --- linux-2.6.35.4/arch/x86/mm/tlb.c 2010-08-26 19:47:12.000000000 -0400
19474 +++ linux-2.6.35.4/arch/x86/mm/tlb.c 2010-09-17 20:12:09.000000000 -0400
19475 @@ -13,7 +13,7 @@
19476 #include <asm/uv/uv.h>
19477
19478 DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
19479 - = { &init_mm, 0, };
19480 + = { &init_mm, 0 };
19481
19482 /*
19483 * Smarter SMP flushing macros.
19484 @@ -62,7 +62,11 @@ void leave_mm(int cpu)
19485 BUG();
19486 cpumask_clear_cpu(cpu,
19487 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
19488 +
19489 +#ifndef CONFIG_PAX_PER_CPU_PGD
19490 load_cr3(swapper_pg_dir);
19491 +#endif
19492 +
19493 }
19494 EXPORT_SYMBOL_GPL(leave_mm);
19495
19496 diff -urNp linux-2.6.35.4/arch/x86/oprofile/backtrace.c linux-2.6.35.4/arch/x86/oprofile/backtrace.c
19497 --- linux-2.6.35.4/arch/x86/oprofile/backtrace.c 2010-08-26 19:47:12.000000000 -0400
19498 +++ linux-2.6.35.4/arch/x86/oprofile/backtrace.c 2010-09-17 20:12:09.000000000 -0400
19499 @@ -58,7 +58,7 @@ static struct frame_head *dump_user_back
19500 struct frame_head bufhead[2];
19501
19502 /* Also check accessibility of one struct frame_head beyond */
19503 - if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
19504 + if (!__access_ok(VERIFY_READ, head, sizeof(bufhead)))
19505 return NULL;
19506 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
19507 return NULL;
19508 @@ -78,7 +78,7 @@ x86_backtrace(struct pt_regs * const reg
19509 {
19510 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
19511
19512 - if (!user_mode_vm(regs)) {
19513 + if (!user_mode(regs)) {
19514 unsigned long stack = kernel_stack_pointer(regs);
19515 if (depth)
19516 dump_trace(NULL, regs, (unsigned long *)stack, 0,
19517 diff -urNp linux-2.6.35.4/arch/x86/oprofile/op_model_p4.c linux-2.6.35.4/arch/x86/oprofile/op_model_p4.c
19518 --- linux-2.6.35.4/arch/x86/oprofile/op_model_p4.c 2010-08-26 19:47:12.000000000 -0400
19519 +++ linux-2.6.35.4/arch/x86/oprofile/op_model_p4.c 2010-09-17 20:12:09.000000000 -0400
19520 @@ -50,7 +50,7 @@ static inline void setup_num_counters(vo
19521 #endif
19522 }
19523
19524 -static int inline addr_increment(void)
19525 +static inline int addr_increment(void)
19526 {
19527 #ifdef CONFIG_SMP
19528 return smp_num_siblings == 2 ? 2 : 1;
19529 diff -urNp linux-2.6.35.4/arch/x86/pci/common.c linux-2.6.35.4/arch/x86/pci/common.c
19530 --- linux-2.6.35.4/arch/x86/pci/common.c 2010-08-26 19:47:12.000000000 -0400
19531 +++ linux-2.6.35.4/arch/x86/pci/common.c 2010-09-17 20:12:09.000000000 -0400
19532 @@ -32,8 +32,8 @@ int noioapicreroute = 1;
19533 int pcibios_last_bus = -1;
19534 unsigned long pirq_table_addr;
19535 struct pci_bus *pci_root_bus;
19536 -struct pci_raw_ops *raw_pci_ops;
19537 -struct pci_raw_ops *raw_pci_ext_ops;
19538 +const struct pci_raw_ops *raw_pci_ops;
19539 +const struct pci_raw_ops *raw_pci_ext_ops;
19540
19541 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
19542 int reg, int len, u32 *val)
19543 @@ -365,7 +365,7 @@ static const struct dmi_system_id __devi
19544 DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL585 G2"),
19545 },
19546 },
19547 - {}
19548 + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL}
19549 };
19550
19551 void __init dmi_check_pciprobe(void)
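
The sentinel rewrite above (and the identical ones later in fixup.c, irq.c, processor_idle.c and blacklist.c) replaces the empty terminating entry {} with a fully written-out all-NULL/zero initialiser. Both forms produce the same zeroed object; the explicit version merely spells out the fields that the table walker tests for termination. A small stand-alone sketch of that equivalence, using a stand-in struct instead of struct dmi_system_id:

        #include <stdio.h>

        struct fake_id {
                int (*callback)(const struct fake_id *);
                const char *ident;
                void *driver_data;
        };

        static const struct fake_id table[] = {
                { NULL, "ProLiant DL585 G2", NULL },
                { NULL, NULL, NULL }            /* explicit sentinel, equivalent to { } */
        };

        int main(void)
        {
                int n = 0;

                while (table[n].ident)          /* walkers stop at the zeroed entry */
                        n++;
                printf("%d real entries\n", n);
                return 0;
        }
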
19552 diff -urNp linux-2.6.35.4/arch/x86/pci/direct.c linux-2.6.35.4/arch/x86/pci/direct.c
19553 --- linux-2.6.35.4/arch/x86/pci/direct.c 2010-08-26 19:47:12.000000000 -0400
19554 +++ linux-2.6.35.4/arch/x86/pci/direct.c 2010-09-17 20:12:09.000000000 -0400
19555 @@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int
19556
19557 #undef PCI_CONF1_ADDRESS
19558
19559 -struct pci_raw_ops pci_direct_conf1 = {
19560 +const struct pci_raw_ops pci_direct_conf1 = {
19561 .read = pci_conf1_read,
19562 .write = pci_conf1_write,
19563 };
19564 @@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int
19565
19566 #undef PCI_CONF2_ADDRESS
19567
19568 -struct pci_raw_ops pci_direct_conf2 = {
19569 +const struct pci_raw_ops pci_direct_conf2 = {
19570 .read = pci_conf2_read,
19571 .write = pci_conf2_write,
19572 };
19573 @@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
19574 * This should be close to trivial, but it isn't, because there are buggy
19575 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
19576 */
19577 -static int __init pci_sanity_check(struct pci_raw_ops *o)
19578 +static int __init pci_sanity_check(const struct pci_raw_ops *o)
19579 {
19580 u32 x = 0;
19581 int year, devfn;
19582 diff -urNp linux-2.6.35.4/arch/x86/pci/fixup.c linux-2.6.35.4/arch/x86/pci/fixup.c
19583 --- linux-2.6.35.4/arch/x86/pci/fixup.c 2010-08-26 19:47:12.000000000 -0400
19584 +++ linux-2.6.35.4/arch/x86/pci/fixup.c 2010-09-17 20:12:09.000000000 -0400
19585 @@ -364,7 +364,7 @@ static const struct dmi_system_id __devi
19586 DMI_MATCH(DMI_PRODUCT_NAME, "MS-6702E"),
19587 },
19588 },
19589 - {}
19590 + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL }
19591 };
19592
19593 /*
19594 @@ -435,7 +435,7 @@ static const struct dmi_system_id __devi
19595 DMI_MATCH(DMI_PRODUCT_VERSION, "PSA40U"),
19596 },
19597 },
19598 - { }
19599 + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL }
19600 };
19601
19602 static void __devinit pci_pre_fixup_toshiba_ohci1394(struct pci_dev *dev)
19603 diff -urNp linux-2.6.35.4/arch/x86/pci/irq.c linux-2.6.35.4/arch/x86/pci/irq.c
19604 --- linux-2.6.35.4/arch/x86/pci/irq.c 2010-08-26 19:47:12.000000000 -0400
19605 +++ linux-2.6.35.4/arch/x86/pci/irq.c 2010-09-17 20:12:09.000000000 -0400
19606 @@ -542,7 +542,7 @@ static __init int intel_router_probe(str
19607 static struct pci_device_id __initdata pirq_440gx[] = {
19608 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0) },
19609 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_2) },
19610 - { },
19611 + { PCI_DEVICE(0, 0) }
19612 };
19613
19614 /* 440GX has a proprietary PIRQ router -- don't use it */
19615 @@ -1113,7 +1113,7 @@ static struct dmi_system_id __initdata p
19616 DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
19617 },
19618 },
19619 - { }
19620 + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL }
19621 };
19622
19623 void __init pcibios_irq_init(void)
19624 diff -urNp linux-2.6.35.4/arch/x86/pci/mmconfig_32.c linux-2.6.35.4/arch/x86/pci/mmconfig_32.c
19625 --- linux-2.6.35.4/arch/x86/pci/mmconfig_32.c 2010-08-26 19:47:12.000000000 -0400
19626 +++ linux-2.6.35.4/arch/x86/pci/mmconfig_32.c 2010-09-17 20:12:09.000000000 -0400
19627 @@ -117,7 +117,7 @@ static int pci_mmcfg_write(unsigned int
19628 return 0;
19629 }
19630
19631 -static struct pci_raw_ops pci_mmcfg = {
19632 +static const struct pci_raw_ops pci_mmcfg = {
19633 .read = pci_mmcfg_read,
19634 .write = pci_mmcfg_write,
19635 };
19636 diff -urNp linux-2.6.35.4/arch/x86/pci/mmconfig_64.c linux-2.6.35.4/arch/x86/pci/mmconfig_64.c
19637 --- linux-2.6.35.4/arch/x86/pci/mmconfig_64.c 2010-08-26 19:47:12.000000000 -0400
19638 +++ linux-2.6.35.4/arch/x86/pci/mmconfig_64.c 2010-09-17 20:12:09.000000000 -0400
19639 @@ -81,7 +81,7 @@ static int pci_mmcfg_write(unsigned int
19640 return 0;
19641 }
19642
19643 -static struct pci_raw_ops pci_mmcfg = {
19644 +static const struct pci_raw_ops pci_mmcfg = {
19645 .read = pci_mmcfg_read,
19646 .write = pci_mmcfg_write,
19647 };
19648 diff -urNp linux-2.6.35.4/arch/x86/pci/numaq_32.c linux-2.6.35.4/arch/x86/pci/numaq_32.c
19649 --- linux-2.6.35.4/arch/x86/pci/numaq_32.c 2010-08-26 19:47:12.000000000 -0400
19650 +++ linux-2.6.35.4/arch/x86/pci/numaq_32.c 2010-09-17 20:12:09.000000000 -0400
19651 @@ -108,7 +108,7 @@ static int pci_conf1_mq_write(unsigned i
19652
19653 #undef PCI_CONF1_MQ_ADDRESS
19654
19655 -static struct pci_raw_ops pci_direct_conf1_mq = {
19656 +static const struct pci_raw_ops pci_direct_conf1_mq = {
19657 .read = pci_conf1_mq_read,
19658 .write = pci_conf1_mq_write
19659 };
19660 diff -urNp linux-2.6.35.4/arch/x86/pci/olpc.c linux-2.6.35.4/arch/x86/pci/olpc.c
19661 --- linux-2.6.35.4/arch/x86/pci/olpc.c 2010-08-26 19:47:12.000000000 -0400
19662 +++ linux-2.6.35.4/arch/x86/pci/olpc.c 2010-09-17 20:12:09.000000000 -0400
19663 @@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int s
19664 return 0;
19665 }
19666
19667 -static struct pci_raw_ops pci_olpc_conf = {
19668 +static const struct pci_raw_ops pci_olpc_conf = {
19669 .read = pci_olpc_read,
19670 .write = pci_olpc_write,
19671 };
19672 diff -urNp linux-2.6.35.4/arch/x86/pci/pcbios.c linux-2.6.35.4/arch/x86/pci/pcbios.c
19673 --- linux-2.6.35.4/arch/x86/pci/pcbios.c 2010-08-26 19:47:12.000000000 -0400
19674 +++ linux-2.6.35.4/arch/x86/pci/pcbios.c 2010-09-17 20:12:09.000000000 -0400
19675 @@ -57,50 +57,93 @@ union bios32 {
19676 static struct {
19677 unsigned long address;
19678 unsigned short segment;
19679 -} bios32_indirect = { 0, __KERNEL_CS };
19680 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
19681
19682 /*
19683 * Returns the entry point for the given service, NULL on error
19684 */
19685
19686 -static unsigned long bios32_service(unsigned long service)
19687 +static unsigned long __devinit bios32_service(unsigned long service)
19688 {
19689 unsigned char return_code; /* %al */
19690 unsigned long address; /* %ebx */
19691 unsigned long length; /* %ecx */
19692 unsigned long entry; /* %edx */
19693 unsigned long flags;
19694 + struct desc_struct d, *gdt;
19695
19696 local_irq_save(flags);
19697 - __asm__("lcall *(%%edi); cld"
19698 +
19699 + gdt = get_cpu_gdt_table(smp_processor_id());
19700 +
19701 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
19702 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
19703 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
19704 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
19705 +
19706 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
19707 : "=a" (return_code),
19708 "=b" (address),
19709 "=c" (length),
19710 "=d" (entry)
19711 : "0" (service),
19712 "1" (0),
19713 - "D" (&bios32_indirect));
19714 + "D" (&bios32_indirect),
19715 + "r"(__PCIBIOS_DS)
19716 + : "memory");
19717 +
19718 + pax_open_kernel();
19719 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
19720 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
19721 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
19722 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
19723 + pax_close_kernel();
19724 +
19725 local_irq_restore(flags);
19726
19727 switch (return_code) {
19728 - case 0:
19729 - return address + entry;
19730 - case 0x80: /* Not present */
19731 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
19732 - return 0;
19733 - default: /* Shouldn't happen */
19734 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
19735 - service, return_code);
19736 + case 0: {
19737 + int cpu;
19738 + unsigned char flags;
19739 +
19740 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
19741 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
19742 + printk(KERN_WARNING "bios32_service: not valid\n");
19743 return 0;
19744 + }
19745 + address = address + PAGE_OFFSET;
19746 + length += 16UL; /* some BIOSs underreport this... */
19747 + flags = 4;
19748 + if (length >= 64*1024*1024) {
19749 + length >>= PAGE_SHIFT;
19750 + flags |= 8;
19751 + }
19752 +
19753 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
19754 + gdt = get_cpu_gdt_table(cpu);
19755 + pack_descriptor(&d, address, length, 0x9b, flags);
19756 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
19757 + pack_descriptor(&d, address, length, 0x93, flags);
19758 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
19759 + }
19760 + return entry;
19761 + }
19762 + case 0x80: /* Not present */
19763 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
19764 + return 0;
19765 + default: /* Shouldn't happen */
19766 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
19767 + service, return_code);
19768 + return 0;
19769 }
19770 }
19771
19772 static struct {
19773 unsigned long address;
19774 unsigned short segment;
19775 -} pci_indirect = { 0, __KERNEL_CS };
19776 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
19777
19778 -static int pci_bios_present;
19779 +static int pci_bios_present __read_only;
19780
19781 static int __devinit check_pcibios(void)
19782 {
19783 @@ -109,11 +152,13 @@ static int __devinit check_pcibios(void)
19784 unsigned long flags, pcibios_entry;
19785
19786 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
19787 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
19788 + pci_indirect.address = pcibios_entry;
19789
19790 local_irq_save(flags);
19791 - __asm__(
19792 - "lcall *(%%edi); cld\n\t"
19793 + __asm__("movw %w6, %%ds\n\t"
19794 + "lcall *%%ss:(%%edi); cld\n\t"
19795 + "push %%ss\n\t"
19796 + "pop %%ds\n\t"
19797 "jc 1f\n\t"
19798 "xor %%ah, %%ah\n"
19799 "1:"
19800 @@ -122,7 +167,8 @@ static int __devinit check_pcibios(void)
19801 "=b" (ebx),
19802 "=c" (ecx)
19803 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
19804 - "D" (&pci_indirect)
19805 + "D" (&pci_indirect),
19806 + "r" (__PCIBIOS_DS)
19807 : "memory");
19808 local_irq_restore(flags);
19809
19810 @@ -166,7 +212,10 @@ static int pci_bios_read(unsigned int se
19811
19812 switch (len) {
19813 case 1:
19814 - __asm__("lcall *(%%esi); cld\n\t"
19815 + __asm__("movw %w6, %%ds\n\t"
19816 + "lcall *%%ss:(%%esi); cld\n\t"
19817 + "push %%ss\n\t"
19818 + "pop %%ds\n\t"
19819 "jc 1f\n\t"
19820 "xor %%ah, %%ah\n"
19821 "1:"
19822 @@ -175,7 +224,8 @@ static int pci_bios_read(unsigned int se
19823 : "1" (PCIBIOS_READ_CONFIG_BYTE),
19824 "b" (bx),
19825 "D" ((long)reg),
19826 - "S" (&pci_indirect));
19827 + "S" (&pci_indirect),
19828 + "r" (__PCIBIOS_DS));
19829 /*
19830 * Zero-extend the result beyond 8 bits, do not trust the
19831 * BIOS having done it:
19832 @@ -183,7 +233,10 @@ static int pci_bios_read(unsigned int se
19833 *value &= 0xff;
19834 break;
19835 case 2:
19836 - __asm__("lcall *(%%esi); cld\n\t"
19837 + __asm__("movw %w6, %%ds\n\t"
19838 + "lcall *%%ss:(%%esi); cld\n\t"
19839 + "push %%ss\n\t"
19840 + "pop %%ds\n\t"
19841 "jc 1f\n\t"
19842 "xor %%ah, %%ah\n"
19843 "1:"
19844 @@ -192,7 +245,8 @@ static int pci_bios_read(unsigned int se
19845 : "1" (PCIBIOS_READ_CONFIG_WORD),
19846 "b" (bx),
19847 "D" ((long)reg),
19848 - "S" (&pci_indirect));
19849 + "S" (&pci_indirect),
19850 + "r" (__PCIBIOS_DS));
19851 /*
19852 * Zero-extend the result beyond 16 bits, do not trust the
19853 * BIOS having done it:
19854 @@ -200,7 +254,10 @@ static int pci_bios_read(unsigned int se
19855 *value &= 0xffff;
19856 break;
19857 case 4:
19858 - __asm__("lcall *(%%esi); cld\n\t"
19859 + __asm__("movw %w6, %%ds\n\t"
19860 + "lcall *%%ss:(%%esi); cld\n\t"
19861 + "push %%ss\n\t"
19862 + "pop %%ds\n\t"
19863 "jc 1f\n\t"
19864 "xor %%ah, %%ah\n"
19865 "1:"
19866 @@ -209,7 +266,8 @@ static int pci_bios_read(unsigned int se
19867 : "1" (PCIBIOS_READ_CONFIG_DWORD),
19868 "b" (bx),
19869 "D" ((long)reg),
19870 - "S" (&pci_indirect));
19871 + "S" (&pci_indirect),
19872 + "r" (__PCIBIOS_DS));
19873 break;
19874 }
19875
19876 @@ -232,7 +290,10 @@ static int pci_bios_write(unsigned int s
19877
19878 switch (len) {
19879 case 1:
19880 - __asm__("lcall *(%%esi); cld\n\t"
19881 + __asm__("movw %w6, %%ds\n\t"
19882 + "lcall *%%ss:(%%esi); cld\n\t"
19883 + "push %%ss\n\t"
19884 + "pop %%ds\n\t"
19885 "jc 1f\n\t"
19886 "xor %%ah, %%ah\n"
19887 "1:"
19888 @@ -241,10 +302,14 @@ static int pci_bios_write(unsigned int s
19889 "c" (value),
19890 "b" (bx),
19891 "D" ((long)reg),
19892 - "S" (&pci_indirect));
19893 + "S" (&pci_indirect),
19894 + "r" (__PCIBIOS_DS));
19895 break;
19896 case 2:
19897 - __asm__("lcall *(%%esi); cld\n\t"
19898 + __asm__("movw %w6, %%ds\n\t"
19899 + "lcall *%%ss:(%%esi); cld\n\t"
19900 + "push %%ss\n\t"
19901 + "pop %%ds\n\t"
19902 "jc 1f\n\t"
19903 "xor %%ah, %%ah\n"
19904 "1:"
19905 @@ -253,10 +318,14 @@ static int pci_bios_write(unsigned int s
19906 "c" (value),
19907 "b" (bx),
19908 "D" ((long)reg),
19909 - "S" (&pci_indirect));
19910 + "S" (&pci_indirect),
19911 + "r" (__PCIBIOS_DS));
19912 break;
19913 case 4:
19914 - __asm__("lcall *(%%esi); cld\n\t"
19915 + __asm__("movw %w6, %%ds\n\t"
19916 + "lcall *%%ss:(%%esi); cld\n\t"
19917 + "push %%ss\n\t"
19918 + "pop %%ds\n\t"
19919 "jc 1f\n\t"
19920 "xor %%ah, %%ah\n"
19921 "1:"
19922 @@ -265,7 +334,8 @@ static int pci_bios_write(unsigned int s
19923 "c" (value),
19924 "b" (bx),
19925 "D" ((long)reg),
19926 - "S" (&pci_indirect));
19927 + "S" (&pci_indirect),
19928 + "r" (__PCIBIOS_DS));
19929 break;
19930 }
19931
19932 @@ -279,7 +349,7 @@ static int pci_bios_write(unsigned int s
19933 * Function table for BIOS32 access
19934 */
19935
19936 -static struct pci_raw_ops pci_bios_access = {
19937 +static const struct pci_raw_ops pci_bios_access = {
19938 .read = pci_bios_read,
19939 .write = pci_bios_write
19940 };
19941 @@ -288,7 +358,7 @@ static struct pci_raw_ops pci_bios_acces
19942 * Try to find PCI BIOS.
19943 */
19944
19945 -static struct pci_raw_ops * __devinit pci_find_bios(void)
19946 +static const struct pci_raw_ops * __devinit pci_find_bios(void)
19947 {
19948 union bios32 *check;
19949 unsigned char sum;
19950 @@ -369,10 +439,13 @@ struct irq_routing_table * pcibios_get_i
19951
19952 DBG("PCI: Fetching IRQ routing table... ");
19953 __asm__("push %%es\n\t"
19954 + "movw %w8, %%ds\n\t"
19955 "push %%ds\n\t"
19956 "pop %%es\n\t"
19957 - "lcall *(%%esi); cld\n\t"
19958 + "lcall *%%ss:(%%esi); cld\n\t"
19959 "pop %%es\n\t"
19960 + "push %%ss\n\t"
19961 + "pop %%ds\n"
19962 "jc 1f\n\t"
19963 "xor %%ah, %%ah\n"
19964 "1:"
19965 @@ -383,7 +456,8 @@ struct irq_routing_table * pcibios_get_i
19966 "1" (0),
19967 "D" ((long) &opt),
19968 "S" (&pci_indirect),
19969 - "m" (opt)
19970 + "m" (opt),
19971 + "r" (__PCIBIOS_DS)
19972 : "memory");
19973 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
19974 if (ret & 0xff00)
19975 @@ -407,7 +481,10 @@ int pcibios_set_irq_routing(struct pci_d
19976 {
19977 int ret;
19978
19979 - __asm__("lcall *(%%esi); cld\n\t"
19980 + __asm__("movw %w5, %%ds\n\t"
19981 + "lcall *%%ss:(%%esi); cld\n\t"
19982 + "push %%ss\n\t"
19983 + "pop %%ds\n"
19984 "jc 1f\n\t"
19985 "xor %%ah, %%ah\n"
19986 "1:"
19987 @@ -415,7 +492,8 @@ int pcibios_set_irq_routing(struct pci_d
19988 : "0" (PCIBIOS_SET_PCI_HW_INT),
19989 "b" ((dev->bus->number << 8) | dev->devfn),
19990 "c" ((irq << 8) | (pin + 10)),
19991 - "S" (&pci_indirect));
19992 + "S" (&pci_indirect),
19993 + "r" (__PCIBIOS_DS));
19994 return !(ret & 0xff00);
19995 }
19996 EXPORT_SYMBOL(pcibios_set_irq_routing);
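
Besides the __PCIBIOS_CS/__PCIBIOS_DS segment juggling, the bios32_service() rewrite above adds a sanity check on what the BIOS32 service directory returns: the service must live below 1 MiB and the reported entry offset must fall inside the reported length. That condition is easy to lift out and exercise on its own; the sketch below mirrors it with made-up inputs and is not part of the patch.

        #include <stdio.h>

        static int bios32_range_ok(unsigned long address, unsigned long length,
                                   unsigned long entry)
        {
                /* the same condition the patch adds before trusting the BIOS values */
                if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry)
                        return 0;
                return 1;
        }

        int main(void)
        {
                printf("%d\n", bios32_range_ok(0xE0000, 0x10000, 0x100));  /* plausible, accepted */
                printf("%d\n", bios32_range_ok(0xFFFF0, 0x01000, 0x010));  /* base too high, rejected */
                printf("%d\n", bios32_range_ok(0xE0000, 0x00080, 0x100));  /* entry past end, rejected */
                return 0;
        }
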
19997 diff -urNp linux-2.6.35.4/arch/x86/power/cpu.c linux-2.6.35.4/arch/x86/power/cpu.c
19998 --- linux-2.6.35.4/arch/x86/power/cpu.c 2010-08-26 19:47:12.000000000 -0400
19999 +++ linux-2.6.35.4/arch/x86/power/cpu.c 2010-09-17 20:12:09.000000000 -0400
20000 @@ -129,7 +129,7 @@ static void do_fpu_end(void)
20001 static void fix_processor_context(void)
20002 {
20003 int cpu = smp_processor_id();
20004 - struct tss_struct *t = &per_cpu(init_tss, cpu);
20005 + struct tss_struct *t = init_tss + cpu;
20006
20007 set_tss_desc(cpu, t); /*
20008 * This just modifies memory; should not be
20009 @@ -139,7 +139,9 @@ static void fix_processor_context(void)
20010 */
20011
20012 #ifdef CONFIG_X86_64
20013 + pax_open_kernel();
20014 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
20015 + pax_close_kernel();
20016
20017 syscall_init(); /* This sets MSR_*STAR and related */
20018 #endif
20019 diff -urNp linux-2.6.35.4/arch/x86/vdso/Makefile linux-2.6.35.4/arch/x86/vdso/Makefile
20020 --- linux-2.6.35.4/arch/x86/vdso/Makefile 2010-08-26 19:47:12.000000000 -0400
20021 +++ linux-2.6.35.4/arch/x86/vdso/Makefile 2010-09-17 20:12:09.000000000 -0400
20022 @@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
20023 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
20024 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
20025
20026 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
20027 +VDSO_LDFLAGS = -fPIC -shared --no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
20028 GCOV_PROFILE := n
20029
20030 #
20031 diff -urNp linux-2.6.35.4/arch/x86/vdso/vclock_gettime.c linux-2.6.35.4/arch/x86/vdso/vclock_gettime.c
20032 --- linux-2.6.35.4/arch/x86/vdso/vclock_gettime.c 2010-08-26 19:47:12.000000000 -0400
20033 +++ linux-2.6.35.4/arch/x86/vdso/vclock_gettime.c 2010-09-17 20:12:09.000000000 -0400
20034 @@ -22,24 +22,48 @@
20035 #include <asm/hpet.h>
20036 #include <asm/unistd.h>
20037 #include <asm/io.h>
20038 +#include <asm/fixmap.h>
20039 #include "vextern.h"
20040
20041 #define gtod vdso_vsyscall_gtod_data
20042
20043 +notrace noinline long __vdso_fallback_time(long *t)
20044 +{
20045 + long secs;
20046 + asm volatile("syscall"
20047 + : "=a" (secs)
20048 + : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
20049 + return secs;
20050 +}
20051 +
20052 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
20053 {
20054 long ret;
20055 asm("syscall" : "=a" (ret) :
20056 - "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
20057 + "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
20058 return ret;
20059 }
20060
20061 +notrace static inline cycle_t __vdso_vread_hpet(void)
20062 +{
20063 + return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
20064 +}
20065 +
20066 +notrace static inline cycle_t __vdso_vread_tsc(void)
20067 +{
20068 + cycle_t ret = (cycle_t)vget_cycles();
20069 +
20070 + return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
20071 +}
20072 +
20073 notrace static inline long vgetns(void)
20074 {
20075 long v;
20076 - cycles_t (*vread)(void);
20077 - vread = gtod->clock.vread;
20078 - v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
20079 + if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
20080 + v = __vdso_vread_tsc();
20081 + else
20082 + v = __vdso_vread_hpet();
20083 + v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
20084 return (v * gtod->clock.mult) >> gtod->clock.shift;
20085 }
20086
20087 @@ -113,7 +137,9 @@ notrace static noinline int do_monotonic
20088
20089 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
20090 {
20091 - if (likely(gtod->sysctl_enabled))
20092 + if (likely(gtod->sysctl_enabled &&
20093 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
20094 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
20095 switch (clock) {
20096 case CLOCK_REALTIME:
20097 if (likely(gtod->clock.vread))
20098 @@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid
20099 int clock_gettime(clockid_t, struct timespec *)
20100 __attribute__((weak, alias("__vdso_clock_gettime")));
20101
20102 -notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
20103 +notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
20104 {
20105 long ret;
20106 - if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
20107 + asm("syscall" : "=a" (ret) :
20108 + "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
20109 + return ret;
20110 +}
20111 +
20112 +notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
20113 +{
20114 + if (likely(gtod->sysctl_enabled &&
20115 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
20116 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
20117 + {
20118 if (likely(tv != NULL)) {
20119 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
20120 offsetof(struct timespec, tv_nsec) ||
20121 @@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct t
20122 }
20123 return 0;
20124 }
20125 - asm("syscall" : "=a" (ret) :
20126 - "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
20127 - return ret;
20128 + return __vdso_fallback_gettimeofday(tv, tz);
20129 }
20130 int gettimeofday(struct timeval *, struct timezone *)
20131 __attribute__((weak, alias("__vdso_gettimeofday")));
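
The vclock_gettime.c changes repeatedly test gtod->clock.name one character at a time instead of calling a string helper, presumably because the vDSO runs in user context and cannot rely on kernel string routines being reachable there (that rationale is an inference, not stated in the patch). Functionally the test is just an exact-match compare; a stand-alone illustration:

        #include <stdio.h>
        #include <string.h>

        /* the open-coded check from the patch, for the "tsc" case */
        static int clock_is_tsc(const char *name)
        {
                return name[0] == 't' && name[1] == 's' && name[2] == 'c' && !name[3];
        }

        int main(void)
        {
                const char *names[] = { "tsc", "tsc2", "hpet" };
                int i;

                for (i = 0; i < 3; i++)
                        printf("%-5s open-coded=%d strcmp=%d\n", names[i],
                               clock_is_tsc(names[i]), strcmp(names[i], "tsc") == 0);
                return 0;
        }
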
20132 diff -urNp linux-2.6.35.4/arch/x86/vdso/vdso32-setup.c linux-2.6.35.4/arch/x86/vdso/vdso32-setup.c
20133 --- linux-2.6.35.4/arch/x86/vdso/vdso32-setup.c 2010-08-26 19:47:12.000000000 -0400
20134 +++ linux-2.6.35.4/arch/x86/vdso/vdso32-setup.c 2010-09-17 20:12:09.000000000 -0400
20135 @@ -25,6 +25,7 @@
20136 #include <asm/tlbflush.h>
20137 #include <asm/vdso.h>
20138 #include <asm/proto.h>
20139 +#include <asm/mman.h>
20140
20141 enum {
20142 VDSO_DISABLED = 0,
20143 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
20144 void enable_sep_cpu(void)
20145 {
20146 int cpu = get_cpu();
20147 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
20148 + struct tss_struct *tss = init_tss + cpu;
20149
20150 if (!boot_cpu_has(X86_FEATURE_SEP)) {
20151 put_cpu();
20152 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
20153 gate_vma.vm_start = FIXADDR_USER_START;
20154 gate_vma.vm_end = FIXADDR_USER_END;
20155 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
20156 - gate_vma.vm_page_prot = __P101;
20157 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
20158 /*
20159 * Make sure the vDSO gets into every core dump.
20160 * Dumping its contents makes post-mortem fully interpretable later
20161 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
20162 if (compat)
20163 addr = VDSO_HIGH_BASE;
20164 else {
20165 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
20166 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
20167 if (IS_ERR_VALUE(addr)) {
20168 ret = addr;
20169 goto up_fail;
20170 }
20171 }
20172
20173 - current->mm->context.vdso = (void *)addr;
20174 + current->mm->context.vdso = addr;
20175
20176 if (compat_uses_vma || !compat) {
20177 /*
20178 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
20179 }
20180
20181 current_thread_info()->sysenter_return =
20182 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
20183 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
20184
20185 up_fail:
20186 if (ret)
20187 - current->mm->context.vdso = NULL;
20188 + current->mm->context.vdso = 0;
20189
20190 up_write(&mm->mmap_sem);
20191
20192 @@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
20193
20194 const char *arch_vma_name(struct vm_area_struct *vma)
20195 {
20196 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
20197 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
20198 return "[vdso]";
20199 +
20200 +#ifdef CONFIG_PAX_SEGMEXEC
20201 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
20202 + return "[vdso]";
20203 +#endif
20204 +
20205 return NULL;
20206 }
20207
20208 @@ -422,7 +429,7 @@ struct vm_area_struct *get_gate_vma(stru
20209 struct mm_struct *mm = tsk->mm;
20210
20211 /* Check to see if this task was created in compat vdso mode */
20212 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
20213 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
20214 return &gate_vma;
20215 return NULL;
20216 }
20217 diff -urNp linux-2.6.35.4/arch/x86/vdso/vdso.lds.S linux-2.6.35.4/arch/x86/vdso/vdso.lds.S
20218 --- linux-2.6.35.4/arch/x86/vdso/vdso.lds.S 2010-08-26 19:47:12.000000000 -0400
20219 +++ linux-2.6.35.4/arch/x86/vdso/vdso.lds.S 2010-09-17 20:12:09.000000000 -0400
20220 @@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
20221 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
20222 #include "vextern.h"
20223 #undef VEXTERN
20224 +
20225 +#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
20226 +VEXTERN(fallback_gettimeofday)
20227 +VEXTERN(fallback_time)
20228 +VEXTERN(getcpu)
20229 +#undef VEXTERN
20230 diff -urNp linux-2.6.35.4/arch/x86/vdso/vextern.h linux-2.6.35.4/arch/x86/vdso/vextern.h
20231 --- linux-2.6.35.4/arch/x86/vdso/vextern.h 2010-08-26 19:47:12.000000000 -0400
20232 +++ linux-2.6.35.4/arch/x86/vdso/vextern.h 2010-09-17 20:12:09.000000000 -0400
20233 @@ -11,6 +11,5 @@
20234 put into vextern.h and be referenced as a pointer with vdso prefix.
20235 The main kernel later fills in the values. */
20236
20237 -VEXTERN(jiffies)
20238 VEXTERN(vgetcpu_mode)
20239 VEXTERN(vsyscall_gtod_data)
20240 diff -urNp linux-2.6.35.4/arch/x86/vdso/vma.c linux-2.6.35.4/arch/x86/vdso/vma.c
20241 --- linux-2.6.35.4/arch/x86/vdso/vma.c 2010-08-26 19:47:12.000000000 -0400
20242 +++ linux-2.6.35.4/arch/x86/vdso/vma.c 2010-09-17 20:12:09.000000000 -0400
20243 @@ -58,7 +58,7 @@ static int __init init_vdso_vars(void)
20244 if (!vbase)
20245 goto oom;
20246
20247 - if (memcmp(vbase, "\177ELF", 4)) {
20248 + if (memcmp(vbase, ELFMAG, SELFMAG)) {
20249 printk("VDSO: I'm broken; not ELF\n");
20250 vdso_enabled = 0;
20251 }
20252 @@ -67,6 +67,7 @@ static int __init init_vdso_vars(void)
20253 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
20254 #include "vextern.h"
20255 #undef VEXTERN
20256 + vunmap(vbase);
20257 return 0;
20258
20259 oom:
20260 @@ -117,7 +118,7 @@ int arch_setup_additional_pages(struct l
20261 goto up_fail;
20262 }
20263
20264 - current->mm->context.vdso = (void *)addr;
20265 + current->mm->context.vdso = addr;
20266
20267 ret = install_special_mapping(mm, addr, vdso_size,
20268 VM_READ|VM_EXEC|
20269 @@ -125,7 +126,7 @@ int arch_setup_additional_pages(struct l
20270 VM_ALWAYSDUMP,
20271 vdso_pages);
20272 if (ret) {
20273 - current->mm->context.vdso = NULL;
20274 + current->mm->context.vdso = 0;
20275 goto up_fail;
20276 }
20277
20278 @@ -133,10 +134,3 @@ up_fail:
20279 up_write(&mm->mmap_sem);
20280 return ret;
20281 }
20282 -
20283 -static __init int vdso_setup(char *s)
20284 -{
20285 - vdso_enabled = simple_strtoul(s, NULL, 0);
20286 - return 0;
20287 -}
20288 -__setup("vdso=", vdso_setup);
20289 diff -urNp linux-2.6.35.4/arch/x86/xen/enlighten.c linux-2.6.35.4/arch/x86/xen/enlighten.c
20290 --- linux-2.6.35.4/arch/x86/xen/enlighten.c 2010-08-26 19:47:12.000000000 -0400
20291 +++ linux-2.6.35.4/arch/x86/xen/enlighten.c 2010-09-17 20:12:09.000000000 -0400
20292 @@ -74,8 +74,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
20293
20294 struct shared_info xen_dummy_shared_info;
20295
20296 -void *xen_initial_gdt;
20297 -
20298 /*
20299 * Point at some empty memory to start with. We map the real shared_info
20300 * page as soon as fixmap is up and running.
20301 @@ -551,7 +549,7 @@ static void xen_write_idt_entry(gate_des
20302
20303 preempt_disable();
20304
20305 - start = __get_cpu_var(idt_desc).address;
20306 + start = (unsigned long)__get_cpu_var(idt_desc).address;
20307 end = start + __get_cpu_var(idt_desc).size + 1;
20308
20309 xen_mc_flush();
20310 @@ -1103,7 +1101,17 @@ asmlinkage void __init xen_start_kernel(
20311 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
20312
20313 /* Work out if we support NX */
20314 - x86_configure_nx();
20315 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
20316 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
20317 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
20318 + unsigned l, h;
20319 +
20320 + __supported_pte_mask |= _PAGE_NX;
20321 + rdmsr(MSR_EFER, l, h);
20322 + l |= EFER_NX;
20323 + wrmsr(MSR_EFER, l, h);
20324 + }
20325 +#endif
20326
20327 xen_setup_features();
20328
20329 @@ -1134,13 +1142,6 @@ asmlinkage void __init xen_start_kernel(
20330
20331 machine_ops = xen_machine_ops;
20332
20333 - /*
20334 - * The only reliable way to retain the initial address of the
20335 - * percpu gdt_page is to remember it here, so we can go and
20336 - * mark it RW later, when the initial percpu area is freed.
20337 - */
20338 - xen_initial_gdt = &per_cpu(gdt_page, 0);
20339 -
20340 xen_smp_init();
20341
20342 pgd = (pgd_t *)xen_start_info->pt_base;
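
For Xen the patch open-codes NX detection instead of calling x86_configure_nx(): it probes CPUID leaf 0x80000001 for the NX bit (bit 20 of EDX) and then sets EFER.NX directly via rdmsr/wrmsr. The user-space sketch below performs the same CPUID probe; it relies on GCC's <cpuid.h> wrapper, which is a toolchain assumption, and leaves the EFER write out since MSRs cannot be touched from user space.

        #include <stdio.h>
        #include <cpuid.h>

        int main(void)
        {
                unsigned int eax, ebx, ecx, edx;

                if (!__get_cpuid(0x80000000, &eax, &ebx, &ecx, &edx) || eax < 0x80000001) {
                        printf("no extended CPUID leaves\n");
                        return 0;
                }
                __get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
                printf("NX supported: %s\n", (edx & (1u << 20)) ? "yes" : "no");
                return 0;
        }
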
20343 diff -urNp linux-2.6.35.4/arch/x86/xen/mmu.c linux-2.6.35.4/arch/x86/xen/mmu.c
20344 --- linux-2.6.35.4/arch/x86/xen/mmu.c 2010-08-26 19:47:12.000000000 -0400
20345 +++ linux-2.6.35.4/arch/x86/xen/mmu.c 2010-09-17 20:12:09.000000000 -0400
20346 @@ -1694,6 +1694,8 @@ __init pgd_t *xen_setup_kernel_pagetable
20347 convert_pfn_mfn(init_level4_pgt);
20348 convert_pfn_mfn(level3_ident_pgt);
20349 convert_pfn_mfn(level3_kernel_pgt);
20350 + convert_pfn_mfn(level3_vmalloc_pgt);
20351 + convert_pfn_mfn(level3_vmemmap_pgt);
20352
20353 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
20354 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
20355 @@ -1712,7 +1714,10 @@ __init pgd_t *xen_setup_kernel_pagetable
20356 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
20357 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
20358 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
20359 + set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
20360 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
20361 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
20362 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
20363 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
20364 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
20365
20366 diff -urNp linux-2.6.35.4/arch/x86/xen/smp.c linux-2.6.35.4/arch/x86/xen/smp.c
20367 --- linux-2.6.35.4/arch/x86/xen/smp.c 2010-08-26 19:47:12.000000000 -0400
20368 +++ linux-2.6.35.4/arch/x86/xen/smp.c 2010-09-17 20:12:09.000000000 -0400
20369 @@ -169,11 +169,6 @@ static void __init xen_smp_prepare_boot_
20370 {
20371 BUG_ON(smp_processor_id() != 0);
20372 native_smp_prepare_boot_cpu();
20373 -
20374 - /* We've switched to the "real" per-cpu gdt, so make sure the
20375 - old memory can be recycled */
20376 - make_lowmem_page_readwrite(xen_initial_gdt);
20377 -
20378 xen_setup_vcpu_info_placement();
20379 }
20380
20381 @@ -233,8 +228,8 @@ cpu_initialize_context(unsigned int cpu,
20382 gdt = get_cpu_gdt_table(cpu);
20383
20384 ctxt->flags = VGCF_IN_KERNEL;
20385 - ctxt->user_regs.ds = __USER_DS;
20386 - ctxt->user_regs.es = __USER_DS;
20387 + ctxt->user_regs.ds = __KERNEL_DS;
20388 + ctxt->user_regs.es = __KERNEL_DS;
20389 ctxt->user_regs.ss = __KERNEL_DS;
20390 #ifdef CONFIG_X86_32
20391 ctxt->user_regs.fs = __KERNEL_PERCPU;
20392 diff -urNp linux-2.6.35.4/arch/x86/xen/xen-head.S linux-2.6.35.4/arch/x86/xen/xen-head.S
20393 --- linux-2.6.35.4/arch/x86/xen/xen-head.S 2010-08-26 19:47:12.000000000 -0400
20394 +++ linux-2.6.35.4/arch/x86/xen/xen-head.S 2010-09-17 20:12:09.000000000 -0400
20395 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
20396 #ifdef CONFIG_X86_32
20397 mov %esi,xen_start_info
20398 mov $init_thread_union+THREAD_SIZE,%esp
20399 +#ifdef CONFIG_SMP
20400 + movl $cpu_gdt_table,%edi
20401 + movl $__per_cpu_load,%eax
20402 + movw %ax,__KERNEL_PERCPU + 2(%edi)
20403 + rorl $16,%eax
20404 + movb %al,__KERNEL_PERCPU + 4(%edi)
20405 + movb %ah,__KERNEL_PERCPU + 7(%edi)
20406 + movl $__per_cpu_end - 1,%eax
20407 + subl $__per_cpu_start,%eax
20408 + movw %ax,__KERNEL_PERCPU + 0(%edi)
20409 +#endif
20410 #else
20411 mov %rsi,xen_start_info
20412 mov $init_thread_union+THREAD_SIZE,%rsp
20413 diff -urNp linux-2.6.35.4/arch/x86/xen/xen-ops.h linux-2.6.35.4/arch/x86/xen/xen-ops.h
20414 --- linux-2.6.35.4/arch/x86/xen/xen-ops.h 2010-08-26 19:47:12.000000000 -0400
20415 +++ linux-2.6.35.4/arch/x86/xen/xen-ops.h 2010-09-17 20:12:09.000000000 -0400
20416 @@ -10,8 +10,6 @@
20417 extern const char xen_hypervisor_callback[];
20418 extern const char xen_failsafe_callback[];
20419
20420 -extern void *xen_initial_gdt;
20421 -
20422 struct trap_info;
20423 void xen_copy_trap_info(struct trap_info *traps);
20424
20425 diff -urNp linux-2.6.35.4/block/blk-iopoll.c linux-2.6.35.4/block/blk-iopoll.c
20426 --- linux-2.6.35.4/block/blk-iopoll.c 2010-08-26 19:47:12.000000000 -0400
20427 +++ linux-2.6.35.4/block/blk-iopoll.c 2010-09-17 20:12:09.000000000 -0400
20428 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
20429 }
20430 EXPORT_SYMBOL(blk_iopoll_complete);
20431
20432 -static void blk_iopoll_softirq(struct softirq_action *h)
20433 +static void blk_iopoll_softirq(void)
20434 {
20435 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
20436 int rearm = 0, budget = blk_iopoll_budget;
20437 diff -urNp linux-2.6.35.4/block/blk-map.c linux-2.6.35.4/block/blk-map.c
20438 --- linux-2.6.35.4/block/blk-map.c 2010-08-26 19:47:12.000000000 -0400
20439 +++ linux-2.6.35.4/block/blk-map.c 2010-09-17 20:12:09.000000000 -0400
20440 @@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct requ
20441 * direct dma. else, set up kernel bounce buffers
20442 */
20443 uaddr = (unsigned long) ubuf;
20444 - if (blk_rq_aligned(q, ubuf, len) && !map_data)
20445 + if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
20446 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
20447 else
20448 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
20449 @@ -297,7 +297,7 @@ int blk_rq_map_kern(struct request_queue
20450 if (!len || !kbuf)
20451 return -EINVAL;
20452
20453 - do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
20454 + do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
20455 if (do_copy)
20456 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
20457 else
20458 diff -urNp linux-2.6.35.4/block/blk-softirq.c linux-2.6.35.4/block/blk-softirq.c
20459 --- linux-2.6.35.4/block/blk-softirq.c 2010-08-26 19:47:12.000000000 -0400
20460 +++ linux-2.6.35.4/block/blk-softirq.c 2010-09-17 20:12:09.000000000 -0400
20461 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
20462 * Softirq action handler - move entries to local list and loop over them
20463 * while passing them to the queue registered handler.
20464 */
20465 -static void blk_done_softirq(struct softirq_action *h)
20466 +static void blk_done_softirq(void)
20467 {
20468 struct list_head *cpu_list, local_list;
20469
20470 diff -urNp linux-2.6.35.4/crypto/lrw.c linux-2.6.35.4/crypto/lrw.c
20471 --- linux-2.6.35.4/crypto/lrw.c 2010-08-26 19:47:12.000000000 -0400
20472 +++ linux-2.6.35.4/crypto/lrw.c 2010-09-17 20:12:09.000000000 -0400
20473 @@ -60,7 +60,7 @@ static int setkey(struct crypto_tfm *par
20474 struct priv *ctx = crypto_tfm_ctx(parent);
20475 struct crypto_cipher *child = ctx->child;
20476 int err, i;
20477 - be128 tmp = { 0 };
20478 + be128 tmp = { 0, 0 };
20479 int bsize = crypto_cipher_blocksize(child);
20480
20481 crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
20482 diff -urNp linux-2.6.35.4/Documentation/dontdiff linux-2.6.35.4/Documentation/dontdiff
20483 --- linux-2.6.35.4/Documentation/dontdiff 2010-08-26 19:47:12.000000000 -0400
20484 +++ linux-2.6.35.4/Documentation/dontdiff 2010-09-17 20:12:09.000000000 -0400
20485 @@ -3,6 +3,7 @@
20486 *.bin
20487 *.cpio
20488 *.csp
20489 +*.dbg
20490 *.dsp
20491 *.dvi
20492 *.elf
20493 @@ -38,8 +39,10 @@
20494 *.tab.h
20495 *.tex
20496 *.ver
20497 +*.vim
20498 *.xml
20499 *_MODULES
20500 +*_reg_safe.h
20501 *_vga16.c
20502 *~
20503 *.9
20504 @@ -49,11 +52,16 @@
20505 53c700_d.h
20506 CVS
20507 ChangeSet
20508 +GPATH
20509 +GRTAGS
20510 +GSYMS
20511 +GTAGS
20512 Image
20513 Kerntypes
20514 Module.markers
20515 Module.symvers
20516 PENDING
20517 +PERF*
20518 SCCS
20519 System.map*
20520 TAGS
20521 @@ -76,7 +84,10 @@ btfixupprep
20522 build
20523 bvmlinux
20524 bzImage*
20525 +capflags.c
20526 classlist.h*
20527 +clut_vga16.c
20528 +common-cmds.h
20529 comp*.log
20530 compile.h*
20531 conf
20532 @@ -100,19 +111,22 @@ fore200e_mkfirm
20533 fore200e_pca_fw.c*
20534 gconf
20535 gen-devlist
20536 +gen-kdb_cmds.c
20537 gen_crc32table
20538 gen_init_cpio
20539 generated
20540 genheaders
20541 genksyms
20542 *_gray256.c
20543 +hash
20544 ihex2fw
20545 ikconfig.h*
20546 +inat-tables.c
20547 initramfs_data.cpio
20548 +initramfs_data.cpio.bz2
20549 initramfs_data.cpio.gz
20550 initramfs_list
20551 kallsyms
20552 -kconfig
20553 keywords.c
20554 ksym.c*
20555 ksym.h*
20556 @@ -136,10 +150,13 @@ mkboot
20557 mkbugboot
20558 mkcpustr
20559 mkdep
20560 +mkpiggy
20561 mkprep
20562 +mkregtable
20563 mktables
20564 mktree
20565 modpost
20566 +modules.builtin
20567 modules.order
20568 modversions.h*
20569 ncscope.*
20570 @@ -151,7 +168,9 @@ parse.h
20571 patches*
20572 pca200e.bin
20573 pca200e_ecd.bin2
20574 +perf-archive
20575 piggy.gz
20576 +piggy.S
20577 piggyback
20578 pnmtologo
20579 ppc_defs.h*
20580 @@ -160,12 +179,14 @@ qconf
20581 raid6altivec*.c
20582 raid6int*.c
20583 raid6tables.c
20584 +regdb.c
20585 relocs
20586 series
20587 setup
20588 setup.bin
20589 setup.elf
20590 sImage
20591 +slabinfo
20592 sm_tbl*
20593 split-include
20594 syscalltab.h
20595 @@ -189,14 +210,20 @@ version.h*
20596 vmlinux
20597 vmlinux-*
20598 vmlinux.aout
20599 +vmlinux.bin.all
20600 +vmlinux.bin.bz2
20601 vmlinux.lds
20602 +vmlinux.relocs
20603 +voffset.h
20604 vsyscall.lds
20605 vsyscall_32.lds
20606 wanxlfw.inc
20607 uImage
20608 unifdef
20609 +utsrelease.h
20610 wakeup.bin
20611 wakeup.elf
20612 wakeup.lds
20613 zImage*
20614 zconf.hash.c
20615 +zoffset.h
20616 diff -urNp linux-2.6.35.4/Documentation/filesystems/sysfs.txt linux-2.6.35.4/Documentation/filesystems/sysfs.txt
20617 --- linux-2.6.35.4/Documentation/filesystems/sysfs.txt 2010-08-26 19:47:12.000000000 -0400
20618 +++ linux-2.6.35.4/Documentation/filesystems/sysfs.txt 2010-09-17 20:12:09.000000000 -0400
20619 @@ -123,8 +123,8 @@ set of sysfs operations for forwarding r
20620 show and store methods of the attribute owners.
20621
20622 struct sysfs_ops {
20623 - ssize_t (*show)(struct kobject *, struct attribute *, char *);
20624 - ssize_t (*store)(struct kobject *, struct attribute *, const char *);
20625 + ssize_t (* const show)(struct kobject *, struct attribute *, char *);
20626 + ssize_t (* const store)(struct kobject *, struct attribute *, const char *);
20627 };
20628
20629 [ Subsystems should have already defined a struct kobj_type as a
20630 diff -urNp linux-2.6.35.4/Documentation/kernel-parameters.txt linux-2.6.35.4/Documentation/kernel-parameters.txt
20631 --- linux-2.6.35.4/Documentation/kernel-parameters.txt 2010-08-26 19:47:12.000000000 -0400
20632 +++ linux-2.6.35.4/Documentation/kernel-parameters.txt 2010-09-17 20:12:09.000000000 -0400
20633 @@ -1910,6 +1910,12 @@ and is between 256 and 4096 characters.
20634 the specified number of seconds. This is to be used if
20635 your oopses keep scrolling off the screen.
20636
20637 + pax_nouderef [X86-32] disables UDEREF. Most likely needed under certain
20638 + virtualization environments that don't cope well with the
20639 + expand down segment used by UDEREF on X86-32.
20640 +
20641 + pax_softmode= [X86-32] 0/1 to disable/enable PaX softmode on boot already.
20642 +
20643 pcbit= [HW,ISDN]
20644
20645 pcd. [PARIDE]
20646 diff -urNp linux-2.6.35.4/drivers/acpi/battery.c linux-2.6.35.4/drivers/acpi/battery.c
20647 --- linux-2.6.35.4/drivers/acpi/battery.c 2010-08-26 19:47:12.000000000 -0400
20648 +++ linux-2.6.35.4/drivers/acpi/battery.c 2010-09-17 20:12:09.000000000 -0400
20649 @@ -810,7 +810,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
20650 }
20651
20652 static struct battery_file {
20653 - struct file_operations ops;
20654 + const struct file_operations ops;
20655 mode_t mode;
20656 const char *name;
20657 } acpi_battery_file[] = {
20658 diff -urNp linux-2.6.35.4/drivers/acpi/blacklist.c linux-2.6.35.4/drivers/acpi/blacklist.c
20659 --- linux-2.6.35.4/drivers/acpi/blacklist.c 2010-08-26 19:47:12.000000000 -0400
20660 +++ linux-2.6.35.4/drivers/acpi/blacklist.c 2010-09-17 20:12:09.000000000 -0400
20661 @@ -73,7 +73,7 @@ static struct acpi_blacklist_item acpi_b
20662 {"IBM ", "TP600E ", 0x00000105, ACPI_SIG_DSDT, less_than_or_equal,
20663 "Incorrect _ADR", 1},
20664
20665 - {""}
20666 + {"", "", 0, NULL, all_versions, NULL, 0}
20667 };
20668
20669 #if CONFIG_ACPI_BLACKLIST_YEAR
20670 diff -urNp linux-2.6.35.4/drivers/acpi/dock.c linux-2.6.35.4/drivers/acpi/dock.c
20671 --- linux-2.6.35.4/drivers/acpi/dock.c 2010-08-26 19:47:12.000000000 -0400
20672 +++ linux-2.6.35.4/drivers/acpi/dock.c 2010-09-17 20:12:09.000000000 -0400
20673 @@ -77,7 +77,7 @@ struct dock_dependent_device {
20674 struct list_head list;
20675 struct list_head hotplug_list;
20676 acpi_handle handle;
20677 - struct acpi_dock_ops *ops;
20678 + const struct acpi_dock_ops *ops;
20679 void *context;
20680 };
20681
20682 @@ -589,7 +589,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifi
20683 * the dock driver after _DCK is executed.
20684 */
20685 int
20686 -register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
20687 +register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
20688 void *context)
20689 {
20690 struct dock_dependent_device *dd;
20691 diff -urNp linux-2.6.35.4/drivers/acpi/osl.c linux-2.6.35.4/drivers/acpi/osl.c
20692 --- linux-2.6.35.4/drivers/acpi/osl.c 2010-08-26 19:47:12.000000000 -0400
20693 +++ linux-2.6.35.4/drivers/acpi/osl.c 2010-09-17 20:12:09.000000000 -0400
20694 @@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_addres
20695 void __iomem *virt_addr;
20696
20697 virt_addr = ioremap(phys_addr, width);
20698 + if (!virt_addr)
20699 + return AE_NO_MEMORY;
20700 if (!value)
20701 value = &dummy;
20702
20703 @@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_addre
20704 void __iomem *virt_addr;
20705
20706 virt_addr = ioremap(phys_addr, width);
20707 + if (!virt_addr)
20708 + return AE_NO_MEMORY;
20709
20710 switch (width) {
20711 case 8:
20712 diff -urNp linux-2.6.35.4/drivers/acpi/power_meter.c linux-2.6.35.4/drivers/acpi/power_meter.c
20713 --- linux-2.6.35.4/drivers/acpi/power_meter.c 2010-08-26 19:47:12.000000000 -0400
20714 +++ linux-2.6.35.4/drivers/acpi/power_meter.c 2010-09-17 20:12:09.000000000 -0400
20715 @@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *d
20716 return res;
20717
20718 temp /= 1000;
20719 - if (temp < 0)
20720 - return -EINVAL;
20721
20722 mutex_lock(&resource->lock);
20723 resource->trip[attr->index - 7] = temp;
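
The set_trip() hunk above drops the "if (temp < 0) return -EINVAL;" test, most likely because temp is an unsigned type in that function, which would make the comparison always false and the check dead code; the declaration is not visible in the hunk, so that reading is an assumption. The tiny program below only demonstrates the general point about unsigned comparisons:

        #include <stdio.h>

        int main(void)
        {
                unsigned long temp = (unsigned long)-5;  /* wraps to a huge positive value */

                /* with an unsigned type this branch can never be taken */
                if (temp < 0)
                        printf("negative\n");
                else
                        printf("temp = %lu; (temp < 0) is always 0 for unsigned types\n", temp);
                return 0;
        }
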
20724 diff -urNp linux-2.6.35.4/drivers/acpi/proc.c linux-2.6.35.4/drivers/acpi/proc.c
20725 --- linux-2.6.35.4/drivers/acpi/proc.c 2010-08-26 19:47:12.000000000 -0400
20726 +++ linux-2.6.35.4/drivers/acpi/proc.c 2010-09-17 20:12:09.000000000 -0400
20727 @@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct f
20728 size_t count, loff_t * ppos)
20729 {
20730 struct list_head *node, *next;
20731 - char strbuf[5];
20732 - char str[5] = "";
20733 - unsigned int len = count;
20734 + char strbuf[5] = {0};
20735 struct acpi_device *found_dev = NULL;
20736
20737 - if (len > 4)
20738 - len = 4;
20739 - if (len < 0)
20740 - return -EFAULT;
20741 + if (count > 4)
20742 + count = 4;
20743
20744 - if (copy_from_user(strbuf, buffer, len))
20745 + if (copy_from_user(strbuf, buffer, count))
20746 return -EFAULT;
20747 - strbuf[len] = '\0';
20748 - sscanf(strbuf, "%s", str);
20749 + strbuf[count] = '\0';
20750
20751 mutex_lock(&acpi_device_lock);
20752 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
20753 @@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct f
20754 if (!dev->wakeup.flags.valid)
20755 continue;
20756
20757 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
20758 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
20759 dev->wakeup.state.enabled =
20760 dev->wakeup.state.enabled ? 0 : 1;
20761 found_dev = dev;
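
The proc.c rework above drops the dead "len < 0" test on an unsigned length, the redundant second buffer and the sscanf() pass, and keeps only a bounded copy_from_user() with explicit NUL termination. A self-contained sketch of that idiom (function and buffer names are illustrative, not taken from the patch):

    #include <linux/errno.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    /* Hypothetical write-handler body: copy at most 4 bytes from user space
     * and guarantee NUL termination before any string comparison. */
    static ssize_t store_short_name(const char __user *buffer, size_t count)
    {
            char strbuf[5] = {0};

            if (count > sizeof(strbuf) - 1)
                    count = sizeof(strbuf) - 1;
            if (copy_from_user(strbuf, buffer, count))
                    return -EFAULT;
            strbuf[count] = '\0';

            /* ... match strbuf against device names, as the handler does ... */
            return count;
    }
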
20762 diff -urNp linux-2.6.35.4/drivers/acpi/processor_driver.c linux-2.6.35.4/drivers/acpi/processor_driver.c
20763 --- linux-2.6.35.4/drivers/acpi/processor_driver.c 2010-08-26 19:47:12.000000000 -0400
20764 +++ linux-2.6.35.4/drivers/acpi/processor_driver.c 2010-09-17 20:12:09.000000000 -0400
20765 @@ -586,7 +586,7 @@ static int __cpuinit acpi_processor_add(
20766 return 0;
20767 #endif
20768
20769 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
20770 + BUG_ON(pr->id >= nr_cpu_ids);
20771
20772 /*
20773 * Buggy BIOS check
20774 diff -urNp linux-2.6.35.4/drivers/acpi/processor_idle.c linux-2.6.35.4/drivers/acpi/processor_idle.c
20775 --- linux-2.6.35.4/drivers/acpi/processor_idle.c 2010-08-26 19:47:12.000000000 -0400
20776 +++ linux-2.6.35.4/drivers/acpi/processor_idle.c 2010-09-17 20:12:09.000000000 -0400
20777 @@ -124,7 +124,7 @@ static struct dmi_system_id __cpuinitdat
20778 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
20779 DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
20780 (void *)1},
20781 - {},
20782 + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL},
20783 };
20784
20785
20786 diff -urNp linux-2.6.35.4/drivers/acpi/sleep.c linux-2.6.35.4/drivers/acpi/sleep.c
20787 --- linux-2.6.35.4/drivers/acpi/sleep.c 2010-08-26 19:47:12.000000000 -0400
20788 +++ linux-2.6.35.4/drivers/acpi/sleep.c 2010-09-17 20:12:09.000000000 -0400
20789 @@ -318,7 +318,7 @@ static int acpi_suspend_state_valid(susp
20790 }
20791 }
20792
20793 -static struct platform_suspend_ops acpi_suspend_ops = {
20794 +static const struct platform_suspend_ops acpi_suspend_ops = {
20795 .valid = acpi_suspend_state_valid,
20796 .begin = acpi_suspend_begin,
20797 .prepare_late = acpi_pm_prepare,
20798 @@ -346,7 +346,7 @@ static int acpi_suspend_begin_old(suspen
20799 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
20800 * been requested.
20801 */
20802 -static struct platform_suspend_ops acpi_suspend_ops_old = {
20803 +static const struct platform_suspend_ops acpi_suspend_ops_old = {
20804 .valid = acpi_suspend_state_valid,
20805 .begin = acpi_suspend_begin_old,
20806 .prepare_late = acpi_pm_freeze,
20807 @@ -478,7 +478,7 @@ static void acpi_pm_thaw(void)
20808 acpi_enable_all_runtime_gpes();
20809 }
20810
20811 -static struct platform_hibernation_ops acpi_hibernation_ops = {
20812 +static const struct platform_hibernation_ops acpi_hibernation_ops = {
20813 .begin = acpi_hibernation_begin,
20814 .end = acpi_pm_end,
20815 .pre_snapshot = acpi_hibernation_pre_snapshot,
20816 @@ -528,7 +528,7 @@ static int acpi_hibernation_pre_snapshot
20817 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
20818 * been requested.
20819 */
20820 -static struct platform_hibernation_ops acpi_hibernation_ops_old = {
20821 +static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
20822 .begin = acpi_hibernation_begin_old,
20823 .end = acpi_pm_end,
20824 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
20825 diff -urNp linux-2.6.35.4/drivers/acpi/video.c linux-2.6.35.4/drivers/acpi/video.c
20826 --- linux-2.6.35.4/drivers/acpi/video.c 2010-08-26 19:47:12.000000000 -0400
20827 +++ linux-2.6.35.4/drivers/acpi/video.c 2010-09-17 20:12:09.000000000 -0400
20828 @@ -363,7 +363,7 @@ static int acpi_video_set_brightness(str
20829 vd->brightness->levels[request_level]);
20830 }
20831
20832 -static struct backlight_ops acpi_backlight_ops = {
20833 +static const struct backlight_ops acpi_backlight_ops = {
20834 .get_brightness = acpi_video_get_brightness,
20835 .update_status = acpi_video_set_brightness,
20836 };
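
A recurring pattern in the ACPI hunks above (and throughout the patch) is qualifying operation tables -- acpi_dock_ops, platform_suspend_ops, platform_hibernation_ops, backlight_ops -- as const so they can live in read-only memory and their function pointers cannot be overwritten at run time. A stand-alone sketch of the idiom, using a made-up ops type rather than any real kernel structure:

    /* Hypothetical ops table; the const qualifier lets the compiler place it
     * in .rodata, so its function pointers are immutable after boot. */
    struct demo_ops {
            int (*get)(void);
            int (*set)(int val);
    };

    static int demo_get(void)    { return 0; }
    static int demo_set(int val) { return val; }

    static const struct demo_ops demo_ops_table = {
            .get = demo_get,
            .set = demo_set,
    };
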
20837 diff -urNp linux-2.6.35.4/drivers/ata/ahci.c linux-2.6.35.4/drivers/ata/ahci.c
20838 --- linux-2.6.35.4/drivers/ata/ahci.c 2010-08-26 19:47:12.000000000 -0400
20839 +++ linux-2.6.35.4/drivers/ata/ahci.c 2010-09-17 20:12:09.000000000 -0400
20840 @@ -89,17 +89,17 @@ static int ahci_pci_device_suspend(struc
20841 static int ahci_pci_device_resume(struct pci_dev *pdev);
20842 #endif
20843
20844 -static struct ata_port_operations ahci_vt8251_ops = {
20845 +static const struct ata_port_operations ahci_vt8251_ops = {
20846 .inherits = &ahci_ops,
20847 .hardreset = ahci_vt8251_hardreset,
20848 };
20849
20850 -static struct ata_port_operations ahci_p5wdh_ops = {
20851 +static const struct ata_port_operations ahci_p5wdh_ops = {
20852 .inherits = &ahci_ops,
20853 .hardreset = ahci_p5wdh_hardreset,
20854 };
20855
20856 -static struct ata_port_operations ahci_sb600_ops = {
20857 +static const struct ata_port_operations ahci_sb600_ops = {
20858 .inherits = &ahci_ops,
20859 .softreset = ahci_sb600_softreset,
20860 .pmp_softreset = ahci_sb600_softreset,
20861 @@ -370,7 +370,7 @@ static const struct pci_device_id ahci_p
20862 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
20863 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
20864
20865 - { } /* terminate list */
20866 + { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
20867 };
20868
20869
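
The ahci.c hunk above also shows the second pattern the patch applies to such tables: the terminating entry of the PCI ID list is spelled out as an all-zero initializer instead of empty braces, making the sentinel explicit. Both spellings produce the same zero-filled entry; a hedged sketch with a simplified ID structure (not the real struct pci_device_id):

    /* Simplified ID table: the final all-zero entry is the terminator the
     * probing loop stops on; the patch writes the zeros out explicitly. */
    struct demo_id {
            unsigned int vendor;
            unsigned int device;
    };

    static const struct demo_id demo_ids[] = {
            { 0x8086, 0x1c09 },
            { 0, 0 }        /* terminate list */
    };
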
20870 diff -urNp linux-2.6.35.4/drivers/ata/ahci.h linux-2.6.35.4/drivers/ata/ahci.h
20871 --- linux-2.6.35.4/drivers/ata/ahci.h 2010-08-26 19:47:12.000000000 -0400
20872 +++ linux-2.6.35.4/drivers/ata/ahci.h 2010-09-17 20:12:09.000000000 -0400
20873 @@ -298,7 +298,7 @@ struct ahci_host_priv {
20874 extern int ahci_ignore_sss;
20875
20876 extern struct scsi_host_template ahci_sht;
20877 -extern struct ata_port_operations ahci_ops;
20878 +extern const struct ata_port_operations ahci_ops;
20879
20880 void ahci_save_initial_config(struct device *dev,
20881 struct ahci_host_priv *hpriv,
20882 diff -urNp linux-2.6.35.4/drivers/ata/ata_generic.c linux-2.6.35.4/drivers/ata/ata_generic.c
20883 --- linux-2.6.35.4/drivers/ata/ata_generic.c 2010-08-26 19:47:12.000000000 -0400
20884 +++ linux-2.6.35.4/drivers/ata/ata_generic.c 2010-09-17 20:12:09.000000000 -0400
20885 @@ -104,7 +104,7 @@ static struct scsi_host_template generic
20886 ATA_BMDMA_SHT(DRV_NAME),
20887 };
20888
20889 -static struct ata_port_operations generic_port_ops = {
20890 +static const struct ata_port_operations generic_port_ops = {
20891 .inherits = &ata_bmdma_port_ops,
20892 .cable_detect = ata_cable_unknown,
20893 .set_mode = generic_set_mode,
20894 diff -urNp linux-2.6.35.4/drivers/ata/ata_piix.c linux-2.6.35.4/drivers/ata/ata_piix.c
20895 --- linux-2.6.35.4/drivers/ata/ata_piix.c 2010-08-26 19:47:12.000000000 -0400
20896 +++ linux-2.6.35.4/drivers/ata/ata_piix.c 2010-09-17 20:12:09.000000000 -0400
20897 @@ -302,7 +302,7 @@ static const struct pci_device_id piix_p
20898 { 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
20899 /* SATA Controller IDE (CPT) */
20900 { 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
20901 - { } /* terminate list */
20902 + { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
20903 };
20904
20905 static struct pci_driver piix_pci_driver = {
20906 @@ -320,12 +320,12 @@ static struct scsi_host_template piix_sh
20907 ATA_BMDMA_SHT(DRV_NAME),
20908 };
20909
20910 -static struct ata_port_operations piix_sata_ops = {
20911 +static const struct ata_port_operations piix_sata_ops = {
20912 .inherits = &ata_bmdma32_port_ops,
20913 .sff_irq_check = piix_irq_check,
20914 };
20915
20916 -static struct ata_port_operations piix_pata_ops = {
20917 +static const struct ata_port_operations piix_pata_ops = {
20918 .inherits = &piix_sata_ops,
20919 .cable_detect = ata_cable_40wire,
20920 .set_piomode = piix_set_piomode,
20921 @@ -333,18 +333,18 @@ static struct ata_port_operations piix_p
20922 .prereset = piix_pata_prereset,
20923 };
20924
20925 -static struct ata_port_operations piix_vmw_ops = {
20926 +static const struct ata_port_operations piix_vmw_ops = {
20927 .inherits = &piix_pata_ops,
20928 .bmdma_status = piix_vmw_bmdma_status,
20929 };
20930
20931 -static struct ata_port_operations ich_pata_ops = {
20932 +static const struct ata_port_operations ich_pata_ops = {
20933 .inherits = &piix_pata_ops,
20934 .cable_detect = ich_pata_cable_detect,
20935 .set_dmamode = ich_set_dmamode,
20936 };
20937
20938 -static struct ata_port_operations piix_sidpr_sata_ops = {
20939 +static const struct ata_port_operations piix_sidpr_sata_ops = {
20940 .inherits = &piix_sata_ops,
20941 .hardreset = sata_std_hardreset,
20942 .scr_read = piix_sidpr_scr_read,
20943 @@ -620,7 +620,7 @@ static const struct ich_laptop ich_lapto
20944 { 0x2653, 0x1043, 0x82D8 }, /* ICH6M on Asus Eee 701 */
20945 { 0x27df, 0x104d, 0x900e }, /* ICH7 on Sony TZ-90 */
20946 /* end marker */
20947 - { 0, }
20948 + { 0, 0, 0 }
20949 };
20950
20951 /**
20952 @@ -1112,7 +1112,7 @@ static int piix_broken_suspend(void)
20953 },
20954 },
20955
20956 - { } /* terminate list */
20957 + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL } /* terminate list */
20958 };
20959 static const char *oemstrs[] = {
20960 "Tecra M3,",
20961 diff -urNp linux-2.6.35.4/drivers/ata/libahci.c linux-2.6.35.4/drivers/ata/libahci.c
20962 --- linux-2.6.35.4/drivers/ata/libahci.c 2010-08-26 19:47:12.000000000 -0400
20963 +++ linux-2.6.35.4/drivers/ata/libahci.c 2010-09-17 20:12:09.000000000 -0400
20964 @@ -149,7 +149,7 @@ struct scsi_host_template ahci_sht = {
20965 };
20966 EXPORT_SYMBOL_GPL(ahci_sht);
20967
20968 -struct ata_port_operations ahci_ops = {
20969 +const struct ata_port_operations ahci_ops = {
20970 .inherits = &sata_pmp_port_ops,
20971
20972 .qc_defer = ahci_pmp_qc_defer,
20973 diff -urNp linux-2.6.35.4/drivers/ata/libata-acpi.c linux-2.6.35.4/drivers/ata/libata-acpi.c
20974 --- linux-2.6.35.4/drivers/ata/libata-acpi.c 2010-08-26 19:47:12.000000000 -0400
20975 +++ linux-2.6.35.4/drivers/ata/libata-acpi.c 2010-09-17 20:12:09.000000000 -0400
20976 @@ -224,12 +224,12 @@ static void ata_acpi_dev_uevent(acpi_han
20977 ata_acpi_uevent(dev->link->ap, dev, event);
20978 }
20979
20980 -static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
20981 +static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
20982 .handler = ata_acpi_dev_notify_dock,
20983 .uevent = ata_acpi_dev_uevent,
20984 };
20985
20986 -static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
20987 +static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
20988 .handler = ata_acpi_ap_notify_dock,
20989 .uevent = ata_acpi_ap_uevent,
20990 };
20991 diff -urNp linux-2.6.35.4/drivers/ata/libata-core.c linux-2.6.35.4/drivers/ata/libata-core.c
20992 --- linux-2.6.35.4/drivers/ata/libata-core.c 2010-08-26 19:47:12.000000000 -0400
20993 +++ linux-2.6.35.4/drivers/ata/libata-core.c 2010-09-17 20:12:09.000000000 -0400
20994 @@ -901,7 +901,7 @@ static const struct ata_xfer_ent {
20995 { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
20996 { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
20997 { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
20998 - { -1, },
20999 + { -1, 0, 0 }
21000 };
21001
21002 /**
21003 @@ -3073,7 +3073,7 @@ static const struct ata_timing ata_timin
21004 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 },
21005 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 },
21006
21007 - { 0xFF }
21008 + { 0xFF, 0, 0, 0, 0, 0, 0, 0, 0 }
21009 };
21010
21011 #define ENOUGH(v, unit) (((v)-1)/(unit)+1)
21012 @@ -4323,7 +4323,7 @@ static const struct ata_blacklist_entry
21013 { "PIONEER DVD-RW DVRTD08", "1.00", ATA_HORKAGE_NOSETXFER },
21014
21015 /* End Marker */
21016 - { }
21017 + { NULL, NULL, 0 }
21018 };
21019
21020 static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
21021 @@ -5869,7 +5869,7 @@ static void ata_host_stop(struct device
21022 * LOCKING:
21023 * None.
21024 */
21025 -static void ata_finalize_port_ops(struct ata_port_operations *ops)
21026 +static void ata_finalize_port_ops(const struct ata_port_operations *ops)
21027 {
21028 static DEFINE_SPINLOCK(lock);
21029 const struct ata_port_operations *cur;
21030 @@ -5881,6 +5881,7 @@ static void ata_finalize_port_ops(struct
21031 return;
21032
21033 spin_lock(&lock);
21034 + pax_open_kernel();
21035
21036 for (cur = ops->inherits; cur; cur = cur->inherits) {
21037 void **inherit = (void **)cur;
21038 @@ -5894,8 +5895,9 @@ static void ata_finalize_port_ops(struct
21039 if (IS_ERR(*pp))
21040 *pp = NULL;
21041
21042 - ops->inherits = NULL;
21043 + ((struct ata_port_operations *)ops)->inherits = NULL;
21044
21045 + pax_close_kernel();
21046 spin_unlock(&lock);
21047 }
21048
21049 @@ -5992,7 +5994,7 @@ int ata_host_start(struct ata_host *host
21050 */
21051 /* KILLME - the only user left is ipr */
21052 void ata_host_init(struct ata_host *host, struct device *dev,
21053 - unsigned long flags, struct ata_port_operations *ops)
21054 + unsigned long flags, const struct ata_port_operations *ops)
21055 {
21056 spin_lock_init(&host->lock);
21057 host->dev = dev;
21058 @@ -6642,7 +6644,7 @@ static void ata_dummy_error_handler(stru
21059 /* truly dummy */
21060 }
21061
21062 -struct ata_port_operations ata_dummy_port_ops = {
21063 +const struct ata_port_operations ata_dummy_port_ops = {
21064 .qc_prep = ata_noop_qc_prep,
21065 .qc_issue = ata_dummy_qc_issue,
21066 .error_handler = ata_dummy_error_handler,
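
Because ahci_ops, ata_dummy_port_ops and the other port_operations tables become const above, the one place libata still writes to such a table -- ata_finalize_port_ops() clearing ops->inherits -- has to cast the const away for that single store, and the patch brackets it with pax_open_kernel()/pax_close_kernel() so the page is writable only inside that window. A reduced sketch of the cast (the type is made up, and the PaX helpers are omitted since they exist only with the patch applied):

    struct demo_port_ops {
            const struct demo_port_ops *inherits;
    };

    /* In the patched kernel the real tables are const objects made temporarily
     * writable by pax_open_kernel(); here the object itself stays writable and
     * only the pointer is const-qualified, to keep the sketch plain C. */
    static struct demo_port_ops base_ops;
    static struct demo_port_ops child_ops = { .inherits = &base_ops };

    static void finalize(const struct demo_port_ops *ops)
    {
            ((struct demo_port_ops *)ops)->inherits = NULL;  /* single, deliberate cast */
    }
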
21067 diff -urNp linux-2.6.35.4/drivers/ata/libata-eh.c linux-2.6.35.4/drivers/ata/libata-eh.c
21068 --- linux-2.6.35.4/drivers/ata/libata-eh.c 2010-08-26 19:47:12.000000000 -0400
21069 +++ linux-2.6.35.4/drivers/ata/libata-eh.c 2010-09-17 20:12:09.000000000 -0400
21070 @@ -3680,7 +3680,7 @@ void ata_do_eh(struct ata_port *ap, ata_
21071 */
21072 void ata_std_error_handler(struct ata_port *ap)
21073 {
21074 - struct ata_port_operations *ops = ap->ops;
21075 + const struct ata_port_operations *ops = ap->ops;
21076 ata_reset_fn_t hardreset = ops->hardreset;
21077
21078 /* ignore built-in hardreset if SCR access is not available */
21079 diff -urNp linux-2.6.35.4/drivers/ata/libata-pmp.c linux-2.6.35.4/drivers/ata/libata-pmp.c
21080 --- linux-2.6.35.4/drivers/ata/libata-pmp.c 2010-08-26 19:47:12.000000000 -0400
21081 +++ linux-2.6.35.4/drivers/ata/libata-pmp.c 2010-09-17 20:12:09.000000000 -0400
21082 @@ -868,7 +868,7 @@ static int sata_pmp_handle_link_fail(str
21083 */
21084 static int sata_pmp_eh_recover(struct ata_port *ap)
21085 {
21086 - struct ata_port_operations *ops = ap->ops;
21087 + const struct ata_port_operations *ops = ap->ops;
21088 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
21089 struct ata_link *pmp_link = &ap->link;
21090 struct ata_device *pmp_dev = pmp_link->device;
21091 diff -urNp linux-2.6.35.4/drivers/ata/pata_acpi.c linux-2.6.35.4/drivers/ata/pata_acpi.c
21092 --- linux-2.6.35.4/drivers/ata/pata_acpi.c 2010-08-26 19:47:12.000000000 -0400
21093 +++ linux-2.6.35.4/drivers/ata/pata_acpi.c 2010-09-17 20:12:09.000000000 -0400
21094 @@ -216,7 +216,7 @@ static struct scsi_host_template pacpi_s
21095 ATA_BMDMA_SHT(DRV_NAME),
21096 };
21097
21098 -static struct ata_port_operations pacpi_ops = {
21099 +static const struct ata_port_operations pacpi_ops = {
21100 .inherits = &ata_bmdma_port_ops,
21101 .qc_issue = pacpi_qc_issue,
21102 .cable_detect = pacpi_cable_detect,
21103 diff -urNp linux-2.6.35.4/drivers/ata/pata_ali.c linux-2.6.35.4/drivers/ata/pata_ali.c
21104 --- linux-2.6.35.4/drivers/ata/pata_ali.c 2010-08-26 19:47:12.000000000 -0400
21105 +++ linux-2.6.35.4/drivers/ata/pata_ali.c 2010-09-17 20:12:09.000000000 -0400
21106 @@ -363,7 +363,7 @@ static struct scsi_host_template ali_sht
21107 * Port operations for PIO only ALi
21108 */
21109
21110 -static struct ata_port_operations ali_early_port_ops = {
21111 +static const struct ata_port_operations ali_early_port_ops = {
21112 .inherits = &ata_sff_port_ops,
21113 .cable_detect = ata_cable_40wire,
21114 .set_piomode = ali_set_piomode,
21115 @@ -380,7 +380,7 @@ static const struct ata_port_operations
21116 * Port operations for DMA capable ALi without cable
21117 * detect
21118 */
21119 -static struct ata_port_operations ali_20_port_ops = {
21120 +static const struct ata_port_operations ali_20_port_ops = {
21121 .inherits = &ali_dma_base_ops,
21122 .cable_detect = ata_cable_40wire,
21123 .mode_filter = ali_20_filter,
21124 @@ -391,7 +391,7 @@ static struct ata_port_operations ali_20
21125 /*
21126 * Port operations for DMA capable ALi with cable detect
21127 */
21128 -static struct ata_port_operations ali_c2_port_ops = {
21129 +static const struct ata_port_operations ali_c2_port_ops = {
21130 .inherits = &ali_dma_base_ops,
21131 .check_atapi_dma = ali_check_atapi_dma,
21132 .cable_detect = ali_c2_cable_detect,
21133 @@ -402,7 +402,7 @@ static struct ata_port_operations ali_c2
21134 /*
21135 * Port operations for DMA capable ALi with cable detect
21136 */
21137 -static struct ata_port_operations ali_c4_port_ops = {
21138 +static const struct ata_port_operations ali_c4_port_ops = {
21139 .inherits = &ali_dma_base_ops,
21140 .check_atapi_dma = ali_check_atapi_dma,
21141 .cable_detect = ali_c2_cable_detect,
21142 @@ -412,7 +412,7 @@ static struct ata_port_operations ali_c4
21143 /*
21144 * Port operations for DMA capable ALi with cable detect and LBA48
21145 */
21146 -static struct ata_port_operations ali_c5_port_ops = {
21147 +static const struct ata_port_operations ali_c5_port_ops = {
21148 .inherits = &ali_dma_base_ops,
21149 .check_atapi_dma = ali_check_atapi_dma,
21150 .dev_config = ali_warn_atapi_dma,
21151 diff -urNp linux-2.6.35.4/drivers/ata/pata_amd.c linux-2.6.35.4/drivers/ata/pata_amd.c
21152 --- linux-2.6.35.4/drivers/ata/pata_amd.c 2010-08-26 19:47:12.000000000 -0400
21153 +++ linux-2.6.35.4/drivers/ata/pata_amd.c 2010-09-17 20:12:09.000000000 -0400
21154 @@ -397,28 +397,28 @@ static const struct ata_port_operations
21155 .prereset = amd_pre_reset,
21156 };
21157
21158 -static struct ata_port_operations amd33_port_ops = {
21159 +static const struct ata_port_operations amd33_port_ops = {
21160 .inherits = &amd_base_port_ops,
21161 .cable_detect = ata_cable_40wire,
21162 .set_piomode = amd33_set_piomode,
21163 .set_dmamode = amd33_set_dmamode,
21164 };
21165
21166 -static struct ata_port_operations amd66_port_ops = {
21167 +static const struct ata_port_operations amd66_port_ops = {
21168 .inherits = &amd_base_port_ops,
21169 .cable_detect = ata_cable_unknown,
21170 .set_piomode = amd66_set_piomode,
21171 .set_dmamode = amd66_set_dmamode,
21172 };
21173
21174 -static struct ata_port_operations amd100_port_ops = {
21175 +static const struct ata_port_operations amd100_port_ops = {
21176 .inherits = &amd_base_port_ops,
21177 .cable_detect = ata_cable_unknown,
21178 .set_piomode = amd100_set_piomode,
21179 .set_dmamode = amd100_set_dmamode,
21180 };
21181
21182 -static struct ata_port_operations amd133_port_ops = {
21183 +static const struct ata_port_operations amd133_port_ops = {
21184 .inherits = &amd_base_port_ops,
21185 .cable_detect = amd_cable_detect,
21186 .set_piomode = amd133_set_piomode,
21187 @@ -433,13 +433,13 @@ static const struct ata_port_operations
21188 .host_stop = nv_host_stop,
21189 };
21190
21191 -static struct ata_port_operations nv100_port_ops = {
21192 +static const struct ata_port_operations nv100_port_ops = {
21193 .inherits = &nv_base_port_ops,
21194 .set_piomode = nv100_set_piomode,
21195 .set_dmamode = nv100_set_dmamode,
21196 };
21197
21198 -static struct ata_port_operations nv133_port_ops = {
21199 +static const struct ata_port_operations nv133_port_ops = {
21200 .inherits = &nv_base_port_ops,
21201 .set_piomode = nv133_set_piomode,
21202 .set_dmamode = nv133_set_dmamode,
21203 diff -urNp linux-2.6.35.4/drivers/ata/pata_artop.c linux-2.6.35.4/drivers/ata/pata_artop.c
21204 --- linux-2.6.35.4/drivers/ata/pata_artop.c 2010-08-26 19:47:12.000000000 -0400
21205 +++ linux-2.6.35.4/drivers/ata/pata_artop.c 2010-09-17 20:12:09.000000000 -0400
21206 @@ -311,7 +311,7 @@ static struct scsi_host_template artop_s
21207 ATA_BMDMA_SHT(DRV_NAME),
21208 };
21209
21210 -static struct ata_port_operations artop6210_ops = {
21211 +static const struct ata_port_operations artop6210_ops = {
21212 .inherits = &ata_bmdma_port_ops,
21213 .cable_detect = ata_cable_40wire,
21214 .set_piomode = artop6210_set_piomode,
21215 @@ -320,7 +320,7 @@ static struct ata_port_operations artop6
21216 .qc_defer = artop6210_qc_defer,
21217 };
21218
21219 -static struct ata_port_operations artop6260_ops = {
21220 +static const struct ata_port_operations artop6260_ops = {
21221 .inherits = &ata_bmdma_port_ops,
21222 .cable_detect = artop6260_cable_detect,
21223 .set_piomode = artop6260_set_piomode,
21224 diff -urNp linux-2.6.35.4/drivers/ata/pata_at32.c linux-2.6.35.4/drivers/ata/pata_at32.c
21225 --- linux-2.6.35.4/drivers/ata/pata_at32.c 2010-08-26 19:47:12.000000000 -0400
21226 +++ linux-2.6.35.4/drivers/ata/pata_at32.c 2010-09-17 20:12:09.000000000 -0400
21227 @@ -173,7 +173,7 @@ static struct scsi_host_template at32_sh
21228 ATA_PIO_SHT(DRV_NAME),
21229 };
21230
21231 -static struct ata_port_operations at32_port_ops = {
21232 +static const struct ata_port_operations at32_port_ops = {
21233 .inherits = &ata_sff_port_ops,
21234 .cable_detect = ata_cable_40wire,
21235 .set_piomode = pata_at32_set_piomode,
21236 diff -urNp linux-2.6.35.4/drivers/ata/pata_at91.c linux-2.6.35.4/drivers/ata/pata_at91.c
21237 --- linux-2.6.35.4/drivers/ata/pata_at91.c 2010-08-26 19:47:12.000000000 -0400
21238 +++ linux-2.6.35.4/drivers/ata/pata_at91.c 2010-09-17 20:12:09.000000000 -0400
21239 @@ -196,7 +196,7 @@ static struct scsi_host_template pata_at
21240 ATA_PIO_SHT(DRV_NAME),
21241 };
21242
21243 -static struct ata_port_operations pata_at91_port_ops = {
21244 +static const struct ata_port_operations pata_at91_port_ops = {
21245 .inherits = &ata_sff_port_ops,
21246
21247 .sff_data_xfer = pata_at91_data_xfer_noirq,
21248 diff -urNp linux-2.6.35.4/drivers/ata/pata_atiixp.c linux-2.6.35.4/drivers/ata/pata_atiixp.c
21249 --- linux-2.6.35.4/drivers/ata/pata_atiixp.c 2010-08-26 19:47:12.000000000 -0400
21250 +++ linux-2.6.35.4/drivers/ata/pata_atiixp.c 2010-09-17 20:12:09.000000000 -0400
21251 @@ -214,7 +214,7 @@ static struct scsi_host_template atiixp_
21252 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
21253 };
21254
21255 -static struct ata_port_operations atiixp_port_ops = {
21256 +static const struct ata_port_operations atiixp_port_ops = {
21257 .inherits = &ata_bmdma_port_ops,
21258
21259 .qc_prep = ata_bmdma_dumb_qc_prep,
21260 diff -urNp linux-2.6.35.4/drivers/ata/pata_atp867x.c linux-2.6.35.4/drivers/ata/pata_atp867x.c
21261 --- linux-2.6.35.4/drivers/ata/pata_atp867x.c 2010-08-26 19:47:12.000000000 -0400
21262 +++ linux-2.6.35.4/drivers/ata/pata_atp867x.c 2010-09-17 20:12:09.000000000 -0400
21263 @@ -275,7 +275,7 @@ static struct scsi_host_template atp867x
21264 ATA_BMDMA_SHT(DRV_NAME),
21265 };
21266
21267 -static struct ata_port_operations atp867x_ops = {
21268 +static const struct ata_port_operations atp867x_ops = {
21269 .inherits = &ata_bmdma_port_ops,
21270 .cable_detect = atp867x_cable_detect,
21271 .set_piomode = atp867x_set_piomode,
21272 diff -urNp linux-2.6.35.4/drivers/ata/pata_bf54x.c linux-2.6.35.4/drivers/ata/pata_bf54x.c
21273 --- linux-2.6.35.4/drivers/ata/pata_bf54x.c 2010-08-26 19:47:12.000000000 -0400
21274 +++ linux-2.6.35.4/drivers/ata/pata_bf54x.c 2010-09-17 20:12:09.000000000 -0400
21275 @@ -1420,7 +1420,7 @@ static struct scsi_host_template bfin_sh
21276 .dma_boundary = ATA_DMA_BOUNDARY,
21277 };
21278
21279 -static struct ata_port_operations bfin_pata_ops = {
21280 +static const struct ata_port_operations bfin_pata_ops = {
21281 .inherits = &ata_bmdma_port_ops,
21282
21283 .set_piomode = bfin_set_piomode,
21284 diff -urNp linux-2.6.35.4/drivers/ata/pata_cmd640.c linux-2.6.35.4/drivers/ata/pata_cmd640.c
21285 --- linux-2.6.35.4/drivers/ata/pata_cmd640.c 2010-08-26 19:47:12.000000000 -0400
21286 +++ linux-2.6.35.4/drivers/ata/pata_cmd640.c 2010-09-17 20:12:09.000000000 -0400
21287 @@ -165,7 +165,7 @@ static struct scsi_host_template cmd640_
21288 ATA_PIO_SHT(DRV_NAME),
21289 };
21290
21291 -static struct ata_port_operations cmd640_port_ops = {
21292 +static const struct ata_port_operations cmd640_port_ops = {
21293 .inherits = &ata_sff_port_ops,
21294 /* In theory xfer_noirq is not needed once we kill the prefetcher */
21295 .sff_data_xfer = ata_sff_data_xfer_noirq,
21296 diff -urNp linux-2.6.35.4/drivers/ata/pata_cmd64x.c linux-2.6.35.4/drivers/ata/pata_cmd64x.c
21297 --- linux-2.6.35.4/drivers/ata/pata_cmd64x.c 2010-08-26 19:47:12.000000000 -0400
21298 +++ linux-2.6.35.4/drivers/ata/pata_cmd64x.c 2010-09-17 20:12:09.000000000 -0400
21299 @@ -274,18 +274,18 @@ static const struct ata_port_operations
21300 .set_dmamode = cmd64x_set_dmamode,
21301 };
21302
21303 -static struct ata_port_operations cmd64x_port_ops = {
21304 +static const struct ata_port_operations cmd64x_port_ops = {
21305 .inherits = &cmd64x_base_ops,
21306 .cable_detect = ata_cable_40wire,
21307 };
21308
21309 -static struct ata_port_operations cmd646r1_port_ops = {
21310 +static const struct ata_port_operations cmd646r1_port_ops = {
21311 .inherits = &cmd64x_base_ops,
21312 .bmdma_stop = cmd646r1_bmdma_stop,
21313 .cable_detect = ata_cable_40wire,
21314 };
21315
21316 -static struct ata_port_operations cmd648_port_ops = {
21317 +static const struct ata_port_operations cmd648_port_ops = {
21318 .inherits = &cmd64x_base_ops,
21319 .bmdma_stop = cmd648_bmdma_stop,
21320 .cable_detect = cmd648_cable_detect,
21321 diff -urNp linux-2.6.35.4/drivers/ata/pata_cs5520.c linux-2.6.35.4/drivers/ata/pata_cs5520.c
21322 --- linux-2.6.35.4/drivers/ata/pata_cs5520.c 2010-08-26 19:47:12.000000000 -0400
21323 +++ linux-2.6.35.4/drivers/ata/pata_cs5520.c 2010-09-17 20:12:09.000000000 -0400
21324 @@ -108,7 +108,7 @@ static struct scsi_host_template cs5520_
21325 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
21326 };
21327
21328 -static struct ata_port_operations cs5520_port_ops = {
21329 +static const struct ata_port_operations cs5520_port_ops = {
21330 .inherits = &ata_bmdma_port_ops,
21331 .qc_prep = ata_bmdma_dumb_qc_prep,
21332 .cable_detect = ata_cable_40wire,
21333 diff -urNp linux-2.6.35.4/drivers/ata/pata_cs5530.c linux-2.6.35.4/drivers/ata/pata_cs5530.c
21334 --- linux-2.6.35.4/drivers/ata/pata_cs5530.c 2010-08-26 19:47:12.000000000 -0400
21335 +++ linux-2.6.35.4/drivers/ata/pata_cs5530.c 2010-09-17 20:12:09.000000000 -0400
21336 @@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_
21337 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
21338 };
21339
21340 -static struct ata_port_operations cs5530_port_ops = {
21341 +static const struct ata_port_operations cs5530_port_ops = {
21342 .inherits = &ata_bmdma_port_ops,
21343
21344 .qc_prep = ata_bmdma_dumb_qc_prep,
21345 diff -urNp linux-2.6.35.4/drivers/ata/pata_cs5535.c linux-2.6.35.4/drivers/ata/pata_cs5535.c
21346 --- linux-2.6.35.4/drivers/ata/pata_cs5535.c 2010-08-26 19:47:12.000000000 -0400
21347 +++ linux-2.6.35.4/drivers/ata/pata_cs5535.c 2010-09-17 20:12:09.000000000 -0400
21348 @@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_
21349 ATA_BMDMA_SHT(DRV_NAME),
21350 };
21351
21352 -static struct ata_port_operations cs5535_port_ops = {
21353 +static const struct ata_port_operations cs5535_port_ops = {
21354 .inherits = &ata_bmdma_port_ops,
21355 .cable_detect = cs5535_cable_detect,
21356 .set_piomode = cs5535_set_piomode,
21357 diff -urNp linux-2.6.35.4/drivers/ata/pata_cs5536.c linux-2.6.35.4/drivers/ata/pata_cs5536.c
21358 --- linux-2.6.35.4/drivers/ata/pata_cs5536.c 2010-08-26 19:47:12.000000000 -0400
21359 +++ linux-2.6.35.4/drivers/ata/pata_cs5536.c 2010-09-17 20:12:09.000000000 -0400
21360 @@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_
21361 ATA_BMDMA_SHT(DRV_NAME),
21362 };
21363
21364 -static struct ata_port_operations cs5536_port_ops = {
21365 +static const struct ata_port_operations cs5536_port_ops = {
21366 .inherits = &ata_bmdma32_port_ops,
21367 .cable_detect = cs5536_cable_detect,
21368 .set_piomode = cs5536_set_piomode,
21369 diff -urNp linux-2.6.35.4/drivers/ata/pata_cypress.c linux-2.6.35.4/drivers/ata/pata_cypress.c
21370 --- linux-2.6.35.4/drivers/ata/pata_cypress.c 2010-08-26 19:47:12.000000000 -0400
21371 +++ linux-2.6.35.4/drivers/ata/pata_cypress.c 2010-09-17 20:12:09.000000000 -0400
21372 @@ -115,7 +115,7 @@ static struct scsi_host_template cy82c69
21373 ATA_BMDMA_SHT(DRV_NAME),
21374 };
21375
21376 -static struct ata_port_operations cy82c693_port_ops = {
21377 +static const struct ata_port_operations cy82c693_port_ops = {
21378 .inherits = &ata_bmdma_port_ops,
21379 .cable_detect = ata_cable_40wire,
21380 .set_piomode = cy82c693_set_piomode,
21381 diff -urNp linux-2.6.35.4/drivers/ata/pata_efar.c linux-2.6.35.4/drivers/ata/pata_efar.c
21382 --- linux-2.6.35.4/drivers/ata/pata_efar.c 2010-08-26 19:47:12.000000000 -0400
21383 +++ linux-2.6.35.4/drivers/ata/pata_efar.c 2010-09-17 20:12:09.000000000 -0400
21384 @@ -238,7 +238,7 @@ static struct scsi_host_template efar_sh
21385 ATA_BMDMA_SHT(DRV_NAME),
21386 };
21387
21388 -static struct ata_port_operations efar_ops = {
21389 +static const struct ata_port_operations efar_ops = {
21390 .inherits = &ata_bmdma_port_ops,
21391 .cable_detect = efar_cable_detect,
21392 .set_piomode = efar_set_piomode,
21393 diff -urNp linux-2.6.35.4/drivers/ata/pata_hpt366.c linux-2.6.35.4/drivers/ata/pata_hpt366.c
21394 --- linux-2.6.35.4/drivers/ata/pata_hpt366.c 2010-08-26 19:47:12.000000000 -0400
21395 +++ linux-2.6.35.4/drivers/ata/pata_hpt366.c 2010-09-17 20:12:09.000000000 -0400
21396 @@ -269,7 +269,7 @@ static struct scsi_host_template hpt36x_
21397 * Configuration for HPT366/68
21398 */
21399
21400 -static struct ata_port_operations hpt366_port_ops = {
21401 +static const struct ata_port_operations hpt366_port_ops = {
21402 .inherits = &ata_bmdma_port_ops,
21403 .cable_detect = hpt36x_cable_detect,
21404 .mode_filter = hpt366_filter,
21405 diff -urNp linux-2.6.35.4/drivers/ata/pata_hpt37x.c linux-2.6.35.4/drivers/ata/pata_hpt37x.c
21406 --- linux-2.6.35.4/drivers/ata/pata_hpt37x.c 2010-08-26 19:47:12.000000000 -0400
21407 +++ linux-2.6.35.4/drivers/ata/pata_hpt37x.c 2010-09-17 20:12:09.000000000 -0400
21408 @@ -564,7 +564,7 @@ static struct scsi_host_template hpt37x_
21409 * Configuration for HPT370
21410 */
21411
21412 -static struct ata_port_operations hpt370_port_ops = {
21413 +static const struct ata_port_operations hpt370_port_ops = {
21414 .inherits = &ata_bmdma_port_ops,
21415
21416 .bmdma_stop = hpt370_bmdma_stop,
21417 @@ -580,7 +580,7 @@ static struct ata_port_operations hpt370
21418 * Configuration for HPT370A. Close to 370 but less filters
21419 */
21420
21421 -static struct ata_port_operations hpt370a_port_ops = {
21422 +static const struct ata_port_operations hpt370a_port_ops = {
21423 .inherits = &hpt370_port_ops,
21424 .mode_filter = hpt370a_filter,
21425 };
21426 @@ -590,7 +590,7 @@ static struct ata_port_operations hpt370
21427 * and DMA mode setting functionality.
21428 */
21429
21430 -static struct ata_port_operations hpt372_port_ops = {
21431 +static const struct ata_port_operations hpt372_port_ops = {
21432 .inherits = &ata_bmdma_port_ops,
21433
21434 .bmdma_stop = hpt37x_bmdma_stop,
21435 @@ -606,7 +606,7 @@ static struct ata_port_operations hpt372
21436 * but we have a different cable detection procedure for function 1.
21437 */
21438
21439 -static struct ata_port_operations hpt374_fn1_port_ops = {
21440 +static const struct ata_port_operations hpt374_fn1_port_ops = {
21441 .inherits = &hpt372_port_ops,
21442 .cable_detect = hpt374_fn1_cable_detect,
21443 .prereset = hpt37x_pre_reset,
21444 diff -urNp linux-2.6.35.4/drivers/ata/pata_hpt3x2n.c linux-2.6.35.4/drivers/ata/pata_hpt3x2n.c
21445 --- linux-2.6.35.4/drivers/ata/pata_hpt3x2n.c 2010-08-26 19:47:12.000000000 -0400
21446 +++ linux-2.6.35.4/drivers/ata/pata_hpt3x2n.c 2010-09-17 20:12:09.000000000 -0400
21447 @@ -331,7 +331,7 @@ static struct scsi_host_template hpt3x2n
21448 * Configuration for HPT3x2n.
21449 */
21450
21451 -static struct ata_port_operations hpt3x2n_port_ops = {
21452 +static const struct ata_port_operations hpt3x2n_port_ops = {
21453 .inherits = &ata_bmdma_port_ops,
21454
21455 .bmdma_stop = hpt3x2n_bmdma_stop,
21456 diff -urNp linux-2.6.35.4/drivers/ata/pata_hpt3x3.c linux-2.6.35.4/drivers/ata/pata_hpt3x3.c
21457 --- linux-2.6.35.4/drivers/ata/pata_hpt3x3.c 2010-08-26 19:47:12.000000000 -0400
21458 +++ linux-2.6.35.4/drivers/ata/pata_hpt3x3.c 2010-09-17 20:12:09.000000000 -0400
21459 @@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_
21460 ATA_BMDMA_SHT(DRV_NAME),
21461 };
21462
21463 -static struct ata_port_operations hpt3x3_port_ops = {
21464 +static const struct ata_port_operations hpt3x3_port_ops = {
21465 .inherits = &ata_bmdma_port_ops,
21466 .cable_detect = ata_cable_40wire,
21467 .set_piomode = hpt3x3_set_piomode,
21468 diff -urNp linux-2.6.35.4/drivers/ata/pata_icside.c linux-2.6.35.4/drivers/ata/pata_icside.c
21469 --- linux-2.6.35.4/drivers/ata/pata_icside.c 2010-08-26 19:47:12.000000000 -0400
21470 +++ linux-2.6.35.4/drivers/ata/pata_icside.c 2010-09-17 20:12:09.000000000 -0400
21471 @@ -320,7 +320,7 @@ static void pata_icside_postreset(struct
21472 }
21473 }
21474
21475 -static struct ata_port_operations pata_icside_port_ops = {
21476 +static const struct ata_port_operations pata_icside_port_ops = {
21477 .inherits = &ata_bmdma_port_ops,
21478 /* no need to build any PRD tables for DMA */
21479 .qc_prep = ata_noop_qc_prep,
21480 diff -urNp linux-2.6.35.4/drivers/ata/pata_isapnp.c linux-2.6.35.4/drivers/ata/pata_isapnp.c
21481 --- linux-2.6.35.4/drivers/ata/pata_isapnp.c 2010-08-26 19:47:12.000000000 -0400
21482 +++ linux-2.6.35.4/drivers/ata/pata_isapnp.c 2010-09-17 20:12:09.000000000 -0400
21483 @@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_
21484 ATA_PIO_SHT(DRV_NAME),
21485 };
21486
21487 -static struct ata_port_operations isapnp_port_ops = {
21488 +static const struct ata_port_operations isapnp_port_ops = {
21489 .inherits = &ata_sff_port_ops,
21490 .cable_detect = ata_cable_40wire,
21491 };
21492
21493 -static struct ata_port_operations isapnp_noalt_port_ops = {
21494 +static const struct ata_port_operations isapnp_noalt_port_ops = {
21495 .inherits = &ata_sff_port_ops,
21496 .cable_detect = ata_cable_40wire,
21497 /* No altstatus so we don't want to use the lost interrupt poll */
21498 diff -urNp linux-2.6.35.4/drivers/ata/pata_it8213.c linux-2.6.35.4/drivers/ata/pata_it8213.c
21499 --- linux-2.6.35.4/drivers/ata/pata_it8213.c 2010-08-26 19:47:12.000000000 -0400
21500 +++ linux-2.6.35.4/drivers/ata/pata_it8213.c 2010-09-17 20:12:09.000000000 -0400
21501 @@ -233,7 +233,7 @@ static struct scsi_host_template it8213_
21502 };
21503
21504
21505 -static struct ata_port_operations it8213_ops = {
21506 +static const struct ata_port_operations it8213_ops = {
21507 .inherits = &ata_bmdma_port_ops,
21508 .cable_detect = it8213_cable_detect,
21509 .set_piomode = it8213_set_piomode,
21510 diff -urNp linux-2.6.35.4/drivers/ata/pata_it821x.c linux-2.6.35.4/drivers/ata/pata_it821x.c
21511 --- linux-2.6.35.4/drivers/ata/pata_it821x.c 2010-08-26 19:47:12.000000000 -0400
21512 +++ linux-2.6.35.4/drivers/ata/pata_it821x.c 2010-09-17 20:12:09.000000000 -0400
21513 @@ -801,7 +801,7 @@ static struct scsi_host_template it821x_
21514 ATA_BMDMA_SHT(DRV_NAME),
21515 };
21516
21517 -static struct ata_port_operations it821x_smart_port_ops = {
21518 +static const struct ata_port_operations it821x_smart_port_ops = {
21519 .inherits = &ata_bmdma_port_ops,
21520
21521 .check_atapi_dma= it821x_check_atapi_dma,
21522 @@ -815,7 +815,7 @@ static struct ata_port_operations it821x
21523 .port_start = it821x_port_start,
21524 };
21525
21526 -static struct ata_port_operations it821x_passthru_port_ops = {
21527 +static const struct ata_port_operations it821x_passthru_port_ops = {
21528 .inherits = &ata_bmdma_port_ops,
21529
21530 .check_atapi_dma= it821x_check_atapi_dma,
21531 @@ -831,7 +831,7 @@ static struct ata_port_operations it821x
21532 .port_start = it821x_port_start,
21533 };
21534
21535 -static struct ata_port_operations it821x_rdc_port_ops = {
21536 +static const struct ata_port_operations it821x_rdc_port_ops = {
21537 .inherits = &ata_bmdma_port_ops,
21538
21539 .check_atapi_dma= it821x_check_atapi_dma,
21540 diff -urNp linux-2.6.35.4/drivers/ata/pata_ixp4xx_cf.c linux-2.6.35.4/drivers/ata/pata_ixp4xx_cf.c
21541 --- linux-2.6.35.4/drivers/ata/pata_ixp4xx_cf.c 2010-08-26 19:47:12.000000000 -0400
21542 +++ linux-2.6.35.4/drivers/ata/pata_ixp4xx_cf.c 2010-09-17 20:12:09.000000000 -0400
21543 @@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_
21544 ATA_PIO_SHT(DRV_NAME),
21545 };
21546
21547 -static struct ata_port_operations ixp4xx_port_ops = {
21548 +static const struct ata_port_operations ixp4xx_port_ops = {
21549 .inherits = &ata_sff_port_ops,
21550 .sff_data_xfer = ixp4xx_mmio_data_xfer,
21551 .cable_detect = ata_cable_40wire,
21552 diff -urNp linux-2.6.35.4/drivers/ata/pata_jmicron.c linux-2.6.35.4/drivers/ata/pata_jmicron.c
21553 --- linux-2.6.35.4/drivers/ata/pata_jmicron.c 2010-08-26 19:47:12.000000000 -0400
21554 +++ linux-2.6.35.4/drivers/ata/pata_jmicron.c 2010-09-17 20:12:09.000000000 -0400
21555 @@ -111,7 +111,7 @@ static struct scsi_host_template jmicron
21556 ATA_BMDMA_SHT(DRV_NAME),
21557 };
21558
21559 -static struct ata_port_operations jmicron_ops = {
21560 +static const struct ata_port_operations jmicron_ops = {
21561 .inherits = &ata_bmdma_port_ops,
21562 .prereset = jmicron_pre_reset,
21563 };
21564 diff -urNp linux-2.6.35.4/drivers/ata/pata_legacy.c linux-2.6.35.4/drivers/ata/pata_legacy.c
21565 --- linux-2.6.35.4/drivers/ata/pata_legacy.c 2010-08-26 19:47:12.000000000 -0400
21566 +++ linux-2.6.35.4/drivers/ata/pata_legacy.c 2010-09-17 20:12:09.000000000 -0400
21567 @@ -113,7 +113,7 @@ struct legacy_probe {
21568
21569 struct legacy_controller {
21570 const char *name;
21571 - struct ata_port_operations *ops;
21572 + const struct ata_port_operations *ops;
21573 unsigned int pio_mask;
21574 unsigned int flags;
21575 unsigned int pflags;
21576 @@ -230,12 +230,12 @@ static const struct ata_port_operations
21577 * pio_mask as well.
21578 */
21579
21580 -static struct ata_port_operations simple_port_ops = {
21581 +static const struct ata_port_operations simple_port_ops = {
21582 .inherits = &legacy_base_port_ops,
21583 .sff_data_xfer = ata_sff_data_xfer_noirq,
21584 };
21585
21586 -static struct ata_port_operations legacy_port_ops = {
21587 +static const struct ata_port_operations legacy_port_ops = {
21588 .inherits = &legacy_base_port_ops,
21589 .sff_data_xfer = ata_sff_data_xfer_noirq,
21590 .set_mode = legacy_set_mode,
21591 @@ -331,7 +331,7 @@ static unsigned int pdc_data_xfer_vlb(st
21592 return buflen;
21593 }
21594
21595 -static struct ata_port_operations pdc20230_port_ops = {
21596 +static const struct ata_port_operations pdc20230_port_ops = {
21597 .inherits = &legacy_base_port_ops,
21598 .set_piomode = pdc20230_set_piomode,
21599 .sff_data_xfer = pdc_data_xfer_vlb,
21600 @@ -364,7 +364,7 @@ static void ht6560a_set_piomode(struct a
21601 ioread8(ap->ioaddr.status_addr);
21602 }
21603
21604 -static struct ata_port_operations ht6560a_port_ops = {
21605 +static const struct ata_port_operations ht6560a_port_ops = {
21606 .inherits = &legacy_base_port_ops,
21607 .set_piomode = ht6560a_set_piomode,
21608 };
21609 @@ -407,7 +407,7 @@ static void ht6560b_set_piomode(struct a
21610 ioread8(ap->ioaddr.status_addr);
21611 }
21612
21613 -static struct ata_port_operations ht6560b_port_ops = {
21614 +static const struct ata_port_operations ht6560b_port_ops = {
21615 .inherits = &legacy_base_port_ops,
21616 .set_piomode = ht6560b_set_piomode,
21617 };
21618 @@ -506,7 +506,7 @@ static void opti82c611a_set_piomode(stru
21619 }
21620
21621
21622 -static struct ata_port_operations opti82c611a_port_ops = {
21623 +static const struct ata_port_operations opti82c611a_port_ops = {
21624 .inherits = &legacy_base_port_ops,
21625 .set_piomode = opti82c611a_set_piomode,
21626 };
21627 @@ -616,7 +616,7 @@ static unsigned int opti82c46x_qc_issue(
21628 return ata_sff_qc_issue(qc);
21629 }
21630
21631 -static struct ata_port_operations opti82c46x_port_ops = {
21632 +static const struct ata_port_operations opti82c46x_port_ops = {
21633 .inherits = &legacy_base_port_ops,
21634 .set_piomode = opti82c46x_set_piomode,
21635 .qc_issue = opti82c46x_qc_issue,
21636 @@ -778,20 +778,20 @@ static int qdi_port(struct platform_devi
21637 return 0;
21638 }
21639
21640 -static struct ata_port_operations qdi6500_port_ops = {
21641 +static const struct ata_port_operations qdi6500_port_ops = {
21642 .inherits = &legacy_base_port_ops,
21643 .set_piomode = qdi6500_set_piomode,
21644 .qc_issue = qdi_qc_issue,
21645 .sff_data_xfer = vlb32_data_xfer,
21646 };
21647
21648 -static struct ata_port_operations qdi6580_port_ops = {
21649 +static const struct ata_port_operations qdi6580_port_ops = {
21650 .inherits = &legacy_base_port_ops,
21651 .set_piomode = qdi6580_set_piomode,
21652 .sff_data_xfer = vlb32_data_xfer,
21653 };
21654
21655 -static struct ata_port_operations qdi6580dp_port_ops = {
21656 +static const struct ata_port_operations qdi6580dp_port_ops = {
21657 .inherits = &legacy_base_port_ops,
21658 .set_piomode = qdi6580dp_set_piomode,
21659 .qc_issue = qdi_qc_issue,
21660 @@ -863,7 +863,7 @@ static int winbond_port(struct platform_
21661 return 0;
21662 }
21663
21664 -static struct ata_port_operations winbond_port_ops = {
21665 +static const struct ata_port_operations winbond_port_ops = {
21666 .inherits = &legacy_base_port_ops,
21667 .set_piomode = winbond_set_piomode,
21668 .sff_data_xfer = vlb32_data_xfer,
21669 @@ -986,7 +986,7 @@ static __init int legacy_init_one(struct
21670 int pio_modes = controller->pio_mask;
21671 unsigned long io = probe->port;
21672 u32 mask = (1 << probe->slot);
21673 - struct ata_port_operations *ops = controller->ops;
21674 + const struct ata_port_operations *ops = controller->ops;
21675 struct legacy_data *ld = &legacy_data[probe->slot];
21676 struct ata_host *host = NULL;
21677 struct ata_port *ap;
21678 diff -urNp linux-2.6.35.4/drivers/ata/pata_macio.c linux-2.6.35.4/drivers/ata/pata_macio.c
21679 --- linux-2.6.35.4/drivers/ata/pata_macio.c 2010-08-26 19:47:12.000000000 -0400
21680 +++ linux-2.6.35.4/drivers/ata/pata_macio.c 2010-09-17 20:12:09.000000000 -0400
21681 @@ -918,9 +918,8 @@ static struct scsi_host_template pata_ma
21682 .slave_configure = pata_macio_slave_config,
21683 };
21684
21685 -static struct ata_port_operations pata_macio_ops = {
21686 +static const struct ata_port_operations pata_macio_ops = {
21687 .inherits = &ata_bmdma_port_ops,
21688 -
21689 .freeze = pata_macio_freeze,
21690 .set_piomode = pata_macio_set_timings,
21691 .set_dmamode = pata_macio_set_timings,
21692 diff -urNp linux-2.6.35.4/drivers/ata/pata_marvell.c linux-2.6.35.4/drivers/ata/pata_marvell.c
21693 --- linux-2.6.35.4/drivers/ata/pata_marvell.c 2010-08-26 19:47:12.000000000 -0400
21694 +++ linux-2.6.35.4/drivers/ata/pata_marvell.c 2010-09-17 20:12:09.000000000 -0400
21695 @@ -100,7 +100,7 @@ static struct scsi_host_template marvell
21696 ATA_BMDMA_SHT(DRV_NAME),
21697 };
21698
21699 -static struct ata_port_operations marvell_ops = {
21700 +static const struct ata_port_operations marvell_ops = {
21701 .inherits = &ata_bmdma_port_ops,
21702 .cable_detect = marvell_cable_detect,
21703 .prereset = marvell_pre_reset,
21704 diff -urNp linux-2.6.35.4/drivers/ata/pata_mpc52xx.c linux-2.6.35.4/drivers/ata/pata_mpc52xx.c
21705 --- linux-2.6.35.4/drivers/ata/pata_mpc52xx.c 2010-08-26 19:47:12.000000000 -0400
21706 +++ linux-2.6.35.4/drivers/ata/pata_mpc52xx.c 2010-09-17 20:12:09.000000000 -0400
21707 @@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx
21708 ATA_PIO_SHT(DRV_NAME),
21709 };
21710
21711 -static struct ata_port_operations mpc52xx_ata_port_ops = {
21712 +static const struct ata_port_operations mpc52xx_ata_port_ops = {
21713 .inherits = &ata_sff_port_ops,
21714 .sff_dev_select = mpc52xx_ata_dev_select,
21715 .set_piomode = mpc52xx_ata_set_piomode,
21716 diff -urNp linux-2.6.35.4/drivers/ata/pata_mpiix.c linux-2.6.35.4/drivers/ata/pata_mpiix.c
21717 --- linux-2.6.35.4/drivers/ata/pata_mpiix.c 2010-08-26 19:47:12.000000000 -0400
21718 +++ linux-2.6.35.4/drivers/ata/pata_mpiix.c 2010-09-17 20:12:09.000000000 -0400
21719 @@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_s
21720 ATA_PIO_SHT(DRV_NAME),
21721 };
21722
21723 -static struct ata_port_operations mpiix_port_ops = {
21724 +static const struct ata_port_operations mpiix_port_ops = {
21725 .inherits = &ata_sff_port_ops,
21726 .qc_issue = mpiix_qc_issue,
21727 .cable_detect = ata_cable_40wire,
21728 diff -urNp linux-2.6.35.4/drivers/ata/pata_netcell.c linux-2.6.35.4/drivers/ata/pata_netcell.c
21729 --- linux-2.6.35.4/drivers/ata/pata_netcell.c 2010-08-26 19:47:12.000000000 -0400
21730 +++ linux-2.6.35.4/drivers/ata/pata_netcell.c 2010-09-17 20:12:09.000000000 -0400
21731 @@ -34,7 +34,7 @@ static struct scsi_host_template netcell
21732 ATA_BMDMA_SHT(DRV_NAME),
21733 };
21734
21735 -static struct ata_port_operations netcell_ops = {
21736 +static const struct ata_port_operations netcell_ops = {
21737 .inherits = &ata_bmdma_port_ops,
21738 .cable_detect = ata_cable_80wire,
21739 .read_id = netcell_read_id,
21740 diff -urNp linux-2.6.35.4/drivers/ata/pata_ninja32.c linux-2.6.35.4/drivers/ata/pata_ninja32.c
21741 --- linux-2.6.35.4/drivers/ata/pata_ninja32.c 2010-08-26 19:47:12.000000000 -0400
21742 +++ linux-2.6.35.4/drivers/ata/pata_ninja32.c 2010-09-17 20:12:09.000000000 -0400
21743 @@ -81,7 +81,7 @@ static struct scsi_host_template ninja32
21744 ATA_BMDMA_SHT(DRV_NAME),
21745 };
21746
21747 -static struct ata_port_operations ninja32_port_ops = {
21748 +static const struct ata_port_operations ninja32_port_ops = {
21749 .inherits = &ata_bmdma_port_ops,
21750 .sff_dev_select = ninja32_dev_select,
21751 .cable_detect = ata_cable_40wire,
21752 diff -urNp linux-2.6.35.4/drivers/ata/pata_ns87410.c linux-2.6.35.4/drivers/ata/pata_ns87410.c
21753 --- linux-2.6.35.4/drivers/ata/pata_ns87410.c 2010-08-26 19:47:12.000000000 -0400
21754 +++ linux-2.6.35.4/drivers/ata/pata_ns87410.c 2010-09-17 20:12:09.000000000 -0400
21755 @@ -132,7 +132,7 @@ static struct scsi_host_template ns87410
21756 ATA_PIO_SHT(DRV_NAME),
21757 };
21758
21759 -static struct ata_port_operations ns87410_port_ops = {
21760 +static const struct ata_port_operations ns87410_port_ops = {
21761 .inherits = &ata_sff_port_ops,
21762 .qc_issue = ns87410_qc_issue,
21763 .cable_detect = ata_cable_40wire,
21764 diff -urNp linux-2.6.35.4/drivers/ata/pata_ns87415.c linux-2.6.35.4/drivers/ata/pata_ns87415.c
21765 --- linux-2.6.35.4/drivers/ata/pata_ns87415.c 2010-08-26 19:47:12.000000000 -0400
21766 +++ linux-2.6.35.4/drivers/ata/pata_ns87415.c 2010-09-17 20:12:09.000000000 -0400
21767 @@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct at
21768 }
21769 #endif /* 87560 SuperIO Support */
21770
21771 -static struct ata_port_operations ns87415_pata_ops = {
21772 +static const struct ata_port_operations ns87415_pata_ops = {
21773 .inherits = &ata_bmdma_port_ops,
21774
21775 .check_atapi_dma = ns87415_check_atapi_dma,
21776 @@ -313,7 +313,7 @@ static struct ata_port_operations ns8741
21777 };
21778
21779 #if defined(CONFIG_SUPERIO)
21780 -static struct ata_port_operations ns87560_pata_ops = {
21781 +static const struct ata_port_operations ns87560_pata_ops = {
21782 .inherits = &ns87415_pata_ops,
21783 .sff_tf_read = ns87560_tf_read,
21784 .sff_check_status = ns87560_check_status,
21785 diff -urNp linux-2.6.35.4/drivers/ata/pata_octeon_cf.c linux-2.6.35.4/drivers/ata/pata_octeon_cf.c
21786 --- linux-2.6.35.4/drivers/ata/pata_octeon_cf.c 2010-08-26 19:47:12.000000000 -0400
21787 +++ linux-2.6.35.4/drivers/ata/pata_octeon_cf.c 2010-09-17 20:12:09.000000000 -0400
21788 @@ -782,6 +782,7 @@ static unsigned int octeon_cf_qc_issue(s
21789 return 0;
21790 }
21791
21792 +/* cannot be const */
21793 static struct ata_port_operations octeon_cf_ops = {
21794 .inherits = &ata_sff_port_ops,
21795 .check_atapi_dma = octeon_cf_check_atapi_dma,
21796 diff -urNp linux-2.6.35.4/drivers/ata/pata_oldpiix.c linux-2.6.35.4/drivers/ata/pata_oldpiix.c
21797 --- linux-2.6.35.4/drivers/ata/pata_oldpiix.c 2010-08-26 19:47:12.000000000 -0400
21798 +++ linux-2.6.35.4/drivers/ata/pata_oldpiix.c 2010-09-17 20:12:09.000000000 -0400
21799 @@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix
21800 ATA_BMDMA_SHT(DRV_NAME),
21801 };
21802
21803 -static struct ata_port_operations oldpiix_pata_ops = {
21804 +static const struct ata_port_operations oldpiix_pata_ops = {
21805 .inherits = &ata_bmdma_port_ops,
21806 .qc_issue = oldpiix_qc_issue,
21807 .cable_detect = ata_cable_40wire,
21808 diff -urNp linux-2.6.35.4/drivers/ata/pata_opti.c linux-2.6.35.4/drivers/ata/pata_opti.c
21809 --- linux-2.6.35.4/drivers/ata/pata_opti.c 2010-08-26 19:47:12.000000000 -0400
21810 +++ linux-2.6.35.4/drivers/ata/pata_opti.c 2010-09-17 20:12:09.000000000 -0400
21811 @@ -152,7 +152,7 @@ static struct scsi_host_template opti_sh
21812 ATA_PIO_SHT(DRV_NAME),
21813 };
21814
21815 -static struct ata_port_operations opti_port_ops = {
21816 +static const struct ata_port_operations opti_port_ops = {
21817 .inherits = &ata_sff_port_ops,
21818 .cable_detect = ata_cable_40wire,
21819 .set_piomode = opti_set_piomode,
21820 diff -urNp linux-2.6.35.4/drivers/ata/pata_optidma.c linux-2.6.35.4/drivers/ata/pata_optidma.c
21821 --- linux-2.6.35.4/drivers/ata/pata_optidma.c 2010-08-26 19:47:12.000000000 -0400
21822 +++ linux-2.6.35.4/drivers/ata/pata_optidma.c 2010-09-17 20:12:09.000000000 -0400
21823 @@ -337,7 +337,7 @@ static struct scsi_host_template optidma
21824 ATA_BMDMA_SHT(DRV_NAME),
21825 };
21826
21827 -static struct ata_port_operations optidma_port_ops = {
21828 +static const struct ata_port_operations optidma_port_ops = {
21829 .inherits = &ata_bmdma_port_ops,
21830 .cable_detect = ata_cable_40wire,
21831 .set_piomode = optidma_set_pio_mode,
21832 @@ -346,7 +346,7 @@ static struct ata_port_operations optidm
21833 .prereset = optidma_pre_reset,
21834 };
21835
21836 -static struct ata_port_operations optiplus_port_ops = {
21837 +static const struct ata_port_operations optiplus_port_ops = {
21838 .inherits = &optidma_port_ops,
21839 .set_piomode = optiplus_set_pio_mode,
21840 .set_dmamode = optiplus_set_dma_mode,
21841 diff -urNp linux-2.6.35.4/drivers/ata/pata_palmld.c linux-2.6.35.4/drivers/ata/pata_palmld.c
21842 --- linux-2.6.35.4/drivers/ata/pata_palmld.c 2010-08-26 19:47:12.000000000 -0400
21843 +++ linux-2.6.35.4/drivers/ata/pata_palmld.c 2010-09-17 20:12:09.000000000 -0400
21844 @@ -37,7 +37,7 @@ static struct scsi_host_template palmld_
21845 ATA_PIO_SHT(DRV_NAME),
21846 };
21847
21848 -static struct ata_port_operations palmld_port_ops = {
21849 +static const struct ata_port_operations palmld_port_ops = {
21850 .inherits = &ata_sff_port_ops,
21851 .sff_data_xfer = ata_sff_data_xfer_noirq,
21852 .cable_detect = ata_cable_40wire,
21853 diff -urNp linux-2.6.35.4/drivers/ata/pata_pcmcia.c linux-2.6.35.4/drivers/ata/pata_pcmcia.c
21854 --- linux-2.6.35.4/drivers/ata/pata_pcmcia.c 2010-08-26 19:47:12.000000000 -0400
21855 +++ linux-2.6.35.4/drivers/ata/pata_pcmcia.c 2010-09-17 20:12:09.000000000 -0400
21856 @@ -153,14 +153,14 @@ static struct scsi_host_template pcmcia_
21857 ATA_PIO_SHT(DRV_NAME),
21858 };
21859
21860 -static struct ata_port_operations pcmcia_port_ops = {
21861 +static const struct ata_port_operations pcmcia_port_ops = {
21862 .inherits = &ata_sff_port_ops,
21863 .sff_data_xfer = ata_sff_data_xfer_noirq,
21864 .cable_detect = ata_cable_40wire,
21865 .set_mode = pcmcia_set_mode,
21866 };
21867
21868 -static struct ata_port_operations pcmcia_8bit_port_ops = {
21869 +static const struct ata_port_operations pcmcia_8bit_port_ops = {
21870 .inherits = &ata_sff_port_ops,
21871 .sff_data_xfer = ata_data_xfer_8bit,
21872 .cable_detect = ata_cable_40wire,
21873 @@ -243,7 +243,7 @@ static int pcmcia_init_one(struct pcmcia
21874 unsigned long io_base, ctl_base;
21875 void __iomem *io_addr, *ctl_addr;
21876 int n_ports = 1;
21877 - struct ata_port_operations *ops = &pcmcia_port_ops;
21878 + const struct ata_port_operations *ops = &pcmcia_port_ops;
21879
21880 /* Set up attributes in order to probe card and get resources */
21881 pdev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
21882 diff -urNp linux-2.6.35.4/drivers/ata/pata_pdc2027x.c linux-2.6.35.4/drivers/ata/pata_pdc2027x.c
21883 --- linux-2.6.35.4/drivers/ata/pata_pdc2027x.c 2010-08-26 19:47:12.000000000 -0400
21884 +++ linux-2.6.35.4/drivers/ata/pata_pdc2027x.c 2010-09-17 20:12:09.000000000 -0400
21885 @@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027
21886 ATA_BMDMA_SHT(DRV_NAME),
21887 };
21888
21889 -static struct ata_port_operations pdc2027x_pata100_ops = {
21890 +static const struct ata_port_operations pdc2027x_pata100_ops = {
21891 .inherits = &ata_bmdma_port_ops,
21892 .check_atapi_dma = pdc2027x_check_atapi_dma,
21893 .cable_detect = pdc2027x_cable_detect,
21894 .prereset = pdc2027x_prereset,
21895 };
21896
21897 -static struct ata_port_operations pdc2027x_pata133_ops = {
21898 +static const struct ata_port_operations pdc2027x_pata133_ops = {
21899 .inherits = &pdc2027x_pata100_ops,
21900 .mode_filter = pdc2027x_mode_filter,
21901 .set_piomode = pdc2027x_set_piomode,
21902 diff -urNp linux-2.6.35.4/drivers/ata/pata_pdc202xx_old.c linux-2.6.35.4/drivers/ata/pata_pdc202xx_old.c
21903 --- linux-2.6.35.4/drivers/ata/pata_pdc202xx_old.c 2010-08-26 19:47:12.000000000 -0400
21904 +++ linux-2.6.35.4/drivers/ata/pata_pdc202xx_old.c 2010-09-17 20:12:09.000000000 -0400
21905 @@ -274,7 +274,7 @@ static struct scsi_host_template pdc202x
21906 ATA_BMDMA_SHT(DRV_NAME),
21907 };
21908
21909 -static struct ata_port_operations pdc2024x_port_ops = {
21910 +static const struct ata_port_operations pdc2024x_port_ops = {
21911 .inherits = &ata_bmdma_port_ops,
21912
21913 .cable_detect = ata_cable_40wire,
21914 @@ -284,7 +284,7 @@ static struct ata_port_operations pdc202
21915 .sff_exec_command = pdc202xx_exec_command,
21916 };
21917
21918 -static struct ata_port_operations pdc2026x_port_ops = {
21919 +static const struct ata_port_operations pdc2026x_port_ops = {
21920 .inherits = &pdc2024x_port_ops,
21921
21922 .check_atapi_dma = pdc2026x_check_atapi_dma,
21923 diff -urNp linux-2.6.35.4/drivers/ata/pata_piccolo.c linux-2.6.35.4/drivers/ata/pata_piccolo.c
21924 --- linux-2.6.35.4/drivers/ata/pata_piccolo.c 2010-08-26 19:47:12.000000000 -0400
21925 +++ linux-2.6.35.4/drivers/ata/pata_piccolo.c 2010-09-17 20:12:09.000000000 -0400
21926 @@ -67,7 +67,7 @@ static struct scsi_host_template tosh_sh
21927 ATA_BMDMA_SHT(DRV_NAME),
21928 };
21929
21930 -static struct ata_port_operations tosh_port_ops = {
21931 +static const struct ata_port_operations tosh_port_ops = {
21932 .inherits = &ata_bmdma_port_ops,
21933 .cable_detect = ata_cable_unknown,
21934 .set_piomode = tosh_set_piomode,
21935 diff -urNp linux-2.6.35.4/drivers/ata/pata_platform.c linux-2.6.35.4/drivers/ata/pata_platform.c
21936 --- linux-2.6.35.4/drivers/ata/pata_platform.c 2010-08-26 19:47:12.000000000 -0400
21937 +++ linux-2.6.35.4/drivers/ata/pata_platform.c 2010-09-17 20:12:09.000000000 -0400
21938 @@ -48,7 +48,7 @@ static struct scsi_host_template pata_pl
21939 ATA_PIO_SHT(DRV_NAME),
21940 };
21941
21942 -static struct ata_port_operations pata_platform_port_ops = {
21943 +static const struct ata_port_operations pata_platform_port_ops = {
21944 .inherits = &ata_sff_port_ops,
21945 .sff_data_xfer = ata_sff_data_xfer_noirq,
21946 .cable_detect = ata_cable_unknown,
21947 diff -urNp linux-2.6.35.4/drivers/ata/pata_qdi.c linux-2.6.35.4/drivers/ata/pata_qdi.c
21948 --- linux-2.6.35.4/drivers/ata/pata_qdi.c 2010-08-26 19:47:12.000000000 -0400
21949 +++ linux-2.6.35.4/drivers/ata/pata_qdi.c 2010-09-17 20:12:09.000000000 -0400
21950 @@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht
21951 ATA_PIO_SHT(DRV_NAME),
21952 };
21953
21954 -static struct ata_port_operations qdi6500_port_ops = {
21955 +static const struct ata_port_operations qdi6500_port_ops = {
21956 .inherits = &ata_sff_port_ops,
21957 .qc_issue = qdi_qc_issue,
21958 .sff_data_xfer = qdi_data_xfer,
21959 @@ -165,7 +165,7 @@ static struct ata_port_operations qdi650
21960 .set_piomode = qdi6500_set_piomode,
21961 };
21962
21963 -static struct ata_port_operations qdi6580_port_ops = {
21964 +static const struct ata_port_operations qdi6580_port_ops = {
21965 .inherits = &qdi6500_port_ops,
21966 .set_piomode = qdi6580_set_piomode,
21967 };
21968 diff -urNp linux-2.6.35.4/drivers/ata/pata_radisys.c linux-2.6.35.4/drivers/ata/pata_radisys.c
21969 --- linux-2.6.35.4/drivers/ata/pata_radisys.c 2010-08-26 19:47:12.000000000 -0400
21970 +++ linux-2.6.35.4/drivers/ata/pata_radisys.c 2010-09-17 20:12:09.000000000 -0400
21971 @@ -187,7 +187,7 @@ static struct scsi_host_template radisys
21972 ATA_BMDMA_SHT(DRV_NAME),
21973 };
21974
21975 -static struct ata_port_operations radisys_pata_ops = {
21976 +static const struct ata_port_operations radisys_pata_ops = {
21977 .inherits = &ata_bmdma_port_ops,
21978 .qc_issue = radisys_qc_issue,
21979 .cable_detect = ata_cable_unknown,
21980 diff -urNp linux-2.6.35.4/drivers/ata/pata_rb532_cf.c linux-2.6.35.4/drivers/ata/pata_rb532_cf.c
21981 --- linux-2.6.35.4/drivers/ata/pata_rb532_cf.c 2010-08-26 19:47:12.000000000 -0400
21982 +++ linux-2.6.35.4/drivers/ata/pata_rb532_cf.c 2010-09-17 20:12:09.000000000 -0400
21983 @@ -69,7 +69,7 @@ static irqreturn_t rb532_pata_irq_handle
21984 return IRQ_HANDLED;
21985 }
21986
21987 -static struct ata_port_operations rb532_pata_port_ops = {
21988 +static const struct ata_port_operations rb532_pata_port_ops = {
21989 .inherits = &ata_sff_port_ops,
21990 .sff_data_xfer = ata_sff_data_xfer32,
21991 };
21992 diff -urNp linux-2.6.35.4/drivers/ata/pata_rdc.c linux-2.6.35.4/drivers/ata/pata_rdc.c
21993 --- linux-2.6.35.4/drivers/ata/pata_rdc.c 2010-08-26 19:47:12.000000000 -0400
21994 +++ linux-2.6.35.4/drivers/ata/pata_rdc.c 2010-09-17 20:12:09.000000000 -0400
21995 @@ -273,7 +273,7 @@ static void rdc_set_dmamode(struct ata_p
21996 pci_write_config_byte(dev, 0x48, udma_enable);
21997 }
21998
21999 -static struct ata_port_operations rdc_pata_ops = {
22000 +static const struct ata_port_operations rdc_pata_ops = {
22001 .inherits = &ata_bmdma32_port_ops,
22002 .cable_detect = rdc_pata_cable_detect,
22003 .set_piomode = rdc_set_piomode,
22004 diff -urNp linux-2.6.35.4/drivers/ata/pata_rz1000.c linux-2.6.35.4/drivers/ata/pata_rz1000.c
22005 --- linux-2.6.35.4/drivers/ata/pata_rz1000.c 2010-08-26 19:47:12.000000000 -0400
22006 +++ linux-2.6.35.4/drivers/ata/pata_rz1000.c 2010-09-17 20:12:09.000000000 -0400
22007 @@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_
22008 ATA_PIO_SHT(DRV_NAME),
22009 };
22010
22011 -static struct ata_port_operations rz1000_port_ops = {
22012 +static const struct ata_port_operations rz1000_port_ops = {
22013 .inherits = &ata_sff_port_ops,
22014 .cable_detect = ata_cable_40wire,
22015 .set_mode = rz1000_set_mode,
22016 diff -urNp linux-2.6.35.4/drivers/ata/pata_sc1200.c linux-2.6.35.4/drivers/ata/pata_sc1200.c
22017 --- linux-2.6.35.4/drivers/ata/pata_sc1200.c 2010-08-26 19:47:12.000000000 -0400
22018 +++ linux-2.6.35.4/drivers/ata/pata_sc1200.c 2010-09-17 20:12:09.000000000 -0400
22019 @@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_
22020 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
22021 };
22022
22023 -static struct ata_port_operations sc1200_port_ops = {
22024 +static const struct ata_port_operations sc1200_port_ops = {
22025 .inherits = &ata_bmdma_port_ops,
22026 .qc_prep = ata_bmdma_dumb_qc_prep,
22027 .qc_issue = sc1200_qc_issue,
22028 diff -urNp linux-2.6.35.4/drivers/ata/pata_scc.c linux-2.6.35.4/drivers/ata/pata_scc.c
22029 --- linux-2.6.35.4/drivers/ata/pata_scc.c 2010-08-26 19:47:12.000000000 -0400
22030 +++ linux-2.6.35.4/drivers/ata/pata_scc.c 2010-09-17 20:12:09.000000000 -0400
22031 @@ -927,7 +927,7 @@ static struct scsi_host_template scc_sht
22032 ATA_BMDMA_SHT(DRV_NAME),
22033 };
22034
22035 -static struct ata_port_operations scc_pata_ops = {
22036 +static const struct ata_port_operations scc_pata_ops = {
22037 .inherits = &ata_bmdma_port_ops,
22038
22039 .set_piomode = scc_set_piomode,
22040 diff -urNp linux-2.6.35.4/drivers/ata/pata_sch.c linux-2.6.35.4/drivers/ata/pata_sch.c
22041 --- linux-2.6.35.4/drivers/ata/pata_sch.c 2010-08-26 19:47:12.000000000 -0400
22042 +++ linux-2.6.35.4/drivers/ata/pata_sch.c 2010-09-17 20:12:09.000000000 -0400
22043 @@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht
22044 ATA_BMDMA_SHT(DRV_NAME),
22045 };
22046
22047 -static struct ata_port_operations sch_pata_ops = {
22048 +static const struct ata_port_operations sch_pata_ops = {
22049 .inherits = &ata_bmdma_port_ops,
22050 .cable_detect = ata_cable_unknown,
22051 .set_piomode = sch_set_piomode,
22052 diff -urNp linux-2.6.35.4/drivers/ata/pata_serverworks.c linux-2.6.35.4/drivers/ata/pata_serverworks.c
22053 --- linux-2.6.35.4/drivers/ata/pata_serverworks.c 2010-08-26 19:47:12.000000000 -0400
22054 +++ linux-2.6.35.4/drivers/ata/pata_serverworks.c 2010-09-17 20:12:09.000000000 -0400
22055 @@ -300,7 +300,7 @@ static struct scsi_host_template serverw
22056 ATA_BMDMA_SHT(DRV_NAME),
22057 };
22058
22059 -static struct ata_port_operations serverworks_osb4_port_ops = {
22060 +static const struct ata_port_operations serverworks_osb4_port_ops = {
22061 .inherits = &ata_bmdma_port_ops,
22062 .cable_detect = serverworks_cable_detect,
22063 .mode_filter = serverworks_osb4_filter,
22064 @@ -308,7 +308,7 @@ static struct ata_port_operations server
22065 .set_dmamode = serverworks_set_dmamode,
22066 };
22067
22068 -static struct ata_port_operations serverworks_csb_port_ops = {
22069 +static const struct ata_port_operations serverworks_csb_port_ops = {
22070 .inherits = &serverworks_osb4_port_ops,
22071 .mode_filter = serverworks_csb_filter,
22072 };
22073 diff -urNp linux-2.6.35.4/drivers/ata/pata_sil680.c linux-2.6.35.4/drivers/ata/pata_sil680.c
22074 --- linux-2.6.35.4/drivers/ata/pata_sil680.c 2010-08-26 19:47:12.000000000 -0400
22075 +++ linux-2.6.35.4/drivers/ata/pata_sil680.c 2010-09-17 20:12:09.000000000 -0400
22076 @@ -214,8 +214,7 @@ static struct scsi_host_template sil680_
22077 ATA_BMDMA_SHT(DRV_NAME),
22078 };
22079
22080 -
22081 -static struct ata_port_operations sil680_port_ops = {
22082 +static const struct ata_port_operations sil680_port_ops = {
22083 .inherits = &ata_bmdma32_port_ops,
22084 .sff_exec_command = sil680_sff_exec_command,
22085 .cable_detect = sil680_cable_detect,
22086 diff -urNp linux-2.6.35.4/drivers/ata/pata_sis.c linux-2.6.35.4/drivers/ata/pata_sis.c
22087 --- linux-2.6.35.4/drivers/ata/pata_sis.c 2010-08-26 19:47:12.000000000 -0400
22088 +++ linux-2.6.35.4/drivers/ata/pata_sis.c 2010-09-17 20:12:09.000000000 -0400
22089 @@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht
22090 ATA_BMDMA_SHT(DRV_NAME),
22091 };
22092
22093 -static struct ata_port_operations sis_133_for_sata_ops = {
22094 +static const struct ata_port_operations sis_133_for_sata_ops = {
22095 .inherits = &ata_bmdma_port_ops,
22096 .set_piomode = sis_133_set_piomode,
22097 .set_dmamode = sis_133_set_dmamode,
22098 .cable_detect = sis_133_cable_detect,
22099 };
22100
22101 -static struct ata_port_operations sis_base_ops = {
22102 +static const struct ata_port_operations sis_base_ops = {
22103 .inherits = &ata_bmdma_port_ops,
22104 .prereset = sis_pre_reset,
22105 };
22106
22107 -static struct ata_port_operations sis_133_ops = {
22108 +static const struct ata_port_operations sis_133_ops = {
22109 .inherits = &sis_base_ops,
22110 .set_piomode = sis_133_set_piomode,
22111 .set_dmamode = sis_133_set_dmamode,
22112 .cable_detect = sis_133_cable_detect,
22113 };
22114
22115 -static struct ata_port_operations sis_133_early_ops = {
22116 +static const struct ata_port_operations sis_133_early_ops = {
22117 .inherits = &sis_base_ops,
22118 .set_piomode = sis_100_set_piomode,
22119 .set_dmamode = sis_133_early_set_dmamode,
22120 .cable_detect = sis_66_cable_detect,
22121 };
22122
22123 -static struct ata_port_operations sis_100_ops = {
22124 +static const struct ata_port_operations sis_100_ops = {
22125 .inherits = &sis_base_ops,
22126 .set_piomode = sis_100_set_piomode,
22127 .set_dmamode = sis_100_set_dmamode,
22128 .cable_detect = sis_66_cable_detect,
22129 };
22130
22131 -static struct ata_port_operations sis_66_ops = {
22132 +static const struct ata_port_operations sis_66_ops = {
22133 .inherits = &sis_base_ops,
22134 .set_piomode = sis_old_set_piomode,
22135 .set_dmamode = sis_66_set_dmamode,
22136 .cable_detect = sis_66_cable_detect,
22137 };
22138
22139 -static struct ata_port_operations sis_old_ops = {
22140 +static const struct ata_port_operations sis_old_ops = {
22141 .inherits = &sis_base_ops,
22142 .set_piomode = sis_old_set_piomode,
22143 .set_dmamode = sis_old_set_dmamode,
22144 diff -urNp linux-2.6.35.4/drivers/ata/pata_sl82c105.c linux-2.6.35.4/drivers/ata/pata_sl82c105.c
22145 --- linux-2.6.35.4/drivers/ata/pata_sl82c105.c 2010-08-26 19:47:12.000000000 -0400
22146 +++ linux-2.6.35.4/drivers/ata/pata_sl82c105.c 2010-09-17 20:12:09.000000000 -0400
22147 @@ -231,7 +231,7 @@ static struct scsi_host_template sl82c10
22148 ATA_BMDMA_SHT(DRV_NAME),
22149 };
22150
22151 -static struct ata_port_operations sl82c105_port_ops = {
22152 +static const struct ata_port_operations sl82c105_port_ops = {
22153 .inherits = &ata_bmdma_port_ops,
22154 .qc_defer = sl82c105_qc_defer,
22155 .bmdma_start = sl82c105_bmdma_start,
22156 diff -urNp linux-2.6.35.4/drivers/ata/pata_triflex.c linux-2.6.35.4/drivers/ata/pata_triflex.c
22157 --- linux-2.6.35.4/drivers/ata/pata_triflex.c 2010-08-26 19:47:12.000000000 -0400
22158 +++ linux-2.6.35.4/drivers/ata/pata_triflex.c 2010-09-17 20:12:09.000000000 -0400
22159 @@ -178,7 +178,7 @@ static struct scsi_host_template triflex
22160 ATA_BMDMA_SHT(DRV_NAME),
22161 };
22162
22163 -static struct ata_port_operations triflex_port_ops = {
22164 +static const struct ata_port_operations triflex_port_ops = {
22165 .inherits = &ata_bmdma_port_ops,
22166 .bmdma_start = triflex_bmdma_start,
22167 .bmdma_stop = triflex_bmdma_stop,
22168 diff -urNp linux-2.6.35.4/drivers/ata/pata_via.c linux-2.6.35.4/drivers/ata/pata_via.c
22169 --- linux-2.6.35.4/drivers/ata/pata_via.c 2010-08-26 19:47:12.000000000 -0400
22170 +++ linux-2.6.35.4/drivers/ata/pata_via.c 2010-09-17 20:12:09.000000000 -0400
22171 @@ -439,7 +439,7 @@ static struct scsi_host_template via_sht
22172 ATA_BMDMA_SHT(DRV_NAME),
22173 };
22174
22175 -static struct ata_port_operations via_port_ops = {
22176 +static const struct ata_port_operations via_port_ops = {
22177 .inherits = &ata_bmdma_port_ops,
22178 .cable_detect = via_cable_detect,
22179 .set_piomode = via_set_piomode,
22180 @@ -450,7 +450,7 @@ static struct ata_port_operations via_po
22181 .mode_filter = via_mode_filter,
22182 };
22183
22184 -static struct ata_port_operations via_port_ops_noirq = {
22185 +static const struct ata_port_operations via_port_ops_noirq = {
22186 .inherits = &via_port_ops,
22187 .sff_data_xfer = ata_sff_data_xfer_noirq,
22188 };
22189 diff -urNp linux-2.6.35.4/drivers/ata/pata_winbond.c linux-2.6.35.4/drivers/ata/pata_winbond.c
22190 --- linux-2.6.35.4/drivers/ata/pata_winbond.c 2010-08-26 19:47:12.000000000 -0400
22191 +++ linux-2.6.35.4/drivers/ata/pata_winbond.c 2010-09-17 20:12:09.000000000 -0400
22192 @@ -125,7 +125,7 @@ static struct scsi_host_template winbond
22193 ATA_PIO_SHT(DRV_NAME),
22194 };
22195
22196 -static struct ata_port_operations winbond_port_ops = {
22197 +static const struct ata_port_operations winbond_port_ops = {
22198 .inherits = &ata_sff_port_ops,
22199 .sff_data_xfer = winbond_data_xfer,
22200 .cable_detect = ata_cable_40wire,
22201 diff -urNp linux-2.6.35.4/drivers/ata/pdc_adma.c linux-2.6.35.4/drivers/ata/pdc_adma.c
22202 --- linux-2.6.35.4/drivers/ata/pdc_adma.c 2010-08-26 19:47:12.000000000 -0400
22203 +++ linux-2.6.35.4/drivers/ata/pdc_adma.c 2010-09-17 20:12:09.000000000 -0400
22204 @@ -146,7 +146,7 @@ static struct scsi_host_template adma_at
22205 .dma_boundary = ADMA_DMA_BOUNDARY,
22206 };
22207
22208 -static struct ata_port_operations adma_ata_ops = {
22209 +static const struct ata_port_operations adma_ata_ops = {
22210 .inherits = &ata_sff_port_ops,
22211
22212 .lost_interrupt = ATA_OP_NULL,
22213 diff -urNp linux-2.6.35.4/drivers/ata/sata_fsl.c linux-2.6.35.4/drivers/ata/sata_fsl.c
22214 --- linux-2.6.35.4/drivers/ata/sata_fsl.c 2010-08-26 19:47:12.000000000 -0400
22215 +++ linux-2.6.35.4/drivers/ata/sata_fsl.c 2010-09-17 20:12:09.000000000 -0400
22216 @@ -1261,7 +1261,7 @@ static struct scsi_host_template sata_fs
22217 .dma_boundary = ATA_DMA_BOUNDARY,
22218 };
22219
22220 -static struct ata_port_operations sata_fsl_ops = {
22221 +static const struct ata_port_operations sata_fsl_ops = {
22222 .inherits = &sata_pmp_port_ops,
22223
22224 .qc_defer = ata_std_qc_defer,
22225 diff -urNp linux-2.6.35.4/drivers/ata/sata_inic162x.c linux-2.6.35.4/drivers/ata/sata_inic162x.c
22226 --- linux-2.6.35.4/drivers/ata/sata_inic162x.c 2010-08-26 19:47:12.000000000 -0400
22227 +++ linux-2.6.35.4/drivers/ata/sata_inic162x.c 2010-09-17 20:12:09.000000000 -0400
22228 @@ -705,7 +705,7 @@ static int inic_port_start(struct ata_po
22229 return 0;
22230 }
22231
22232 -static struct ata_port_operations inic_port_ops = {
22233 +static const struct ata_port_operations inic_port_ops = {
22234 .inherits = &sata_port_ops,
22235
22236 .check_atapi_dma = inic_check_atapi_dma,
22237 diff -urNp linux-2.6.35.4/drivers/ata/sata_mv.c linux-2.6.35.4/drivers/ata/sata_mv.c
22238 --- linux-2.6.35.4/drivers/ata/sata_mv.c 2010-08-26 19:47:12.000000000 -0400
22239 +++ linux-2.6.35.4/drivers/ata/sata_mv.c 2010-09-17 20:12:09.000000000 -0400
22240 @@ -663,7 +663,7 @@ static struct scsi_host_template mv6_sht
22241 .dma_boundary = MV_DMA_BOUNDARY,
22242 };
22243
22244 -static struct ata_port_operations mv5_ops = {
22245 +static const struct ata_port_operations mv5_ops = {
22246 .inherits = &ata_sff_port_ops,
22247
22248 .lost_interrupt = ATA_OP_NULL,
22249 @@ -683,7 +683,7 @@ static struct ata_port_operations mv5_op
22250 .port_stop = mv_port_stop,
22251 };
22252
22253 -static struct ata_port_operations mv6_ops = {
22254 +static const struct ata_port_operations mv6_ops = {
22255 .inherits = &ata_bmdma_port_ops,
22256
22257 .lost_interrupt = ATA_OP_NULL,
22258 @@ -717,7 +717,7 @@ static struct ata_port_operations mv6_op
22259 .port_stop = mv_port_stop,
22260 };
22261
22262 -static struct ata_port_operations mv_iie_ops = {
22263 +static const struct ata_port_operations mv_iie_ops = {
22264 .inherits = &mv6_ops,
22265 .dev_config = ATA_OP_NULL,
22266 .qc_prep = mv_qc_prep_iie,
22267 diff -urNp linux-2.6.35.4/drivers/ata/sata_nv.c linux-2.6.35.4/drivers/ata/sata_nv.c
22268 --- linux-2.6.35.4/drivers/ata/sata_nv.c 2010-08-26 19:47:12.000000000 -0400
22269 +++ linux-2.6.35.4/drivers/ata/sata_nv.c 2010-09-17 20:12:09.000000000 -0400
22270 @@ -465,7 +465,7 @@ static struct scsi_host_template nv_swnc
22271 * cases. Define nv_hardreset() which only kicks in for post-boot
22272 * probing and use it for all variants.
22273 */
22274 -static struct ata_port_operations nv_generic_ops = {
22275 +static const struct ata_port_operations nv_generic_ops = {
22276 .inherits = &ata_bmdma_port_ops,
22277 .lost_interrupt = ATA_OP_NULL,
22278 .scr_read = nv_scr_read,
22279 @@ -473,20 +473,20 @@ static struct ata_port_operations nv_gen
22280 .hardreset = nv_hardreset,
22281 };
22282
22283 -static struct ata_port_operations nv_nf2_ops = {
22284 +static const struct ata_port_operations nv_nf2_ops = {
22285 .inherits = &nv_generic_ops,
22286 .freeze = nv_nf2_freeze,
22287 .thaw = nv_nf2_thaw,
22288 };
22289
22290 -static struct ata_port_operations nv_ck804_ops = {
22291 +static const struct ata_port_operations nv_ck804_ops = {
22292 .inherits = &nv_generic_ops,
22293 .freeze = nv_ck804_freeze,
22294 .thaw = nv_ck804_thaw,
22295 .host_stop = nv_ck804_host_stop,
22296 };
22297
22298 -static struct ata_port_operations nv_adma_ops = {
22299 +static const struct ata_port_operations nv_adma_ops = {
22300 .inherits = &nv_ck804_ops,
22301
22302 .check_atapi_dma = nv_adma_check_atapi_dma,
22303 @@ -510,7 +510,7 @@ static struct ata_port_operations nv_adm
22304 .host_stop = nv_adma_host_stop,
22305 };
22306
22307 -static struct ata_port_operations nv_swncq_ops = {
22308 +static const struct ata_port_operations nv_swncq_ops = {
22309 .inherits = &nv_generic_ops,
22310
22311 .qc_defer = ata_std_qc_defer,
22312 diff -urNp linux-2.6.35.4/drivers/ata/sata_promise.c linux-2.6.35.4/drivers/ata/sata_promise.c
22313 --- linux-2.6.35.4/drivers/ata/sata_promise.c 2010-08-26 19:47:12.000000000 -0400
22314 +++ linux-2.6.35.4/drivers/ata/sata_promise.c 2010-09-17 20:12:09.000000000 -0400
22315 @@ -196,7 +196,7 @@ static const struct ata_port_operations
22316 .error_handler = pdc_error_handler,
22317 };
22318
22319 -static struct ata_port_operations pdc_sata_ops = {
22320 +static const struct ata_port_operations pdc_sata_ops = {
22321 .inherits = &pdc_common_ops,
22322 .cable_detect = pdc_sata_cable_detect,
22323 .freeze = pdc_sata_freeze,
22324 @@ -209,14 +209,14 @@ static struct ata_port_operations pdc_sa
22325
22326 /* First-generation chips need a more restrictive ->check_atapi_dma op,
22327 and ->freeze/thaw that ignore the hotplug controls. */
22328 -static struct ata_port_operations pdc_old_sata_ops = {
22329 +static const struct ata_port_operations pdc_old_sata_ops = {
22330 .inherits = &pdc_sata_ops,
22331 .freeze = pdc_freeze,
22332 .thaw = pdc_thaw,
22333 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
22334 };
22335
22336 -static struct ata_port_operations pdc_pata_ops = {
22337 +static const struct ata_port_operations pdc_pata_ops = {
22338 .inherits = &pdc_common_ops,
22339 .cable_detect = pdc_pata_cable_detect,
22340 .freeze = pdc_freeze,
22341 diff -urNp linux-2.6.35.4/drivers/ata/sata_qstor.c linux-2.6.35.4/drivers/ata/sata_qstor.c
22342 --- linux-2.6.35.4/drivers/ata/sata_qstor.c 2010-08-26 19:47:12.000000000 -0400
22343 +++ linux-2.6.35.4/drivers/ata/sata_qstor.c 2010-09-17 20:12:09.000000000 -0400
22344 @@ -131,7 +131,7 @@ static struct scsi_host_template qs_ata_
22345 .dma_boundary = QS_DMA_BOUNDARY,
22346 };
22347
22348 -static struct ata_port_operations qs_ata_ops = {
22349 +static const struct ata_port_operations qs_ata_ops = {
22350 .inherits = &ata_sff_port_ops,
22351
22352 .check_atapi_dma = qs_check_atapi_dma,
22353 diff -urNp linux-2.6.35.4/drivers/ata/sata_sil24.c linux-2.6.35.4/drivers/ata/sata_sil24.c
22354 --- linux-2.6.35.4/drivers/ata/sata_sil24.c 2010-08-26 19:47:12.000000000 -0400
22355 +++ linux-2.6.35.4/drivers/ata/sata_sil24.c 2010-09-17 20:12:09.000000000 -0400
22356 @@ -389,7 +389,7 @@ static struct scsi_host_template sil24_s
22357 .dma_boundary = ATA_DMA_BOUNDARY,
22358 };
22359
22360 -static struct ata_port_operations sil24_ops = {
22361 +static const struct ata_port_operations sil24_ops = {
22362 .inherits = &sata_pmp_port_ops,
22363
22364 .qc_defer = sil24_qc_defer,
22365 diff -urNp linux-2.6.35.4/drivers/ata/sata_sil.c linux-2.6.35.4/drivers/ata/sata_sil.c
22366 --- linux-2.6.35.4/drivers/ata/sata_sil.c 2010-08-26 19:47:12.000000000 -0400
22367 +++ linux-2.6.35.4/drivers/ata/sata_sil.c 2010-09-17 20:12:09.000000000 -0400
22368 @@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht
22369 .sg_tablesize = ATA_MAX_PRD
22370 };
22371
22372 -static struct ata_port_operations sil_ops = {
22373 +static const struct ata_port_operations sil_ops = {
22374 .inherits = &ata_bmdma32_port_ops,
22375 .dev_config = sil_dev_config,
22376 .set_mode = sil_set_mode,
22377 diff -urNp linux-2.6.35.4/drivers/ata/sata_sis.c linux-2.6.35.4/drivers/ata/sata_sis.c
22378 --- linux-2.6.35.4/drivers/ata/sata_sis.c 2010-08-26 19:47:12.000000000 -0400
22379 +++ linux-2.6.35.4/drivers/ata/sata_sis.c 2010-09-17 20:12:09.000000000 -0400
22380 @@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht
22381 ATA_BMDMA_SHT(DRV_NAME),
22382 };
22383
22384 -static struct ata_port_operations sis_ops = {
22385 +static const struct ata_port_operations sis_ops = {
22386 .inherits = &ata_bmdma_port_ops,
22387 .scr_read = sis_scr_read,
22388 .scr_write = sis_scr_write,
22389 diff -urNp linux-2.6.35.4/drivers/ata/sata_svw.c linux-2.6.35.4/drivers/ata/sata_svw.c
22390 --- linux-2.6.35.4/drivers/ata/sata_svw.c 2010-08-26 19:47:12.000000000 -0400
22391 +++ linux-2.6.35.4/drivers/ata/sata_svw.c 2010-09-17 20:12:09.000000000 -0400
22392 @@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata
22393 };
22394
22395
22396 -static struct ata_port_operations k2_sata_ops = {
22397 +static const struct ata_port_operations k2_sata_ops = {
22398 .inherits = &ata_bmdma_port_ops,
22399 .sff_tf_load = k2_sata_tf_load,
22400 .sff_tf_read = k2_sata_tf_read,
22401 diff -urNp linux-2.6.35.4/drivers/ata/sata_sx4.c linux-2.6.35.4/drivers/ata/sata_sx4.c
22402 --- linux-2.6.35.4/drivers/ata/sata_sx4.c 2010-08-26 19:47:12.000000000 -0400
22403 +++ linux-2.6.35.4/drivers/ata/sata_sx4.c 2010-09-17 20:12:09.000000000 -0400
22404 @@ -249,7 +249,7 @@ static struct scsi_host_template pdc_sat
22405 };
22406
22407 /* TODO: inherit from base port_ops after converting to new EH */
22408 -static struct ata_port_operations pdc_20621_ops = {
22409 +static const struct ata_port_operations pdc_20621_ops = {
22410 .inherits = &ata_sff_port_ops,
22411
22412 .check_atapi_dma = pdc_check_atapi_dma,
22413 diff -urNp linux-2.6.35.4/drivers/ata/sata_uli.c linux-2.6.35.4/drivers/ata/sata_uli.c
22414 --- linux-2.6.35.4/drivers/ata/sata_uli.c 2010-08-26 19:47:12.000000000 -0400
22415 +++ linux-2.6.35.4/drivers/ata/sata_uli.c 2010-09-17 20:12:09.000000000 -0400
22416 @@ -80,7 +80,7 @@ static struct scsi_host_template uli_sht
22417 ATA_BMDMA_SHT(DRV_NAME),
22418 };
22419
22420 -static struct ata_port_operations uli_ops = {
22421 +static const struct ata_port_operations uli_ops = {
22422 .inherits = &ata_bmdma_port_ops,
22423 .scr_read = uli_scr_read,
22424 .scr_write = uli_scr_write,
22425 diff -urNp linux-2.6.35.4/drivers/ata/sata_via.c linux-2.6.35.4/drivers/ata/sata_via.c
22426 --- linux-2.6.35.4/drivers/ata/sata_via.c 2010-08-26 19:47:12.000000000 -0400
22427 +++ linux-2.6.35.4/drivers/ata/sata_via.c 2010-09-17 20:12:09.000000000 -0400
22428 @@ -115,32 +115,32 @@ static struct scsi_host_template svia_sh
22429 ATA_BMDMA_SHT(DRV_NAME),
22430 };
22431
22432 -static struct ata_port_operations svia_base_ops = {
22433 +static const struct ata_port_operations svia_base_ops = {
22434 .inherits = &ata_bmdma_port_ops,
22435 .sff_tf_load = svia_tf_load,
22436 };
22437
22438 -static struct ata_port_operations vt6420_sata_ops = {
22439 +static const struct ata_port_operations vt6420_sata_ops = {
22440 .inherits = &svia_base_ops,
22441 .freeze = svia_noop_freeze,
22442 .prereset = vt6420_prereset,
22443 .bmdma_start = vt6420_bmdma_start,
22444 };
22445
22446 -static struct ata_port_operations vt6421_pata_ops = {
22447 +static const struct ata_port_operations vt6421_pata_ops = {
22448 .inherits = &svia_base_ops,
22449 .cable_detect = vt6421_pata_cable_detect,
22450 .set_piomode = vt6421_set_pio_mode,
22451 .set_dmamode = vt6421_set_dma_mode,
22452 };
22453
22454 -static struct ata_port_operations vt6421_sata_ops = {
22455 +static const struct ata_port_operations vt6421_sata_ops = {
22456 .inherits = &svia_base_ops,
22457 .scr_read = svia_scr_read,
22458 .scr_write = svia_scr_write,
22459 };
22460
22461 -static struct ata_port_operations vt8251_ops = {
22462 +static const struct ata_port_operations vt8251_ops = {
22463 .inherits = &svia_base_ops,
22464 .hardreset = sata_std_hardreset,
22465 .scr_read = vt8251_scr_read,
22466 diff -urNp linux-2.6.35.4/drivers/ata/sata_vsc.c linux-2.6.35.4/drivers/ata/sata_vsc.c
22467 --- linux-2.6.35.4/drivers/ata/sata_vsc.c 2010-08-26 19:47:12.000000000 -0400
22468 +++ linux-2.6.35.4/drivers/ata/sata_vsc.c 2010-09-17 20:12:09.000000000 -0400
22469 @@ -300,7 +300,7 @@ static struct scsi_host_template vsc_sat
22470 };
22471
22472
22473 -static struct ata_port_operations vsc_sata_ops = {
22474 +static const struct ata_port_operations vsc_sata_ops = {
22475 .inherits = &ata_bmdma_port_ops,
22476 /* The IRQ handling is not quite standard SFF behaviour so we
22477 cannot use the default lost interrupt handler */
22478 diff -urNp linux-2.6.35.4/drivers/atm/adummy.c linux-2.6.35.4/drivers/atm/adummy.c
22479 --- linux-2.6.35.4/drivers/atm/adummy.c 2010-08-26 19:47:12.000000000 -0400
22480 +++ linux-2.6.35.4/drivers/atm/adummy.c 2010-09-17 20:12:09.000000000 -0400
22481 @@ -78,7 +78,7 @@ adummy_send(struct atm_vcc *vcc, struct
22482 vcc->pop(vcc, skb);
22483 else
22484 dev_kfree_skb_any(skb);
22485 - atomic_inc(&vcc->stats->tx);
22486 + atomic_inc_unchecked(&vcc->stats->tx);
22487
22488 return 0;
22489 }
22490 diff -urNp linux-2.6.35.4/drivers/atm/ambassador.c linux-2.6.35.4/drivers/atm/ambassador.c
22491 --- linux-2.6.35.4/drivers/atm/ambassador.c 2010-08-26 19:47:12.000000000 -0400
22492 +++ linux-2.6.35.4/drivers/atm/ambassador.c 2010-09-17 20:12:09.000000000 -0400
22493 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev,
22494 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
22495
22496 // VC layer stats
22497 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22498 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22499
22500 // free the descriptor
22501 kfree (tx_descr);
22502 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev,
22503 dump_skb ("<<<", vc, skb);
22504
22505 // VC layer stats
22506 - atomic_inc(&atm_vcc->stats->rx);
22507 + atomic_inc_unchecked(&atm_vcc->stats->rx);
22508 __net_timestamp(skb);
22509 // end of our responsability
22510 atm_vcc->push (atm_vcc, skb);
22511 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev,
22512 } else {
22513 PRINTK (KERN_INFO, "dropped over-size frame");
22514 // should we count this?
22515 - atomic_inc(&atm_vcc->stats->rx_drop);
22516 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22517 }
22518
22519 } else {
22520 @@ -1342,7 +1342,7 @@ static int amb_send (struct atm_vcc * at
22521 }
22522
22523 if (check_area (skb->data, skb->len)) {
22524 - atomic_inc(&atm_vcc->stats->tx_err);
22525 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
22526 return -ENOMEM; // ?
22527 }
22528
22529 diff -urNp linux-2.6.35.4/drivers/atm/atmtcp.c linux-2.6.35.4/drivers/atm/atmtcp.c
22530 --- linux-2.6.35.4/drivers/atm/atmtcp.c 2010-08-26 19:47:12.000000000 -0400
22531 +++ linux-2.6.35.4/drivers/atm/atmtcp.c 2010-09-17 20:12:09.000000000 -0400
22532 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc
22533 if (vcc->pop) vcc->pop(vcc,skb);
22534 else dev_kfree_skb(skb);
22535 if (dev_data) return 0;
22536 - atomic_inc(&vcc->stats->tx_err);
22537 + atomic_inc_unchecked(&vcc->stats->tx_err);
22538 return -ENOLINK;
22539 }
22540 size = skb->len+sizeof(struct atmtcp_hdr);
22541 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc
22542 if (!new_skb) {
22543 if (vcc->pop) vcc->pop(vcc,skb);
22544 else dev_kfree_skb(skb);
22545 - atomic_inc(&vcc->stats->tx_err);
22546 + atomic_inc_unchecked(&vcc->stats->tx_err);
22547 return -ENOBUFS;
22548 }
22549 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
22550 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc
22551 if (vcc->pop) vcc->pop(vcc,skb);
22552 else dev_kfree_skb(skb);
22553 out_vcc->push(out_vcc,new_skb);
22554 - atomic_inc(&vcc->stats->tx);
22555 - atomic_inc(&out_vcc->stats->rx);
22556 + atomic_inc_unchecked(&vcc->stats->tx);
22557 + atomic_inc_unchecked(&out_vcc->stats->rx);
22558 return 0;
22559 }
22560
22561 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc
22562 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
22563 read_unlock(&vcc_sklist_lock);
22564 if (!out_vcc) {
22565 - atomic_inc(&vcc->stats->tx_err);
22566 + atomic_inc_unchecked(&vcc->stats->tx_err);
22567 goto done;
22568 }
22569 skb_pull(skb,sizeof(struct atmtcp_hdr));
22570 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc
22571 __net_timestamp(new_skb);
22572 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
22573 out_vcc->push(out_vcc,new_skb);
22574 - atomic_inc(&vcc->stats->tx);
22575 - atomic_inc(&out_vcc->stats->rx);
22576 + atomic_inc_unchecked(&vcc->stats->tx);
22577 + atomic_inc_unchecked(&out_vcc->stats->rx);
22578 done:
22579 if (vcc->pop) vcc->pop(vcc,skb);
22580 else dev_kfree_skb(skb);
22581 diff -urNp linux-2.6.35.4/drivers/atm/eni.c linux-2.6.35.4/drivers/atm/eni.c
22582 --- linux-2.6.35.4/drivers/atm/eni.c 2010-08-26 19:47:12.000000000 -0400
22583 +++ linux-2.6.35.4/drivers/atm/eni.c 2010-09-17 20:12:09.000000000 -0400
22584 @@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
22585 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
22586 vcc->dev->number);
22587 length = 0;
22588 - atomic_inc(&vcc->stats->rx_err);
22589 + atomic_inc_unchecked(&vcc->stats->rx_err);
22590 }
22591 else {
22592 length = ATM_CELL_SIZE-1; /* no HEC */
22593 @@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22594 size);
22595 }
22596 eff = length = 0;
22597 - atomic_inc(&vcc->stats->rx_err);
22598 + atomic_inc_unchecked(&vcc->stats->rx_err);
22599 }
22600 else {
22601 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
22602 @@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22603 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
22604 vcc->dev->number,vcc->vci,length,size << 2,descr);
22605 length = eff = 0;
22606 - atomic_inc(&vcc->stats->rx_err);
22607 + atomic_inc_unchecked(&vcc->stats->rx_err);
22608 }
22609 }
22610 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
22611 @@ -771,7 +771,7 @@ rx_dequeued++;
22612 vcc->push(vcc,skb);
22613 pushed++;
22614 }
22615 - atomic_inc(&vcc->stats->rx);
22616 + atomic_inc_unchecked(&vcc->stats->rx);
22617 }
22618 wake_up(&eni_dev->rx_wait);
22619 }
22620 @@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *d
22621 PCI_DMA_TODEVICE);
22622 if (vcc->pop) vcc->pop(vcc,skb);
22623 else dev_kfree_skb_irq(skb);
22624 - atomic_inc(&vcc->stats->tx);
22625 + atomic_inc_unchecked(&vcc->stats->tx);
22626 wake_up(&eni_dev->tx_wait);
22627 dma_complete++;
22628 }
22629 diff -urNp linux-2.6.35.4/drivers/atm/firestream.c linux-2.6.35.4/drivers/atm/firestream.c
22630 --- linux-2.6.35.4/drivers/atm/firestream.c 2010-08-26 19:47:12.000000000 -0400
22631 +++ linux-2.6.35.4/drivers/atm/firestream.c 2010-09-17 20:12:09.000000000 -0400
22632 @@ -749,7 +749,7 @@ static void process_txdone_queue (struct
22633 }
22634 }
22635
22636 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22637 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22638
22639 fs_dprintk (FS_DEBUG_TXMEM, "i");
22640 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
22641 @@ -816,7 +816,7 @@ static void process_incoming (struct fs_
22642 #endif
22643 skb_put (skb, qe->p1 & 0xffff);
22644 ATM_SKB(skb)->vcc = atm_vcc;
22645 - atomic_inc(&atm_vcc->stats->rx);
22646 + atomic_inc_unchecked(&atm_vcc->stats->rx);
22647 __net_timestamp(skb);
22648 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
22649 atm_vcc->push (atm_vcc, skb);
22650 @@ -837,12 +837,12 @@ static void process_incoming (struct fs_
22651 kfree (pe);
22652 }
22653 if (atm_vcc)
22654 - atomic_inc(&atm_vcc->stats->rx_drop);
22655 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22656 break;
22657 case 0x1f: /* Reassembly abort: no buffers. */
22658 /* Silently increment error counter. */
22659 if (atm_vcc)
22660 - atomic_inc(&atm_vcc->stats->rx_drop);
22661 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22662 break;
22663 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
22664 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
22665 diff -urNp linux-2.6.35.4/drivers/atm/fore200e.c linux-2.6.35.4/drivers/atm/fore200e.c
22666 --- linux-2.6.35.4/drivers/atm/fore200e.c 2010-08-26 19:47:12.000000000 -0400
22667 +++ linux-2.6.35.4/drivers/atm/fore200e.c 2010-09-17 20:12:09.000000000 -0400
22668 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200
22669 #endif
22670 /* check error condition */
22671 if (*entry->status & STATUS_ERROR)
22672 - atomic_inc(&vcc->stats->tx_err);
22673 + atomic_inc_unchecked(&vcc->stats->tx_err);
22674 else
22675 - atomic_inc(&vcc->stats->tx);
22676 + atomic_inc_unchecked(&vcc->stats->tx);
22677 }
22678 }
22679
22680 @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore2
22681 if (skb == NULL) {
22682 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
22683
22684 - atomic_inc(&vcc->stats->rx_drop);
22685 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22686 return -ENOMEM;
22687 }
22688
22689 @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore2
22690
22691 dev_kfree_skb_any(skb);
22692
22693 - atomic_inc(&vcc->stats->rx_drop);
22694 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22695 return -ENOMEM;
22696 }
22697
22698 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22699
22700 vcc->push(vcc, skb);
22701 - atomic_inc(&vcc->stats->rx);
22702 + atomic_inc_unchecked(&vcc->stats->rx);
22703
22704 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22705
22706 @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200
22707 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
22708 fore200e->atm_dev->number,
22709 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
22710 - atomic_inc(&vcc->stats->rx_err);
22711 + atomic_inc_unchecked(&vcc->stats->rx_err);
22712 }
22713 }
22714
22715 @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struc
22716 goto retry_here;
22717 }
22718
22719 - atomic_inc(&vcc->stats->tx_err);
22720 + atomic_inc_unchecked(&vcc->stats->tx_err);
22721
22722 fore200e->tx_sat++;
22723 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
22724 diff -urNp linux-2.6.35.4/drivers/atm/he.c linux-2.6.35.4/drivers/atm/he.c
22725 --- linux-2.6.35.4/drivers/atm/he.c 2010-08-26 19:47:12.000000000 -0400
22726 +++ linux-2.6.35.4/drivers/atm/he.c 2010-09-17 20:12:09.000000000 -0400
22727 @@ -1770,7 +1770,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22728
22729 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
22730 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
22731 - atomic_inc(&vcc->stats->rx_drop);
22732 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22733 goto return_host_buffers;
22734 }
22735
22736 @@ -1803,7 +1803,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22737 RBRQ_LEN_ERR(he_dev->rbrq_head)
22738 ? "LEN_ERR" : "",
22739 vcc->vpi, vcc->vci);
22740 - atomic_inc(&vcc->stats->rx_err);
22741 + atomic_inc_unchecked(&vcc->stats->rx_err);
22742 goto return_host_buffers;
22743 }
22744
22745 @@ -1862,7 +1862,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22746 vcc->push(vcc, skb);
22747 spin_lock(&he_dev->global_lock);
22748
22749 - atomic_inc(&vcc->stats->rx);
22750 + atomic_inc_unchecked(&vcc->stats->rx);
22751
22752 return_host_buffers:
22753 ++pdus_assembled;
22754 @@ -2207,7 +2207,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
22755 tpd->vcc->pop(tpd->vcc, tpd->skb);
22756 else
22757 dev_kfree_skb_any(tpd->skb);
22758 - atomic_inc(&tpd->vcc->stats->tx_err);
22759 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
22760 }
22761 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
22762 return;
22763 @@ -2619,7 +2619,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22764 vcc->pop(vcc, skb);
22765 else
22766 dev_kfree_skb_any(skb);
22767 - atomic_inc(&vcc->stats->tx_err);
22768 + atomic_inc_unchecked(&vcc->stats->tx_err);
22769 return -EINVAL;
22770 }
22771
22772 @@ -2630,7 +2630,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22773 vcc->pop(vcc, skb);
22774 else
22775 dev_kfree_skb_any(skb);
22776 - atomic_inc(&vcc->stats->tx_err);
22777 + atomic_inc_unchecked(&vcc->stats->tx_err);
22778 return -EINVAL;
22779 }
22780 #endif
22781 @@ -2642,7 +2642,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22782 vcc->pop(vcc, skb);
22783 else
22784 dev_kfree_skb_any(skb);
22785 - atomic_inc(&vcc->stats->tx_err);
22786 + atomic_inc_unchecked(&vcc->stats->tx_err);
22787 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22788 return -ENOMEM;
22789 }
22790 @@ -2684,7 +2684,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22791 vcc->pop(vcc, skb);
22792 else
22793 dev_kfree_skb_any(skb);
22794 - atomic_inc(&vcc->stats->tx_err);
22795 + atomic_inc_unchecked(&vcc->stats->tx_err);
22796 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22797 return -ENOMEM;
22798 }
22799 @@ -2715,7 +2715,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22800 __enqueue_tpd(he_dev, tpd, cid);
22801 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22802
22803 - atomic_inc(&vcc->stats->tx);
22804 + atomic_inc_unchecked(&vcc->stats->tx);
22805
22806 return 0;
22807 }
22808 diff -urNp linux-2.6.35.4/drivers/atm/horizon.c linux-2.6.35.4/drivers/atm/horizon.c
22809 --- linux-2.6.35.4/drivers/atm/horizon.c 2010-08-26 19:47:12.000000000 -0400
22810 +++ linux-2.6.35.4/drivers/atm/horizon.c 2010-09-17 20:12:09.000000000 -0400
22811 @@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev,
22812 {
22813 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
22814 // VC layer stats
22815 - atomic_inc(&vcc->stats->rx);
22816 + atomic_inc_unchecked(&vcc->stats->rx);
22817 __net_timestamp(skb);
22818 // end of our responsability
22819 vcc->push (vcc, skb);
22820 @@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const
22821 dev->tx_iovec = NULL;
22822
22823 // VC layer stats
22824 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22825 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22826
22827 // free the skb
22828 hrz_kfree_skb (skb);
22829 diff -urNp linux-2.6.35.4/drivers/atm/idt77252.c linux-2.6.35.4/drivers/atm/idt77252.c
22830 --- linux-2.6.35.4/drivers/atm/idt77252.c 2010-08-26 19:47:12.000000000 -0400
22831 +++ linux-2.6.35.4/drivers/atm/idt77252.c 2010-09-17 20:12:09.000000000 -0400
22832 @@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, str
22833 else
22834 dev_kfree_skb(skb);
22835
22836 - atomic_inc(&vcc->stats->tx);
22837 + atomic_inc_unchecked(&vcc->stats->tx);
22838 }
22839
22840 atomic_dec(&scq->used);
22841 @@ -1074,13 +1074,13 @@ dequeue_rx(struct idt77252_dev *card, st
22842 if ((sb = dev_alloc_skb(64)) == NULL) {
22843 printk("%s: Can't allocate buffers for aal0.\n",
22844 card->name);
22845 - atomic_add(i, &vcc->stats->rx_drop);
22846 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
22847 break;
22848 }
22849 if (!atm_charge(vcc, sb->truesize)) {
22850 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
22851 card->name);
22852 - atomic_add(i - 1, &vcc->stats->rx_drop);
22853 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
22854 dev_kfree_skb(sb);
22855 break;
22856 }
22857 @@ -1097,7 +1097,7 @@ dequeue_rx(struct idt77252_dev *card, st
22858 ATM_SKB(sb)->vcc = vcc;
22859 __net_timestamp(sb);
22860 vcc->push(vcc, sb);
22861 - atomic_inc(&vcc->stats->rx);
22862 + atomic_inc_unchecked(&vcc->stats->rx);
22863
22864 cell += ATM_CELL_PAYLOAD;
22865 }
22866 @@ -1134,13 +1134,13 @@ dequeue_rx(struct idt77252_dev *card, st
22867 "(CDC: %08x)\n",
22868 card->name, len, rpp->len, readl(SAR_REG_CDC));
22869 recycle_rx_pool_skb(card, rpp);
22870 - atomic_inc(&vcc->stats->rx_err);
22871 + atomic_inc_unchecked(&vcc->stats->rx_err);
22872 return;
22873 }
22874 if (stat & SAR_RSQE_CRC) {
22875 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
22876 recycle_rx_pool_skb(card, rpp);
22877 - atomic_inc(&vcc->stats->rx_err);
22878 + atomic_inc_unchecked(&vcc->stats->rx_err);
22879 return;
22880 }
22881 if (skb_queue_len(&rpp->queue) > 1) {
22882 @@ -1151,7 +1151,7 @@ dequeue_rx(struct idt77252_dev *card, st
22883 RXPRINTK("%s: Can't alloc RX skb.\n",
22884 card->name);
22885 recycle_rx_pool_skb(card, rpp);
22886 - atomic_inc(&vcc->stats->rx_err);
22887 + atomic_inc_unchecked(&vcc->stats->rx_err);
22888 return;
22889 }
22890 if (!atm_charge(vcc, skb->truesize)) {
22891 @@ -1170,7 +1170,7 @@ dequeue_rx(struct idt77252_dev *card, st
22892 __net_timestamp(skb);
22893
22894 vcc->push(vcc, skb);
22895 - atomic_inc(&vcc->stats->rx);
22896 + atomic_inc_unchecked(&vcc->stats->rx);
22897
22898 return;
22899 }
22900 @@ -1192,7 +1192,7 @@ dequeue_rx(struct idt77252_dev *card, st
22901 __net_timestamp(skb);
22902
22903 vcc->push(vcc, skb);
22904 - atomic_inc(&vcc->stats->rx);
22905 + atomic_inc_unchecked(&vcc->stats->rx);
22906
22907 if (skb->truesize > SAR_FB_SIZE_3)
22908 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
22909 @@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *car
22910 if (vcc->qos.aal != ATM_AAL0) {
22911 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
22912 card->name, vpi, vci);
22913 - atomic_inc(&vcc->stats->rx_drop);
22914 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22915 goto drop;
22916 }
22917
22918 if ((sb = dev_alloc_skb(64)) == NULL) {
22919 printk("%s: Can't allocate buffers for AAL0.\n",
22920 card->name);
22921 - atomic_inc(&vcc->stats->rx_err);
22922 + atomic_inc_unchecked(&vcc->stats->rx_err);
22923 goto drop;
22924 }
22925
22926 @@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *car
22927 ATM_SKB(sb)->vcc = vcc;
22928 __net_timestamp(sb);
22929 vcc->push(vcc, sb);
22930 - atomic_inc(&vcc->stats->rx);
22931 + atomic_inc_unchecked(&vcc->stats->rx);
22932
22933 drop:
22934 skb_pull(queue, 64);
22935 @@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22936
22937 if (vc == NULL) {
22938 printk("%s: NULL connection in send().\n", card->name);
22939 - atomic_inc(&vcc->stats->tx_err);
22940 + atomic_inc_unchecked(&vcc->stats->tx_err);
22941 dev_kfree_skb(skb);
22942 return -EINVAL;
22943 }
22944 if (!test_bit(VCF_TX, &vc->flags)) {
22945 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
22946 - atomic_inc(&vcc->stats->tx_err);
22947 + atomic_inc_unchecked(&vcc->stats->tx_err);
22948 dev_kfree_skb(skb);
22949 return -EINVAL;
22950 }
22951 @@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22952 break;
22953 default:
22954 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
22955 - atomic_inc(&vcc->stats->tx_err);
22956 + atomic_inc_unchecked(&vcc->stats->tx_err);
22957 dev_kfree_skb(skb);
22958 return -EINVAL;
22959 }
22960
22961 if (skb_shinfo(skb)->nr_frags != 0) {
22962 printk("%s: No scatter-gather yet.\n", card->name);
22963 - atomic_inc(&vcc->stats->tx_err);
22964 + atomic_inc_unchecked(&vcc->stats->tx_err);
22965 dev_kfree_skb(skb);
22966 return -EINVAL;
22967 }
22968 @@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22969
22970 err = queue_skb(card, vc, skb, oam);
22971 if (err) {
22972 - atomic_inc(&vcc->stats->tx_err);
22973 + atomic_inc_unchecked(&vcc->stats->tx_err);
22974 dev_kfree_skb(skb);
22975 return err;
22976 }
22977 @@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
22978 skb = dev_alloc_skb(64);
22979 if (!skb) {
22980 printk("%s: Out of memory in send_oam().\n", card->name);
22981 - atomic_inc(&vcc->stats->tx_err);
22982 + atomic_inc_unchecked(&vcc->stats->tx_err);
22983 return -ENOMEM;
22984 }
22985 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
22986 diff -urNp linux-2.6.35.4/drivers/atm/iphase.c linux-2.6.35.4/drivers/atm/iphase.c
22987 --- linux-2.6.35.4/drivers/atm/iphase.c 2010-08-26 19:47:12.000000000 -0400
22988 +++ linux-2.6.35.4/drivers/atm/iphase.c 2010-09-17 20:12:09.000000000 -0400
22989 @@ -1124,7 +1124,7 @@ static int rx_pkt(struct atm_dev *dev)
22990 status = (u_short) (buf_desc_ptr->desc_mode);
22991 if (status & (RX_CER | RX_PTE | RX_OFL))
22992 {
22993 - atomic_inc(&vcc->stats->rx_err);
22994 + atomic_inc_unchecked(&vcc->stats->rx_err);
22995 IF_ERR(printk("IA: bad packet, dropping it");)
22996 if (status & RX_CER) {
22997 IF_ERR(printk(" cause: packet CRC error\n");)
22998 @@ -1147,7 +1147,7 @@ static int rx_pkt(struct atm_dev *dev)
22999 len = dma_addr - buf_addr;
23000 if (len > iadev->rx_buf_sz) {
23001 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
23002 - atomic_inc(&vcc->stats->rx_err);
23003 + atomic_inc_unchecked(&vcc->stats->rx_err);
23004 goto out_free_desc;
23005 }
23006
23007 @@ -1297,7 +1297,7 @@ static void rx_dle_intr(struct atm_dev *
23008 ia_vcc = INPH_IA_VCC(vcc);
23009 if (ia_vcc == NULL)
23010 {
23011 - atomic_inc(&vcc->stats->rx_err);
23012 + atomic_inc_unchecked(&vcc->stats->rx_err);
23013 dev_kfree_skb_any(skb);
23014 atm_return(vcc, atm_guess_pdu2truesize(len));
23015 goto INCR_DLE;
23016 @@ -1309,7 +1309,7 @@ static void rx_dle_intr(struct atm_dev *
23017 if ((length > iadev->rx_buf_sz) || (length >
23018 (skb->len - sizeof(struct cpcs_trailer))))
23019 {
23020 - atomic_inc(&vcc->stats->rx_err);
23021 + atomic_inc_unchecked(&vcc->stats->rx_err);
23022 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
23023 length, skb->len);)
23024 dev_kfree_skb_any(skb);
23025 @@ -1325,7 +1325,7 @@ static void rx_dle_intr(struct atm_dev *
23026
23027 IF_RX(printk("rx_dle_intr: skb push");)
23028 vcc->push(vcc,skb);
23029 - atomic_inc(&vcc->stats->rx);
23030 + atomic_inc_unchecked(&vcc->stats->rx);
23031 iadev->rx_pkt_cnt++;
23032 }
23033 INCR_DLE:
23034 @@ -2807,15 +2807,15 @@ static int ia_ioctl(struct atm_dev *dev,
23035 {
23036 struct k_sonet_stats *stats;
23037 stats = &PRIV(_ia_dev[board])->sonet_stats;
23038 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
23039 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
23040 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
23041 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
23042 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
23043 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
23044 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
23045 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
23046 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
23047 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
23048 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
23049 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
23050 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
23051 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
23052 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
23053 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
23054 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
23055 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
23056 }
23057 ia_cmds.status = 0;
23058 break;
23059 @@ -2920,7 +2920,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
23060 if ((desc == 0) || (desc > iadev->num_tx_desc))
23061 {
23062 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
23063 - atomic_inc(&vcc->stats->tx);
23064 + atomic_inc_unchecked(&vcc->stats->tx);
23065 if (vcc->pop)
23066 vcc->pop(vcc, skb);
23067 else
23068 @@ -3025,14 +3025,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
23069 ATM_DESC(skb) = vcc->vci;
23070 skb_queue_tail(&iadev->tx_dma_q, skb);
23071
23072 - atomic_inc(&vcc->stats->tx);
23073 + atomic_inc_unchecked(&vcc->stats->tx);
23074 iadev->tx_pkt_cnt++;
23075 /* Increment transaction counter */
23076 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
23077
23078 #if 0
23079 /* add flow control logic */
23080 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
23081 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
23082 if (iavcc->vc_desc_cnt > 10) {
23083 vcc->tx_quota = vcc->tx_quota * 3 / 4;
23084 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
23085 diff -urNp linux-2.6.35.4/drivers/atm/lanai.c linux-2.6.35.4/drivers/atm/lanai.c
23086 --- linux-2.6.35.4/drivers/atm/lanai.c 2010-08-26 19:47:12.000000000 -0400
23087 +++ linux-2.6.35.4/drivers/atm/lanai.c 2010-09-17 20:12:09.000000000 -0400
23088 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct l
23089 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
23090 lanai_endtx(lanai, lvcc);
23091 lanai_free_skb(lvcc->tx.atmvcc, skb);
23092 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
23093 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
23094 }
23095
23096 /* Try to fill the buffer - don't call unless there is backlog */
23097 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc
23098 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
23099 __net_timestamp(skb);
23100 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
23101 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
23102 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
23103 out:
23104 lvcc->rx.buf.ptr = end;
23105 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
23106 @@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_d
23107 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
23108 "vcc %d\n", lanai->number, (unsigned int) s, vci);
23109 lanai->stats.service_rxnotaal5++;
23110 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
23111 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
23112 return 0;
23113 }
23114 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
23115 @@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_d
23116 int bytes;
23117 read_unlock(&vcc_sklist_lock);
23118 DPRINTK("got trashed rx pdu on vci %d\n", vci);
23119 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
23120 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
23121 lvcc->stats.x.aal5.service_trash++;
23122 bytes = (SERVICE_GET_END(s) * 16) -
23123 (((unsigned long) lvcc->rx.buf.ptr) -
23124 @@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_d
23125 }
23126 if (s & SERVICE_STREAM) {
23127 read_unlock(&vcc_sklist_lock);
23128 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
23129 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
23130 lvcc->stats.x.aal5.service_stream++;
23131 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
23132 "PDU on VCI %d!\n", lanai->number, vci);
23133 @@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_d
23134 return 0;
23135 }
23136 DPRINTK("got rx crc error on vci %d\n", vci);
23137 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
23138 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
23139 lvcc->stats.x.aal5.service_rxcrc++;
23140 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
23141 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
23142 diff -urNp linux-2.6.35.4/drivers/atm/nicstar.c linux-2.6.35.4/drivers/atm/nicstar.c
23143 --- linux-2.6.35.4/drivers/atm/nicstar.c 2010-08-26 19:47:12.000000000 -0400
23144 +++ linux-2.6.35.4/drivers/atm/nicstar.c 2010-09-17 20:12:09.000000000 -0400
23145 @@ -1722,7 +1722,7 @@ static int ns_send(struct atm_vcc *vcc,
23146 if ((vc = (vc_map *) vcc->dev_data) == NULL)
23147 {
23148 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
23149 - atomic_inc(&vcc->stats->tx_err);
23150 + atomic_inc_unchecked(&vcc->stats->tx_err);
23151 dev_kfree_skb_any(skb);
23152 return -EINVAL;
23153 }
23154 @@ -1730,7 +1730,7 @@ static int ns_send(struct atm_vcc *vcc,
23155 if (!vc->tx)
23156 {
23157 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
23158 - atomic_inc(&vcc->stats->tx_err);
23159 + atomic_inc_unchecked(&vcc->stats->tx_err);
23160 dev_kfree_skb_any(skb);
23161 return -EINVAL;
23162 }
23163 @@ -1738,7 +1738,7 @@ static int ns_send(struct atm_vcc *vcc,
23164 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
23165 {
23166 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
23167 - atomic_inc(&vcc->stats->tx_err);
23168 + atomic_inc_unchecked(&vcc->stats->tx_err);
23169 dev_kfree_skb_any(skb);
23170 return -EINVAL;
23171 }
23172 @@ -1746,7 +1746,7 @@ static int ns_send(struct atm_vcc *vcc,
23173 if (skb_shinfo(skb)->nr_frags != 0)
23174 {
23175 printk("nicstar%d: No scatter-gather yet.\n", card->index);
23176 - atomic_inc(&vcc->stats->tx_err);
23177 + atomic_inc_unchecked(&vcc->stats->tx_err);
23178 dev_kfree_skb_any(skb);
23179 return -EINVAL;
23180 }
23181 @@ -1791,11 +1791,11 @@ static int ns_send(struct atm_vcc *vcc,
23182
23183 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
23184 {
23185 - atomic_inc(&vcc->stats->tx_err);
23186 + atomic_inc_unchecked(&vcc->stats->tx_err);
23187 dev_kfree_skb_any(skb);
23188 return -EIO;
23189 }
23190 - atomic_inc(&vcc->stats->tx);
23191 + atomic_inc_unchecked(&vcc->stats->tx);
23192
23193 return 0;
23194 }
23195 @@ -2110,14 +2110,14 @@ static void dequeue_rx(ns_dev *card, ns_
23196 {
23197 printk("nicstar%d: Can't allocate buffers for aal0.\n",
23198 card->index);
23199 - atomic_add(i,&vcc->stats->rx_drop);
23200 + atomic_add_unchecked(i,&vcc->stats->rx_drop);
23201 break;
23202 }
23203 if (!atm_charge(vcc, sb->truesize))
23204 {
23205 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
23206 card->index);
23207 - atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
23208 + atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
23209 dev_kfree_skb_any(sb);
23210 break;
23211 }
23212 @@ -2132,7 +2132,7 @@ static void dequeue_rx(ns_dev *card, ns_
23213 ATM_SKB(sb)->vcc = vcc;
23214 __net_timestamp(sb);
23215 vcc->push(vcc, sb);
23216 - atomic_inc(&vcc->stats->rx);
23217 + atomic_inc_unchecked(&vcc->stats->rx);
23218 cell += ATM_CELL_PAYLOAD;
23219 }
23220
23221 @@ -2151,7 +2151,7 @@ static void dequeue_rx(ns_dev *card, ns_
23222 if (iovb == NULL)
23223 {
23224 printk("nicstar%d: Out of iovec buffers.\n", card->index);
23225 - atomic_inc(&vcc->stats->rx_drop);
23226 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23227 recycle_rx_buf(card, skb);
23228 return;
23229 }
23230 @@ -2181,7 +2181,7 @@ static void dequeue_rx(ns_dev *card, ns_
23231 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
23232 {
23233 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
23234 - atomic_inc(&vcc->stats->rx_err);
23235 + atomic_inc_unchecked(&vcc->stats->rx_err);
23236 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
23237 NS_SKB(iovb)->iovcnt = 0;
23238 iovb->len = 0;
23239 @@ -2201,7 +2201,7 @@ static void dequeue_rx(ns_dev *card, ns_
23240 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
23241 card->index);
23242 which_list(card, skb);
23243 - atomic_inc(&vcc->stats->rx_err);
23244 + atomic_inc_unchecked(&vcc->stats->rx_err);
23245 recycle_rx_buf(card, skb);
23246 vc->rx_iov = NULL;
23247 recycle_iov_buf(card, iovb);
23248 @@ -2215,7 +2215,7 @@ static void dequeue_rx(ns_dev *card, ns_
23249 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
23250 card->index);
23251 which_list(card, skb);
23252 - atomic_inc(&vcc->stats->rx_err);
23253 + atomic_inc_unchecked(&vcc->stats->rx_err);
23254 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
23255 NS_SKB(iovb)->iovcnt);
23256 vc->rx_iov = NULL;
23257 @@ -2239,7 +2239,7 @@ static void dequeue_rx(ns_dev *card, ns_
23258 printk(" - PDU size mismatch.\n");
23259 else
23260 printk(".\n");
23261 - atomic_inc(&vcc->stats->rx_err);
23262 + atomic_inc_unchecked(&vcc->stats->rx_err);
23263 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
23264 NS_SKB(iovb)->iovcnt);
23265 vc->rx_iov = NULL;
23266 @@ -2255,7 +2255,7 @@ static void dequeue_rx(ns_dev *card, ns_
23267 if (!atm_charge(vcc, skb->truesize))
23268 {
23269 push_rxbufs(card, skb);
23270 - atomic_inc(&vcc->stats->rx_drop);
23271 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23272 }
23273 else
23274 {
23275 @@ -2267,7 +2267,7 @@ static void dequeue_rx(ns_dev *card, ns_
23276 ATM_SKB(skb)->vcc = vcc;
23277 __net_timestamp(skb);
23278 vcc->push(vcc, skb);
23279 - atomic_inc(&vcc->stats->rx);
23280 + atomic_inc_unchecked(&vcc->stats->rx);
23281 }
23282 }
23283 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
23284 @@ -2282,7 +2282,7 @@ static void dequeue_rx(ns_dev *card, ns_
23285 if (!atm_charge(vcc, sb->truesize))
23286 {
23287 push_rxbufs(card, sb);
23288 - atomic_inc(&vcc->stats->rx_drop);
23289 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23290 }
23291 else
23292 {
23293 @@ -2294,7 +2294,7 @@ static void dequeue_rx(ns_dev *card, ns_
23294 ATM_SKB(sb)->vcc = vcc;
23295 __net_timestamp(sb);
23296 vcc->push(vcc, sb);
23297 - atomic_inc(&vcc->stats->rx);
23298 + atomic_inc_unchecked(&vcc->stats->rx);
23299 }
23300
23301 push_rxbufs(card, skb);
23302 @@ -2305,7 +2305,7 @@ static void dequeue_rx(ns_dev *card, ns_
23303 if (!atm_charge(vcc, skb->truesize))
23304 {
23305 push_rxbufs(card, skb);
23306 - atomic_inc(&vcc->stats->rx_drop);
23307 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23308 }
23309 else
23310 {
23311 @@ -2319,7 +2319,7 @@ static void dequeue_rx(ns_dev *card, ns_
23312 ATM_SKB(skb)->vcc = vcc;
23313 __net_timestamp(skb);
23314 vcc->push(vcc, skb);
23315 - atomic_inc(&vcc->stats->rx);
23316 + atomic_inc_unchecked(&vcc->stats->rx);
23317 }
23318
23319 push_rxbufs(card, sb);
23320 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev *card, ns_
23321 if (hb == NULL)
23322 {
23323 printk("nicstar%d: Out of huge buffers.\n", card->index);
23324 - atomic_inc(&vcc->stats->rx_drop);
23325 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23326 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
23327 NS_SKB(iovb)->iovcnt);
23328 vc->rx_iov = NULL;
23329 @@ -2392,7 +2392,7 @@ static void dequeue_rx(ns_dev *card, ns_
23330 }
23331 else
23332 dev_kfree_skb_any(hb);
23333 - atomic_inc(&vcc->stats->rx_drop);
23334 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23335 }
23336 else
23337 {
23338 @@ -2426,7 +2426,7 @@ static void dequeue_rx(ns_dev *card, ns_
23339 #endif /* NS_USE_DESTRUCTORS */
23340 __net_timestamp(hb);
23341 vcc->push(vcc, hb);
23342 - atomic_inc(&vcc->stats->rx);
23343 + atomic_inc_unchecked(&vcc->stats->rx);
23344 }
23345 }
23346
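The nicstar hunks above, like the other ATM drivers that follow, move the rx/tx statistics counters onto the unchecked atomic API so that PaX's CONFIG_PAX_REFCOUNT overflow detection does not treat a wrapping traffic counter as a reference-count overflow. The authoritative definitions live in the asm/atomic.h hunks elsewhere in this patch; as a rough, illustrative sketch only (not part of the patch), the fallback when PAX_REFCOUNT is disabled collapses onto the ordinary atomic ops:

/* Illustrative sketch only: the _unchecked counter API when
 * CONFIG_PAX_REFCOUNT is not enabled. */
#ifndef CONFIG_PAX_REFCOUNT
typedef atomic_t atomic_unchecked_t;
#define atomic_read_unchecked(v)	atomic_read(v)
#define atomic_set_unchecked(v, i)	atomic_set((v), (i))
#define atomic_inc_unchecked(v)		atomic_inc(v)
#define atomic_dec_unchecked(v)		atomic_dec(v)
#define atomic_add_unchecked(i, v)	atomic_add((i), (v))
#endif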
23347 diff -urNp linux-2.6.35.4/drivers/atm/solos-pci.c linux-2.6.35.4/drivers/atm/solos-pci.c
23348 --- linux-2.6.35.4/drivers/atm/solos-pci.c 2010-08-26 19:47:12.000000000 -0400
23349 +++ linux-2.6.35.4/drivers/atm/solos-pci.c 2010-09-17 20:12:09.000000000 -0400
23350 @@ -715,7 +715,7 @@ void solos_bh(unsigned long card_arg)
23351 }
23352 atm_charge(vcc, skb->truesize);
23353 vcc->push(vcc, skb);
23354 - atomic_inc(&vcc->stats->rx);
23355 + atomic_inc_unchecked(&vcc->stats->rx);
23356 break;
23357
23358 case PKT_STATUS:
23359 @@ -1023,7 +1023,7 @@ static uint32_t fpga_tx(struct solos_car
23360 vcc = SKB_CB(oldskb)->vcc;
23361
23362 if (vcc) {
23363 - atomic_inc(&vcc->stats->tx);
23364 + atomic_inc_unchecked(&vcc->stats->tx);
23365 solos_pop(vcc, oldskb);
23366 } else
23367 dev_kfree_skb_irq(oldskb);
23368 diff -urNp linux-2.6.35.4/drivers/atm/suni.c linux-2.6.35.4/drivers/atm/suni.c
23369 --- linux-2.6.35.4/drivers/atm/suni.c 2010-08-26 19:47:12.000000000 -0400
23370 +++ linux-2.6.35.4/drivers/atm/suni.c 2010-09-17 20:12:09.000000000 -0400
23371 @@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
23372
23373
23374 #define ADD_LIMITED(s,v) \
23375 - atomic_add((v),&stats->s); \
23376 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
23377 + atomic_add_unchecked((v),&stats->s); \
23378 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
23379
23380
23381 static void suni_hz(unsigned long from_timer)
23382 diff -urNp linux-2.6.35.4/drivers/atm/uPD98402.c linux-2.6.35.4/drivers/atm/uPD98402.c
23383 --- linux-2.6.35.4/drivers/atm/uPD98402.c 2010-08-26 19:47:12.000000000 -0400
23384 +++ linux-2.6.35.4/drivers/atm/uPD98402.c 2010-09-17 20:12:09.000000000 -0400
23385 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *d
23386 struct sonet_stats tmp;
23387 int error = 0;
23388
23389 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
23390 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
23391 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
23392 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
23393 if (zero && !error) {
23394 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev
23395
23396
23397 #define ADD_LIMITED(s,v) \
23398 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
23399 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
23400 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
23401 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
23402 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
23403 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
23404
23405
23406 static void stat_event(struct atm_dev *dev)
23407 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev
23408 if (reason & uPD98402_INT_PFM) stat_event(dev);
23409 if (reason & uPD98402_INT_PCO) {
23410 (void) GET(PCOCR); /* clear interrupt cause */
23411 - atomic_add(GET(HECCT),
23412 + atomic_add_unchecked(GET(HECCT),
23413 &PRIV(dev)->sonet_stats.uncorr_hcs);
23414 }
23415 if ((reason & uPD98402_INT_RFO) &&
23416 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev
23417 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
23418 uPD98402_INT_LOS),PIMR); /* enable them */
23419 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
23420 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
23421 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
23422 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
23423 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
23424 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
23425 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
23426 return 0;
23427 }
23428
23429 diff -urNp linux-2.6.35.4/drivers/atm/zatm.c linux-2.6.35.4/drivers/atm/zatm.c
23430 --- linux-2.6.35.4/drivers/atm/zatm.c 2010-08-26 19:47:12.000000000 -0400
23431 +++ linux-2.6.35.4/drivers/atm/zatm.c 2010-09-17 20:12:09.000000000 -0400
23432 @@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
23433 }
23434 if (!size) {
23435 dev_kfree_skb_irq(skb);
23436 - if (vcc) atomic_inc(&vcc->stats->rx_err);
23437 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
23438 continue;
23439 }
23440 if (!atm_charge(vcc,skb->truesize)) {
23441 @@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
23442 skb->len = size;
23443 ATM_SKB(skb)->vcc = vcc;
23444 vcc->push(vcc,skb);
23445 - atomic_inc(&vcc->stats->rx);
23446 + atomic_inc_unchecked(&vcc->stats->rx);
23447 }
23448 zout(pos & 0xffff,MTA(mbx));
23449 #if 0 /* probably a stupid idea */
23450 @@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
23451 skb_queue_head(&zatm_vcc->backlog,skb);
23452 break;
23453 }
23454 - atomic_inc(&vcc->stats->tx);
23455 + atomic_inc_unchecked(&vcc->stats->tx);
23456 wake_up(&zatm_vcc->tx_wait);
23457 }
23458
23459 diff -urNp linux-2.6.35.4/drivers/char/agp/frontend.c linux-2.6.35.4/drivers/char/agp/frontend.c
23460 --- linux-2.6.35.4/drivers/char/agp/frontend.c 2010-08-26 19:47:12.000000000 -0400
23461 +++ linux-2.6.35.4/drivers/char/agp/frontend.c 2010-09-17 20:12:09.000000000 -0400
23462 @@ -818,7 +818,7 @@ static int agpioc_reserve_wrap(struct ag
23463 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
23464 return -EFAULT;
23465
23466 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
23467 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
23468 return -EFAULT;
23469
23470 client = agp_find_client_by_pid(reserve.pid);
23471 diff -urNp linux-2.6.35.4/drivers/char/agp/intel-agp.c linux-2.6.35.4/drivers/char/agp/intel-agp.c
23472 --- linux-2.6.35.4/drivers/char/agp/intel-agp.c 2010-08-26 19:47:12.000000000 -0400
23473 +++ linux-2.6.35.4/drivers/char/agp/intel-agp.c 2010-09-17 20:12:09.000000000 -0400
23474 @@ -1036,7 +1036,7 @@ static struct pci_device_id agp_intel_pc
23475 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
23476 ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB),
23477 ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB),
23478 - { }
23479 + { 0, 0, 0, 0, 0, 0, 0 }
23480 };
23481
23482 MODULE_DEVICE_TABLE(pci, agp_intel_pci_table);
23483 diff -urNp linux-2.6.35.4/drivers/char/hpet.c linux-2.6.35.4/drivers/char/hpet.c
23484 --- linux-2.6.35.4/drivers/char/hpet.c 2010-08-26 19:47:12.000000000 -0400
23485 +++ linux-2.6.35.4/drivers/char/hpet.c 2010-09-17 20:12:09.000000000 -0400
23486 @@ -429,7 +429,7 @@ static int hpet_release(struct inode *in
23487 return 0;
23488 }
23489
23490 -static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
23491 +static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
23492
23493 static long hpet_ioctl(struct file *file, unsigned int cmd,
23494 unsigned long arg)
23495 @@ -553,7 +553,7 @@ static inline unsigned long hpet_time_di
23496 }
23497
23498 static int
23499 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
23500 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
23501 {
23502 struct hpet_timer __iomem *timer;
23503 struct hpet __iomem *hpet;
23504 @@ -998,7 +998,7 @@ static struct acpi_driver hpet_acpi_driv
23505 },
23506 };
23507
23508 -static struct miscdevice hpet_misc = { HPET_MINOR, "hpet", &hpet_fops };
23509 +static struct miscdevice hpet_misc = { HPET_MINOR, "hpet", &hpet_fops, {NULL, NULL}, NULL, NULL };
23510
23511 static int __init hpet_init(void)
23512 {
23513 diff -urNp linux-2.6.35.4/drivers/char/hvc_console.h linux-2.6.35.4/drivers/char/hvc_console.h
23514 --- linux-2.6.35.4/drivers/char/hvc_console.h 2010-08-26 19:47:12.000000000 -0400
23515 +++ linux-2.6.35.4/drivers/char/hvc_console.h 2010-09-17 20:12:09.000000000 -0400
23516 @@ -82,6 +82,7 @@ extern int hvc_instantiate(uint32_t vter
23517 /* register a vterm for hvc tty operation (module_init or hotplug add) */
23518 extern struct hvc_struct * hvc_alloc(uint32_t vtermno, int data,
23519 const struct hv_ops *ops, int outbuf_size);
23520 +
23521 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
23522 extern int hvc_remove(struct hvc_struct *hp);
23523
23524 diff -urNp linux-2.6.35.4/drivers/char/hvcs.c linux-2.6.35.4/drivers/char/hvcs.c
23525 --- linux-2.6.35.4/drivers/char/hvcs.c 2010-08-26 19:47:12.000000000 -0400
23526 +++ linux-2.6.35.4/drivers/char/hvcs.c 2010-09-17 20:12:09.000000000 -0400
23527 @@ -270,7 +270,7 @@ struct hvcs_struct {
23528 unsigned int index;
23529
23530 struct tty_struct *tty;
23531 - int open_count;
23532 + atomic_t open_count;
23533
23534 /*
23535 * Used to tell the driver kernel_thread what operations need to take
23536 @@ -420,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(st
23537
23538 spin_lock_irqsave(&hvcsd->lock, flags);
23539
23540 - if (hvcsd->open_count > 0) {
23541 + if (atomic_read(&hvcsd->open_count) > 0) {
23542 spin_unlock_irqrestore(&hvcsd->lock, flags);
23543 printk(KERN_INFO "HVCS: vterm state unchanged. "
23544 "The hvcs device node is still in use.\n");
23545 @@ -1136,7 +1136,7 @@ static int hvcs_open(struct tty_struct *
23546 if ((retval = hvcs_partner_connect(hvcsd)))
23547 goto error_release;
23548
23549 - hvcsd->open_count = 1;
23550 + atomic_set(&hvcsd->open_count, 1);
23551 hvcsd->tty = tty;
23552 tty->driver_data = hvcsd;
23553
23554 @@ -1170,7 +1170,7 @@ fast_open:
23555
23556 spin_lock_irqsave(&hvcsd->lock, flags);
23557 kref_get(&hvcsd->kref);
23558 - hvcsd->open_count++;
23559 + atomic_inc(&hvcsd->open_count);
23560 hvcsd->todo_mask |= HVCS_SCHED_READ;
23561 spin_unlock_irqrestore(&hvcsd->lock, flags);
23562
23563 @@ -1214,7 +1214,7 @@ static void hvcs_close(struct tty_struct
23564 hvcsd = tty->driver_data;
23565
23566 spin_lock_irqsave(&hvcsd->lock, flags);
23567 - if (--hvcsd->open_count == 0) {
23568 + if (atomic_dec_and_test(&hvcsd->open_count)) {
23569
23570 vio_disable_interrupts(hvcsd->vdev);
23571
23572 @@ -1240,10 +1240,10 @@ static void hvcs_close(struct tty_struct
23573 free_irq(irq, hvcsd);
23574 kref_put(&hvcsd->kref, destroy_hvcs_struct);
23575 return;
23576 - } else if (hvcsd->open_count < 0) {
23577 + } else if (atomic_read(&hvcsd->open_count) < 0) {
23578 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
23579 " is missmanaged.\n",
23580 - hvcsd->vdev->unit_address, hvcsd->open_count);
23581 + hvcsd->vdev->unit_address, atomic_read(&hvcsd->open_count));
23582 }
23583
23584 spin_unlock_irqrestore(&hvcsd->lock, flags);
23585 @@ -1259,7 +1259,7 @@ static void hvcs_hangup(struct tty_struc
23586
23587 spin_lock_irqsave(&hvcsd->lock, flags);
23588 /* Preserve this so that we know how many kref refs to put */
23589 - temp_open_count = hvcsd->open_count;
23590 + temp_open_count = atomic_read(&hvcsd->open_count);
23591
23592 /*
23593 * Don't kref put inside the spinlock because the destruction
23594 @@ -1274,7 +1274,7 @@ static void hvcs_hangup(struct tty_struc
23595 hvcsd->tty->driver_data = NULL;
23596 hvcsd->tty = NULL;
23597
23598 - hvcsd->open_count = 0;
23599 + atomic_set(&hvcsd->open_count, 0);
23600
23601 /* This will drop any buffered data on the floor which is OK in a hangup
23602 * scenario. */
23603 @@ -1345,7 +1345,7 @@ static int hvcs_write(struct tty_struct
23604 * the middle of a write operation? This is a crummy place to do this
23605 * but we want to keep it all in the spinlock.
23606 */
23607 - if (hvcsd->open_count <= 0) {
23608 + if (atomic_read(&hvcsd->open_count) <= 0) {
23609 spin_unlock_irqrestore(&hvcsd->lock, flags);
23610 return -ENODEV;
23611 }
23612 @@ -1419,7 +1419,7 @@ static int hvcs_write_room(struct tty_st
23613 {
23614 struct hvcs_struct *hvcsd = tty->driver_data;
23615
23616 - if (!hvcsd || hvcsd->open_count <= 0)
23617 + if (!hvcsd || atomic_read(&hvcsd->open_count) <= 0)
23618 return 0;
23619
23620 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
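The hvcs changes convert open_count from a plain int that relied on the surrounding spinlock into an atomic_t, so the open/close bookkeeping cannot be torn even if a path touches it outside the lock. A minimal sketch of the resulting pattern, using a hypothetical struct and helpers that are not taken from the driver:

#include <linux/kernel.h>
#include <asm/atomic.h>

/* Hypothetical example of the open-count pattern after the conversion. */
struct example_port {
	atomic_t open_count;
};

static void example_open(struct example_port *p)
{
	if (atomic_inc_return(&p->open_count) == 1)
		pr_info("first opener: one-time setup\n");
}

static void example_close(struct example_port *p)
{
	if (atomic_dec_and_test(&p->open_count))
		pr_info("last closer: tear down\n");
}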
23621 diff -urNp linux-2.6.35.4/drivers/char/ipmi/ipmi_msghandler.c linux-2.6.35.4/drivers/char/ipmi/ipmi_msghandler.c
23622 --- linux-2.6.35.4/drivers/char/ipmi/ipmi_msghandler.c 2010-08-26 19:47:12.000000000 -0400
23623 +++ linux-2.6.35.4/drivers/char/ipmi/ipmi_msghandler.c 2010-09-17 20:12:09.000000000 -0400
23624 @@ -414,7 +414,7 @@ struct ipmi_smi {
23625 struct proc_dir_entry *proc_dir;
23626 char proc_dir_name[10];
23627
23628 - atomic_t stats[IPMI_NUM_STATS];
23629 + atomic_unchecked_t stats[IPMI_NUM_STATS];
23630
23631 /*
23632 * run_to_completion duplicate of smb_info, smi_info
23633 @@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
23634
23635
23636 #define ipmi_inc_stat(intf, stat) \
23637 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
23638 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
23639 #define ipmi_get_stat(intf, stat) \
23640 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
23641 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
23642
23643 static int is_lan_addr(struct ipmi_addr *addr)
23644 {
23645 @@ -2817,7 +2817,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
23646 INIT_LIST_HEAD(&intf->cmd_rcvrs);
23647 init_waitqueue_head(&intf->waitq);
23648 for (i = 0; i < IPMI_NUM_STATS; i++)
23649 - atomic_set(&intf->stats[i], 0);
23650 + atomic_set_unchecked(&intf->stats[i], 0);
23651
23652 intf->proc_dir = NULL;
23653
23654 diff -urNp linux-2.6.35.4/drivers/char/ipmi/ipmi_si_intf.c linux-2.6.35.4/drivers/char/ipmi/ipmi_si_intf.c
23655 --- linux-2.6.35.4/drivers/char/ipmi/ipmi_si_intf.c 2010-08-26 19:47:12.000000000 -0400
23656 +++ linux-2.6.35.4/drivers/char/ipmi/ipmi_si_intf.c 2010-09-17 20:12:09.000000000 -0400
23657 @@ -286,7 +286,7 @@ struct smi_info {
23658 unsigned char slave_addr;
23659
23660 /* Counters and things for the proc filesystem. */
23661 - atomic_t stats[SI_NUM_STATS];
23662 + atomic_unchecked_t stats[SI_NUM_STATS];
23663
23664 struct task_struct *thread;
23665
23666 @@ -294,9 +294,9 @@ struct smi_info {
23667 };
23668
23669 #define smi_inc_stat(smi, stat) \
23670 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
23671 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
23672 #define smi_get_stat(smi, stat) \
23673 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
23674 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
23675
23676 #define SI_MAX_PARMS 4
23677
23678 @@ -3143,7 +3143,7 @@ static int try_smi_init(struct smi_info
23679 atomic_set(&new_smi->req_events, 0);
23680 new_smi->run_to_completion = 0;
23681 for (i = 0; i < SI_NUM_STATS; i++)
23682 - atomic_set(&new_smi->stats[i], 0);
23683 + atomic_set_unchecked(&new_smi->stats[i], 0);
23684
23685 new_smi->interrupt_disabled = 1;
23686 atomic_set(&new_smi->stop_operation, 0);
23687 diff -urNp linux-2.6.35.4/drivers/char/keyboard.c linux-2.6.35.4/drivers/char/keyboard.c
23688 --- linux-2.6.35.4/drivers/char/keyboard.c 2010-08-26 19:47:12.000000000 -0400
23689 +++ linux-2.6.35.4/drivers/char/keyboard.c 2010-09-17 20:12:37.000000000 -0400
23690 @@ -640,6 +640,16 @@ static void k_spec(struct vc_data *vc, u
23691 kbd->kbdmode == VC_MEDIUMRAW) &&
23692 value != KVAL(K_SAK))
23693 return; /* SAK is allowed even in raw mode */
23694 +
23695 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
23696 + {
23697 + void *func = fn_handler[value];
23698 + if (func == fn_show_state || func == fn_show_ptregs ||
23699 + func == fn_show_mem)
23700 + return;
23701 + }
23702 +#endif
23703 +
23704 fn_handler[value](vc);
23705 }
23706
23707 @@ -1392,7 +1402,7 @@ static const struct input_device_id kbd_
23708 .evbit = { BIT_MASK(EV_SND) },
23709 },
23710
23711 - { }, /* Terminating entry */
23712 + { 0 }, /* Terminating entry */
23713 };
23714
23715 MODULE_DEVICE_TABLE(input, kbd_ids);
23716 diff -urNp linux-2.6.35.4/drivers/char/mem.c linux-2.6.35.4/drivers/char/mem.c
23717 --- linux-2.6.35.4/drivers/char/mem.c 2010-08-26 19:47:12.000000000 -0400
23718 +++ linux-2.6.35.4/drivers/char/mem.c 2010-09-17 20:12:37.000000000 -0400
23719 @@ -18,6 +18,7 @@
23720 #include <linux/raw.h>
23721 #include <linux/tty.h>
23722 #include <linux/capability.h>
23723 +#include <linux/security.h>
23724 #include <linux/ptrace.h>
23725 #include <linux/device.h>
23726 #include <linux/highmem.h>
23727 @@ -34,6 +35,10 @@
23728 # include <linux/efi.h>
23729 #endif
23730
23731 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
23732 +extern struct file_operations grsec_fops;
23733 +#endif
23734 +
23735 static inline unsigned long size_inside_page(unsigned long start,
23736 unsigned long size)
23737 {
23738 @@ -120,6 +125,7 @@ static ssize_t read_mem(struct file *fil
23739
23740 while (count > 0) {
23741 unsigned long remaining;
23742 + char *temp;
23743
23744 sz = size_inside_page(p, count);
23745
23746 @@ -135,7 +141,23 @@ static ssize_t read_mem(struct file *fil
23747 if (!ptr)
23748 return -EFAULT;
23749
23750 - remaining = copy_to_user(buf, ptr, sz);
23751 +#ifdef CONFIG_PAX_USERCOPY
23752 + temp = kmalloc(sz, GFP_KERNEL);
23753 + if (!temp) {
23754 + unxlate_dev_mem_ptr(p, ptr);
23755 + return -ENOMEM;
23756 + }
23757 + memcpy(temp, ptr, sz);
23758 +#else
23759 + temp = ptr;
23760 +#endif
23761 +
23762 + remaining = copy_to_user(buf, temp, sz);
23763 +
23764 +#ifdef CONFIG_PAX_USERCOPY
23765 + kfree(temp);
23766 +#endif
23767 +
23768 unxlate_dev_mem_ptr(p, ptr);
23769 if (remaining)
23770 return -EFAULT;
23771 @@ -161,6 +183,11 @@ static ssize_t write_mem(struct file *fi
23772 if (!valid_phys_addr_range(p, count))
23773 return -EFAULT;
23774
23775 +#ifdef CONFIG_GRKERNSEC_KMEM
23776 + gr_handle_mem_write();
23777 + return -EPERM;
23778 +#endif
23779 +
23780 written = 0;
23781
23782 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
23783 @@ -316,6 +343,11 @@ static int mmap_mem(struct file *file, s
23784 &vma->vm_page_prot))
23785 return -EINVAL;
23786
23787 +#ifdef CONFIG_GRKERNSEC_KMEM
23788 + if (gr_handle_mem_mmap(vma->vm_pgoff << PAGE_SHIFT, vma))
23789 + return -EPERM;
23790 +#endif
23791 +
23792 vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
23793 size,
23794 vma->vm_page_prot);
23795 @@ -398,9 +430,8 @@ static ssize_t read_kmem(struct file *fi
23796 size_t count, loff_t *ppos)
23797 {
23798 unsigned long p = *ppos;
23799 - ssize_t low_count, read, sz;
23800 + ssize_t low_count, read, sz, err = 0;
23801 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
23802 - int err = 0;
23803
23804 read = 0;
23805 if (p < (unsigned long) high_memory) {
23806 @@ -422,6 +453,8 @@ static ssize_t read_kmem(struct file *fi
23807 }
23808 #endif
23809 while (low_count > 0) {
23810 + char *temp;
23811 +
23812 sz = size_inside_page(p, low_count);
23813
23814 /*
23815 @@ -431,7 +464,22 @@ static ssize_t read_kmem(struct file *fi
23816 */
23817 kbuf = xlate_dev_kmem_ptr((char *)p);
23818
23819 - if (copy_to_user(buf, kbuf, sz))
23820 +#ifdef CONFIG_PAX_USERCOPY
23821 + temp = kmalloc(sz, GFP_KERNEL);
23822 + if (!temp)
23823 + return -ENOMEM;
23824 + memcpy(temp, kbuf, sz);
23825 +#else
23826 + temp = kbuf;
23827 +#endif
23828 +
23829 + err = copy_to_user(buf, temp, sz);
23830 +
23831 +#ifdef CONFIG_PAX_USERCOPY
23832 + kfree(temp);
23833 +#endif
23834 +
23835 + if (err)
23836 return -EFAULT;
23837 buf += sz;
23838 p += sz;
23839 @@ -530,6 +578,11 @@ static ssize_t write_kmem(struct file *f
23840 char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
23841 int err = 0;
23842
23843 +#ifdef CONFIG_GRKERNSEC_KMEM
23844 + gr_handle_kmem_write();
23845 + return -EPERM;
23846 +#endif
23847 +
23848 if (p < (unsigned long) high_memory) {
23849 unsigned long to_write = min_t(unsigned long, count,
23850 (unsigned long)high_memory - p);
23851 @@ -731,6 +784,16 @@ static loff_t memory_lseek(struct file *
23852
23853 static int open_port(struct inode * inode, struct file * filp)
23854 {
23855 +#ifdef CONFIG_GRKERNSEC_KMEM
23856 + gr_handle_open_port();
23857 + return -EPERM;
23858 +#endif
23859 +
23860 + return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
23861 +}
23862 +
23863 +static int open_mem(struct inode * inode, struct file * filp)
23864 +{
23865 return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
23866 }
23867
23868 @@ -738,7 +801,6 @@ static int open_port(struct inode * inod
23869 #define full_lseek null_lseek
23870 #define write_zero write_null
23871 #define read_full read_zero
23872 -#define open_mem open_port
23873 #define open_kmem open_mem
23874 #define open_oldmem open_mem
23875
23876 @@ -854,6 +916,9 @@ static const struct memdev {
23877 #ifdef CONFIG_CRASH_DUMP
23878 [12] = { "oldmem", 0, &oldmem_fops, NULL },
23879 #endif
23880 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
23881 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
23882 +#endif
23883 };
23884
23885 static int memory_open(struct inode *inode, struct file *filp)
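Under CONFIG_PAX_USERCOPY the read_mem()/read_kmem() paths above stop copying straight from the /dev/mem or kernel mapping to userspace; they stage the data through a freshly kmalloc'd buffer so the usercopy checker sees a slab object with a known size. Roughly, the pattern factored into a hypothetical helper (name and signature are illustrative, not from the patch):

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/* Hypothetical helper sketching the bounce-buffer pattern used above. */
static ssize_t bounce_copy_to_user(char __user *buf, const void *src, size_t sz)
{
	void *tmp = kmalloc(sz, GFP_KERNEL);
	unsigned long left;

	if (!tmp)
		return -ENOMEM;
	memcpy(tmp, src, sz);			/* stage into a slab object */
	left = copy_to_user(buf, tmp, sz);	/* bounded, checkable copy */
	kfree(tmp);
	return left ? -EFAULT : 0;
}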
23886 diff -urNp linux-2.6.35.4/drivers/char/n_tty.c linux-2.6.35.4/drivers/char/n_tty.c
23887 --- linux-2.6.35.4/drivers/char/n_tty.c 2010-08-26 19:47:12.000000000 -0400
23888 +++ linux-2.6.35.4/drivers/char/n_tty.c 2010-09-17 20:12:09.000000000 -0400
23889 @@ -2105,6 +2105,7 @@ void n_tty_inherit_ops(struct tty_ldisc_
23890 {
23891 *ops = tty_ldisc_N_TTY;
23892 ops->owner = NULL;
23893 - ops->refcount = ops->flags = 0;
23894 + atomic_set(&ops->refcount, 0);
23895 + ops->flags = 0;
23896 }
23897 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
23898 diff -urNp linux-2.6.35.4/drivers/char/nvram.c linux-2.6.35.4/drivers/char/nvram.c
23899 --- linux-2.6.35.4/drivers/char/nvram.c 2010-08-26 19:47:12.000000000 -0400
23900 +++ linux-2.6.35.4/drivers/char/nvram.c 2010-09-17 20:12:09.000000000 -0400
23901 @@ -245,7 +245,7 @@ static ssize_t nvram_read(struct file *f
23902
23903 spin_unlock_irq(&rtc_lock);
23904
23905 - if (copy_to_user(buf, contents, tmp - contents))
23906 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
23907 return -EFAULT;
23908
23909 *ppos = i;
23910 @@ -434,7 +434,10 @@ static const struct file_operations nvra
23911 static struct miscdevice nvram_dev = {
23912 NVRAM_MINOR,
23913 "nvram",
23914 - &nvram_fops
23915 + &nvram_fops,
23916 + {NULL, NULL},
23917 + NULL,
23918 + NULL
23919 };
23920
23921 static int __init nvram_init(void)
23922 diff -urNp linux-2.6.35.4/drivers/char/pcmcia/ipwireless/tty.c linux-2.6.35.4/drivers/char/pcmcia/ipwireless/tty.c
23923 --- linux-2.6.35.4/drivers/char/pcmcia/ipwireless/tty.c 2010-08-26 19:47:12.000000000 -0400
23924 +++ linux-2.6.35.4/drivers/char/pcmcia/ipwireless/tty.c 2010-09-17 20:12:09.000000000 -0400
23925 @@ -51,7 +51,7 @@ struct ipw_tty {
23926 int tty_type;
23927 struct ipw_network *network;
23928 struct tty_struct *linux_tty;
23929 - int open_count;
23930 + atomic_t open_count;
23931 unsigned int control_lines;
23932 struct mutex ipw_tty_mutex;
23933 int tx_bytes_queued;
23934 @@ -127,10 +127,10 @@ static int ipw_open(struct tty_struct *l
23935 mutex_unlock(&tty->ipw_tty_mutex);
23936 return -ENODEV;
23937 }
23938 - if (tty->open_count == 0)
23939 + if (atomic_read(&tty->open_count) == 0)
23940 tty->tx_bytes_queued = 0;
23941
23942 - tty->open_count++;
23943 + atomic_inc(&tty->open_count);
23944
23945 tty->linux_tty = linux_tty;
23946 linux_tty->driver_data = tty;
23947 @@ -146,9 +146,7 @@ static int ipw_open(struct tty_struct *l
23948
23949 static void do_ipw_close(struct ipw_tty *tty)
23950 {
23951 - tty->open_count--;
23952 -
23953 - if (tty->open_count == 0) {
23954 + if (atomic_dec_return(&tty->open_count) == 0) {
23955 struct tty_struct *linux_tty = tty->linux_tty;
23956
23957 if (linux_tty != NULL) {
23958 @@ -169,7 +167,7 @@ static void ipw_hangup(struct tty_struct
23959 return;
23960
23961 mutex_lock(&tty->ipw_tty_mutex);
23962 - if (tty->open_count == 0) {
23963 + if (atomic_read(&tty->open_count) == 0) {
23964 mutex_unlock(&tty->ipw_tty_mutex);
23965 return;
23966 }
23967 @@ -198,7 +196,7 @@ void ipwireless_tty_received(struct ipw_
23968 return;
23969 }
23970
23971 - if (!tty->open_count) {
23972 + if (!atomic_read(&tty->open_count)) {
23973 mutex_unlock(&tty->ipw_tty_mutex);
23974 return;
23975 }
23976 @@ -240,7 +238,7 @@ static int ipw_write(struct tty_struct *
23977 return -ENODEV;
23978
23979 mutex_lock(&tty->ipw_tty_mutex);
23980 - if (!tty->open_count) {
23981 + if (!atomic_read(&tty->open_count)) {
23982 mutex_unlock(&tty->ipw_tty_mutex);
23983 return -EINVAL;
23984 }
23985 @@ -280,7 +278,7 @@ static int ipw_write_room(struct tty_str
23986 if (!tty)
23987 return -ENODEV;
23988
23989 - if (!tty->open_count)
23990 + if (!atomic_read(&tty->open_count))
23991 return -EINVAL;
23992
23993 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
23994 @@ -322,7 +320,7 @@ static int ipw_chars_in_buffer(struct tt
23995 if (!tty)
23996 return 0;
23997
23998 - if (!tty->open_count)
23999 + if (!atomic_read(&tty->open_count))
24000 return 0;
24001
24002 return tty->tx_bytes_queued;
24003 @@ -403,7 +401,7 @@ static int ipw_tiocmget(struct tty_struc
24004 if (!tty)
24005 return -ENODEV;
24006
24007 - if (!tty->open_count)
24008 + if (!atomic_read(&tty->open_count))
24009 return -EINVAL;
24010
24011 return get_control_lines(tty);
24012 @@ -419,7 +417,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
24013 if (!tty)
24014 return -ENODEV;
24015
24016 - if (!tty->open_count)
24017 + if (!atomic_read(&tty->open_count))
24018 return -EINVAL;
24019
24020 return set_control_lines(tty, set, clear);
24021 @@ -433,7 +431,7 @@ static int ipw_ioctl(struct tty_struct *
24022 if (!tty)
24023 return -ENODEV;
24024
24025 - if (!tty->open_count)
24026 + if (!atomic_read(&tty->open_count))
24027 return -EINVAL;
24028
24029 /* FIXME: Exactly how is the tty object locked here .. */
24030 @@ -582,7 +580,7 @@ void ipwireless_tty_free(struct ipw_tty
24031 against a parallel ioctl etc */
24032 mutex_lock(&ttyj->ipw_tty_mutex);
24033 }
24034 - while (ttyj->open_count)
24035 + while (atomic_read(&ttyj->open_count))
24036 do_ipw_close(ttyj);
24037 ipwireless_disassociate_network_ttys(network,
24038 ttyj->channel_idx);
24039 diff -urNp linux-2.6.35.4/drivers/char/pty.c linux-2.6.35.4/drivers/char/pty.c
24040 --- linux-2.6.35.4/drivers/char/pty.c 2010-08-26 19:47:12.000000000 -0400
24041 +++ linux-2.6.35.4/drivers/char/pty.c 2010-09-17 20:12:09.000000000 -0400
24042 @@ -677,7 +677,18 @@ static int ptmx_open(struct inode *inode
24043 return ret;
24044 }
24045
24046 -static struct file_operations ptmx_fops;
24047 +static const struct file_operations ptmx_fops = {
24048 + .llseek = no_llseek,
24049 + .read = tty_read,
24050 + .write = tty_write,
24051 + .poll = tty_poll,
24052 + .unlocked_ioctl = tty_ioctl,
24053 + .compat_ioctl = tty_compat_ioctl,
24054 + .open = ptmx_open,
24055 + .release = tty_release,
24056 + .fasync = tty_fasync,
24057 +};
24058 +
24059
24060 static void __init unix98_pty_init(void)
24061 {
24062 @@ -731,9 +742,6 @@ static void __init unix98_pty_init(void)
24063 register_sysctl_table(pty_root_table);
24064
24065 /* Now create the /dev/ptmx special device */
24066 - tty_default_fops(&ptmx_fops);
24067 - ptmx_fops.open = ptmx_open;
24068 -
24069 cdev_init(&ptmx_cdev, &ptmx_fops);
24070 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
24071 register_chrdev_region(MKDEV(TTYAUX_MAJOR, 2), 1, "/dev/ptmx") < 0)
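Instead of copying the default tty file_operations into a writable ptmx_fops at init time and patching in .open afterwards, the pty change spells out a fully initialized const structure (the tty_read/tty_write/tty_poll/tty_ioctl symbols it needs are un-static'd and exported by the tty_io.c hunks later in this patch), so the ops table can live in read-only memory. The same constification pattern for a hypothetical character device, as an illustrative sketch:

#include <linux/fs.h>
#include <linux/module.h>

/* Hypothetical device: fully initialized, read-only ops instead of a
 * structure filled in at runtime. */
static int example_open(struct inode *inode, struct file *filp)
{
	return 0;
}

static const struct file_operations example_fops = {
	.owner	= THIS_MODULE,
	.open	= example_open,
	.llseek	= no_llseek,
};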
24072 diff -urNp linux-2.6.35.4/drivers/char/random.c linux-2.6.35.4/drivers/char/random.c
24073 --- linux-2.6.35.4/drivers/char/random.c 2010-08-26 19:47:12.000000000 -0400
24074 +++ linux-2.6.35.4/drivers/char/random.c 2010-09-17 20:24:41.000000000 -0400
24075 @@ -254,8 +254,13 @@
24076 /*
24077 * Configuration information
24078 */
24079 +#ifdef CONFIG_GRKERNSEC_RANDNET
24080 +#define INPUT_POOL_WORDS 512
24081 +#define OUTPUT_POOL_WORDS 128
24082 +#else
24083 #define INPUT_POOL_WORDS 128
24084 #define OUTPUT_POOL_WORDS 32
24085 +#endif
24086 #define SEC_XFER_SIZE 512
24087 #define EXTRACT_SIZE 10
24088
24089 @@ -293,10 +298,17 @@ static struct poolinfo {
24090 int poolwords;
24091 int tap1, tap2, tap3, tap4, tap5;
24092 } poolinfo_table[] = {
24093 +#ifdef CONFIG_GRKERNSEC_RANDNET
24094 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
24095 + { 512, 411, 308, 208, 104, 1 },
24096 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
24097 + { 128, 103, 76, 51, 25, 1 },
24098 +#else
24099 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
24100 { 128, 103, 76, 51, 25, 1 },
24101 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
24102 { 32, 26, 20, 14, 7, 1 },
24103 +#endif
24104 #if 0
24105 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
24106 { 2048, 1638, 1231, 819, 411, 1 },
24107 @@ -902,7 +914,7 @@ static ssize_t extract_entropy_user(stru
24108
24109 extract_buf(r, tmp);
24110 i = min_t(int, nbytes, EXTRACT_SIZE);
24111 - if (copy_to_user(buf, tmp, i)) {
24112 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
24113 ret = -EFAULT;
24114 break;
24115 }
24116 @@ -1205,7 +1217,7 @@ EXPORT_SYMBOL(generate_random_uuid);
24117 #include <linux/sysctl.h>
24118
24119 static int min_read_thresh = 8, min_write_thresh;
24120 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
24121 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
24122 static int max_write_thresh = INPUT_POOL_WORDS * 32;
24123 static char sysctl_bootid[16];
24124
24125 diff -urNp linux-2.6.35.4/drivers/char/sonypi.c linux-2.6.35.4/drivers/char/sonypi.c
24126 --- linux-2.6.35.4/drivers/char/sonypi.c 2010-08-26 19:47:12.000000000 -0400
24127 +++ linux-2.6.35.4/drivers/char/sonypi.c 2010-09-17 20:12:09.000000000 -0400
24128 @@ -491,7 +491,7 @@ static struct sonypi_device {
24129 spinlock_t fifo_lock;
24130 wait_queue_head_t fifo_proc_list;
24131 struct fasync_struct *fifo_async;
24132 - int open_count;
24133 + atomic_t open_count;
24134 int model;
24135 struct input_dev *input_jog_dev;
24136 struct input_dev *input_key_dev;
24137 @@ -898,7 +898,7 @@ static int sonypi_misc_fasync(int fd, st
24138 static int sonypi_misc_release(struct inode *inode, struct file *file)
24139 {
24140 mutex_lock(&sonypi_device.lock);
24141 - sonypi_device.open_count--;
24142 + atomic_dec(&sonypi_device.open_count);
24143 mutex_unlock(&sonypi_device.lock);
24144 return 0;
24145 }
24146 @@ -907,9 +907,9 @@ static int sonypi_misc_open(struct inode
24147 {
24148 mutex_lock(&sonypi_device.lock);
24149 /* Flush input queue on first open */
24150 - if (!sonypi_device.open_count)
24151 + if (!atomic_read(&sonypi_device.open_count))
24152 kfifo_reset(&sonypi_device.fifo);
24153 - sonypi_device.open_count++;
24154 + atomic_inc(&sonypi_device.open_count);
24155 mutex_unlock(&sonypi_device.lock);
24156
24157 return 0;
24158 diff -urNp linux-2.6.35.4/drivers/char/tpm/tpm_bios.c linux-2.6.35.4/drivers/char/tpm/tpm_bios.c
24159 --- linux-2.6.35.4/drivers/char/tpm/tpm_bios.c 2010-08-26 19:47:12.000000000 -0400
24160 +++ linux-2.6.35.4/drivers/char/tpm/tpm_bios.c 2010-09-17 20:12:09.000000000 -0400
24161 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start
24162 event = addr;
24163
24164 if ((event->event_type == 0 && event->event_size == 0) ||
24165 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
24166 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
24167 return NULL;
24168
24169 return addr;
24170 @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(
24171 return NULL;
24172
24173 if ((event->event_type == 0 && event->event_size == 0) ||
24174 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
24175 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
24176 return NULL;
24177
24178 (*pos)++;
24179 @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_
24180 int i;
24181
24182 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
24183 - seq_putc(m, data[i]);
24184 + if (!seq_putc(m, data[i]))
24185 + return -EFAULT;
24186
24187 return 0;
24188 }
24189 @@ -410,6 +411,11 @@ static int read_log(struct tpm_bios_log
24190 log->bios_event_log_end = log->bios_event_log + len;
24191
24192 virt = acpi_os_map_memory(start, len);
24193 + if (!virt) {
24194 + kfree(log->bios_event_log);
24195 + log->bios_event_log = NULL;
24196 + return -EFAULT;
24197 + }
24198
24199 memcpy(log->bios_event_log, virt, len);
24200
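The tpm_bios length checks are rewritten so that the attacker-influenced event_size never enters an addition that can wrap past limit; the subtraction form keeps the arithmetic on values the kernel controls. A worked example with made-up 32-bit values:

/* Illustrative numbers only, 32-bit arithmetic:
 *   addr = 0x1000, limit = 0x2000, sizeof(struct tcpa_event) = 0x20,
 *   event_size = 0xffffffff (attacker supplied)
 * old check: addr + 0x20 + event_size wraps to 0x101f, which is < limit,
 *            so the oversized event is accepted
 * new check: event_size >= limit - addr - 0x20 (= 0xfe0), so it is rejected
 */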
24201 diff -urNp linux-2.6.35.4/drivers/char/tty_io.c linux-2.6.35.4/drivers/char/tty_io.c
24202 --- linux-2.6.35.4/drivers/char/tty_io.c 2010-08-26 19:47:12.000000000 -0400
24203 +++ linux-2.6.35.4/drivers/char/tty_io.c 2010-09-17 20:12:09.000000000 -0400
24204 @@ -136,20 +136,10 @@ LIST_HEAD(tty_drivers); /* linked list
24205 DEFINE_MUTEX(tty_mutex);
24206 EXPORT_SYMBOL(tty_mutex);
24207
24208 -static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
24209 -static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
24210 ssize_t redirected_tty_write(struct file *, const char __user *,
24211 size_t, loff_t *);
24212 -static unsigned int tty_poll(struct file *, poll_table *);
24213 static int tty_open(struct inode *, struct file *);
24214 long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
24215 -#ifdef CONFIG_COMPAT
24216 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
24217 - unsigned long arg);
24218 -#else
24219 -#define tty_compat_ioctl NULL
24220 -#endif
24221 -static int tty_fasync(int fd, struct file *filp, int on);
24222 static void release_tty(struct tty_struct *tty, int idx);
24223 static void __proc_set_tty(struct task_struct *tsk, struct tty_struct *tty);
24224 static void proc_set_tty(struct task_struct *tsk, struct tty_struct *tty);
24225 @@ -871,7 +861,7 @@ EXPORT_SYMBOL(start_tty);
24226 * read calls may be outstanding in parallel.
24227 */
24228
24229 -static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
24230 +ssize_t tty_read(struct file *file, char __user *buf, size_t count,
24231 loff_t *ppos)
24232 {
24233 int i;
24234 @@ -899,6 +889,8 @@ static ssize_t tty_read(struct file *fil
24235 return i;
24236 }
24237
24238 +EXPORT_SYMBOL(tty_read);
24239 +
24240 void tty_write_unlock(struct tty_struct *tty)
24241 {
24242 mutex_unlock(&tty->atomic_write_lock);
24243 @@ -1048,7 +1040,7 @@ void tty_write_message(struct tty_struct
24244 * write method will not be invoked in parallel for each device.
24245 */
24246
24247 -static ssize_t tty_write(struct file *file, const char __user *buf,
24248 +ssize_t tty_write(struct file *file, const char __user *buf,
24249 size_t count, loff_t *ppos)
24250 {
24251 struct tty_struct *tty;
24252 @@ -1075,6 +1067,8 @@ static ssize_t tty_write(struct file *fi
24253 return ret;
24254 }
24255
24256 +EXPORT_SYMBOL(tty_write);
24257 +
24258 ssize_t redirected_tty_write(struct file *file, const char __user *buf,
24259 size_t count, loff_t *ppos)
24260 {
24261 @@ -1897,6 +1891,8 @@ got_driver:
24262
24263
24264
24265 +EXPORT_SYMBOL(tty_release);
24266 +
24267 /**
24268 * tty_poll - check tty status
24269 * @filp: file being polled
24270 @@ -1909,7 +1905,7 @@ got_driver:
24271 * may be re-entered freely by other callers.
24272 */
24273
24274 -static unsigned int tty_poll(struct file *filp, poll_table *wait)
24275 +unsigned int tty_poll(struct file *filp, poll_table *wait)
24276 {
24277 struct tty_struct *tty;
24278 struct tty_ldisc *ld;
24279 @@ -1926,7 +1922,9 @@ static unsigned int tty_poll(struct file
24280 return ret;
24281 }
24282
24283 -static int tty_fasync(int fd, struct file *filp, int on)
24284 +EXPORT_SYMBOL(tty_poll);
24285 +
24286 +int tty_fasync(int fd, struct file *filp, int on)
24287 {
24288 struct tty_struct *tty;
24289 unsigned long flags;
24290 @@ -1970,6 +1968,8 @@ out:
24291 return retval;
24292 }
24293
24294 +EXPORT_SYMBOL(tty_fasync);
24295 +
24296 /**
24297 * tiocsti - fake input character
24298 * @tty: tty to fake input into
24299 @@ -2602,8 +2602,10 @@ long tty_ioctl(struct file *file, unsign
24300 return retval;
24301 }
24302
24303 +EXPORT_SYMBOL(tty_ioctl);
24304 +
24305 #ifdef CONFIG_COMPAT
24306 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
24307 +long tty_compat_ioctl(struct file *file, unsigned int cmd,
24308 unsigned long arg)
24309 {
24310 struct inode *inode = file->f_dentry->d_inode;
24311 @@ -2627,6 +2629,9 @@ static long tty_compat_ioctl(struct file
24312
24313 return retval;
24314 }
24315 +
24316 +EXPORT_SYMBOL(tty_compat_ioctl);
24317 +
24318 #endif
24319
24320 /*
24321 @@ -3070,11 +3075,6 @@ struct tty_struct *get_current_tty(void)
24322 }
24323 EXPORT_SYMBOL_GPL(get_current_tty);
24324
24325 -void tty_default_fops(struct file_operations *fops)
24326 -{
24327 - *fops = tty_fops;
24328 -}
24329 -
24330 /*
24331 * Initialize the console device. This is called *early*, so
24332 * we can't necessarily depend on lots of kernel help here.
24333 diff -urNp linux-2.6.35.4/drivers/char/tty_ldisc.c linux-2.6.35.4/drivers/char/tty_ldisc.c
24334 --- linux-2.6.35.4/drivers/char/tty_ldisc.c 2010-08-26 19:47:12.000000000 -0400
24335 +++ linux-2.6.35.4/drivers/char/tty_ldisc.c 2010-09-17 20:12:09.000000000 -0400
24336 @@ -75,7 +75,7 @@ static void put_ldisc(struct tty_ldisc *
24337 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
24338 struct tty_ldisc_ops *ldo = ld->ops;
24339
24340 - ldo->refcount--;
24341 + atomic_dec(&ldo->refcount);
24342 module_put(ldo->owner);
24343 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
24344
24345 @@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
24346 spin_lock_irqsave(&tty_ldisc_lock, flags);
24347 tty_ldiscs[disc] = new_ldisc;
24348 new_ldisc->num = disc;
24349 - new_ldisc->refcount = 0;
24350 + atomic_set(&new_ldisc->refcount, 0);
24351 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
24352
24353 return ret;
24354 @@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
24355 return -EINVAL;
24356
24357 spin_lock_irqsave(&tty_ldisc_lock, flags);
24358 - if (tty_ldiscs[disc]->refcount)
24359 + if (atomic_read(&tty_ldiscs[disc]->refcount))
24360 ret = -EBUSY;
24361 else
24362 tty_ldiscs[disc] = NULL;
24363 @@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
24364 if (ldops) {
24365 ret = ERR_PTR(-EAGAIN);
24366 if (try_module_get(ldops->owner)) {
24367 - ldops->refcount++;
24368 + atomic_inc(&ldops->refcount);
24369 ret = ldops;
24370 }
24371 }
24372 @@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
24373 unsigned long flags;
24374
24375 spin_lock_irqsave(&tty_ldisc_lock, flags);
24376 - ldops->refcount--;
24377 + atomic_dec(&ldops->refcount);
24378 module_put(ldops->owner);
24379 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
24380 }
24381 diff -urNp linux-2.6.35.4/drivers/char/vt_ioctl.c linux-2.6.35.4/drivers/char/vt_ioctl.c
24382 --- linux-2.6.35.4/drivers/char/vt_ioctl.c 2010-08-26 19:47:12.000000000 -0400
24383 +++ linux-2.6.35.4/drivers/char/vt_ioctl.c 2010-09-17 20:12:37.000000000 -0400
24384 @@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
24385 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
24386 return -EFAULT;
24387
24388 - if (!capable(CAP_SYS_TTY_CONFIG))
24389 - perm = 0;
24390 -
24391 switch (cmd) {
24392 case KDGKBENT:
24393 key_map = key_maps[s];
24394 @@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __
24395 val = (i ? K_HOLE : K_NOSUCHMAP);
24396 return put_user(val, &user_kbe->kb_value);
24397 case KDSKBENT:
24398 + if (!capable(CAP_SYS_TTY_CONFIG))
24399 + perm = 0;
24400 +
24401 if (!perm)
24402 return -EPERM;
24403 +
24404 if (!i && v == K_NOSUCHMAP) {
24405 /* deallocate map */
24406 key_map = key_maps[s];
24407 @@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
24408 int i, j, k;
24409 int ret;
24410
24411 - if (!capable(CAP_SYS_TTY_CONFIG))
24412 - perm = 0;
24413 -
24414 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
24415 if (!kbs) {
24416 ret = -ENOMEM;
24417 @@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
24418 kfree(kbs);
24419 return ((p && *p) ? -EOVERFLOW : 0);
24420 case KDSKBSENT:
24421 + if (!capable(CAP_SYS_TTY_CONFIG))
24422 + perm = 0;
24423 +
24424 if (!perm) {
24425 ret = -EPERM;
24426 goto reterr;
24427 diff -urNp linux-2.6.35.4/drivers/cpuidle/sysfs.c linux-2.6.35.4/drivers/cpuidle/sysfs.c
24428 --- linux-2.6.35.4/drivers/cpuidle/sysfs.c 2010-08-26 19:47:12.000000000 -0400
24429 +++ linux-2.6.35.4/drivers/cpuidle/sysfs.c 2010-09-17 20:12:09.000000000 -0400
24430 @@ -300,7 +300,7 @@ static struct kobj_type ktype_state_cpui
24431 .release = cpuidle_state_sysfs_release,
24432 };
24433
24434 -static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
24435 +static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
24436 {
24437 kobject_put(&device->kobjs[i]->kobj);
24438 wait_for_completion(&device->kobjs[i]->kobj_unregister);
24439 diff -urNp linux-2.6.35.4/drivers/edac/edac_core.h linux-2.6.35.4/drivers/edac/edac_core.h
24440 --- linux-2.6.35.4/drivers/edac/edac_core.h 2010-08-26 19:47:12.000000000 -0400
24441 +++ linux-2.6.35.4/drivers/edac/edac_core.h 2010-09-17 20:12:09.000000000 -0400
24442 @@ -100,11 +100,11 @@ extern const char *edac_mem_types[];
24443
24444 #else /* !CONFIG_EDAC_DEBUG */
24445
24446 -#define debugf0( ... )
24447 -#define debugf1( ... )
24448 -#define debugf2( ... )
24449 -#define debugf3( ... )
24450 -#define debugf4( ... )
24451 +#define debugf0( ... ) do {} while (0)
24452 +#define debugf1( ... ) do {} while (0)
24453 +#define debugf2( ... ) do {} while (0)
24454 +#define debugf3( ... ) do {} while (0)
24455 +#define debugf4( ... ) do {} while (0)
24456
24457 #endif /* !CONFIG_EDAC_DEBUG */
24458
24459 diff -urNp linux-2.6.35.4/drivers/edac/edac_mc_sysfs.c linux-2.6.35.4/drivers/edac/edac_mc_sysfs.c
24460 --- linux-2.6.35.4/drivers/edac/edac_mc_sysfs.c 2010-08-26 19:47:12.000000000 -0400
24461 +++ linux-2.6.35.4/drivers/edac/edac_mc_sysfs.c 2010-09-17 20:12:09.000000000 -0400
24462 @@ -776,7 +776,7 @@ static void edac_inst_grp_release(struct
24463 }
24464
24465 /* Intermediate show/store table */
24466 -static struct sysfs_ops inst_grp_ops = {
24467 +static const struct sysfs_ops inst_grp_ops = {
24468 .show = inst_grp_show,
24469 .store = inst_grp_store
24470 };
24471 diff -urNp linux-2.6.35.4/drivers/firewire/core-cdev.c linux-2.6.35.4/drivers/firewire/core-cdev.c
24472 --- linux-2.6.35.4/drivers/firewire/core-cdev.c 2010-08-26 19:47:12.000000000 -0400
24473 +++ linux-2.6.35.4/drivers/firewire/core-cdev.c 2010-09-17 20:12:09.000000000 -0400
24474 @@ -1195,8 +1195,7 @@ static int init_iso_resource(struct clie
24475 int ret;
24476
24477 if ((request->channels == 0 && request->bandwidth == 0) ||
24478 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
24479 - request->bandwidth < 0)
24480 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
24481 return -EINVAL;
24482
24483 r = kmalloc(sizeof(*r), GFP_KERNEL);
24484 diff -urNp linux-2.6.35.4/drivers/firmware/dmi_scan.c linux-2.6.35.4/drivers/firmware/dmi_scan.c
24485 --- linux-2.6.35.4/drivers/firmware/dmi_scan.c 2010-08-26 19:47:12.000000000 -0400
24486 +++ linux-2.6.35.4/drivers/firmware/dmi_scan.c 2010-09-17 20:12:09.000000000 -0400
24487 @@ -387,11 +387,6 @@ void __init dmi_scan_machine(void)
24488 }
24489 }
24490 else {
24491 - /*
24492 - * no iounmap() for that ioremap(); it would be a no-op, but
24493 - * it's so early in setup that sucker gets confused into doing
24494 - * what it shouldn't if we actually call it.
24495 - */
24496 p = dmi_ioremap(0xF0000, 0x10000);
24497 if (p == NULL)
24498 goto error;
24499 diff -urNp linux-2.6.35.4/drivers/gpu/drm/drm_drv.c linux-2.6.35.4/drivers/gpu/drm/drm_drv.c
24500 --- linux-2.6.35.4/drivers/gpu/drm/drm_drv.c 2010-08-26 19:47:12.000000000 -0400
24501 +++ linux-2.6.35.4/drivers/gpu/drm/drm_drv.c 2010-09-17 20:12:09.000000000 -0400
24502 @@ -449,7 +449,7 @@ long drm_ioctl(struct file *filp,
24503
24504 dev = file_priv->minor->dev;
24505 atomic_inc(&dev->ioctl_count);
24506 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
24507 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
24508 ++file_priv->ioctl_count;
24509
24510 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
24511 diff -urNp linux-2.6.35.4/drivers/gpu/drm/drm_fops.c linux-2.6.35.4/drivers/gpu/drm/drm_fops.c
24512 --- linux-2.6.35.4/drivers/gpu/drm/drm_fops.c 2010-08-26 19:47:12.000000000 -0400
24513 +++ linux-2.6.35.4/drivers/gpu/drm/drm_fops.c 2010-09-17 20:12:09.000000000 -0400
24514 @@ -67,7 +67,7 @@ static int drm_setup(struct drm_device *
24515 }
24516
24517 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
24518 - atomic_set(&dev->counts[i], 0);
24519 + atomic_set_unchecked(&dev->counts[i], 0);
24520
24521 dev->sigdata.lock = NULL;
24522
24523 @@ -131,9 +131,9 @@ int drm_open(struct inode *inode, struct
24524
24525 retcode = drm_open_helper(inode, filp, dev);
24526 if (!retcode) {
24527 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
24528 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
24529 spin_lock(&dev->count_lock);
24530 - if (!dev->open_count++) {
24531 + if (atomic_inc_return(&dev->open_count) == 1) {
24532 spin_unlock(&dev->count_lock);
24533 retcode = drm_setup(dev);
24534 goto out;
24535 @@ -474,7 +474,7 @@ int drm_release(struct inode *inode, str
24536
24537 lock_kernel();
24538
24539 - DRM_DEBUG("open_count = %d\n", dev->open_count);
24540 + DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count));
24541
24542 if (dev->driver->preclose)
24543 dev->driver->preclose(dev, file_priv);
24544 @@ -486,7 +486,7 @@ int drm_release(struct inode *inode, str
24545 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
24546 task_pid_nr(current),
24547 (long)old_encode_dev(file_priv->minor->device),
24548 - dev->open_count);
24549 + atomic_read(&dev->open_count));
24550
24551 /* if the master has gone away we can't do anything with the lock */
24552 if (file_priv->minor->master)
24553 @@ -567,9 +567,9 @@ int drm_release(struct inode *inode, str
24554 * End inline drm_release
24555 */
24556
24557 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
24558 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
24559 spin_lock(&dev->count_lock);
24560 - if (!--dev->open_count) {
24561 + if (atomic_dec_and_test(&dev->open_count)) {
24562 if (atomic_read(&dev->ioctl_count)) {
24563 DRM_ERROR("Device busy: %d\n",
24564 atomic_read(&dev->ioctl_count));
24565 diff -urNp linux-2.6.35.4/drivers/gpu/drm/drm_ioctl.c linux-2.6.35.4/drivers/gpu/drm/drm_ioctl.c
24566 --- linux-2.6.35.4/drivers/gpu/drm/drm_ioctl.c 2010-08-26 19:47:12.000000000 -0400
24567 +++ linux-2.6.35.4/drivers/gpu/drm/drm_ioctl.c 2010-09-17 20:12:09.000000000 -0400
24568 @@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev,
24569 stats->data[i].value =
24570 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
24571 else
24572 - stats->data[i].value = atomic_read(&dev->counts[i]);
24573 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
24574 stats->data[i].type = dev->types[i];
24575 }
24576
24577 diff -urNp linux-2.6.35.4/drivers/gpu/drm/drm_lock.c linux-2.6.35.4/drivers/gpu/drm/drm_lock.c
24578 --- linux-2.6.35.4/drivers/gpu/drm/drm_lock.c 2010-08-26 19:47:12.000000000 -0400
24579 +++ linux-2.6.35.4/drivers/gpu/drm/drm_lock.c 2010-09-17 20:12:09.000000000 -0400
24580 @@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, voi
24581 if (drm_lock_take(&master->lock, lock->context)) {
24582 master->lock.file_priv = file_priv;
24583 master->lock.lock_time = jiffies;
24584 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
24585 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
24586 break; /* Got lock */
24587 }
24588
24589 @@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, v
24590 return -EINVAL;
24591 }
24592
24593 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
24594 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
24595
24596 /* kernel_context_switch isn't used by any of the x86 drm
24597 * modules but is required by the Sparc driver.
24598 diff -urNp linux-2.6.35.4/drivers/gpu/drm/i810/i810_dma.c linux-2.6.35.4/drivers/gpu/drm/i810/i810_dma.c
24599 --- linux-2.6.35.4/drivers/gpu/drm/i810/i810_dma.c 2010-08-26 19:47:12.000000000 -0400
24600 +++ linux-2.6.35.4/drivers/gpu/drm/i810/i810_dma.c 2010-09-17 20:12:09.000000000 -0400
24601 @@ -953,8 +953,8 @@ static int i810_dma_vertex(struct drm_de
24602 dma->buflist[vertex->idx],
24603 vertex->discard, vertex->used);
24604
24605 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
24606 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
24607 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
24608 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
24609 sarea_priv->last_enqueue = dev_priv->counter - 1;
24610 sarea_priv->last_dispatch = (int)hw_status[5];
24611
24612 @@ -1116,8 +1116,8 @@ static int i810_dma_mc(struct drm_device
24613 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
24614 mc->last_render);
24615
24616 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
24617 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
24618 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
24619 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
24620 sarea_priv->last_enqueue = dev_priv->counter - 1;
24621 sarea_priv->last_dispatch = (int)hw_status[5];
24622
24623 diff -urNp linux-2.6.35.4/drivers/gpu/drm/i915/dvo_ch7017.c linux-2.6.35.4/drivers/gpu/drm/i915/dvo_ch7017.c
24624 --- linux-2.6.35.4/drivers/gpu/drm/i915/dvo_ch7017.c 2010-08-26 19:47:12.000000000 -0400
24625 +++ linux-2.6.35.4/drivers/gpu/drm/i915/dvo_ch7017.c 2010-09-17 20:12:09.000000000 -0400
24626 @@ -402,7 +402,7 @@ static void ch7017_destroy(struct intel_
24627 }
24628 }
24629
24630 -struct intel_dvo_dev_ops ch7017_ops = {
24631 +const struct intel_dvo_dev_ops ch7017_ops = {
24632 .init = ch7017_init,
24633 .detect = ch7017_detect,
24634 .mode_valid = ch7017_mode_valid,
24635 diff -urNp linux-2.6.35.4/drivers/gpu/drm/i915/dvo_ch7xxx.c linux-2.6.35.4/drivers/gpu/drm/i915/dvo_ch7xxx.c
24636 --- linux-2.6.35.4/drivers/gpu/drm/i915/dvo_ch7xxx.c 2010-08-26 19:47:12.000000000 -0400
24637 +++ linux-2.6.35.4/drivers/gpu/drm/i915/dvo_ch7xxx.c 2010-09-17 20:12:09.000000000 -0400
24638 @@ -322,7 +322,7 @@ static void ch7xxx_destroy(struct intel_
24639 }
24640 }
24641
24642 -struct intel_dvo_dev_ops ch7xxx_ops = {
24643 +const struct intel_dvo_dev_ops ch7xxx_ops = {
24644 .init = ch7xxx_init,
24645 .detect = ch7xxx_detect,
24646 .mode_valid = ch7xxx_mode_valid,
24647 diff -urNp linux-2.6.35.4/drivers/gpu/drm/i915/dvo.h linux-2.6.35.4/drivers/gpu/drm/i915/dvo.h
24648 --- linux-2.6.35.4/drivers/gpu/drm/i915/dvo.h 2010-08-26 19:47:12.000000000 -0400
24649 +++ linux-2.6.35.4/drivers/gpu/drm/i915/dvo.h 2010-09-17 20:12:09.000000000 -0400
24650 @@ -125,23 +125,23 @@ struct intel_dvo_dev_ops {
24651 *
24652 * \return singly-linked list of modes or NULL if no modes found.
24653 */
24654 - struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
24655 + struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
24656
24657 /**
24658 * Clean up driver-specific bits of the output
24659 */
24660 - void (*destroy) (struct intel_dvo_device *dvo);
24661 + void (* const destroy) (struct intel_dvo_device *dvo);
24662
24663 /**
24664 * Debugging hook to dump device registers to log file
24665 */
24666 - void (*dump_regs)(struct intel_dvo_device *dvo);
24667 + void (* const dump_regs)(struct intel_dvo_device *dvo);
24668 };
24669
24670 -extern struct intel_dvo_dev_ops sil164_ops;
24671 -extern struct intel_dvo_dev_ops ch7xxx_ops;
24672 -extern struct intel_dvo_dev_ops ivch_ops;
24673 -extern struct intel_dvo_dev_ops tfp410_ops;
24674 -extern struct intel_dvo_dev_ops ch7017_ops;
24675 +extern const struct intel_dvo_dev_ops sil164_ops;
24676 +extern const struct intel_dvo_dev_ops ch7xxx_ops;
24677 +extern const struct intel_dvo_dev_ops ivch_ops;
24678 +extern const struct intel_dvo_dev_ops tfp410_ops;
24679 +extern const struct intel_dvo_dev_ops ch7017_ops;
24680
24681 #endif /* _INTEL_DVO_H */
24682 diff -urNp linux-2.6.35.4/drivers/gpu/drm/i915/dvo_ivch.c linux-2.6.35.4/drivers/gpu/drm/i915/dvo_ivch.c
24683 --- linux-2.6.35.4/drivers/gpu/drm/i915/dvo_ivch.c 2010-08-26 19:47:12.000000000 -0400
24684 +++ linux-2.6.35.4/drivers/gpu/drm/i915/dvo_ivch.c 2010-09-17 20:12:09.000000000 -0400
24685 @@ -412,7 +412,7 @@ static void ivch_destroy(struct intel_dv
24686 }
24687 }
24688
24689 -struct intel_dvo_dev_ops ivch_ops= {
24690 +const struct intel_dvo_dev_ops ivch_ops= {
24691 .init = ivch_init,
24692 .dpms = ivch_dpms,
24693 .mode_valid = ivch_mode_valid,
24694 diff -urNp linux-2.6.35.4/drivers/gpu/drm/i915/dvo_sil164.c linux-2.6.35.4/drivers/gpu/drm/i915/dvo_sil164.c
24695 --- linux-2.6.35.4/drivers/gpu/drm/i915/dvo_sil164.c 2010-08-26 19:47:12.000000000 -0400
24696 +++ linux-2.6.35.4/drivers/gpu/drm/i915/dvo_sil164.c 2010-09-17 20:12:09.000000000 -0400
24697 @@ -254,7 +254,7 @@ static void sil164_destroy(struct intel_
24698 }
24699 }
24700
24701 -struct intel_dvo_dev_ops sil164_ops = {
24702 +const struct intel_dvo_dev_ops sil164_ops = {
24703 .init = sil164_init,
24704 .detect = sil164_detect,
24705 .mode_valid = sil164_mode_valid,
24706 diff -urNp linux-2.6.35.4/drivers/gpu/drm/i915/dvo_tfp410.c linux-2.6.35.4/drivers/gpu/drm/i915/dvo_tfp410.c
24707 --- linux-2.6.35.4/drivers/gpu/drm/i915/dvo_tfp410.c 2010-08-26 19:47:12.000000000 -0400
24708 +++ linux-2.6.35.4/drivers/gpu/drm/i915/dvo_tfp410.c 2010-09-17 20:12:09.000000000 -0400
24709 @@ -295,7 +295,7 @@ static void tfp410_destroy(struct intel_
24710 }
24711 }
24712
24713 -struct intel_dvo_dev_ops tfp410_ops = {
24714 +const struct intel_dvo_dev_ops tfp410_ops = {
24715 .init = tfp410_init,
24716 .detect = tfp410_detect,
24717 .mode_valid = tfp410_mode_valid,
24718 diff -urNp linux-2.6.35.4/drivers/gpu/drm/i915/i915_dma.c linux-2.6.35.4/drivers/gpu/drm/i915/i915_dma.c
24719 --- linux-2.6.35.4/drivers/gpu/drm/i915/i915_dma.c 2010-08-26 19:47:12.000000000 -0400
24720 +++ linux-2.6.35.4/drivers/gpu/drm/i915/i915_dma.c 2010-09-17 20:12:09.000000000 -0400
24721 @@ -1342,7 +1342,7 @@ static bool i915_switcheroo_can_switch(s
24722 bool can_switch;
24723
24724 spin_lock(&dev->count_lock);
24725 - can_switch = (dev->open_count == 0);
24726 + can_switch = (atomic_read(&dev->open_count) == 0);
24727 spin_unlock(&dev->count_lock);
24728 return can_switch;
24729 }
24730 diff -urNp linux-2.6.35.4/drivers/gpu/drm/i915/i915_drv.c linux-2.6.35.4/drivers/gpu/drm/i915/i915_drv.c
24731 --- linux-2.6.35.4/drivers/gpu/drm/i915/i915_drv.c 2010-08-26 19:47:12.000000000 -0400
24732 +++ linux-2.6.35.4/drivers/gpu/drm/i915/i915_drv.c 2010-09-17 20:12:09.000000000 -0400
24733 @@ -491,7 +491,7 @@ const struct dev_pm_ops i915_pm_ops = {
24734 .restore = i915_pm_resume,
24735 };
24736
24737 -static struct vm_operations_struct i915_gem_vm_ops = {
24738 +static const struct vm_operations_struct i915_gem_vm_ops = {
24739 .fault = i915_gem_fault,
24740 .open = drm_gem_vm_open,
24741 .close = drm_gem_vm_close,
24742 diff -urNp linux-2.6.35.4/drivers/gpu/drm/nouveau/nouveau_backlight.c linux-2.6.35.4/drivers/gpu/drm/nouveau/nouveau_backlight.c
24743 --- linux-2.6.35.4/drivers/gpu/drm/nouveau/nouveau_backlight.c 2010-08-26 19:47:12.000000000 -0400
24744 +++ linux-2.6.35.4/drivers/gpu/drm/nouveau/nouveau_backlight.c 2010-09-17 20:12:09.000000000 -0400
24745 @@ -58,7 +58,7 @@ static int nv40_set_intensity(struct bac
24746 return 0;
24747 }
24748
24749 -static struct backlight_ops nv40_bl_ops = {
24750 +static const struct backlight_ops nv40_bl_ops = {
24751 .options = BL_CORE_SUSPENDRESUME,
24752 .get_brightness = nv40_get_intensity,
24753 .update_status = nv40_set_intensity,
24754 @@ -81,7 +81,7 @@ static int nv50_set_intensity(struct bac
24755 return 0;
24756 }
24757
24758 -static struct backlight_ops nv50_bl_ops = {
24759 +static const struct backlight_ops nv50_bl_ops = {
24760 .options = BL_CORE_SUSPENDRESUME,
24761 .get_brightness = nv50_get_intensity,
24762 .update_status = nv50_set_intensity,
24763 diff -urNp linux-2.6.35.4/drivers/gpu/drm/nouveau/nouveau_state.c linux-2.6.35.4/drivers/gpu/drm/nouveau/nouveau_state.c
24764 --- linux-2.6.35.4/drivers/gpu/drm/nouveau/nouveau_state.c 2010-08-26 19:47:12.000000000 -0400
24765 +++ linux-2.6.35.4/drivers/gpu/drm/nouveau/nouveau_state.c 2010-09-17 20:12:09.000000000 -0400
24766 @@ -395,7 +395,7 @@ static bool nouveau_switcheroo_can_switc
24767 bool can_switch;
24768
24769 spin_lock(&dev->count_lock);
24770 - can_switch = (dev->open_count == 0);
24771 + can_switch = (atomic_read(&dev->open_count) == 0);
24772 spin_unlock(&dev->count_lock);
24773 return can_switch;
24774 }
24775 diff -urNp linux-2.6.35.4/drivers/gpu/drm/radeon/mkregtable.c linux-2.6.35.4/drivers/gpu/drm/radeon/mkregtable.c
24776 --- linux-2.6.35.4/drivers/gpu/drm/radeon/mkregtable.c 2010-08-26 19:47:12.000000000 -0400
24777 +++ linux-2.6.35.4/drivers/gpu/drm/radeon/mkregtable.c 2010-09-17 20:12:09.000000000 -0400
24778 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
24779 regex_t mask_rex;
24780 regmatch_t match[4];
24781 char buf[1024];
24782 - size_t end;
24783 + long end;
24784 int len;
24785 int done = 0;
24786 int r;
24787 unsigned o;
24788 struct offset *offset;
24789 char last_reg_s[10];
24790 - int last_reg;
24791 + unsigned long last_reg;
24792
24793 if (regcomp
24794 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
24795 diff -urNp linux-2.6.35.4/drivers/gpu/drm/radeon/radeon_device.c linux-2.6.35.4/drivers/gpu/drm/radeon/radeon_device.c
24796 --- linux-2.6.35.4/drivers/gpu/drm/radeon/radeon_device.c 2010-08-26 19:47:12.000000000 -0400
24797 +++ linux-2.6.35.4/drivers/gpu/drm/radeon/radeon_device.c 2010-09-17 20:12:09.000000000 -0400
24798 @@ -562,7 +562,7 @@ static bool radeon_switcheroo_can_switch
24799 bool can_switch;
24800
24801 spin_lock(&dev->count_lock);
24802 - can_switch = (dev->open_count == 0);
24803 + can_switch = (atomic_read(&dev->open_count) == 0);
24804 spin_unlock(&dev->count_lock);
24805 return can_switch;
24806 }
24807 diff -urNp linux-2.6.35.4/drivers/gpu/drm/radeon/radeon_display.c linux-2.6.35.4/drivers/gpu/drm/radeon/radeon_display.c
24808 --- linux-2.6.35.4/drivers/gpu/drm/radeon/radeon_display.c 2010-08-26 19:47:12.000000000 -0400
24809 +++ linux-2.6.35.4/drivers/gpu/drm/radeon/radeon_display.c 2010-09-17 20:12:09.000000000 -0400
24810 @@ -559,7 +559,7 @@ static void radeon_compute_pll_legacy(st
24811
24812 if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
24813 error = freq - current_freq;
24814 - error = error < 0 ? 0xffffffff : error;
24815 + error = (int32_t)error < 0 ? 0xffffffff : error;
24816 } else
24817 error = abs(current_freq - freq);
24818 vco_diff = abs(vco - best_vco);
24819 diff -urNp linux-2.6.35.4/drivers/gpu/drm/radeon/radeon_state.c linux-2.6.35.4/drivers/gpu/drm/radeon/radeon_state.c
24820 --- linux-2.6.35.4/drivers/gpu/drm/radeon/radeon_state.c 2010-08-26 19:47:12.000000000 -0400
24821 +++ linux-2.6.35.4/drivers/gpu/drm/radeon/radeon_state.c 2010-09-17 20:12:09.000000000 -0400
24822 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_de
24823 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
24824 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
24825
24826 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
24827 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
24828 sarea_priv->nbox * sizeof(depth_boxes[0])))
24829 return -EFAULT;
24830
24831 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm
24832 {
24833 drm_radeon_private_t *dev_priv = dev->dev_private;
24834 drm_radeon_getparam_t *param = data;
24835 - int value;
24836 + int value = 0;
24837
24838 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
24839
24840 diff -urNp linux-2.6.35.4/drivers/gpu/drm/radeon/radeon_ttm.c linux-2.6.35.4/drivers/gpu/drm/radeon/radeon_ttm.c
24841 --- linux-2.6.35.4/drivers/gpu/drm/radeon/radeon_ttm.c 2010-08-26 19:47:12.000000000 -0400
24842 +++ linux-2.6.35.4/drivers/gpu/drm/radeon/radeon_ttm.c 2010-09-17 20:12:09.000000000 -0400
24843 @@ -601,8 +601,9 @@ void radeon_ttm_fini(struct radeon_devic
24844 DRM_INFO("radeon: ttm finalized\n");
24845 }
24846
24847 -static struct vm_operations_struct radeon_ttm_vm_ops;
24848 -static const struct vm_operations_struct *ttm_vm_ops = NULL;
24849 +extern int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
24850 +extern void ttm_bo_vm_open(struct vm_area_struct *vma);
24851 +extern void ttm_bo_vm_close(struct vm_area_struct *vma);
24852
24853 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
24854 {
24855 @@ -610,17 +611,22 @@ static int radeon_ttm_fault(struct vm_ar
24856 struct radeon_device *rdev;
24857 int r;
24858
24859 - bo = (struct ttm_buffer_object *)vma->vm_private_data;
24860 - if (bo == NULL) {
24861 + bo = (struct ttm_buffer_object *)vma->vm_private_data;
24862 + if (!bo)
24863 return VM_FAULT_NOPAGE;
24864 - }
24865 rdev = radeon_get_rdev(bo->bdev);
24866 mutex_lock(&rdev->vram_mutex);
24867 - r = ttm_vm_ops->fault(vma, vmf);
24868 + r = ttm_bo_vm_fault(vma, vmf);
24869 mutex_unlock(&rdev->vram_mutex);
24870 return r;
24871 }
24872
24873 +static const struct vm_operations_struct radeon_ttm_vm_ops = {
24874 + .fault = radeon_ttm_fault,
24875 + .open = ttm_bo_vm_open,
24876 + .close = ttm_bo_vm_close
24877 +};
24878 +
24879 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
24880 {
24881 struct drm_file *file_priv;
24882 @@ -633,18 +639,11 @@ int radeon_mmap(struct file *filp, struc
24883
24884 file_priv = (struct drm_file *)filp->private_data;
24885 rdev = file_priv->minor->dev->dev_private;
24886 - if (rdev == NULL) {
24887 + if (!rdev)
24888 return -EINVAL;
24889 - }
24890 r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
24891 - if (unlikely(r != 0)) {
24892 + if (r)
24893 return r;
24894 - }
24895 - if (unlikely(ttm_vm_ops == NULL)) {
24896 - ttm_vm_ops = vma->vm_ops;
24897 - radeon_ttm_vm_ops = *ttm_vm_ops;
24898 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
24899 - }
24900 vma->vm_ops = &radeon_ttm_vm_ops;
24901 return 0;
24902 }
24903 diff -urNp linux-2.6.35.4/drivers/gpu/drm/ttm/ttm_bo.c linux-2.6.35.4/drivers/gpu/drm/ttm/ttm_bo.c
24904 --- linux-2.6.35.4/drivers/gpu/drm/ttm/ttm_bo.c 2010-08-26 19:47:12.000000000 -0400
24905 +++ linux-2.6.35.4/drivers/gpu/drm/ttm/ttm_bo.c 2010-09-17 20:12:09.000000000 -0400
24906 @@ -47,7 +47,7 @@
24907 #include <linux/module.h>
24908
24909 #define TTM_ASSERT_LOCKED(param)
24910 -#define TTM_DEBUG(fmt, arg...)
24911 +#define TTM_DEBUG(fmt, arg...) do {} while (0)
24912 #define TTM_BO_HASH_ORDER 13
24913
24914 static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
24915 diff -urNp linux-2.6.35.4/drivers/gpu/drm/ttm/ttm_bo_vm.c linux-2.6.35.4/drivers/gpu/drm/ttm/ttm_bo_vm.c
24916 --- linux-2.6.35.4/drivers/gpu/drm/ttm/ttm_bo_vm.c 2010-08-26 19:47:12.000000000 -0400
24917 +++ linux-2.6.35.4/drivers/gpu/drm/ttm/ttm_bo_vm.c 2010-09-17 20:12:09.000000000 -0400
24918 @@ -69,11 +69,11 @@ static struct ttm_buffer_object *ttm_bo_
24919 return best_bo;
24920 }
24921
24922 -static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
24923 +int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
24924 {
24925 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
24926 vma->vm_private_data;
24927 - struct ttm_bo_device *bdev = bo->bdev;
24928 + struct ttm_bo_device *bdev;
24929 unsigned long page_offset;
24930 unsigned long page_last;
24931 unsigned long pfn;
24932 @@ -84,6 +84,10 @@ static int ttm_bo_vm_fault(struct vm_are
24933 unsigned long address = (unsigned long)vmf->virtual_address;
24934 int retval = VM_FAULT_NOPAGE;
24935
24936 + if (!bo)
24937 + return VM_FAULT_NOPAGE;
24938 + bdev = bo->bdev;
24939 +
24940 /*
24941 * Work around locking order reversal in fault / nopfn
24942 * between mmap_sem and bo_reserve: Perform a trylock operation
24943 @@ -213,7 +217,7 @@ out_unlock:
24944 return retval;
24945 }
24946
24947 -static void ttm_bo_vm_open(struct vm_area_struct *vma)
24948 +void ttm_bo_vm_open(struct vm_area_struct *vma)
24949 {
24950 struct ttm_buffer_object *bo =
24951 (struct ttm_buffer_object *)vma->vm_private_data;
24952 @@ -221,7 +225,7 @@ static void ttm_bo_vm_open(struct vm_are
24953 (void)ttm_bo_reference(bo);
24954 }
24955
24956 -static void ttm_bo_vm_close(struct vm_area_struct *vma)
24957 +void ttm_bo_vm_close(struct vm_area_struct *vma)
24958 {
24959 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;
24960
24961 diff -urNp linux-2.6.35.4/drivers/gpu/drm/ttm/ttm_global.c linux-2.6.35.4/drivers/gpu/drm/ttm/ttm_global.c
24962 --- linux-2.6.35.4/drivers/gpu/drm/ttm/ttm_global.c 2010-08-26 19:47:12.000000000 -0400
24963 +++ linux-2.6.35.4/drivers/gpu/drm/ttm/ttm_global.c 2010-09-17 20:12:09.000000000 -0400
24964 @@ -36,7 +36,7 @@
24965 struct ttm_global_item {
24966 struct mutex mutex;
24967 void *object;
24968 - int refcount;
24969 + atomic_t refcount;
24970 };
24971
24972 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
24973 @@ -49,7 +49,7 @@ void ttm_global_init(void)
24974 struct ttm_global_item *item = &glob[i];
24975 mutex_init(&item->mutex);
24976 item->object = NULL;
24977 - item->refcount = 0;
24978 + atomic_set(&item->refcount, 0);
24979 }
24980 }
24981
24982 @@ -59,7 +59,7 @@ void ttm_global_release(void)
24983 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
24984 struct ttm_global_item *item = &glob[i];
24985 BUG_ON(item->object != NULL);
24986 - BUG_ON(item->refcount != 0);
24987 + BUG_ON(atomic_read(&item->refcount) != 0);
24988 }
24989 }
24990
24991 @@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_globa
24992 void *object;
24993
24994 mutex_lock(&item->mutex);
24995 - if (item->refcount == 0) {
24996 + if (atomic_read(&item->refcount) == 0) {
24997 item->object = kzalloc(ref->size, GFP_KERNEL);
24998 if (unlikely(item->object == NULL)) {
24999 ret = -ENOMEM;
25000 @@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_globa
25001 goto out_err;
25002
25003 }
25004 - ++item->refcount;
25005 + atomic_inc(&item->refcount);
25006 ref->object = item->object;
25007 object = item->object;
25008 mutex_unlock(&item->mutex);
25009 @@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_gl
25010 struct ttm_global_item *item = &glob[ref->global_type];
25011
25012 mutex_lock(&item->mutex);
25013 - BUG_ON(item->refcount == 0);
25014 + BUG_ON(atomic_read(&item->refcount) == 0);
25015 BUG_ON(ref->object != item->object);
25016 - if (--item->refcount == 0) {
25017 + if (atomic_dec_and_test(&item->refcount)) {
25018 ref->release(ref);
25019 item->object = NULL;
25020 }
25021 diff -urNp linux-2.6.35.4/drivers/hid/usbhid/hiddev.c linux-2.6.35.4/drivers/hid/usbhid/hiddev.c
25022 --- linux-2.6.35.4/drivers/hid/usbhid/hiddev.c 2010-08-26 19:47:12.000000000 -0400
25023 +++ linux-2.6.35.4/drivers/hid/usbhid/hiddev.c 2010-09-17 20:12:09.000000000 -0400
25024 @@ -616,7 +616,7 @@ static long hiddev_ioctl(struct file *fi
25025 return put_user(HID_VERSION, (int __user *)arg);
25026
25027 case HIDIOCAPPLICATION:
25028 - if (arg < 0 || arg >= hid->maxapplication)
25029 + if (arg >= hid->maxapplication)
25030 return -EINVAL;
25031
25032 for (i = 0; i < hid->maxcollection; i++)
25033 diff -urNp linux-2.6.35.4/drivers/hwmon/k8temp.c linux-2.6.35.4/drivers/hwmon/k8temp.c
25034 --- linux-2.6.35.4/drivers/hwmon/k8temp.c 2010-08-26 19:47:12.000000000 -0400
25035 +++ linux-2.6.35.4/drivers/hwmon/k8temp.c 2010-09-17 20:12:09.000000000 -0400
25036 @@ -138,7 +138,7 @@ static DEVICE_ATTR(name, S_IRUGO, show_n
25037
25038 static const struct pci_device_id k8temp_ids[] = {
25039 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
25040 - { 0 },
25041 + { 0, 0, 0, 0, 0, 0, 0 },
25042 };
25043
25044 MODULE_DEVICE_TABLE(pci, k8temp_ids);
25045 diff -urNp linux-2.6.35.4/drivers/hwmon/sis5595.c linux-2.6.35.4/drivers/hwmon/sis5595.c
25046 --- linux-2.6.35.4/drivers/hwmon/sis5595.c 2010-08-26 19:47:12.000000000 -0400
25047 +++ linux-2.6.35.4/drivers/hwmon/sis5595.c 2010-09-17 20:12:09.000000000 -0400
25048 @@ -699,7 +699,7 @@ static struct sis5595_data *sis5595_upda
25049
25050 static const struct pci_device_id sis5595_pci_ids[] = {
25051 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
25052 - { 0, }
25053 + { 0, 0, 0, 0, 0, 0, 0 }
25054 };
25055
25056 MODULE_DEVICE_TABLE(pci, sis5595_pci_ids);
25057 diff -urNp linux-2.6.35.4/drivers/hwmon/via686a.c linux-2.6.35.4/drivers/hwmon/via686a.c
25058 --- linux-2.6.35.4/drivers/hwmon/via686a.c 2010-08-26 19:47:12.000000000 -0400
25059 +++ linux-2.6.35.4/drivers/hwmon/via686a.c 2010-09-17 20:12:09.000000000 -0400
25060 @@ -769,7 +769,7 @@ static struct via686a_data *via686a_upda
25061
25062 static const struct pci_device_id via686a_pci_ids[] = {
25063 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4) },
25064 - { 0, }
25065 + { 0, 0, 0, 0, 0, 0, 0 }
25066 };
25067
25068 MODULE_DEVICE_TABLE(pci, via686a_pci_ids);
25069 diff -urNp linux-2.6.35.4/drivers/hwmon/vt8231.c linux-2.6.35.4/drivers/hwmon/vt8231.c
25070 --- linux-2.6.35.4/drivers/hwmon/vt8231.c 2010-08-26 19:47:12.000000000 -0400
25071 +++ linux-2.6.35.4/drivers/hwmon/vt8231.c 2010-09-17 20:12:09.000000000 -0400
25072 @@ -699,7 +699,7 @@ static struct platform_driver vt8231_dri
25073
25074 static const struct pci_device_id vt8231_pci_ids[] = {
25075 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231_4) },
25076 - { 0, }
25077 + { 0, 0, 0, 0, 0, 0, 0 }
25078 };
25079
25080 MODULE_DEVICE_TABLE(pci, vt8231_pci_ids);
25081 diff -urNp linux-2.6.35.4/drivers/hwmon/w83791d.c linux-2.6.35.4/drivers/hwmon/w83791d.c
25082 --- linux-2.6.35.4/drivers/hwmon/w83791d.c 2010-08-26 19:47:12.000000000 -0400
25083 +++ linux-2.6.35.4/drivers/hwmon/w83791d.c 2010-09-17 20:12:09.000000000 -0400
25084 @@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_cli
25085 struct i2c_board_info *info);
25086 static int w83791d_remove(struct i2c_client *client);
25087
25088 -static int w83791d_read(struct i2c_client *client, u8 register);
25089 -static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
25090 +static int w83791d_read(struct i2c_client *client, u8 reg);
25091 +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
25092 static struct w83791d_data *w83791d_update_device(struct device *dev);
25093
25094 #ifdef DEBUG
25095 diff -urNp linux-2.6.35.4/drivers/i2c/busses/i2c-i801.c linux-2.6.35.4/drivers/i2c/busses/i2c-i801.c
25096 --- linux-2.6.35.4/drivers/i2c/busses/i2c-i801.c 2010-08-26 19:47:12.000000000 -0400
25097 +++ linux-2.6.35.4/drivers/i2c/busses/i2c-i801.c 2010-09-17 20:12:09.000000000 -0400
25098 @@ -592,7 +592,7 @@ static const struct pci_device_id i801_i
25099 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) },
25100 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PCH_SMBUS) },
25101 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CPT_SMBUS) },
25102 - { 0, }
25103 + { 0, 0, 0, 0, 0, 0, 0 }
25104 };
25105
25106 MODULE_DEVICE_TABLE(pci, i801_ids);
25107 diff -urNp linux-2.6.35.4/drivers/i2c/busses/i2c-piix4.c linux-2.6.35.4/drivers/i2c/busses/i2c-piix4.c
25108 --- linux-2.6.35.4/drivers/i2c/busses/i2c-piix4.c 2010-08-26 19:47:12.000000000 -0400
25109 +++ linux-2.6.35.4/drivers/i2c/busses/i2c-piix4.c 2010-09-17 20:12:09.000000000 -0400
25110 @@ -124,7 +124,7 @@ static struct dmi_system_id __devinitdat
25111 .ident = "IBM",
25112 .matches = { DMI_MATCH(DMI_SYS_VENDOR, "IBM"), },
25113 },
25114 - { },
25115 + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL }
25116 };
25117
25118 static int __devinit piix4_setup(struct pci_dev *PIIX4_dev,
25119 @@ -491,7 +491,7 @@ static const struct pci_device_id piix4_
25120 PCI_DEVICE_ID_SERVERWORKS_HT1000SB) },
25121 { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS,
25122 PCI_DEVICE_ID_SERVERWORKS_HT1100LD) },
25123 - { 0, }
25124 + { 0, 0, 0, 0, 0, 0, 0 }
25125 };
25126
25127 MODULE_DEVICE_TABLE (pci, piix4_ids);
25128 diff -urNp linux-2.6.35.4/drivers/i2c/busses/i2c-sis630.c linux-2.6.35.4/drivers/i2c/busses/i2c-sis630.c
25129 --- linux-2.6.35.4/drivers/i2c/busses/i2c-sis630.c 2010-08-26 19:47:12.000000000 -0400
25130 +++ linux-2.6.35.4/drivers/i2c/busses/i2c-sis630.c 2010-09-17 20:12:09.000000000 -0400
25131 @@ -471,7 +471,7 @@ static struct i2c_adapter sis630_adapter
25132 static const struct pci_device_id sis630_ids[] __devinitconst = {
25133 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
25134 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC) },
25135 - { 0, }
25136 + { 0, 0, 0, 0, 0, 0, 0 }
25137 };
25138
25139 MODULE_DEVICE_TABLE (pci, sis630_ids);
25140 diff -urNp linux-2.6.35.4/drivers/i2c/busses/i2c-sis96x.c linux-2.6.35.4/drivers/i2c/busses/i2c-sis96x.c
25141 --- linux-2.6.35.4/drivers/i2c/busses/i2c-sis96x.c 2010-08-26 19:47:12.000000000 -0400
25142 +++ linux-2.6.35.4/drivers/i2c/busses/i2c-sis96x.c 2010-09-17 20:12:09.000000000 -0400
25143 @@ -247,7 +247,7 @@ static struct i2c_adapter sis96x_adapter
25144
25145 static const struct pci_device_id sis96x_ids[] = {
25146 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_SMBUS) },
25147 - { 0, }
25148 + { 0, 0, 0, 0, 0, 0, 0 }
25149 };
25150
25151 MODULE_DEVICE_TABLE (pci, sis96x_ids);
25152 diff -urNp linux-2.6.35.4/drivers/ide/ide-cd.c linux-2.6.35.4/drivers/ide/ide-cd.c
25153 --- linux-2.6.35.4/drivers/ide/ide-cd.c 2010-08-26 19:47:12.000000000 -0400
25154 +++ linux-2.6.35.4/drivers/ide/ide-cd.c 2010-09-17 20:12:09.000000000 -0400
25155 @@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_
25156 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
25157 if ((unsigned long)buf & alignment
25158 || blk_rq_bytes(rq) & q->dma_pad_mask
25159 - || object_is_on_stack(buf))
25160 + || object_starts_on_stack(buf))
25161 drive->dma = 0;
25162 }
25163 }
25164 diff -urNp linux-2.6.35.4/drivers/ieee1394/dv1394.c linux-2.6.35.4/drivers/ieee1394/dv1394.c
25165 --- linux-2.6.35.4/drivers/ieee1394/dv1394.c 2010-08-26 19:47:12.000000000 -0400
25166 +++ linux-2.6.35.4/drivers/ieee1394/dv1394.c 2010-09-17 20:12:09.000000000 -0400
25167 @@ -739,7 +739,7 @@ static void frame_prepare(struct video_c
25168 based upon DIF section and sequence
25169 */
25170
25171 -static void inline
25172 +static inline void
25173 frame_put_packet (struct frame *f, struct packet *p)
25174 {
25175 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
25176 @@ -2179,7 +2179,7 @@ static const struct ieee1394_device_id d
25177 .specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff,
25178 .version = AVC_SW_VERSION_ENTRY & 0xffffff
25179 },
25180 - { }
25181 + { 0, 0, 0, 0, 0, 0 }
25182 };
25183
25184 MODULE_DEVICE_TABLE(ieee1394, dv1394_id_table);
25185 diff -urNp linux-2.6.35.4/drivers/ieee1394/eth1394.c linux-2.6.35.4/drivers/ieee1394/eth1394.c
25186 --- linux-2.6.35.4/drivers/ieee1394/eth1394.c 2010-08-26 19:47:12.000000000 -0400
25187 +++ linux-2.6.35.4/drivers/ieee1394/eth1394.c 2010-09-17 20:12:09.000000000 -0400
25188 @@ -446,7 +446,7 @@ static const struct ieee1394_device_id e
25189 .specifier_id = ETHER1394_GASP_SPECIFIER_ID,
25190 .version = ETHER1394_GASP_VERSION,
25191 },
25192 - {}
25193 + { 0, 0, 0, 0, 0, 0 }
25194 };
25195
25196 MODULE_DEVICE_TABLE(ieee1394, eth1394_id_table);
25197 diff -urNp linux-2.6.35.4/drivers/ieee1394/hosts.c linux-2.6.35.4/drivers/ieee1394/hosts.c
25198 --- linux-2.6.35.4/drivers/ieee1394/hosts.c 2010-08-26 19:47:12.000000000 -0400
25199 +++ linux-2.6.35.4/drivers/ieee1394/hosts.c 2010-09-17 20:12:09.000000000 -0400
25200 @@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso
25201 }
25202
25203 static struct hpsb_host_driver dummy_driver = {
25204 + .name = "dummy",
25205 .transmit_packet = dummy_transmit_packet,
25206 .devctl = dummy_devctl,
25207 .isoctl = dummy_isoctl
25208 diff -urNp linux-2.6.35.4/drivers/ieee1394/ohci1394.c linux-2.6.35.4/drivers/ieee1394/ohci1394.c
25209 --- linux-2.6.35.4/drivers/ieee1394/ohci1394.c 2010-08-26 19:47:12.000000000 -0400
25210 +++ linux-2.6.35.4/drivers/ieee1394/ohci1394.c 2010-09-17 20:12:09.000000000 -0400
25211 @@ -148,9 +148,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_
25212 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
25213
25214 /* Module Parameters */
25215 -static int phys_dma = 1;
25216 +static int phys_dma;
25217 module_param(phys_dma, int, 0444);
25218 -MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
25219 +MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
25220
25221 static void dma_trm_tasklet(unsigned long data);
25222 static void dma_trm_reset(struct dma_trm_ctx *d);
25223 @@ -3445,7 +3445,7 @@ static struct pci_device_id ohci1394_pci
25224 .subvendor = PCI_ANY_ID,
25225 .subdevice = PCI_ANY_ID,
25226 },
25227 - { 0, },
25228 + { 0, 0, 0, 0, 0, 0, 0 },
25229 };
25230
25231 MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);
25232 diff -urNp linux-2.6.35.4/drivers/ieee1394/raw1394.c linux-2.6.35.4/drivers/ieee1394/raw1394.c
25233 --- linux-2.6.35.4/drivers/ieee1394/raw1394.c 2010-08-26 19:47:12.000000000 -0400
25234 +++ linux-2.6.35.4/drivers/ieee1394/raw1394.c 2010-09-17 20:12:09.000000000 -0400
25235 @@ -3002,7 +3002,7 @@ static const struct ieee1394_device_id r
25236 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
25237 .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
25238 .version = (CAMERA_SW_VERSION_ENTRY + 2) & 0xffffff},
25239 - {}
25240 + { 0, 0, 0, 0, 0, 0 }
25241 };
25242
25243 MODULE_DEVICE_TABLE(ieee1394, raw1394_id_table);
25244 diff -urNp linux-2.6.35.4/drivers/ieee1394/sbp2.c linux-2.6.35.4/drivers/ieee1394/sbp2.c
25245 --- linux-2.6.35.4/drivers/ieee1394/sbp2.c 2010-08-26 19:47:12.000000000 -0400
25246 +++ linux-2.6.35.4/drivers/ieee1394/sbp2.c 2010-09-17 20:12:09.000000000 -0400
25247 @@ -289,7 +289,7 @@ static const struct ieee1394_device_id s
25248 .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
25249 .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff,
25250 .version = SBP2_SW_VERSION_ENTRY & 0xffffff},
25251 - {}
25252 + { 0, 0, 0, 0, 0, 0 }
25253 };
25254 MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table);
25255
25256 @@ -2110,7 +2110,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 prot
25257 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
25258 MODULE_LICENSE("GPL");
25259
25260 -static int sbp2_module_init(void)
25261 +static int __init sbp2_module_init(void)
25262 {
25263 int ret;
25264
25265 diff -urNp linux-2.6.35.4/drivers/ieee1394/video1394.c linux-2.6.35.4/drivers/ieee1394/video1394.c
25266 --- linux-2.6.35.4/drivers/ieee1394/video1394.c 2010-08-26 19:47:12.000000000 -0400
25267 +++ linux-2.6.35.4/drivers/ieee1394/video1394.c 2010-09-17 20:12:09.000000000 -0400
25268 @@ -1312,7 +1312,7 @@ static const struct ieee1394_device_id v
25269 .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,
25270 .version = (CAMERA_SW_VERSION_ENTRY + 2) & 0xffffff
25271 },
25272 - { }
25273 + { 0, 0, 0, 0, 0, 0 }
25274 };
25275
25276 MODULE_DEVICE_TABLE(ieee1394, video1394_id_table);
25277 diff -urNp linux-2.6.35.4/drivers/infiniband/core/cm.c linux-2.6.35.4/drivers/infiniband/core/cm.c
25278 --- linux-2.6.35.4/drivers/infiniband/core/cm.c 2010-08-26 19:47:12.000000000 -0400
25279 +++ linux-2.6.35.4/drivers/infiniband/core/cm.c 2010-09-17 20:12:09.000000000 -0400
25280 @@ -113,7 +113,7 @@ static char const counter_group_names[CM
25281
25282 struct cm_counter_group {
25283 struct kobject obj;
25284 - atomic_long_t counter[CM_ATTR_COUNT];
25285 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
25286 };
25287
25288 struct cm_counter_attribute {
25289 @@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm
25290 struct ib_mad_send_buf *msg = NULL;
25291 int ret;
25292
25293 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25294 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25295 counter[CM_REQ_COUNTER]);
25296
25297 /* Quick state check to discard duplicate REQs. */
25298 @@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm
25299 if (!cm_id_priv)
25300 return;
25301
25302 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25303 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25304 counter[CM_REP_COUNTER]);
25305 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
25306 if (ret)
25307 @@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work
25308 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
25309 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
25310 spin_unlock_irq(&cm_id_priv->lock);
25311 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25312 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25313 counter[CM_RTU_COUNTER]);
25314 goto out;
25315 }
25316 @@ -2111,7 +2111,7 @@ static int cm_dreq_handler(struct cm_wor
25317 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
25318 dreq_msg->local_comm_id);
25319 if (!cm_id_priv) {
25320 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25321 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25322 counter[CM_DREQ_COUNTER]);
25323 cm_issue_drep(work->port, work->mad_recv_wc);
25324 return -EINVAL;
25325 @@ -2132,7 +2132,7 @@ static int cm_dreq_handler(struct cm_wor
25326 case IB_CM_MRA_REP_RCVD:
25327 break;
25328 case IB_CM_TIMEWAIT:
25329 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25330 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25331 counter[CM_DREQ_COUNTER]);
25332 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
25333 goto unlock;
25334 @@ -2146,7 +2146,7 @@ static int cm_dreq_handler(struct cm_wor
25335 cm_free_msg(msg);
25336 goto deref;
25337 case IB_CM_DREQ_RCVD:
25338 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25339 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25340 counter[CM_DREQ_COUNTER]);
25341 goto unlock;
25342 default:
25343 @@ -2502,7 +2502,7 @@ static int cm_mra_handler(struct cm_work
25344 ib_modify_mad(cm_id_priv->av.port->mad_agent,
25345 cm_id_priv->msg, timeout)) {
25346 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
25347 - atomic_long_inc(&work->port->
25348 + atomic_long_inc_unchecked(&work->port->
25349 counter_group[CM_RECV_DUPLICATES].
25350 counter[CM_MRA_COUNTER]);
25351 goto out;
25352 @@ -2511,7 +2511,7 @@ static int cm_mra_handler(struct cm_work
25353 break;
25354 case IB_CM_MRA_REQ_RCVD:
25355 case IB_CM_MRA_REP_RCVD:
25356 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25357 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25358 counter[CM_MRA_COUNTER]);
25359 /* fall through */
25360 default:
25361 @@ -2673,7 +2673,7 @@ static int cm_lap_handler(struct cm_work
25362 case IB_CM_LAP_IDLE:
25363 break;
25364 case IB_CM_MRA_LAP_SENT:
25365 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25366 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25367 counter[CM_LAP_COUNTER]);
25368 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
25369 goto unlock;
25370 @@ -2689,7 +2689,7 @@ static int cm_lap_handler(struct cm_work
25371 cm_free_msg(msg);
25372 goto deref;
25373 case IB_CM_LAP_RCVD:
25374 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25375 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25376 counter[CM_LAP_COUNTER]);
25377 goto unlock;
25378 default:
25379 @@ -2973,7 +2973,7 @@ static int cm_sidr_req_handler(struct cm
25380 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
25381 if (cur_cm_id_priv) {
25382 spin_unlock_irq(&cm.lock);
25383 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25384 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25385 counter[CM_SIDR_REQ_COUNTER]);
25386 goto out; /* Duplicate message. */
25387 }
25388 @@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_ma
25389 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
25390 msg->retries = 1;
25391
25392 - atomic_long_add(1 + msg->retries,
25393 + atomic_long_add_unchecked(1 + msg->retries,
25394 &port->counter_group[CM_XMIT].counter[attr_index]);
25395 if (msg->retries)
25396 - atomic_long_add(msg->retries,
25397 + atomic_long_add_unchecked(msg->retries,
25398 &port->counter_group[CM_XMIT_RETRIES].
25399 counter[attr_index]);
25400
25401 @@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_ma
25402 }
25403
25404 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
25405 - atomic_long_inc(&port->counter_group[CM_RECV].
25406 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
25407 counter[attr_id - CM_ATTR_ID_OFFSET]);
25408
25409 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
25410 @@ -3595,7 +3595,7 @@ static ssize_t cm_show_counter(struct ko
25411 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
25412
25413 return sprintf(buf, "%ld\n",
25414 - atomic_long_read(&group->counter[cm_attr->index]));
25415 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
25416 }
25417
25418 static const struct sysfs_ops cm_counter_ops = {
25419 diff -urNp linux-2.6.35.4/drivers/infiniband/hw/qib/qib.h linux-2.6.35.4/drivers/infiniband/hw/qib/qib.h
25420 --- linux-2.6.35.4/drivers/infiniband/hw/qib/qib.h 2010-08-26 19:47:12.000000000 -0400
25421 +++ linux-2.6.35.4/drivers/infiniband/hw/qib/qib.h 2010-09-17 20:12:09.000000000 -0400
25422 @@ -50,6 +50,7 @@
25423 #include <linux/completion.h>
25424 #include <linux/kref.h>
25425 #include <linux/sched.h>
25426 +#include <linux/slab.h>
25427
25428 #include "qib_common.h"
25429 #include "qib_verbs.h"
25430 diff -urNp linux-2.6.35.4/drivers/input/keyboard/atkbd.c linux-2.6.35.4/drivers/input/keyboard/atkbd.c
25431 --- linux-2.6.35.4/drivers/input/keyboard/atkbd.c 2010-08-26 19:47:12.000000000 -0400
25432 +++ linux-2.6.35.4/drivers/input/keyboard/atkbd.c 2010-09-17 20:12:09.000000000 -0400
25433 @@ -1240,7 +1240,7 @@ static struct serio_device_id atkbd_seri
25434 .id = SERIO_ANY,
25435 .extra = SERIO_ANY,
25436 },
25437 - { 0 }
25438 + { 0, 0, 0, 0 }
25439 };
25440
25441 MODULE_DEVICE_TABLE(serio, atkbd_serio_ids);
25442 diff -urNp linux-2.6.35.4/drivers/input/mouse/lifebook.c linux-2.6.35.4/drivers/input/mouse/lifebook.c
25443 --- linux-2.6.35.4/drivers/input/mouse/lifebook.c 2010-08-26 19:47:12.000000000 -0400
25444 +++ linux-2.6.35.4/drivers/input/mouse/lifebook.c 2010-09-17 20:12:09.000000000 -0400
25445 @@ -123,7 +123,7 @@ static const struct dmi_system_id __init
25446 DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook B142"),
25447 },
25448 },
25449 - { }
25450 + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL}
25451 };
25452
25453 void __init lifebook_module_init(void)
25454 diff -urNp linux-2.6.35.4/drivers/input/mouse/psmouse-base.c linux-2.6.35.4/drivers/input/mouse/psmouse-base.c
25455 --- linux-2.6.35.4/drivers/input/mouse/psmouse-base.c 2010-08-26 19:47:12.000000000 -0400
25456 +++ linux-2.6.35.4/drivers/input/mouse/psmouse-base.c 2010-09-17 20:12:09.000000000 -0400
25457 @@ -1460,7 +1460,7 @@ static struct serio_device_id psmouse_se
25458 .id = SERIO_ANY,
25459 .extra = SERIO_ANY,
25460 },
25461 - { 0 }
25462 + { 0, 0, 0, 0 }
25463 };
25464
25465 MODULE_DEVICE_TABLE(serio, psmouse_serio_ids);
25466 diff -urNp linux-2.6.35.4/drivers/input/mouse/synaptics.c linux-2.6.35.4/drivers/input/mouse/synaptics.c
25467 --- linux-2.6.35.4/drivers/input/mouse/synaptics.c 2010-08-26 19:47:12.000000000 -0400
25468 +++ linux-2.6.35.4/drivers/input/mouse/synaptics.c 2010-09-17 20:12:09.000000000 -0400
25469 @@ -476,7 +476,7 @@ static void synaptics_process_packet(str
25470 break;
25471 case 2:
25472 if (SYN_MODEL_PEN(priv->model_id))
25473 - ; /* Nothing, treat a pen as a single finger */
25474 + break; /* Nothing, treat a pen as a single finger */
25475 break;
25476 case 4 ... 15:
25477 if (SYN_CAP_PALMDETECT(priv->capabilities))
25478 @@ -701,7 +701,6 @@ static const struct dmi_system_id __init
25479 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
25480 DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE M300"),
25481 },
25482 -
25483 },
25484 {
25485 /* Toshiba Portege M300 */
25486 @@ -710,9 +709,8 @@ static const struct dmi_system_id __init
25487 DMI_MATCH(DMI_PRODUCT_NAME, "Portable PC"),
25488 DMI_MATCH(DMI_PRODUCT_VERSION, "Version 1.0"),
25489 },
25490 -
25491 },
25492 - { }
25493 + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL }
25494 #endif
25495 };
25496
25497 diff -urNp linux-2.6.35.4/drivers/input/mousedev.c linux-2.6.35.4/drivers/input/mousedev.c
25498 --- linux-2.6.35.4/drivers/input/mousedev.c 2010-08-26 19:47:12.000000000 -0400
25499 +++ linux-2.6.35.4/drivers/input/mousedev.c 2010-09-17 20:12:09.000000000 -0400
25500 @@ -754,7 +754,7 @@ static ssize_t mousedev_read(struct file
25501
25502 spin_unlock_irq(&client->packet_lock);
25503
25504 - if (copy_to_user(buffer, data, count))
25505 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
25506 return -EFAULT;
25507
25508 return count;
25509 @@ -1051,7 +1051,7 @@ static struct input_handler mousedev_han
25510
25511 #ifdef CONFIG_INPUT_MOUSEDEV_PSAUX
25512 static struct miscdevice psaux_mouse = {
25513 - PSMOUSE_MINOR, "psaux", &mousedev_fops
25514 + PSMOUSE_MINOR, "psaux", &mousedev_fops, {NULL, NULL}, NULL, NULL
25515 };
25516 static int psaux_registered;
25517 #endif
25518 diff -urNp linux-2.6.35.4/drivers/input/serio/i8042-x86ia64io.h linux-2.6.35.4/drivers/input/serio/i8042-x86ia64io.h
25519 --- linux-2.6.35.4/drivers/input/serio/i8042-x86ia64io.h 2010-08-26 19:47:12.000000000 -0400
25520 +++ linux-2.6.35.4/drivers/input/serio/i8042-x86ia64io.h 2010-09-17 20:12:09.000000000 -0400
25521 @@ -183,7 +183,7 @@ static const struct dmi_system_id __init
25522 DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"),
25523 },
25524 },
25525 - { }
25526 + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL }
25527 };
25528
25529 /*
25530 @@ -413,7 +413,7 @@ static const struct dmi_system_id __init
25531 DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
25532 },
25533 },
25534 - { }
25535 + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL }
25536 };
25537
25538 static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
25539 @@ -487,7 +487,7 @@ static const struct dmi_system_id __init
25540 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1720"),
25541 },
25542 },
25543 - { }
25544 + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL }
25545 };
25546
25547 #ifdef CONFIG_PNP
25548 @@ -506,7 +506,7 @@ static const struct dmi_system_id __init
25549 DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
25550 },
25551 },
25552 - { }
25553 + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL }
25554 };
25555
25556 static const struct dmi_system_id __initconst i8042_dmi_laptop_table[] = {
25557 @@ -530,7 +530,7 @@ static const struct dmi_system_id __init
25558 DMI_MATCH(DMI_CHASSIS_TYPE, "14"), /* Sub-Notebook */
25559 },
25560 },
25561 - { }
25562 + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL }
25563 };
25564 #endif
25565
25566 @@ -604,7 +604,7 @@ static const struct dmi_system_id __init
25567 DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4280"),
25568 },
25569 },
25570 - { }
25571 + { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL }
25572 };
25573
25574 #endif /* CONFIG_X86 */
25575 diff -urNp linux-2.6.35.4/drivers/input/serio/serio_raw.c linux-2.6.35.4/drivers/input/serio/serio_raw.c
25576 --- linux-2.6.35.4/drivers/input/serio/serio_raw.c 2010-08-26 19:47:12.000000000 -0400
25577 +++ linux-2.6.35.4/drivers/input/serio/serio_raw.c 2010-09-17 20:12:09.000000000 -0400
25578 @@ -376,7 +376,7 @@ static struct serio_device_id serio_raw_
25579 .id = SERIO_ANY,
25580 .extra = SERIO_ANY,
25581 },
25582 - { 0 }
25583 + { 0, 0, 0, 0 }
25584 };
25585
25586 MODULE_DEVICE_TABLE(serio, serio_raw_serio_ids);
25587 diff -urNp linux-2.6.35.4/drivers/isdn/gigaset/common.c linux-2.6.35.4/drivers/isdn/gigaset/common.c
25588 --- linux-2.6.35.4/drivers/isdn/gigaset/common.c 2010-08-26 19:47:12.000000000 -0400
25589 +++ linux-2.6.35.4/drivers/isdn/gigaset/common.c 2010-09-17 20:12:09.000000000 -0400
25590 @@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct
25591 cs->commands_pending = 0;
25592 cs->cur_at_seq = 0;
25593 cs->gotfwver = -1;
25594 - cs->open_count = 0;
25595 + atomic_set(&cs->open_count, 0);
25596 cs->dev = NULL;
25597 cs->tty = NULL;
25598 cs->tty_dev = NULL;
25599 diff -urNp linux-2.6.35.4/drivers/isdn/gigaset/gigaset.h linux-2.6.35.4/drivers/isdn/gigaset/gigaset.h
25600 --- linux-2.6.35.4/drivers/isdn/gigaset/gigaset.h 2010-08-26 19:47:12.000000000 -0400
25601 +++ linux-2.6.35.4/drivers/isdn/gigaset/gigaset.h 2010-09-17 20:12:09.000000000 -0400
25602 @@ -442,7 +442,7 @@ struct cardstate {
25603 spinlock_t cmdlock;
25604 unsigned curlen, cmdbytes;
25605
25606 - unsigned open_count;
25607 + atomic_t open_count;
25608 struct tty_struct *tty;
25609 struct tasklet_struct if_wake_tasklet;
25610 unsigned control_state;
25611 diff -urNp linux-2.6.35.4/drivers/isdn/gigaset/interface.c linux-2.6.35.4/drivers/isdn/gigaset/interface.c
25612 --- linux-2.6.35.4/drivers/isdn/gigaset/interface.c 2010-08-26 19:47:12.000000000 -0400
25613 +++ linux-2.6.35.4/drivers/isdn/gigaset/interface.c 2010-09-17 20:12:09.000000000 -0400
25614 @@ -160,9 +160,7 @@ static int if_open(struct tty_struct *tt
25615 return -ERESTARTSYS;
25616 tty->driver_data = cs;
25617
25618 - ++cs->open_count;
25619 -
25620 - if (cs->open_count == 1) {
25621 + if (atomic_inc_return(&cs->open_count) == 1) {
25622 spin_lock_irqsave(&cs->lock, flags);
25623 cs->tty = tty;
25624 spin_unlock_irqrestore(&cs->lock, flags);
25625 @@ -190,10 +188,10 @@ static void if_close(struct tty_struct *
25626
25627 if (!cs->connected)
25628 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
25629 - else if (!cs->open_count)
25630 + else if (!atomic_read(&cs->open_count))
25631 dev_warn(cs->dev, "%s: device not opened\n", __func__);
25632 else {
25633 - if (!--cs->open_count) {
25634 + if (!atomic_dec_return(&cs->open_count)) {
25635 spin_lock_irqsave(&cs->lock, flags);
25636 cs->tty = NULL;
25637 spin_unlock_irqrestore(&cs->lock, flags);
25638 @@ -228,7 +226,7 @@ static int if_ioctl(struct tty_struct *t
25639 if (!cs->connected) {
25640 gig_dbg(DEBUG_IF, "not connected");
25641 retval = -ENODEV;
25642 - } else if (!cs->open_count)
25643 + } else if (!atomic_read(&cs->open_count))
25644 dev_warn(cs->dev, "%s: device not opened\n", __func__);
25645 else {
25646 retval = 0;
25647 @@ -355,7 +353,7 @@ static int if_write(struct tty_struct *t
25648 if (!cs->connected) {
25649 gig_dbg(DEBUG_IF, "not connected");
25650 retval = -ENODEV;
25651 - } else if (!cs->open_count)
25652 + } else if (!atomic_read(&cs->open_count))
25653 dev_warn(cs->dev, "%s: device not opened\n", __func__);
25654 else if (cs->mstate != MS_LOCKED) {
25655 dev_warn(cs->dev, "can't write to unlocked device\n");
25656 @@ -389,7 +387,7 @@ static int if_write_room(struct tty_stru
25657 if (!cs->connected) {
25658 gig_dbg(DEBUG_IF, "not connected");
25659 retval = -ENODEV;
25660 - } else if (!cs->open_count)
25661 + } else if (!atomic_read(&cs->open_count))
25662 dev_warn(cs->dev, "%s: device not opened\n", __func__);
25663 else if (cs->mstate != MS_LOCKED) {
25664 dev_warn(cs->dev, "can't write to unlocked device\n");
25665 @@ -419,7 +417,7 @@ static int if_chars_in_buffer(struct tty
25666
25667 if (!cs->connected)
25668 gig_dbg(DEBUG_IF, "not connected");
25669 - else if (!cs->open_count)
25670 + else if (!atomic_read(&cs->open_count))
25671 dev_warn(cs->dev, "%s: device not opened\n", __func__);
25672 else if (cs->mstate != MS_LOCKED)
25673 dev_warn(cs->dev, "can't write to unlocked device\n");
25674 @@ -447,7 +445,7 @@ static void if_throttle(struct tty_struc
25675
25676 if (!cs->connected)
25677 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
25678 - else if (!cs->open_count)
25679 + else if (!atomic_read(&cs->open_count))
25680 dev_warn(cs->dev, "%s: device not opened\n", __func__);
25681 else
25682 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
25683 @@ -471,7 +469,7 @@ static void if_unthrottle(struct tty_str
25684
25685 if (!cs->connected)
25686 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
25687 - else if (!cs->open_count)
25688 + else if (!atomic_read(&cs->open_count))
25689 dev_warn(cs->dev, "%s: device not opened\n", __func__);
25690 else
25691 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
25692 @@ -502,7 +500,7 @@ static void if_set_termios(struct tty_st
25693 goto out;
25694 }
25695
25696 - if (!cs->open_count) {
25697 + if (!atomic_read(&cs->open_count)) {
25698 dev_warn(cs->dev, "%s: device not opened\n", __func__);
25699 goto out;
25700 }
25701 diff -urNp linux-2.6.35.4/drivers/isdn/hardware/avm/b1.c linux-2.6.35.4/drivers/isdn/hardware/avm/b1.c
25702 --- linux-2.6.35.4/drivers/isdn/hardware/avm/b1.c 2010-08-26 19:47:12.000000000 -0400
25703 +++ linux-2.6.35.4/drivers/isdn/hardware/avm/b1.c 2010-09-17 20:12:37.000000000 -0400
25704 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capilo
25705 }
25706 if (left) {
25707 if (t4file->user) {
25708 - if (copy_from_user(buf, dp, left))
25709 + if (left > sizeof(buf) || copy_from_user(buf, dp, left))
25710 return -EFAULT;
25711 } else {
25712 memcpy(buf, dp, left);
25713 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capilo
25714 }
25715 if (left) {
25716 if (config->user) {
25717 - if (copy_from_user(buf, dp, left))
25718 + if (left > sizeof(buf) || copy_from_user(buf, dp, left))
25719 return -EFAULT;
25720 } else {
25721 memcpy(buf, dp, left);
25722 diff -urNp linux-2.6.35.4/drivers/isdn/icn/icn.c linux-2.6.35.4/drivers/isdn/icn/icn.c
25723 --- linux-2.6.35.4/drivers/isdn/icn/icn.c 2010-08-26 19:47:12.000000000 -0400
25724 +++ linux-2.6.35.4/drivers/isdn/icn/icn.c 2010-09-17 20:12:37.000000000 -0400
25725 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len
25726 if (count > len)
25727 count = len;
25728 if (user) {
25729 - if (copy_from_user(msg, buf, count))
25730 + if (count > sizeof(msg) || copy_from_user(msg, buf, count))
25731 return -EFAULT;
25732 } else
25733 memcpy(msg, buf, count);
25734 diff -urNp linux-2.6.35.4/drivers/lguest/core.c linux-2.6.35.4/drivers/lguest/core.c
25735 --- linux-2.6.35.4/drivers/lguest/core.c 2010-08-26 19:47:12.000000000 -0400
25736 +++ linux-2.6.35.4/drivers/lguest/core.c 2010-09-17 20:12:09.000000000 -0400
25737 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
25738 * it's worked so far. The end address needs +1 because __get_vm_area
25739 * allocates an extra guard page, so we need space for that.
25740 */
25741 +
25742 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
25743 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
25744 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
25745 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
25746 +#else
25747 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
25748 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
25749 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
25750 +#endif
25751 +
25752 if (!switcher_vma) {
25753 err = -ENOMEM;
25754 printk("lguest: could not map switcher pages high\n");
25755 diff -urNp linux-2.6.35.4/drivers/macintosh/via-pmu-backlight.c linux-2.6.35.4/drivers/macintosh/via-pmu-backlight.c
25756 --- linux-2.6.35.4/drivers/macintosh/via-pmu-backlight.c 2010-08-26 19:47:12.000000000 -0400
25757 +++ linux-2.6.35.4/drivers/macintosh/via-pmu-backlight.c 2010-09-17 20:12:09.000000000 -0400
25758 @@ -15,7 +15,7 @@
25759
25760 #define MAX_PMU_LEVEL 0xFF
25761
25762 -static struct backlight_ops pmu_backlight_data;
25763 +static const struct backlight_ops pmu_backlight_data;
25764 static DEFINE_SPINLOCK(pmu_backlight_lock);
25765 static int sleeping, uses_pmu_bl;
25766 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
25767 @@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(
25768 return bd->props.brightness;
25769 }
25770
25771 -static struct backlight_ops pmu_backlight_data = {
25772 +static const struct backlight_ops pmu_backlight_data = {
25773 .get_brightness = pmu_backlight_get_brightness,
25774 .update_status = pmu_backlight_update_status,
25775
25776 diff -urNp linux-2.6.35.4/drivers/macintosh/via-pmu.c linux-2.6.35.4/drivers/macintosh/via-pmu.c
25777 --- linux-2.6.35.4/drivers/macintosh/via-pmu.c 2010-08-26 19:47:12.000000000 -0400
25778 +++ linux-2.6.35.4/drivers/macintosh/via-pmu.c 2010-09-17 20:12:09.000000000 -0400
25779 @@ -2254,7 +2254,7 @@ static int pmu_sleep_valid(suspend_state
25780 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
25781 }
25782
25783 -static struct platform_suspend_ops pmu_pm_ops = {
25784 +static const struct platform_suspend_ops pmu_pm_ops = {
25785 .enter = powerbook_sleep,
25786 .valid = pmu_sleep_valid,
25787 };
25788 diff -urNp linux-2.6.35.4/drivers/md/bitmap.c linux-2.6.35.4/drivers/md/bitmap.c
25789 --- linux-2.6.35.4/drivers/md/bitmap.c 2010-08-26 19:47:12.000000000 -0400
25790 +++ linux-2.6.35.4/drivers/md/bitmap.c 2010-09-17 20:12:09.000000000 -0400
25791 @@ -58,7 +58,7 @@
25792 # if DEBUG > 0
25793 # define PRINTK(x...) printk(KERN_DEBUG x)
25794 # else
25795 -# define PRINTK(x...)
25796 +# define PRINTK(x...) do {} while (0)
25797 # endif
25798 #endif
25799
25800 diff -urNp linux-2.6.35.4/drivers/md/dm-table.c linux-2.6.35.4/drivers/md/dm-table.c
25801 --- linux-2.6.35.4/drivers/md/dm-table.c 2010-08-26 19:47:12.000000000 -0400
25802 +++ linux-2.6.35.4/drivers/md/dm-table.c 2010-09-17 20:12:09.000000000 -0400
25803 @@ -363,7 +363,7 @@ static int device_area_is_invalid(struct
25804 if (!dev_size)
25805 return 0;
25806
25807 - if ((start >= dev_size) || (start + len > dev_size)) {
25808 + if ((start >= dev_size) || (len > dev_size - start)) {
25809 DMWARN("%s: %s too small for target: "
25810 "start=%llu, len=%llu, dev_size=%llu",
25811 dm_device_name(ti->table->md), bdevname(bdev, b),
25812 diff -urNp linux-2.6.35.4/drivers/md/md.c linux-2.6.35.4/drivers/md/md.c
25813 --- linux-2.6.35.4/drivers/md/md.c 2010-08-26 19:47:12.000000000 -0400
25814 +++ linux-2.6.35.4/drivers/md/md.c 2010-09-17 20:12:09.000000000 -0400
25815 @@ -6352,7 +6352,7 @@ static int md_seq_show(struct seq_file *
25816 chunk_kb ? "KB" : "B");
25817 if (bitmap->file) {
25818 seq_printf(seq, ", file: ");
25819 - seq_path(seq, &bitmap->file->f_path, " \t\n");
25820 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
25821 }
25822
25823 seq_printf(seq, "\n");
25824 @@ -6446,7 +6446,7 @@ static int is_mddev_idle(mddev_t *mddev,
25825 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
25826 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
25827 (int)part_stat_read(&disk->part0, sectors[1]) -
25828 - atomic_read(&disk->sync_io);
25829 + atomic_read_unchecked(&disk->sync_io);
25830 /* sync IO will cause sync_io to increase before the disk_stats
25831 * as sync_io is counted when a request starts, and
25832 * disk_stats is counted when it completes.
25833 diff -urNp linux-2.6.35.4/drivers/md/md.h linux-2.6.35.4/drivers/md/md.h
25834 --- linux-2.6.35.4/drivers/md/md.h 2010-08-26 19:47:12.000000000 -0400
25835 +++ linux-2.6.35.4/drivers/md/md.h 2010-09-17 20:12:09.000000000 -0400
25836 @@ -334,7 +334,7 @@ static inline void rdev_dec_pending(mdk_
25837
25838 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
25839 {
25840 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
25841 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
25842 }
25843
25844 struct mdk_personality
25845 diff -urNp linux-2.6.35.4/drivers/media/dvb/dvb-core/dvbdev.c linux-2.6.35.4/drivers/media/dvb/dvb-core/dvbdev.c
25846 --- linux-2.6.35.4/drivers/media/dvb/dvb-core/dvbdev.c 2010-08-26 19:47:12.000000000 -0400
25847 +++ linux-2.6.35.4/drivers/media/dvb/dvb-core/dvbdev.c 2010-09-17 20:12:09.000000000 -0400
25848 @@ -196,6 +196,7 @@ int dvb_register_device(struct dvb_adapt
25849 const struct dvb_device *template, void *priv, int type)
25850 {
25851 struct dvb_device *dvbdev;
25852 + /* cannot be const, see this function */
25853 struct file_operations *dvbdevfops;
25854 struct device *clsdev;
25855 int minor;
25856 diff -urNp linux-2.6.35.4/drivers/media/radio/radio-cadet.c linux-2.6.35.4/drivers/media/radio/radio-cadet.c
25857 --- linux-2.6.35.4/drivers/media/radio/radio-cadet.c 2010-08-26 19:47:12.000000000 -0400
25858 +++ linux-2.6.35.4/drivers/media/radio/radio-cadet.c 2010-09-17 20:12:37.000000000 -0400
25859 @@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *f
25860 while (i < count && dev->rdsin != dev->rdsout)
25861 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
25862
25863 - if (copy_to_user(data, readbuf, i))
25864 + if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
25865 return -EFAULT;
25866 return i;
25867 }
25868 diff -urNp linux-2.6.35.4/drivers/message/fusion/mptbase.c linux-2.6.35.4/drivers/message/fusion/mptbase.c
25869 --- linux-2.6.35.4/drivers/message/fusion/mptbase.c 2010-08-26 19:47:12.000000000 -0400
25870 +++ linux-2.6.35.4/drivers/message/fusion/mptbase.c 2010-09-17 20:12:37.000000000 -0400
25871 @@ -6715,8 +6715,14 @@ procmpt_iocinfo_read(char *buf, char **s
25872 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
25873 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
25874
25875 +#ifdef CONFIG_GRKERNSEC_HIDESYM
25876 + len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
25877 + NULL, NULL);
25878 +#else
25879 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
25880 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
25881 +#endif
25882 +
25883 /*
25884 * Rounding UP to nearest 4-kB boundary here...
25885 */
25886 diff -urNp linux-2.6.35.4/drivers/message/fusion/mptdebug.h linux-2.6.35.4/drivers/message/fusion/mptdebug.h
25887 --- linux-2.6.35.4/drivers/message/fusion/mptdebug.h 2010-08-26 19:47:12.000000000 -0400
25888 +++ linux-2.6.35.4/drivers/message/fusion/mptdebug.h 2010-09-17 20:12:09.000000000 -0400
25889 @@ -71,7 +71,7 @@
25890 CMD; \
25891 }
25892 #else
25893 -#define MPT_CHECK_LOGGING(IOC, CMD, BITS)
25894 +#define MPT_CHECK_LOGGING(IOC, CMD, BITS) do {} while (0)
25895 #endif
25896
25897
25898 diff -urNp linux-2.6.35.4/drivers/message/fusion/mptsas.c linux-2.6.35.4/drivers/message/fusion/mptsas.c
25899 --- linux-2.6.35.4/drivers/message/fusion/mptsas.c 2010-08-26 19:47:12.000000000 -0400
25900 +++ linux-2.6.35.4/drivers/message/fusion/mptsas.c 2010-09-17 20:12:09.000000000 -0400
25901 @@ -437,6 +437,23 @@ mptsas_is_end_device(struct mptsas_devin
25902 return 0;
25903 }
25904
25905 +static inline void
25906 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
25907 +{
25908 + if (phy_info->port_details) {
25909 + phy_info->port_details->rphy = rphy;
25910 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
25911 + ioc->name, rphy));
25912 + }
25913 +
25914 + if (rphy) {
25915 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
25916 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
25917 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
25918 + ioc->name, rphy, rphy->dev.release));
25919 + }
25920 +}
25921 +
25922 /* no mutex */
25923 static void
25924 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
25925 @@ -475,23 +492,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
25926 return NULL;
25927 }
25928
25929 -static inline void
25930 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
25931 -{
25932 - if (phy_info->port_details) {
25933 - phy_info->port_details->rphy = rphy;
25934 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
25935 - ioc->name, rphy));
25936 - }
25937 -
25938 - if (rphy) {
25939 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
25940 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
25941 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
25942 - ioc->name, rphy, rphy->dev.release));
25943 - }
25944 -}
25945 -
25946 static inline struct sas_port *
25947 mptsas_get_port(struct mptsas_phyinfo *phy_info)
25948 {
25949 diff -urNp linux-2.6.35.4/drivers/message/i2o/i2o_proc.c linux-2.6.35.4/drivers/message/i2o/i2o_proc.c
25950 --- linux-2.6.35.4/drivers/message/i2o/i2o_proc.c 2010-08-26 19:47:12.000000000 -0400
25951 +++ linux-2.6.35.4/drivers/message/i2o/i2o_proc.c 2010-09-17 20:12:09.000000000 -0400
25952 @@ -255,13 +255,6 @@ static char *scsi_devices[] = {
25953 "Array Controller Device"
25954 };
25955
25956 -static char *chtostr(u8 * chars, int n)
25957 -{
25958 - char tmp[256];
25959 - tmp[0] = 0;
25960 - return strncat(tmp, (char *)chars, n);
25961 -}
25962 -
25963 static int i2o_report_query_status(struct seq_file *seq, int block_status,
25964 char *group)
25965 {
25966 @@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct
25967
25968 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
25969 seq_printf(seq, "%-#8x", ddm_table.module_id);
25970 - seq_printf(seq, "%-29s",
25971 - chtostr(ddm_table.module_name_version, 28));
25972 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
25973 seq_printf(seq, "%9d ", ddm_table.data_size);
25974 seq_printf(seq, "%8d", ddm_table.code_size);
25975
25976 @@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(s
25977
25978 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
25979 seq_printf(seq, "%-#8x", dst->module_id);
25980 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
25981 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
25982 + seq_printf(seq, "%-.28s", dst->module_name_version);
25983 + seq_printf(seq, "%-.8s", dst->date);
25984 seq_printf(seq, "%8d ", dst->module_size);
25985 seq_printf(seq, "%8d ", dst->mpb_size);
25986 seq_printf(seq, "0x%04x", dst->module_flags);
25987 @@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(str
25988 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
25989 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
25990 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
25991 - seq_printf(seq, "Vendor info : %s\n",
25992 - chtostr((u8 *) (work32 + 2), 16));
25993 - seq_printf(seq, "Product info : %s\n",
25994 - chtostr((u8 *) (work32 + 6), 16));
25995 - seq_printf(seq, "Description : %s\n",
25996 - chtostr((u8 *) (work32 + 10), 16));
25997 - seq_printf(seq, "Product rev. : %s\n",
25998 - chtostr((u8 *) (work32 + 14), 8));
25999 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
26000 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
26001 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
26002 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
26003
26004 seq_printf(seq, "Serial number : ");
26005 print_serial_number(seq, (u8 *) (work32 + 16),
26006 @@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(str
26007 }
26008
26009 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
26010 - seq_printf(seq, "Module name : %s\n",
26011 - chtostr(result.module_name, 24));
26012 - seq_printf(seq, "Module revision : %s\n",
26013 - chtostr(result.module_rev, 8));
26014 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
26015 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
26016
26017 seq_printf(seq, "Serial number : ");
26018 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
26019 @@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq
26020 return 0;
26021 }
26022
26023 - seq_printf(seq, "Device name : %s\n",
26024 - chtostr(result.device_name, 64));
26025 - seq_printf(seq, "Service name : %s\n",
26026 - chtostr(result.service_name, 64));
26027 - seq_printf(seq, "Physical name : %s\n",
26028 - chtostr(result.physical_location, 64));
26029 - seq_printf(seq, "Instance number : %s\n",
26030 - chtostr(result.instance_number, 4));
26031 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
26032 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
26033 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
26034 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
26035
26036 return 0;
26037 }
26038 diff -urNp linux-2.6.35.4/drivers/mfd/janz-cmodio.c linux-2.6.35.4/drivers/mfd/janz-cmodio.c
26039 --- linux-2.6.35.4/drivers/mfd/janz-cmodio.c 2010-08-26 19:47:12.000000000 -0400
26040 +++ linux-2.6.35.4/drivers/mfd/janz-cmodio.c 2010-09-17 20:12:09.000000000 -0400
26041 @@ -13,6 +13,7 @@
26042
26043 #include <linux/kernel.h>
26044 #include <linux/module.h>
26045 +#include <linux/slab.h>
26046 #include <linux/init.h>
26047 #include <linux/pci.h>
26048 #include <linux/interrupt.h>
26049 diff -urNp linux-2.6.35.4/drivers/misc/kgdbts.c linux-2.6.35.4/drivers/misc/kgdbts.c
26050 --- linux-2.6.35.4/drivers/misc/kgdbts.c 2010-08-26 19:47:12.000000000 -0400
26051 +++ linux-2.6.35.4/drivers/misc/kgdbts.c 2010-09-17 20:12:09.000000000 -0400
26052 @@ -118,7 +118,7 @@
26053 } while (0)
26054 #define MAX_CONFIG_LEN 40
26055
26056 -static struct kgdb_io kgdbts_io_ops;
26057 +static const struct kgdb_io kgdbts_io_ops;
26058 static char get_buf[BUFMAX];
26059 static int get_buf_cnt;
26060 static char put_buf[BUFMAX];
26061 @@ -1114,7 +1114,7 @@ static void kgdbts_post_exp_handler(void
26062 module_put(THIS_MODULE);
26063 }
26064
26065 -static struct kgdb_io kgdbts_io_ops = {
26066 +static const struct kgdb_io kgdbts_io_ops = {
26067 .name = "kgdbts",
26068 .read_char = kgdbts_get_char,
26069 .write_char = kgdbts_put_char,
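Declaring an operations table such as kgdb_io const lets the compiler place it in read-only data, so its function pointers cannot be redirected by a later stray or malicious write; the same constification recurs throughout the rest of this patch for backlight_ops, the e1000e/igb mac/phy/nvm ops, file_operations, dma_map_ops and others. A small stand-alone sketch of the pattern with an invented ops type:

#include <stdio.h>

/* hypothetical ops table, analogous to struct kgdb_io */
struct demo_io_ops {
	const char *name;
	int (*read_char)(void);
	void (*write_char)(int c);
};

static int demo_get_char(void) { return 'x'; }
static void demo_put_char(int c) { putchar(c); }

/* const: the table lands in read-only data, so its function
 * pointers cannot be rewritten after the program starts */
static const struct demo_io_ops demo_ops = {
	.name		= "demo",
	.read_char	= demo_get_char,
	.write_char	= demo_put_char,
};

int main(void)
{
	demo_ops.write_char(demo_ops.read_char());
	printf("\nused ops table \"%s\"\n", demo_ops.name);
	return 0;
}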
26070 diff -urNp linux-2.6.35.4/drivers/misc/sgi-gru/gruhandles.c linux-2.6.35.4/drivers/misc/sgi-gru/gruhandles.c
26071 --- linux-2.6.35.4/drivers/misc/sgi-gru/gruhandles.c 2010-08-26 19:47:12.000000000 -0400
26072 +++ linux-2.6.35.4/drivers/misc/sgi-gru/gruhandles.c 2010-09-17 20:12:09.000000000 -0400
26073 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op
26074 unsigned long nsec;
26075
26076 nsec = CLKS2NSEC(clks);
26077 - atomic_long_inc(&mcs_op_statistics[op].count);
26078 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
26079 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
26080 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
26081 if (mcs_op_statistics[op].max < nsec)
26082 mcs_op_statistics[op].max = nsec;
26083 }
26084 diff -urNp linux-2.6.35.4/drivers/misc/sgi-gru/gruprocfs.c linux-2.6.35.4/drivers/misc/sgi-gru/gruprocfs.c
26085 --- linux-2.6.35.4/drivers/misc/sgi-gru/gruprocfs.c 2010-08-26 19:47:12.000000000 -0400
26086 +++ linux-2.6.35.4/drivers/misc/sgi-gru/gruprocfs.c 2010-09-17 20:12:09.000000000 -0400
26087 @@ -32,9 +32,9 @@
26088
26089 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
26090
26091 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
26092 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
26093 {
26094 - unsigned long val = atomic_long_read(v);
26095 + unsigned long val = atomic_long_read_unchecked(v);
26096
26097 seq_printf(s, "%16lu %s\n", val, id);
26098 }
26099 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct se
26100
26101 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
26102 for (op = 0; op < mcsop_last; op++) {
26103 - count = atomic_long_read(&mcs_op_statistics[op].count);
26104 - total = atomic_long_read(&mcs_op_statistics[op].total);
26105 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
26106 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
26107 max = mcs_op_statistics[op].max;
26108 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
26109 count ? total / count : 0, max);
26110 diff -urNp linux-2.6.35.4/drivers/misc/sgi-gru/grutables.h linux-2.6.35.4/drivers/misc/sgi-gru/grutables.h
26111 --- linux-2.6.35.4/drivers/misc/sgi-gru/grutables.h 2010-08-26 19:47:12.000000000 -0400
26112 +++ linux-2.6.35.4/drivers/misc/sgi-gru/grutables.h 2010-09-17 20:12:09.000000000 -0400
26113 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
26114 * GRU statistics.
26115 */
26116 struct gru_stats_s {
26117 - atomic_long_t vdata_alloc;
26118 - atomic_long_t vdata_free;
26119 - atomic_long_t gts_alloc;
26120 - atomic_long_t gts_free;
26121 - atomic_long_t gms_alloc;
26122 - atomic_long_t gms_free;
26123 - atomic_long_t gts_double_allocate;
26124 - atomic_long_t assign_context;
26125 - atomic_long_t assign_context_failed;
26126 - atomic_long_t free_context;
26127 - atomic_long_t load_user_context;
26128 - atomic_long_t load_kernel_context;
26129 - atomic_long_t lock_kernel_context;
26130 - atomic_long_t unlock_kernel_context;
26131 - atomic_long_t steal_user_context;
26132 - atomic_long_t steal_kernel_context;
26133 - atomic_long_t steal_context_failed;
26134 - atomic_long_t nopfn;
26135 - atomic_long_t asid_new;
26136 - atomic_long_t asid_next;
26137 - atomic_long_t asid_wrap;
26138 - atomic_long_t asid_reuse;
26139 - atomic_long_t intr;
26140 - atomic_long_t intr_cbr;
26141 - atomic_long_t intr_tfh;
26142 - atomic_long_t intr_spurious;
26143 - atomic_long_t intr_mm_lock_failed;
26144 - atomic_long_t call_os;
26145 - atomic_long_t call_os_wait_queue;
26146 - atomic_long_t user_flush_tlb;
26147 - atomic_long_t user_unload_context;
26148 - atomic_long_t user_exception;
26149 - atomic_long_t set_context_option;
26150 - atomic_long_t check_context_retarget_intr;
26151 - atomic_long_t check_context_unload;
26152 - atomic_long_t tlb_dropin;
26153 - atomic_long_t tlb_preload_page;
26154 - atomic_long_t tlb_dropin_fail_no_asid;
26155 - atomic_long_t tlb_dropin_fail_upm;
26156 - atomic_long_t tlb_dropin_fail_invalid;
26157 - atomic_long_t tlb_dropin_fail_range_active;
26158 - atomic_long_t tlb_dropin_fail_idle;
26159 - atomic_long_t tlb_dropin_fail_fmm;
26160 - atomic_long_t tlb_dropin_fail_no_exception;
26161 - atomic_long_t tfh_stale_on_fault;
26162 - atomic_long_t mmu_invalidate_range;
26163 - atomic_long_t mmu_invalidate_page;
26164 - atomic_long_t flush_tlb;
26165 - atomic_long_t flush_tlb_gru;
26166 - atomic_long_t flush_tlb_gru_tgh;
26167 - atomic_long_t flush_tlb_gru_zero_asid;
26168 -
26169 - atomic_long_t copy_gpa;
26170 - atomic_long_t read_gpa;
26171 -
26172 - atomic_long_t mesq_receive;
26173 - atomic_long_t mesq_receive_none;
26174 - atomic_long_t mesq_send;
26175 - atomic_long_t mesq_send_failed;
26176 - atomic_long_t mesq_noop;
26177 - atomic_long_t mesq_send_unexpected_error;
26178 - atomic_long_t mesq_send_lb_overflow;
26179 - atomic_long_t mesq_send_qlimit_reached;
26180 - atomic_long_t mesq_send_amo_nacked;
26181 - atomic_long_t mesq_send_put_nacked;
26182 - atomic_long_t mesq_page_overflow;
26183 - atomic_long_t mesq_qf_locked;
26184 - atomic_long_t mesq_qf_noop_not_full;
26185 - atomic_long_t mesq_qf_switch_head_failed;
26186 - atomic_long_t mesq_qf_unexpected_error;
26187 - atomic_long_t mesq_noop_unexpected_error;
26188 - atomic_long_t mesq_noop_lb_overflow;
26189 - atomic_long_t mesq_noop_qlimit_reached;
26190 - atomic_long_t mesq_noop_amo_nacked;
26191 - atomic_long_t mesq_noop_put_nacked;
26192 - atomic_long_t mesq_noop_page_overflow;
26193 + atomic_long_unchecked_t vdata_alloc;
26194 + atomic_long_unchecked_t vdata_free;
26195 + atomic_long_unchecked_t gts_alloc;
26196 + atomic_long_unchecked_t gts_free;
26197 + atomic_long_unchecked_t gms_alloc;
26198 + atomic_long_unchecked_t gms_free;
26199 + atomic_long_unchecked_t gts_double_allocate;
26200 + atomic_long_unchecked_t assign_context;
26201 + atomic_long_unchecked_t assign_context_failed;
26202 + atomic_long_unchecked_t free_context;
26203 + atomic_long_unchecked_t load_user_context;
26204 + atomic_long_unchecked_t load_kernel_context;
26205 + atomic_long_unchecked_t lock_kernel_context;
26206 + atomic_long_unchecked_t unlock_kernel_context;
26207 + atomic_long_unchecked_t steal_user_context;
26208 + atomic_long_unchecked_t steal_kernel_context;
26209 + atomic_long_unchecked_t steal_context_failed;
26210 + atomic_long_unchecked_t nopfn;
26211 + atomic_long_unchecked_t asid_new;
26212 + atomic_long_unchecked_t asid_next;
26213 + atomic_long_unchecked_t asid_wrap;
26214 + atomic_long_unchecked_t asid_reuse;
26215 + atomic_long_unchecked_t intr;
26216 + atomic_long_unchecked_t intr_cbr;
26217 + atomic_long_unchecked_t intr_tfh;
26218 + atomic_long_unchecked_t intr_spurious;
26219 + atomic_long_unchecked_t intr_mm_lock_failed;
26220 + atomic_long_unchecked_t call_os;
26221 + atomic_long_unchecked_t call_os_wait_queue;
26222 + atomic_long_unchecked_t user_flush_tlb;
26223 + atomic_long_unchecked_t user_unload_context;
26224 + atomic_long_unchecked_t user_exception;
26225 + atomic_long_unchecked_t set_context_option;
26226 + atomic_long_unchecked_t check_context_retarget_intr;
26227 + atomic_long_unchecked_t check_context_unload;
26228 + atomic_long_unchecked_t tlb_dropin;
26229 + atomic_long_unchecked_t tlb_preload_page;
26230 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
26231 + atomic_long_unchecked_t tlb_dropin_fail_upm;
26232 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
26233 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
26234 + atomic_long_unchecked_t tlb_dropin_fail_idle;
26235 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
26236 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
26237 + atomic_long_unchecked_t tfh_stale_on_fault;
26238 + atomic_long_unchecked_t mmu_invalidate_range;
26239 + atomic_long_unchecked_t mmu_invalidate_page;
26240 + atomic_long_unchecked_t flush_tlb;
26241 + atomic_long_unchecked_t flush_tlb_gru;
26242 + atomic_long_unchecked_t flush_tlb_gru_tgh;
26243 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
26244 +
26245 + atomic_long_unchecked_t copy_gpa;
26246 + atomic_long_unchecked_t read_gpa;
26247 +
26248 + atomic_long_unchecked_t mesq_receive;
26249 + atomic_long_unchecked_t mesq_receive_none;
26250 + atomic_long_unchecked_t mesq_send;
26251 + atomic_long_unchecked_t mesq_send_failed;
26252 + atomic_long_unchecked_t mesq_noop;
26253 + atomic_long_unchecked_t mesq_send_unexpected_error;
26254 + atomic_long_unchecked_t mesq_send_lb_overflow;
26255 + atomic_long_unchecked_t mesq_send_qlimit_reached;
26256 + atomic_long_unchecked_t mesq_send_amo_nacked;
26257 + atomic_long_unchecked_t mesq_send_put_nacked;
26258 + atomic_long_unchecked_t mesq_page_overflow;
26259 + atomic_long_unchecked_t mesq_qf_locked;
26260 + atomic_long_unchecked_t mesq_qf_noop_not_full;
26261 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
26262 + atomic_long_unchecked_t mesq_qf_unexpected_error;
26263 + atomic_long_unchecked_t mesq_noop_unexpected_error;
26264 + atomic_long_unchecked_t mesq_noop_lb_overflow;
26265 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
26266 + atomic_long_unchecked_t mesq_noop_amo_nacked;
26267 + atomic_long_unchecked_t mesq_noop_put_nacked;
26268 + atomic_long_unchecked_t mesq_noop_page_overflow;
26269
26270 };
26271
26272 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start
26273 tghop_invalidate, mcsop_last};
26274
26275 struct mcs_op_statistic {
26276 - atomic_long_t count;
26277 - atomic_long_t total;
26278 + atomic_long_unchecked_t count;
26279 + atomic_long_unchecked_t total;
26280 unsigned long max;
26281 };
26282
26283 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_st
26284
26285 #define STAT(id) do { \
26286 if (gru_options & OPT_STATS) \
26287 - atomic_long_inc(&gru_stats.id); \
26288 + atomic_long_inc_unchecked(&gru_stats.id); \
26289 } while (0)
26290
26291 #ifdef CONFIG_SGI_GRU_DEBUG
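atomic_long_unchecked_t comes from the PaX REFCOUNT parts of this patch: the regular atomic types gain overflow detection so that reference-count wraps are caught, while the *_unchecked variants keep plain wrap-around semantics. The GRU fields above are pure event counters, so wrapping them is harmless and they are moved to the unchecked type to avoid false positives. The sketch below only illustrates that split; the demo types and functions are invented, the real ones live in asm/atomic.h as modified by this patch:

#include <limits.h>
#include <stdio.h>

/* illustrative stand-ins for the checked and unchecked atomic types */
typedef struct { long counter; } atomic_long_demo_t;
typedef struct { long counter; } atomic_long_unchecked_demo_t;

/* checked increment: refuses to wrap (simplified model of REFCOUNT) */
static void atomic_long_inc_demo(atomic_long_demo_t *v)
{
	if (v->counter == LONG_MAX) {
		fprintf(stderr, "reference count overflow caught\n");
		return;
	}
	v->counter++;
}

/* unchecked increment: a plain counter where wrap-around is acceptable,
 * which is all a statistics field like those in gru_stats needs */
static void atomic_long_inc_unchecked_demo(atomic_long_unchecked_demo_t *v)
{
	v->counter++;
}

int main(void)
{
	atomic_long_demo_t refs = { LONG_MAX };
	atomic_long_unchecked_demo_t events = { 0 };

	atomic_long_inc_demo(&refs);		/* caught, value unchanged */
	atomic_long_inc_unchecked_demo(&events);
	printf("refs=%ld events=%ld\n", refs.counter, events.counter);
	return 0;
}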
26292 diff -urNp linux-2.6.35.4/drivers/mtd/devices/doc2000.c linux-2.6.35.4/drivers/mtd/devices/doc2000.c
26293 --- linux-2.6.35.4/drivers/mtd/devices/doc2000.c 2010-08-26 19:47:12.000000000 -0400
26294 +++ linux-2.6.35.4/drivers/mtd/devices/doc2000.c 2010-09-17 20:12:09.000000000 -0400
26295 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
26296
26297 /* The ECC will not be calculated correctly if less than 512 is written */
26298 /* DBB-
26299 - if (len != 0x200 && eccbuf)
26300 + if (len != 0x200)
26301 printk(KERN_WARNING
26302 "ECC needs a full sector write (adr: %lx size %lx)\n",
26303 (long) to, (long) len);
26304 diff -urNp linux-2.6.35.4/drivers/mtd/devices/doc2001.c linux-2.6.35.4/drivers/mtd/devices/doc2001.c
26305 --- linux-2.6.35.4/drivers/mtd/devices/doc2001.c 2010-08-26 19:47:12.000000000 -0400
26306 +++ linux-2.6.35.4/drivers/mtd/devices/doc2001.c 2010-09-17 20:12:09.000000000 -0400
26307 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
26308 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
26309
26310 /* Don't allow read past end of device */
26311 - if (from >= this->totlen)
26312 + if (from >= this->totlen || !len)
26313 return -EINVAL;
26314
26315 /* Don't allow a single read to cross a 512-byte block boundary */
26316 diff -urNp linux-2.6.35.4/drivers/mtd/nand/denali.c linux-2.6.35.4/drivers/mtd/nand/denali.c
26317 --- linux-2.6.35.4/drivers/mtd/nand/denali.c 2010-08-26 19:47:12.000000000 -0400
26318 +++ linux-2.6.35.4/drivers/mtd/nand/denali.c 2010-09-17 20:12:09.000000000 -0400
26319 @@ -24,6 +24,7 @@
26320 #include <linux/pci.h>
26321 #include <linux/mtd/mtd.h>
26322 #include <linux/module.h>
26323 +#include <linux/slab.h>
26324
26325 #include "denali.h"
26326
26327 diff -urNp linux-2.6.35.4/drivers/mtd/ubi/build.c linux-2.6.35.4/drivers/mtd/ubi/build.c
26328 --- linux-2.6.35.4/drivers/mtd/ubi/build.c 2010-08-26 19:47:12.000000000 -0400
26329 +++ linux-2.6.35.4/drivers/mtd/ubi/build.c 2010-09-17 20:12:09.000000000 -0400
26330 @@ -1282,7 +1282,7 @@ module_exit(ubi_exit);
26331 static int __init bytes_str_to_int(const char *str)
26332 {
26333 char *endp;
26334 - unsigned long result;
26335 + unsigned long result, scale = 1;
26336
26337 result = simple_strtoul(str, &endp, 0);
26338 if (str == endp || result >= INT_MAX) {
26339 @@ -1293,11 +1293,11 @@ static int __init bytes_str_to_int(const
26340
26341 switch (*endp) {
26342 case 'G':
26343 - result *= 1024;
26344 + scale *= 1024;
26345 case 'M':
26346 - result *= 1024;
26347 + scale *= 1024;
26348 case 'K':
26349 - result *= 1024;
26350 + scale *= 1024;
26351 if (endp[1] == 'i' && endp[2] == 'B')
26352 endp += 2;
26353 case '\0':
26354 @@ -1308,7 +1308,13 @@ static int __init bytes_str_to_int(const
26355 return -EINVAL;
26356 }
26357
26358 - return result;
26359 + if ((intoverflow_t)result*scale >= INT_MAX) {
26360 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
26361 + str);
26362 + return -EINVAL;
26363 + }
26364 +
26365 + return result*scale;
26366 }
26367
26368 /**
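The original bytes_str_to_int() multiplied result in place for each K/M/G suffix but only range-checked it before scaling, so a large suffixed value could overflow int unnoticed. The replacement keeps the multiplier in a separate scale variable and verifies result*scale in a wider type before returning (intoverflow_t is introduced elsewhere in this patch; assume a 64-bit signed integer). A user-space sketch of the same approach using long long, with invented names and the "iB" handling omitted:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* parse "<number>[K|M|G]" into bytes, rejecting anything above INT_MAX */
static int bytes_str_to_int_demo(const char *str)
{
	char *endp;
	unsigned long result, scale = 1;

	result = strtoul(str, &endp, 0);
	if (str == endp || result >= INT_MAX)
		return -1;

	switch (*endp) {
	case 'G':
		scale *= 1024;
		/* fall through */
	case 'M':
		scale *= 1024;
		/* fall through */
	case 'K':
		scale *= 1024;
		break;
	case '\0':
		break;
	default:
		return -1;
	}

	/* do the range check in a wider type before committing */
	if ((long long)result * scale >= INT_MAX)
		return -1;

	return (int)(result * scale);
}

int main(void)
{
	printf("\"64K\" -> %d\n", bytes_str_to_int_demo("64K"));
	printf("\"8G\"  -> %d\n", bytes_str_to_int_demo("8G"));	/* rejected */
	return 0;
}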
26369 diff -urNp linux-2.6.35.4/drivers/net/cxgb3/cxgb3_main.c linux-2.6.35.4/drivers/net/cxgb3/cxgb3_main.c
26370 --- linux-2.6.35.4/drivers/net/cxgb3/cxgb3_main.c 2010-08-26 19:47:12.000000000 -0400
26371 +++ linux-2.6.35.4/drivers/net/cxgb3/cxgb3_main.c 2010-09-17 20:12:37.000000000 -0400
26372 @@ -2296,6 +2296,8 @@ static int cxgb_extension_ioctl(struct n
26373 case CHELSIO_GET_QSET_NUM:{
26374 struct ch_reg edata;
26375
26376 + memset(&edata, 0, sizeof(edata));
26377 +
26378 edata.cmd = CHELSIO_GET_QSET_NUM;
26379 edata.val = pi->nqsets;
26380 if (copy_to_user(useraddr, &edata, sizeof(edata)))
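struct ch_reg is built on the kernel stack and copied wholesale to user space; without the memset(), padding bytes and any fields this ioctl path never assigns would carry whatever was previously on the stack out to the caller. Zeroing the object first closes that infoleak; the same memset-before-copy pattern is applied to eql and hso below. An illustrative user-space sketch (struct and function names are made up):

#include <stdio.h>
#include <string.h>

/* invented analogue of struct ch_reg: padding follows 'cmd' on most ABIs */
struct demo_reg {
	char cmd;
	int  val;
};

static void fill_unsafe(struct demo_reg *r)
{
	r->cmd = 1;	/* the padding bytes keep whatever was on the stack */
	r->val = 42;
}

static void fill_safe(struct demo_reg *r)
{
	memset(r, 0, sizeof(*r));	/* zero padding and unset fields first */
	r->cmd = 1;
	r->val = 42;
}

int main(void)
{
	struct demo_reg a, b;

	fill_unsafe(&a);	/* copying 'a' out verbatim could leak stack data */
	fill_safe(&b);		/* 'b' is fully defined before it leaves the kernel */
	printf("struct demo_reg occupies %zu bytes, %zu of them declared\n",
	       sizeof(struct demo_reg), sizeof(char) + sizeof(int));
	return 0;
}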
26381 diff -urNp linux-2.6.35.4/drivers/net/e1000e/82571.c linux-2.6.35.4/drivers/net/e1000e/82571.c
26382 --- linux-2.6.35.4/drivers/net/e1000e/82571.c 2010-08-26 19:47:12.000000000 -0400
26383 +++ linux-2.6.35.4/drivers/net/e1000e/82571.c 2010-09-17 20:12:09.000000000 -0400
26384 @@ -207,6 +207,7 @@ static s32 e1000_init_mac_params_82571(s
26385 {
26386 struct e1000_hw *hw = &adapter->hw;
26387 struct e1000_mac_info *mac = &hw->mac;
26388 + /* cannot be const */
26389 struct e1000_mac_operations *func = &mac->ops;
26390 u32 swsm = 0;
26391 u32 swsm2 = 0;
26392 @@ -1703,7 +1704,7 @@ static void e1000_clear_hw_cntrs_82571(s
26393 er32(ICRXDMTC);
26394 }
26395
26396 -static struct e1000_mac_operations e82571_mac_ops = {
26397 +static const struct e1000_mac_operations e82571_mac_ops = {
26398 /* .check_mng_mode: mac type dependent */
26399 /* .check_for_link: media type dependent */
26400 .id_led_init = e1000e_id_led_init,
26401 @@ -1725,7 +1726,7 @@ static struct e1000_mac_operations e8257
26402 .read_mac_addr = e1000_read_mac_addr_82571,
26403 };
26404
26405 -static struct e1000_phy_operations e82_phy_ops_igp = {
26406 +static const struct e1000_phy_operations e82_phy_ops_igp = {
26407 .acquire = e1000_get_hw_semaphore_82571,
26408 .check_polarity = e1000_check_polarity_igp,
26409 .check_reset_block = e1000e_check_reset_block_generic,
26410 @@ -1743,7 +1744,7 @@ static struct e1000_phy_operations e82_p
26411 .cfg_on_link_up = NULL,
26412 };
26413
26414 -static struct e1000_phy_operations e82_phy_ops_m88 = {
26415 +static const struct e1000_phy_operations e82_phy_ops_m88 = {
26416 .acquire = e1000_get_hw_semaphore_82571,
26417 .check_polarity = e1000_check_polarity_m88,
26418 .check_reset_block = e1000e_check_reset_block_generic,
26419 @@ -1761,7 +1762,7 @@ static struct e1000_phy_operations e82_p
26420 .cfg_on_link_up = NULL,
26421 };
26422
26423 -static struct e1000_phy_operations e82_phy_ops_bm = {
26424 +static const struct e1000_phy_operations e82_phy_ops_bm = {
26425 .acquire = e1000_get_hw_semaphore_82571,
26426 .check_polarity = e1000_check_polarity_m88,
26427 .check_reset_block = e1000e_check_reset_block_generic,
26428 @@ -1779,7 +1780,7 @@ static struct e1000_phy_operations e82_p
26429 .cfg_on_link_up = NULL,
26430 };
26431
26432 -static struct e1000_nvm_operations e82571_nvm_ops = {
26433 +static const struct e1000_nvm_operations e82571_nvm_ops = {
26434 .acquire = e1000_acquire_nvm_82571,
26435 .read = e1000e_read_nvm_eerd,
26436 .release = e1000_release_nvm_82571,
26437 diff -urNp linux-2.6.35.4/drivers/net/e1000e/e1000.h linux-2.6.35.4/drivers/net/e1000e/e1000.h
26438 --- linux-2.6.35.4/drivers/net/e1000e/e1000.h 2010-08-26 19:47:12.000000000 -0400
26439 +++ linux-2.6.35.4/drivers/net/e1000e/e1000.h 2010-09-17 20:12:09.000000000 -0400
26440 @@ -377,9 +377,9 @@ struct e1000_info {
26441 u32 pba;
26442 u32 max_hw_frame_size;
26443 s32 (*get_variants)(struct e1000_adapter *);
26444 - struct e1000_mac_operations *mac_ops;
26445 - struct e1000_phy_operations *phy_ops;
26446 - struct e1000_nvm_operations *nvm_ops;
26447 + const struct e1000_mac_operations *mac_ops;
26448 + const struct e1000_phy_operations *phy_ops;
26449 + const struct e1000_nvm_operations *nvm_ops;
26450 };
26451
26452 /* hardware capability, feature, and workaround flags */
26453 diff -urNp linux-2.6.35.4/drivers/net/e1000e/es2lan.c linux-2.6.35.4/drivers/net/e1000e/es2lan.c
26454 --- linux-2.6.35.4/drivers/net/e1000e/es2lan.c 2010-08-26 19:47:12.000000000 -0400
26455 +++ linux-2.6.35.4/drivers/net/e1000e/es2lan.c 2010-09-17 20:12:09.000000000 -0400
26456 @@ -205,6 +205,7 @@ static s32 e1000_init_mac_params_80003es
26457 {
26458 struct e1000_hw *hw = &adapter->hw;
26459 struct e1000_mac_info *mac = &hw->mac;
26460 + /* cannot be const */
26461 struct e1000_mac_operations *func = &mac->ops;
26462
26463 /* Set media type */
26464 @@ -1431,7 +1432,7 @@ static void e1000_clear_hw_cntrs_80003es
26465 er32(ICRXDMTC);
26466 }
26467
26468 -static struct e1000_mac_operations es2_mac_ops = {
26469 +static const struct e1000_mac_operations es2_mac_ops = {
26470 .read_mac_addr = e1000_read_mac_addr_80003es2lan,
26471 .id_led_init = e1000e_id_led_init,
26472 .check_mng_mode = e1000e_check_mng_mode_generic,
26473 @@ -1453,7 +1454,7 @@ static struct e1000_mac_operations es2_m
26474 .setup_led = e1000e_setup_led_generic,
26475 };
26476
26477 -static struct e1000_phy_operations es2_phy_ops = {
26478 +static const struct e1000_phy_operations es2_phy_ops = {
26479 .acquire = e1000_acquire_phy_80003es2lan,
26480 .check_polarity = e1000_check_polarity_m88,
26481 .check_reset_block = e1000e_check_reset_block_generic,
26482 @@ -1471,7 +1472,7 @@ static struct e1000_phy_operations es2_p
26483 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
26484 };
26485
26486 -static struct e1000_nvm_operations es2_nvm_ops = {
26487 +static const struct e1000_nvm_operations es2_nvm_ops = {
26488 .acquire = e1000_acquire_nvm_80003es2lan,
26489 .read = e1000e_read_nvm_eerd,
26490 .release = e1000_release_nvm_80003es2lan,
26491 diff -urNp linux-2.6.35.4/drivers/net/e1000e/hw.h linux-2.6.35.4/drivers/net/e1000e/hw.h
26492 --- linux-2.6.35.4/drivers/net/e1000e/hw.h 2010-08-26 19:47:12.000000000 -0400
26493 +++ linux-2.6.35.4/drivers/net/e1000e/hw.h 2010-09-17 20:12:09.000000000 -0400
26494 @@ -791,13 +791,13 @@ struct e1000_phy_operations {
26495
26496 /* Function pointers for the NVM. */
26497 struct e1000_nvm_operations {
26498 - s32 (*acquire)(struct e1000_hw *);
26499 - s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
26500 - void (*release)(struct e1000_hw *);
26501 - s32 (*update)(struct e1000_hw *);
26502 - s32 (*valid_led_default)(struct e1000_hw *, u16 *);
26503 - s32 (*validate)(struct e1000_hw *);
26504 - s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
26505 + s32 (* const acquire)(struct e1000_hw *);
26506 + s32 (* const read)(struct e1000_hw *, u16, u16, u16 *);
26507 + void (* const release)(struct e1000_hw *);
26508 + s32 (* const update)(struct e1000_hw *);
26509 + s32 (* const valid_led_default)(struct e1000_hw *, u16 *);
26510 + s32 (* const validate)(struct e1000_hw *);
26511 + s32 (* const write)(struct e1000_hw *, u16, u16, u16 *);
26512 };
26513
26514 struct e1000_mac_info {
26515 @@ -877,6 +877,7 @@ struct e1000_phy_info {
26516 };
26517
26518 struct e1000_nvm_info {
26519 + /* cannot be const */
26520 struct e1000_nvm_operations ops;
26521
26522 enum e1000_nvm_type type;
26523 diff -urNp linux-2.6.35.4/drivers/net/e1000e/ich8lan.c linux-2.6.35.4/drivers/net/e1000e/ich8lan.c
26524 --- linux-2.6.35.4/drivers/net/e1000e/ich8lan.c 2010-08-26 19:47:12.000000000 -0400
26525 +++ linux-2.6.35.4/drivers/net/e1000e/ich8lan.c 2010-09-17 20:12:09.000000000 -0400
26526 @@ -3388,7 +3388,7 @@ static void e1000_clear_hw_cntrs_ich8lan
26527 }
26528 }
26529
26530 -static struct e1000_mac_operations ich8_mac_ops = {
26531 +static const struct e1000_mac_operations ich8_mac_ops = {
26532 .id_led_init = e1000e_id_led_init,
26533 .check_mng_mode = e1000_check_mng_mode_ich8lan,
26534 .check_for_link = e1000_check_for_copper_link_ich8lan,
26535 @@ -3407,7 +3407,7 @@ static struct e1000_mac_operations ich8_
26536 /* id_led_init dependent on mac type */
26537 };
26538
26539 -static struct e1000_phy_operations ich8_phy_ops = {
26540 +static const struct e1000_phy_operations ich8_phy_ops = {
26541 .acquire = e1000_acquire_swflag_ich8lan,
26542 .check_reset_block = e1000_check_reset_block_ich8lan,
26543 .commit = NULL,
26544 @@ -3421,7 +3421,7 @@ static struct e1000_phy_operations ich8_
26545 .write_reg = e1000e_write_phy_reg_igp,
26546 };
26547
26548 -static struct e1000_nvm_operations ich8_nvm_ops = {
26549 +static const struct e1000_nvm_operations ich8_nvm_ops = {
26550 .acquire = e1000_acquire_nvm_ich8lan,
26551 .read = e1000_read_nvm_ich8lan,
26552 .release = e1000_release_nvm_ich8lan,
26553 diff -urNp linux-2.6.35.4/drivers/net/eql.c linux-2.6.35.4/drivers/net/eql.c
26554 --- linux-2.6.35.4/drivers/net/eql.c 2010-08-26 19:47:12.000000000 -0400
26555 +++ linux-2.6.35.4/drivers/net/eql.c 2010-09-17 20:12:37.000000000 -0400
26556 @@ -555,6 +555,8 @@ static int eql_g_master_cfg(struct net_d
26557 equalizer_t *eql;
26558 master_config_t mc;
26559
26560 + memset(&mc, 0, sizeof(mc));
26561 +
26562 if (eql_is_master(dev)) {
26563 eql = netdev_priv(dev);
26564 mc.max_slaves = eql->max_slaves;
26565 diff -urNp linux-2.6.35.4/drivers/net/igb/e1000_82575.c linux-2.6.35.4/drivers/net/igb/e1000_82575.c
26566 --- linux-2.6.35.4/drivers/net/igb/e1000_82575.c 2010-08-26 19:47:12.000000000 -0400
26567 +++ linux-2.6.35.4/drivers/net/igb/e1000_82575.c 2010-09-17 20:12:09.000000000 -0400
26568 @@ -1597,7 +1597,7 @@ u16 igb_rxpbs_adjust_82580(u32 data)
26569 return ret_val;
26570 }
26571
26572 -static struct e1000_mac_operations e1000_mac_ops_82575 = {
26573 +static const struct e1000_mac_operations e1000_mac_ops_82575 = {
26574 .init_hw = igb_init_hw_82575,
26575 .check_for_link = igb_check_for_link_82575,
26576 .rar_set = igb_rar_set,
26577 @@ -1605,13 +1605,13 @@ static struct e1000_mac_operations e1000
26578 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
26579 };
26580
26581 -static struct e1000_phy_operations e1000_phy_ops_82575 = {
26582 +static const struct e1000_phy_operations e1000_phy_ops_82575 = {
26583 .acquire = igb_acquire_phy_82575,
26584 .get_cfg_done = igb_get_cfg_done_82575,
26585 .release = igb_release_phy_82575,
26586 };
26587
26588 -static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
26589 +static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
26590 .acquire = igb_acquire_nvm_82575,
26591 .read = igb_read_nvm_eerd,
26592 .release = igb_release_nvm_82575,
26593 diff -urNp linux-2.6.35.4/drivers/net/igb/e1000_hw.h linux-2.6.35.4/drivers/net/igb/e1000_hw.h
26594 --- linux-2.6.35.4/drivers/net/igb/e1000_hw.h 2010-08-26 19:47:12.000000000 -0400
26595 +++ linux-2.6.35.4/drivers/net/igb/e1000_hw.h 2010-09-17 20:12:09.000000000 -0400
26596 @@ -323,17 +323,17 @@ struct e1000_phy_operations {
26597 };
26598
26599 struct e1000_nvm_operations {
26600 - s32 (*acquire)(struct e1000_hw *);
26601 - s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
26602 - void (*release)(struct e1000_hw *);
26603 - s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
26604 + s32 (* const acquire)(struct e1000_hw *);
26605 + s32 (* const read)(struct e1000_hw *, u16, u16, u16 *);
26606 + void (* const release)(struct e1000_hw *);
26607 + s32 (* const write)(struct e1000_hw *, u16, u16, u16 *);
26608 };
26609
26610 struct e1000_info {
26611 s32 (*get_invariants)(struct e1000_hw *);
26612 - struct e1000_mac_operations *mac_ops;
26613 - struct e1000_phy_operations *phy_ops;
26614 - struct e1000_nvm_operations *nvm_ops;
26615 + const struct e1000_mac_operations *mac_ops;
26616 + const struct e1000_phy_operations *phy_ops;
26617 + const struct e1000_nvm_operations *nvm_ops;
26618 };
26619
26620 extern const struct e1000_info e1000_82575_info;
26621 @@ -412,6 +412,7 @@ struct e1000_phy_info {
26622 };
26623
26624 struct e1000_nvm_info {
26625 + /* cannot be const */
26626 struct e1000_nvm_operations ops;
26627
26628 enum e1000_nvm_type type;
26629 diff -urNp linux-2.6.35.4/drivers/net/irda/vlsi_ir.c linux-2.6.35.4/drivers/net/irda/vlsi_ir.c
26630 --- linux-2.6.35.4/drivers/net/irda/vlsi_ir.c 2010-08-26 19:47:12.000000000 -0400
26631 +++ linux-2.6.35.4/drivers/net/irda/vlsi_ir.c 2010-09-17 20:12:09.000000000 -0400
26632 @@ -907,13 +907,12 @@ static netdev_tx_t vlsi_hard_start_xmit(
26633 /* no race - tx-ring already empty */
26634 vlsi_set_baud(idev, iobase);
26635 netif_wake_queue(ndev);
26636 - }
26637 - else
26638 - ;
26639 + } else {
26640 /* keep the speed change pending like it would
26641 * for any len>0 packet. tx completion interrupt
26642 * will apply it when the tx ring becomes empty.
26643 */
26644 + }
26645 spin_unlock_irqrestore(&idev->lock, flags);
26646 dev_kfree_skb_any(skb);
26647 return NETDEV_TX_OK;
26648 diff -urNp linux-2.6.35.4/drivers/net/pcnet32.c linux-2.6.35.4/drivers/net/pcnet32.c
26649 --- linux-2.6.35.4/drivers/net/pcnet32.c 2010-08-26 19:47:12.000000000 -0400
26650 +++ linux-2.6.35.4/drivers/net/pcnet32.c 2010-09-17 20:12:09.000000000 -0400
26651 @@ -82,7 +82,7 @@ static int cards_found;
26652 /*
26653 * VLB I/O addresses
26654 */
26655 -static unsigned int pcnet32_portlist[] __initdata =
26656 +static unsigned int pcnet32_portlist[] __devinitdata =
26657 { 0x300, 0x320, 0x340, 0x360, 0 };
26658
26659 static int pcnet32_debug;
26660 diff -urNp linux-2.6.35.4/drivers/net/ppp_generic.c linux-2.6.35.4/drivers/net/ppp_generic.c
26661 --- linux-2.6.35.4/drivers/net/ppp_generic.c 2010-08-26 19:47:12.000000000 -0400
26662 +++ linux-2.6.35.4/drivers/net/ppp_generic.c 2010-09-17 20:12:09.000000000 -0400
26663 @@ -992,7 +992,6 @@ ppp_net_ioctl(struct net_device *dev, st
26664 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
26665 struct ppp_stats stats;
26666 struct ppp_comp_stats cstats;
26667 - char *vers;
26668
26669 switch (cmd) {
26670 case SIOCGPPPSTATS:
26671 @@ -1014,8 +1013,7 @@ ppp_net_ioctl(struct net_device *dev, st
26672 break;
26673
26674 case SIOCGPPPVER:
26675 - vers = PPP_VERSION;
26676 - if (copy_to_user(addr, vers, strlen(vers) + 1))
26677 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
26678 break;
26679 err = 0;
26680 break;
26681 diff -urNp linux-2.6.35.4/drivers/net/tg3.c linux-2.6.35.4/drivers/net/tg3.c
26682 --- linux-2.6.35.4/drivers/net/tg3.c 2010-08-26 19:47:12.000000000 -0400
26683 +++ linux-2.6.35.4/drivers/net/tg3.c 2010-09-17 20:12:09.000000000 -0400
26684 @@ -12410,7 +12410,7 @@ static void __devinit tg3_read_vpd(struc
26685 cnt = pci_read_vpd(tp->pdev, pos,
26686 TG3_NVM_VPD_LEN - pos,
26687 &vpd_data[pos]);
26688 - if (cnt == -ETIMEDOUT || -EINTR)
26689 + if (cnt == -ETIMEDOUT || cnt == -EINTR)
26690 cnt = 0;
26691 else if (cnt < 0)
26692 goto out_not_found;
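The original test `cnt == -ETIMEDOUT || -EINTR` parses as `(cnt == -ETIMEDOUT) || (-EINTR)`, and -EINTR is a non-zero constant, so the condition was always true and every VPD read result was treated as a timeout/interrupt. Spelling out the second comparison restores the intended logic. A compact illustration:

#include <errno.h>
#include <stdio.h>

int main(void)
{
	int cnt = 64;	/* a successful read of 64 bytes */

	/* buggy form: "|| -EINTR" is a constant truth value */
	if (cnt == -ETIMEDOUT || -EINTR)
		printf("buggy test fires even for cnt=%d\n", cnt);

	/* fixed form: only real error codes match */
	if (cnt == -ETIMEDOUT || cnt == -EINTR)
		printf("never printed for cnt=%d\n", cnt);
	else
		printf("fixed test correctly ignores cnt=%d\n", cnt);

	return 0;
}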
26693 diff -urNp linux-2.6.35.4/drivers/net/tg3.h linux-2.6.35.4/drivers/net/tg3.h
26694 --- linux-2.6.35.4/drivers/net/tg3.h 2010-08-26 19:47:12.000000000 -0400
26695 +++ linux-2.6.35.4/drivers/net/tg3.h 2010-09-17 20:12:09.000000000 -0400
26696 @@ -130,6 +130,7 @@
26697 #define CHIPREV_ID_5750_A0 0x4000
26698 #define CHIPREV_ID_5750_A1 0x4001
26699 #define CHIPREV_ID_5750_A3 0x4003
26700 +#define CHIPREV_ID_5750_C1 0x4201
26701 #define CHIPREV_ID_5750_C2 0x4202
26702 #define CHIPREV_ID_5752_A0_HW 0x5000
26703 #define CHIPREV_ID_5752_A0 0x6000
26704 diff -urNp linux-2.6.35.4/drivers/net/tulip/de4x5.c linux-2.6.35.4/drivers/net/tulip/de4x5.c
26705 --- linux-2.6.35.4/drivers/net/tulip/de4x5.c 2010-08-26 19:47:12.000000000 -0400
26706 +++ linux-2.6.35.4/drivers/net/tulip/de4x5.c 2010-09-17 20:12:37.000000000 -0400
26707 @@ -5401,7 +5401,7 @@ de4x5_ioctl(struct net_device *dev, stru
26708 for (i=0; i<ETH_ALEN; i++) {
26709 tmp.addr[i] = dev->dev_addr[i];
26710 }
26711 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
26712 + if (ioc->len > sizeof(tmp.addr) || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
26713 break;
26714
26715 case DE4X5_SET_HWADDR: /* Set the hardware address */
26716 @@ -5441,7 +5441,7 @@ de4x5_ioctl(struct net_device *dev, stru
26717 spin_lock_irqsave(&lp->lock, flags);
26718 memcpy(&statbuf, &lp->pktStats, ioc->len);
26719 spin_unlock_irqrestore(&lp->lock, flags);
26720 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
26721 + if (ioc->len > sizeof(statbuf) || copy_to_user(ioc->data, &statbuf, ioc->len))
26722 return -EFAULT;
26723 break;
26724 }
26725 @@ -5474,7 +5474,7 @@ de4x5_ioctl(struct net_device *dev, stru
26726 tmp.lval[6] = inl(DE4X5_STRR); j+=4;
26727 tmp.lval[7] = inl(DE4X5_SIGR); j+=4;
26728 ioc->len = j;
26729 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
26730 + if (copy_to_user(ioc->data, tmp.lval, ioc->len)) return -EFAULT;
26731 break;
26732
26733 #define DE4X5_DUMP 0x0f /* Dump the DE4X5 Status */
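ioc->len is supplied by user space, so passing it unchecked to copy_to_user() with a fixed-size kernel buffer (tmp.addr, statbuf) lets a caller read past the end of that buffer; bounding the length by the buffer size first turns the overread into -EFAULT. A small sketch of the check with invented names and a stubbed copy_to_user():

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* stand-in for copy_to_user(): returns 0 on success */
static int copy_to_user_demo(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

/* invented ioctl helper with a fixed-size kernel buffer */
static int ioctl_get_addr_demo(void *user_buf, size_t user_len)
{
	char addr[16] = "00:11:22:33";

	/* reject lengths larger than the kernel buffer before copying */
	if (user_len > sizeof(addr) ||
	    copy_to_user_demo(user_buf, addr, user_len))
		return -EFAULT;

	return 0;
}

int main(void)
{
	char out[64];

	printf("len=6   -> %d\n", ioctl_get_addr_demo(out, 6));
	printf("len=200 -> %d\n", ioctl_get_addr_demo(out, 200));
	return 0;
}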
26734 diff -urNp linux-2.6.35.4/drivers/net/usb/hso.c linux-2.6.35.4/drivers/net/usb/hso.c
26735 --- linux-2.6.35.4/drivers/net/usb/hso.c 2010-08-26 19:47:12.000000000 -0400
26736 +++ linux-2.6.35.4/drivers/net/usb/hso.c 2010-09-17 20:12:37.000000000 -0400
26737 @@ -258,7 +258,7 @@ struct hso_serial {
26738
26739 /* from usb_serial_port */
26740 struct tty_struct *tty;
26741 - int open_count;
26742 + atomic_t open_count;
26743 spinlock_t serial_lock;
26744
26745 int (*write_data) (struct hso_serial *serial);
26746 @@ -1201,7 +1201,7 @@ static void put_rxbuf_data_and_resubmit_
26747 struct urb *urb;
26748
26749 urb = serial->rx_urb[0];
26750 - if (serial->open_count > 0) {
26751 + if (atomic_read(&serial->open_count) > 0) {
26752 count = put_rxbuf_data(urb, serial);
26753 if (count == -1)
26754 return;
26755 @@ -1237,7 +1237,7 @@ static void hso_std_serial_read_bulk_cal
26756 DUMP1(urb->transfer_buffer, urb->actual_length);
26757
26758 /* Anyone listening? */
26759 - if (serial->open_count == 0)
26760 + if (atomic_read(&serial->open_count) == 0)
26761 return;
26762
26763 if (status == 0) {
26764 @@ -1332,8 +1332,7 @@ static int hso_serial_open(struct tty_st
26765 spin_unlock_irq(&serial->serial_lock);
26766
26767 /* check for port already opened, if not set the termios */
26768 - serial->open_count++;
26769 - if (serial->open_count == 1) {
26770 + if (atomic_inc_return(&serial->open_count) == 1) {
26771 serial->rx_state = RX_IDLE;
26772 /* Force default termio settings */
26773 _hso_serial_set_termios(tty, NULL);
26774 @@ -1345,7 +1344,7 @@ static int hso_serial_open(struct tty_st
26775 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
26776 if (result) {
26777 hso_stop_serial_device(serial->parent);
26778 - serial->open_count--;
26779 + atomic_dec(&serial->open_count);
26780 kref_put(&serial->parent->ref, hso_serial_ref_free);
26781 }
26782 } else {
26783 @@ -1382,10 +1381,10 @@ static void hso_serial_close(struct tty_
26784
26785 /* reset the rts and dtr */
26786 /* do the actual close */
26787 - serial->open_count--;
26788 + atomic_dec(&serial->open_count);
26789
26790 - if (serial->open_count <= 0) {
26791 - serial->open_count = 0;
26792 + if (atomic_read(&serial->open_count) <= 0) {
26793 + atomic_set(&serial->open_count, 0);
26794 spin_lock_irq(&serial->serial_lock);
26795 if (serial->tty == tty) {
26796 serial->tty->driver_data = NULL;
26797 @@ -1467,7 +1466,7 @@ static void hso_serial_set_termios(struc
26798
26799 /* the actual setup */
26800 spin_lock_irqsave(&serial->serial_lock, flags);
26801 - if (serial->open_count)
26802 + if (atomic_read(&serial->open_count))
26803 _hso_serial_set_termios(tty, old);
26804 else
26805 tty->termios = old;
26806 @@ -1655,6 +1654,9 @@ static int hso_get_count(struct hso_seri
26807
26808 if (!tiocmget)
26809 return -ENOENT;
26810 +
26811 + memset(&icount, 0, sizeof(icount));
26812 +
26813 spin_lock_irq(&serial->serial_lock);
26814 memcpy(&cnow, &tiocmget->icount, sizeof(struct uart_icount));
26815 spin_unlock_irq(&serial->serial_lock);
26816 @@ -1929,7 +1931,7 @@ static void intr_callback(struct urb *ur
26817 D1("Pending read interrupt on port %d\n", i);
26818 spin_lock(&serial->serial_lock);
26819 if (serial->rx_state == RX_IDLE &&
26820 - serial->open_count > 0) {
26821 + atomic_read(&serial->open_count) > 0) {
26822 /* Setup and send a ctrl req read on
26823 * port i */
26824 if (!serial->rx_urb_filled[0]) {
26825 @@ -3119,7 +3121,7 @@ static int hso_resume(struct usb_interfa
26826 /* Start all serial ports */
26827 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
26828 if (serial_table[i] && (serial_table[i]->interface == iface)) {
26829 - if (dev2ser(serial_table[i])->open_count) {
26830 + if (atomic_read(&dev2ser(serial_table[i])->open_count)) {
26831 result =
26832 hso_start_serial_device(serial_table[i], GFP_NOIO);
26833 hso_kick_transmit(dev2ser(serial_table[i]));
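serial->open_count was a plain int touched from the open/close paths, URB completion and resume handling; making it an atomic_t turns each increment, decrement and test into a single atomic operation, and atomic_inc_return() lets the "this is the first opener" decision be made without torn or lost updates when those paths race. A rough sketch of that open/close discipline using C11 atomics (demo names only):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int open_count;

/* the first opener performs the one-time device setup */
static void serial_open_demo(void)
{
	if (atomic_fetch_add(&open_count, 1) + 1 == 1)
		printf("first open: start the device\n");
	else
		printf("already open, count is now %d\n",
		       atomic_load(&open_count));
}

/* the last closer tears the device down again */
static void serial_close_demo(void)
{
	if (atomic_fetch_sub(&open_count, 1) - 1 <= 0) {
		atomic_store(&open_count, 0);
		printf("last close: stop the device\n");
	}
}

int main(void)
{
	serial_open_demo();
	serial_open_demo();
	serial_close_demo();
	serial_close_demo();
	return 0;
}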
26834 diff -urNp linux-2.6.35.4/drivers/net/wireless/b43/debugfs.c linux-2.6.35.4/drivers/net/wireless/b43/debugfs.c
26835 --- linux-2.6.35.4/drivers/net/wireless/b43/debugfs.c 2010-08-26 19:47:12.000000000 -0400
26836 +++ linux-2.6.35.4/drivers/net/wireless/b43/debugfs.c 2010-09-17 20:12:09.000000000 -0400
26837 @@ -43,7 +43,7 @@ static struct dentry *rootdir;
26838 struct b43_debugfs_fops {
26839 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
26840 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
26841 - struct file_operations fops;
26842 + const struct file_operations fops;
26843 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
26844 size_t file_struct_offset;
26845 };
26846 diff -urNp linux-2.6.35.4/drivers/net/wireless/b43legacy/debugfs.c linux-2.6.35.4/drivers/net/wireless/b43legacy/debugfs.c
26847 --- linux-2.6.35.4/drivers/net/wireless/b43legacy/debugfs.c 2010-08-26 19:47:12.000000000 -0400
26848 +++ linux-2.6.35.4/drivers/net/wireless/b43legacy/debugfs.c 2010-09-17 20:12:09.000000000 -0400
26849 @@ -44,7 +44,7 @@ static struct dentry *rootdir;
26850 struct b43legacy_debugfs_fops {
26851 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
26852 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
26853 - struct file_operations fops;
26854 + const struct file_operations fops;
26855 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
26856 size_t file_struct_offset;
26857 /* Take wl->irq_lock before calling read/write? */
26858 diff -urNp linux-2.6.35.4/drivers/net/wireless/iwlwifi/iwl-debug.h linux-2.6.35.4/drivers/net/wireless/iwlwifi/iwl-debug.h
26859 --- linux-2.6.35.4/drivers/net/wireless/iwlwifi/iwl-debug.h 2010-08-26 19:47:12.000000000 -0400
26860 +++ linux-2.6.35.4/drivers/net/wireless/iwlwifi/iwl-debug.h 2010-09-17 20:12:09.000000000 -0400
26861 @@ -68,8 +68,8 @@ do {
26862 } while (0)
26863
26864 #else
26865 -#define IWL_DEBUG(__priv, level, fmt, args...)
26866 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
26867 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
26868 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
26869 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
26870 void *p, u32 len)
26871 {}
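When driver debugging is compiled out, IWL_DEBUG() previously expanded to nothing, leaving a bare `;` behind at every call site; expanding to `do {} while (0)` instead keeps the compiled-out macro a complete single statement, so unbraced if/else callers and empty-body warnings behave the same whether debugging is built in or not. A trimmed-down illustration with invented macro names:

#include <stdio.h>

#define DEBUG_ON 0

#if DEBUG_ON
# define DBG(fmt, ...) printf(fmt, ##__VA_ARGS__)
#else
/* do {} while (0) keeps the compiled-out macro a complete statement */
# define DBG(fmt, ...) do {} while (0)
#endif

int main(void)
{
	int err = 0;

	if (err)
		DBG("error path taken\n");
	else
		printf("no error\n");	/* the else binds exactly as written */

	return 0;
}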
26872 diff -urNp linux-2.6.35.4/drivers/net/wireless/libertas/debugfs.c linux-2.6.35.4/drivers/net/wireless/libertas/debugfs.c
26873 --- linux-2.6.35.4/drivers/net/wireless/libertas/debugfs.c 2010-08-26 19:47:12.000000000 -0400
26874 +++ linux-2.6.35.4/drivers/net/wireless/libertas/debugfs.c 2010-09-17 20:12:09.000000000 -0400
26875 @@ -718,7 +718,7 @@ out_unlock:
26876 struct lbs_debugfs_files {
26877 const char *name;
26878 int perm;
26879 - struct file_operations fops;
26880 + const struct file_operations fops;
26881 };
26882
26883 static const struct lbs_debugfs_files debugfs_files[] = {
26884 diff -urNp linux-2.6.35.4/drivers/net/wireless/rndis_wlan.c linux-2.6.35.4/drivers/net/wireless/rndis_wlan.c
26885 --- linux-2.6.35.4/drivers/net/wireless/rndis_wlan.c 2010-08-26 19:47:12.000000000 -0400
26886 +++ linux-2.6.35.4/drivers/net/wireless/rndis_wlan.c 2010-09-17 20:12:09.000000000 -0400
26887 @@ -1235,7 +1235,7 @@ static int set_rts_threshold(struct usbn
26888
26889 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
26890
26891 - if (rts_threshold < 0 || rts_threshold > 2347)
26892 + if (rts_threshold > 2347)
26893 rts_threshold = 2347;
26894
26895 tmp = cpu_to_le32(rts_threshold);
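rts_threshold is an unsigned 32-bit value in this driver, so the `< 0` half of the old test could never be true; dropping it removes dead code (and the compiler warning it provokes) without changing behaviour, and the pnp_check_irq()/pnp_check_dma() hunks further down make the same kind of change for unsigned resource values. A tiny illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t rts_threshold = 5000;

	/* "rts_threshold < 0" was dropped: it is always false for an
	 * unsigned value, so only the upper bound is worth testing */
	if (rts_threshold > 2347)
		rts_threshold = 2347;

	printf("clamped threshold = %u\n", rts_threshold);
	return 0;
}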
26896 diff -urNp linux-2.6.35.4/drivers/oprofile/buffer_sync.c linux-2.6.35.4/drivers/oprofile/buffer_sync.c
26897 --- linux-2.6.35.4/drivers/oprofile/buffer_sync.c 2010-08-26 19:47:12.000000000 -0400
26898 +++ linux-2.6.35.4/drivers/oprofile/buffer_sync.c 2010-09-17 20:12:09.000000000 -0400
26899 @@ -341,7 +341,7 @@ static void add_data(struct op_entry *en
26900 if (cookie == NO_COOKIE)
26901 offset = pc;
26902 if (cookie == INVALID_COOKIE) {
26903 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
26904 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
26905 offset = pc;
26906 }
26907 if (cookie != last_cookie) {
26908 @@ -385,14 +385,14 @@ add_sample(struct mm_struct *mm, struct
26909 /* add userspace sample */
26910
26911 if (!mm) {
26912 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
26913 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
26914 return 0;
26915 }
26916
26917 cookie = lookup_dcookie(mm, s->eip, &offset);
26918
26919 if (cookie == INVALID_COOKIE) {
26920 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
26921 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
26922 return 0;
26923 }
26924
26925 @@ -561,7 +561,7 @@ void sync_buffer(int cpu)
26926 /* ignore backtraces if failed to add a sample */
26927 if (state == sb_bt_start) {
26928 state = sb_bt_ignore;
26929 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
26930 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
26931 }
26932 }
26933 release_mm(mm);
26934 diff -urNp linux-2.6.35.4/drivers/oprofile/event_buffer.c linux-2.6.35.4/drivers/oprofile/event_buffer.c
26935 --- linux-2.6.35.4/drivers/oprofile/event_buffer.c 2010-08-26 19:47:12.000000000 -0400
26936 +++ linux-2.6.35.4/drivers/oprofile/event_buffer.c 2010-09-17 20:12:09.000000000 -0400
26937 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
26938 }
26939
26940 if (buffer_pos == buffer_size) {
26941 - atomic_inc(&oprofile_stats.event_lost_overflow);
26942 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
26943 return;
26944 }
26945
26946 diff -urNp linux-2.6.35.4/drivers/oprofile/oprof.c linux-2.6.35.4/drivers/oprofile/oprof.c
26947 --- linux-2.6.35.4/drivers/oprofile/oprof.c 2010-08-26 19:47:12.000000000 -0400
26948 +++ linux-2.6.35.4/drivers/oprofile/oprof.c 2010-09-17 20:12:09.000000000 -0400
26949 @@ -110,7 +110,7 @@ static void switch_worker(struct work_st
26950 if (oprofile_ops.switch_events())
26951 return;
26952
26953 - atomic_inc(&oprofile_stats.multiplex_counter);
26954 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
26955 start_switch_worker();
26956 }
26957
26958 diff -urNp linux-2.6.35.4/drivers/oprofile/oprofilefs.c linux-2.6.35.4/drivers/oprofile/oprofilefs.c
26959 --- linux-2.6.35.4/drivers/oprofile/oprofilefs.c 2010-08-26 19:47:12.000000000 -0400
26960 +++ linux-2.6.35.4/drivers/oprofile/oprofilefs.c 2010-09-17 20:12:09.000000000 -0400
26961 @@ -187,7 +187,7 @@ static const struct file_operations atom
26962
26963
26964 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
26965 - char const *name, atomic_t *val)
26966 + char const *name, atomic_unchecked_t *val)
26967 {
26968 struct dentry *d = __oprofilefs_create_file(sb, root, name,
26969 &atomic_ro_fops, 0444);
26970 diff -urNp linux-2.6.35.4/drivers/oprofile/oprofile_stats.c linux-2.6.35.4/drivers/oprofile/oprofile_stats.c
26971 --- linux-2.6.35.4/drivers/oprofile/oprofile_stats.c 2010-08-26 19:47:12.000000000 -0400
26972 +++ linux-2.6.35.4/drivers/oprofile/oprofile_stats.c 2010-09-17 20:12:09.000000000 -0400
26973 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
26974 cpu_buf->sample_invalid_eip = 0;
26975 }
26976
26977 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
26978 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
26979 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
26980 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
26981 - atomic_set(&oprofile_stats.multiplex_counter, 0);
26982 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
26983 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
26984 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
26985 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
26986 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
26987 }
26988
26989
26990 diff -urNp linux-2.6.35.4/drivers/oprofile/oprofile_stats.h linux-2.6.35.4/drivers/oprofile/oprofile_stats.h
26991 --- linux-2.6.35.4/drivers/oprofile/oprofile_stats.h 2010-08-26 19:47:12.000000000 -0400
26992 +++ linux-2.6.35.4/drivers/oprofile/oprofile_stats.h 2010-09-17 20:12:09.000000000 -0400
26993 @@ -13,11 +13,11 @@
26994 #include <asm/atomic.h>
26995
26996 struct oprofile_stat_struct {
26997 - atomic_t sample_lost_no_mm;
26998 - atomic_t sample_lost_no_mapping;
26999 - atomic_t bt_lost_no_mapping;
27000 - atomic_t event_lost_overflow;
27001 - atomic_t multiplex_counter;
27002 + atomic_unchecked_t sample_lost_no_mm;
27003 + atomic_unchecked_t sample_lost_no_mapping;
27004 + atomic_unchecked_t bt_lost_no_mapping;
27005 + atomic_unchecked_t event_lost_overflow;
27006 + atomic_unchecked_t multiplex_counter;
27007 };
27008
27009 extern struct oprofile_stat_struct oprofile_stats;
27010 diff -urNp linux-2.6.35.4/drivers/parport/procfs.c linux-2.6.35.4/drivers/parport/procfs.c
27011 --- linux-2.6.35.4/drivers/parport/procfs.c 2010-08-26 19:47:12.000000000 -0400
27012 +++ linux-2.6.35.4/drivers/parport/procfs.c 2010-09-17 20:12:37.000000000 -0400
27013 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
27014
27015 *ppos += len;
27016
27017 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
27018 + return (len > sizeof(buffer) || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
27019 }
27020
27021 #ifdef CONFIG_PARPORT_1284
27022 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
27023
27024 *ppos += len;
27025
27026 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
27027 + return (len > sizeof(buffer) || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
27028 }
27029 #endif /* IEEE1284.3 support. */
27030
27031 diff -urNp linux-2.6.35.4/drivers/pci/hotplug/acpiphp_glue.c linux-2.6.35.4/drivers/pci/hotplug/acpiphp_glue.c
27032 --- linux-2.6.35.4/drivers/pci/hotplug/acpiphp_glue.c 2010-08-26 19:47:12.000000000 -0400
27033 +++ linux-2.6.35.4/drivers/pci/hotplug/acpiphp_glue.c 2010-09-17 20:12:09.000000000 -0400
27034 @@ -110,7 +110,7 @@ static int post_dock_fixups(struct notif
27035 }
27036
27037
27038 -static struct acpi_dock_ops acpiphp_dock_ops = {
27039 +static const struct acpi_dock_ops acpiphp_dock_ops = {
27040 .handler = handle_hotplug_event_func,
27041 };
27042
27043 diff -urNp linux-2.6.35.4/drivers/pci/hotplug/cpqphp_nvram.c linux-2.6.35.4/drivers/pci/hotplug/cpqphp_nvram.c
27044 --- linux-2.6.35.4/drivers/pci/hotplug/cpqphp_nvram.c 2010-08-26 19:47:12.000000000 -0400
27045 +++ linux-2.6.35.4/drivers/pci/hotplug/cpqphp_nvram.c 2010-09-17 20:12:09.000000000 -0400
27046 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
27047
27048 void compaq_nvram_init (void __iomem *rom_start)
27049 {
27050 +
27051 +#ifndef CONFIG_PAX_KERNEXEC
27052 if (rom_start) {
27053 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
27054 }
27055 +#endif
27056 +
27057 dbg("int15 entry = %p\n", compaq_int15_entry_point);
27058
27059 /* initialize our int15 lock */
27060 diff -urNp linux-2.6.35.4/drivers/pci/intel-iommu.c linux-2.6.35.4/drivers/pci/intel-iommu.c
27061 --- linux-2.6.35.4/drivers/pci/intel-iommu.c 2010-08-26 19:47:12.000000000 -0400
27062 +++ linux-2.6.35.4/drivers/pci/intel-iommu.c 2010-09-17 20:12:09.000000000 -0400
27063 @@ -2938,7 +2938,7 @@ static int intel_mapping_error(struct de
27064 return !dma_addr;
27065 }
27066
27067 -struct dma_map_ops intel_dma_ops = {
27068 +const struct dma_map_ops intel_dma_ops = {
27069 .alloc_coherent = intel_alloc_coherent,
27070 .free_coherent = intel_free_coherent,
27071 .map_sg = intel_map_sg,
27072 diff -urNp linux-2.6.35.4/drivers/pci/pcie/portdrv_pci.c linux-2.6.35.4/drivers/pci/pcie/portdrv_pci.c
27073 --- linux-2.6.35.4/drivers/pci/pcie/portdrv_pci.c 2010-08-26 19:47:12.000000000 -0400
27074 +++ linux-2.6.35.4/drivers/pci/pcie/portdrv_pci.c 2010-09-17 20:12:09.000000000 -0400
27075 @@ -250,7 +250,7 @@ static void pcie_portdrv_err_resume(stru
27076 static const struct pci_device_id port_pci_ids[] = { {
27077 /* handle any PCI-Express port */
27078 PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x00), ~0),
27079 - }, { /* end: all zeroes */ }
27080 + }, { 0, 0, 0, 0, 0, 0, 0 }
27081 };
27082 MODULE_DEVICE_TABLE(pci, port_pci_ids);
27083
27084 diff -urNp linux-2.6.35.4/drivers/pci/probe.c linux-2.6.35.4/drivers/pci/probe.c
27085 --- linux-2.6.35.4/drivers/pci/probe.c 2010-08-26 19:47:12.000000000 -0400
27086 +++ linux-2.6.35.4/drivers/pci/probe.c 2010-09-17 20:12:09.000000000 -0400
27087 @@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(
27088 return ret;
27089 }
27090
27091 -static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
27092 +static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
27093 struct device_attribute *attr,
27094 char *buf)
27095 {
27096 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
27097 }
27098
27099 -static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
27100 +static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
27101 struct device_attribute *attr,
27102 char *buf)
27103 {
27104 diff -urNp linux-2.6.35.4/drivers/pci/proc.c linux-2.6.35.4/drivers/pci/proc.c
27105 --- linux-2.6.35.4/drivers/pci/proc.c 2010-08-26 19:47:12.000000000 -0400
27106 +++ linux-2.6.35.4/drivers/pci/proc.c 2010-09-17 20:12:37.000000000 -0400
27107 @@ -481,7 +481,16 @@ static const struct file_operations proc
27108 static int __init pci_proc_init(void)
27109 {
27110 struct pci_dev *dev = NULL;
27111 +
27112 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
27113 +#ifdef CONFIG_GRKERNSEC_PROC_USER
27114 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
27115 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
27116 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
27117 +#endif
27118 +#else
27119 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
27120 +#endif
27121 proc_create("devices", 0, proc_bus_pci_dir,
27122 &proc_bus_pci_dev_operations);
27123 proc_initialized = 1;
27124 diff -urNp linux-2.6.35.4/drivers/pcmcia/pcmcia_ioctl.c linux-2.6.35.4/drivers/pcmcia/pcmcia_ioctl.c
27125 --- linux-2.6.35.4/drivers/pcmcia/pcmcia_ioctl.c 2010-08-26 19:47:12.000000000 -0400
27126 +++ linux-2.6.35.4/drivers/pcmcia/pcmcia_ioctl.c 2010-09-17 20:12:09.000000000 -0400
27127 @@ -850,7 +850,7 @@ static int ds_ioctl(struct file *file, u
27128 return -EFAULT;
27129 }
27130 }
27131 - buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
27132 + buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
27133 if (!buf)
27134 return -ENOMEM;
27135
27136 diff -urNp linux-2.6.35.4/drivers/pcmcia/ti113x.h linux-2.6.35.4/drivers/pcmcia/ti113x.h
27137 --- linux-2.6.35.4/drivers/pcmcia/ti113x.h 2010-08-26 19:47:12.000000000 -0400
27138 +++ linux-2.6.35.4/drivers/pcmcia/ti113x.h 2010-09-17 20:12:09.000000000 -0400
27139 @@ -936,7 +936,7 @@ static struct pci_device_id ene_tune_tbl
27140 DEVID(PCI_VENDOR_ID_MOTOROLA, 0x3410, 0xECC0, PCI_ANY_ID,
27141 ENE_TEST_C9_TLTENABLE | ENE_TEST_C9_PFENABLE, ENE_TEST_C9_TLTENABLE),
27142
27143 - {}
27144 + { 0, 0, 0, 0, 0, 0, 0 }
27145 };
27146
27147 static void ene_tune_bridge(struct pcmcia_socket *sock, struct pci_bus *bus)
27148 diff -urNp linux-2.6.35.4/drivers/pcmcia/yenta_socket.c linux-2.6.35.4/drivers/pcmcia/yenta_socket.c
27149 --- linux-2.6.35.4/drivers/pcmcia/yenta_socket.c 2010-08-26 19:47:12.000000000 -0400
27150 +++ linux-2.6.35.4/drivers/pcmcia/yenta_socket.c 2010-09-17 20:12:09.000000000 -0400
27151 @@ -1428,7 +1428,7 @@ static struct pci_device_id yenta_table[
27152
27153 /* match any cardbus bridge */
27154 CB_ID(PCI_ANY_ID, PCI_ANY_ID, DEFAULT),
27155 - { /* all zeroes */ }
27156 + { 0, 0, 0, 0, 0, 0, 0 }
27157 };
27158 MODULE_DEVICE_TABLE(pci, yenta_table);
27159
27160 diff -urNp linux-2.6.35.4/drivers/platform/x86/acer-wmi.c linux-2.6.35.4/drivers/platform/x86/acer-wmi.c
27161 --- linux-2.6.35.4/drivers/platform/x86/acer-wmi.c 2010-08-26 19:47:12.000000000 -0400
27162 +++ linux-2.6.35.4/drivers/platform/x86/acer-wmi.c 2010-09-17 20:12:09.000000000 -0400
27163 @@ -916,7 +916,7 @@ static int update_bl_status(struct backl
27164 return 0;
27165 }
27166
27167 -static struct backlight_ops acer_bl_ops = {
27168 +static const struct backlight_ops acer_bl_ops = {
27169 .get_brightness = read_brightness,
27170 .update_status = update_bl_status,
27171 };
27172 diff -urNp linux-2.6.35.4/drivers/platform/x86/asus_acpi.c linux-2.6.35.4/drivers/platform/x86/asus_acpi.c
27173 --- linux-2.6.35.4/drivers/platform/x86/asus_acpi.c 2010-08-26 19:47:12.000000000 -0400
27174 +++ linux-2.6.35.4/drivers/platform/x86/asus_acpi.c 2010-09-17 20:12:09.000000000 -0400
27175 @@ -1464,7 +1464,7 @@ static int asus_hotk_remove(struct acpi_
27176 return 0;
27177 }
27178
27179 -static struct backlight_ops asus_backlight_data = {
27180 +static const struct backlight_ops asus_backlight_data = {
27181 .get_brightness = read_brightness,
27182 .update_status = set_brightness_status,
27183 };
27184 diff -urNp linux-2.6.35.4/drivers/platform/x86/asus-laptop.c linux-2.6.35.4/drivers/platform/x86/asus-laptop.c
27185 --- linux-2.6.35.4/drivers/platform/x86/asus-laptop.c 2010-08-26 19:47:12.000000000 -0400
27186 +++ linux-2.6.35.4/drivers/platform/x86/asus-laptop.c 2010-09-17 20:12:09.000000000 -0400
27187 @@ -224,7 +224,6 @@ struct asus_laptop {
27188 struct asus_led gled;
27189 struct asus_led kled;
27190 struct workqueue_struct *led_workqueue;
27191 -
27192 int wireless_status;
27193 bool have_rsts;
27194 int lcd_state;
27195 @@ -621,7 +620,7 @@ static int update_bl_status(struct backl
27196 return asus_lcd_set(asus, value);
27197 }
27198
27199 -static struct backlight_ops asusbl_ops = {
27200 +static const struct backlight_ops asusbl_ops = {
27201 .get_brightness = asus_read_brightness,
27202 .update_status = update_bl_status,
27203 };
27204 diff -urNp linux-2.6.35.4/drivers/platform/x86/compal-laptop.c linux-2.6.35.4/drivers/platform/x86/compal-laptop.c
27205 --- linux-2.6.35.4/drivers/platform/x86/compal-laptop.c 2010-08-26 19:47:12.000000000 -0400
27206 +++ linux-2.6.35.4/drivers/platform/x86/compal-laptop.c 2010-09-17 20:12:09.000000000 -0400
27207 @@ -168,7 +168,7 @@ static int bl_update_status(struct backl
27208 return set_lcd_level(b->props.brightness);
27209 }
27210
27211 -static struct backlight_ops compalbl_ops = {
27212 +static const struct backlight_ops compalbl_ops = {
27213 .get_brightness = bl_get_brightness,
27214 .update_status = bl_update_status,
27215 };
27216 diff -urNp linux-2.6.35.4/drivers/platform/x86/dell-laptop.c linux-2.6.35.4/drivers/platform/x86/dell-laptop.c
27217 --- linux-2.6.35.4/drivers/platform/x86/dell-laptop.c 2010-08-26 19:47:12.000000000 -0400
27218 +++ linux-2.6.35.4/drivers/platform/x86/dell-laptop.c 2010-09-17 20:12:09.000000000 -0400
27219 @@ -469,7 +469,7 @@ out:
27220 return buffer->output[1];
27221 }
27222
27223 -static struct backlight_ops dell_ops = {
27224 +static const struct backlight_ops dell_ops = {
27225 .get_brightness = dell_get_intensity,
27226 .update_status = dell_send_intensity,
27227 };
27228 diff -urNp linux-2.6.35.4/drivers/platform/x86/eeepc-laptop.c linux-2.6.35.4/drivers/platform/x86/eeepc-laptop.c
27229 --- linux-2.6.35.4/drivers/platform/x86/eeepc-laptop.c 2010-08-26 19:47:12.000000000 -0400
27230 +++ linux-2.6.35.4/drivers/platform/x86/eeepc-laptop.c 2010-09-17 20:12:09.000000000 -0400
27231 @@ -1114,7 +1114,7 @@ static int update_bl_status(struct backl
27232 return set_brightness(bd, bd->props.brightness);
27233 }
27234
27235 -static struct backlight_ops eeepcbl_ops = {
27236 +static const struct backlight_ops eeepcbl_ops = {
27237 .get_brightness = read_brightness,
27238 .update_status = update_bl_status,
27239 };
27240 diff -urNp linux-2.6.35.4/drivers/platform/x86/fujitsu-laptop.c linux-2.6.35.4/drivers/platform/x86/fujitsu-laptop.c
27241 --- linux-2.6.35.4/drivers/platform/x86/fujitsu-laptop.c 2010-08-26 19:47:12.000000000 -0400
27242 +++ linux-2.6.35.4/drivers/platform/x86/fujitsu-laptop.c 2010-09-17 20:12:09.000000000 -0400
27243 @@ -437,7 +437,7 @@ static int bl_update_status(struct backl
27244 return ret;
27245 }
27246
27247 -static struct backlight_ops fujitsubl_ops = {
27248 +static const struct backlight_ops fujitsubl_ops = {
27249 .get_brightness = bl_get_brightness,
27250 .update_status = bl_update_status,
27251 };
27252 diff -urNp linux-2.6.35.4/drivers/platform/x86/sony-laptop.c linux-2.6.35.4/drivers/platform/x86/sony-laptop.c
27253 --- linux-2.6.35.4/drivers/platform/x86/sony-laptop.c 2010-08-26 19:47:12.000000000 -0400
27254 +++ linux-2.6.35.4/drivers/platform/x86/sony-laptop.c 2010-09-17 20:12:09.000000000 -0400
27255 @@ -857,7 +857,7 @@ static int sony_backlight_get_brightness
27256 }
27257
27258 static struct backlight_device *sony_backlight_device;
27259 -static struct backlight_ops sony_backlight_ops = {
27260 +static const struct backlight_ops sony_backlight_ops = {
27261 .update_status = sony_backlight_update_status,
27262 .get_brightness = sony_backlight_get_brightness,
27263 };
27264 diff -urNp linux-2.6.35.4/drivers/platform/x86/thinkpad_acpi.c linux-2.6.35.4/drivers/platform/x86/thinkpad_acpi.c
27265 --- linux-2.6.35.4/drivers/platform/x86/thinkpad_acpi.c 2010-08-26 19:47:12.000000000 -0400
27266 +++ linux-2.6.35.4/drivers/platform/x86/thinkpad_acpi.c 2010-09-17 20:12:09.000000000 -0400
27267 @@ -6142,7 +6142,7 @@ static void tpacpi_brightness_notify_cha
27268 BACKLIGHT_UPDATE_HOTKEY);
27269 }
27270
27271 -static struct backlight_ops ibm_backlight_data = {
27272 +static const struct backlight_ops ibm_backlight_data = {
27273 .get_brightness = brightness_get,
27274 .update_status = brightness_update_status,
27275 };
27276 diff -urNp linux-2.6.35.4/drivers/platform/x86/toshiba_acpi.c linux-2.6.35.4/drivers/platform/x86/toshiba_acpi.c
27277 --- linux-2.6.35.4/drivers/platform/x86/toshiba_acpi.c 2010-08-26 19:47:12.000000000 -0400
27278 +++ linux-2.6.35.4/drivers/platform/x86/toshiba_acpi.c 2010-09-17 20:12:09.000000000 -0400
27279 @@ -741,7 +741,7 @@ static acpi_status remove_device(void)
27280 return AE_OK;
27281 }
27282
27283 -static struct backlight_ops toshiba_backlight_data = {
27284 +static const struct backlight_ops toshiba_backlight_data = {
27285 .get_brightness = get_lcd,
27286 .update_status = set_lcd_status,
27287 };
27288 diff -urNp linux-2.6.35.4/drivers/pnp/pnpbios/bioscalls.c linux-2.6.35.4/drivers/pnp/pnpbios/bioscalls.c
27289 --- linux-2.6.35.4/drivers/pnp/pnpbios/bioscalls.c 2010-08-26 19:47:12.000000000 -0400
27290 +++ linux-2.6.35.4/drivers/pnp/pnpbios/bioscalls.c 2010-09-17 20:12:09.000000000 -0400
27291 @@ -59,7 +59,7 @@ do { \
27292 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
27293 } while(0)
27294
27295 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
27296 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
27297 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
27298
27299 /*
27300 @@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func
27301
27302 cpu = get_cpu();
27303 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
27304 +
27305 + pax_open_kernel();
27306 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
27307 + pax_close_kernel();
27308
27309 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
27310 spin_lock_irqsave(&pnp_bios_lock, flags);
27311 @@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func
27312 :"memory");
27313 spin_unlock_irqrestore(&pnp_bios_lock, flags);
27314
27315 + pax_open_kernel();
27316 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
27317 + pax_close_kernel();
27318 +
27319 put_cpu();
27320
27321 /* If we get here and this is set then the PnP BIOS faulted on us. */
27322 @@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 n
27323 return status;
27324 }
27325
27326 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
27327 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
27328 {
27329 int i;
27330
27331 @@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_i
27332 pnp_bios_callpoint.offset = header->fields.pm16offset;
27333 pnp_bios_callpoint.segment = PNP_CS16;
27334
27335 + pax_open_kernel();
27336 +
27337 for_each_possible_cpu(i) {
27338 struct desc_struct *gdt = get_cpu_gdt_table(i);
27339 if (!gdt)
27340 @@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_i
27341 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
27342 (unsigned long)__va(header->fields.pm16dseg));
27343 }
27344 +
27345 + pax_close_kernel();
27346 }
27347 diff -urNp linux-2.6.35.4/drivers/pnp/quirks.c linux-2.6.35.4/drivers/pnp/quirks.c
27348 --- linux-2.6.35.4/drivers/pnp/quirks.c 2010-08-26 19:47:12.000000000 -0400
27349 +++ linux-2.6.35.4/drivers/pnp/quirks.c 2010-09-17 20:12:09.000000000 -0400
27350 @@ -322,7 +322,7 @@ static struct pnp_fixup pnp_fixups[] = {
27351 /* PnP resources that might overlap PCI BARs */
27352 {"PNP0c01", quirk_system_pci_resources},
27353 {"PNP0c02", quirk_system_pci_resources},
27354 - {""}
27355 + {"", NULL}
27356 };
27357
27358 void pnp_fixup_device(struct pnp_dev *dev)
27359 diff -urNp linux-2.6.35.4/drivers/pnp/resource.c linux-2.6.35.4/drivers/pnp/resource.c
27360 --- linux-2.6.35.4/drivers/pnp/resource.c 2010-08-26 19:47:12.000000000 -0400
27361 +++ linux-2.6.35.4/drivers/pnp/resource.c 2010-09-17 20:12:09.000000000 -0400
27362 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
27363 return 1;
27364
27365 /* check if the resource is valid */
27366 - if (*irq < 0 || *irq > 15)
27367 + if (*irq > 15)
27368 return 0;
27369
27370 /* check if the resource is reserved */
27371 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
27372 return 1;
27373
27374 /* check if the resource is valid */
27375 - if (*dma < 0 || *dma == 4 || *dma > 7)
27376 + if (*dma == 4 || *dma > 7)
27377 return 0;
27378
27379 /* check if the resource is reserved */
27380 diff -urNp linux-2.6.35.4/drivers/s390/cio/qdio_debug.c linux-2.6.35.4/drivers/s390/cio/qdio_debug.c
27381 --- linux-2.6.35.4/drivers/s390/cio/qdio_debug.c 2010-08-26 19:47:12.000000000 -0400
27382 +++ linux-2.6.35.4/drivers/s390/cio/qdio_debug.c 2010-09-17 20:12:09.000000000 -0400
27383 @@ -233,7 +233,7 @@ static int qperf_seq_open(struct inode *
27384 filp->f_path.dentry->d_inode->i_private);
27385 }
27386
27387 -static struct file_operations debugfs_perf_fops = {
27388 +static const struct file_operations debugfs_perf_fops = {
27389 .owner = THIS_MODULE,
27390 .open = qperf_seq_open,
27391 .read = seq_read,
27392 diff -urNp linux-2.6.35.4/drivers/scsi/ipr.c linux-2.6.35.4/drivers/scsi/ipr.c
27393 --- linux-2.6.35.4/drivers/scsi/ipr.c 2010-08-26 19:47:12.000000000 -0400
27394 +++ linux-2.6.35.4/drivers/scsi/ipr.c 2010-09-17 20:12:09.000000000 -0400
27395 @@ -6091,7 +6091,7 @@ static bool ipr_qc_fill_rtf(struct ata_q
27396 return true;
27397 }
27398
27399 -static struct ata_port_operations ipr_sata_ops = {
27400 +static const struct ata_port_operations ipr_sata_ops = {
27401 .phy_reset = ipr_ata_phy_reset,
27402 .hardreset = ipr_sata_reset,
27403 .post_internal_cmd = ipr_ata_post_internal,
27404 diff -urNp linux-2.6.35.4/drivers/scsi/libfc/fc_exch.c linux-2.6.35.4/drivers/scsi/libfc/fc_exch.c
27405 --- linux-2.6.35.4/drivers/scsi/libfc/fc_exch.c 2010-08-26 19:47:12.000000000 -0400
27406 +++ linux-2.6.35.4/drivers/scsi/libfc/fc_exch.c 2010-09-17 20:12:09.000000000 -0400
27407 @@ -100,12 +100,12 @@ struct fc_exch_mgr {
27408 * all together if not used XXX
27409 */
27410 struct {
27411 - atomic_t no_free_exch;
27412 - atomic_t no_free_exch_xid;
27413 - atomic_t xid_not_found;
27414 - atomic_t xid_busy;
27415 - atomic_t seq_not_found;
27416 - atomic_t non_bls_resp;
27417 + atomic_unchecked_t no_free_exch;
27418 + atomic_unchecked_t no_free_exch_xid;
27419 + atomic_unchecked_t xid_not_found;
27420 + atomic_unchecked_t xid_busy;
27421 + atomic_unchecked_t seq_not_found;
27422 + atomic_unchecked_t non_bls_resp;
27423 } stats;
27424 };
27425 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
27426 @@ -671,7 +671,7 @@ static struct fc_exch *fc_exch_em_alloc(
27427 /* allocate memory for exchange */
27428 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
27429 if (!ep) {
27430 - atomic_inc(&mp->stats.no_free_exch);
27431 + atomic_inc_unchecked(&mp->stats.no_free_exch);
27432 goto out;
27433 }
27434 memset(ep, 0, sizeof(*ep));
27435 @@ -719,7 +719,7 @@ out:
27436 return ep;
27437 err:
27438 spin_unlock_bh(&pool->lock);
27439 - atomic_inc(&mp->stats.no_free_exch_xid);
27440 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
27441 mempool_free(ep, mp->ep_pool);
27442 return NULL;
27443 }
27444 @@ -864,7 +864,7 @@ static enum fc_pf_rjt_reason fc_seq_look
27445 xid = ntohs(fh->fh_ox_id); /* we originated exch */
27446 ep = fc_exch_find(mp, xid);
27447 if (!ep) {
27448 - atomic_inc(&mp->stats.xid_not_found);
27449 + atomic_inc_unchecked(&mp->stats.xid_not_found);
27450 reject = FC_RJT_OX_ID;
27451 goto out;
27452 }
27453 @@ -894,7 +894,7 @@ static enum fc_pf_rjt_reason fc_seq_look
27454 ep = fc_exch_find(mp, xid);
27455 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
27456 if (ep) {
27457 - atomic_inc(&mp->stats.xid_busy);
27458 + atomic_inc_unchecked(&mp->stats.xid_busy);
27459 reject = FC_RJT_RX_ID;
27460 goto rel;
27461 }
27462 @@ -905,7 +905,7 @@ static enum fc_pf_rjt_reason fc_seq_look
27463 }
27464 xid = ep->xid; /* get our XID */
27465 } else if (!ep) {
27466 - atomic_inc(&mp->stats.xid_not_found);
27467 + atomic_inc_unchecked(&mp->stats.xid_not_found);
27468 reject = FC_RJT_RX_ID; /* XID not found */
27469 goto out;
27470 }
27471 @@ -922,7 +922,7 @@ static enum fc_pf_rjt_reason fc_seq_look
27472 } else {
27473 sp = &ep->seq;
27474 if (sp->id != fh->fh_seq_id) {
27475 - atomic_inc(&mp->stats.seq_not_found);
27476 + atomic_inc_unchecked(&mp->stats.seq_not_found);
27477 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
27478 goto rel;
27479 }
27480 @@ -1303,22 +1303,22 @@ static void fc_exch_recv_seq_resp(struct
27481
27482 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
27483 if (!ep) {
27484 - atomic_inc(&mp->stats.xid_not_found);
27485 + atomic_inc_unchecked(&mp->stats.xid_not_found);
27486 goto out;
27487 }
27488 if (ep->esb_stat & ESB_ST_COMPLETE) {
27489 - atomic_inc(&mp->stats.xid_not_found);
27490 + atomic_inc_unchecked(&mp->stats.xid_not_found);
27491 goto out;
27492 }
27493 if (ep->rxid == FC_XID_UNKNOWN)
27494 ep->rxid = ntohs(fh->fh_rx_id);
27495 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
27496 - atomic_inc(&mp->stats.xid_not_found);
27497 + atomic_inc_unchecked(&mp->stats.xid_not_found);
27498 goto rel;
27499 }
27500 if (ep->did != ntoh24(fh->fh_s_id) &&
27501 ep->did != FC_FID_FLOGI) {
27502 - atomic_inc(&mp->stats.xid_not_found);
27503 + atomic_inc_unchecked(&mp->stats.xid_not_found);
27504 goto rel;
27505 }
27506 sof = fr_sof(fp);
27507 @@ -1327,7 +1327,7 @@ static void fc_exch_recv_seq_resp(struct
27508 sp->ssb_stat |= SSB_ST_RESP;
27509 sp->id = fh->fh_seq_id;
27510 } else if (sp->id != fh->fh_seq_id) {
27511 - atomic_inc(&mp->stats.seq_not_found);
27512 + atomic_inc_unchecked(&mp->stats.seq_not_found);
27513 goto rel;
27514 }
27515
27516 @@ -1390,9 +1390,9 @@ static void fc_exch_recv_resp(struct fc_
27517 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
27518
27519 if (!sp)
27520 - atomic_inc(&mp->stats.xid_not_found);
27521 + atomic_inc_unchecked(&mp->stats.xid_not_found);
27522 else
27523 - atomic_inc(&mp->stats.non_bls_resp);
27524 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
27525
27526 fc_frame_free(fp);
27527 }
27528 diff -urNp linux-2.6.35.4/drivers/scsi/libsas/sas_ata.c linux-2.6.35.4/drivers/scsi/libsas/sas_ata.c
27529 --- linux-2.6.35.4/drivers/scsi/libsas/sas_ata.c 2010-08-26 19:47:12.000000000 -0400
27530 +++ linux-2.6.35.4/drivers/scsi/libsas/sas_ata.c 2010-09-17 20:12:09.000000000 -0400
27531 @@ -344,7 +344,7 @@ static int sas_ata_scr_read(struct ata_l
27532 }
27533 }
27534
27535 -static struct ata_port_operations sas_sata_ops = {
27536 +static const struct ata_port_operations sas_sata_ops = {
27537 .phy_reset = sas_ata_phy_reset,
27538 .post_internal_cmd = sas_ata_post_internal,
27539 .qc_prep = ata_noop_qc_prep,
27540 diff -urNp linux-2.6.35.4/drivers/scsi/mpt2sas/mpt2sas_debug.h linux-2.6.35.4/drivers/scsi/mpt2sas/mpt2sas_debug.h
27541 --- linux-2.6.35.4/drivers/scsi/mpt2sas/mpt2sas_debug.h 2010-08-26 19:47:12.000000000 -0400
27542 +++ linux-2.6.35.4/drivers/scsi/mpt2sas/mpt2sas_debug.h 2010-09-17 20:12:09.000000000 -0400
27543 @@ -79,7 +79,7 @@
27544 CMD; \
27545 }
27546 #else
27547 -#define MPT_CHECK_LOGGING(IOC, CMD, BITS)
27548 +#define MPT_CHECK_LOGGING(IOC, CMD, BITS) do {} while (0)
27549 #endif /* CONFIG_SCSI_MPT2SAS_LOGGING */
27550
27551
27552 diff -urNp linux-2.6.35.4/drivers/scsi/qla2xxx/qla_os.c linux-2.6.35.4/drivers/scsi/qla2xxx/qla_os.c
27553 --- linux-2.6.35.4/drivers/scsi/qla2xxx/qla_os.c 2010-08-26 19:47:12.000000000 -0400
27554 +++ linux-2.6.35.4/drivers/scsi/qla2xxx/qla_os.c 2010-09-17 20:12:09.000000000 -0400
27555 @@ -3899,7 +3899,7 @@ static struct pci_driver qla2xxx_pci_dri
27556 .err_handler = &qla2xxx_err_handler,
27557 };
27558
27559 -static struct file_operations apidev_fops = {
27560 +static const struct file_operations apidev_fops = {
27561 .owner = THIS_MODULE,
27562 };
27563
27564 diff -urNp linux-2.6.35.4/drivers/scsi/scsi_logging.h linux-2.6.35.4/drivers/scsi/scsi_logging.h
27565 --- linux-2.6.35.4/drivers/scsi/scsi_logging.h 2010-08-26 19:47:12.000000000 -0400
27566 +++ linux-2.6.35.4/drivers/scsi/scsi_logging.h 2010-09-17 20:12:09.000000000 -0400
27567 @@ -51,7 +51,7 @@ do { \
27568 } while (0); \
27569 } while (0)
27570 #else
27571 -#define SCSI_CHECK_LOGGING(SHIFT, BITS, LEVEL, CMD)
27572 +#define SCSI_CHECK_LOGGING(SHIFT, BITS, LEVEL, CMD) do {} while (0)
27573 #endif /* CONFIG_SCSI_LOGGING */
27574
27575 /*
27576 diff -urNp linux-2.6.35.4/drivers/scsi/sg.c linux-2.6.35.4/drivers/scsi/sg.c
27577 --- linux-2.6.35.4/drivers/scsi/sg.c 2010-08-26 19:47:12.000000000 -0400
27578 +++ linux-2.6.35.4/drivers/scsi/sg.c 2010-09-17 20:12:09.000000000 -0400
27579 @@ -2302,7 +2302,7 @@ struct sg_proc_leaf {
27580 const struct file_operations * fops;
27581 };
27582
27583 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
27584 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
27585 {"allow_dio", &adio_fops},
27586 {"debug", &debug_fops},
27587 {"def_reserved_size", &dressz_fops},
27588 @@ -2317,7 +2317,7 @@ sg_proc_init(void)
27589 {
27590 int k, mask;
27591 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
27592 - struct sg_proc_leaf * leaf;
27593 + const struct sg_proc_leaf * leaf;
27594
27595 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
27596 if (!sg_proc_sgp)
27597 diff -urNp linux-2.6.35.4/drivers/serial/8250_pci.c linux-2.6.35.4/drivers/serial/8250_pci.c
27598 --- linux-2.6.35.4/drivers/serial/8250_pci.c 2010-08-26 19:47:12.000000000 -0400
27599 +++ linux-2.6.35.4/drivers/serial/8250_pci.c 2010-09-17 20:12:09.000000000 -0400
27600 @@ -3777,7 +3777,7 @@ static struct pci_device_id serial_pci_t
27601 PCI_ANY_ID, PCI_ANY_ID,
27602 PCI_CLASS_COMMUNICATION_MULTISERIAL << 8,
27603 0xffff00, pbn_default },
27604 - { 0, }
27605 + { 0, 0, 0, 0, 0, 0, 0 }
27606 };
27607
27608 static struct pci_driver serial_pci_driver = {
27609 diff -urNp linux-2.6.35.4/drivers/serial/kgdboc.c linux-2.6.35.4/drivers/serial/kgdboc.c
27610 --- linux-2.6.35.4/drivers/serial/kgdboc.c 2010-08-26 19:47:12.000000000 -0400
27611 +++ linux-2.6.35.4/drivers/serial/kgdboc.c 2010-09-17 20:12:09.000000000 -0400
27612 @@ -20,7 +20,7 @@
27613
27614 #define MAX_CONFIG_LEN 40
27615
27616 -static struct kgdb_io kgdboc_io_ops;
27617 +static struct kgdb_io kgdboc_io_ops;
27618
27619 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
27620 static int configured = -1;
27621 diff -urNp linux-2.6.35.4/drivers/staging/comedi/comedi_fops.c linux-2.6.35.4/drivers/staging/comedi/comedi_fops.c
27622 --- linux-2.6.35.4/drivers/staging/comedi/comedi_fops.c 2010-08-26 19:47:12.000000000 -0400
27623 +++ linux-2.6.35.4/drivers/staging/comedi/comedi_fops.c 2010-09-17 20:12:09.000000000 -0400
27624 @@ -1425,7 +1425,7 @@ static void comedi_unmap(struct vm_area_
27625 mutex_unlock(&dev->mutex);
27626 }
27627
27628 -static struct vm_operations_struct comedi_vm_ops = {
27629 +static const struct vm_operations_struct comedi_vm_ops = {
27630 .close = comedi_unmap,
27631 };
27632
27633 diff -urNp linux-2.6.35.4/drivers/staging/dream/pmem.c linux-2.6.35.4/drivers/staging/dream/pmem.c
27634 --- linux-2.6.35.4/drivers/staging/dream/pmem.c 2010-08-26 19:47:12.000000000 -0400
27635 +++ linux-2.6.35.4/drivers/staging/dream/pmem.c 2010-09-17 20:12:09.000000000 -0400
27636 @@ -175,7 +175,7 @@ static int pmem_mmap(struct file *, stru
27637 static int pmem_open(struct inode *, struct file *);
27638 static long pmem_ioctl(struct file *, unsigned int, unsigned long);
27639
27640 -struct file_operations pmem_fops = {
27641 +const struct file_operations pmem_fops = {
27642 .release = pmem_release,
27643 .mmap = pmem_mmap,
27644 .open = pmem_open,
27645 @@ -1201,7 +1201,7 @@ static ssize_t debug_read(struct file *f
27646 return simple_read_from_buffer(buf, count, ppos, buffer, n);
27647 }
27648
27649 -static struct file_operations debug_fops = {
27650 +static const struct file_operations debug_fops = {
27651 .read = debug_read,
27652 .open = debug_open,
27653 };
27654 diff -urNp linux-2.6.35.4/drivers/staging/dream/qdsp5/adsp_driver.c linux-2.6.35.4/drivers/staging/dream/qdsp5/adsp_driver.c
27655 --- linux-2.6.35.4/drivers/staging/dream/qdsp5/adsp_driver.c 2010-08-26 19:47:12.000000000 -0400
27656 +++ linux-2.6.35.4/drivers/staging/dream/qdsp5/adsp_driver.c 2010-09-17 20:12:09.000000000 -0400
27657 @@ -577,7 +577,7 @@ static struct adsp_device *inode_to_devi
27658 static dev_t adsp_devno;
27659 static struct class *adsp_class;
27660
27661 -static struct file_operations adsp_fops = {
27662 +static const struct file_operations adsp_fops = {
27663 .owner = THIS_MODULE,
27664 .open = adsp_open,
27665 .unlocked_ioctl = adsp_ioctl,
27666 diff -urNp linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_aac.c linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_aac.c
27667 --- linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_aac.c 2010-08-26 19:47:12.000000000 -0400
27668 +++ linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_aac.c 2010-09-17 20:12:09.000000000 -0400
27669 @@ -1023,7 +1023,7 @@ done:
27670 return rc;
27671 }
27672
27673 -static struct file_operations audio_aac_fops = {
27674 +static const struct file_operations audio_aac_fops = {
27675 .owner = THIS_MODULE,
27676 .open = audio_open,
27677 .release = audio_release,
27678 diff -urNp linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_amrnb.c linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_amrnb.c
27679 --- linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_amrnb.c 2010-08-26 19:47:12.000000000 -0400
27680 +++ linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_amrnb.c 2010-09-17 20:12:09.000000000 -0400
27681 @@ -834,7 +834,7 @@ done:
27682 return rc;
27683 }
27684
27685 -static struct file_operations audio_amrnb_fops = {
27686 +static const struct file_operations audio_amrnb_fops = {
27687 .owner = THIS_MODULE,
27688 .open = audamrnb_open,
27689 .release = audamrnb_release,
27690 diff -urNp linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_evrc.c linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_evrc.c
27691 --- linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_evrc.c 2010-08-26 19:47:12.000000000 -0400
27692 +++ linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_evrc.c 2010-09-17 20:12:09.000000000 -0400
27693 @@ -806,7 +806,7 @@ dma_fail:
27694 return rc;
27695 }
27696
27697 -static struct file_operations audio_evrc_fops = {
27698 +static const struct file_operations audio_evrc_fops = {
27699 .owner = THIS_MODULE,
27700 .open = audevrc_open,
27701 .release = audevrc_release,
27702 diff -urNp linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_in.c linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_in.c
27703 --- linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_in.c 2010-08-26 19:47:12.000000000 -0400
27704 +++ linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_in.c 2010-09-17 20:12:09.000000000 -0400
27705 @@ -914,7 +914,7 @@ static int audpre_open(struct inode *ino
27706 return 0;
27707 }
27708
27709 -static struct file_operations audio_fops = {
27710 +static const struct file_operations audio_fops = {
27711 .owner = THIS_MODULE,
27712 .open = audio_in_open,
27713 .release = audio_in_release,
27714 @@ -923,7 +923,7 @@ static struct file_operations audio_fops
27715 .unlocked_ioctl = audio_in_ioctl,
27716 };
27717
27718 -static struct file_operations audpre_fops = {
27719 +static const struct file_operations audpre_fops = {
27720 .owner = THIS_MODULE,
27721 .open = audpre_open,
27722 .unlocked_ioctl = audpre_ioctl,
27723 diff -urNp linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_mp3.c linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_mp3.c
27724 --- linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_mp3.c 2010-08-26 19:47:12.000000000 -0400
27725 +++ linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_mp3.c 2010-09-17 20:12:09.000000000 -0400
27726 @@ -941,7 +941,7 @@ done:
27727 return rc;
27728 }
27729
27730 -static struct file_operations audio_mp3_fops = {
27731 +static const struct file_operations audio_mp3_fops = {
27732 .owner = THIS_MODULE,
27733 .open = audio_open,
27734 .release = audio_release,
27735 diff -urNp linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_out.c linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_out.c
27736 --- linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_out.c 2010-08-26 19:47:12.000000000 -0400
27737 +++ linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_out.c 2010-09-17 20:12:09.000000000 -0400
27738 @@ -800,7 +800,7 @@ static int audpp_open(struct inode *inod
27739 return 0;
27740 }
27741
27742 -static struct file_operations audio_fops = {
27743 +static const struct file_operations audio_fops = {
27744 .owner = THIS_MODULE,
27745 .open = audio_open,
27746 .release = audio_release,
27747 @@ -809,7 +809,7 @@ static struct file_operations audio_fops
27748 .unlocked_ioctl = audio_ioctl,
27749 };
27750
27751 -static struct file_operations audpp_fops = {
27752 +static const struct file_operations audpp_fops = {
27753 .owner = THIS_MODULE,
27754 .open = audpp_open,
27755 .unlocked_ioctl = audpp_ioctl,
27756 diff -urNp linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_qcelp.c linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_qcelp.c
27757 --- linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_qcelp.c 2010-08-26 19:47:12.000000000 -0400
27758 +++ linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_qcelp.c 2010-09-17 20:12:09.000000000 -0400
27759 @@ -817,7 +817,7 @@ err:
27760 return rc;
27761 }
27762
27763 -static struct file_operations audio_qcelp_fops = {
27764 +static const struct file_operations audio_qcelp_fops = {
27765 .owner = THIS_MODULE,
27766 .open = audqcelp_open,
27767 .release = audqcelp_release,
27768 diff -urNp linux-2.6.35.4/drivers/staging/dream/qdsp5/snd.c linux-2.6.35.4/drivers/staging/dream/qdsp5/snd.c
27769 --- linux-2.6.35.4/drivers/staging/dream/qdsp5/snd.c 2010-08-26 19:47:12.000000000 -0400
27770 +++ linux-2.6.35.4/drivers/staging/dream/qdsp5/snd.c 2010-09-17 20:12:09.000000000 -0400
27771 @@ -242,7 +242,7 @@ err:
27772 return rc;
27773 }
27774
27775 -static struct file_operations snd_fops = {
27776 +static const struct file_operations snd_fops = {
27777 .owner = THIS_MODULE,
27778 .open = snd_open,
27779 .release = snd_release,
27780 diff -urNp linux-2.6.35.4/drivers/staging/dt3155/dt3155_drv.c linux-2.6.35.4/drivers/staging/dt3155/dt3155_drv.c
27781 --- linux-2.6.35.4/drivers/staging/dt3155/dt3155_drv.c 2010-08-26 19:47:12.000000000 -0400
27782 +++ linux-2.6.35.4/drivers/staging/dt3155/dt3155_drv.c 2010-09-17 20:12:09.000000000 -0400
27783 @@ -853,7 +853,7 @@ dt3155_unlocked_ioctl(struct file *file,
27784 * needed by init_module
27785 * register_chrdev
27786 *****************************************************/
27787 -static struct file_operations dt3155_fops = {
27788 +static const struct file_operations dt3155_fops = {
27789 .read = dt3155_read,
27790 .unlocked_ioctl = dt3155_unlocked_ioctl,
27791 .mmap = dt3155_mmap,
27792 diff -urNp linux-2.6.35.4/drivers/staging/go7007/go7007-v4l2.c linux-2.6.35.4/drivers/staging/go7007/go7007-v4l2.c
27793 --- linux-2.6.35.4/drivers/staging/go7007/go7007-v4l2.c 2010-08-26 19:47:12.000000000 -0400
27794 +++ linux-2.6.35.4/drivers/staging/go7007/go7007-v4l2.c 2010-09-17 20:12:09.000000000 -0400
27795 @@ -1673,7 +1673,7 @@ static int go7007_vm_fault(struct vm_are
27796 return 0;
27797 }
27798
27799 -static struct vm_operations_struct go7007_vm_ops = {
27800 +static const struct vm_operations_struct go7007_vm_ops = {
27801 .open = go7007_vm_open,
27802 .close = go7007_vm_close,
27803 .fault = go7007_vm_fault,
27804 diff -urNp linux-2.6.35.4/drivers/staging/hv/hv.c linux-2.6.35.4/drivers/staging/hv/hv.c
27805 --- linux-2.6.35.4/drivers/staging/hv/hv.c 2010-08-26 19:47:12.000000000 -0400
27806 +++ linux-2.6.35.4/drivers/staging/hv/hv.c 2010-09-17 20:12:09.000000000 -0400
27807 @@ -162,7 +162,7 @@ static u64 HvDoHypercall(u64 Control, vo
27808 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
27809 u32 outputAddressHi = outputAddress >> 32;
27810 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
27811 - volatile void *hypercallPage = gHvContext.HypercallPage;
27812 + volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
27813
27814 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
27815 Control, Input, Output);
27816 diff -urNp linux-2.6.35.4/drivers/staging/msm/msm_fb_bl.c linux-2.6.35.4/drivers/staging/msm/msm_fb_bl.c
27817 --- linux-2.6.35.4/drivers/staging/msm/msm_fb_bl.c 2010-08-26 19:47:12.000000000 -0400
27818 +++ linux-2.6.35.4/drivers/staging/msm/msm_fb_bl.c 2010-09-17 20:12:09.000000000 -0400
27819 @@ -42,7 +42,7 @@ static int msm_fb_bl_update_status(struc
27820 return 0;
27821 }
27822
27823 -static struct backlight_ops msm_fb_bl_ops = {
27824 +static const struct backlight_ops msm_fb_bl_ops = {
27825 .get_brightness = msm_fb_bl_get_brightness,
27826 .update_status = msm_fb_bl_update_status,
27827 };
27828 diff -urNp linux-2.6.35.4/drivers/staging/panel/panel.c linux-2.6.35.4/drivers/staging/panel/panel.c
27829 --- linux-2.6.35.4/drivers/staging/panel/panel.c 2010-08-26 19:47:12.000000000 -0400
27830 +++ linux-2.6.35.4/drivers/staging/panel/panel.c 2010-09-17 20:12:09.000000000 -0400
27831 @@ -1304,7 +1304,7 @@ static int lcd_release(struct inode *ino
27832 return 0;
27833 }
27834
27835 -static struct file_operations lcd_fops = {
27836 +static const struct file_operations lcd_fops = {
27837 .write = lcd_write,
27838 .open = lcd_open,
27839 .release = lcd_release,
27840 @@ -1564,7 +1564,7 @@ static int keypad_release(struct inode *
27841 return 0;
27842 }
27843
27844 -static struct file_operations keypad_fops = {
27845 +static const struct file_operations keypad_fops = {
27846 .read = keypad_read, /* read */
27847 .open = keypad_open, /* open */
27848 .release = keypad_release, /* close */
27849 diff -urNp linux-2.6.35.4/drivers/staging/phison/phison.c linux-2.6.35.4/drivers/staging/phison/phison.c
27850 --- linux-2.6.35.4/drivers/staging/phison/phison.c 2010-08-26 19:47:12.000000000 -0400
27851 +++ linux-2.6.35.4/drivers/staging/phison/phison.c 2010-09-17 20:12:09.000000000 -0400
27852 @@ -43,7 +43,7 @@ static struct scsi_host_template phison_
27853 ATA_BMDMA_SHT(DRV_NAME),
27854 };
27855
27856 -static struct ata_port_operations phison_ops = {
27857 +static const struct ata_port_operations phison_ops = {
27858 .inherits = &ata_bmdma_port_ops,
27859 .prereset = phison_pre_reset,
27860 };
27861 diff -urNp linux-2.6.35.4/drivers/staging/pohmelfs/inode.c linux-2.6.35.4/drivers/staging/pohmelfs/inode.c
27862 --- linux-2.6.35.4/drivers/staging/pohmelfs/inode.c 2010-08-26 19:47:12.000000000 -0400
27863 +++ linux-2.6.35.4/drivers/staging/pohmelfs/inode.c 2010-09-17 20:12:09.000000000 -0400
27864 @@ -1846,7 +1846,7 @@ static int pohmelfs_fill_super(struct su
27865 mutex_init(&psb->mcache_lock);
27866 psb->mcache_root = RB_ROOT;
27867 psb->mcache_timeout = msecs_to_jiffies(5000);
27868 - atomic_long_set(&psb->mcache_gen, 0);
27869 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
27870
27871 psb->trans_max_pages = 100;
27872
27873 diff -urNp linux-2.6.35.4/drivers/staging/pohmelfs/mcache.c linux-2.6.35.4/drivers/staging/pohmelfs/mcache.c
27874 --- linux-2.6.35.4/drivers/staging/pohmelfs/mcache.c 2010-08-26 19:47:12.000000000 -0400
27875 +++ linux-2.6.35.4/drivers/staging/pohmelfs/mcache.c 2010-09-17 20:12:09.000000000 -0400
27876 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
27877 m->data = data;
27878 m->start = start;
27879 m->size = size;
27880 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
27881 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
27882
27883 mutex_lock(&psb->mcache_lock);
27884 err = pohmelfs_mcache_insert(psb, m);
27885 diff -urNp linux-2.6.35.4/drivers/staging/pohmelfs/netfs.h linux-2.6.35.4/drivers/staging/pohmelfs/netfs.h
27886 --- linux-2.6.35.4/drivers/staging/pohmelfs/netfs.h 2010-08-26 19:47:12.000000000 -0400
27887 +++ linux-2.6.35.4/drivers/staging/pohmelfs/netfs.h 2010-09-17 20:12:09.000000000 -0400
27888 @@ -571,7 +571,7 @@ struct pohmelfs_config;
27889 struct pohmelfs_sb {
27890 struct rb_root mcache_root;
27891 struct mutex mcache_lock;
27892 - atomic_long_t mcache_gen;
27893 + atomic_long_unchecked_t mcache_gen;
27894 unsigned long mcache_timeout;
27895
27896 unsigned int idx;
27897 diff -urNp linux-2.6.35.4/drivers/staging/ramzswap/ramzswap_drv.c linux-2.6.35.4/drivers/staging/ramzswap/ramzswap_drv.c
27898 --- linux-2.6.35.4/drivers/staging/ramzswap/ramzswap_drv.c 2010-08-26 19:47:12.000000000 -0400
27899 +++ linux-2.6.35.4/drivers/staging/ramzswap/ramzswap_drv.c 2010-09-17 20:12:09.000000000 -0400
27900 @@ -693,7 +693,7 @@ void ramzswap_slot_free_notify(struct bl
27901 return;
27902 }
27903
27904 -static struct block_device_operations ramzswap_devops = {
27905 +static const struct block_device_operations ramzswap_devops = {
27906 .ioctl = ramzswap_ioctl,
27907 .swap_slot_free_notify = ramzswap_slot_free_notify,
27908 .owner = THIS_MODULE
27909 diff -urNp linux-2.6.35.4/drivers/staging/rtl8192u/ieee80211/proc.c linux-2.6.35.4/drivers/staging/rtl8192u/ieee80211/proc.c
27910 --- linux-2.6.35.4/drivers/staging/rtl8192u/ieee80211/proc.c 2010-08-26 19:47:12.000000000 -0400
27911 +++ linux-2.6.35.4/drivers/staging/rtl8192u/ieee80211/proc.c 2010-09-17 20:12:09.000000000 -0400
27912 @@ -99,7 +99,7 @@ static int crypto_info_open(struct inode
27913 return seq_open(file, &crypto_seq_ops);
27914 }
27915
27916 -static struct file_operations proc_crypto_ops = {
27917 +static const struct file_operations proc_crypto_ops = {
27918 .open = crypto_info_open,
27919 .read = seq_read,
27920 .llseek = seq_lseek,
27921 diff -urNp linux-2.6.35.4/drivers/staging/samsung-laptop/samsung-laptop.c linux-2.6.35.4/drivers/staging/samsung-laptop/samsung-laptop.c
27922 --- linux-2.6.35.4/drivers/staging/samsung-laptop/samsung-laptop.c 2010-08-26 19:47:12.000000000 -0400
27923 +++ linux-2.6.35.4/drivers/staging/samsung-laptop/samsung-laptop.c 2010-09-17 20:12:09.000000000 -0400
27924 @@ -269,7 +269,7 @@ static int update_status(struct backligh
27925 return 0;
27926 }
27927
27928 -static struct backlight_ops backlight_ops = {
27929 +static const struct backlight_ops backlight_ops = {
27930 .get_brightness = get_brightness,
27931 .update_status = update_status,
27932 };
27933 diff -urNp linux-2.6.35.4/drivers/staging/sep/sep_driver.c linux-2.6.35.4/drivers/staging/sep/sep_driver.c
27934 --- linux-2.6.35.4/drivers/staging/sep/sep_driver.c 2010-08-26 19:47:12.000000000 -0400
27935 +++ linux-2.6.35.4/drivers/staging/sep/sep_driver.c 2010-09-17 20:12:09.000000000 -0400
27936 @@ -2637,7 +2637,7 @@ static struct pci_driver sep_pci_driver
27937 static dev_t sep_devno;
27938
27939 /* the files operations structure of the driver */
27940 -static struct file_operations sep_file_operations = {
27941 +static const struct file_operations sep_file_operations = {
27942 .owner = THIS_MODULE,
27943 .unlocked_ioctl = sep_ioctl,
27944 .poll = sep_poll,
27945 diff -urNp linux-2.6.35.4/drivers/staging/vme/devices/vme_user.c linux-2.6.35.4/drivers/staging/vme/devices/vme_user.c
27946 --- linux-2.6.35.4/drivers/staging/vme/devices/vme_user.c 2010-08-26 19:47:12.000000000 -0400
27947 +++ linux-2.6.35.4/drivers/staging/vme/devices/vme_user.c 2010-09-17 20:12:09.000000000 -0400
27948 @@ -136,7 +136,7 @@ static long vme_user_unlocked_ioctl(stru
27949 static int __init vme_user_probe(struct device *, int, int);
27950 static int __exit vme_user_remove(struct device *, int, int);
27951
27952 -static struct file_operations vme_user_fops = {
27953 +static const struct file_operations vme_user_fops = {
27954 .open = vme_user_open,
27955 .release = vme_user_release,
27956 .read = vme_user_read,
27957 diff -urNp linux-2.6.35.4/drivers/usb/atm/usbatm.c linux-2.6.35.4/drivers/usb/atm/usbatm.c
27958 --- linux-2.6.35.4/drivers/usb/atm/usbatm.c 2010-08-26 19:47:12.000000000 -0400
27959 +++ linux-2.6.35.4/drivers/usb/atm/usbatm.c 2010-09-17 20:12:09.000000000 -0400
27960 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(stru
27961 if (printk_ratelimit())
27962 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
27963 __func__, vpi, vci);
27964 - atomic_inc(&vcc->stats->rx_err);
27965 + atomic_inc_unchecked(&vcc->stats->rx_err);
27966 return;
27967 }
27968
27969 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(stru
27970 if (length > ATM_MAX_AAL5_PDU) {
27971 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
27972 __func__, length, vcc);
27973 - atomic_inc(&vcc->stats->rx_err);
27974 + atomic_inc_unchecked(&vcc->stats->rx_err);
27975 goto out;
27976 }
27977
27978 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(stru
27979 if (sarb->len < pdu_length) {
27980 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
27981 __func__, pdu_length, sarb->len, vcc);
27982 - atomic_inc(&vcc->stats->rx_err);
27983 + atomic_inc_unchecked(&vcc->stats->rx_err);
27984 goto out;
27985 }
27986
27987 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
27988 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
27989 __func__, vcc);
27990 - atomic_inc(&vcc->stats->rx_err);
27991 + atomic_inc_unchecked(&vcc->stats->rx_err);
27992 goto out;
27993 }
27994
27995 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(stru
27996 if (printk_ratelimit())
27997 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
27998 __func__, length);
27999 - atomic_inc(&vcc->stats->rx_drop);
28000 + atomic_inc_unchecked(&vcc->stats->rx_drop);
28001 goto out;
28002 }
28003
28004 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(stru
28005
28006 vcc->push(vcc, skb);
28007
28008 - atomic_inc(&vcc->stats->rx);
28009 + atomic_inc_unchecked(&vcc->stats->rx);
28010 out:
28011 skb_trim(sarb, 0);
28012 }
28013 @@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned l
28014 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
28015
28016 usbatm_pop(vcc, skb);
28017 - atomic_inc(&vcc->stats->tx);
28018 + atomic_inc_unchecked(&vcc->stats->tx);
28019
28020 skb = skb_dequeue(&instance->sndqueue);
28021 }
28022 @@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct a
28023 if (!left--)
28024 return sprintf(page,
28025 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
28026 - atomic_read(&atm_dev->stats.aal5.tx),
28027 - atomic_read(&atm_dev->stats.aal5.tx_err),
28028 - atomic_read(&atm_dev->stats.aal5.rx),
28029 - atomic_read(&atm_dev->stats.aal5.rx_err),
28030 - atomic_read(&atm_dev->stats.aal5.rx_drop));
28031 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
28032 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
28033 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
28034 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
28035 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
28036
28037 if (!left--) {
28038 if (instance->disconnected)
28039 diff -urNp linux-2.6.35.4/drivers/usb/class/cdc-acm.c linux-2.6.35.4/drivers/usb/class/cdc-acm.c
28040 --- linux-2.6.35.4/drivers/usb/class/cdc-acm.c 2010-08-26 19:47:12.000000000 -0400
28041 +++ linux-2.6.35.4/drivers/usb/class/cdc-acm.c 2010-09-17 20:12:09.000000000 -0400
28042 @@ -1619,7 +1619,7 @@ static const struct usb_device_id acm_id
28043 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
28044 USB_CDC_ACM_PROTO_AT_CDMA) },
28045
28046 - { }
28047 + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }
28048 };
28049
28050 MODULE_DEVICE_TABLE(usb, acm_ids);
28051 diff -urNp linux-2.6.35.4/drivers/usb/class/cdc-wdm.c linux-2.6.35.4/drivers/usb/class/cdc-wdm.c
28052 --- linux-2.6.35.4/drivers/usb/class/cdc-wdm.c 2010-08-26 19:47:12.000000000 -0400
28053 +++ linux-2.6.35.4/drivers/usb/class/cdc-wdm.c 2010-09-17 20:12:09.000000000 -0400
28054 @@ -342,7 +342,7 @@ static ssize_t wdm_write
28055 goto outnp;
28056 }
28057
28058 - if (!file->f_flags && O_NONBLOCK)
28059 + if (!(file->f_flags & O_NONBLOCK))
28060 r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
28061 &desc->flags));
28062 else
28063 diff -urNp linux-2.6.35.4/drivers/usb/class/usblp.c linux-2.6.35.4/drivers/usb/class/usblp.c
28064 --- linux-2.6.35.4/drivers/usb/class/usblp.c 2010-08-26 19:47:12.000000000 -0400
28065 +++ linux-2.6.35.4/drivers/usb/class/usblp.c 2010-09-17 20:12:09.000000000 -0400
28066 @@ -226,7 +226,7 @@ static const struct quirk_printer_struct
28067 { 0x0482, 0x0010, USBLP_QUIRK_BIDIR }, /* Kyocera Mita FS 820, by zut <kernel@zut.de> */
28068 { 0x04f9, 0x000d, USBLP_QUIRK_BIDIR }, /* Brother Industries, Ltd HL-1440 Laser Printer */
28069 { 0x04b8, 0x0202, USBLP_QUIRK_BAD_CLASS }, /* Seiko Epson Receipt Printer M129C */
28070 - { 0, 0 }
28071 + { 0, 0, 0 }
28072 };
28073
28074 static int usblp_wwait(struct usblp *usblp, int nonblock);
28075 @@ -1398,7 +1398,7 @@ static const struct usb_device_id usblp_
28076 { USB_INTERFACE_INFO(7, 1, 2) },
28077 { USB_INTERFACE_INFO(7, 1, 3) },
28078 { USB_DEVICE(0x04b8, 0x0202) }, /* Seiko Epson Receipt Printer M129C */
28079 - { } /* Terminating entry */
28080 + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } /* Terminating entry */
28081 };
28082
28083 MODULE_DEVICE_TABLE (usb, usblp_ids);
28084 diff -urNp linux-2.6.35.4/drivers/usb/core/hcd.c linux-2.6.35.4/drivers/usb/core/hcd.c
28085 --- linux-2.6.35.4/drivers/usb/core/hcd.c 2010-08-26 19:47:12.000000000 -0400
28086 +++ linux-2.6.35.4/drivers/usb/core/hcd.c 2010-09-17 20:12:09.000000000 -0400
28087 @@ -2381,7 +2381,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutd
28088
28089 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
28090
28091 -struct usb_mon_operations *mon_ops;
28092 +const struct usb_mon_operations *mon_ops;
28093
28094 /*
28095 * The registration is unlocked.
28096 @@ -2391,7 +2391,7 @@ struct usb_mon_operations *mon_ops;
28097 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
28098 */
28099
28100 -int usb_mon_register (struct usb_mon_operations *ops)
28101 +int usb_mon_register (const struct usb_mon_operations *ops)
28102 {
28103
28104 if (mon_ops)
28105 diff -urNp linux-2.6.35.4/drivers/usb/core/hub.c linux-2.6.35.4/drivers/usb/core/hub.c
28106 --- linux-2.6.35.4/drivers/usb/core/hub.c 2010-08-26 19:47:12.000000000 -0400
28107 +++ linux-2.6.35.4/drivers/usb/core/hub.c 2010-09-17 20:12:09.000000000 -0400
28108 @@ -3453,7 +3453,7 @@ static const struct usb_device_id hub_id
28109 .bDeviceClass = USB_CLASS_HUB},
28110 { .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS,
28111 .bInterfaceClass = USB_CLASS_HUB},
28112 - { } /* Terminating entry */
28113 + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } /* Terminating entry */
28114 };
28115
28116 MODULE_DEVICE_TABLE (usb, hub_id_table);
28117 diff -urNp linux-2.6.35.4/drivers/usb/core/message.c linux-2.6.35.4/drivers/usb/core/message.c
28118 --- linux-2.6.35.4/drivers/usb/core/message.c 2010-08-26 19:47:12.000000000 -0400
28119 +++ linux-2.6.35.4/drivers/usb/core/message.c 2010-09-17 20:12:09.000000000 -0400
28120 @@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device
28121 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
28122 if (buf) {
28123 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
28124 - if (len > 0) {
28125 - smallbuf = kmalloc(++len, GFP_NOIO);
28126 + if (len++ > 0) {
28127 + smallbuf = kmalloc(len, GFP_NOIO);
28128 if (!smallbuf)
28129 return buf;
28130 memcpy(smallbuf, buf, len);
28131 diff -urNp linux-2.6.35.4/drivers/usb/early/ehci-dbgp.c linux-2.6.35.4/drivers/usb/early/ehci-dbgp.c
28132 --- linux-2.6.35.4/drivers/usb/early/ehci-dbgp.c 2010-08-26 19:47:12.000000000 -0400
28133 +++ linux-2.6.35.4/drivers/usb/early/ehci-dbgp.c 2010-09-17 20:12:09.000000000 -0400
28134 @@ -1026,6 +1026,7 @@ static void kgdbdbgp_write_char(u8 chr)
28135 early_dbgp_write(NULL, &chr, 1);
28136 }
28137
28138 +/* cannot be const, see kgdbdbgp_parse_config() */
28139 static struct kgdb_io kgdbdbgp_io_ops = {
28140 .name = "kgdbdbgp",
28141 .read_char = kgdbdbgp_read_char,
28142 diff -urNp linux-2.6.35.4/drivers/usb/host/ehci-pci.c linux-2.6.35.4/drivers/usb/host/ehci-pci.c
28143 --- linux-2.6.35.4/drivers/usb/host/ehci-pci.c 2010-08-26 19:47:12.000000000 -0400
28144 +++ linux-2.6.35.4/drivers/usb/host/ehci-pci.c 2010-09-17 20:12:09.000000000 -0400
28145 @@ -419,7 +419,7 @@ static const struct pci_device_id pci_id
28146 PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_EHCI, ~0),
28147 .driver_data = (unsigned long) &ehci_pci_hc_driver,
28148 },
28149 - { /* end: all zeroes */ }
28150 + { 0, 0, 0, 0, 0, 0, 0 }
28151 };
28152 MODULE_DEVICE_TABLE(pci, pci_ids);
28153
28154 diff -urNp linux-2.6.35.4/drivers/usb/host/uhci-hcd.c linux-2.6.35.4/drivers/usb/host/uhci-hcd.c
28155 --- linux-2.6.35.4/drivers/usb/host/uhci-hcd.c 2010-08-26 19:47:12.000000000 -0400
28156 +++ linux-2.6.35.4/drivers/usb/host/uhci-hcd.c 2010-09-17 20:12:09.000000000 -0400
28157 @@ -941,7 +941,7 @@ static const struct pci_device_id uhci_p
28158 /* handle any USB UHCI controller */
28159 PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_UHCI, ~0),
28160 .driver_data = (unsigned long) &uhci_driver,
28161 - }, { /* end: all zeroes */ }
28162 + }, { 0, 0, 0, 0, 0, 0, 0 }
28163 };
28164
28165 MODULE_DEVICE_TABLE(pci, uhci_pci_ids);
28166 diff -urNp linux-2.6.35.4/drivers/usb/mon/mon_main.c linux-2.6.35.4/drivers/usb/mon/mon_main.c
28167 --- linux-2.6.35.4/drivers/usb/mon/mon_main.c 2010-08-26 19:47:12.000000000 -0400
28168 +++ linux-2.6.35.4/drivers/usb/mon/mon_main.c 2010-09-17 20:12:09.000000000 -0400
28169 @@ -240,7 +240,7 @@ static struct notifier_block mon_nb = {
28170 /*
28171 * Ops
28172 */
28173 -static struct usb_mon_operations mon_ops_0 = {
28174 +static const struct usb_mon_operations mon_ops_0 = {
28175 .urb_submit = mon_submit,
28176 .urb_submit_error = mon_submit_error,
28177 .urb_complete = mon_complete,
28178 diff -urNp linux-2.6.35.4/drivers/usb/storage/debug.h linux-2.6.35.4/drivers/usb/storage/debug.h
28179 --- linux-2.6.35.4/drivers/usb/storage/debug.h 2010-08-26 19:47:12.000000000 -0400
28180 +++ linux-2.6.35.4/drivers/usb/storage/debug.h 2010-09-17 20:12:09.000000000 -0400
28181 @@ -54,9 +54,9 @@ void usb_stor_show_sense( unsigned char
28182 #define US_DEBUGPX(x...) printk( x )
28183 #define US_DEBUG(x) x
28184 #else
28185 -#define US_DEBUGP(x...)
28186 -#define US_DEBUGPX(x...)
28187 -#define US_DEBUG(x)
28188 +#define US_DEBUGP(x...) do {} while (0)
28189 +#define US_DEBUGPX(x...) do {} while (0)
28190 +#define US_DEBUG(x) do {} while (0)
28191 #endif
28192
28193 #endif
28194 diff -urNp linux-2.6.35.4/drivers/usb/storage/usb.c linux-2.6.35.4/drivers/usb/storage/usb.c
28195 --- linux-2.6.35.4/drivers/usb/storage/usb.c 2010-08-26 19:47:12.000000000 -0400
28196 +++ linux-2.6.35.4/drivers/usb/storage/usb.c 2010-09-17 20:12:09.000000000 -0400
28197 @@ -122,7 +122,7 @@ MODULE_PARM_DESC(quirks, "supplemental l
28198
28199 static struct us_unusual_dev us_unusual_dev_list[] = {
28200 # include "unusual_devs.h"
28201 - { } /* Terminating entry */
28202 + { NULL, NULL, 0, 0, NULL } /* Terminating entry */
28203 };
28204
28205 #undef UNUSUAL_DEV
28206 diff -urNp linux-2.6.35.4/drivers/usb/storage/usual-tables.c linux-2.6.35.4/drivers/usb/storage/usual-tables.c
28207 --- linux-2.6.35.4/drivers/usb/storage/usual-tables.c 2010-08-26 19:47:12.000000000 -0400
28208 +++ linux-2.6.35.4/drivers/usb/storage/usual-tables.c 2010-09-17 20:12:09.000000000 -0400
28209 @@ -48,7 +48,7 @@
28210
28211 struct usb_device_id usb_storage_usb_ids[] = {
28212 # include "unusual_devs.h"
28213 - { } /* Terminating entry */
28214 + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } /* Terminating entry */
28215 };
28216 EXPORT_SYMBOL_GPL(usb_storage_usb_ids);
28217
28218 diff -urNp linux-2.6.35.4/drivers/uwb/wlp/messages.c linux-2.6.35.4/drivers/uwb/wlp/messages.c
28219 --- linux-2.6.35.4/drivers/uwb/wlp/messages.c 2010-08-26 19:47:12.000000000 -0400
28220 +++ linux-2.6.35.4/drivers/uwb/wlp/messages.c 2010-09-17 20:12:09.000000000 -0400
28221 @@ -920,7 +920,7 @@ int wlp_parse_f0(struct wlp *wlp, struct
28222 size_t len = skb->len;
28223 size_t used;
28224 ssize_t result;
28225 - struct wlp_nonce enonce, rnonce;
28226 + struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
28227 enum wlp_assc_error assc_err;
28228 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
28229 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
28230 diff -urNp linux-2.6.35.4/drivers/vhost/vhost.c linux-2.6.35.4/drivers/vhost/vhost.c
28231 --- linux-2.6.35.4/drivers/vhost/vhost.c 2010-08-26 19:47:12.000000000 -0400
28232 +++ linux-2.6.35.4/drivers/vhost/vhost.c 2010-09-17 20:12:09.000000000 -0400
28233 @@ -357,7 +357,7 @@ static int init_used(struct vhost_virtqu
28234 return get_user(vq->last_used_idx, &used->idx);
28235 }
28236
28237 -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
28238 +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
28239 {
28240 struct file *eventfp, *filep = NULL,
28241 *pollstart = NULL, *pollstop = NULL;
28242 diff -urNp linux-2.6.35.4/drivers/video/atmel_lcdfb.c linux-2.6.35.4/drivers/video/atmel_lcdfb.c
28243 --- linux-2.6.35.4/drivers/video/atmel_lcdfb.c 2010-08-26 19:47:12.000000000 -0400
28244 +++ linux-2.6.35.4/drivers/video/atmel_lcdfb.c 2010-09-17 20:12:09.000000000 -0400
28245 @@ -111,7 +111,7 @@ static int atmel_bl_get_brightness(struc
28246 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
28247 }
28248
28249 -static struct backlight_ops atmel_lcdc_bl_ops = {
28250 +static const struct backlight_ops atmel_lcdc_bl_ops = {
28251 .update_status = atmel_bl_update_status,
28252 .get_brightness = atmel_bl_get_brightness,
28253 };
28254 diff -urNp linux-2.6.35.4/drivers/video/aty/aty128fb.c linux-2.6.35.4/drivers/video/aty/aty128fb.c
28255 --- linux-2.6.35.4/drivers/video/aty/aty128fb.c 2010-08-26 19:47:12.000000000 -0400
28256 +++ linux-2.6.35.4/drivers/video/aty/aty128fb.c 2010-09-17 20:12:09.000000000 -0400
28257 @@ -1786,7 +1786,7 @@ static int aty128_bl_get_brightness(stru
28258 return bd->props.brightness;
28259 }
28260
28261 -static struct backlight_ops aty128_bl_data = {
28262 +static const struct backlight_ops aty128_bl_data = {
28263 .get_brightness = aty128_bl_get_brightness,
28264 .update_status = aty128_bl_update_status,
28265 };
28266 diff -urNp linux-2.6.35.4/drivers/video/aty/atyfb_base.c linux-2.6.35.4/drivers/video/aty/atyfb_base.c
28267 --- linux-2.6.35.4/drivers/video/aty/atyfb_base.c 2010-08-26 19:47:12.000000000 -0400
28268 +++ linux-2.6.35.4/drivers/video/aty/atyfb_base.c 2010-09-17 20:12:09.000000000 -0400
28269 @@ -2221,7 +2221,7 @@ static int aty_bl_get_brightness(struct
28270 return bd->props.brightness;
28271 }
28272
28273 -static struct backlight_ops aty_bl_data = {
28274 +static const struct backlight_ops aty_bl_data = {
28275 .get_brightness = aty_bl_get_brightness,
28276 .update_status = aty_bl_update_status,
28277 };
28278 diff -urNp linux-2.6.35.4/drivers/video/aty/radeon_backlight.c linux-2.6.35.4/drivers/video/aty/radeon_backlight.c
28279 --- linux-2.6.35.4/drivers/video/aty/radeon_backlight.c 2010-08-26 19:47:12.000000000 -0400
28280 +++ linux-2.6.35.4/drivers/video/aty/radeon_backlight.c 2010-09-17 20:12:09.000000000 -0400
28281 @@ -128,7 +128,7 @@ static int radeon_bl_get_brightness(stru
28282 return bd->props.brightness;
28283 }
28284
28285 -static struct backlight_ops radeon_bl_data = {
28286 +static const struct backlight_ops radeon_bl_data = {
28287 .get_brightness = radeon_bl_get_brightness,
28288 .update_status = radeon_bl_update_status,
28289 };
28290 diff -urNp linux-2.6.35.4/drivers/video/backlight/88pm860x_bl.c linux-2.6.35.4/drivers/video/backlight/88pm860x_bl.c
28291 --- linux-2.6.35.4/drivers/video/backlight/88pm860x_bl.c 2010-08-26 19:47:12.000000000 -0400
28292 +++ linux-2.6.35.4/drivers/video/backlight/88pm860x_bl.c 2010-09-17 20:12:09.000000000 -0400
28293 @@ -155,7 +155,7 @@ out:
28294 return -EINVAL;
28295 }
28296
28297 -static struct backlight_ops pm860x_backlight_ops = {
28298 +static const struct backlight_ops pm860x_backlight_ops = {
28299 .options = BL_CORE_SUSPENDRESUME,
28300 .update_status = pm860x_backlight_update_status,
28301 .get_brightness = pm860x_backlight_get_brightness,
28302 diff -urNp linux-2.6.35.4/drivers/video/backlight/max8925_bl.c linux-2.6.35.4/drivers/video/backlight/max8925_bl.c
28303 --- linux-2.6.35.4/drivers/video/backlight/max8925_bl.c 2010-08-26 19:47:12.000000000 -0400
28304 +++ linux-2.6.35.4/drivers/video/backlight/max8925_bl.c 2010-09-17 20:12:09.000000000 -0400
28305 @@ -92,7 +92,7 @@ static int max8925_backlight_get_brightn
28306 return ret;
28307 }
28308
28309 -static struct backlight_ops max8925_backlight_ops = {
28310 +static const struct backlight_ops max8925_backlight_ops = {
28311 .options = BL_CORE_SUSPENDRESUME,
28312 .update_status = max8925_backlight_update_status,
28313 .get_brightness = max8925_backlight_get_brightness,
28314 diff -urNp linux-2.6.35.4/drivers/video/fbcmap.c linux-2.6.35.4/drivers/video/fbcmap.c
28315 --- linux-2.6.35.4/drivers/video/fbcmap.c 2010-08-26 19:47:12.000000000 -0400
28316 +++ linux-2.6.35.4/drivers/video/fbcmap.c 2010-09-17 20:12:09.000000000 -0400
28317 @@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user
28318 rc = -ENODEV;
28319 goto out;
28320 }
28321 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
28322 - !info->fbops->fb_setcmap)) {
28323 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
28324 rc = -EINVAL;
28325 goto out1;
28326 }
28327 diff -urNp linux-2.6.35.4/drivers/video/fbmem.c linux-2.6.35.4/drivers/video/fbmem.c
28328 --- linux-2.6.35.4/drivers/video/fbmem.c 2010-08-26 19:47:12.000000000 -0400
28329 +++ linux-2.6.35.4/drivers/video/fbmem.c 2010-09-17 20:12:09.000000000 -0400
28330 @@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_in
28331 image->dx += image->width + 8;
28332 }
28333 } else if (rotate == FB_ROTATE_UD) {
28334 - for (x = 0; x < num && image->dx >= 0; x++) {
28335 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
28336 info->fbops->fb_imageblit(info, image);
28337 image->dx -= image->width + 8;
28338 }
28339 @@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_in
28340 image->dy += image->height + 8;
28341 }
28342 } else if (rotate == FB_ROTATE_CCW) {
28343 - for (x = 0; x < num && image->dy >= 0; x++) {
28344 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
28345 info->fbops->fb_imageblit(info, image);
28346 image->dy -= image->height + 8;
28347 }
28348 @@ -1119,7 +1119,7 @@ static long do_fb_ioctl(struct fb_info *
28349 return -EFAULT;
28350 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
28351 return -EINVAL;
28352 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
28353 + if (con2fb.framebuffer >= FB_MAX)
28354 return -EINVAL;
28355 if (!registered_fb[con2fb.framebuffer])
28356 request_module("fb%d", con2fb.framebuffer);
28357 diff -urNp linux-2.6.35.4/drivers/video/fbmon.c linux-2.6.35.4/drivers/video/fbmon.c
28358 --- linux-2.6.35.4/drivers/video/fbmon.c 2010-08-26 19:47:12.000000000 -0400
28359 +++ linux-2.6.35.4/drivers/video/fbmon.c 2010-09-17 20:12:09.000000000 -0400
28360 @@ -46,7 +46,7 @@
28361 #ifdef DEBUG
28362 #define DPRINTK(fmt, args...) printk(fmt,## args)
28363 #else
28364 -#define DPRINTK(fmt, args...)
28365 +#define DPRINTK(fmt, args...) do {} while (0)
28366 #endif
28367
28368 #define FBMON_FIX_HEADER 1
28369 diff -urNp linux-2.6.35.4/drivers/video/i810/i810_accel.c linux-2.6.35.4/drivers/video/i810/i810_accel.c
28370 --- linux-2.6.35.4/drivers/video/i810/i810_accel.c 2010-08-26 19:47:12.000000000 -0400
28371 +++ linux-2.6.35.4/drivers/video/i810/i810_accel.c 2010-09-17 20:12:09.000000000 -0400
28372 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct
28373 }
28374 }
28375 printk("ringbuffer lockup!!!\n");
28376 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
28377 i810_report_error(mmio);
28378 par->dev_flags |= LOCKUP;
28379 info->pixmap.scan_align = 1;
28380 diff -urNp linux-2.6.35.4/drivers/video/i810/i810_main.c linux-2.6.35.4/drivers/video/i810/i810_main.c
28381 --- linux-2.6.35.4/drivers/video/i810/i810_main.c 2010-08-26 19:47:12.000000000 -0400
28382 +++ linux-2.6.35.4/drivers/video/i810/i810_main.c 2010-09-17 20:12:09.000000000 -0400
28383 @@ -120,7 +120,7 @@ static struct pci_device_id i810fb_pci_t
28384 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
28385 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC,
28386 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
28387 - { 0 },
28388 + { 0, 0, 0, 0, 0, 0, 0 },
28389 };
28390
28391 static struct pci_driver i810fb_driver = {
28392 diff -urNp linux-2.6.35.4/drivers/video/modedb.c linux-2.6.35.4/drivers/video/modedb.c
28393 --- linux-2.6.35.4/drivers/video/modedb.c 2010-08-26 19:47:12.000000000 -0400
28394 +++ linux-2.6.35.4/drivers/video/modedb.c 2010-09-17 20:12:09.000000000 -0400
28395 @@ -40,240 +40,240 @@ static const struct fb_videomode modedb[
28396 {
28397 /* 640x400 @ 70 Hz, 31.5 kHz hsync */
28398 NULL, 70, 640, 400, 39721, 40, 24, 39, 9, 96, 2,
28399 - 0, FB_VMODE_NONINTERLACED
28400 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28401 }, {
28402 /* 640x480 @ 60 Hz, 31.5 kHz hsync */
28403 NULL, 60, 640, 480, 39721, 40, 24, 32, 11, 96, 2,
28404 - 0, FB_VMODE_NONINTERLACED
28405 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28406 }, {
28407 /* 800x600 @ 56 Hz, 35.15 kHz hsync */
28408 NULL, 56, 800, 600, 27777, 128, 24, 22, 1, 72, 2,
28409 - 0, FB_VMODE_NONINTERLACED
28410 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28411 }, {
28412 /* 1024x768 @ 87 Hz interlaced, 35.5 kHz hsync */
28413 NULL, 87, 1024, 768, 22271, 56, 24, 33, 8, 160, 8,
28414 - 0, FB_VMODE_INTERLACED
28415 + 0, FB_VMODE_INTERLACED, FB_MODE_IS_UNKNOWN
28416 }, {
28417 /* 640x400 @ 85 Hz, 37.86 kHz hsync */
28418 NULL, 85, 640, 400, 31746, 96, 32, 41, 1, 64, 3,
28419 - FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
28420 + FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28421 }, {
28422 /* 640x480 @ 72 Hz, 36.5 kHz hsync */
28423 NULL, 72, 640, 480, 31746, 144, 40, 30, 8, 40, 3,
28424 - 0, FB_VMODE_NONINTERLACED
28425 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28426 }, {
28427 /* 640x480 @ 75 Hz, 37.50 kHz hsync */
28428 NULL, 75, 640, 480, 31746, 120, 16, 16, 1, 64, 3,
28429 - 0, FB_VMODE_NONINTERLACED
28430 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28431 }, {
28432 /* 800x600 @ 60 Hz, 37.8 kHz hsync */
28433 NULL, 60, 800, 600, 25000, 88, 40, 23, 1, 128, 4,
28434 - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
28435 + FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28436 }, {
28437 /* 640x480 @ 85 Hz, 43.27 kHz hsync */
28438 NULL, 85, 640, 480, 27777, 80, 56, 25, 1, 56, 3,
28439 - 0, FB_VMODE_NONINTERLACED
28440 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28441 }, {
28442 /* 1152x864 @ 89 Hz interlaced, 44 kHz hsync */
28443 NULL, 89, 1152, 864, 15384, 96, 16, 110, 1, 216, 10,
28444 - 0, FB_VMODE_INTERLACED
28445 + 0, FB_VMODE_INTERLACED, FB_MODE_IS_UNKNOWN
28446 }, {
28447 /* 800x600 @ 72 Hz, 48.0 kHz hsync */
28448 NULL, 72, 800, 600, 20000, 64, 56, 23, 37, 120, 6,
28449 - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
28450 + FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28451 }, {
28452 /* 1024x768 @ 60 Hz, 48.4 kHz hsync */
28453 NULL, 60, 1024, 768, 15384, 168, 8, 29, 3, 144, 6,
28454 - 0, FB_VMODE_NONINTERLACED
28455 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28456 }, {
28457 /* 640x480 @ 100 Hz, 53.01 kHz hsync */
28458 NULL, 100, 640, 480, 21834, 96, 32, 36, 8, 96, 6,
28459 - 0, FB_VMODE_NONINTERLACED
28460 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28461 }, {
28462 /* 1152x864 @ 60 Hz, 53.5 kHz hsync */
28463 NULL, 60, 1152, 864, 11123, 208, 64, 16, 4, 256, 8,
28464 - 0, FB_VMODE_NONINTERLACED
28465 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28466 }, {
28467 /* 800x600 @ 85 Hz, 55.84 kHz hsync */
28468 NULL, 85, 800, 600, 16460, 160, 64, 36, 16, 64, 5,
28469 - 0, FB_VMODE_NONINTERLACED
28470 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28471 }, {
28472 /* 1024x768 @ 70 Hz, 56.5 kHz hsync */
28473 NULL, 70, 1024, 768, 13333, 144, 24, 29, 3, 136, 6,
28474 - 0, FB_VMODE_NONINTERLACED
28475 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28476 }, {
28477 /* 1280x1024 @ 87 Hz interlaced, 51 kHz hsync */
28478 NULL, 87, 1280, 1024, 12500, 56, 16, 128, 1, 216, 12,
28479 - 0, FB_VMODE_INTERLACED
28480 + 0, FB_VMODE_INTERLACED, FB_MODE_IS_UNKNOWN
28481 }, {
28482 /* 800x600 @ 100 Hz, 64.02 kHz hsync */
28483 NULL, 100, 800, 600, 14357, 160, 64, 30, 4, 64, 6,
28484 - 0, FB_VMODE_NONINTERLACED
28485 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28486 }, {
28487 /* 1024x768 @ 76 Hz, 62.5 kHz hsync */
28488 NULL, 76, 1024, 768, 11764, 208, 8, 36, 16, 120, 3,
28489 - 0, FB_VMODE_NONINTERLACED
28490 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28491 }, {
28492 /* 1152x864 @ 70 Hz, 62.4 kHz hsync */
28493 NULL, 70, 1152, 864, 10869, 106, 56, 20, 1, 160, 10,
28494 - 0, FB_VMODE_NONINTERLACED
28495 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28496 }, {
28497 /* 1280x1024 @ 61 Hz, 64.2 kHz hsync */
28498 NULL, 61, 1280, 1024, 9090, 200, 48, 26, 1, 184, 3,
28499 - 0, FB_VMODE_NONINTERLACED
28500 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28501 }, {
28502 /* 1400x1050 @ 60Hz, 63.9 kHz hsync */
28503 NULL, 60, 1400, 1050, 9259, 136, 40, 13, 1, 112, 3,
28504 - 0, FB_VMODE_NONINTERLACED
28505 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28506 }, {
28507 /* 1400x1050 @ 75,107 Hz, 82,392 kHz +hsync +vsync*/
28508 NULL, 75, 1400, 1050, 7190, 120, 56, 23, 10, 112, 13,
28509 - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
28510 + FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28511 }, {
28512 /* 1400x1050 @ 60 Hz, ? kHz +hsync +vsync*/
28513 NULL, 60, 1400, 1050, 9259, 128, 40, 12, 0, 112, 3,
28514 - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
28515 + FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28516 }, {
28517 /* 1024x768 @ 85 Hz, 70.24 kHz hsync */
28518 NULL, 85, 1024, 768, 10111, 192, 32, 34, 14, 160, 6,
28519 - 0, FB_VMODE_NONINTERLACED
28520 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28521 }, {
28522 /* 1152x864 @ 78 Hz, 70.8 kHz hsync */
28523 NULL, 78, 1152, 864, 9090, 228, 88, 32, 0, 84, 12,
28524 - 0, FB_VMODE_NONINTERLACED
28525 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28526 }, {
28527 /* 1280x1024 @ 70 Hz, 74.59 kHz hsync */
28528 NULL, 70, 1280, 1024, 7905, 224, 32, 28, 8, 160, 8,
28529 - 0, FB_VMODE_NONINTERLACED
28530 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28531 }, {
28532 /* 1600x1200 @ 60Hz, 75.00 kHz hsync */
28533 NULL, 60, 1600, 1200, 6172, 304, 64, 46, 1, 192, 3,
28534 - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
28535 + FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28536 }, {
28537 /* 1152x864 @ 84 Hz, 76.0 kHz hsync */
28538 NULL, 84, 1152, 864, 7407, 184, 312, 32, 0, 128, 12,
28539 - 0, FB_VMODE_NONINTERLACED
28540 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28541 }, {
28542 /* 1280x1024 @ 74 Hz, 78.85 kHz hsync */
28543 NULL, 74, 1280, 1024, 7407, 256, 32, 34, 3, 144, 3,
28544 - 0, FB_VMODE_NONINTERLACED
28545 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28546 }, {
28547 /* 1024x768 @ 100Hz, 80.21 kHz hsync */
28548 NULL, 100, 1024, 768, 8658, 192, 32, 21, 3, 192, 10,
28549 - 0, FB_VMODE_NONINTERLACED
28550 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28551 }, {
28552 /* 1280x1024 @ 76 Hz, 81.13 kHz hsync */
28553 NULL, 76, 1280, 1024, 7407, 248, 32, 34, 3, 104, 3,
28554 - 0, FB_VMODE_NONINTERLACED
28555 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28556 }, {
28557 /* 1600x1200 @ 70 Hz, 87.50 kHz hsync */
28558 NULL, 70, 1600, 1200, 5291, 304, 64, 46, 1, 192, 3,
28559 - 0, FB_VMODE_NONINTERLACED
28560 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28561 }, {
28562 /* 1152x864 @ 100 Hz, 89.62 kHz hsync */
28563 NULL, 100, 1152, 864, 7264, 224, 32, 17, 2, 128, 19,
28564 - 0, FB_VMODE_NONINTERLACED
28565 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28566 }, {
28567 /* 1280x1024 @ 85 Hz, 91.15 kHz hsync */
28568 NULL, 85, 1280, 1024, 6349, 224, 64, 44, 1, 160, 3,
28569 - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
28570 + FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28571 }, {
28572 /* 1600x1200 @ 75 Hz, 93.75 kHz hsync */
28573 NULL, 75, 1600, 1200, 4938, 304, 64, 46, 1, 192, 3,
28574 - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
28575 + FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28576 }, {
28577 /* 1680x1050 @ 60 Hz, 65.191 kHz hsync */
28578 NULL, 60, 1680, 1050, 6848, 280, 104, 30, 3, 176, 6,
28579 - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
28580 + FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28581 }, {
28582 /* 1600x1200 @ 85 Hz, 105.77 kHz hsync */
28583 NULL, 85, 1600, 1200, 4545, 272, 16, 37, 4, 192, 3,
28584 - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
28585 + FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28586 }, {
28587 /* 1280x1024 @ 100 Hz, 107.16 kHz hsync */
28588 NULL, 100, 1280, 1024, 5502, 256, 32, 26, 7, 128, 15,
28589 - 0, FB_VMODE_NONINTERLACED
28590 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28591 }, {
28592 /* 1800x1440 @ 64Hz, 96.15 kHz hsync */
28593 NULL, 64, 1800, 1440, 4347, 304, 96, 46, 1, 192, 3,
28594 - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
28595 + FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28596 }, {
28597 /* 1800x1440 @ 70Hz, 104.52 kHz hsync */
28598 NULL, 70, 1800, 1440, 4000, 304, 96, 46, 1, 192, 3,
28599 - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
28600 + FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28601 }, {
28602 /* 512x384 @ 78 Hz, 31.50 kHz hsync */
28603 NULL, 78, 512, 384, 49603, 48, 16, 16, 1, 64, 3,
28604 - 0, FB_VMODE_NONINTERLACED
28605 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28606 }, {
28607 /* 512x384 @ 85 Hz, 34.38 kHz hsync */
28608 NULL, 85, 512, 384, 45454, 48, 16, 16, 1, 64, 3,
28609 - 0, FB_VMODE_NONINTERLACED
28610 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28611 }, {
28612 /* 320x200 @ 70 Hz, 31.5 kHz hsync, 8:5 aspect ratio */
28613 NULL, 70, 320, 200, 79440, 16, 16, 20, 4, 48, 1,
28614 - 0, FB_VMODE_DOUBLE
28615 + 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
28616 }, {
28617 /* 320x240 @ 60 Hz, 31.5 kHz hsync, 4:3 aspect ratio */
28618 NULL, 60, 320, 240, 79440, 16, 16, 16, 5, 48, 1,
28619 - 0, FB_VMODE_DOUBLE
28620 + 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
28621 }, {
28622 /* 320x240 @ 72 Hz, 36.5 kHz hsync */
28623 NULL, 72, 320, 240, 63492, 16, 16, 16, 4, 48, 2,
28624 - 0, FB_VMODE_DOUBLE
28625 + 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
28626 }, {
28627 /* 400x300 @ 56 Hz, 35.2 kHz hsync, 4:3 aspect ratio */
28628 NULL, 56, 400, 300, 55555, 64, 16, 10, 1, 32, 1,
28629 - 0, FB_VMODE_DOUBLE
28630 + 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
28631 }, {
28632 /* 400x300 @ 60 Hz, 37.8 kHz hsync */
28633 NULL, 60, 400, 300, 50000, 48, 16, 11, 1, 64, 2,
28634 - 0, FB_VMODE_DOUBLE
28635 + 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
28636 }, {
28637 /* 400x300 @ 72 Hz, 48.0 kHz hsync */
28638 NULL, 72, 400, 300, 40000, 32, 24, 11, 19, 64, 3,
28639 - 0, FB_VMODE_DOUBLE
28640 + 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
28641 }, {
28642 /* 480x300 @ 56 Hz, 35.2 kHz hsync, 8:5 aspect ratio */
28643 NULL, 56, 480, 300, 46176, 80, 16, 10, 1, 40, 1,
28644 - 0, FB_VMODE_DOUBLE
28645 + 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
28646 }, {
28647 /* 480x300 @ 60 Hz, 37.8 kHz hsync */
28648 NULL, 60, 480, 300, 41858, 56, 16, 11, 1, 80, 2,
28649 - 0, FB_VMODE_DOUBLE
28650 + 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
28651 }, {
28652 /* 480x300 @ 63 Hz, 39.6 kHz hsync */
28653 NULL, 63, 480, 300, 40000, 56, 16, 11, 1, 80, 2,
28654 - 0, FB_VMODE_DOUBLE
28655 + 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
28656 }, {
28657 /* 480x300 @ 72 Hz, 48.0 kHz hsync */
28658 NULL, 72, 480, 300, 33386, 40, 24, 11, 19, 80, 3,
28659 - 0, FB_VMODE_DOUBLE
28660 + 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN
28661 }, {
28662 /* 1920x1200 @ 60 Hz, 74.5 Khz hsync */
28663 NULL, 60, 1920, 1200, 5177, 128, 336, 1, 38, 208, 3,
28664 FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
28665 - FB_VMODE_NONINTERLACED
28666 + FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28667 }, {
28668 /* 1152x768, 60 Hz, PowerBook G4 Titanium I and II */
28669 NULL, 60, 1152, 768, 14047, 158, 26, 29, 3, 136, 6,
28670 - FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED
28671 + FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28672 }, {
28673 /* 1366x768, 60 Hz, 47.403 kHz hsync, WXGA 16:9 aspect ratio */
28674 NULL, 60, 1366, 768, 13806, 120, 10, 14, 3, 32, 5,
28675 - 0, FB_VMODE_NONINTERLACED
28676 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28677 }, {
28678 /* 1280x800, 60 Hz, 47.403 kHz hsync, WXGA 16:10 aspect ratio */
28679 NULL, 60, 1280, 800, 12048, 200, 64, 24, 1, 136, 3,
28680 - 0, FB_VMODE_NONINTERLACED
28681 + 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN
28682 }, {
28683 /* 720x576i @ 50 Hz, 15.625 kHz hsync (PAL RGB) */
28684 NULL, 50, 720, 576, 74074, 64, 16, 39, 5, 64, 5,
28685 - 0, FB_VMODE_INTERLACED
28686 + 0, FB_VMODE_INTERLACED, FB_MODE_IS_UNKNOWN
28687 }, {
28688 /* 800x520i @ 50 Hz, 15.625 kHz hsync (PAL RGB) */
28689 NULL, 50, 800, 520, 58823, 144, 64, 72, 28, 80, 5,
28690 - 0, FB_VMODE_INTERLACED
28691 + 0, FB_VMODE_INTERLACED, FB_MODE_IS_UNKNOWN
28692 },
28693 };
28694
28695 diff -urNp linux-2.6.35.4/drivers/video/nvidia/nv_backlight.c linux-2.6.35.4/drivers/video/nvidia/nv_backlight.c
28696 --- linux-2.6.35.4/drivers/video/nvidia/nv_backlight.c 2010-08-26 19:47:12.000000000 -0400
28697 +++ linux-2.6.35.4/drivers/video/nvidia/nv_backlight.c 2010-09-17 20:12:09.000000000 -0400
28698 @@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(stru
28699 return bd->props.brightness;
28700 }
28701
28702 -static struct backlight_ops nvidia_bl_ops = {
28703 +static const struct backlight_ops nvidia_bl_ops = {
28704 .get_brightness = nvidia_bl_get_brightness,
28705 .update_status = nvidia_bl_update_status,
28706 };
28707 diff -urNp linux-2.6.35.4/drivers/video/omap2/displays/panel-taal.c linux-2.6.35.4/drivers/video/omap2/displays/panel-taal.c
28708 --- linux-2.6.35.4/drivers/video/omap2/displays/panel-taal.c 2010-08-26 19:47:12.000000000 -0400
28709 +++ linux-2.6.35.4/drivers/video/omap2/displays/panel-taal.c 2010-09-17 20:12:09.000000000 -0400
28710 @@ -319,7 +319,7 @@ static int taal_bl_get_intensity(struct
28711 return 0;
28712 }
28713
28714 -static struct backlight_ops taal_bl_ops = {
28715 +static const struct backlight_ops taal_bl_ops = {
28716 .get_brightness = taal_bl_get_intensity,
28717 .update_status = taal_bl_update_status,
28718 };
28719 diff -urNp linux-2.6.35.4/drivers/video/riva/fbdev.c linux-2.6.35.4/drivers/video/riva/fbdev.c
28720 --- linux-2.6.35.4/drivers/video/riva/fbdev.c 2010-08-26 19:47:12.000000000 -0400
28721 +++ linux-2.6.35.4/drivers/video/riva/fbdev.c 2010-09-17 20:12:09.000000000 -0400
28722 @@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct
28723 return bd->props.brightness;
28724 }
28725
28726 -static struct backlight_ops riva_bl_ops = {
28727 +static const struct backlight_ops riva_bl_ops = {
28728 .get_brightness = riva_bl_get_brightness,
28729 .update_status = riva_bl_update_status,
28730 };
28731 diff -urNp linux-2.6.35.4/drivers/video/uvesafb.c linux-2.6.35.4/drivers/video/uvesafb.c
28732 --- linux-2.6.35.4/drivers/video/uvesafb.c 2010-08-26 19:47:12.000000000 -0400
28733 +++ linux-2.6.35.4/drivers/video/uvesafb.c 2010-09-17 20:12:09.000000000 -0400
28734 @@ -19,6 +19,7 @@
28735 #include <linux/io.h>
28736 #include <linux/mutex.h>
28737 #include <linux/slab.h>
28738 +#include <linux/moduleloader.h>
28739 #include <video/edid.h>
28740 #include <video/uvesafb.h>
28741 #ifdef CONFIG_X86
28742 @@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
28743 NULL,
28744 };
28745
28746 - return call_usermodehelper(v86d_path, argv, envp, 1);
28747 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
28748 }
28749
28750 /*
28751 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(
28752 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
28753 par->pmi_setpal = par->ypan = 0;
28754 } else {
28755 +
28756 +#ifdef CONFIG_PAX_KERNEXEC
28757 +#ifdef CONFIG_MODULES
28758 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
28759 +#endif
28760 + if (!par->pmi_code) {
28761 + par->pmi_setpal = par->ypan = 0;
28762 + return 0;
28763 + }
28764 +#endif
28765 +
28766 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
28767 + task->t.regs.edi);
28768 +
28769 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
28770 + pax_open_kernel();
28771 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
28772 + pax_close_kernel();
28773 +
28774 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
28775 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
28776 +#else
28777 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
28778 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
28779 +#endif
28780 +
28781 printk(KERN_INFO "uvesafb: protected mode interface info at "
28782 "%04x:%04x\n",
28783 (u16)task->t.regs.es, (u16)task->t.regs.edi);
28784 @@ -1800,6 +1823,11 @@ out:
28785 if (par->vbe_modes)
28786 kfree(par->vbe_modes);
28787
28788 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
28789 + if (par->pmi_code)
28790 + module_free_exec(NULL, par->pmi_code);
28791 +#endif
28792 +
28793 framebuffer_release(info);
28794 return err;
28795 }
28796 @@ -1826,6 +1854,12 @@ static int uvesafb_remove(struct platfor
28797 kfree(par->vbe_state_orig);
28798 if (par->vbe_state_saved)
28799 kfree(par->vbe_state_saved);
28800 +
28801 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
28802 + if (par->pmi_code)
28803 + module_free_exec(NULL, par->pmi_code);
28804 +#endif
28805 +
28806 }
28807
28808 framebuffer_release(info);
28809 diff -urNp linux-2.6.35.4/drivers/video/vesafb.c linux-2.6.35.4/drivers/video/vesafb.c
28810 --- linux-2.6.35.4/drivers/video/vesafb.c 2010-08-26 19:47:12.000000000 -0400
28811 +++ linux-2.6.35.4/drivers/video/vesafb.c 2010-09-17 20:12:09.000000000 -0400
28812 @@ -9,6 +9,7 @@
28813 */
28814
28815 #include <linux/module.h>
28816 +#include <linux/moduleloader.h>
28817 #include <linux/kernel.h>
28818 #include <linux/errno.h>
28819 #include <linux/string.h>
28820 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /*
28821 static int vram_total __initdata; /* Set total amount of memory */
28822 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
28823 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
28824 -static void (*pmi_start)(void) __read_mostly;
28825 -static void (*pmi_pal) (void) __read_mostly;
28826 +static void (*pmi_start)(void) __read_only;
28827 +static void (*pmi_pal) (void) __read_only;
28828 static int depth __read_mostly;
28829 static int vga_compat __read_mostly;
28830 /* --------------------------------------------------------------------- */
28831 @@ -232,6 +233,7 @@ static int __init vesafb_probe(struct pl
28832 unsigned int size_vmode;
28833 unsigned int size_remap;
28834 unsigned int size_total;
28835 + void *pmi_code = NULL;
28836
28837 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
28838 return -ENODEV;
28839 @@ -274,10 +276,6 @@ static int __init vesafb_probe(struct pl
28840 size_remap = size_total;
28841 vesafb_fix.smem_len = size_remap;
28842
28843 -#ifndef __i386__
28844 - screen_info.vesapm_seg = 0;
28845 -#endif
28846 -
28847 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
28848 printk(KERN_WARNING
28849 "vesafb: cannot reserve video memory at 0x%lx\n",
28850 @@ -319,9 +317,21 @@ static int __init vesafb_probe(struct pl
28851 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
28852 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
28853
28854 +#ifdef __i386__
28855 +
28856 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
28857 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
28858 + if (!pmi_code)
28859 +#elif !defined(CONFIG_PAX_KERNEXEC)
28860 + if (0)
28861 +#endif
28862 +
28863 +#endif
28864 + screen_info.vesapm_seg = 0;
28865 +
28866 if (screen_info.vesapm_seg) {
28867 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
28868 - screen_info.vesapm_seg,screen_info.vesapm_off);
28869 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
28870 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
28871 }
28872
28873 if (screen_info.vesapm_seg < 0xc000)
28874 @@ -329,9 +339,25 @@ static int __init vesafb_probe(struct pl
28875
28876 if (ypan || pmi_setpal) {
28877 unsigned short *pmi_base;
28878 - pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
28879 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
28880 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
28881 +
28882 + pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
28883 +
28884 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
28885 + pax_open_kernel();
28886 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
28887 +#else
28888 + pmi_code = pmi_base;
28889 +#endif
28890 +
28891 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
28892 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
28893 +
28894 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
28895 + pmi_start = ktva_ktla(pmi_start);
28896 + pmi_pal = ktva_ktla(pmi_pal);
28897 + pax_close_kernel();
28898 +#endif
28899 +
28900 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
28901 if (pmi_base[3]) {
28902 printk(KERN_INFO "vesafb: pmi: ports = ");
28903 @@ -473,6 +499,11 @@ static int __init vesafb_probe(struct pl
28904 info->node, info->fix.id);
28905 return 0;
28906 err:
28907 +
28908 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
28909 + module_free_exec(NULL, pmi_code);
28910 +#endif
28911 +
28912 if (info->screen_base)
28913 iounmap(info->screen_base);
28914 framebuffer_release(info);
28915 diff -urNp linux-2.6.35.4/fs/9p/vfs_inode.c linux-2.6.35.4/fs/9p/vfs_inode.c
28916 --- linux-2.6.35.4/fs/9p/vfs_inode.c 2010-08-26 19:47:12.000000000 -0400
28917 +++ linux-2.6.35.4/fs/9p/vfs_inode.c 2010-09-17 20:12:09.000000000 -0400
28918 @@ -1087,7 +1087,7 @@ static void *v9fs_vfs_follow_link(struct
28919 static void
28920 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
28921 {
28922 - char *s = nd_get_link(nd);
28923 + const char *s = nd_get_link(nd);
28924
28925 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
28926 IS_ERR(s) ? "<error>" : s);
28927 diff -urNp linux-2.6.35.4/fs/aio.c linux-2.6.35.4/fs/aio.c
28928 --- linux-2.6.35.4/fs/aio.c 2010-08-26 19:47:12.000000000 -0400
28929 +++ linux-2.6.35.4/fs/aio.c 2010-09-17 20:12:09.000000000 -0400
28930 @@ -130,7 +130,7 @@ static int aio_setup_ring(struct kioctx
28931 size += sizeof(struct io_event) * nr_events;
28932 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
28933
28934 - if (nr_pages < 0)
28935 + if (nr_pages <= 0)
28936 return -EINVAL;
28937
28938 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
28939 diff -urNp linux-2.6.35.4/fs/attr.c linux-2.6.35.4/fs/attr.c
28940 --- linux-2.6.35.4/fs/attr.c 2010-08-26 19:47:12.000000000 -0400
28941 +++ linux-2.6.35.4/fs/attr.c 2010-09-17 20:12:37.000000000 -0400
28942 @@ -82,6 +82,7 @@ int inode_newsize_ok(const struct inode
28943 unsigned long limit;
28944
28945 limit = rlimit(RLIMIT_FSIZE);
28946 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
28947 if (limit != RLIM_INFINITY && offset > limit)
28948 goto out_sig;
28949 if (offset > inode->i_sb->s_maxbytes)
28950 diff -urNp linux-2.6.35.4/fs/autofs/root.c linux-2.6.35.4/fs/autofs/root.c
28951 --- linux-2.6.35.4/fs/autofs/root.c 2010-08-26 19:47:12.000000000 -0400
28952 +++ linux-2.6.35.4/fs/autofs/root.c 2010-09-17 20:12:09.000000000 -0400
28953 @@ -301,7 +301,8 @@ static int autofs_root_symlink(struct in
28954 set_bit(n,sbi->symlink_bitmap);
28955 sl = &sbi->symlink[n];
28956 sl->len = strlen(symname);
28957 - sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
28958 + slsize = sl->len+1;
28959 + sl->data = kmalloc(slsize, GFP_KERNEL);
28960 if (!sl->data) {
28961 clear_bit(n,sbi->symlink_bitmap);
28962 unlock_kernel();
28963 diff -urNp linux-2.6.35.4/fs/autofs4/symlink.c linux-2.6.35.4/fs/autofs4/symlink.c
28964 --- linux-2.6.35.4/fs/autofs4/symlink.c 2010-08-26 19:47:12.000000000 -0400
28965 +++ linux-2.6.35.4/fs/autofs4/symlink.c 2010-09-17 20:12:09.000000000 -0400
28966 @@ -15,7 +15,7 @@
28967 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
28968 {
28969 struct autofs_info *ino = autofs4_dentry_ino(dentry);
28970 - nd_set_link(nd, (char *)ino->u.symlink);
28971 + nd_set_link(nd, ino->u.symlink);
28972 return NULL;
28973 }
28974
28975 diff -urNp linux-2.6.35.4/fs/befs/linuxvfs.c linux-2.6.35.4/fs/befs/linuxvfs.c
28976 --- linux-2.6.35.4/fs/befs/linuxvfs.c 2010-08-26 19:47:12.000000000 -0400
28977 +++ linux-2.6.35.4/fs/befs/linuxvfs.c 2010-09-17 20:12:09.000000000 -0400
28978 @@ -493,7 +493,7 @@ static void befs_put_link(struct dentry
28979 {
28980 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
28981 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
28982 - char *link = nd_get_link(nd);
28983 + const char *link = nd_get_link(nd);
28984 if (!IS_ERR(link))
28985 kfree(link);
28986 }
28987 diff -urNp linux-2.6.35.4/fs/binfmt_aout.c linux-2.6.35.4/fs/binfmt_aout.c
28988 --- linux-2.6.35.4/fs/binfmt_aout.c 2010-08-26 19:47:12.000000000 -0400
28989 +++ linux-2.6.35.4/fs/binfmt_aout.c 2010-09-17 20:12:37.000000000 -0400
28990 @@ -16,6 +16,7 @@
28991 #include <linux/string.h>
28992 #include <linux/fs.h>
28993 #include <linux/file.h>
28994 +#include <linux/security.h>
28995 #include <linux/stat.h>
28996 #include <linux/fcntl.h>
28997 #include <linux/ptrace.h>
28998 @@ -97,10 +98,12 @@ static int aout_core_dump(struct coredum
28999
29000 /* If the size of the dump file exceeds the rlimit, then see what would happen
29001 if we wrote the stack, but not the data area. */
29002 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
29003 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
29004 dump.u_dsize = 0;
29005
29006 /* Make sure we have enough room to write the stack and data areas. */
29007 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
29008 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
29009 dump.u_ssize = 0;
29010
29011 @@ -238,6 +241,8 @@ static int load_aout_binary(struct linux
29012 rlim = rlimit(RLIMIT_DATA);
29013 if (rlim >= RLIM_INFINITY)
29014 rlim = ~0;
29015 +
29016 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
29017 if (ex.a_data + ex.a_bss > rlim)
29018 return -ENOMEM;
29019
29020 @@ -266,6 +271,27 @@ static int load_aout_binary(struct linux
29021 install_exec_creds(bprm);
29022 current->flags &= ~PF_FORKNOEXEC;
29023
29024 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
29025 + current->mm->pax_flags = 0UL;
29026 +#endif
29027 +
29028 +#ifdef CONFIG_PAX_PAGEEXEC
29029 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
29030 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
29031 +
29032 +#ifdef CONFIG_PAX_EMUTRAMP
29033 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
29034 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
29035 +#endif
29036 +
29037 +#ifdef CONFIG_PAX_MPROTECT
29038 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
29039 + current->mm->pax_flags |= MF_PAX_MPROTECT;
29040 +#endif
29041 +
29042 + }
29043 +#endif
29044 +
29045 if (N_MAGIC(ex) == OMAGIC) {
29046 unsigned long text_addr, map_size;
29047 loff_t pos;
29048 @@ -338,7 +364,7 @@ static int load_aout_binary(struct linux
29049
29050 down_write(&current->mm->mmap_sem);
29051 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
29052 - PROT_READ | PROT_WRITE | PROT_EXEC,
29053 + PROT_READ | PROT_WRITE,
29054 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
29055 fd_offset + ex.a_text);
29056 up_write(&current->mm->mmap_sem);
29057 diff -urNp linux-2.6.35.4/fs/binfmt_elf.c linux-2.6.35.4/fs/binfmt_elf.c
29058 --- linux-2.6.35.4/fs/binfmt_elf.c 2010-08-26 19:47:12.000000000 -0400
29059 +++ linux-2.6.35.4/fs/binfmt_elf.c 2010-09-17 20:12:37.000000000 -0400
29060 @@ -51,6 +51,10 @@ static int elf_core_dump(struct coredump
29061 #define elf_core_dump NULL
29062 #endif
29063
29064 +#ifdef CONFIG_PAX_MPROTECT
29065 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
29066 +#endif
29067 +
29068 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
29069 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
29070 #else
29071 @@ -70,6 +74,11 @@ static struct linux_binfmt elf_format =
29072 .load_binary = load_elf_binary,
29073 .load_shlib = load_elf_library,
29074 .core_dump = elf_core_dump,
29075 +
29076 +#ifdef CONFIG_PAX_MPROTECT
29077 + .handle_mprotect= elf_handle_mprotect,
29078 +#endif
29079 +
29080 .min_coredump = ELF_EXEC_PAGESIZE,
29081 .hasvdso = 1
29082 };
29083 @@ -78,6 +87,8 @@ static struct linux_binfmt elf_format =
29084
29085 static int set_brk(unsigned long start, unsigned long end)
29086 {
29087 + unsigned long e = end;
29088 +
29089 start = ELF_PAGEALIGN(start);
29090 end = ELF_PAGEALIGN(end);
29091 if (end > start) {
29092 @@ -88,7 +99,7 @@ static int set_brk(unsigned long start,
29093 if (BAD_ADDR(addr))
29094 return addr;
29095 }
29096 - current->mm->start_brk = current->mm->brk = end;
29097 + current->mm->start_brk = current->mm->brk = e;
29098 return 0;
29099 }
29100
29101 @@ -149,7 +160,7 @@ create_elf_tables(struct linux_binprm *b
29102 elf_addr_t __user *u_rand_bytes;
29103 const char *k_platform = ELF_PLATFORM;
29104 const char *k_base_platform = ELF_BASE_PLATFORM;
29105 - unsigned char k_rand_bytes[16];
29106 + u32 k_rand_bytes[4];
29107 int items;
29108 elf_addr_t *elf_info;
29109 int ei_index = 0;
29110 @@ -196,8 +207,12 @@ create_elf_tables(struct linux_binprm *b
29111 * Generate 16 random bytes for userspace PRNG seeding.
29112 */
29113 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
29114 - u_rand_bytes = (elf_addr_t __user *)
29115 - STACK_ALLOC(p, sizeof(k_rand_bytes));
29116 + srandom32(k_rand_bytes[0] ^ random32());
29117 + srandom32(k_rand_bytes[1] ^ random32());
29118 + srandom32(k_rand_bytes[2] ^ random32());
29119 + srandom32(k_rand_bytes[3] ^ random32());
29120 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
29121 + u_rand_bytes = (elf_addr_t __user *) p;
29122 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
29123 return -EFAULT;
29124
29125 @@ -386,10 +401,10 @@ static unsigned long load_elf_interp(str
29126 {
29127 struct elf_phdr *elf_phdata;
29128 struct elf_phdr *eppnt;
29129 - unsigned long load_addr = 0;
29130 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
29131 int load_addr_set = 0;
29132 unsigned long last_bss = 0, elf_bss = 0;
29133 - unsigned long error = ~0UL;
29134 + unsigned long error = -EINVAL;
29135 unsigned long total_size;
29136 int retval, i, size;
29137
29138 @@ -435,6 +450,11 @@ static unsigned long load_elf_interp(str
29139 goto out_close;
29140 }
29141
29142 +#ifdef CONFIG_PAX_SEGMEXEC
29143 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
29144 + pax_task_size = SEGMEXEC_TASK_SIZE;
29145 +#endif
29146 +
29147 eppnt = elf_phdata;
29148 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
29149 if (eppnt->p_type == PT_LOAD) {
29150 @@ -478,8 +498,8 @@ static unsigned long load_elf_interp(str
29151 k = load_addr + eppnt->p_vaddr;
29152 if (BAD_ADDR(k) ||
29153 eppnt->p_filesz > eppnt->p_memsz ||
29154 - eppnt->p_memsz > TASK_SIZE ||
29155 - TASK_SIZE - eppnt->p_memsz < k) {
29156 + eppnt->p_memsz > pax_task_size ||
29157 + pax_task_size - eppnt->p_memsz < k) {
29158 error = -ENOMEM;
29159 goto out_close;
29160 }
29161 @@ -533,6 +553,177 @@ out:
29162 return error;
29163 }
29164
29165 +#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
29166 +static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
29167 +{
29168 + unsigned long pax_flags = 0UL;
29169 +
29170 +#ifdef CONFIG_PAX_PAGEEXEC
29171 + if (elf_phdata->p_flags & PF_PAGEEXEC)
29172 + pax_flags |= MF_PAX_PAGEEXEC;
29173 +#endif
29174 +
29175 +#ifdef CONFIG_PAX_SEGMEXEC
29176 + if (elf_phdata->p_flags & PF_SEGMEXEC)
29177 + pax_flags |= MF_PAX_SEGMEXEC;
29178 +#endif
29179 +
29180 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
29181 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
29182 + if ((__supported_pte_mask & _PAGE_NX))
29183 + pax_flags &= ~MF_PAX_SEGMEXEC;
29184 + else
29185 + pax_flags &= ~MF_PAX_PAGEEXEC;
29186 + }
29187 +#endif
29188 +
29189 +#ifdef CONFIG_PAX_EMUTRAMP
29190 + if (elf_phdata->p_flags & PF_EMUTRAMP)
29191 + pax_flags |= MF_PAX_EMUTRAMP;
29192 +#endif
29193 +
29194 +#ifdef CONFIG_PAX_MPROTECT
29195 + if (elf_phdata->p_flags & PF_MPROTECT)
29196 + pax_flags |= MF_PAX_MPROTECT;
29197 +#endif
29198 +
29199 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
29200 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
29201 + pax_flags |= MF_PAX_RANDMMAP;
29202 +#endif
29203 +
29204 + return pax_flags;
29205 +}
29206 +#endif
29207 +
29208 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
29209 +static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
29210 +{
29211 + unsigned long pax_flags = 0UL;
29212 +
29213 +#ifdef CONFIG_PAX_PAGEEXEC
29214 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
29215 + pax_flags |= MF_PAX_PAGEEXEC;
29216 +#endif
29217 +
29218 +#ifdef CONFIG_PAX_SEGMEXEC
29219 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
29220 + pax_flags |= MF_PAX_SEGMEXEC;
29221 +#endif
29222 +
29223 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
29224 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
29225 + if ((__supported_pte_mask & _PAGE_NX))
29226 + pax_flags &= ~MF_PAX_SEGMEXEC;
29227 + else
29228 + pax_flags &= ~MF_PAX_PAGEEXEC;
29229 + }
29230 +#endif
29231 +
29232 +#ifdef CONFIG_PAX_EMUTRAMP
29233 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
29234 + pax_flags |= MF_PAX_EMUTRAMP;
29235 +#endif
29236 +
29237 +#ifdef CONFIG_PAX_MPROTECT
29238 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
29239 + pax_flags |= MF_PAX_MPROTECT;
29240 +#endif
29241 +
29242 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
29243 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
29244 + pax_flags |= MF_PAX_RANDMMAP;
29245 +#endif
29246 +
29247 + return pax_flags;
29248 +}
29249 +#endif
29250 +
29251 +#ifdef CONFIG_PAX_EI_PAX
29252 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
29253 +{
29254 + unsigned long pax_flags = 0UL;
29255 +
29256 +#ifdef CONFIG_PAX_PAGEEXEC
29257 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
29258 + pax_flags |= MF_PAX_PAGEEXEC;
29259 +#endif
29260 +
29261 +#ifdef CONFIG_PAX_SEGMEXEC
29262 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
29263 + pax_flags |= MF_PAX_SEGMEXEC;
29264 +#endif
29265 +
29266 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
29267 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
29268 + if ((__supported_pte_mask & _PAGE_NX))
29269 + pax_flags &= ~MF_PAX_SEGMEXEC;
29270 + else
29271 + pax_flags &= ~MF_PAX_PAGEEXEC;
29272 + }
29273 +#endif
29274 +
29275 +#ifdef CONFIG_PAX_EMUTRAMP
29276 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
29277 + pax_flags |= MF_PAX_EMUTRAMP;
29278 +#endif
29279 +
29280 +#ifdef CONFIG_PAX_MPROTECT
29281 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
29282 + pax_flags |= MF_PAX_MPROTECT;
29283 +#endif
29284 +
29285 +#ifdef CONFIG_PAX_ASLR
29286 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
29287 + pax_flags |= MF_PAX_RANDMMAP;
29288 +#endif
29289 +
29290 + return pax_flags;
29291 +}
29292 +#endif
29293 +
29294 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
29295 +static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
29296 +{
29297 + unsigned long pax_flags = 0UL;
29298 +
29299 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
29300 + unsigned long i;
29301 +#endif
29302 +
29303 +#ifdef CONFIG_PAX_EI_PAX
29304 + pax_flags = pax_parse_ei_pax(elf_ex);
29305 +#endif
29306 +
29307 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
29308 + for (i = 0UL; i < elf_ex->e_phnum; i++)
29309 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
29310 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
29311 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
29312 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
29313 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
29314 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
29315 + return -EINVAL;
29316 +
29317 +#ifdef CONFIG_PAX_SOFTMODE
29318 + if (pax_softmode)
29319 + pax_flags = pax_parse_softmode(&elf_phdata[i]);
29320 + else
29321 +#endif
29322 +
29323 + pax_flags = pax_parse_hardmode(&elf_phdata[i]);
29324 + break;
29325 + }
29326 +#endif
29327 +
29328 + if (0 > pax_check_flags(&pax_flags))
29329 + return -EINVAL;
29330 +
29331 + current->mm->pax_flags = pax_flags;
29332 + return 0;
29333 +}
29334 +#endif
29335 +
29336 /*
29337 * These are the functions used to load ELF style executables and shared
29338 * libraries. There is no binary dependent code anywhere else.
29339 @@ -549,6 +740,11 @@ static unsigned long randomize_stack_top
29340 {
29341 unsigned int random_variable = 0;
29342
29343 +#ifdef CONFIG_PAX_RANDUSTACK
29344 + if (randomize_va_space)
29345 + return stack_top - current->mm->delta_stack;
29346 +#endif
29347 +
29348 if ((current->flags & PF_RANDOMIZE) &&
29349 !(current->personality & ADDR_NO_RANDOMIZE)) {
29350 random_variable = get_random_int() & STACK_RND_MASK;
29351 @@ -567,7 +763,7 @@ static int load_elf_binary(struct linux_
29352 unsigned long load_addr = 0, load_bias = 0;
29353 int load_addr_set = 0;
29354 char * elf_interpreter = NULL;
29355 - unsigned long error;
29356 + unsigned long error = 0;
29357 struct elf_phdr *elf_ppnt, *elf_phdata;
29358 unsigned long elf_bss, elf_brk;
29359 int retval, i;
29360 @@ -577,11 +773,11 @@ static int load_elf_binary(struct linux_
29361 unsigned long start_code, end_code, start_data, end_data;
29362 unsigned long reloc_func_desc = 0;
29363 int executable_stack = EXSTACK_DEFAULT;
29364 - unsigned long def_flags = 0;
29365 struct {
29366 struct elfhdr elf_ex;
29367 struct elfhdr interp_elf_ex;
29368 } *loc;
29369 + unsigned long pax_task_size = TASK_SIZE;
29370
29371 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
29372 if (!loc) {
29373 @@ -719,11 +915,80 @@ static int load_elf_binary(struct linux_
29374
29375 /* OK, This is the point of no return */
29376 current->flags &= ~PF_FORKNOEXEC;
29377 - current->mm->def_flags = def_flags;
29378 +
29379 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
29380 + current->mm->pax_flags = 0UL;
29381 +#endif
29382 +
29383 +#ifdef CONFIG_PAX_DLRESOLVE
29384 + current->mm->call_dl_resolve = 0UL;
29385 +#endif
29386 +
29387 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
29388 + current->mm->call_syscall = 0UL;
29389 +#endif
29390 +
29391 +#ifdef CONFIG_PAX_ASLR
29392 + current->mm->delta_mmap = 0UL;
29393 + current->mm->delta_stack = 0UL;
29394 +#endif
29395 +
29396 + current->mm->def_flags = 0;
29397 +
29398 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
29399 + if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
29400 + send_sig(SIGKILL, current, 0);
29401 + goto out_free_dentry;
29402 + }
29403 +#endif
29404 +
29405 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
29406 + pax_set_initial_flags(bprm);
29407 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
29408 + if (pax_set_initial_flags_func)
29409 + (pax_set_initial_flags_func)(bprm);
29410 +#endif
29411 +
29412 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
29413 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
29414 + current->mm->context.user_cs_limit = PAGE_SIZE;
29415 + current->mm->def_flags |= VM_PAGEEXEC;
29416 + }
29417 +#endif
29418 +
29419 +#ifdef CONFIG_PAX_SEGMEXEC
29420 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
29421 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
29422 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
29423 + pax_task_size = SEGMEXEC_TASK_SIZE;
29424 + }
29425 +#endif
29426 +
29427 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
29428 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
29429 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
29430 + put_cpu();
29431 + }
29432 +#endif
29433
29434 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
29435 may depend on the personality. */
29436 SET_PERSONALITY(loc->elf_ex);
29437 +
29438 +#ifdef CONFIG_PAX_ASLR
29439 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
29440 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
29441 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
29442 + }
29443 +#endif
29444 +
29445 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
29446 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
29447 + executable_stack = EXSTACK_DISABLE_X;
29448 + current->personality &= ~READ_IMPLIES_EXEC;
29449 + } else
29450 +#endif
29451 +
29452 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
29453 current->personality |= READ_IMPLIES_EXEC;
29454
29455 @@ -805,6 +1070,20 @@ static int load_elf_binary(struct linux_
29456 #else
29457 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
29458 #endif
29459 +
29460 +#ifdef CONFIG_PAX_RANDMMAP
29461 + /* PaX: randomize base address at the default exe base if requested */
29462 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
29463 +#ifdef CONFIG_SPARC64
29464 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
29465 +#else
29466 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
29467 +#endif
29468 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
29469 + elf_flags |= MAP_FIXED;
29470 + }
29471 +#endif
29472 +
29473 }
29474
29475 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
29476 @@ -837,9 +1116,9 @@ static int load_elf_binary(struct linux_
29477 * allowed task size. Note that p_filesz must always be
29478 * <= p_memsz so it is only necessary to check p_memsz.
29479 */
29480 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
29481 - elf_ppnt->p_memsz > TASK_SIZE ||
29482 - TASK_SIZE - elf_ppnt->p_memsz < k) {
29483 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
29484 + elf_ppnt->p_memsz > pax_task_size ||
29485 + pax_task_size - elf_ppnt->p_memsz < k) {
29486 /* set_brk can never work. Avoid overflows. */
29487 send_sig(SIGKILL, current, 0);
29488 retval = -EINVAL;
29489 @@ -867,6 +1146,11 @@ static int load_elf_binary(struct linux_
29490 start_data += load_bias;
29491 end_data += load_bias;
29492
29493 +#ifdef CONFIG_PAX_RANDMMAP
29494 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
29495 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
29496 +#endif
29497 +
29498 /* Calling set_brk effectively mmaps the pages that we need
29499 * for the bss and break sections. We must do this before
29500 * mapping in the interpreter, to make sure it doesn't wind
29501 @@ -878,9 +1162,11 @@ static int load_elf_binary(struct linux_
29502 goto out_free_dentry;
29503 }
29504 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
29505 - send_sig(SIGSEGV, current, 0);
29506 - retval = -EFAULT; /* Nobody gets to see this, but.. */
29507 - goto out_free_dentry;
29508 + /*
29509 + * This bss-zeroing can fail if the ELF
29510 + * file specifies odd protections. So
29511 + * we don't check the return value
29512 + */
29513 }
29514
29515 if (elf_interpreter) {
29516 @@ -1091,7 +1377,7 @@ out:
29517 * Decide what to dump of a segment, part, all or none.
29518 */
29519 static unsigned long vma_dump_size(struct vm_area_struct *vma,
29520 - unsigned long mm_flags)
29521 + unsigned long mm_flags, long signr)
29522 {
29523 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
29524
29525 @@ -1125,7 +1411,7 @@ static unsigned long vma_dump_size(struc
29526 if (vma->vm_file == NULL)
29527 return 0;
29528
29529 - if (FILTER(MAPPED_PRIVATE))
29530 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
29531 goto whole;
29532
29533 /*
29534 @@ -1347,9 +1633,9 @@ static void fill_auxv_note(struct memelf
29535 {
29536 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
29537 int i = 0;
29538 - do
29539 + do {
29540 i += 2;
29541 - while (auxv[i - 2] != AT_NULL);
29542 + } while (auxv[i - 2] != AT_NULL);
29543 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
29544 }
29545
29546 @@ -1855,14 +2141,14 @@ static void fill_extnum_info(struct elfh
29547 }
29548
29549 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
29550 - unsigned long mm_flags)
29551 + struct coredump_params *cprm)
29552 {
29553 struct vm_area_struct *vma;
29554 size_t size = 0;
29555
29556 for (vma = first_vma(current, gate_vma); vma != NULL;
29557 vma = next_vma(vma, gate_vma))
29558 - size += vma_dump_size(vma, mm_flags);
29559 + size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
29560 return size;
29561 }
29562
29563 @@ -1956,7 +2242,7 @@ static int elf_core_dump(struct coredump
29564
29565 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
29566
29567 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
29568 + offset += elf_core_vma_data_size(gate_vma, cprm);
29569 offset += elf_core_extra_data_size();
29570 e_shoff = offset;
29571
29572 @@ -1970,10 +2256,12 @@ static int elf_core_dump(struct coredump
29573 offset = dataoff;
29574
29575 size += sizeof(*elf);
29576 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
29577 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
29578 goto end_coredump;
29579
29580 size += sizeof(*phdr4note);
29581 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
29582 if (size > cprm->limit
29583 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
29584 goto end_coredump;
29585 @@ -1987,7 +2275,7 @@ static int elf_core_dump(struct coredump
29586 phdr.p_offset = offset;
29587 phdr.p_vaddr = vma->vm_start;
29588 phdr.p_paddr = 0;
29589 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
29590 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
29591 phdr.p_memsz = vma->vm_end - vma->vm_start;
29592 offset += phdr.p_filesz;
29593 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
29594 @@ -1998,6 +2286,7 @@ static int elf_core_dump(struct coredump
29595 phdr.p_align = ELF_EXEC_PAGESIZE;
29596
29597 size += sizeof(phdr);
29598 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
29599 if (size > cprm->limit
29600 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
29601 goto end_coredump;
29602 @@ -2022,7 +2311,7 @@ static int elf_core_dump(struct coredump
29603 unsigned long addr;
29604 unsigned long end;
29605
29606 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
29607 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
29608
29609 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
29610 struct page *page;
29611 @@ -2031,6 +2320,7 @@ static int elf_core_dump(struct coredump
29612 page = get_dump_page(addr);
29613 if (page) {
29614 void *kaddr = kmap(page);
29615 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
29616 stop = ((size += PAGE_SIZE) > cprm->limit) ||
29617 !dump_write(cprm->file, kaddr,
29618 PAGE_SIZE);
29619 @@ -2048,6 +2338,7 @@ static int elf_core_dump(struct coredump
29620
29621 if (e_phnum == PN_XNUM) {
29622 size += sizeof(*shdr4extnum);
29623 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
29624 if (size > cprm->limit
29625 || !dump_write(cprm->file, shdr4extnum,
29626 sizeof(*shdr4extnum)))
29627 @@ -2068,6 +2359,97 @@ out:
29628
29629 #endif /* CONFIG_ELF_CORE */
29630
29631 +#ifdef CONFIG_PAX_MPROTECT
29632 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
29633 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
29634 + * we'll remove VM_MAYWRITE for good on RELRO segments.
29635 + *
29636 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
29637 + * basis because we want to allow the common case and not the special ones.
29638 + */
29639 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
29640 +{
29641 + struct elfhdr elf_h;
29642 + struct elf_phdr elf_p;
29643 + unsigned long i;
29644 + unsigned long oldflags;
29645 + bool is_textrel_rw, is_textrel_rx, is_relro;
29646 +
29647 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
29648 + return;
29649 +
29650 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
29651 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
29652 +
29653 +#ifdef CONFIG_PAX_ELFRELOCS
29654 + /* possible TEXTREL */
29655 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
29656 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
29657 +#else
29658 + is_textrel_rw = false;
29659 + is_textrel_rx = false;
29660 +#endif
29661 +
29662 + /* possible RELRO */
29663 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
29664 +
29665 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
29666 + return;
29667 +
29668 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
29669 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
29670 +
29671 +#ifdef CONFIG_PAX_ETEXECRELOCS
29672 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
29673 +#else
29674 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
29675 +#endif
29676 +
29677 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
29678 + !elf_check_arch(&elf_h) ||
29679 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
29680 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
29681 + return;
29682 +
29683 + for (i = 0UL; i < elf_h.e_phnum; i++) {
29684 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
29685 + return;
29686 + switch (elf_p.p_type) {
29687 + case PT_DYNAMIC:
29688 + if (!is_textrel_rw && !is_textrel_rx)
29689 + continue;
29690 + i = 0UL;
29691 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
29692 + elf_dyn dyn;
29693 +
29694 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
29695 + return;
29696 + if (dyn.d_tag == DT_NULL)
29697 + return;
29698 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
29699 + gr_log_textrel(vma);
29700 + if (is_textrel_rw)
29701 + vma->vm_flags |= VM_MAYWRITE;
29702 + else
29703 + /* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
29704 + vma->vm_flags &= ~VM_MAYWRITE;
29705 + return;
29706 + }
29707 + i++;
29708 + }
29709 + return;
29710 +
29711 + case PT_GNU_RELRO:
29712 + if (!is_relro)
29713 + continue;
29714 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
29715 + vma->vm_flags &= ~VM_MAYWRITE;
29716 + return;
29717 + }
29718 + }
29719 +}
29720 +#endif
29721 +
29722 static int __init init_elf_binfmt(void)
29723 {
29724 return register_binfmt(&elf_format);
29725 diff -urNp linux-2.6.35.4/fs/binfmt_flat.c linux-2.6.35.4/fs/binfmt_flat.c
29726 --- linux-2.6.35.4/fs/binfmt_flat.c 2010-08-26 19:47:12.000000000 -0400
29727 +++ linux-2.6.35.4/fs/binfmt_flat.c 2010-09-17 20:12:09.000000000 -0400
29728 @@ -567,7 +567,9 @@ static int load_flat_file(struct linux_b
29729 realdatastart = (unsigned long) -ENOMEM;
29730 printk("Unable to allocate RAM for process data, errno %d\n",
29731 (int)-realdatastart);
29732 + down_write(&current->mm->mmap_sem);
29733 do_munmap(current->mm, textpos, text_len);
29734 + up_write(&current->mm->mmap_sem);
29735 ret = realdatastart;
29736 goto err;
29737 }
29738 @@ -591,8 +593,10 @@ static int load_flat_file(struct linux_b
29739 }
29740 if (IS_ERR_VALUE(result)) {
29741 printk("Unable to read data+bss, errno %d\n", (int)-result);
29742 + down_write(&current->mm->mmap_sem);
29743 do_munmap(current->mm, textpos, text_len);
29744 do_munmap(current->mm, realdatastart, len);
29745 + up_write(&current->mm->mmap_sem);
29746 ret = result;
29747 goto err;
29748 }
29749 @@ -661,8 +665,10 @@ static int load_flat_file(struct linux_b
29750 }
29751 if (IS_ERR_VALUE(result)) {
29752 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
29753 + down_write(&current->mm->mmap_sem);
29754 do_munmap(current->mm, textpos, text_len + data_len + extra +
29755 MAX_SHARED_LIBS * sizeof(unsigned long));
29756 + up_write(&current->mm->mmap_sem);
29757 ret = result;
29758 goto err;
29759 }
29760 diff -urNp linux-2.6.35.4/fs/binfmt_misc.c linux-2.6.35.4/fs/binfmt_misc.c
29761 --- linux-2.6.35.4/fs/binfmt_misc.c 2010-08-26 19:47:12.000000000 -0400
29762 +++ linux-2.6.35.4/fs/binfmt_misc.c 2010-09-17 20:12:09.000000000 -0400
29763 @@ -693,7 +693,7 @@ static int bm_fill_super(struct super_bl
29764 static struct tree_descr bm_files[] = {
29765 [2] = {"status", &bm_status_operations, S_IWUSR|S_IRUGO},
29766 [3] = {"register", &bm_register_operations, S_IWUSR},
29767 - /* last one */ {""}
29768 + /* last one */ {"", NULL, 0}
29769 };
29770 int err = simple_fill_super(sb, 0x42494e4d, bm_files);
29771 if (!err)
29772 diff -urNp linux-2.6.35.4/fs/bio.c linux-2.6.35.4/fs/bio.c
29773 --- linux-2.6.35.4/fs/bio.c 2010-08-26 19:47:12.000000000 -0400
29774 +++ linux-2.6.35.4/fs/bio.c 2010-09-17 20:12:09.000000000 -0400
29775 @@ -1213,7 +1213,7 @@ static void bio_copy_kern_endio(struct b
29776 const int read = bio_data_dir(bio) == READ;
29777 struct bio_map_data *bmd = bio->bi_private;
29778 int i;
29779 - char *p = bmd->sgvecs[0].iov_base;
29780 + char *p = (__force char *)bmd->sgvecs[0].iov_base;
29781
29782 __bio_for_each_segment(bvec, bio, i, 0) {
29783 char *addr = page_address(bvec->bv_page);
29784 diff -urNp linux-2.6.35.4/fs/block_dev.c linux-2.6.35.4/fs/block_dev.c
29785 --- linux-2.6.35.4/fs/block_dev.c 2010-08-26 19:47:12.000000000 -0400
29786 +++ linux-2.6.35.4/fs/block_dev.c 2010-09-17 20:12:09.000000000 -0400
29787 @@ -647,7 +647,7 @@ static bool bd_may_claim(struct block_de
29788 else if (bdev->bd_contains == bdev)
29789 return true; /* is a whole device which isn't held */
29790
29791 - else if (whole->bd_holder == bd_claim)
29792 + else if (whole->bd_holder == (void *)bd_claim)
29793 return true; /* is a partition of a device that is being partitioned */
29794 else if (whole->bd_holder != NULL)
29795 return false; /* is a partition of a held device */
29796 diff -urNp linux-2.6.35.4/fs/btrfs/ctree.c linux-2.6.35.4/fs/btrfs/ctree.c
29797 --- linux-2.6.35.4/fs/btrfs/ctree.c 2010-08-26 19:47:12.000000000 -0400
29798 +++ linux-2.6.35.4/fs/btrfs/ctree.c 2010-09-17 20:12:09.000000000 -0400
29799 @@ -3763,7 +3763,6 @@ setup_items_for_insert(struct btrfs_tran
29800
29801 ret = 0;
29802 if (slot == 0) {
29803 - struct btrfs_disk_key disk_key;
29804 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
29805 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
29806 }
29807 diff -urNp linux-2.6.35.4/fs/btrfs/disk-io.c linux-2.6.35.4/fs/btrfs/disk-io.c
29808 --- linux-2.6.35.4/fs/btrfs/disk-io.c 2010-08-26 19:47:12.000000000 -0400
29809 +++ linux-2.6.35.4/fs/btrfs/disk-io.c 2010-09-17 20:12:09.000000000 -0400
29810 @@ -40,7 +40,7 @@
29811 #include "tree-log.h"
29812 #include "free-space-cache.h"
29813
29814 -static struct extent_io_ops btree_extent_io_ops;
29815 +static const struct extent_io_ops btree_extent_io_ops;
29816 static void end_workqueue_fn(struct btrfs_work *work);
29817 static void free_fs_root(struct btrfs_root *root);
29818
29819 @@ -2597,7 +2597,7 @@ out:
29820 return 0;
29821 }
29822
29823 -static struct extent_io_ops btree_extent_io_ops = {
29824 +static const struct extent_io_ops btree_extent_io_ops = {
29825 .write_cache_pages_lock_hook = btree_lock_page_hook,
29826 .readpage_end_io_hook = btree_readpage_end_io_hook,
29827 .submit_bio_hook = btree_submit_bio_hook,
29828 diff -urNp linux-2.6.35.4/fs/btrfs/extent_io.h linux-2.6.35.4/fs/btrfs/extent_io.h
29829 --- linux-2.6.35.4/fs/btrfs/extent_io.h 2010-08-26 19:47:12.000000000 -0400
29830 +++ linux-2.6.35.4/fs/btrfs/extent_io.h 2010-09-17 20:12:09.000000000 -0400
29831 @@ -51,36 +51,36 @@ typedef int (extent_submit_bio_hook_t)(s
29832 struct bio *bio, int mirror_num,
29833 unsigned long bio_flags, u64 bio_offset);
29834 struct extent_io_ops {
29835 - int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
29836 + int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
29837 u64 start, u64 end, int *page_started,
29838 unsigned long *nr_written);
29839 - int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
29840 - int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
29841 + int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
29842 + int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
29843 extent_submit_bio_hook_t *submit_bio_hook;
29844 - int (*merge_bio_hook)(struct page *page, unsigned long offset,
29845 + int (* const merge_bio_hook)(struct page *page, unsigned long offset,
29846 size_t size, struct bio *bio,
29847 unsigned long bio_flags);
29848 - int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
29849 - int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
29850 + int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
29851 + int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
29852 u64 start, u64 end,
29853 struct extent_state *state);
29854 - int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
29855 + int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
29856 u64 start, u64 end,
29857 struct extent_state *state);
29858 - int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
29859 + int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
29860 struct extent_state *state);
29861 - int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
29862 + int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
29863 struct extent_state *state, int uptodate);
29864 - int (*set_bit_hook)(struct inode *inode, struct extent_state *state,
29865 + int (* const set_bit_hook)(struct inode *inode, struct extent_state *state,
29866 int *bits);
29867 - int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
29868 + int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
29869 int *bits);
29870 - int (*merge_extent_hook)(struct inode *inode,
29871 + int (* const merge_extent_hook)(struct inode *inode,
29872 struct extent_state *new,
29873 struct extent_state *other);
29874 - int (*split_extent_hook)(struct inode *inode,
29875 + int (* const split_extent_hook)(struct inode *inode,
29876 struct extent_state *orig, u64 split);
29877 - int (*write_cache_pages_lock_hook)(struct page *page);
29878 + int (* const write_cache_pages_lock_hook)(struct page *page);
29879 };
29880
29881 struct extent_io_tree {
29882 @@ -90,7 +90,7 @@ struct extent_io_tree {
29883 u64 dirty_bytes;
29884 spinlock_t lock;
29885 spinlock_t buffer_lock;
29886 - struct extent_io_ops *ops;
29887 + const struct extent_io_ops *ops;
29888 };
29889
29890 struct extent_state {
29891 diff -urNp linux-2.6.35.4/fs/btrfs/free-space-cache.c linux-2.6.35.4/fs/btrfs/free-space-cache.c
29892 --- linux-2.6.35.4/fs/btrfs/free-space-cache.c 2010-08-26 19:47:12.000000000 -0400
29893 +++ linux-2.6.35.4/fs/btrfs/free-space-cache.c 2010-09-17 20:12:09.000000000 -0400
29894 @@ -1075,8 +1075,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
29895
29896 while(1) {
29897 if (entry->bytes < bytes || entry->offset < min_start) {
29898 - struct rb_node *node;
29899 -
29900 node = rb_next(&entry->offset_index);
29901 if (!node)
29902 break;
29903 @@ -1227,7 +1225,7 @@ again:
29904 */
29905 while (entry->bitmap || found_bitmap ||
29906 (!entry->bitmap && entry->bytes < min_bytes)) {
29907 - struct rb_node *node = rb_next(&entry->offset_index);
29908 + node = rb_next(&entry->offset_index);
29909
29910 if (entry->bitmap && entry->bytes > bytes + empty_size) {
29911 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
29912 diff -urNp linux-2.6.35.4/fs/btrfs/inode.c linux-2.6.35.4/fs/btrfs/inode.c
29913 --- linux-2.6.35.4/fs/btrfs/inode.c 2010-08-26 19:47:12.000000000 -0400
29914 +++ linux-2.6.35.4/fs/btrfs/inode.c 2010-09-17 20:12:09.000000000 -0400
29915 @@ -64,7 +64,7 @@ static const struct inode_operations btr
29916 static const struct address_space_operations btrfs_aops;
29917 static const struct address_space_operations btrfs_symlink_aops;
29918 static const struct file_operations btrfs_dir_file_operations;
29919 -static struct extent_io_ops btrfs_extent_io_ops;
29920 +static const struct extent_io_ops btrfs_extent_io_ops;
29921
29922 static struct kmem_cache *btrfs_inode_cachep;
29923 struct kmem_cache *btrfs_trans_handle_cachep;
29924 @@ -6958,7 +6958,7 @@ static const struct file_operations btrf
29925 .fsync = btrfs_sync_file,
29926 };
29927
29928 -static struct extent_io_ops btrfs_extent_io_ops = {
29929 +static const struct extent_io_ops btrfs_extent_io_ops = {
29930 .fill_delalloc = run_delalloc_range,
29931 .submit_bio_hook = btrfs_submit_bio_hook,
29932 .merge_bio_hook = btrfs_merge_bio_hook,
29933 diff -urNp linux-2.6.35.4/fs/buffer.c linux-2.6.35.4/fs/buffer.c
29934 --- linux-2.6.35.4/fs/buffer.c 2010-08-26 19:47:12.000000000 -0400
29935 +++ linux-2.6.35.4/fs/buffer.c 2010-09-17 20:12:37.000000000 -0400
29936 @@ -25,6 +25,7 @@
29937 #include <linux/percpu.h>
29938 #include <linux/slab.h>
29939 #include <linux/capability.h>
29940 +#include <linux/security.h>
29941 #include <linux/blkdev.h>
29942 #include <linux/file.h>
29943 #include <linux/quotaops.h>
29944 diff -urNp linux-2.6.35.4/fs/cachefiles/bind.c linux-2.6.35.4/fs/cachefiles/bind.c
29945 --- linux-2.6.35.4/fs/cachefiles/bind.c 2010-08-26 19:47:12.000000000 -0400
29946 +++ linux-2.6.35.4/fs/cachefiles/bind.c 2010-09-17 20:12:09.000000000 -0400
29947 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
29948 args);
29949
29950 /* start by checking things over */
29951 - ASSERT(cache->fstop_percent >= 0 &&
29952 - cache->fstop_percent < cache->fcull_percent &&
29953 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
29954 cache->fcull_percent < cache->frun_percent &&
29955 cache->frun_percent < 100);
29956
29957 - ASSERT(cache->bstop_percent >= 0 &&
29958 - cache->bstop_percent < cache->bcull_percent &&
29959 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
29960 cache->bcull_percent < cache->brun_percent &&
29961 cache->brun_percent < 100);
29962
29963 diff -urNp linux-2.6.35.4/fs/cachefiles/daemon.c linux-2.6.35.4/fs/cachefiles/daemon.c
29964 --- linux-2.6.35.4/fs/cachefiles/daemon.c 2010-08-26 19:47:12.000000000 -0400
29965 +++ linux-2.6.35.4/fs/cachefiles/daemon.c 2010-09-17 20:12:09.000000000 -0400
29966 @@ -195,7 +195,7 @@ static ssize_t cachefiles_daemon_read(st
29967 if (n > buflen)
29968 return -EMSGSIZE;
29969
29970 - if (copy_to_user(_buffer, buffer, n) != 0)
29971 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
29972 return -EFAULT;
29973
29974 return n;
29975 @@ -221,7 +221,7 @@ static ssize_t cachefiles_daemon_write(s
29976 if (test_bit(CACHEFILES_DEAD, &cache->flags))
29977 return -EIO;
29978
29979 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
29980 + if (datalen > PAGE_SIZE - 1)
29981 return -EOPNOTSUPP;
29982
29983 /* drag the command string into the kernel so we can parse it */
29984 @@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struc
29985 if (args[0] != '%' || args[1] != '\0')
29986 return -EINVAL;
29987
29988 - if (fstop < 0 || fstop >= cache->fcull_percent)
29989 + if (fstop >= cache->fcull_percent)
29990 return cachefiles_daemon_range_error(cache, args);
29991
29992 cache->fstop_percent = fstop;
29993 @@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struc
29994 if (args[0] != '%' || args[1] != '\0')
29995 return -EINVAL;
29996
29997 - if (bstop < 0 || bstop >= cache->bcull_percent)
29998 + if (bstop >= cache->bcull_percent)
29999 return cachefiles_daemon_range_error(cache, args);
30000
30001 cache->bstop_percent = bstop;
30002 diff -urNp linux-2.6.35.4/fs/cachefiles/rdwr.c linux-2.6.35.4/fs/cachefiles/rdwr.c
30003 --- linux-2.6.35.4/fs/cachefiles/rdwr.c 2010-08-26 19:47:12.000000000 -0400
30004 +++ linux-2.6.35.4/fs/cachefiles/rdwr.c 2010-09-17 20:12:09.000000000 -0400
30005 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache
30006 old_fs = get_fs();
30007 set_fs(KERNEL_DS);
30008 ret = file->f_op->write(
30009 - file, (const void __user *) data, len, &pos);
30010 + file, (__force const void __user *) data, len, &pos);
30011 set_fs(old_fs);
30012 kunmap(page);
30013 if (ret != len)
30014 diff -urNp linux-2.6.35.4/fs/cifs/cifs_uniupr.h linux-2.6.35.4/fs/cifs/cifs_uniupr.h
30015 --- linux-2.6.35.4/fs/cifs/cifs_uniupr.h 2010-08-26 19:47:12.000000000 -0400
30016 +++ linux-2.6.35.4/fs/cifs/cifs_uniupr.h 2010-09-17 20:12:09.000000000 -0400
30017 @@ -132,7 +132,7 @@ const struct UniCaseRange CifsUniUpperRa
30018 {0x0490, 0x04cc, UniCaseRangeU0490},
30019 {0x1e00, 0x1ffc, UniCaseRangeU1e00},
30020 {0xff40, 0xff5a, UniCaseRangeUff40},
30021 - {0}
30022 + {0, 0, NULL}
30023 };
30024 #endif
30025
30026 diff -urNp linux-2.6.35.4/fs/cifs/link.c linux-2.6.35.4/fs/cifs/link.c
30027 --- linux-2.6.35.4/fs/cifs/link.c 2010-08-26 19:47:12.000000000 -0400
30028 +++ linux-2.6.35.4/fs/cifs/link.c 2010-09-17 20:12:09.000000000 -0400
30029 @@ -216,7 +216,7 @@ cifs_symlink(struct inode *inode, struct
30030
30031 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
30032 {
30033 - char *p = nd_get_link(nd);
30034 + const char *p = nd_get_link(nd);
30035 if (!IS_ERR(p))
30036 kfree(p);
30037 }
30038 diff -urNp linux-2.6.35.4/fs/compat_binfmt_elf.c linux-2.6.35.4/fs/compat_binfmt_elf.c
30039 --- linux-2.6.35.4/fs/compat_binfmt_elf.c 2010-08-26 19:47:12.000000000 -0400
30040 +++ linux-2.6.35.4/fs/compat_binfmt_elf.c 2010-09-17 20:12:09.000000000 -0400
30041 @@ -30,11 +30,13 @@
30042 #undef elf_phdr
30043 #undef elf_shdr
30044 #undef elf_note
30045 +#undef elf_dyn
30046 #undef elf_addr_t
30047 #define elfhdr elf32_hdr
30048 #define elf_phdr elf32_phdr
30049 #define elf_shdr elf32_shdr
30050 #define elf_note elf32_note
30051 +#define elf_dyn Elf32_Dyn
30052 #define elf_addr_t Elf32_Addr
30053
30054 /*
30055 diff -urNp linux-2.6.35.4/fs/compat.c linux-2.6.35.4/fs/compat.c
30056 --- linux-2.6.35.4/fs/compat.c 2010-08-26 19:47:12.000000000 -0400
30057 +++ linux-2.6.35.4/fs/compat.c 2010-09-17 20:12:37.000000000 -0400
30058 @@ -1433,14 +1433,12 @@ static int compat_copy_strings(int argc,
30059 if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
30060 struct page *page;
30061
30062 -#ifdef CONFIG_STACK_GROWSUP
30063 ret = expand_stack_downwards(bprm->vma, pos);
30064 if (ret < 0) {
30065 /* We've exceed the stack rlimit. */
30066 ret = -E2BIG;
30067 goto out;
30068 }
30069 -#endif
30070 ret = get_user_pages(current, bprm->mm, pos,
30071 1, 1, 1, &page, NULL);
30072 if (ret <= 0) {
30073 @@ -1486,6 +1484,11 @@ int compat_do_execve(char * filename,
30074 compat_uptr_t __user *envp,
30075 struct pt_regs * regs)
30076 {
30077 +#ifdef CONFIG_GRKERNSEC
30078 + struct file *old_exec_file;
30079 + struct acl_subject_label *old_acl;
30080 + struct rlimit old_rlim[RLIM_NLIMITS];
30081 +#endif
30082 struct linux_binprm *bprm;
30083 struct file *file;
30084 struct files_struct *displaced;
30085 @@ -1522,6 +1525,14 @@ int compat_do_execve(char * filename,
30086 bprm->filename = filename;
30087 bprm->interp = filename;
30088
30089 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
30090 + retval = -EAGAIN;
30091 + if (gr_handle_nproc())
30092 + goto out_file;
30093 + retval = -EACCES;
30094 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
30095 + goto out_file;
30096 +
30097 retval = bprm_mm_init(bprm);
30098 if (retval)
30099 goto out_file;
30100 @@ -1551,9 +1562,40 @@ int compat_do_execve(char * filename,
30101 if (retval < 0)
30102 goto out;
30103
30104 + if (!gr_tpe_allow(file)) {
30105 + retval = -EACCES;
30106 + goto out;
30107 + }
30108 +
30109 + if (gr_check_crash_exec(file)) {
30110 + retval = -EACCES;
30111 + goto out;
30112 + }
30113 +
30114 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
30115 +
30116 + gr_handle_exec_args(bprm, (char __user * __user *)argv);
30117 +
30118 +#ifdef CONFIG_GRKERNSEC
30119 + old_acl = current->acl;
30120 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
30121 + old_exec_file = current->exec_file;
30122 + get_file(file);
30123 + current->exec_file = file;
30124 +#endif
30125 +
30126 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
30127 + bprm->unsafe & LSM_UNSAFE_SHARE);
30128 + if (retval < 0)
30129 + goto out_fail;
30130 +
30131 retval = search_binary_handler(bprm, regs);
30132 if (retval < 0)
30133 - goto out;
30134 + goto out_fail;
30135 +#ifdef CONFIG_GRKERNSEC
30136 + if (old_exec_file)
30137 + fput(old_exec_file);
30138 +#endif
30139
30140 /* execve succeeded */
30141 current->fs->in_exec = 0;
30142 @@ -1564,6 +1606,14 @@ int compat_do_execve(char * filename,
30143 put_files_struct(displaced);
30144 return retval;
30145
30146 +out_fail:
30147 +#ifdef CONFIG_GRKERNSEC
30148 + current->acl = old_acl;
30149 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
30150 + fput(current->exec_file);
30151 + current->exec_file = old_exec_file;
30152 +#endif
30153 +
30154 out:
30155 if (bprm->mm)
30156 mmput(bprm->mm);
30157 diff -urNp linux-2.6.35.4/fs/debugfs/inode.c linux-2.6.35.4/fs/debugfs/inode.c
30158 --- linux-2.6.35.4/fs/debugfs/inode.c 2010-08-26 19:47:12.000000000 -0400
30159 +++ linux-2.6.35.4/fs/debugfs/inode.c 2010-09-17 20:12:09.000000000 -0400
30160 @@ -129,7 +129,7 @@ static inline int debugfs_positive(struc
30161
30162 static int debug_fill_super(struct super_block *sb, void *data, int silent)
30163 {
30164 - static struct tree_descr debug_files[] = {{""}};
30165 + static struct tree_descr debug_files[] = {{"", NULL, 0}};
30166
30167 return simple_fill_super(sb, DEBUGFS_MAGIC, debug_files);
30168 }
30169 diff -urNp linux-2.6.35.4/fs/dlm/lockspace.c linux-2.6.35.4/fs/dlm/lockspace.c
30170 --- linux-2.6.35.4/fs/dlm/lockspace.c 2010-08-26 19:47:12.000000000 -0400
30171 +++ linux-2.6.35.4/fs/dlm/lockspace.c 2010-09-17 20:12:09.000000000 -0400
30172 @@ -200,7 +200,7 @@ static int dlm_uevent(struct kset *kset,
30173 return 0;
30174 }
30175
30176 -static struct kset_uevent_ops dlm_uevent_ops = {
30177 +static const struct kset_uevent_ops dlm_uevent_ops = {
30178 .uevent = dlm_uevent,
30179 };
30180
30181 diff -urNp linux-2.6.35.4/fs/ecryptfs/inode.c linux-2.6.35.4/fs/ecryptfs/inode.c
30182 --- linux-2.6.35.4/fs/ecryptfs/inode.c 2010-08-26 19:47:12.000000000 -0400
30183 +++ linux-2.6.35.4/fs/ecryptfs/inode.c 2010-09-17 20:12:09.000000000 -0400
30184 @@ -658,7 +658,7 @@ static int ecryptfs_readlink_lower(struc
30185 old_fs = get_fs();
30186 set_fs(get_ds());
30187 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
30188 - (char __user *)lower_buf,
30189 + (__force char __user *)lower_buf,
30190 lower_bufsiz);
30191 set_fs(old_fs);
30192 if (rc < 0)
30193 @@ -704,7 +704,7 @@ static void *ecryptfs_follow_link(struct
30194 }
30195 old_fs = get_fs();
30196 set_fs(get_ds());
30197 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
30198 + rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
30199 set_fs(old_fs);
30200 if (rc < 0) {
30201 kfree(buf);
30202 @@ -719,7 +719,7 @@ out:
30203 static void
30204 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
30205 {
30206 - char *buf = nd_get_link(nd);
30207 + const char *buf = nd_get_link(nd);
30208 if (!IS_ERR(buf)) {
30209 /* Free the char* */
30210 kfree(buf);
30211 diff -urNp linux-2.6.35.4/fs/ecryptfs/miscdev.c linux-2.6.35.4/fs/ecryptfs/miscdev.c
30212 --- linux-2.6.35.4/fs/ecryptfs/miscdev.c 2010-08-26 19:47:12.000000000 -0400
30213 +++ linux-2.6.35.4/fs/ecryptfs/miscdev.c 2010-09-17 20:12:09.000000000 -0400
30214 @@ -328,7 +328,7 @@ check_list:
30215 goto out_unlock_msg_ctx;
30216 i = 5;
30217 if (msg_ctx->msg) {
30218 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
30219 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
30220 goto out_unlock_msg_ctx;
30221 i += packet_length_size;
30222 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
30223 diff -urNp linux-2.6.35.4/fs/exec.c linux-2.6.35.4/fs/exec.c
30224 --- linux-2.6.35.4/fs/exec.c 2010-08-26 19:47:12.000000000 -0400
30225 +++ linux-2.6.35.4/fs/exec.c 2010-09-17 20:12:37.000000000 -0400
30226 @@ -55,12 +55,24 @@
30227 #include <linux/fsnotify.h>
30228 #include <linux/fs_struct.h>
30229 #include <linux/pipe_fs_i.h>
30230 +#include <linux/random.h>
30231 +#include <linux/seq_file.h>
30232 +
30233 +#ifdef CONFIG_PAX_REFCOUNT
30234 +#include <linux/kallsyms.h>
30235 +#include <linux/kdebug.h>
30236 +#endif
30237
30238 #include <asm/uaccess.h>
30239 #include <asm/mmu_context.h>
30240 #include <asm/tlb.h>
30241 #include "internal.h"
30242
30243 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
30244 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
30245 +EXPORT_SYMBOL(pax_set_initial_flags_func);
30246 +#endif
30247 +
30248 int core_uses_pid;
30249 char core_pattern[CORENAME_MAX_SIZE] = "core";
30250 unsigned int core_pipe_limit;
30251 @@ -114,7 +126,7 @@ SYSCALL_DEFINE1(uselib, const char __use
30252 goto out;
30253
30254 file = do_filp_open(AT_FDCWD, tmp,
30255 - O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
30256 + O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
30257 MAY_READ | MAY_EXEC | MAY_OPEN);
30258 putname(tmp);
30259 error = PTR_ERR(file);
30260 @@ -162,18 +174,10 @@ static struct page *get_arg_page(struct
30261 int write)
30262 {
30263 struct page *page;
30264 - int ret;
30265
30266 -#ifdef CONFIG_STACK_GROWSUP
30267 - if (write) {
30268 - ret = expand_stack_downwards(bprm->vma, pos);
30269 - if (ret < 0)
30270 - return NULL;
30271 - }
30272 -#endif
30273 - ret = get_user_pages(current, bprm->mm, pos,
30274 - 1, write, 1, &page, NULL);
30275 - if (ret <= 0)
30276 + if (0 > expand_stack_downwards(bprm->vma, pos))
30277 + return NULL;
30278 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
30279 return NULL;
30280
30281 if (write) {
30282 @@ -246,6 +250,11 @@ static int __bprm_mm_init(struct linux_b
30283 vma->vm_end = STACK_TOP_MAX;
30284 vma->vm_start = vma->vm_end - PAGE_SIZE;
30285 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
30286 +
30287 +#ifdef CONFIG_PAX_SEGMEXEC
30288 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
30289 +#endif
30290 +
30291 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
30292 INIT_LIST_HEAD(&vma->anon_vma_chain);
30293 err = insert_vm_struct(mm, vma);
30294 @@ -255,6 +264,12 @@ static int __bprm_mm_init(struct linux_b
30295 mm->stack_vm = mm->total_vm = 1;
30296 up_write(&mm->mmap_sem);
30297 bprm->p = vma->vm_end - sizeof(void *);
30298 +
30299 +#ifdef CONFIG_PAX_RANDUSTACK
30300 + if (randomize_va_space)
30301 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
30302 +#endif
30303 +
30304 return 0;
30305 err:
30306 up_write(&mm->mmap_sem);
30307 @@ -476,7 +491,7 @@ int copy_strings_kernel(int argc,char **
30308 int r;
30309 mm_segment_t oldfs = get_fs();
30310 set_fs(KERNEL_DS);
30311 - r = copy_strings(argc, (char __user * __user *)argv, bprm);
30312 + r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
30313 set_fs(oldfs);
30314 return r;
30315 }
30316 @@ -506,7 +521,8 @@ static int shift_arg_pages(struct vm_are
30317 unsigned long new_end = old_end - shift;
30318 struct mmu_gather *tlb;
30319
30320 - BUG_ON(new_start > new_end);
30321 + if (new_start >= new_end || new_start < mmap_min_addr)
30322 + return -EFAULT;
30323
30324 /*
30325 * ensure there are no vmas between where we want to go
30326 @@ -515,6 +531,10 @@ static int shift_arg_pages(struct vm_are
30327 if (vma != find_vma(mm, new_start))
30328 return -EFAULT;
30329
30330 +#ifdef CONFIG_PAX_SEGMEXEC
30331 + BUG_ON(pax_find_mirror_vma(vma));
30332 +#endif
30333 +
30334 /*
30335 * cover the whole range: [new_start, old_end)
30336 */
30337 @@ -605,8 +625,28 @@ int setup_arg_pages(struct linux_binprm
30338 bprm->exec -= stack_shift;
30339
30340 down_write(&mm->mmap_sem);
30341 +
30342 + /* Move stack pages down in memory. */
30343 + if (stack_shift) {
30344 + ret = shift_arg_pages(vma, stack_shift);
30345 + if (ret)
30346 + goto out_unlock;
30347 + }
30348 +
30349 vm_flags = VM_STACK_FLAGS;
30350
30351 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
30352 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
30353 + vm_flags &= ~VM_EXEC;
30354 +
30355 +#ifdef CONFIG_PAX_MPROTECT
30356 + if (mm->pax_flags & MF_PAX_MPROTECT)
30357 + vm_flags &= ~VM_MAYEXEC;
30358 +#endif
30359 +
30360 + }
30361 +#endif
30362 +
30363 /*
30364 * Adjust stack execute permissions; explicitly enable for
30365 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
30366 @@ -625,13 +665,6 @@ int setup_arg_pages(struct linux_binprm
30367 goto out_unlock;
30368 BUG_ON(prev != vma);
30369
30370 - /* Move stack pages down in memory. */
30371 - if (stack_shift) {
30372 - ret = shift_arg_pages(vma, stack_shift);
30373 - if (ret)
30374 - goto out_unlock;
30375 - }
30376 -
30377 /* mprotect_fixup is overkill to remove the temporary stack flags */
30378 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
30379
30380 @@ -671,7 +704,7 @@ struct file *open_exec(const char *name)
30381 int err;
30382
30383 file = do_filp_open(AT_FDCWD, name,
30384 - O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
30385 + O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
30386 MAY_EXEC | MAY_OPEN);
30387 if (IS_ERR(file))
30388 goto out;
30389 @@ -708,7 +741,7 @@ int kernel_read(struct file *file, loff_
30390 old_fs = get_fs();
30391 set_fs(get_ds());
30392 /* The cast to a user pointer is valid due to the set_fs() */
30393 - result = vfs_read(file, (void __user *)addr, count, &pos);
30394 + result = vfs_read(file, (__force void __user *)addr, count, &pos);
30395 set_fs(old_fs);
30396 return result;
30397 }
30398 @@ -1125,7 +1158,7 @@ int check_unsafe_exec(struct linux_binpr
30399 }
30400 rcu_read_unlock();
30401
30402 - if (p->fs->users > n_fs) {
30403 + if (atomic_read(&p->fs->users) > n_fs) {
30404 bprm->unsafe |= LSM_UNSAFE_SHARE;
30405 } else {
30406 res = -EAGAIN;
30407 @@ -1321,6 +1354,11 @@ int do_execve(char * filename,
30408 char __user *__user *envp,
30409 struct pt_regs * regs)
30410 {
30411 +#ifdef CONFIG_GRKERNSEC
30412 + struct file *old_exec_file;
30413 + struct acl_subject_label *old_acl;
30414 + struct rlimit old_rlim[RLIM_NLIMITS];
30415 +#endif
30416 struct linux_binprm *bprm;
30417 struct file *file;
30418 struct files_struct *displaced;
30419 @@ -1357,6 +1395,18 @@ int do_execve(char * filename,
30420 bprm->filename = filename;
30421 bprm->interp = filename;
30422
30423 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
30424 +
30425 + if (gr_handle_nproc()) {
30426 + retval = -EAGAIN;
30427 + goto out_file;
30428 + }
30429 +
30430 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
30431 + retval = -EACCES;
30432 + goto out_file;
30433 + }
30434 +
30435 retval = bprm_mm_init(bprm);
30436 if (retval)
30437 goto out_file;
30438 @@ -1386,10 +1436,41 @@ int do_execve(char * filename,
30439 if (retval < 0)
30440 goto out;
30441
30442 + if (!gr_tpe_allow(file)) {
30443 + retval = -EACCES;
30444 + goto out;
30445 + }
30446 +
30447 + if (gr_check_crash_exec(file)) {
30448 + retval = -EACCES;
30449 + goto out;
30450 + }
30451 +
30452 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
30453 +
30454 + gr_handle_exec_args(bprm, argv);
30455 +
30456 +#ifdef CONFIG_GRKERNSEC
30457 + old_acl = current->acl;
30458 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
30459 + old_exec_file = current->exec_file;
30460 + get_file(file);
30461 + current->exec_file = file;
30462 +#endif
30463 +
30464 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
30465 + bprm->unsafe & LSM_UNSAFE_SHARE);
30466 + if (retval < 0)
30467 + goto out_fail;
30468 +
30469 current->flags &= ~PF_KTHREAD;
30470 retval = search_binary_handler(bprm,regs);
30471 if (retval < 0)
30472 - goto out;
30473 + goto out_fail;
30474 +#ifdef CONFIG_GRKERNSEC
30475 + if (old_exec_file)
30476 + fput(old_exec_file);
30477 +#endif
30478
30479 /* execve succeeded */
30480 current->fs->in_exec = 0;
30481 @@ -1400,6 +1481,14 @@ int do_execve(char * filename,
30482 put_files_struct(displaced);
30483 return retval;
30484
30485 +out_fail:
30486 +#ifdef CONFIG_GRKERNSEC
30487 + current->acl = old_acl;
30488 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
30489 + fput(current->exec_file);
30490 + current->exec_file = old_exec_file;
30491 +#endif
30492 +
30493 out:
30494 if (bprm->mm)
30495 mmput (bprm->mm);
30496 @@ -1563,6 +1652,225 @@ out:
30497 return ispipe;
30498 }
30499
30500 +int pax_check_flags(unsigned long *flags)
30501 +{
30502 + int retval = 0;
30503 +
30504 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
30505 + if (*flags & MF_PAX_SEGMEXEC)
30506 + {
30507 + *flags &= ~MF_PAX_SEGMEXEC;
30508 + retval = -EINVAL;
30509 + }
30510 +#endif
30511 +
30512 + if ((*flags & MF_PAX_PAGEEXEC)
30513 +
30514 +#ifdef CONFIG_PAX_PAGEEXEC
30515 + && (*flags & MF_PAX_SEGMEXEC)
30516 +#endif
30517 +
30518 + )
30519 + {
30520 + *flags &= ~MF_PAX_PAGEEXEC;
30521 + retval = -EINVAL;
30522 + }
30523 +
30524 + if ((*flags & MF_PAX_MPROTECT)
30525 +
30526 +#ifdef CONFIG_PAX_MPROTECT
30527 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
30528 +#endif
30529 +
30530 + )
30531 + {
30532 + *flags &= ~MF_PAX_MPROTECT;
30533 + retval = -EINVAL;
30534 + }
30535 +
30536 + if ((*flags & MF_PAX_EMUTRAMP)
30537 +
30538 +#ifdef CONFIG_PAX_EMUTRAMP
30539 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
30540 +#endif
30541 +
30542 + )
30543 + {
30544 + *flags &= ~MF_PAX_EMUTRAMP;
30545 + retval = -EINVAL;
30546 + }
30547 +
30548 + return retval;
30549 +}
30550 +
30551 +EXPORT_SYMBOL(pax_check_flags);
30552 +
30553 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
30554 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
30555 +{
30556 + struct task_struct *tsk = current;
30557 + struct mm_struct *mm = current->mm;
30558 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
30559 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
30560 + char *path_exec = NULL;
30561 + char *path_fault = NULL;
30562 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
30563 +
30564 + if (buffer_exec && buffer_fault) {
30565 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
30566 +
30567 + down_read(&mm->mmap_sem);
30568 + vma = mm->mmap;
30569 + while (vma && (!vma_exec || !vma_fault)) {
30570 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
30571 + vma_exec = vma;
30572 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
30573 + vma_fault = vma;
30574 + vma = vma->vm_next;
30575 + }
30576 + if (vma_exec) {
30577 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
30578 + if (IS_ERR(path_exec))
30579 + path_exec = "<path too long>";
30580 + else {
30581 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
30582 + if (path_exec) {
30583 + *path_exec = 0;
30584 + path_exec = buffer_exec;
30585 + } else
30586 + path_exec = "<path too long>";
30587 + }
30588 + }
30589 + if (vma_fault) {
30590 + start = vma_fault->vm_start;
30591 + end = vma_fault->vm_end;
30592 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
30593 + if (vma_fault->vm_file) {
30594 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
30595 + if (IS_ERR(path_fault))
30596 + path_fault = "<path too long>";
30597 + else {
30598 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
30599 + if (path_fault) {
30600 + *path_fault = 0;
30601 + path_fault = buffer_fault;
30602 + } else
30603 + path_fault = "<path too long>";
30604 + }
30605 + } else
30606 + path_fault = "<anonymous mapping>";
30607 + }
30608 + up_read(&mm->mmap_sem);
30609 + }
30610 + if (tsk->signal->curr_ip)
30611 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
30612 + else
30613 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
30614 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
30615 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
30616 + task_uid(tsk), task_euid(tsk), pc, sp);
30617 + free_page((unsigned long)buffer_exec);
30618 + free_page((unsigned long)buffer_fault);
30619 + pax_report_insns(pc, sp);
30620 + do_coredump(SIGKILL, SIGKILL, regs);
30621 +}
30622 +#endif
30623 +
30624 +#ifdef CONFIG_PAX_REFCOUNT
30625 +void pax_report_refcount_overflow(struct pt_regs *regs)
30626 +{
30627 + if (current->signal->curr_ip)
30628 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
30629 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
30630 + else
30631 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
30632 + current->comm, task_pid_nr(current), current_uid(), current_euid());
30633 +	print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
30634 + show_regs(regs);
30635 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
30636 +}
30637 +#endif
30638 +
30639 +#ifdef CONFIG_PAX_USERCOPY
30640 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
30641 +struct stack_frame {
30642 + struct stack_frame *next_frame;
30643 + unsigned long return_address;
30644 +};
30645 +#endif
30646 +
30647 +/* 0: not at all, 1: fully, 2: fully inside frame,
30648 + -1: partially (implies an error) */
30649 +
30650 +int object_is_on_stack(const void *obj, unsigned long len)
30651 +{
30652 + const void *stack = task_stack_page(current);
30653 + const void *stackend = stack + THREAD_SIZE;
30654 +
30655 + if (obj + len < obj)
30656 + return -1;
30657 +
30658 + if (stack <= obj && obj + len <= stackend) {
30659 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
30660 + void *frame = __builtin_frame_address(2);
30661 + void *oldframe = __builtin_frame_address(1);
30662 + /*
30663 + bottom ----------------------------------------------> top
30664 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
30665 + ^----------------^
30666 + allow copies only within here
30667 + */
30668 + while (frame) {
30669 + /* if obj + len extends past the last frame, this
30670 + check won't pass and the next frame will be 0,
30671 + causing us to bail out and correctly report
30672 + the copy as invalid
30673 + */
30674 + if (obj + len <= frame) {
30675 + if (obj >= (oldframe + (2 * sizeof(void *))))
30676 + return 2;
30677 + else
30678 + return -1;
30679 + }
30680 + oldframe = frame;
30681 + frame = ((struct stack_frame *)frame)->next_frame;
30682 + }
30683 + return -1;
30684 +#else
30685 + return 1;
30686 +#endif
30687 + }
30688 +
30689 + if (obj + len <= stack || stackend <= obj)
30690 + return 0;
30691 +
30692 + return -1;
30693 +}
30694 +
30695 +
30696 +void pax_report_leak_to_user(const void *ptr, unsigned long len)
30697 +{
30698 + if (current->signal->curr_ip)
30699 + printk(KERN_ERR "PAX: From %pI4: kernel memory leak attempt detected from %p (%lu bytes)\n",
30700 + &current->signal->curr_ip, ptr, len);
30701 + else
30702 + printk(KERN_ERR "PAX: kernel memory leak attempt detected from %p (%lu bytes)\n", ptr, len);
30703 + dump_stack();
30704 + do_group_exit(SIGKILL);
30705 +}
30706 +
30707 +void pax_report_overflow_from_user(const void *ptr, unsigned long len)
30708 +{
30709 + if (current->signal->curr_ip)
30710 + printk(KERN_ERR "PAX: From %pI4: kernel memory overflow attempt detected to %p (%lu bytes)\n",
30711 + &current->signal->curr_ip, ptr, len);
30712 + else
30713 + printk(KERN_ERR "PAX: kernel memory overflow attempt detected to %p (%lu bytes)\n", ptr, len);
30714 + dump_stack();
30715 + do_group_exit(SIGKILL);
30716 +}
30717 +#endif
30718 +
30719 static int zap_process(struct task_struct *start, int exit_code)
30720 {
30721 struct task_struct *t;
30722 @@ -1773,17 +2081,17 @@ static void wait_for_dump_helpers(struct
30723 pipe = file->f_path.dentry->d_inode->i_pipe;
30724
30725 pipe_lock(pipe);
30726 - pipe->readers++;
30727 - pipe->writers--;
30728 + atomic_inc(&pipe->readers);
30729 + atomic_dec(&pipe->writers);
30730
30731 - while ((pipe->readers > 1) && (!signal_pending(current))) {
30732 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
30733 wake_up_interruptible_sync(&pipe->wait);
30734 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
30735 pipe_wait(pipe);
30736 }
30737
30738 - pipe->readers--;
30739 - pipe->writers++;
30740 + atomic_dec(&pipe->readers);
30741 + atomic_inc(&pipe->writers);
30742 pipe_unlock(pipe);
30743
30744 }
30745 @@ -1891,6 +2199,10 @@ void do_coredump(long signr, int exit_co
30746 */
30747 clear_thread_flag(TIF_SIGPENDING);
30748
30749 + if (signr == SIGKILL || signr == SIGILL)
30750 + gr_handle_brute_attach(current);
30751 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
30752 +
30753 /*
30754 * lock_kernel() because format_corename() is controlled by sysctl, which
30755 * uses lock_kernel()
30756 diff -urNp linux-2.6.35.4/fs/ext2/balloc.c linux-2.6.35.4/fs/ext2/balloc.c
30757 --- linux-2.6.35.4/fs/ext2/balloc.c 2010-08-26 19:47:12.000000000 -0400
30758 +++ linux-2.6.35.4/fs/ext2/balloc.c 2010-09-17 20:12:37.000000000 -0400
30759 @@ -1193,7 +1193,7 @@ static int ext2_has_free_blocks(struct e
30760
30761 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
30762 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
30763 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
30764 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
30765 sbi->s_resuid != current_fsuid() &&
30766 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
30767 return 0;
30768 diff -urNp linux-2.6.35.4/fs/ext2/xattr.c linux-2.6.35.4/fs/ext2/xattr.c
30769 --- linux-2.6.35.4/fs/ext2/xattr.c 2010-08-26 19:47:12.000000000 -0400
30770 +++ linux-2.6.35.4/fs/ext2/xattr.c 2010-09-17 20:12:09.000000000 -0400
30771 @@ -86,8 +86,8 @@
30772 printk("\n"); \
30773 } while (0)
30774 #else
30775 -# define ea_idebug(f...)
30776 -# define ea_bdebug(f...)
30777 +# define ea_idebug(inode, f...) do {} while (0)
30778 +# define ea_bdebug(bh, f...) do {} while (0)
30779 #endif
30780
30781 static int ext2_xattr_set2(struct inode *, struct buffer_head *,
30782 diff -urNp linux-2.6.35.4/fs/ext3/balloc.c linux-2.6.35.4/fs/ext3/balloc.c
30783 --- linux-2.6.35.4/fs/ext3/balloc.c 2010-08-26 19:47:12.000000000 -0400
30784 +++ linux-2.6.35.4/fs/ext3/balloc.c 2010-09-17 20:12:37.000000000 -0400
30785 @@ -1422,7 +1422,7 @@ static int ext3_has_free_blocks(struct e
30786
30787 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
30788 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
30789 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
30790 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
30791 sbi->s_resuid != current_fsuid() &&
30792 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
30793 return 0;
30794 diff -urNp linux-2.6.35.4/fs/ext3/namei.c linux-2.6.35.4/fs/ext3/namei.c
30795 --- linux-2.6.35.4/fs/ext3/namei.c 2010-08-26 19:47:12.000000000 -0400
30796 +++ linux-2.6.35.4/fs/ext3/namei.c 2010-09-17 20:12:09.000000000 -0400
30797 @@ -1168,7 +1168,7 @@ static struct ext3_dir_entry_2 *do_split
30798 char *data1 = (*bh)->b_data, *data2;
30799 unsigned split, move, size;
30800 struct ext3_dir_entry_2 *de = NULL, *de2;
30801 - int err = 0, i;
30802 + int i, err = 0;
30803
30804 bh2 = ext3_append (handle, dir, &newblock, &err);
30805 if (!(bh2)) {
30806 diff -urNp linux-2.6.35.4/fs/ext3/xattr.c linux-2.6.35.4/fs/ext3/xattr.c
30807 --- linux-2.6.35.4/fs/ext3/xattr.c 2010-08-26 19:47:12.000000000 -0400
30808 +++ linux-2.6.35.4/fs/ext3/xattr.c 2010-09-17 20:12:09.000000000 -0400
30809 @@ -89,8 +89,8 @@
30810 printk("\n"); \
30811 } while (0)
30812 #else
30813 -# define ea_idebug(f...)
30814 -# define ea_bdebug(f...)
30815 +# define ea_idebug(f...) do {} while (0)
30816 +# define ea_bdebug(f...) do {} while (0)
30817 #endif
30818
30819 static void ext3_xattr_cache_insert(struct buffer_head *);
30820 diff -urNp linux-2.6.35.4/fs/ext4/balloc.c linux-2.6.35.4/fs/ext4/balloc.c
30821 --- linux-2.6.35.4/fs/ext4/balloc.c 2010-08-26 19:47:12.000000000 -0400
30822 +++ linux-2.6.35.4/fs/ext4/balloc.c 2010-09-17 20:12:37.000000000 -0400
30823 @@ -522,7 +522,7 @@ int ext4_has_free_blocks(struct ext4_sb_
30824 /* Hm, nope. Are (enough) root reserved blocks available? */
30825 if (sbi->s_resuid == current_fsuid() ||
30826 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
30827 - capable(CAP_SYS_RESOURCE)) {
30828 + capable_nolog(CAP_SYS_RESOURCE)) {
30829 if (free_blocks >= (nblocks + dirty_blocks))
30830 return 1;
30831 }
30832 diff -urNp linux-2.6.35.4/fs/ext4/namei.c linux-2.6.35.4/fs/ext4/namei.c
30833 --- linux-2.6.35.4/fs/ext4/namei.c 2010-08-26 19:47:12.000000000 -0400
30834 +++ linux-2.6.35.4/fs/ext4/namei.c 2010-09-17 20:12:09.000000000 -0400
30835 @@ -1197,7 +1197,7 @@ static struct ext4_dir_entry_2 *do_split
30836 char *data1 = (*bh)->b_data, *data2;
30837 unsigned split, move, size;
30838 struct ext4_dir_entry_2 *de = NULL, *de2;
30839 - int err = 0, i;
30840 + int i, err = 0;
30841
30842 bh2 = ext4_append (handle, dir, &newblock, &err);
30843 if (!(bh2)) {
30844 diff -urNp linux-2.6.35.4/fs/ext4/xattr.c linux-2.6.35.4/fs/ext4/xattr.c
30845 --- linux-2.6.35.4/fs/ext4/xattr.c 2010-08-26 19:47:12.000000000 -0400
30846 +++ linux-2.6.35.4/fs/ext4/xattr.c 2010-09-17 20:12:09.000000000 -0400
30847 @@ -82,8 +82,8 @@
30848 printk("\n"); \
30849 } while (0)
30850 #else
30851 -# define ea_idebug(f...)
30852 -# define ea_bdebug(f...)
30853 +# define ea_idebug(inode, f...) do {} while (0)
30854 +# define ea_bdebug(bh, f...) do {} while (0)
30855 #endif
30856
30857 static void ext4_xattr_cache_insert(struct buffer_head *);
30858 diff -urNp linux-2.6.35.4/fs/fcntl.c linux-2.6.35.4/fs/fcntl.c
30859 --- linux-2.6.35.4/fs/fcntl.c 2010-08-26 19:47:12.000000000 -0400
30860 +++ linux-2.6.35.4/fs/fcntl.c 2010-09-17 20:12:37.000000000 -0400
30861 @@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct
30862 if (err)
30863 return err;
30864
30865 + if (gr_handle_chroot_fowner(pid, type))
30866 + return -ENOENT;
30867 + if (gr_check_protected_task_fowner(pid, type))
30868 + return -EACCES;
30869 +
30870 f_modown(filp, pid, type, force);
30871 return 0;
30872 }
30873 @@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned in
30874 switch (cmd) {
30875 case F_DUPFD:
30876 case F_DUPFD_CLOEXEC:
30877 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
30878 if (arg >= rlimit(RLIMIT_NOFILE))
30879 break;
30880 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
30881 diff -urNp linux-2.6.35.4/fs/fifo.c linux-2.6.35.4/fs/fifo.c
30882 --- linux-2.6.35.4/fs/fifo.c 2010-08-26 19:47:12.000000000 -0400
30883 +++ linux-2.6.35.4/fs/fifo.c 2010-09-17 20:12:09.000000000 -0400
30884 @@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode
30885 */
30886 filp->f_op = &read_pipefifo_fops;
30887 pipe->r_counter++;
30888 - if (pipe->readers++ == 0)
30889 + if (atomic_inc_return(&pipe->readers) == 1)
30890 wake_up_partner(inode);
30891
30892 - if (!pipe->writers) {
30893 + if (!atomic_read(&pipe->writers)) {
30894 if ((filp->f_flags & O_NONBLOCK)) {
30895 /* suppress POLLHUP until we have
30896 * seen a writer */
30897 @@ -82,15 +82,15 @@ static int fifo_open(struct inode *inode
30898 * errno=ENXIO when there is no process reading the FIFO.
30899 */
30900 ret = -ENXIO;
30901 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
30902 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
30903 goto err;
30904
30905 filp->f_op = &write_pipefifo_fops;
30906 pipe->w_counter++;
30907 - if (!pipe->writers++)
30908 + if (atomic_inc_return(&pipe->writers) == 1)
30909 wake_up_partner(inode);
30910
30911 - if (!pipe->readers) {
30912 + if (!atomic_read(&pipe->readers)) {
30913 wait_for_partner(inode, &pipe->r_counter);
30914 if (signal_pending(current))
30915 goto err_wr;
30916 @@ -106,11 +106,11 @@ static int fifo_open(struct inode *inode
30917 */
30918 filp->f_op = &rdwr_pipefifo_fops;
30919
30920 - pipe->readers++;
30921 - pipe->writers++;
30922 + atomic_inc(&pipe->readers);
30923 + atomic_inc(&pipe->writers);
30924 pipe->r_counter++;
30925 pipe->w_counter++;
30926 - if (pipe->readers == 1 || pipe->writers == 1)
30927 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
30928 wake_up_partner(inode);
30929 break;
30930
30931 @@ -124,19 +124,19 @@ static int fifo_open(struct inode *inode
30932 return 0;
30933
30934 err_rd:
30935 - if (!--pipe->readers)
30936 + if (atomic_dec_and_test(&pipe->readers))
30937 wake_up_interruptible(&pipe->wait);
30938 ret = -ERESTARTSYS;
30939 goto err;
30940
30941 err_wr:
30942 - if (!--pipe->writers)
30943 + if (atomic_dec_and_test(&pipe->writers))
30944 wake_up_interruptible(&pipe->wait);
30945 ret = -ERESTARTSYS;
30946 goto err;
30947
30948 err:
30949 - if (!pipe->readers && !pipe->writers)
30950 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
30951 free_pipe_info(inode);
30952
30953 err_nocleanup:
30954 diff -urNp linux-2.6.35.4/fs/file.c linux-2.6.35.4/fs/file.c
30955 --- linux-2.6.35.4/fs/file.c 2010-08-26 19:47:12.000000000 -0400
30956 +++ linux-2.6.35.4/fs/file.c 2010-09-17 20:12:37.000000000 -0400
30957 @@ -14,6 +14,7 @@
30958 #include <linux/slab.h>
30959 #include <linux/vmalloc.h>
30960 #include <linux/file.h>
30961 +#include <linux/security.h>
30962 #include <linux/fdtable.h>
30963 #include <linux/bitops.h>
30964 #include <linux/interrupt.h>
30965 @@ -257,6 +258,7 @@ int expand_files(struct files_struct *fi
30966 * N.B. For clone tasks sharing a files structure, this test
30967 * will limit the total number of files that can be opened.
30968 */
30969 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
30970 if (nr >= rlimit(RLIMIT_NOFILE))
30971 return -EMFILE;
30972
30973 diff -urNp linux-2.6.35.4/fs/fs_struct.c linux-2.6.35.4/fs/fs_struct.c
30974 --- linux-2.6.35.4/fs/fs_struct.c 2010-08-26 19:47:12.000000000 -0400
30975 +++ linux-2.6.35.4/fs/fs_struct.c 2010-09-17 20:12:37.000000000 -0400
30976 @@ -4,6 +4,7 @@
30977 #include <linux/path.h>
30978 #include <linux/slab.h>
30979 #include <linux/fs_struct.h>
30980 +#include <linux/grsecurity.h>
30981
30982 /*
30983 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
30984 @@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, s
30985 old_root = fs->root;
30986 fs->root = *path;
30987 path_get(path);
30988 + gr_set_chroot_entries(current, path);
30989 write_unlock(&fs->lock);
30990 if (old_root.dentry)
30991 path_put(&old_root);
30992 @@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_roo
30993 && fs->root.mnt == old_root->mnt) {
30994 path_get(new_root);
30995 fs->root = *new_root;
30996 + gr_set_chroot_entries(p, new_root);
30997 count++;
30998 }
30999 if (fs->pwd.dentry == old_root->dentry
31000 @@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
31001 task_lock(tsk);
31002 write_lock(&fs->lock);
31003 tsk->fs = NULL;
31004 - kill = !--fs->users;
31005 + gr_clear_chroot_entries(tsk);
31006 + kill = !atomic_dec_return(&fs->users);
31007 write_unlock(&fs->lock);
31008 task_unlock(tsk);
31009 if (kill)
31010 @@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct
31011 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
31012 /* We don't need to lock fs - think why ;-) */
31013 if (fs) {
31014 - fs->users = 1;
31015 + atomic_set(&fs->users, 1);
31016 fs->in_exec = 0;
31017 rwlock_init(&fs->lock);
31018 fs->umask = old->umask;
31019 @@ -127,8 +131,9 @@ int unshare_fs_struct(void)
31020
31021 task_lock(current);
31022 write_lock(&fs->lock);
31023 - kill = !--fs->users;
31024 + kill = !atomic_dec_return(&fs->users);
31025 current->fs = new_fs;
31026 + gr_set_chroot_entries(current, &new_fs->root);
31027 write_unlock(&fs->lock);
31028 task_unlock(current);
31029
31030 @@ -147,7 +152,7 @@ EXPORT_SYMBOL(current_umask);
31031
31032 /* to be mentioned only in INIT_TASK */
31033 struct fs_struct init_fs = {
31034 - .users = 1,
31035 + .users = ATOMIC_INIT(1),
31036 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
31037 .umask = 0022,
31038 };
31039 @@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
31040 task_lock(current);
31041
31042 write_lock(&init_fs.lock);
31043 - init_fs.users++;
31044 + atomic_inc(&init_fs.users);
31045 write_unlock(&init_fs.lock);
31046
31047 write_lock(&fs->lock);
31048 current->fs = &init_fs;
31049 - kill = !--fs->users;
31050 + gr_set_chroot_entries(current, &current->fs->root);
31051 + kill = !atomic_dec_return(&fs->users);
31052 write_unlock(&fs->lock);
31053
31054 task_unlock(current);
31055 diff -urNp linux-2.6.35.4/fs/fuse/control.c linux-2.6.35.4/fs/fuse/control.c
31056 --- linux-2.6.35.4/fs/fuse/control.c 2010-08-26 19:47:12.000000000 -0400
31057 +++ linux-2.6.35.4/fs/fuse/control.c 2010-09-17 20:12:09.000000000 -0400
31058 @@ -293,7 +293,7 @@ void fuse_ctl_remove_conn(struct fuse_co
31059
31060 static int fuse_ctl_fill_super(struct super_block *sb, void *data, int silent)
31061 {
31062 - struct tree_descr empty_descr = {""};
31063 + struct tree_descr empty_descr = {"", NULL, 0};
31064 struct fuse_conn *fc;
31065 int err;
31066
31067 diff -urNp linux-2.6.35.4/fs/fuse/cuse.c linux-2.6.35.4/fs/fuse/cuse.c
31068 --- linux-2.6.35.4/fs/fuse/cuse.c 2010-08-26 19:47:12.000000000 -0400
31069 +++ linux-2.6.35.4/fs/fuse/cuse.c 2010-09-17 20:12:09.000000000 -0400
31070 @@ -529,8 +529,18 @@ static int cuse_channel_release(struct i
31071 return rc;
31072 }
31073
31074 -static struct file_operations cuse_channel_fops; /* initialized during init */
31075 -
31076 +static const struct file_operations cuse_channel_fops = { /* initialized during init */
31077 + .owner = THIS_MODULE,
31078 + .llseek = no_llseek,
31079 + .read = do_sync_read,
31080 + .aio_read = fuse_dev_read,
31081 + .write = do_sync_write,
31082 + .aio_write = fuse_dev_write,
31083 + .poll = fuse_dev_poll,
31084 + .open = cuse_channel_open,
31085 + .release = cuse_channel_release,
31086 + .fasync = fuse_dev_fasync,
31087 +};
31088
31089 /**************************************************************************
31090 * Misc stuff and module initializatiion
31091 @@ -576,12 +586,6 @@ static int __init cuse_init(void)
31092 for (i = 0; i < CUSE_CONNTBL_LEN; i++)
31093 INIT_LIST_HEAD(&cuse_conntbl[i]);
31094
31095 - /* inherit and extend fuse_dev_operations */
31096 - cuse_channel_fops = fuse_dev_operations;
31097 - cuse_channel_fops.owner = THIS_MODULE;
31098 - cuse_channel_fops.open = cuse_channel_open;
31099 - cuse_channel_fops.release = cuse_channel_release;
31100 -
31101 cuse_class = class_create(THIS_MODULE, "cuse");
31102 if (IS_ERR(cuse_class))
31103 return PTR_ERR(cuse_class);
31104 diff -urNp linux-2.6.35.4/fs/fuse/dev.c linux-2.6.35.4/fs/fuse/dev.c
31105 --- linux-2.6.35.4/fs/fuse/dev.c 2010-08-26 19:47:12.000000000 -0400
31106 +++ linux-2.6.35.4/fs/fuse/dev.c 2010-09-17 20:12:09.000000000 -0400
31107 @@ -1031,7 +1031,7 @@ static ssize_t fuse_dev_do_read(struct f
31108 return err;
31109 }
31110
31111 -static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
31112 +ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
31113 unsigned long nr_segs, loff_t pos)
31114 {
31115 struct fuse_copy_state cs;
31116 @@ -1045,6 +1045,8 @@ static ssize_t fuse_dev_read(struct kioc
31117 return fuse_dev_do_read(fc, file, &cs, iov_length(iov, nr_segs));
31118 }
31119
31120 +EXPORT_SYMBOL_GPL(fuse_dev_read);
31121 +
31122 static int fuse_dev_pipe_buf_steal(struct pipe_inode_info *pipe,
31123 struct pipe_buffer *buf)
31124 {
31125 @@ -1088,7 +1090,7 @@ static ssize_t fuse_dev_splice_read(stru
31126 ret = 0;
31127 pipe_lock(pipe);
31128
31129 - if (!pipe->readers) {
31130 + if (!atomic_read(&pipe->readers)) {
31131 send_sig(SIGPIPE, current, 0);
31132 if (!ret)
31133 ret = -EPIPE;
31134 @@ -1387,7 +1389,7 @@ static ssize_t fuse_dev_do_write(struct
31135 return err;
31136 }
31137
31138 -static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
31139 +ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
31140 unsigned long nr_segs, loff_t pos)
31141 {
31142 struct fuse_copy_state cs;
31143 @@ -1400,6 +1402,8 @@ static ssize_t fuse_dev_write(struct kio
31144 return fuse_dev_do_write(fc, &cs, iov_length(iov, nr_segs));
31145 }
31146
31147 +EXPORT_SYMBOL_GPL(fuse_dev_write);
31148 +
31149 static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
31150 struct file *out, loff_t *ppos,
31151 size_t len, unsigned int flags)
31152 @@ -1478,7 +1482,7 @@ out:
31153 return ret;
31154 }
31155
31156 -static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
31157 +unsigned fuse_dev_poll(struct file *file, poll_table *wait)
31158 {
31159 unsigned mask = POLLOUT | POLLWRNORM;
31160 struct fuse_conn *fc = fuse_get_conn(file);
31161 @@ -1497,6 +1501,8 @@ static unsigned fuse_dev_poll(struct fil
31162 return mask;
31163 }
31164
31165 +EXPORT_SYMBOL_GPL(fuse_dev_poll);
31166 +
31167 /*
31168 * Abort all requests on the given list (pending or processing)
31169 *
31170 @@ -1604,7 +1610,7 @@ int fuse_dev_release(struct inode *inode
31171 }
31172 EXPORT_SYMBOL_GPL(fuse_dev_release);
31173
31174 -static int fuse_dev_fasync(int fd, struct file *file, int on)
31175 +int fuse_dev_fasync(int fd, struct file *file, int on)
31176 {
31177 struct fuse_conn *fc = fuse_get_conn(file);
31178 if (!fc)
31179 @@ -1614,6 +1620,8 @@ static int fuse_dev_fasync(int fd, struc
31180 return fasync_helper(fd, file, on, &fc->fasync);
31181 }
31182
31183 +EXPORT_SYMBOL_GPL(fuse_dev_fasync);
31184 +
31185 const struct file_operations fuse_dev_operations = {
31186 .owner = THIS_MODULE,
31187 .llseek = no_llseek,
31188 diff -urNp linux-2.6.35.4/fs/fuse/dir.c linux-2.6.35.4/fs/fuse/dir.c
31189 --- linux-2.6.35.4/fs/fuse/dir.c 2010-08-26 19:47:12.000000000 -0400
31190 +++ linux-2.6.35.4/fs/fuse/dir.c 2010-09-17 20:12:09.000000000 -0400
31191 @@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *de
31192 return link;
31193 }
31194
31195 -static void free_link(char *link)
31196 +static void free_link(const char *link)
31197 {
31198 if (!IS_ERR(link))
31199 free_page((unsigned long) link);
31200 diff -urNp linux-2.6.35.4/fs/fuse/fuse_i.h linux-2.6.35.4/fs/fuse/fuse_i.h
31201 --- linux-2.6.35.4/fs/fuse/fuse_i.h 2010-08-26 19:47:12.000000000 -0400
31202 +++ linux-2.6.35.4/fs/fuse/fuse_i.h 2010-09-17 20:12:09.000000000 -0400
31203 @@ -524,6 +524,16 @@ extern const struct file_operations fuse
31204
31205 extern const struct dentry_operations fuse_dentry_operations;
31206
31207 +extern ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
31208 + unsigned long nr_segs, loff_t pos);
31209 +
31210 +extern ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
31211 + unsigned long nr_segs, loff_t pos);
31212 +
31213 +extern unsigned fuse_dev_poll(struct file *file, poll_table *wait);
31214 +
31215 +extern int fuse_dev_fasync(int fd, struct file *file, int on);
31216 +
31217 /**
31218 * Inode to nodeid comparison.
31219 */
31220 diff -urNp linux-2.6.35.4/fs/hfs/inode.c linux-2.6.35.4/fs/hfs/inode.c
31221 --- linux-2.6.35.4/fs/hfs/inode.c 2010-08-26 19:47:12.000000000 -0400
31222 +++ linux-2.6.35.4/fs/hfs/inode.c 2010-09-17 20:12:09.000000000 -0400
31223 @@ -423,7 +423,7 @@ int hfs_write_inode(struct inode *inode,
31224
31225 if (S_ISDIR(main_inode->i_mode)) {
31226 if (fd.entrylength < sizeof(struct hfs_cat_dir))
31227 - /* panic? */;
31228 + {/* panic? */}
31229 hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
31230 sizeof(struct hfs_cat_dir));
31231 if (rec.type != HFS_CDR_DIR ||
31232 @@ -444,7 +444,7 @@ int hfs_write_inode(struct inode *inode,
31233 sizeof(struct hfs_cat_file));
31234 } else {
31235 if (fd.entrylength < sizeof(struct hfs_cat_file))
31236 - /* panic? */;
31237 + {/* panic? */}
31238 hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
31239 sizeof(struct hfs_cat_file));
31240 if (rec.type != HFS_CDR_FIL ||
31241 diff -urNp linux-2.6.35.4/fs/hfsplus/inode.c linux-2.6.35.4/fs/hfsplus/inode.c
31242 --- linux-2.6.35.4/fs/hfsplus/inode.c 2010-08-26 19:47:12.000000000 -0400
31243 +++ linux-2.6.35.4/fs/hfsplus/inode.c 2010-09-17 20:12:09.000000000 -0400
31244 @@ -406,7 +406,7 @@ int hfsplus_cat_read_inode(struct inode
31245 struct hfsplus_cat_folder *folder = &entry.folder;
31246
31247 if (fd->entrylength < sizeof(struct hfsplus_cat_folder))
31248 - /* panic? */;
31249 + {/* panic? */}
31250 hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
31251 sizeof(struct hfsplus_cat_folder));
31252 hfsplus_get_perms(inode, &folder->permissions, 1);
31253 @@ -423,7 +423,7 @@ int hfsplus_cat_read_inode(struct inode
31254 struct hfsplus_cat_file *file = &entry.file;
31255
31256 if (fd->entrylength < sizeof(struct hfsplus_cat_file))
31257 - /* panic? */;
31258 + {/* panic? */}
31259 hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
31260 sizeof(struct hfsplus_cat_file));
31261
31262 @@ -479,7 +479,7 @@ int hfsplus_cat_write_inode(struct inode
31263 struct hfsplus_cat_folder *folder = &entry.folder;
31264
31265 if (fd.entrylength < sizeof(struct hfsplus_cat_folder))
31266 - /* panic? */;
31267 + {/* panic? */}
31268 hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
31269 sizeof(struct hfsplus_cat_folder));
31270 /* simple node checks? */
31271 @@ -501,7 +501,7 @@ int hfsplus_cat_write_inode(struct inode
31272 struct hfsplus_cat_file *file = &entry.file;
31273
31274 if (fd.entrylength < sizeof(struct hfsplus_cat_file))
31275 - /* panic? */;
31276 + {/* panic? */}
31277 hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
31278 sizeof(struct hfsplus_cat_file));
31279 hfsplus_inode_write_fork(inode, &file->data_fork);
31280 diff -urNp linux-2.6.35.4/fs/hugetlbfs/inode.c linux-2.6.35.4/fs/hugetlbfs/inode.c
31281 --- linux-2.6.35.4/fs/hugetlbfs/inode.c 2010-08-26 19:47:12.000000000 -0400
31282 +++ linux-2.6.35.4/fs/hugetlbfs/inode.c 2010-09-17 20:12:37.000000000 -0400
31283 @@ -908,7 +908,7 @@ static struct file_system_type hugetlbfs
31284 .kill_sb = kill_litter_super,
31285 };
31286
31287 -static struct vfsmount *hugetlbfs_vfsmount;
31288 +struct vfsmount *hugetlbfs_vfsmount;
31289
31290 static int can_do_hugetlb_shm(void)
31291 {
31292 diff -urNp linux-2.6.35.4/fs/ioctl.c linux-2.6.35.4/fs/ioctl.c
31293 --- linux-2.6.35.4/fs/ioctl.c 2010-08-26 19:47:12.000000000 -0400
31294 +++ linux-2.6.35.4/fs/ioctl.c 2010-09-17 20:12:09.000000000 -0400
31295 @@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiema
31296 u64 phys, u64 len, u32 flags)
31297 {
31298 struct fiemap_extent extent;
31299 - struct fiemap_extent *dest = fieinfo->fi_extents_start;
31300 + struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
31301
31302 /* only count the extents */
31303 if (fieinfo->fi_extents_max == 0) {
31304 @@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *fil
31305
31306 fieinfo.fi_flags = fiemap.fm_flags;
31307 fieinfo.fi_extents_max = fiemap.fm_extent_count;
31308 - fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
31309 + fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
31310
31311 if (fiemap.fm_extent_count != 0 &&
31312 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
31313 @@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *fil
31314 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
31315 fiemap.fm_flags = fieinfo.fi_flags;
31316 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
31317 - if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
31318 + if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
31319 error = -EFAULT;
31320
31321 return error;
31322 diff -urNp linux-2.6.35.4/fs/jffs2/debug.h linux-2.6.35.4/fs/jffs2/debug.h
31323 --- linux-2.6.35.4/fs/jffs2/debug.h 2010-08-26 19:47:12.000000000 -0400
31324 +++ linux-2.6.35.4/fs/jffs2/debug.h 2010-09-17 20:12:09.000000000 -0400
31325 @@ -52,13 +52,13 @@
31326 #if CONFIG_JFFS2_FS_DEBUG > 0
31327 #define D1(x) x
31328 #else
31329 -#define D1(x)
31330 +#define D1(x) do {} while (0);
31331 #endif
31332
31333 #if CONFIG_JFFS2_FS_DEBUG > 1
31334 #define D2(x) x
31335 #else
31336 -#define D2(x)
31337 +#define D2(x) do {} while (0);
31338 #endif
31339
31340 /* The prefixes of JFFS2 messages */
31341 @@ -114,73 +114,73 @@
31342 #ifdef JFFS2_DBG_READINODE_MESSAGES
31343 #define dbg_readinode(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
31344 #else
31345 -#define dbg_readinode(fmt, ...)
31346 +#define dbg_readinode(fmt, ...) do {} while (0)
31347 #endif
31348 #ifdef JFFS2_DBG_READINODE2_MESSAGES
31349 #define dbg_readinode2(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
31350 #else
31351 -#define dbg_readinode2(fmt, ...)
31352 +#define dbg_readinode2(fmt, ...) do {} while (0)
31353 #endif
31354
31355 /* Fragtree build debugging messages */
31356 #ifdef JFFS2_DBG_FRAGTREE_MESSAGES
31357 #define dbg_fragtree(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
31358 #else
31359 -#define dbg_fragtree(fmt, ...)
31360 +#define dbg_fragtree(fmt, ...) do {} while (0)
31361 #endif
31362 #ifdef JFFS2_DBG_FRAGTREE2_MESSAGES
31363 #define dbg_fragtree2(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
31364 #else
31365 -#define dbg_fragtree2(fmt, ...)
31366 +#define dbg_fragtree2(fmt, ...) do {} while (0)
31367 #endif
31368
31369 /* Directory entry list manilulation debugging messages */
31370 #ifdef JFFS2_DBG_DENTLIST_MESSAGES
31371 #define dbg_dentlist(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
31372 #else
31373 -#define dbg_dentlist(fmt, ...)
31374 +#define dbg_dentlist(fmt, ...) do {} while (0)
31375 #endif
31376
31377 /* Print the messages about manipulating node_refs */
31378 #ifdef JFFS2_DBG_NODEREF_MESSAGES
31379 #define dbg_noderef(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
31380 #else
31381 -#define dbg_noderef(fmt, ...)
31382 +#define dbg_noderef(fmt, ...) do {} while (0)
31383 #endif
31384
31385 /* Manipulations with the list of inodes (JFFS2 inocache) */
31386 #ifdef JFFS2_DBG_INOCACHE_MESSAGES
31387 #define dbg_inocache(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
31388 #else
31389 -#define dbg_inocache(fmt, ...)
31390 +#define dbg_inocache(fmt, ...) do {} while (0)
31391 #endif
31392
31393 /* Summary debugging messages */
31394 #ifdef JFFS2_DBG_SUMMARY_MESSAGES
31395 #define dbg_summary(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
31396 #else
31397 -#define dbg_summary(fmt, ...)
31398 +#define dbg_summary(fmt, ...) do {} while (0)
31399 #endif
31400
31401 /* File system build messages */
31402 #ifdef JFFS2_DBG_FSBUILD_MESSAGES
31403 #define dbg_fsbuild(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
31404 #else
31405 -#define dbg_fsbuild(fmt, ...)
31406 +#define dbg_fsbuild(fmt, ...) do {} while (0)
31407 #endif
31408
31409 /* Watch the object allocations */
31410 #ifdef JFFS2_DBG_MEMALLOC_MESSAGES
31411 #define dbg_memalloc(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
31412 #else
31413 -#define dbg_memalloc(fmt, ...)
31414 +#define dbg_memalloc(fmt, ...) do {} while (0)
31415 #endif
31416
31417 /* Watch the XATTR subsystem */
31418 #ifdef JFFS2_DBG_XATTR_MESSAGES
31419 #define dbg_xattr(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__)
31420 #else
31421 -#define dbg_xattr(fmt, ...)
31422 +#define dbg_xattr(fmt, ...) do {} while (0)
31423 #endif
31424
31425 /* "Sanity" checks */
31426 diff -urNp linux-2.6.35.4/fs/jffs2/erase.c linux-2.6.35.4/fs/jffs2/erase.c
31427 --- linux-2.6.35.4/fs/jffs2/erase.c 2010-08-26 19:47:12.000000000 -0400
31428 +++ linux-2.6.35.4/fs/jffs2/erase.c 2010-09-17 20:12:09.000000000 -0400
31429 @@ -438,7 +438,8 @@ static void jffs2_mark_erased_block(stru
31430 struct jffs2_unknown_node marker = {
31431 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
31432 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
31433 - .totlen = cpu_to_je32(c->cleanmarker_size)
31434 + .totlen = cpu_to_je32(c->cleanmarker_size),
31435 + .hdr_crc = cpu_to_je32(0)
31436 };
31437
31438 jffs2_prealloc_raw_node_refs(c, jeb, 1);
31439 diff -urNp linux-2.6.35.4/fs/jffs2/summary.h linux-2.6.35.4/fs/jffs2/summary.h
31440 --- linux-2.6.35.4/fs/jffs2/summary.h 2010-08-26 19:47:12.000000000 -0400
31441 +++ linux-2.6.35.4/fs/jffs2/summary.h 2010-09-17 20:12:09.000000000 -0400
31442 @@ -194,18 +194,18 @@ int jffs2_sum_scan_sumnode(struct jffs2_
31443
31444 #define jffs2_sum_active() (0)
31445 #define jffs2_sum_init(a) (0)
31446 -#define jffs2_sum_exit(a)
31447 -#define jffs2_sum_disable_collecting(a)
31448 +#define jffs2_sum_exit(a) do {} while (0)
31449 +#define jffs2_sum_disable_collecting(a) do {} while (0)
31450 #define jffs2_sum_is_disabled(a) (0)
31451 -#define jffs2_sum_reset_collected(a)
31452 +#define jffs2_sum_reset_collected(a) do {} while (0)
31453 #define jffs2_sum_add_kvec(a,b,c,d) (0)
31454 -#define jffs2_sum_move_collected(a,b)
31455 +#define jffs2_sum_move_collected(a,b) do {} while (0)
31456 #define jffs2_sum_write_sumnode(a) (0)
31457 -#define jffs2_sum_add_padding_mem(a,b)
31458 -#define jffs2_sum_add_inode_mem(a,b,c)
31459 -#define jffs2_sum_add_dirent_mem(a,b,c)
31460 -#define jffs2_sum_add_xattr_mem(a,b,c)
31461 -#define jffs2_sum_add_xref_mem(a,b,c)
31462 +#define jffs2_sum_add_padding_mem(a,b) do {} while (0)
31463 +#define jffs2_sum_add_inode_mem(a,b,c) do {} while (0)
31464 +#define jffs2_sum_add_dirent_mem(a,b,c) do {} while (0)
31465 +#define jffs2_sum_add_xattr_mem(a,b,c) do {} while (0)
31466 +#define jffs2_sum_add_xref_mem(a,b,c) do {} while (0)
31467 #define jffs2_sum_scan_sumnode(a,b,c,d,e) (0)
31468
31469 #endif /* CONFIG_JFFS2_SUMMARY */
31470 diff -urNp linux-2.6.35.4/fs/jffs2/wbuf.c linux-2.6.35.4/fs/jffs2/wbuf.c
31471 --- linux-2.6.35.4/fs/jffs2/wbuf.c 2010-08-26 19:47:12.000000000 -0400
31472 +++ linux-2.6.35.4/fs/jffs2/wbuf.c 2010-09-17 20:12:09.000000000 -0400
31473 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
31474 {
31475 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
31476 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
31477 - .totlen = constant_cpu_to_je32(8)
31478 + .totlen = constant_cpu_to_je32(8),
31479 + .hdr_crc = constant_cpu_to_je32(0)
31480 };
31481
31482 /*
31483 diff -urNp linux-2.6.35.4/fs/lockd/svc.c linux-2.6.35.4/fs/lockd/svc.c
31484 --- linux-2.6.35.4/fs/lockd/svc.c 2010-08-26 19:47:12.000000000 -0400
31485 +++ linux-2.6.35.4/fs/lockd/svc.c 2010-09-17 20:12:09.000000000 -0400
31486 @@ -42,7 +42,7 @@
31487
31488 static struct svc_program nlmsvc_program;
31489
31490 -struct nlmsvc_binding * nlmsvc_ops;
31491 +const struct nlmsvc_binding * nlmsvc_ops;
31492 EXPORT_SYMBOL_GPL(nlmsvc_ops);
31493
31494 static DEFINE_MUTEX(nlmsvc_mutex);
31495 diff -urNp linux-2.6.35.4/fs/locks.c linux-2.6.35.4/fs/locks.c
31496 --- linux-2.6.35.4/fs/locks.c 2010-08-26 19:47:12.000000000 -0400
31497 +++ linux-2.6.35.4/fs/locks.c 2010-09-17 20:12:09.000000000 -0400
31498 @@ -2008,16 +2008,16 @@ void locks_remove_flock(struct file *fil
31499 return;
31500
31501 if (filp->f_op && filp->f_op->flock) {
31502 - struct file_lock fl = {
31503 + struct file_lock flock = {
31504 .fl_pid = current->tgid,
31505 .fl_file = filp,
31506 .fl_flags = FL_FLOCK,
31507 .fl_type = F_UNLCK,
31508 .fl_end = OFFSET_MAX,
31509 };
31510 - filp->f_op->flock(filp, F_SETLKW, &fl);
31511 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
31512 - fl.fl_ops->fl_release_private(&fl);
31513 + filp->f_op->flock(filp, F_SETLKW, &flock);
31514 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
31515 + flock.fl_ops->fl_release_private(&flock);
31516 }
31517
31518 lock_kernel();
31519 diff -urNp linux-2.6.35.4/fs/namei.c linux-2.6.35.4/fs/namei.c
31520 --- linux-2.6.35.4/fs/namei.c 2010-08-26 19:47:12.000000000 -0400
31521 +++ linux-2.6.35.4/fs/namei.c 2010-09-17 20:12:37.000000000 -0400
31522 @@ -548,7 +548,7 @@ __do_follow_link(struct path *path, stru
31523 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
31524 error = PTR_ERR(*p);
31525 if (!IS_ERR(*p)) {
31526 - char *s = nd_get_link(nd);
31527 + const char *s = nd_get_link(nd);
31528 error = 0;
31529 if (s)
31530 error = __vfs_follow_link(nd, s);
31531 @@ -581,6 +581,13 @@ static inline int do_follow_link(struct
31532 err = security_inode_follow_link(path->dentry, nd);
31533 if (err)
31534 goto loop;
31535 +
31536 + if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
31537 + path->dentry->d_inode, path->dentry, nd->path.mnt)) {
31538 + err = -EACCES;
31539 + goto loop;
31540 + }
31541 +
31542 current->link_count++;
31543 current->total_link_count++;
31544 nd->depth++;
31545 @@ -965,11 +972,18 @@ return_reval:
31546 break;
31547 }
31548 return_base:
31549 + if (!(nd->flags & LOOKUP_PARENT) && !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
31550 + path_put(&nd->path);
31551 + return -ENOENT;
31552 + }
31553 return 0;
31554 out_dput:
31555 path_put_conditional(&next, nd);
31556 break;
31557 }
31558 + if (!(nd->flags & LOOKUP_PARENT) && !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
31559 + err = -ENOENT;
31560 +
31561 path_put(&nd->path);
31562 return_err:
31563 return err;
31564 @@ -1506,12 +1520,19 @@ static int __open_namei_create(struct na
31565 int error;
31566 struct dentry *dir = nd->path.dentry;
31567
31568 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, nd->path.mnt, open_flag, mode)) {
31569 + error = -EACCES;
31570 + goto out_unlock;
31571 + }
31572 +
31573 if (!IS_POSIXACL(dir->d_inode))
31574 mode &= ~current_umask();
31575 error = security_path_mknod(&nd->path, path->dentry, mode, 0);
31576 if (error)
31577 goto out_unlock;
31578 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
31579 + if (!error)
31580 + gr_handle_create(path->dentry, nd->path.mnt);
31581 out_unlock:
31582 mutex_unlock(&dir->d_inode->i_mutex);
31583 dput(nd->path.dentry);
31584 @@ -1614,6 +1635,7 @@ static struct file *do_last(struct namei
31585 int mode, const char *pathname)
31586 {
31587 struct dentry *dir = nd->path.dentry;
31588 + int flag = open_to_namei_flags(open_flag);
31589 struct file *filp;
31590 int error = -EISDIR;
31591
31592 @@ -1662,6 +1684,22 @@ static struct file *do_last(struct namei
31593 }
31594 path_to_nameidata(path, nd);
31595 audit_inode(pathname, nd->path.dentry);
31596 +
31597 + if (gr_handle_rofs_blockwrite(nd->path.dentry, nd->path.mnt, acc_mode)) {
31598 + error = -EPERM;
31599 + goto exit;
31600 + }
31601 +
31602 + if (gr_handle_rawio(nd->path.dentry->d_inode)) {
31603 + error = -EPERM;
31604 + goto exit;
31605 + }
31606 +
31607 + if (!gr_acl_handle_open(nd->path.dentry, nd->path.mnt, flag)) {
31608 + error = -EACCES;
31609 + goto exit;
31610 + }
31611 +
31612 goto ok;
31613 }
31614
31615 @@ -1714,6 +1752,24 @@ static struct file *do_last(struct namei
31616 /*
31617 * It already exists.
31618 */
31619 +
31620 + if (gr_handle_rofs_blockwrite(path->dentry, nd->path.mnt, acc_mode)) {
31621 + error = -EPERM;
31622 + goto exit_mutex_unlock;
31623 + }
31624 + if (gr_handle_rawio(path->dentry->d_inode)) {
31625 + error = -EPERM;
31626 + goto exit_mutex_unlock;
31627 + }
31628 + if (!gr_acl_handle_open(path->dentry, nd->path.mnt, flag)) {
31629 + error = -EACCES;
31630 + goto exit_mutex_unlock;
31631 + }
31632 + if (gr_handle_fifo(path->dentry, nd->path.mnt, dir, flag, acc_mode)) {
31633 + error = -EACCES;
31634 + goto exit_mutex_unlock;
31635 + }
31636 +
31637 mutex_unlock(&dir->d_inode->i_mutex);
31638 audit_inode(pathname, path->dentry);
31639
31640 @@ -2034,6 +2090,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
31641 error = may_mknod(mode);
31642 if (error)
31643 goto out_dput;
31644 +
31645 + if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
31646 + error = -EPERM;
31647 + goto out_dput;
31648 + }
31649 +
31650 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
31651 + error = -EACCES;
31652 + goto out_dput;
31653 + }
31654 +
31655 error = mnt_want_write(nd.path.mnt);
31656 if (error)
31657 goto out_dput;
31658 @@ -2054,6 +2121,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
31659 }
31660 out_drop_write:
31661 mnt_drop_write(nd.path.mnt);
31662 +
31663 + if (!error)
31664 + gr_handle_create(dentry, nd.path.mnt);
31665 out_dput:
31666 dput(dentry);
31667 out_unlock:
31668 @@ -2106,6 +2176,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
31669 if (IS_ERR(dentry))
31670 goto out_unlock;
31671
31672 + if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
31673 + error = -EACCES;
31674 + goto out_dput;
31675 + }
31676 +
31677 if (!IS_POSIXACL(nd.path.dentry->d_inode))
31678 mode &= ~current_umask();
31679 error = mnt_want_write(nd.path.mnt);
31680 @@ -2117,6 +2192,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
31681 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
31682 out_drop_write:
31683 mnt_drop_write(nd.path.mnt);
31684 +
31685 + if (!error)
31686 + gr_handle_create(dentry, nd.path.mnt);
31687 +
31688 out_dput:
31689 dput(dentry);
31690 out_unlock:
31691 @@ -2198,6 +2277,8 @@ static long do_rmdir(int dfd, const char
31692 char * name;
31693 struct dentry *dentry;
31694 struct nameidata nd;
31695 + ino_t saved_ino = 0;
31696 + dev_t saved_dev = 0;
31697
31698 error = user_path_parent(dfd, pathname, &nd, &name);
31699 if (error)
31700 @@ -2222,6 +2303,19 @@ static long do_rmdir(int dfd, const char
31701 error = PTR_ERR(dentry);
31702 if (IS_ERR(dentry))
31703 goto exit2;
31704 +
31705 + if (dentry->d_inode != NULL) {
31706 + if (dentry->d_inode->i_nlink <= 1) {
31707 + saved_ino = dentry->d_inode->i_ino;
31708 + saved_dev = dentry->d_inode->i_sb->s_dev;
31709 + }
31710 +
31711 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
31712 + error = -EACCES;
31713 + goto exit3;
31714 + }
31715 + }
31716 +
31717 error = mnt_want_write(nd.path.mnt);
31718 if (error)
31719 goto exit3;
31720 @@ -2229,6 +2323,8 @@ static long do_rmdir(int dfd, const char
31721 if (error)
31722 goto exit4;
31723 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
31724 + if (!error && (saved_dev || saved_ino))
31725 + gr_handle_delete(saved_ino, saved_dev);
31726 exit4:
31727 mnt_drop_write(nd.path.mnt);
31728 exit3:
31729 @@ -2291,6 +2387,8 @@ static long do_unlinkat(int dfd, const c
31730 struct dentry *dentry;
31731 struct nameidata nd;
31732 struct inode *inode = NULL;
31733 + ino_t saved_ino = 0;
31734 + dev_t saved_dev = 0;
31735
31736 error = user_path_parent(dfd, pathname, &nd, &name);
31737 if (error)
31738 @@ -2310,8 +2408,19 @@ static long do_unlinkat(int dfd, const c
31739 if (nd.last.name[nd.last.len])
31740 goto slashes;
31741 inode = dentry->d_inode;
31742 - if (inode)
31743 + if (inode) {
31744 + if (inode->i_nlink <= 1) {
31745 + saved_ino = inode->i_ino;
31746 + saved_dev = inode->i_sb->s_dev;
31747 + }
31748 +
31749 atomic_inc(&inode->i_count);
31750 +
31751 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
31752 + error = -EACCES;
31753 + goto exit2;
31754 + }
31755 + }
31756 error = mnt_want_write(nd.path.mnt);
31757 if (error)
31758 goto exit2;
31759 @@ -2319,6 +2428,8 @@ static long do_unlinkat(int dfd, const c
31760 if (error)
31761 goto exit3;
31762 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
31763 + if (!error && (saved_ino || saved_dev))
31764 + gr_handle_delete(saved_ino, saved_dev);
31765 exit3:
31766 mnt_drop_write(nd.path.mnt);
31767 exit2:
31768 @@ -2396,6 +2507,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
31769 if (IS_ERR(dentry))
31770 goto out_unlock;
31771
31772 + if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
31773 + error = -EACCES;
31774 + goto out_dput;
31775 + }
31776 +
31777 error = mnt_want_write(nd.path.mnt);
31778 if (error)
31779 goto out_dput;
31780 @@ -2403,6 +2519,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
31781 if (error)
31782 goto out_drop_write;
31783 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
31784 + if (!error)
31785 + gr_handle_create(dentry, nd.path.mnt);
31786 out_drop_write:
31787 mnt_drop_write(nd.path.mnt);
31788 out_dput:
31789 @@ -2495,6 +2613,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
31790 error = PTR_ERR(new_dentry);
31791 if (IS_ERR(new_dentry))
31792 goto out_unlock;
31793 +
31794 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
31795 + old_path.dentry->d_inode,
31796 + old_path.dentry->d_inode->i_mode, to)) {
31797 + error = -EACCES;
31798 + goto out_dput;
31799 + }
31800 +
31801 + if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
31802 + old_path.dentry, old_path.mnt, to)) {
31803 + error = -EACCES;
31804 + goto out_dput;
31805 + }
31806 +
31807 error = mnt_want_write(nd.path.mnt);
31808 if (error)
31809 goto out_dput;
31810 @@ -2502,6 +2634,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
31811 if (error)
31812 goto out_drop_write;
31813 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
31814 + if (!error)
31815 + gr_handle_create(new_dentry, nd.path.mnt);
31816 out_drop_write:
31817 mnt_drop_write(nd.path.mnt);
31818 out_dput:
31819 @@ -2735,6 +2869,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
31820 if (new_dentry == trap)
31821 goto exit5;
31822
31823 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
31824 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
31825 + to);
31826 + if (error)
31827 + goto exit5;
31828 +
31829 error = mnt_want_write(oldnd.path.mnt);
31830 if (error)
31831 goto exit5;
31832 @@ -2744,6 +2884,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
31833 goto exit6;
31834 error = vfs_rename(old_dir->d_inode, old_dentry,
31835 new_dir->d_inode, new_dentry);
31836 + if (!error)
31837 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
31838 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
31839 exit6:
31840 mnt_drop_write(oldnd.path.mnt);
31841 exit5:
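The namei.c hunks above follow one calling convention throughout: gr_acl_handle_*() helpers return non-zero to allow and zero to deny (mapped to -EACCES), gr_handle_*() helpers return non-zero to deny (mapped to -EPERM or -EACCES), and notifications such as gr_handle_create() run only after the VFS operation succeeds. A stripped-down sketch of that pattern with hypothetical stub hooks (not the real grsecurity implementations):

/* Hypothetical stub hooks, for illustration only. */
#include <errno.h>

static int policy_denies_op(void) { return 0; } /* non-zero means deny     */
static int acl_allows_op(void)    { return 1; } /* zero means deny         */
static int vfs_do_op(void)        { return 0; } /* 0 on success            */
static void audit_success(void)   { }           /* runs only after success */

static int op_with_hooks(void)
{
        int error;

        if (policy_denies_op())
                return -EPERM;   /* gr_handle_* style: non-zero return blocks */

        if (!acl_allows_op())
                return -EACCES;  /* gr_acl_handle_* style: zero return blocks */

        error = vfs_do_op();
        if (!error)
                audit_success(); /* gr_handle_create style: notify on success */
        return error;
}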
31842 diff -urNp linux-2.6.35.4/fs/namespace.c linux-2.6.35.4/fs/namespace.c
31843 --- linux-2.6.35.4/fs/namespace.c 2010-08-26 19:47:12.000000000 -0400
31844 +++ linux-2.6.35.4/fs/namespace.c 2010-09-17 20:21:58.000000000 -0400
31845 @@ -1099,6 +1099,9 @@ static int do_umount(struct vfsmount *mn
31846 if (!(sb->s_flags & MS_RDONLY))
31847 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
31848 up_write(&sb->s_umount);
31849 +
31850 + gr_log_remount(mnt->mnt_devname, retval);
31851 +
31852 return retval;
31853 }
31854
31855 @@ -1118,6 +1121,9 @@ static int do_umount(struct vfsmount *mn
31856 spin_unlock(&vfsmount_lock);
31857 up_write(&namespace_sem);
31858 release_mounts(&umount_list);
31859 +
31860 + gr_log_unmount(mnt->mnt_devname, retval);
31861 +
31862 return retval;
31863 }
31864
31865 @@ -1988,6 +1994,16 @@ long do_mount(char *dev_name, char *dir_
31866 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
31867 MS_STRICTATIME);
31868
31869 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
31870 + retval = -EPERM;
31871 + goto dput_out;
31872 + }
31873 +
31874 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
31875 + retval = -EPERM;
31876 + goto dput_out;
31877 + }
31878 +
31879 if (flags & MS_REMOUNT)
31880 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
31881 data_page);
31882 @@ -2002,6 +2018,9 @@ long do_mount(char *dev_name, char *dir_
31883 dev_name, data_page);
31884 dput_out:
31885 path_put(&path);
31886 +
31887 + gr_log_mount(dev_name, dir_name, retval);
31888 +
31889 return retval;
31890 }
31891
31892 @@ -2208,6 +2227,12 @@ SYSCALL_DEFINE2(pivot_root, const char _
31893 goto out1;
31894 }
31895
31896 + if (gr_handle_chroot_pivot()) {
31897 + error = -EPERM;
31898 + path_put(&old);
31899 + goto out1;
31900 + }
31901 +
31902 read_lock(&current->fs->lock);
31903 root = current->fs->root;
31904 path_get(&current->fs->root);
31905 diff -urNp linux-2.6.35.4/fs/nfs/inode.c linux-2.6.35.4/fs/nfs/inode.c
31906 --- linux-2.6.35.4/fs/nfs/inode.c 2010-08-26 19:47:12.000000000 -0400
31907 +++ linux-2.6.35.4/fs/nfs/inode.c 2010-09-17 20:12:09.000000000 -0400
31908 @@ -915,16 +915,16 @@ static int nfs_size_need_update(const st
31909 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
31910 }
31911
31912 -static atomic_long_t nfs_attr_generation_counter;
31913 +static atomic_long_unchecked_t nfs_attr_generation_counter;
31914
31915 static unsigned long nfs_read_attr_generation_counter(void)
31916 {
31917 - return atomic_long_read(&nfs_attr_generation_counter);
31918 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
31919 }
31920
31921 unsigned long nfs_inc_attr_generation_counter(void)
31922 {
31923 - return atomic_long_inc_return(&nfs_attr_generation_counter);
31924 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
31925 }
31926
31927 void nfs_fattr_init(struct nfs_fattr *fattr)
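nfs_attr_generation_counter above is switched from atomic_long_t to the PaX type atomic_long_unchecked_t. Under the PaX REFCOUNT feature the ordinary atomic types gain overflow detection; the *_unchecked variants keep plain wrap-around semantics for counters like this one, where overflow is harmless. A rough, simplified sketch of the shape of such a type (assumed layout and helpers, not the actual PaX definitions, which live in the arch atomic headers):

/* Simplified sketch only. */
typedef struct {
        long counter;
} atomic_long_unchecked_t;

static inline long atomic_long_read_unchecked(const atomic_long_unchecked_t *v)
{
        return *(volatile const long *)&v->counter;
}

static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *v)
{
        /* Plain wrapping increment, no overflow trap or saturation. */
        return __sync_add_and_fetch(&v->counter, 1);
}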
31928 diff -urNp linux-2.6.35.4/fs/nfs/nfs4proc.c linux-2.6.35.4/fs/nfs/nfs4proc.c
31929 --- linux-2.6.35.4/fs/nfs/nfs4proc.c 2010-08-26 19:47:12.000000000 -0400
31930 +++ linux-2.6.35.4/fs/nfs/nfs4proc.c 2010-09-17 20:12:09.000000000 -0400
31931 @@ -1166,7 +1166,7 @@ static int _nfs4_do_open_reclaim(struct
31932 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
31933 {
31934 struct nfs_server *server = NFS_SERVER(state->inode);
31935 - struct nfs4_exception exception = { };
31936 + struct nfs4_exception exception = {0, 0};
31937 int err;
31938 do {
31939 err = _nfs4_do_open_reclaim(ctx, state);
31940 @@ -1208,7 +1208,7 @@ static int _nfs4_open_delegation_recall(
31941
31942 int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
31943 {
31944 - struct nfs4_exception exception = { };
31945 + struct nfs4_exception exception = {0, 0};
31946 struct nfs_server *server = NFS_SERVER(state->inode);
31947 int err;
31948 do {
31949 @@ -1581,7 +1581,7 @@ static int _nfs4_open_expired(struct nfs
31950 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
31951 {
31952 struct nfs_server *server = NFS_SERVER(state->inode);
31953 - struct nfs4_exception exception = { };
31954 + struct nfs4_exception exception = {0, 0};
31955 int err;
31956
31957 do {
31958 @@ -1697,7 +1697,7 @@ out_err:
31959
31960 static struct nfs4_state *nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred)
31961 {
31962 - struct nfs4_exception exception = { };
31963 + struct nfs4_exception exception = {0, 0};
31964 struct nfs4_state *res;
31965 int status;
31966
31967 @@ -1788,7 +1788,7 @@ static int nfs4_do_setattr(struct inode
31968 struct nfs4_state *state)
31969 {
31970 struct nfs_server *server = NFS_SERVER(inode);
31971 - struct nfs4_exception exception = { };
31972 + struct nfs4_exception exception = {0, 0};
31973 int err;
31974 do {
31975 err = nfs4_handle_exception(server,
31976 @@ -2166,7 +2166,7 @@ static int _nfs4_server_capabilities(str
31977
31978 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
31979 {
31980 - struct nfs4_exception exception = { };
31981 + struct nfs4_exception exception = {0, 0};
31982 int err;
31983 do {
31984 err = nfs4_handle_exception(server,
31985 @@ -2200,7 +2200,7 @@ static int _nfs4_lookup_root(struct nfs_
31986 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
31987 struct nfs_fsinfo *info)
31988 {
31989 - struct nfs4_exception exception = { };
31990 + struct nfs4_exception exception = {0, 0};
31991 int err;
31992 do {
31993 err = nfs4_handle_exception(server,
31994 @@ -2289,7 +2289,7 @@ static int _nfs4_proc_getattr(struct nfs
31995
31996 static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
31997 {
31998 - struct nfs4_exception exception = { };
31999 + struct nfs4_exception exception = {0, 0};
32000 int err;
32001 do {
32002 err = nfs4_handle_exception(server,
32003 @@ -2377,7 +2377,7 @@ static int nfs4_proc_lookupfh(struct nfs
32004 struct qstr *name, struct nfs_fh *fhandle,
32005 struct nfs_fattr *fattr)
32006 {
32007 - struct nfs4_exception exception = { };
32008 + struct nfs4_exception exception = {0, 0};
32009 int err;
32010 do {
32011 err = _nfs4_proc_lookupfh(server, dirfh, name, fhandle, fattr);
32012 @@ -2406,7 +2406,7 @@ static int _nfs4_proc_lookup(struct inod
32013
32014 static int nfs4_proc_lookup(struct inode *dir, struct qstr *name, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
32015 {
32016 - struct nfs4_exception exception = { };
32017 + struct nfs4_exception exception = {0, 0};
32018 int err;
32019 do {
32020 err = nfs4_handle_exception(NFS_SERVER(dir),
32021 @@ -2473,7 +2473,7 @@ static int _nfs4_proc_access(struct inod
32022
32023 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
32024 {
32025 - struct nfs4_exception exception = { };
32026 + struct nfs4_exception exception = {0, 0};
32027 int err;
32028 do {
32029 err = nfs4_handle_exception(NFS_SERVER(inode),
32030 @@ -2529,7 +2529,7 @@ static int _nfs4_proc_readlink(struct in
32031 static int nfs4_proc_readlink(struct inode *inode, struct page *page,
32032 unsigned int pgbase, unsigned int pglen)
32033 {
32034 - struct nfs4_exception exception = { };
32035 + struct nfs4_exception exception = {0, 0};
32036 int err;
32037 do {
32038 err = nfs4_handle_exception(NFS_SERVER(inode),
32039 @@ -2625,7 +2625,7 @@ out:
32040
32041 static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
32042 {
32043 - struct nfs4_exception exception = { };
32044 + struct nfs4_exception exception = {0, 0};
32045 int err;
32046 do {
32047 err = nfs4_handle_exception(NFS_SERVER(dir),
32048 @@ -2700,7 +2700,7 @@ out:
32049 static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
32050 struct inode *new_dir, struct qstr *new_name)
32051 {
32052 - struct nfs4_exception exception = { };
32053 + struct nfs4_exception exception = {0, 0};
32054 int err;
32055 do {
32056 err = nfs4_handle_exception(NFS_SERVER(old_dir),
32057 @@ -2749,7 +2749,7 @@ out:
32058
32059 static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
32060 {
32061 - struct nfs4_exception exception = { };
32062 + struct nfs4_exception exception = {0, 0};
32063 int err;
32064 do {
32065 err = nfs4_handle_exception(NFS_SERVER(inode),
32066 @@ -2841,7 +2841,7 @@ out:
32067 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
32068 struct page *page, unsigned int len, struct iattr *sattr)
32069 {
32070 - struct nfs4_exception exception = { };
32071 + struct nfs4_exception exception = {0, 0};
32072 int err;
32073 do {
32074 err = nfs4_handle_exception(NFS_SERVER(dir),
32075 @@ -2872,7 +2872,7 @@ out:
32076 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
32077 struct iattr *sattr)
32078 {
32079 - struct nfs4_exception exception = { };
32080 + struct nfs4_exception exception = {0, 0};
32081 int err;
32082 do {
32083 err = nfs4_handle_exception(NFS_SERVER(dir),
32084 @@ -2921,7 +2921,7 @@ static int _nfs4_proc_readdir(struct den
32085 static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
32086 u64 cookie, struct page *page, unsigned int count, int plus)
32087 {
32088 - struct nfs4_exception exception = { };
32089 + struct nfs4_exception exception = {0, 0};
32090 int err;
32091 do {
32092 err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode),
32093 @@ -2969,7 +2969,7 @@ out:
32094 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
32095 struct iattr *sattr, dev_t rdev)
32096 {
32097 - struct nfs4_exception exception = { };
32098 + struct nfs4_exception exception = {0, 0};
32099 int err;
32100 do {
32101 err = nfs4_handle_exception(NFS_SERVER(dir),
32102 @@ -3001,7 +3001,7 @@ static int _nfs4_proc_statfs(struct nfs_
32103
32104 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
32105 {
32106 - struct nfs4_exception exception = { };
32107 + struct nfs4_exception exception = {0, 0};
32108 int err;
32109 do {
32110 err = nfs4_handle_exception(server,
32111 @@ -3032,7 +3032,7 @@ static int _nfs4_do_fsinfo(struct nfs_se
32112
32113 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
32114 {
32115 - struct nfs4_exception exception = { };
32116 + struct nfs4_exception exception = {0, 0};
32117 int err;
32118
32119 do {
32120 @@ -3078,7 +3078,7 @@ static int _nfs4_proc_pathconf(struct nf
32121 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
32122 struct nfs_pathconf *pathconf)
32123 {
32124 - struct nfs4_exception exception = { };
32125 + struct nfs4_exception exception = {0, 0};
32126 int err;
32127
32128 do {
32129 @@ -3399,7 +3399,7 @@ out_free:
32130
32131 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
32132 {
32133 - struct nfs4_exception exception = { };
32134 + struct nfs4_exception exception = {0, 0};
32135 ssize_t ret;
32136 do {
32137 ret = __nfs4_get_acl_uncached(inode, buf, buflen);
32138 @@ -3455,7 +3455,7 @@ static int __nfs4_proc_set_acl(struct in
32139
32140 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
32141 {
32142 - struct nfs4_exception exception = { };
32143 + struct nfs4_exception exception = {0, 0};
32144 int err;
32145 do {
32146 err = nfs4_handle_exception(NFS_SERVER(inode),
32147 @@ -3745,7 +3745,7 @@ out:
32148 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
32149 {
32150 struct nfs_server *server = NFS_SERVER(inode);
32151 - struct nfs4_exception exception = { };
32152 + struct nfs4_exception exception = {0, 0};
32153 int err;
32154 do {
32155 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
32156 @@ -3818,7 +3818,7 @@ out:
32157
32158 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
32159 {
32160 - struct nfs4_exception exception = { };
32161 + struct nfs4_exception exception = {0, 0};
32162 int err;
32163
32164 do {
32165 @@ -4232,7 +4232,7 @@ static int _nfs4_do_setlk(struct nfs4_st
32166 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
32167 {
32168 struct nfs_server *server = NFS_SERVER(state->inode);
32169 - struct nfs4_exception exception = { };
32170 + struct nfs4_exception exception = {0, 0};
32171 int err;
32172
32173 do {
32174 @@ -4250,7 +4250,7 @@ static int nfs4_lock_reclaim(struct nfs4
32175 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
32176 {
32177 struct nfs_server *server = NFS_SERVER(state->inode);
32178 - struct nfs4_exception exception = { };
32179 + struct nfs4_exception exception = {0, 0};
32180 int err;
32181
32182 err = nfs4_set_lock_state(state, request);
32183 @@ -4315,7 +4315,7 @@ out:
32184
32185 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
32186 {
32187 - struct nfs4_exception exception = { };
32188 + struct nfs4_exception exception = {0, 0};
32189 int err;
32190
32191 do {
32192 @@ -4375,7 +4375,7 @@ nfs4_proc_lock(struct file *filp, int cm
32193 int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
32194 {
32195 struct nfs_server *server = NFS_SERVER(state->inode);
32196 - struct nfs4_exception exception = { };
32197 + struct nfs4_exception exception = {0, 0};
32198 int err;
32199
32200 err = nfs4_set_lock_state(state, fl);
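Every nfs4_exception initializer above changes from the GNU empty-brace form { } to an explicit {0, 0}. Both zero the structure; empty braces are a GCC extension (standardized only in C23), and spelling out the members keeps the initializer valid under stricter C dialects and warning settings. A small illustration with a made-up two-member struct (not the real struct nfs4_exception):

/* Made-up structure for illustration only. */
struct two_members {
        long timeout;
        int retry;
};

/* GNU extension (standard only since C23): empty braces, all members zeroed. */
struct two_members a = { };

/* Portable C90/C99 initializer: each member zeroed explicitly, same object. */
struct two_members b = { 0, 0 };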
32201 diff -urNp linux-2.6.35.4/fs/nfsd/lockd.c linux-2.6.35.4/fs/nfsd/lockd.c
32202 --- linux-2.6.35.4/fs/nfsd/lockd.c 2010-08-26 19:47:12.000000000 -0400
32203 +++ linux-2.6.35.4/fs/nfsd/lockd.c 2010-09-17 20:12:09.000000000 -0400
32204 @@ -61,7 +61,7 @@ nlm_fclose(struct file *filp)
32205 fput(filp);
32206 }
32207
32208 -static struct nlmsvc_binding nfsd_nlm_ops = {
32209 +static const struct nlmsvc_binding nfsd_nlm_ops = {
32210 .fopen = nlm_fopen, /* open file for locking */
32211 .fclose = nlm_fclose, /* close file */
32212 };
32213 diff -urNp linux-2.6.35.4/fs/nfsd/nfsctl.c linux-2.6.35.4/fs/nfsd/nfsctl.c
32214 --- linux-2.6.35.4/fs/nfsd/nfsctl.c 2010-08-26 19:47:12.000000000 -0400
32215 +++ linux-2.6.35.4/fs/nfsd/nfsctl.c 2010-09-17 20:12:09.000000000 -0400
32216 @@ -163,7 +163,7 @@ static int export_features_open(struct i
32217 return single_open(file, export_features_show, NULL);
32218 }
32219
32220 -static struct file_operations export_features_operations = {
32221 +static const struct file_operations export_features_operations = {
32222 .open = export_features_open,
32223 .read = seq_read,
32224 .llseek = seq_lseek,
32225 diff -urNp linux-2.6.35.4/fs/nfsd/vfs.c linux-2.6.35.4/fs/nfsd/vfs.c
32226 --- linux-2.6.35.4/fs/nfsd/vfs.c 2010-08-26 19:47:12.000000000 -0400
32227 +++ linux-2.6.35.4/fs/nfsd/vfs.c 2010-09-17 20:12:09.000000000 -0400
32228 @@ -933,7 +933,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
32229 } else {
32230 oldfs = get_fs();
32231 set_fs(KERNEL_DS);
32232 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
32233 + host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
32234 set_fs(oldfs);
32235 }
32236
32237 @@ -1056,7 +1056,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
32238
32239 /* Write the data. */
32240 oldfs = get_fs(); set_fs(KERNEL_DS);
32241 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
32242 + host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
32243 set_fs(oldfs);
32244 if (host_err < 0)
32245 goto out_nfserr;
32246 @@ -1541,7 +1541,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
32247 */
32248
32249 oldfs = get_fs(); set_fs(KERNEL_DS);
32250 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
32251 + host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
32252 set_fs(oldfs);
32253
32254 if (host_err < 0)
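The __force casts added above concern sparse, not the compiler: vec and buf are kernel pointers, but vfs_readv(), vfs_writev() and ->readlink() take __user pointers, which is legitimate here only under the surrounding set_fs(KERNEL_DS). __force tells sparse that the address-space cast is intentional. A condensed sketch of the annotations involved, with a hypothetical copy_to_userspace() standing in for the __user-taking helpers (simplified from the definitions in linux/compiler.h):

/* The annotations only exist for sparse (__CHECKER__ builds). */
#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

extern long copy_to_userspace(void __user *dst, const void *src, unsigned long n);

static long copy_to_kernel_buffer(void *kbuf, const void *src, unsigned long n)
{
        /* Without __force, sparse reports "cast removes address space of
         * expression"; the cast is only correct while the caller runs under
         * set_fs(KERNEL_DS). */
        return copy_to_userspace((__force void __user *)kbuf, src, n);
}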
32255 diff -urNp linux-2.6.35.4/fs/nls/nls_base.c linux-2.6.35.4/fs/nls/nls_base.c
32256 --- linux-2.6.35.4/fs/nls/nls_base.c 2010-08-26 19:47:12.000000000 -0400
32257 +++ linux-2.6.35.4/fs/nls/nls_base.c 2010-09-17 20:12:09.000000000 -0400
32258 @@ -41,7 +41,7 @@ static const struct utf8_table utf8_tabl
32259 {0xF8, 0xF0, 3*6, 0x1FFFFF, 0x10000, /* 4 byte sequence */},
32260 {0xFC, 0xF8, 4*6, 0x3FFFFFF, 0x200000, /* 5 byte sequence */},
32261 {0xFE, 0xFC, 5*6, 0x7FFFFFFF, 0x4000000, /* 6 byte sequence */},
32262 - {0, /* end of table */}
32263 + {0, 0, 0, 0, 0, /* end of table */}
32264 };
32265
32266 #define UNICODE_MAX 0x0010ffff
32267 diff -urNp linux-2.6.35.4/fs/ntfs/file.c linux-2.6.35.4/fs/ntfs/file.c
32268 --- linux-2.6.35.4/fs/ntfs/file.c 2010-08-26 19:47:12.000000000 -0400
32269 +++ linux-2.6.35.4/fs/ntfs/file.c 2010-09-17 20:12:09.000000000 -0400
32270 @@ -2223,6 +2223,6 @@ const struct inode_operations ntfs_file_
32271 #endif /* NTFS_RW */
32272 };
32273
32274 -const struct file_operations ntfs_empty_file_ops = {};
32275 +const struct file_operations ntfs_empty_file_ops __read_only;
32276
32277 -const struct inode_operations ntfs_empty_inode_ops = {};
32278 +const struct inode_operations ntfs_empty_inode_ops __read_only;
32279 diff -urNp linux-2.6.35.4/fs/ocfs2/localalloc.c linux-2.6.35.4/fs/ocfs2/localalloc.c
32280 --- linux-2.6.35.4/fs/ocfs2/localalloc.c 2010-08-26 19:47:12.000000000 -0400
32281 +++ linux-2.6.35.4/fs/ocfs2/localalloc.c 2010-09-17 20:12:09.000000000 -0400
32282 @@ -1307,7 +1307,7 @@ static int ocfs2_local_alloc_slide_windo
32283 goto bail;
32284 }
32285
32286 - atomic_inc(&osb->alloc_stats.moves);
32287 + atomic_inc_unchecked(&osb->alloc_stats.moves);
32288
32289 bail:
32290 if (handle)
32291 diff -urNp linux-2.6.35.4/fs/ocfs2/ocfs2.h linux-2.6.35.4/fs/ocfs2/ocfs2.h
32292 --- linux-2.6.35.4/fs/ocfs2/ocfs2.h 2010-08-26 19:47:12.000000000 -0400
32293 +++ linux-2.6.35.4/fs/ocfs2/ocfs2.h 2010-09-17 20:12:09.000000000 -0400
32294 @@ -223,11 +223,11 @@ enum ocfs2_vol_state
32295
32296 struct ocfs2_alloc_stats
32297 {
32298 - atomic_t moves;
32299 - atomic_t local_data;
32300 - atomic_t bitmap_data;
32301 - atomic_t bg_allocs;
32302 - atomic_t bg_extends;
32303 + atomic_unchecked_t moves;
32304 + atomic_unchecked_t local_data;
32305 + atomic_unchecked_t bitmap_data;
32306 + atomic_unchecked_t bg_allocs;
32307 + atomic_unchecked_t bg_extends;
32308 };
32309
32310 enum ocfs2_local_alloc_state
32311 diff -urNp linux-2.6.35.4/fs/ocfs2/suballoc.c linux-2.6.35.4/fs/ocfs2/suballoc.c
32312 --- linux-2.6.35.4/fs/ocfs2/suballoc.c 2010-08-26 19:47:12.000000000 -0400
32313 +++ linux-2.6.35.4/fs/ocfs2/suballoc.c 2010-09-17 20:12:09.000000000 -0400
32314 @@ -856,7 +856,7 @@ static int ocfs2_reserve_suballoc_bits(s
32315 mlog_errno(status);
32316 goto bail;
32317 }
32318 - atomic_inc(&osb->alloc_stats.bg_extends);
32319 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
32320
32321 /* You should never ask for this much metadata */
32322 BUG_ON(bits_wanted >
32323 @@ -1968,7 +1968,7 @@ int ocfs2_claim_metadata(handle_t *handl
32324 mlog_errno(status);
32325 goto bail;
32326 }
32327 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
32328 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
32329
32330 *suballoc_loc = res.sr_bg_blkno;
32331 *suballoc_bit_start = res.sr_bit_offset;
32332 @@ -2045,7 +2045,7 @@ int ocfs2_claim_new_inode(handle_t *hand
32333 mlog_errno(status);
32334 goto bail;
32335 }
32336 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
32337 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
32338
32339 BUG_ON(res.sr_bits != 1);
32340
32341 @@ -2150,7 +2150,7 @@ int __ocfs2_claim_clusters(handle_t *han
32342 cluster_start,
32343 num_clusters);
32344 if (!status)
32345 - atomic_inc(&osb->alloc_stats.local_data);
32346 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
32347 } else {
32348 if (min_clusters > (osb->bitmap_cpg - 1)) {
32349 /* The only paths asking for contiguousness
32350 @@ -2176,7 +2176,7 @@ int __ocfs2_claim_clusters(handle_t *han
32351 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
32352 res.sr_bg_blkno,
32353 res.sr_bit_offset);
32354 - atomic_inc(&osb->alloc_stats.bitmap_data);
32355 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
32356 *num_clusters = res.sr_bits;
32357 }
32358 }
32359 diff -urNp linux-2.6.35.4/fs/ocfs2/super.c linux-2.6.35.4/fs/ocfs2/super.c
32360 --- linux-2.6.35.4/fs/ocfs2/super.c 2010-08-26 19:47:12.000000000 -0400
32361 +++ linux-2.6.35.4/fs/ocfs2/super.c 2010-09-17 20:12:09.000000000 -0400
32362 @@ -293,11 +293,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
32363 "%10s => GlobalAllocs: %d LocalAllocs: %d "
32364 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
32365 "Stats",
32366 - atomic_read(&osb->alloc_stats.bitmap_data),
32367 - atomic_read(&osb->alloc_stats.local_data),
32368 - atomic_read(&osb->alloc_stats.bg_allocs),
32369 - atomic_read(&osb->alloc_stats.moves),
32370 - atomic_read(&osb->alloc_stats.bg_extends));
32371 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
32372 + atomic_read_unchecked(&osb->alloc_stats.local_data),
32373 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
32374 + atomic_read_unchecked(&osb->alloc_stats.moves),
32375 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
32376
32377 out += snprintf(buf + out, len - out,
32378 "%10s => State: %u Descriptor: %llu Size: %u bits "
32379 @@ -2047,11 +2047,11 @@ static int ocfs2_initialize_super(struct
32380 spin_lock_init(&osb->osb_xattr_lock);
32381 ocfs2_init_steal_slots(osb);
32382
32383 - atomic_set(&osb->alloc_stats.moves, 0);
32384 - atomic_set(&osb->alloc_stats.local_data, 0);
32385 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
32386 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
32387 - atomic_set(&osb->alloc_stats.bg_extends, 0);
32388 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
32389 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
32390 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
32391 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
32392 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
32393
32394 /* Copy the blockcheck stats from the superblock probe */
32395 osb->osb_ecc_stats = *stats;
32396 diff -urNp linux-2.6.35.4/fs/ocfs2/symlink.c linux-2.6.35.4/fs/ocfs2/symlink.c
32397 --- linux-2.6.35.4/fs/ocfs2/symlink.c 2010-08-26 19:47:12.000000000 -0400
32398 +++ linux-2.6.35.4/fs/ocfs2/symlink.c 2010-09-17 20:12:09.000000000 -0400
32399 @@ -148,7 +148,7 @@ bail:
32400
32401 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
32402 {
32403 - char *link = nd_get_link(nd);
32404 + const char *link = nd_get_link(nd);
32405 if (!IS_ERR(link))
32406 kfree(link);
32407 }
32408 diff -urNp linux-2.6.35.4/fs/open.c linux-2.6.35.4/fs/open.c
32409 --- linux-2.6.35.4/fs/open.c 2010-08-26 19:47:12.000000000 -0400
32410 +++ linux-2.6.35.4/fs/open.c 2010-09-17 20:12:37.000000000 -0400
32411 @@ -42,6 +42,9 @@ int do_truncate(struct dentry *dentry, l
32412 if (length < 0)
32413 return -EINVAL;
32414
32415 + if (filp && !gr_acl_handle_truncate(dentry, filp->f_path.mnt))
32416 + return -EACCES;
32417 +
32418 newattrs.ia_size = length;
32419 newattrs.ia_valid = ATTR_SIZE | time_attrs;
32420 if (filp) {
32421 @@ -345,6 +348,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
32422 if (__mnt_is_readonly(path.mnt))
32423 res = -EROFS;
32424
32425 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
32426 + res = -EACCES;
32427 +
32428 out_path_release:
32429 path_put(&path);
32430 out:
32431 @@ -371,6 +377,8 @@ SYSCALL_DEFINE1(chdir, const char __user
32432 if (error)
32433 goto dput_and_out;
32434
32435 + gr_log_chdir(path.dentry, path.mnt);
32436 +
32437 set_fs_pwd(current->fs, &path);
32438
32439 dput_and_out:
32440 @@ -397,6 +405,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
32441 goto out_putf;
32442
32443 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
32444 +
32445 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
32446 + error = -EPERM;
32447 +
32448 + if (!error)
32449 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
32450 +
32451 if (!error)
32452 set_fs_pwd(current->fs, &file->f_path);
32453 out_putf:
32454 @@ -425,7 +440,18 @@ SYSCALL_DEFINE1(chroot, const char __use
32455 if (error)
32456 goto dput_and_out;
32457
32458 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
32459 + goto dput_and_out;
32460 +
32461 + if (gr_handle_chroot_caps(&path)) {
32462 + error = -ENOMEM;
32463 + goto dput_and_out;
32464 + }
32465 +
32466 set_fs_root(current->fs, &path);
32467 +
32468 + gr_handle_chroot_chdir(&path);
32469 +
32470 error = 0;
32471 dput_and_out:
32472 path_put(&path);
32473 @@ -453,6 +479,12 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
32474 err = mnt_want_write_file(file);
32475 if (err)
32476 goto out_putf;
32477 +
32478 + if (!gr_acl_handle_fchmod(dentry, file->f_path.mnt, mode)) {
32479 + err = -EACCES;
32480 + goto out_drop_write;
32481 + }
32482 +
32483 mutex_lock(&inode->i_mutex);
32484 err = security_path_chmod(dentry, file->f_vfsmnt, mode);
32485 if (err)
32486 @@ -464,6 +496,7 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
32487 err = notify_change(dentry, &newattrs);
32488 out_unlock:
32489 mutex_unlock(&inode->i_mutex);
32490 +out_drop_write:
32491 mnt_drop_write(file->f_path.mnt);
32492 out_putf:
32493 fput(file);
32494 @@ -486,17 +519,30 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
32495 error = mnt_want_write(path.mnt);
32496 if (error)
32497 goto dput_and_out;
32498 +
32499 + if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
32500 + error = -EACCES;
32501 + goto out_drop_write;
32502 + }
32503 +
32504 mutex_lock(&inode->i_mutex);
32505 error = security_path_chmod(path.dentry, path.mnt, mode);
32506 if (error)
32507 goto out_unlock;
32508 if (mode == (mode_t) -1)
32509 mode = inode->i_mode;
32510 +
32511 + if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
32512 + error = -EACCES;
32513 + goto out_unlock;
32514 + }
32515 +
32516 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
32517 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
32518 error = notify_change(path.dentry, &newattrs);
32519 out_unlock:
32520 mutex_unlock(&inode->i_mutex);
32521 +out_drop_write:
32522 mnt_drop_write(path.mnt);
32523 dput_and_out:
32524 path_put(&path);
32525 @@ -515,6 +561,9 @@ static int chown_common(struct path *pat
32526 int error;
32527 struct iattr newattrs;
32528
32529 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
32530 + return -EACCES;
32531 +
32532 newattrs.ia_valid = ATTR_CTIME;
32533 if (user != (uid_t) -1) {
32534 newattrs.ia_valid |= ATTR_UID;
32535 diff -urNp linux-2.6.35.4/fs/pipe.c linux-2.6.35.4/fs/pipe.c
32536 --- linux-2.6.35.4/fs/pipe.c 2010-08-26 19:47:12.000000000 -0400
32537 +++ linux-2.6.35.4/fs/pipe.c 2010-09-17 20:12:37.000000000 -0400
32538 @@ -420,9 +420,9 @@ redo:
32539 }
32540 if (bufs) /* More to do? */
32541 continue;
32542 - if (!pipe->writers)
32543 + if (!atomic_read(&pipe->writers))
32544 break;
32545 - if (!pipe->waiting_writers) {
32546 + if (!atomic_read(&pipe->waiting_writers)) {
32547 /* syscall merging: Usually we must not sleep
32548 * if O_NONBLOCK is set, or if we got some data.
32549 * But if a writer sleeps in kernel space, then
32550 @@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const str
32551 mutex_lock(&inode->i_mutex);
32552 pipe = inode->i_pipe;
32553
32554 - if (!pipe->readers) {
32555 + if (!atomic_read(&pipe->readers)) {
32556 send_sig(SIGPIPE, current, 0);
32557 ret = -EPIPE;
32558 goto out;
32559 @@ -530,7 +530,7 @@ redo1:
32560 for (;;) {
32561 int bufs;
32562
32563 - if (!pipe->readers) {
32564 + if (!atomic_read(&pipe->readers)) {
32565 send_sig(SIGPIPE, current, 0);
32566 if (!ret)
32567 ret = -EPIPE;
32568 @@ -616,9 +616,9 @@ redo2:
32569 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
32570 do_wakeup = 0;
32571 }
32572 - pipe->waiting_writers++;
32573 + atomic_inc(&pipe->waiting_writers);
32574 pipe_wait(pipe);
32575 - pipe->waiting_writers--;
32576 + atomic_dec(&pipe->waiting_writers);
32577 }
32578 out:
32579 mutex_unlock(&inode->i_mutex);
32580 @@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table
32581 mask = 0;
32582 if (filp->f_mode & FMODE_READ) {
32583 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
32584 - if (!pipe->writers && filp->f_version != pipe->w_counter)
32585 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
32586 mask |= POLLHUP;
32587 }
32588
32589 @@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table
32590 * Most Unices do not set POLLERR for FIFOs but on Linux they
32591 * behave exactly like pipes for poll().
32592 */
32593 - if (!pipe->readers)
32594 + if (!atomic_read(&pipe->readers))
32595 mask |= POLLERR;
32596 }
32597
32598 @@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int de
32599
32600 mutex_lock(&inode->i_mutex);
32601 pipe = inode->i_pipe;
32602 - pipe->readers -= decr;
32603 - pipe->writers -= decw;
32604 + atomic_sub(decr, &pipe->readers);
32605 + atomic_sub(decw, &pipe->writers);
32606
32607 - if (!pipe->readers && !pipe->writers) {
32608 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
32609 free_pipe_info(inode);
32610 } else {
32611 wake_up_interruptible_sync(&pipe->wait);
32612 @@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, stru
32613
32614 if (inode->i_pipe) {
32615 ret = 0;
32616 - inode->i_pipe->readers++;
32617 + atomic_inc(&inode->i_pipe->readers);
32618 }
32619
32620 mutex_unlock(&inode->i_mutex);
32621 @@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, str
32622
32623 if (inode->i_pipe) {
32624 ret = 0;
32625 - inode->i_pipe->writers++;
32626 + atomic_inc(&inode->i_pipe->writers);
32627 }
32628
32629 mutex_unlock(&inode->i_mutex);
32630 @@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, stru
32631 if (inode->i_pipe) {
32632 ret = 0;
32633 if (filp->f_mode & FMODE_READ)
32634 - inode->i_pipe->readers++;
32635 + atomic_inc(&inode->i_pipe->readers);
32636 if (filp->f_mode & FMODE_WRITE)
32637 - inode->i_pipe->writers++;
32638 + atomic_inc(&inode->i_pipe->writers);
32639 }
32640
32641 mutex_unlock(&inode->i_mutex);
32642 @@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
32643 inode->i_pipe = NULL;
32644 }
32645
32646 -static struct vfsmount *pipe_mnt __read_mostly;
32647 +struct vfsmount *pipe_mnt __read_mostly;
32648
32649 /*
32650 * pipefs_dname() is called from d_path().
32651 @@ -959,7 +959,8 @@ static struct inode * get_pipe_inode(voi
32652 goto fail_iput;
32653 inode->i_pipe = pipe;
32654
32655 - pipe->readers = pipe->writers = 1;
32656 + atomic_set(&pipe->readers, 1);
32657 + atomic_set(&pipe->writers, 1);
32658 inode->i_fop = &rdwr_pipefifo_fops;
32659
32660 /*
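The pipe.c hunks above turn the readers, writers and waiting_writers fields into atomic_t counters and route every update through atomic_inc()/atomic_dec()/atomic_sub(). A small user-space sketch (C11 atomics plus pthreads, illustrative only) of the lost-update problem that a plain ++ on a shared counter allows and an atomic increment rules out:

/* Illustrative only: two threads bumping a shared counter one million times each. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int plain_counter;         /* plain ++ may lose updates under contention */
static atomic_int atomic_counter; /* atomic read-modify-write cannot            */

static void *worker(void *arg)
{
        (void)arg;
        for (int i = 0; i < 1000000; i++) {
                plain_counter++;                                  /* non-atomic RMW */
                atomic_fetch_add_explicit(&atomic_counter, 1,
                                          memory_order_relaxed);  /* atomic RMW     */
        }
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, worker, NULL);
        pthread_create(&b, NULL, worker, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);

        /* plain_counter is often below 2000000; atomic_counter is always 2000000. */
        printf("plain=%d atomic=%d\n", plain_counter, atomic_load(&atomic_counter));
        return 0;
}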
32661 diff -urNp linux-2.6.35.4/fs/proc/array.c linux-2.6.35.4/fs/proc/array.c
32662 --- linux-2.6.35.4/fs/proc/array.c 2010-08-26 19:47:12.000000000 -0400
32663 +++ linux-2.6.35.4/fs/proc/array.c 2010-09-17 20:12:37.000000000 -0400
32664 @@ -337,6 +337,21 @@ static void task_cpus_allowed(struct seq
32665 seq_printf(m, "\n");
32666 }
32667
32668 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
32669 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
32670 +{
32671 + if (p->mm)
32672 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
32673 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
32674 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
32675 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
32676 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
32677 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
32678 + else
32679 + seq_printf(m, "PaX:\t-----\n");
32680 +}
32681 +#endif
32682 +
32683 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
32684 struct pid *pid, struct task_struct *task)
32685 {
32686 @@ -357,9 +372,20 @@ int proc_pid_status(struct seq_file *m,
32687 task_show_regs(m, task);
32688 #endif
32689 task_context_switch_counts(m, task);
32690 +
32691 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
32692 + task_pax(m, task);
32693 +#endif
32694 +
32695 return 0;
32696 }
32697
32698 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
32699 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
32700 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
32701 + _mm->pax_flags & MF_PAX_SEGMEXEC))
32702 +#endif
32703 +
32704 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
32705 struct pid *pid, struct task_struct *task, int whole)
32706 {
32707 @@ -452,6 +478,19 @@ static int do_task_stat(struct seq_file
32708 gtime = task->gtime;
32709 }
32710
32711 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
32712 + if (PAX_RAND_FLAGS(mm)) {
32713 + eip = 0;
32714 + esp = 0;
32715 + wchan = 0;
32716 + }
32717 +#endif
32718 +#ifdef CONFIG_GRKERNSEC_HIDESYM
32719 + wchan = 0;
32720 + eip = 0;
32721 + esp = 0;
32722 +#endif
32723 +
32724 /* scale priority and nice values from timeslices to -20..20 */
32725 /* to make it look like a "normal" Unix priority/nice value */
32726 priority = task_prio(task);
32727 @@ -492,9 +531,15 @@ static int do_task_stat(struct seq_file
32728 vsize,
32729 mm ? get_mm_rss(mm) : 0,
32730 rsslim,
32731 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
32732 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? mm->start_code : 0),
32733 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? mm->end_code : 0),
32734 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
32735 +#else
32736 mm ? mm->start_code : 0,
32737 mm ? mm->end_code : 0,
32738 (permitted && mm) ? mm->start_stack : 0,
32739 +#endif
32740 esp,
32741 eip,
32742 /* The signal information here is obsolete.
32743 @@ -547,3 +592,10 @@ int proc_pid_statm(struct seq_file *m, s
32744
32745 return 0;
32746 }
32747 +
32748 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
32749 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
32750 +{
32751 + return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
32752 +}
32753 +#endif
32754 diff -urNp linux-2.6.35.4/fs/proc/base.c linux-2.6.35.4/fs/proc/base.c
32755 --- linux-2.6.35.4/fs/proc/base.c 2010-08-26 19:47:12.000000000 -0400
32756 +++ linux-2.6.35.4/fs/proc/base.c 2010-09-17 20:12:37.000000000 -0400
32757 @@ -103,6 +103,22 @@ struct pid_entry {
32758 union proc_op op;
32759 };
32760
32761 +struct getdents_callback {
32762 + struct linux_dirent __user * current_dir;
32763 + struct linux_dirent __user * previous;
32764 + struct file * file;
32765 + int count;
32766 + int error;
32767 +};
32768 +
32769 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
32770 + loff_t offset, u64 ino, unsigned int d_type)
32771 +{
32772 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
32773 + buf->error = -EINVAL;
32774 + return 0;
32775 +}
32776 +
32777 #define NOD(NAME, MODE, IOP, FOP, OP) { \
32778 .name = (NAME), \
32779 .len = sizeof(NAME) - 1, \
32780 @@ -202,6 +218,9 @@ static int check_mem_permission(struct t
32781 if (task == current)
32782 return 0;
32783
32784 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
32785 + return -EPERM;
32786 +
32787 /*
32788 * If current is actively ptrace'ing, and would also be
32789 * permitted to freshly attach with ptrace now, permit it.
32790 @@ -249,6 +268,9 @@ static int proc_pid_cmdline(struct task_
32791 if (!mm->arg_end)
32792 goto out_mm; /* Shh! No looking before we're done */
32793
32794 + if (gr_acl_handle_procpidmem(task))
32795 + goto out_mm;
32796 +
32797 len = mm->arg_end - mm->arg_start;
32798
32799 if (len > PAGE_SIZE)
32800 @@ -276,12 +298,26 @@ out:
32801 return res;
32802 }
32803
32804 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
32805 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
32806 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
32807 + _mm->pax_flags & MF_PAX_SEGMEXEC))
32808 +#endif
32809 +
32810 static int proc_pid_auxv(struct task_struct *task, char *buffer)
32811 {
32812 int res = 0;
32813 struct mm_struct *mm = get_task_mm(task);
32814 if (mm) {
32815 unsigned int nwords = 0;
32816 +
32817 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
32818 + if (PAX_RAND_FLAGS(mm)) {
32819 + mmput(mm);
32820 + return res;
32821 + }
32822 +#endif
32823 +
32824 do {
32825 nwords += 2;
32826 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
32827 @@ -317,7 +353,7 @@ static int proc_pid_wchan(struct task_st
32828 }
32829 #endif /* CONFIG_KALLSYMS */
32830
32831 -#ifdef CONFIG_STACKTRACE
32832 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
32833
32834 #define MAX_STACK_TRACE_DEPTH 64
32835
32836 @@ -511,7 +547,7 @@ static int proc_pid_limits(struct task_s
32837 return count;
32838 }
32839
32840 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
32841 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
32842 static int proc_pid_syscall(struct task_struct *task, char *buffer)
32843 {
32844 long nr;
32845 @@ -920,6 +956,9 @@ static ssize_t environ_read(struct file
32846 if (!task)
32847 goto out_no_task;
32848
32849 + if (gr_acl_handle_procpidmem(task))
32850 + goto out;
32851 +
32852 if (!ptrace_may_access(task, PTRACE_MODE_READ))
32853 goto out;
32854
32855 @@ -1514,7 +1553,11 @@ static struct inode *proc_pid_make_inode
32856 rcu_read_lock();
32857 cred = __task_cred(task);
32858 inode->i_uid = cred->euid;
32859 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
32860 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
32861 +#else
32862 inode->i_gid = cred->egid;
32863 +#endif
32864 rcu_read_unlock();
32865 }
32866 security_task_to_inode(task, inode);
32867 @@ -1532,6 +1575,9 @@ static int pid_getattr(struct vfsmount *
32868 struct inode *inode = dentry->d_inode;
32869 struct task_struct *task;
32870 const struct cred *cred;
32871 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
32872 + const struct cred *tmpcred = current_cred();
32873 +#endif
32874
32875 generic_fillattr(inode, stat);
32876
32877 @@ -1539,12 +1585,34 @@ static int pid_getattr(struct vfsmount *
32878 stat->uid = 0;
32879 stat->gid = 0;
32880 task = pid_task(proc_pid(inode), PIDTYPE_PID);
32881 +
32882 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
32883 + rcu_read_unlock();
32884 + return -ENOENT;
32885 + }
32886 +
32887 if (task) {
32888 + cred = __task_cred(task);
32889 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
32890 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
32891 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
32892 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
32893 +#endif
32894 + )
32895 +#endif
32896 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
32897 +#ifdef CONFIG_GRKERNSEC_PROC_USER
32898 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
32899 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
32900 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
32901 +#endif
32902 task_dumpable(task)) {
32903 - cred = __task_cred(task);
32904 stat->uid = cred->euid;
32905 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
32906 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
32907 +#else
32908 stat->gid = cred->egid;
32909 +#endif
32910 }
32911 }
32912 rcu_read_unlock();
32913 @@ -1576,11 +1644,20 @@ static int pid_revalidate(struct dentry
32914
32915 if (task) {
32916 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
32917 +#ifdef CONFIG_GRKERNSEC_PROC_USER
32918 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
32919 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
32920 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
32921 +#endif
32922 task_dumpable(task)) {
32923 rcu_read_lock();
32924 cred = __task_cred(task);
32925 inode->i_uid = cred->euid;
32926 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
32927 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
32928 +#else
32929 inode->i_gid = cred->egid;
32930 +#endif
32931 rcu_read_unlock();
32932 } else {
32933 inode->i_uid = 0;
32934 @@ -1701,7 +1778,8 @@ static int proc_fd_info(struct inode *in
32935 int fd = proc_fd(inode);
32936
32937 if (task) {
32938 - files = get_files_struct(task);
32939 + if (!gr_acl_handle_procpidmem(task))
32940 + files = get_files_struct(task);
32941 put_task_struct(task);
32942 }
32943 if (files) {
32944 @@ -1953,12 +2031,22 @@ static const struct file_operations proc
32945 static int proc_fd_permission(struct inode *inode, int mask)
32946 {
32947 int rv;
32948 + struct task_struct *task;
32949
32950 rv = generic_permission(inode, mask, NULL);
32951 - if (rv == 0)
32952 - return 0;
32953 +
32954 if (task_pid(current) == proc_pid(inode))
32955 rv = 0;
32956 +
32957 + task = get_proc_task(inode);
32958 + if (task == NULL)
32959 + return rv;
32960 +
32961 + if (gr_acl_handle_procpidmem(task))
32962 + rv = -EACCES;
32963 +
32964 + put_task_struct(task);
32965 +
32966 return rv;
32967 }
32968
32969 @@ -2067,6 +2155,9 @@ static struct dentry *proc_pident_lookup
32970 if (!task)
32971 goto out_no_task;
32972
32973 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
32974 + goto out;
32975 +
32976 /*
32977 * Yes, it does not scale. And it should not. Don't add
32978 * new entries into /proc/<tgid>/ without very good reasons.
32979 @@ -2111,6 +2202,9 @@ static int proc_pident_readdir(struct fi
32980 if (!task)
32981 goto out_no_task;
32982
32983 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
32984 + goto out;
32985 +
32986 ret = 0;
32987 i = filp->f_pos;
32988 switch (i) {
32989 @@ -2380,7 +2474,7 @@ static void *proc_self_follow_link(struc
32990 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
32991 void *cookie)
32992 {
32993 - char *s = nd_get_link(nd);
32994 + const char *s = nd_get_link(nd);
32995 if (!IS_ERR(s))
32996 __putname(s);
32997 }
32998 @@ -2580,7 +2674,7 @@ static const struct pid_entry tgid_base_
32999 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
33000 #endif
33001 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
33002 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
33003 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
33004 INF("syscall", S_IRUSR, proc_pid_syscall),
33005 #endif
33006 INF("cmdline", S_IRUGO, proc_pid_cmdline),
33007 @@ -2608,7 +2702,7 @@ static const struct pid_entry tgid_base_
33008 #ifdef CONFIG_KALLSYMS
33009 INF("wchan", S_IRUGO, proc_pid_wchan),
33010 #endif
33011 -#ifdef CONFIG_STACKTRACE
33012 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
33013 ONE("stack", S_IRUSR, proc_pid_stack),
33014 #endif
33015 #ifdef CONFIG_SCHEDSTATS
33016 @@ -2638,6 +2732,9 @@ static const struct pid_entry tgid_base_
33017 #ifdef CONFIG_TASK_IO_ACCOUNTING
33018 INF("io", S_IRUGO, proc_tgid_io_accounting),
33019 #endif
33020 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
33021 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
33022 +#endif
33023 };
33024
33025 static int proc_tgid_base_readdir(struct file * filp,
33026 @@ -2762,7 +2859,14 @@ static struct dentry *proc_pid_instantia
33027 if (!inode)
33028 goto out;
33029
33030 +#ifdef CONFIG_GRKERNSEC_PROC_USER
33031 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
33032 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
33033 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
33034 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
33035 +#else
33036 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
33037 +#endif
33038 inode->i_op = &proc_tgid_base_inode_operations;
33039 inode->i_fop = &proc_tgid_base_operations;
33040 inode->i_flags|=S_IMMUTABLE;
33041 @@ -2804,7 +2908,11 @@ struct dentry *proc_pid_lookup(struct in
33042 if (!task)
33043 goto out;
33044
33045 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
33046 + goto out_put_task;
33047 +
33048 result = proc_pid_instantiate(dir, dentry, task, NULL);
33049 +out_put_task:
33050 put_task_struct(task);
33051 out:
33052 return result;
33053 @@ -2869,6 +2977,11 @@ int proc_pid_readdir(struct file * filp,
33054 {
33055 unsigned int nr = filp->f_pos - FIRST_PROCESS_ENTRY;
33056 struct task_struct *reaper = get_proc_task(filp->f_path.dentry->d_inode);
33057 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
33058 + const struct cred *tmpcred = current_cred();
33059 + const struct cred *itercred;
33060 +#endif
33061 + filldir_t __filldir = filldir;
33062 struct tgid_iter iter;
33063 struct pid_namespace *ns;
33064
33065 @@ -2887,8 +3000,27 @@ int proc_pid_readdir(struct file * filp,
33066 for (iter = next_tgid(ns, iter);
33067 iter.task;
33068 iter.tgid += 1, iter = next_tgid(ns, iter)) {
33069 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
33070 + rcu_read_lock();
33071 + itercred = __task_cred(iter.task);
33072 +#endif
33073 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
33074 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
33075 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
33076 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
33077 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
33078 +#endif
33079 + )
33080 +#endif
33081 + )
33082 + __filldir = &gr_fake_filldir;
33083 + else
33084 + __filldir = filldir;
33085 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
33086 + rcu_read_unlock();
33087 +#endif
33088 filp->f_pos = iter.tgid + TGID_OFFSET;
33089 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
33090 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
33091 put_task_struct(iter.task);
33092 goto out;
33093 }
33094 @@ -2915,7 +3047,7 @@ static const struct pid_entry tid_base_s
33095 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
33096 #endif
33097 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
33098 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
33099 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
33100 INF("syscall", S_IRUSR, proc_pid_syscall),
33101 #endif
33102 INF("cmdline", S_IRUGO, proc_pid_cmdline),
33103 @@ -2942,7 +3074,7 @@ static const struct pid_entry tid_base_s
33104 #ifdef CONFIG_KALLSYMS
33105 INF("wchan", S_IRUGO, proc_pid_wchan),
33106 #endif
33107 -#ifdef CONFIG_STACKTRACE
33108 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
33109 ONE("stack", S_IRUSR, proc_pid_stack),
33110 #endif
33111 #ifdef CONFIG_SCHEDSTATS
33112 diff -urNp linux-2.6.35.4/fs/proc/cmdline.c linux-2.6.35.4/fs/proc/cmdline.c
33113 --- linux-2.6.35.4/fs/proc/cmdline.c 2010-08-26 19:47:12.000000000 -0400
33114 +++ linux-2.6.35.4/fs/proc/cmdline.c 2010-09-17 20:12:37.000000000 -0400
33115 @@ -23,7 +23,11 @@ static const struct file_operations cmdl
33116
33117 static int __init proc_cmdline_init(void)
33118 {
33119 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
33120 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
33121 +#else
33122 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
33123 +#endif
33124 return 0;
33125 }
33126 module_init(proc_cmdline_init);
33127 diff -urNp linux-2.6.35.4/fs/proc/devices.c linux-2.6.35.4/fs/proc/devices.c
33128 --- linux-2.6.35.4/fs/proc/devices.c 2010-08-26 19:47:12.000000000 -0400
33129 +++ linux-2.6.35.4/fs/proc/devices.c 2010-09-17 20:12:37.000000000 -0400
33130 @@ -64,7 +64,11 @@ static const struct file_operations proc
33131
33132 static int __init proc_devices_init(void)
33133 {
33134 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
33135 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
33136 +#else
33137 proc_create("devices", 0, NULL, &proc_devinfo_operations);
33138 +#endif
33139 return 0;
33140 }
33141 module_init(proc_devices_init);
33142 diff -urNp linux-2.6.35.4/fs/proc/inode.c linux-2.6.35.4/fs/proc/inode.c
33143 --- linux-2.6.35.4/fs/proc/inode.c 2010-08-26 19:47:12.000000000 -0400
33144 +++ linux-2.6.35.4/fs/proc/inode.c 2010-09-17 20:12:37.000000000 -0400
33145 @@ -435,7 +435,11 @@ struct inode *proc_get_inode(struct supe
33146 if (de->mode) {
33147 inode->i_mode = de->mode;
33148 inode->i_uid = de->uid;
33149 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
33150 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
33151 +#else
33152 inode->i_gid = de->gid;
33153 +#endif
33154 }
33155 if (de->size)
33156 inode->i_size = de->size;
33157 diff -urNp linux-2.6.35.4/fs/proc/internal.h linux-2.6.35.4/fs/proc/internal.h
33158 --- linux-2.6.35.4/fs/proc/internal.h 2010-08-26 19:47:12.000000000 -0400
33159 +++ linux-2.6.35.4/fs/proc/internal.h 2010-09-17 20:12:37.000000000 -0400
33160 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
33161 struct pid *pid, struct task_struct *task);
33162 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
33163 struct pid *pid, struct task_struct *task);
33164 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
33165 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
33166 +#endif
33167 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
33168
33169 extern const struct file_operations proc_maps_operations;
33170 diff -urNp linux-2.6.35.4/fs/proc/Kconfig linux-2.6.35.4/fs/proc/Kconfig
33171 --- linux-2.6.35.4/fs/proc/Kconfig 2010-08-26 19:47:12.000000000 -0400
33172 +++ linux-2.6.35.4/fs/proc/Kconfig 2010-09-17 20:12:37.000000000 -0400
33173 @@ -30,12 +30,12 @@ config PROC_FS
33174
33175 config PROC_KCORE
33176 bool "/proc/kcore support" if !ARM
33177 - depends on PROC_FS && MMU
33178 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
33179
33180 config PROC_VMCORE
33181 bool "/proc/vmcore support (EXPERIMENTAL)"
33182 - depends on PROC_FS && CRASH_DUMP
33183 - default y
33184 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
33185 + default n
33186 help
33187 Exports the dump image of crashed kernel in ELF format.
33188
33189 @@ -59,8 +59,8 @@ config PROC_SYSCTL
33190 limited in memory.
33191
33192 config PROC_PAGE_MONITOR
33193 - default y
33194 - depends on PROC_FS && MMU
33195 + default n
33196 + depends on PROC_FS && MMU && !GRKERNSEC
33197 bool "Enable /proc page monitoring" if EMBEDDED
33198 help
33199 Various /proc files exist to monitor process memory utilization:
33200 diff -urNp linux-2.6.35.4/fs/proc/kcore.c linux-2.6.35.4/fs/proc/kcore.c
33201 --- linux-2.6.35.4/fs/proc/kcore.c 2010-08-26 19:47:12.000000000 -0400
33202 +++ linux-2.6.35.4/fs/proc/kcore.c 2010-09-17 20:12:37.000000000 -0400
33203 @@ -478,9 +478,10 @@ read_kcore(struct file *file, char __use
33204 * the addresses in the elf_phdr on our list.
33205 */
33206 start = kc_offset_to_vaddr(*fpos - elf_buflen);
33207 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
33208 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
33209 + if (tsz > buflen)
33210 tsz = buflen;
33211 -
33212 +
33213 while (buflen) {
33214 struct kcore_list *m;
33215
33216 @@ -509,20 +510,18 @@ read_kcore(struct file *file, char __use
33217 kfree(elf_buf);
33218 } else {
33219 if (kern_addr_valid(start)) {
33220 - unsigned long n;
33221 + char *elf_buf;
33222
33223 - n = copy_to_user(buffer, (char *)start, tsz);
33224 - /*
33225 - * We cannot distingush between fault on source
33226 - * and fault on destination. When this happens
33227 - * we clear too and hope it will trigger the
33228 - * EFAULT again.
33229 - */
33230 - if (n) {
33231 - if (clear_user(buffer + tsz - n,
33232 - n))
33233 + elf_buf = kmalloc(tsz, GFP_KERNEL);
33234 + if (!elf_buf)
33235 + return -ENOMEM;
33236 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
33237 + if (copy_to_user(buffer, elf_buf, tsz)) {
33238 + kfree(elf_buf);
33239 return -EFAULT;
33240 + }
33241 }
33242 + kfree(elf_buf);
33243 } else {
33244 if (clear_user(buffer, tsz))
33245 return -EFAULT;
33246 @@ -542,6 +541,9 @@ read_kcore(struct file *file, char __use
33247
33248 static int open_kcore(struct inode *inode, struct file *filp)
33249 {
33250 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
33251 + return -EPERM;
33252 +#endif
33253 if (!capable(CAP_SYS_RAWIO))
33254 return -EPERM;
33255 if (kcore_need_update)
33256 diff -urNp linux-2.6.35.4/fs/proc/meminfo.c linux-2.6.35.4/fs/proc/meminfo.c
33257 --- linux-2.6.35.4/fs/proc/meminfo.c 2010-08-26 19:47:12.000000000 -0400
33258 +++ linux-2.6.35.4/fs/proc/meminfo.c 2010-09-17 20:12:09.000000000 -0400
33259 @@ -149,7 +149,7 @@ static int meminfo_proc_show(struct seq_
33260 vmi.used >> 10,
33261 vmi.largest_chunk >> 10
33262 #ifdef CONFIG_MEMORY_FAILURE
33263 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
33264 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
33265 #endif
33266 );
33267
33268 diff -urNp linux-2.6.35.4/fs/proc/nommu.c linux-2.6.35.4/fs/proc/nommu.c
33269 --- linux-2.6.35.4/fs/proc/nommu.c 2010-08-26 19:47:12.000000000 -0400
33270 +++ linux-2.6.35.4/fs/proc/nommu.c 2010-09-17 20:12:09.000000000 -0400
33271 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_
33272 if (len < 1)
33273 len = 1;
33274 seq_printf(m, "%*c", len, ' ');
33275 - seq_path(m, &file->f_path, "");
33276 + seq_path(m, &file->f_path, "\n\\");
33277 }
33278
33279 seq_putc(m, '\n');
33280 diff -urNp linux-2.6.35.4/fs/proc/proc_net.c linux-2.6.35.4/fs/proc/proc_net.c
33281 --- linux-2.6.35.4/fs/proc/proc_net.c 2010-08-26 19:47:12.000000000 -0400
33282 +++ linux-2.6.35.4/fs/proc/proc_net.c 2010-09-17 20:12:37.000000000 -0400
33283 @@ -105,6 +105,17 @@ static struct net *get_proc_task_net(str
33284 struct task_struct *task;
33285 struct nsproxy *ns;
33286 struct net *net = NULL;
33287 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
33288 + const struct cred *cred = current_cred();
33289 +#endif
33290 +
33291 +#ifdef CONFIG_GRKERNSEC_PROC_USER
33292 + if (cred->fsuid)
33293 + return net;
33294 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
33295 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
33296 + return net;
33297 +#endif
33298
33299 rcu_read_lock();
33300 task = pid_task(proc_pid(dir), PIDTYPE_PID);
33301 diff -urNp linux-2.6.35.4/fs/proc/proc_sysctl.c linux-2.6.35.4/fs/proc/proc_sysctl.c
33302 --- linux-2.6.35.4/fs/proc/proc_sysctl.c 2010-08-26 19:47:12.000000000 -0400
33303 +++ linux-2.6.35.4/fs/proc/proc_sysctl.c 2010-09-17 20:12:37.000000000 -0400
33304 @@ -7,6 +7,8 @@
33305 #include <linux/security.h>
33306 #include "internal.h"
33307
33308 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
33309 +
33310 static const struct dentry_operations proc_sys_dentry_operations;
33311 static const struct file_operations proc_sys_file_operations;
33312 static const struct inode_operations proc_sys_inode_operations;
33313 @@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(st
33314 if (!p)
33315 goto out;
33316
33317 + if (gr_handle_sysctl(p, MAY_EXEC))
33318 + goto out;
33319 +
33320 err = ERR_PTR(-ENOMEM);
33321 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
33322 if (h)
33323 @@ -228,6 +233,9 @@ static int scan(struct ctl_table_header
33324 if (*pos < file->f_pos)
33325 continue;
33326
33327 + if (gr_handle_sysctl(table, 0))
33328 + continue;
33329 +
33330 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
33331 if (res)
33332 return res;
33333 @@ -344,6 +352,9 @@ static int proc_sys_getattr(struct vfsmo
33334 if (IS_ERR(head))
33335 return PTR_ERR(head);
33336
33337 + if (table && gr_handle_sysctl(table, MAY_EXEC))
33338 + return -ENOENT;
33339 +
33340 generic_fillattr(inode, stat);
33341 if (table)
33342 stat->mode = (stat->mode & S_IFMT) | table->mode;
33343 diff -urNp linux-2.6.35.4/fs/proc/root.c linux-2.6.35.4/fs/proc/root.c
33344 --- linux-2.6.35.4/fs/proc/root.c 2010-08-26 19:47:12.000000000 -0400
33345 +++ linux-2.6.35.4/fs/proc/root.c 2010-09-17 20:12:37.000000000 -0400
33346 @@ -133,7 +133,15 @@ void __init proc_root_init(void)
33347 #ifdef CONFIG_PROC_DEVICETREE
33348 proc_device_tree_init();
33349 #endif
33350 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
33351 +#ifdef CONFIG_GRKERNSEC_PROC_USER
33352 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
33353 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
33354 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
33355 +#endif
33356 +#else
33357 proc_mkdir("bus", NULL);
33358 +#endif
33359 proc_sys_init();
33360 }
33361
33362 diff -urNp linux-2.6.35.4/fs/proc/task_mmu.c linux-2.6.35.4/fs/proc/task_mmu.c
33363 --- linux-2.6.35.4/fs/proc/task_mmu.c 2010-08-26 19:47:12.000000000 -0400
33364 +++ linux-2.6.35.4/fs/proc/task_mmu.c 2010-09-17 20:12:37.000000000 -0400
33365 @@ -49,8 +49,13 @@ void task_mem(struct seq_file *m, struct
33366 "VmExe:\t%8lu kB\n"
33367 "VmLib:\t%8lu kB\n"
33368 "VmPTE:\t%8lu kB\n"
33369 - "VmSwap:\t%8lu kB\n",
33370 - hiwater_vm << (PAGE_SHIFT-10),
33371 + "VmSwap:\t%8lu kB\n"
33372 +
33373 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
33374 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
33375 +#endif
33376 +
33377 + ,hiwater_vm << (PAGE_SHIFT-10),
33378 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
33379 mm->locked_vm << (PAGE_SHIFT-10),
33380 hiwater_rss << (PAGE_SHIFT-10),
33381 @@ -58,7 +63,13 @@ void task_mem(struct seq_file *m, struct
33382 data << (PAGE_SHIFT-10),
33383 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
33384 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
33385 - swap << (PAGE_SHIFT-10));
33386 + swap << (PAGE_SHIFT-10)
33387 +
33388 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
33389 + , mm->context.user_cs_base, mm->context.user_cs_limit
33390 +#endif
33391 +
33392 + );
33393 }
33394
33395 unsigned long task_vsize(struct mm_struct *mm)
33396 @@ -203,6 +214,12 @@ static int do_maps_open(struct inode *in
33397 return ret;
33398 }
33399
33400 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
33401 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
33402 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
33403 + _mm->pax_flags & MF_PAX_SEGMEXEC))
33404 +#endif
33405 +
33406 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
33407 {
33408 struct mm_struct *mm = vma->vm_mm;
33409 @@ -210,7 +227,6 @@ static void show_map_vma(struct seq_file
33410 int flags = vma->vm_flags;
33411 unsigned long ino = 0;
33412 unsigned long long pgoff = 0;
33413 - unsigned long start;
33414 dev_t dev = 0;
33415 int len;
33416
33417 @@ -221,19 +237,24 @@ static void show_map_vma(struct seq_file
33418 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
33419 }
33420
33421 - /* We don't show the stack guard page in /proc/maps */
33422 - start = vma->vm_start;
33423 - if (vma->vm_flags & VM_GROWSDOWN)
33424 - start += PAGE_SIZE;
33425
33426 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
33427 - start,
33428 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
33429 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
33430 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
33431 +#else
33432 + vma->vm_start,
33433 vma->vm_end,
33434 +#endif
33435 flags & VM_READ ? 'r' : '-',
33436 flags & VM_WRITE ? 'w' : '-',
33437 flags & VM_EXEC ? 'x' : '-',
33438 flags & VM_MAYSHARE ? 's' : 'p',
33439 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
33440 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
33441 +#else
33442 pgoff,
33443 +#endif
33444 MAJOR(dev), MINOR(dev), ino, &len);
33445
33446 /*
33447 @@ -242,16 +263,16 @@ static void show_map_vma(struct seq_file
33448 */
33449 if (file) {
33450 pad_len_spaces(m, len);
33451 - seq_path(m, &file->f_path, "\n");
33452 + seq_path(m, &file->f_path, "\n\\");
33453 } else {
33454 const char *name = arch_vma_name(vma);
33455 if (!name) {
33456 if (mm) {
33457 - if (vma->vm_start <= mm->start_brk &&
33458 - vma->vm_end >= mm->brk) {
33459 + if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
33460 name = "[heap]";
33461 - } else if (vma->vm_start <= mm->start_stack &&
33462 - vma->vm_end >= mm->start_stack) {
33463 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
33464 + (vma->vm_start <= mm->start_stack &&
33465 + vma->vm_end >= mm->start_stack)) {
33466 name = "[stack]";
33467 }
33468 } else {
33469 @@ -393,11 +414,16 @@ static int show_smap(struct seq_file *m,
33470 };
33471
33472 memset(&mss, 0, sizeof mss);
33473 - mss.vma = vma;
33474 - /* mmap_sem is held in m_start */
33475 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
33476 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
33477 -
33478 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
33479 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
33480 +#endif
33481 + mss.vma = vma;
33482 + /* mmap_sem is held in m_start */
33483 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
33484 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
33485 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
33486 + }
33487 +#endif
33488 show_map_vma(m, vma);
33489
33490 seq_printf(m,
33491 @@ -412,7 +438,11 @@ static int show_smap(struct seq_file *m,
33492 "Swap: %8lu kB\n"
33493 "KernelPageSize: %8lu kB\n"
33494 "MMUPageSize: %8lu kB\n",
33495 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
33496 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
33497 +#else
33498 (vma->vm_end - vma->vm_start) >> 10,
33499 +#endif
33500 mss.resident >> 10,
33501 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
33502 mss.shared_clean >> 10,
33503 diff -urNp linux-2.6.35.4/fs/proc/task_nommu.c linux-2.6.35.4/fs/proc/task_nommu.c
33504 --- linux-2.6.35.4/fs/proc/task_nommu.c 2010-08-26 19:47:12.000000000 -0400
33505 +++ linux-2.6.35.4/fs/proc/task_nommu.c 2010-09-17 20:12:09.000000000 -0400
33506 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct
33507 else
33508 bytes += kobjsize(mm);
33509
33510 - if (current->fs && current->fs->users > 1)
33511 + if (current->fs && atomic_read(&current->fs->users) > 1)
33512 sbytes += kobjsize(current->fs);
33513 else
33514 bytes += kobjsize(current->fs);
33515 @@ -165,7 +165,7 @@ static int nommu_vma_show(struct seq_fil
33516
33517 if (file) {
33518 pad_len_spaces(m, len);
33519 - seq_path(m, &file->f_path, "");
33520 + seq_path(m, &file->f_path, "\n\\");
33521 } else if (mm) {
33522 if (vma->vm_start <= mm->start_stack &&
33523 vma->vm_end >= mm->start_stack) {
33524 diff -urNp linux-2.6.35.4/fs/readdir.c linux-2.6.35.4/fs/readdir.c
33525 --- linux-2.6.35.4/fs/readdir.c 2010-08-26 19:47:12.000000000 -0400
33526 +++ linux-2.6.35.4/fs/readdir.c 2010-09-17 20:12:37.000000000 -0400
33527 @@ -16,6 +16,7 @@
33528 #include <linux/security.h>
33529 #include <linux/syscalls.h>
33530 #include <linux/unistd.h>
33531 +#include <linux/namei.h>
33532
33533 #include <asm/uaccess.h>
33534
33535 @@ -67,6 +68,7 @@ struct old_linux_dirent {
33536
33537 struct readdir_callback {
33538 struct old_linux_dirent __user * dirent;
33539 + struct file * file;
33540 int result;
33541 };
33542
33543 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
33544 buf->result = -EOVERFLOW;
33545 return -EOVERFLOW;
33546 }
33547 +
33548 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
33549 + return 0;
33550 +
33551 buf->result++;
33552 dirent = buf->dirent;
33553 if (!access_ok(VERIFY_WRITE, dirent,
33554 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
33555
33556 buf.result = 0;
33557 buf.dirent = dirent;
33558 + buf.file = file;
33559
33560 error = vfs_readdir(file, fillonedir, &buf);
33561 if (buf.result)
33562 @@ -142,6 +149,7 @@ struct linux_dirent {
33563 struct getdents_callback {
33564 struct linux_dirent __user * current_dir;
33565 struct linux_dirent __user * previous;
33566 + struct file * file;
33567 int count;
33568 int error;
33569 };
33570 @@ -162,6 +170,10 @@ static int filldir(void * __buf, const c
33571 buf->error = -EOVERFLOW;
33572 return -EOVERFLOW;
33573 }
33574 +
33575 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
33576 + return 0;
33577 +
33578 dirent = buf->previous;
33579 if (dirent) {
33580 if (__put_user(offset, &dirent->d_off))
33581 @@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
33582 buf.previous = NULL;
33583 buf.count = count;
33584 buf.error = 0;
33585 + buf.file = file;
33586
33587 error = vfs_readdir(file, filldir, &buf);
33588 if (error >= 0)
33589 @@ -228,6 +241,7 @@ out:
33590 struct getdents_callback64 {
33591 struct linux_dirent64 __user * current_dir;
33592 struct linux_dirent64 __user * previous;
33593 + struct file *file;
33594 int count;
33595 int error;
33596 };
33597 @@ -242,6 +256,10 @@ static int filldir64(void * __buf, const
33598 buf->error = -EINVAL; /* only used if we fail.. */
33599 if (reclen > buf->count)
33600 return -EINVAL;
33601 +
33602 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
33603 + return 0;
33604 +
33605 dirent = buf->previous;
33606 if (dirent) {
33607 if (__put_user(offset, &dirent->d_off))
33608 @@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
33609
33610 buf.current_dir = dirent;
33611 buf.previous = NULL;
33612 + buf.file = file;
33613 buf.count = count;
33614 buf.error = 0;
33615
33616 diff -urNp linux-2.6.35.4/fs/reiserfs/do_balan.c linux-2.6.35.4/fs/reiserfs/do_balan.c
33617 --- linux-2.6.35.4/fs/reiserfs/do_balan.c 2010-08-26 19:47:12.000000000 -0400
33618 +++ linux-2.6.35.4/fs/reiserfs/do_balan.c 2010-09-17 20:12:09.000000000 -0400
33619 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb,
33620 return;
33621 }
33622
33623 - atomic_inc(&(fs_generation(tb->tb_sb)));
33624 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
33625 do_balance_starts(tb);
33626
33627 /* balance leaf returns 0 except if combining L R and S into
33628 diff -urNp linux-2.6.35.4/fs/reiserfs/item_ops.c linux-2.6.35.4/fs/reiserfs/item_ops.c
33629 --- linux-2.6.35.4/fs/reiserfs/item_ops.c 2010-08-26 19:47:12.000000000 -0400
33630 +++ linux-2.6.35.4/fs/reiserfs/item_ops.c 2010-09-17 20:12:09.000000000 -0400
33631 @@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_i
33632 vi->vi_index, vi->vi_type, vi->vi_ih);
33633 }
33634
33635 -static struct item_operations stat_data_ops = {
33636 +static const struct item_operations stat_data_ops = {
33637 .bytes_number = sd_bytes_number,
33638 .decrement_key = sd_decrement_key,
33639 .is_left_mergeable = sd_is_left_mergeable,
33640 @@ -196,7 +196,7 @@ static void direct_print_vi(struct virtu
33641 vi->vi_index, vi->vi_type, vi->vi_ih);
33642 }
33643
33644 -static struct item_operations direct_ops = {
33645 +static const struct item_operations direct_ops = {
33646 .bytes_number = direct_bytes_number,
33647 .decrement_key = direct_decrement_key,
33648 .is_left_mergeable = direct_is_left_mergeable,
33649 @@ -341,7 +341,7 @@ static void indirect_print_vi(struct vir
33650 vi->vi_index, vi->vi_type, vi->vi_ih);
33651 }
33652
33653 -static struct item_operations indirect_ops = {
33654 +static const struct item_operations indirect_ops = {
33655 .bytes_number = indirect_bytes_number,
33656 .decrement_key = indirect_decrement_key,
33657 .is_left_mergeable = indirect_is_left_mergeable,
33658 @@ -628,7 +628,7 @@ static void direntry_print_vi(struct vir
33659 printk("\n");
33660 }
33661
33662 -static struct item_operations direntry_ops = {
33663 +static const struct item_operations direntry_ops = {
33664 .bytes_number = direntry_bytes_number,
33665 .decrement_key = direntry_decrement_key,
33666 .is_left_mergeable = direntry_is_left_mergeable,
33667 @@ -724,7 +724,7 @@ static void errcatch_print_vi(struct vir
33668 "Invalid item type observed, run fsck ASAP");
33669 }
33670
33671 -static struct item_operations errcatch_ops = {
33672 +static const struct item_operations errcatch_ops = {
33673 errcatch_bytes_number,
33674 errcatch_decrement_key,
33675 errcatch_is_left_mergeable,
33676 @@ -746,7 +746,7 @@ static struct item_operations errcatch_o
33677 #error Item types must use disk-format assigned values.
33678 #endif
33679
33680 -struct item_operations *item_ops[TYPE_ANY + 1] = {
33681 +const struct item_operations * const item_ops[TYPE_ANY + 1] = {
33682 &stat_data_ops,
33683 &indirect_ops,
33684 &direct_ops,
33685 diff -urNp linux-2.6.35.4/fs/reiserfs/procfs.c linux-2.6.35.4/fs/reiserfs/procfs.c
33686 --- linux-2.6.35.4/fs/reiserfs/procfs.c 2010-08-26 19:47:12.000000000 -0400
33687 +++ linux-2.6.35.4/fs/reiserfs/procfs.c 2010-09-17 20:12:09.000000000 -0400
33688 @@ -113,7 +113,7 @@ static int show_super(struct seq_file *m
33689 "SMALL_TAILS " : "NO_TAILS ",
33690 replay_only(sb) ? "REPLAY_ONLY " : "",
33691 convert_reiserfs(sb) ? "CONV " : "",
33692 - atomic_read(&r->s_generation_counter),
33693 + atomic_read_unchecked(&r->s_generation_counter),
33694 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
33695 SF(s_do_balance), SF(s_unneeded_left_neighbor),
33696 SF(s_good_search_by_key_reada), SF(s_bmaps),
33697 diff -urNp linux-2.6.35.4/fs/select.c linux-2.6.35.4/fs/select.c
33698 --- linux-2.6.35.4/fs/select.c 2010-08-26 19:47:12.000000000 -0400
33699 +++ linux-2.6.35.4/fs/select.c 2010-09-17 20:12:37.000000000 -0400
33700 @@ -20,6 +20,7 @@
33701 #include <linux/module.h>
33702 #include <linux/slab.h>
33703 #include <linux/poll.h>
33704 +#include <linux/security.h>
33705 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
33706 #include <linux/file.h>
33707 #include <linux/fdtable.h>
33708 @@ -838,6 +839,7 @@ int do_sys_poll(struct pollfd __user *uf
33709 struct poll_list *walk = head;
33710 unsigned long todo = nfds;
33711
33712 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
33713 if (nfds > rlimit(RLIMIT_NOFILE))
33714 return -EINVAL;
33715
33716 diff -urNp linux-2.6.35.4/fs/seq_file.c linux-2.6.35.4/fs/seq_file.c
33717 --- linux-2.6.35.4/fs/seq_file.c 2010-08-26 19:47:12.000000000 -0400
33718 +++ linux-2.6.35.4/fs/seq_file.c 2010-09-17 20:12:09.000000000 -0400
33719 @@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
33720 return 0;
33721 }
33722 if (!m->buf) {
33723 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
33724 + m->size = PAGE_SIZE;
33725 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
33726 if (!m->buf)
33727 return -ENOMEM;
33728 }
33729 @@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
33730 Eoverflow:
33731 m->op->stop(m, p);
33732 kfree(m->buf);
33733 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
33734 + m->size <<= 1;
33735 + m->buf = kmalloc(m->size, GFP_KERNEL);
33736 return !m->buf ? -ENOMEM : -EAGAIN;
33737 }
33738
33739 @@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
33740 m->version = file->f_version;
33741 /* grab buffer if we didn't have one */
33742 if (!m->buf) {
33743 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
33744 + m->size = PAGE_SIZE;
33745 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
33746 if (!m->buf)
33747 goto Enomem;
33748 }
33749 @@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
33750 goto Fill;
33751 m->op->stop(m, p);
33752 kfree(m->buf);
33753 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
33754 + m->size <<= 1;
33755 + m->buf = kmalloc(m->size, GFP_KERNEL);
33756 if (!m->buf)
33757 goto Enomem;
33758 m->count = 0;
33759 diff -urNp linux-2.6.35.4/fs/smbfs/symlink.c linux-2.6.35.4/fs/smbfs/symlink.c
33760 --- linux-2.6.35.4/fs/smbfs/symlink.c 2010-08-26 19:47:12.000000000 -0400
33761 +++ linux-2.6.35.4/fs/smbfs/symlink.c 2010-09-17 20:12:09.000000000 -0400
33762 @@ -55,7 +55,7 @@ static void *smb_follow_link(struct dent
33763
33764 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
33765 {
33766 - char *s = nd_get_link(nd);
33767 + const char *s = nd_get_link(nd);
33768 if (!IS_ERR(s))
33769 __putname(s);
33770 }
33771 diff -urNp linux-2.6.35.4/fs/splice.c linux-2.6.35.4/fs/splice.c
33772 --- linux-2.6.35.4/fs/splice.c 2010-08-26 19:47:12.000000000 -0400
33773 +++ linux-2.6.35.4/fs/splice.c 2010-09-17 20:12:09.000000000 -0400
33774 @@ -186,7 +186,7 @@ ssize_t splice_to_pipe(struct pipe_inode
33775 pipe_lock(pipe);
33776
33777 for (;;) {
33778 - if (!pipe->readers) {
33779 + if (!atomic_read(&pipe->readers)) {
33780 send_sig(SIGPIPE, current, 0);
33781 if (!ret)
33782 ret = -EPIPE;
33783 @@ -240,9 +240,9 @@ ssize_t splice_to_pipe(struct pipe_inode
33784 do_wakeup = 0;
33785 }
33786
33787 - pipe->waiting_writers++;
33788 + atomic_inc(&pipe->waiting_writers);
33789 pipe_wait(pipe);
33790 - pipe->waiting_writers--;
33791 + atomic_dec(&pipe->waiting_writers);
33792 }
33793
33794 pipe_unlock(pipe);
33795 @@ -566,7 +566,7 @@ static ssize_t kernel_readv(struct file
33796 old_fs = get_fs();
33797 set_fs(get_ds());
33798 /* The cast to a user pointer is valid due to the set_fs() */
33799 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
33800 + res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
33801 set_fs(old_fs);
33802
33803 return res;
33804 @@ -581,7 +581,7 @@ static ssize_t kernel_write(struct file
33805 old_fs = get_fs();
33806 set_fs(get_ds());
33807 /* The cast to a user pointer is valid due to the set_fs() */
33808 - res = vfs_write(file, (const char __user *)buf, count, &pos);
33809 + res = vfs_write(file, (__force const char __user *)buf, count, &pos);
33810 set_fs(old_fs);
33811
33812 return res;
33813 @@ -634,7 +634,7 @@ ssize_t default_file_splice_read(struct
33814 goto err;
33815
33816 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
33817 - vec[i].iov_base = (void __user *) page_address(page);
33818 + vec[i].iov_base = (__force void __user *) page_address(page);
33819 vec[i].iov_len = this_len;
33820 spd.pages[i] = page;
33821 spd.nr_pages++;
33822 @@ -861,10 +861,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
33823 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
33824 {
33825 while (!pipe->nrbufs) {
33826 - if (!pipe->writers)
33827 + if (!atomic_read(&pipe->writers))
33828 return 0;
33829
33830 - if (!pipe->waiting_writers && sd->num_spliced)
33831 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
33832 return 0;
33833
33834 if (sd->flags & SPLICE_F_NONBLOCK)
33835 @@ -1201,7 +1201,7 @@ ssize_t splice_direct_to_actor(struct fi
33836 * out of the pipe right after the splice_to_pipe(). So set
33837 * PIPE_READERS appropriately.
33838 */
33839 - pipe->readers = 1;
33840 + atomic_set(&pipe->readers, 1);
33841
33842 current->splice_pipe = pipe;
33843 }
33844 @@ -1769,9 +1769,9 @@ static int ipipe_prep(struct pipe_inode_
33845 ret = -ERESTARTSYS;
33846 break;
33847 }
33848 - if (!pipe->writers)
33849 + if (!atomic_read(&pipe->writers))
33850 break;
33851 - if (!pipe->waiting_writers) {
33852 + if (!atomic_read(&pipe->waiting_writers)) {
33853 if (flags & SPLICE_F_NONBLOCK) {
33854 ret = -EAGAIN;
33855 break;
33856 @@ -1803,7 +1803,7 @@ static int opipe_prep(struct pipe_inode_
33857 pipe_lock(pipe);
33858
33859 while (pipe->nrbufs >= pipe->buffers) {
33860 - if (!pipe->readers) {
33861 + if (!atomic_read(&pipe->readers)) {
33862 send_sig(SIGPIPE, current, 0);
33863 ret = -EPIPE;
33864 break;
33865 @@ -1816,9 +1816,9 @@ static int opipe_prep(struct pipe_inode_
33866 ret = -ERESTARTSYS;
33867 break;
33868 }
33869 - pipe->waiting_writers++;
33870 + atomic_inc(&pipe->waiting_writers);
33871 pipe_wait(pipe);
33872 - pipe->waiting_writers--;
33873 + atomic_dec(&pipe->waiting_writers);
33874 }
33875
33876 pipe_unlock(pipe);
33877 @@ -1854,14 +1854,14 @@ retry:
33878 pipe_double_lock(ipipe, opipe);
33879
33880 do {
33881 - if (!opipe->readers) {
33882 + if (!atomic_read(&opipe->readers)) {
33883 send_sig(SIGPIPE, current, 0);
33884 if (!ret)
33885 ret = -EPIPE;
33886 break;
33887 }
33888
33889 - if (!ipipe->nrbufs && !ipipe->writers)
33890 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
33891 break;
33892
33893 /*
33894 @@ -1961,7 +1961,7 @@ static int link_pipe(struct pipe_inode_i
33895 pipe_double_lock(ipipe, opipe);
33896
33897 do {
33898 - if (!opipe->readers) {
33899 + if (!atomic_read(&opipe->readers)) {
33900 send_sig(SIGPIPE, current, 0);
33901 if (!ret)
33902 ret = -EPIPE;
33903 @@ -2006,7 +2006,7 @@ static int link_pipe(struct pipe_inode_i
33904 * return EAGAIN if we have the potential of some data in the
33905 * future, otherwise just return 0
33906 */
33907 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
33908 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
33909 ret = -EAGAIN;
33910
33911 pipe_unlock(ipipe);
33912 diff -urNp linux-2.6.35.4/fs/sysfs/symlink.c linux-2.6.35.4/fs/sysfs/symlink.c
33913 --- linux-2.6.35.4/fs/sysfs/symlink.c 2010-08-26 19:47:12.000000000 -0400
33914 +++ linux-2.6.35.4/fs/sysfs/symlink.c 2010-09-17 20:12:09.000000000 -0400
33915 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct de
33916
33917 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
33918 {
33919 - char *page = nd_get_link(nd);
33920 + const char *page = nd_get_link(nd);
33921 if (!IS_ERR(page))
33922 free_page((unsigned long)page);
33923 }
33924 diff -urNp linux-2.6.35.4/fs/udf/misc.c linux-2.6.35.4/fs/udf/misc.c
33925 --- linux-2.6.35.4/fs/udf/misc.c 2010-08-26 19:47:12.000000000 -0400
33926 +++ linux-2.6.35.4/fs/udf/misc.c 2010-09-17 20:12:09.000000000 -0400
33927 @@ -142,8 +142,8 @@ struct genericFormat *udf_add_extendedat
33928 iinfo->i_lenEAttr += size;
33929 return (struct genericFormat *)&ea[offset];
33930 }
33931 - if (loc & 0x02)
33932 - ;
33933 + if (loc & 0x02) {
33934 + }
33935
33936 return NULL;
33937 }
33938 diff -urNp linux-2.6.35.4/fs/udf/udfdecl.h linux-2.6.35.4/fs/udf/udfdecl.h
33939 --- linux-2.6.35.4/fs/udf/udfdecl.h 2010-08-26 19:47:12.000000000 -0400
33940 +++ linux-2.6.35.4/fs/udf/udfdecl.h 2010-09-17 20:12:09.000000000 -0400
33941 @@ -26,7 +26,7 @@ do { \
33942 printk(f, ##a); \
33943 } while (0)
33944 #else
33945 -#define udf_debug(f, a...) /**/
33946 +#define udf_debug(f, a...) do {} while (0)
33947 #endif
33948
33949 #define udf_info(f, a...) \
33950 diff -urNp linux-2.6.35.4/fs/utimes.c linux-2.6.35.4/fs/utimes.c
33951 --- linux-2.6.35.4/fs/utimes.c 2010-08-26 19:47:12.000000000 -0400
33952 +++ linux-2.6.35.4/fs/utimes.c 2010-09-17 20:12:37.000000000 -0400
33953 @@ -1,6 +1,7 @@
33954 #include <linux/compiler.h>
33955 #include <linux/file.h>
33956 #include <linux/fs.h>
33957 +#include <linux/security.h>
33958 #include <linux/linkage.h>
33959 #include <linux/mount.h>
33960 #include <linux/namei.h>
33961 @@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
33962 goto mnt_drop_write_and_out;
33963 }
33964 }
33965 +
33966 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
33967 + error = -EACCES;
33968 + goto mnt_drop_write_and_out;
33969 + }
33970 +
33971 mutex_lock(&inode->i_mutex);
33972 error = notify_change(path->dentry, &newattrs);
33973 mutex_unlock(&inode->i_mutex);
33974 diff -urNp linux-2.6.35.4/fs/xfs/linux-2.6/xfs_ioctl.c linux-2.6.35.4/fs/xfs/linux-2.6/xfs_ioctl.c
33975 --- linux-2.6.35.4/fs/xfs/linux-2.6/xfs_ioctl.c 2010-08-26 19:47:12.000000000 -0400
33976 +++ linux-2.6.35.4/fs/xfs/linux-2.6/xfs_ioctl.c 2010-09-17 20:12:37.000000000 -0400
33977 @@ -136,7 +136,7 @@ xfs_find_handle(
33978 }
33979
33980 error = -EFAULT;
33981 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
33982 + if (hsize > sizeof(handle) || copy_to_user(hreq->ohandle, &handle, hsize) ||
33983 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
33984 goto out_put;
33985
33986 diff -urNp linux-2.6.35.4/fs/xfs/linux-2.6/xfs_iops.c linux-2.6.35.4/fs/xfs/linux-2.6/xfs_iops.c
33987 --- linux-2.6.35.4/fs/xfs/linux-2.6/xfs_iops.c 2010-08-26 19:47:12.000000000 -0400
33988 +++ linux-2.6.35.4/fs/xfs/linux-2.6/xfs_iops.c 2010-09-17 20:12:09.000000000 -0400
33989 @@ -480,7 +480,7 @@ xfs_vn_put_link(
33990 struct nameidata *nd,
33991 void *p)
33992 {
33993 - char *s = nd_get_link(nd);
33994 + const char *s = nd_get_link(nd);
33995
33996 if (!IS_ERR(s))
33997 kfree(s);
33998 diff -urNp linux-2.6.35.4/fs/xfs/xfs_bmap.c linux-2.6.35.4/fs/xfs/xfs_bmap.c
33999 --- linux-2.6.35.4/fs/xfs/xfs_bmap.c 2010-08-26 19:47:12.000000000 -0400
34000 +++ linux-2.6.35.4/fs/xfs/xfs_bmap.c 2010-09-17 20:12:09.000000000 -0400
34001 @@ -296,7 +296,7 @@ xfs_bmap_validate_ret(
34002 int nmap,
34003 int ret_nmap);
34004 #else
34005 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
34006 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
34007 #endif /* DEBUG */
34008
34009 STATIC int
34010 diff -urNp linux-2.6.35.4/grsecurity/gracl_alloc.c linux-2.6.35.4/grsecurity/gracl_alloc.c
34011 --- linux-2.6.35.4/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
34012 +++ linux-2.6.35.4/grsecurity/gracl_alloc.c 2010-09-17 20:12:37.000000000 -0400
34013 @@ -0,0 +1,105 @@
34014 +#include <linux/kernel.h>
34015 +#include <linux/mm.h>
34016 +#include <linux/slab.h>
34017 +#include <linux/vmalloc.h>
34018 +#include <linux/gracl.h>
34019 +#include <linux/grsecurity.h>
34020 +
34021 +static unsigned long alloc_stack_next = 1;
34022 +static unsigned long alloc_stack_size = 1;
34023 +static void **alloc_stack;
34024 +
34025 +static __inline__ int
34026 +alloc_pop(void)
34027 +{
34028 + if (alloc_stack_next == 1)
34029 + return 0;
34030 +
34031 + kfree(alloc_stack[alloc_stack_next - 2]);
34032 +
34033 + alloc_stack_next--;
34034 +
34035 + return 1;
34036 +}
34037 +
34038 +static __inline__ int
34039 +alloc_push(void *buf)
34040 +{
34041 + if (alloc_stack_next >= alloc_stack_size)
34042 + return 1;
34043 +
34044 + alloc_stack[alloc_stack_next - 1] = buf;
34045 +
34046 + alloc_stack_next++;
34047 +
34048 + return 0;
34049 +}
34050 +
34051 +void *
34052 +acl_alloc(unsigned long len)
34053 +{
34054 + void *ret = NULL;
34055 +
34056 + if (!len || len > PAGE_SIZE)
34057 + goto out;
34058 +
34059 + ret = kmalloc(len, GFP_KERNEL);
34060 +
34061 + if (ret) {
34062 + if (alloc_push(ret)) {
34063 + kfree(ret);
34064 + ret = NULL;
34065 + }
34066 + }
34067 +
34068 +out:
34069 + return ret;
34070 +}
34071 +
34072 +void *
34073 +acl_alloc_num(unsigned long num, unsigned long len)
34074 +{
34075 + if (!len || (num > (PAGE_SIZE / len)))
34076 + return NULL;
34077 +
34078 + return acl_alloc(num * len);
34079 +}
34080 +
34081 +void
34082 +acl_free_all(void)
34083 +{
34084 + if (gr_acl_is_enabled() || !alloc_stack)
34085 + return;
34086 +
34087 + while (alloc_pop()) ;
34088 +
34089 + if (alloc_stack) {
34090 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
34091 + kfree(alloc_stack);
34092 + else
34093 + vfree(alloc_stack);
34094 + }
34095 +
34096 + alloc_stack = NULL;
34097 + alloc_stack_size = 1;
34098 + alloc_stack_next = 1;
34099 +
34100 + return;
34101 +}
34102 +
34103 +int
34104 +acl_alloc_stack_init(unsigned long size)
34105 +{
34106 + if ((size * sizeof (void *)) <= PAGE_SIZE)
34107 + alloc_stack =
34108 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
34109 + else
34110 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
34111 +
34112 + alloc_stack_size = size;
34113 +
34114 + if (!alloc_stack)
34115 + return 0;
34116 + else
34117 + return 1;
34118 +}
34119 diff -urNp linux-2.6.35.4/grsecurity/gracl.c linux-2.6.35.4/grsecurity/gracl.c
34120 --- linux-2.6.35.4/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
34121 +++ linux-2.6.35.4/grsecurity/gracl.c 2010-09-17 20:18:36.000000000 -0400
34122 @@ -0,0 +1,3922 @@
34123 +#include <linux/kernel.h>
34124 +#include <linux/module.h>
34125 +#include <linux/sched.h>
34126 +#include <linux/mm.h>
34127 +#include <linux/file.h>
34128 +#include <linux/fs.h>
34129 +#include <linux/namei.h>
34130 +#include <linux/mount.h>
34131 +#include <linux/tty.h>
34132 +#include <linux/proc_fs.h>
34133 +#include <linux/smp_lock.h>
34134 +#include <linux/slab.h>
34135 +#include <linux/vmalloc.h>
34136 +#include <linux/types.h>
34137 +#include <linux/sysctl.h>
34138 +#include <linux/netdevice.h>
34139 +#include <linux/ptrace.h>
34140 +#include <linux/gracl.h>
34141 +#include <linux/gralloc.h>
34142 +#include <linux/grsecurity.h>
34143 +#include <linux/grinternal.h>
34144 +#include <linux/pid_namespace.h>
34145 +#include <linux/fdtable.h>
34146 +#include <linux/percpu.h>
34147 +
34148 +#include <asm/uaccess.h>
34149 +#include <asm/errno.h>
34150 +#include <asm/mman.h>
34151 +
34152 +static struct acl_role_db acl_role_set;
34153 +static struct name_db name_set;
34154 +static struct inodev_db inodev_set;
34155 +
34156 +/* for keeping track of userspace pointers used for subjects, so we
34157 + can share references in the kernel as well
34158 +*/
34159 +
34160 +static struct dentry *real_root;
34161 +static struct vfsmount *real_root_mnt;
34162 +
34163 +static struct acl_subj_map_db subj_map_set;
34164 +
34165 +static struct acl_role_label *default_role;
34166 +
34167 +static struct acl_role_label *role_list;
34168 +
34169 +static u16 acl_sp_role_value;
34170 +
34171 +extern char *gr_shared_page[4];
34172 +static DECLARE_MUTEX(gr_dev_sem);
34173 +DEFINE_RWLOCK(gr_inode_lock);
34174 +
34175 +struct gr_arg *gr_usermode;
34176 +
34177 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
34178 +
34179 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
34180 +extern void gr_clear_learn_entries(void);
34181 +
34182 +#ifdef CONFIG_GRKERNSEC_RESLOG
34183 +extern void gr_log_resource(const struct task_struct *task,
34184 + const int res, const unsigned long wanted, const int gt);
34185 +#endif
34186 +
34187 +unsigned char *gr_system_salt;
34188 +unsigned char *gr_system_sum;
34189 +
34190 +static struct sprole_pw **acl_special_roles = NULL;
34191 +static __u16 num_sprole_pws = 0;
34192 +
34193 +static struct acl_role_label *kernel_role = NULL;
34194 +
34195 +static unsigned int gr_auth_attempts = 0;
34196 +static unsigned long gr_auth_expires = 0UL;
34197 +
34198 +extern struct vfsmount *sock_mnt;
34199 +extern struct vfsmount *pipe_mnt;
34200 +extern struct vfsmount *shm_mnt;
34201 +#ifdef CONFIG_HUGETLBFS
34202 +extern struct vfsmount *hugetlbfs_vfsmount;
34203 +#endif
34204 +
34205 +static struct acl_object_label *fakefs_obj;
34206 +
34207 +extern int gr_init_uidset(void);
34208 +extern void gr_free_uidset(void);
34209 +extern void gr_remove_uid(uid_t uid);
34210 +extern int gr_find_uid(uid_t uid);
34211 +
34212 +extern spinlock_t vfsmount_lock;
34213 +
34214 +__inline__ int
34215 +gr_acl_is_enabled(void)
34216 +{
34217 + return (gr_status & GR_READY);
34218 +}
34219 +
34220 +char gr_roletype_to_char(void)
34221 +{
34222 + switch (current->role->roletype &
34223 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
34224 + GR_ROLE_SPECIAL)) {
34225 + case GR_ROLE_DEFAULT:
34226 + return 'D';
34227 + case GR_ROLE_USER:
34228 + return 'U';
34229 + case GR_ROLE_GROUP:
34230 + return 'G';
34231 + case GR_ROLE_SPECIAL:
34232 + return 'S';
34233 + }
34234 +
34235 + return 'X';
34236 +}
34237 +
34238 +__inline__ int
34239 +gr_acl_tpe_check(void)
34240 +{
34241 + if (unlikely(!(gr_status & GR_READY)))
34242 + return 0;
34243 + if (current->role->roletype & GR_ROLE_TPE)
34244 + return 1;
34245 + else
34246 + return 0;
34247 +}
34248 +
34249 +int
34250 +gr_handle_rawio(const struct inode *inode)
34251 +{
34252 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
34253 + if (inode && S_ISBLK(inode->i_mode) &&
34254 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
34255 + !capable(CAP_SYS_RAWIO))
34256 + return 1;
34257 +#endif
34258 + return 0;
34259 +}
34260 +
34261 +static int
34262 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
34263 +{
34264 + if (likely(lena != lenb))
34265 + return 0;
34266 +
34267 + return !memcmp(a, b, lena);
34268 +}
34269 +
34270 +static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
34271 + struct dentry *root, struct vfsmount *rootmnt,
34272 + char *buffer, int buflen)
34273 +{
34274 + char * end = buffer+buflen;
34275 + char * retval;
34276 + int namelen;
34277 +
34278 + spin_lock(&vfsmount_lock);
34279 + *--end = '\0';
34280 + buflen--;
34281 +
34282 + if (buflen < 1)
34283 + goto Elong;
34284 + /* Get '/' right */
34285 + retval = end-1;
34286 + *retval = '/';
34287 +
34288 + for (;;) {
34289 + struct dentry * parent;
34290 +
34291 + if (dentry == root && vfsmnt == rootmnt)
34292 + break;
34293 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
34294 + /* Global root? */
34295 + if (vfsmnt->mnt_parent == vfsmnt) {
34296 + goto global_root;
34297 + }
34298 + dentry = vfsmnt->mnt_mountpoint;
34299 + vfsmnt = vfsmnt->mnt_parent;
34300 + continue;
34301 + }
34302 + parent = dentry->d_parent;
34303 + prefetch(parent);
34304 + namelen = dentry->d_name.len;
34305 + buflen -= namelen + 1;
34306 + if (buflen < 0)
34307 + goto Elong;
34308 + end -= namelen;
34309 + memcpy(end, dentry->d_name.name, namelen);
34310 + *--end = '/';
34311 + retval = end;
34312 + dentry = parent;
34313 + }
34314 +
34315 +out:
34316 + spin_unlock(&vfsmount_lock);
34317 + return retval;
34318 +
34319 +global_root:
34320 + namelen = dentry->d_name.len;
34321 + buflen -= namelen;
34322 + if (buflen < 0)
34323 + goto Elong;
34324 + retval -= namelen-1; /* hit the slash */
34325 + memcpy(retval, dentry->d_name.name, namelen);
34326 + goto out;
34327 +Elong:
34328 + retval = ERR_PTR(-ENAMETOOLONG);
34329 + goto out;
34330 +}
34331 +
34332 +static char *
34333 +gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
34334 + struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
34335 +{
34336 + char *retval;
34337 +
34338 + retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
34339 + if (unlikely(IS_ERR(retval)))
34340 + retval = strcpy(buf, "<path too long>");
34341 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
34342 + retval[1] = '\0';
34343 +
34344 + return retval;
34345 +}
34346 +
34347 +static char *
34348 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
34349 + char *buf, int buflen)
34350 +{
34351 + char *res;
34352 +
34353 + /* we can use real_root, real_root_mnt, because this is only called
34354 + by the RBAC system */
34355 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
34356 +
34357 + return res;
34358 +}
34359 +
34360 +static char *
34361 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
34362 + char *buf, int buflen)
34363 +{
34364 + char *res;
34365 + struct dentry *root;
34366 + struct vfsmount *rootmnt;
34367 + struct task_struct *reaper = &init_task;
34368 +
34369 + /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
34370 + read_lock(&reaper->fs->lock);
34371 + root = dget(reaper->fs->root.dentry);
34372 + rootmnt = mntget(reaper->fs->root.mnt);
34373 + read_unlock(&reaper->fs->lock);
34374 +
34375 + spin_lock(&dcache_lock);
34376 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
34377 + spin_unlock(&dcache_lock);
34378 +
34379 + dput(root);
34380 + mntput(rootmnt);
34381 + return res;
34382 +}
34383 +
34384 +static char *
34385 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
34386 +{
34387 + char *ret;
34388 + spin_lock(&dcache_lock);
34389 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
34390 + PAGE_SIZE);
34391 + spin_unlock(&dcache_lock);
34392 + return ret;
34393 +}
34394 +
34395 +char *
34396 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
34397 +{
34398 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
34399 + PAGE_SIZE);
34400 +}
34401 +
34402 +char *
34403 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
34404 +{
34405 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
34406 + PAGE_SIZE);
34407 +}
34408 +
34409 +char *
34410 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
34411 +{
34412 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
34413 + PAGE_SIZE);
34414 +}
34415 +
34416 +char *
34417 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
34418 +{
34419 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
34420 + PAGE_SIZE);
34421 +}
34422 +
34423 +char *
34424 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
34425 +{
34426 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
34427 + PAGE_SIZE);
34428 +}
34429 +
34430 +__inline__ __u32
34431 +to_gr_audit(const __u32 reqmode)
34432 +{
34433 + /* masks off auditable permission flags, then shifts them to create
34434 + auditing flags, and adds the special case of append auditing if
34435 + we're requesting write */
34436 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
34437 +}
34438 +
34439 +struct acl_subject_label *
34440 +lookup_subject_map(const struct acl_subject_label *userp)
34441 +{
34442 + unsigned int index = shash(userp, subj_map_set.s_size);
34443 + struct subject_map *match;
34444 +
34445 + match = subj_map_set.s_hash[index];
34446 +
34447 + while (match && match->user != userp)
34448 + match = match->next;
34449 +
34450 + if (match != NULL)
34451 + return match->kernel;
34452 + else
34453 + return NULL;
34454 +}
34455 +
34456 +static void
34457 +insert_subj_map_entry(struct subject_map *subjmap)
34458 +{
34459 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
34460 + struct subject_map **curr;
34461 +
34462 + subjmap->prev = NULL;
34463 +
34464 + curr = &subj_map_set.s_hash[index];
34465 + if (*curr != NULL)
34466 + (*curr)->prev = subjmap;
34467 +
34468 + subjmap->next = *curr;
34469 + *curr = subjmap;
34470 +
34471 + return;
34472 +}
34473 +
34474 +static struct acl_role_label *
34475 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
34476 + const gid_t gid)
34477 +{
34478 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
34479 + struct acl_role_label *match;
34480 + struct role_allowed_ip *ipp;
34481 + unsigned int x;
34482 +
34483 + match = acl_role_set.r_hash[index];
34484 +
34485 + while (match) {
34486 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
34487 + for (x = 0; x < match->domain_child_num; x++) {
34488 + if (match->domain_children[x] == uid)
34489 + goto found;
34490 + }
34491 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
34492 + break;
34493 + match = match->next;
34494 + }
34495 +found:
34496 + if (match == NULL) {
34497 + try_group:
34498 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
34499 + match = acl_role_set.r_hash[index];
34500 +
34501 + while (match) {
34502 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
34503 + for (x = 0; x < match->domain_child_num; x++) {
34504 + if (match->domain_children[x] == gid)
34505 + goto found2;
34506 + }
34507 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
34508 + break;
34509 + match = match->next;
34510 + }
34511 +found2:
34512 + if (match == NULL)
34513 + match = default_role;
34514 + if (match->allowed_ips == NULL)
34515 + return match;
34516 + else {
34517 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
34518 + if (likely
34519 + ((ntohl(task->signal->curr_ip) & ipp->netmask) ==
34520 + (ntohl(ipp->addr) & ipp->netmask)))
34521 + return match;
34522 + }
34523 + match = default_role;
34524 + }
34525 + } else if (match->allowed_ips == NULL) {
34526 + return match;
34527 + } else {
34528 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
34529 + if (likely
34530 + ((ntohl(task->signal->curr_ip) & ipp->netmask) ==
34531 + (ntohl(ipp->addr) & ipp->netmask)))
34532 + return match;
34533 + }
34534 + goto try_group;
34535 + }
34536 +
34537 + return match;
34538 +}
34539 +
34540 +struct acl_subject_label *
34541 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
34542 + const struct acl_role_label *role)
34543 +{
34544 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
34545 + struct acl_subject_label *match;
34546 +
34547 + match = role->subj_hash[index];
34548 +
34549 + while (match && (match->inode != ino || match->device != dev ||
34550 + (match->mode & GR_DELETED))) {
34551 + match = match->next;
34552 + }
34553 +
34554 + if (match && !(match->mode & GR_DELETED))
34555 + return match;
34556 + else
34557 + return NULL;
34558 +}
34559 +
34560 +struct acl_subject_label *
34561 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
34562 + const struct acl_role_label *role)
34563 +{
34564 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
34565 + struct acl_subject_label *match;
34566 +
34567 + match = role->subj_hash[index];
34568 +
34569 + while (match && (match->inode != ino || match->device != dev ||
34570 + !(match->mode & GR_DELETED))) {
34571 + match = match->next;
34572 + }
34573 +
34574 + if (match && (match->mode & GR_DELETED))
34575 + return match;
34576 + else
34577 + return NULL;
34578 +}
34579 +
34580 +static struct acl_object_label *
34581 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
34582 + const struct acl_subject_label *subj)
34583 +{
34584 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
34585 + struct acl_object_label *match;
34586 +
34587 + match = subj->obj_hash[index];
34588 +
34589 + while (match && (match->inode != ino || match->device != dev ||
34590 + (match->mode & GR_DELETED))) {
34591 + match = match->next;
34592 + }
34593 +
34594 + if (match && !(match->mode & GR_DELETED))
34595 + return match;
34596 + else
34597 + return NULL;
34598 +}
34599 +
34600 +static struct acl_object_label *
34601 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
34602 + const struct acl_subject_label *subj)
34603 +{
34604 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
34605 + struct acl_object_label *match;
34606 +
34607 + match = subj->obj_hash[index];
34608 +
34609 + while (match && (match->inode != ino || match->device != dev ||
34610 + !(match->mode & GR_DELETED))) {
34611 + match = match->next;
34612 + }
34613 +
34614 + if (match && (match->mode & GR_DELETED))
34615 + return match;
34616 +
34617 + match = subj->obj_hash[index];
34618 +
34619 + while (match && (match->inode != ino || match->device != dev ||
34620 + (match->mode & GR_DELETED))) {
34621 + match = match->next;
34622 + }
34623 +
34624 + if (match && !(match->mode & GR_DELETED))
34625 + return match;
34626 + else
34627 + return NULL;
34628 +}
34629 +
34630 +static struct name_entry *
34631 +lookup_name_entry(const char *name)
34632 +{
34633 + unsigned int len = strlen(name);
34634 + unsigned int key = full_name_hash(name, len);
34635 + unsigned int index = key % name_set.n_size;
34636 + struct name_entry *match;
34637 +
34638 + match = name_set.n_hash[index];
34639 +
34640 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
34641 + match = match->next;
34642 +
34643 + return match;
34644 +}
34645 +
34646 +static struct name_entry *
34647 +lookup_name_entry_create(const char *name)
34648 +{
34649 + unsigned int len = strlen(name);
34650 + unsigned int key = full_name_hash(name, len);
34651 + unsigned int index = key % name_set.n_size;
34652 + struct name_entry *match;
34653 +
34654 + match = name_set.n_hash[index];
34655 +
34656 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
34657 + !match->deleted))
34658 + match = match->next;
34659 +
34660 + if (match && match->deleted)
34661 + return match;
34662 +
34663 + match = name_set.n_hash[index];
34664 +
34665 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
34666 + match->deleted))
34667 + match = match->next;
34668 +
34669 + if (match && !match->deleted)
34670 + return match;
34671 + else
34672 + return NULL;
34673 +}
34674 +
34675 +static struct inodev_entry *
34676 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
34677 +{
34678 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
34679 + struct inodev_entry *match;
34680 +
34681 + match = inodev_set.i_hash[index];
34682 +
34683 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
34684 + match = match->next;
34685 +
34686 + return match;
34687 +}
34688 +
34689 +static void
34690 +insert_inodev_entry(struct inodev_entry *entry)
34691 +{
34692 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
34693 + inodev_set.i_size);
34694 + struct inodev_entry **curr;
34695 +
34696 + entry->prev = NULL;
34697 +
34698 + curr = &inodev_set.i_hash[index];
34699 + if (*curr != NULL)
34700 + (*curr)->prev = entry;
34701 +
34702 + entry->next = *curr;
34703 + *curr = entry;
34704 +
34705 + return;
34706 +}
34707 +
34708 +static void
34709 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
34710 +{
34711 + unsigned int index =
34712 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
34713 + struct acl_role_label **curr;
34714 + struct acl_role_label *tmp;
34715 +
34716 + curr = &acl_role_set.r_hash[index];
34717 +
34718 + /* if role was already inserted due to domains and already has
34719 + a role in the same bucket as it attached, then we need to
34720 + combine these two buckets
34721 + */
34722 + if (role->next) {
34723 + tmp = role->next;
34724 + while (tmp->next)
34725 + tmp = tmp->next;
34726 + tmp->next = *curr;
34727 + } else
34728 + role->next = *curr;
34729 + *curr = role;
34730 +
34731 + return;
34732 +}
34733 +
34734 +static void
34735 +insert_acl_role_label(struct acl_role_label *role)
34736 +{
34737 + int i;
34738 +
34739 + if (role_list == NULL) {
34740 + role_list = role;
34741 + role->prev = NULL;
34742 + } else {
34743 + role->prev = role_list;
34744 + role_list = role;
34745 + }
34746 +
34747 + /* used for hash chains */
34748 + role->next = NULL;
34749 +
34750 + if (role->roletype & GR_ROLE_DOMAIN) {
34751 + for (i = 0; i < role->domain_child_num; i++)
34752 + __insert_acl_role_label(role, role->domain_children[i]);
34753 + } else
34754 + __insert_acl_role_label(role, role->uidgid);
34755 +}
34756 +
34757 +static int
34758 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
34759 +{
34760 + struct name_entry **curr, *nentry;
34761 + struct inodev_entry *ientry;
34762 + unsigned int len = strlen(name);
34763 + unsigned int key = full_name_hash(name, len);
34764 + unsigned int index = key % name_set.n_size;
34765 +
34766 + curr = &name_set.n_hash[index];
34767 +
34768 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
34769 + curr = &((*curr)->next);
34770 +
34771 + if (*curr != NULL)
34772 + return 1;
34773 +
34774 + nentry = acl_alloc(sizeof (struct name_entry));
34775 + if (nentry == NULL)
34776 + return 0;
34777 + ientry = acl_alloc(sizeof (struct inodev_entry));
34778 + if (ientry == NULL)
34779 + return 0;
34780 + ientry->nentry = nentry;
34781 +
34782 + nentry->key = key;
34783 + nentry->name = name;
34784 + nentry->inode = inode;
34785 + nentry->device = device;
34786 + nentry->len = len;
34787 + nentry->deleted = deleted;
34788 +
34789 + nentry->prev = NULL;
34790 + curr = &name_set.n_hash[index];
34791 + if (*curr != NULL)
34792 + (*curr)->prev = nentry;
34793 + nentry->next = *curr;
34794 + *curr = nentry;
34795 +
34796 + /* insert us into the table searchable by inode/dev */
34797 + insert_inodev_entry(ientry);
34798 +
34799 + return 1;
34800 +}
34801 +
34802 +static void
34803 +insert_acl_obj_label(struct acl_object_label *obj,
34804 + struct acl_subject_label *subj)
34805 +{
34806 + unsigned int index =
34807 + fhash(obj->inode, obj->device, subj->obj_hash_size);
34808 + struct acl_object_label **curr;
34809 +
34810 +
34811 + obj->prev = NULL;
34812 +
34813 + curr = &subj->obj_hash[index];
34814 + if (*curr != NULL)
34815 + (*curr)->prev = obj;
34816 +
34817 + obj->next = *curr;
34818 + *curr = obj;
34819 +
34820 + return;
34821 +}
34822 +
34823 +static void
34824 +insert_acl_subj_label(struct acl_subject_label *obj,
34825 + struct acl_role_label *role)
34826 +{
34827 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
34828 + struct acl_subject_label **curr;
34829 +
34830 + obj->prev = NULL;
34831 +
34832 + curr = &role->subj_hash[index];
34833 + if (*curr != NULL)
34834 + (*curr)->prev = obj;
34835 +
34836 + obj->next = *curr;
34837 + *curr = obj;
34838 +
34839 + return;
34840 +}
34841 +
34842 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
34843 +
34844 +static void *
34845 +create_table(__u32 * len, int elementsize)
34846 +{
34847 + unsigned int table_sizes[] = {
34848 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
34849 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
34850 + 4194301, 8388593, 16777213, 33554393, 67108859
34851 + };
34852 + void *newtable = NULL;
34853 + unsigned int pwr = 0;
34854 +
34855 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
34856 + table_sizes[pwr] <= *len)
34857 + pwr++;
34858 +
34859 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
34860 + return newtable;
34861 +
34862 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
34863 + newtable =
34864 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
34865 + else
34866 + newtable = vmalloc(table_sizes[pwr] * elementsize);
34867 +
34868 + *len = table_sizes[pwr];
34869 +
34870 + return newtable;
34871 +}
34872 +
34873 +static int
34874 +init_variables(const struct gr_arg *arg)
34875 +{
34876 + struct task_struct *reaper = &init_task;
34877 + unsigned int stacksize;
34878 +
34879 + subj_map_set.s_size = arg->role_db.num_subjects;
34880 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
34881 + name_set.n_size = arg->role_db.num_objects;
34882 + inodev_set.i_size = arg->role_db.num_objects;
34883 +
34884 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
34885 + !name_set.n_size || !inodev_set.i_size)
34886 + return 1;
34887 +
34888 + if (!gr_init_uidset())
34889 + return 1;
34890 +
34891 + /* set up the stack that holds allocation info */
34892 +
34893 + stacksize = arg->role_db.num_pointers + 5;
34894 +
34895 + if (!acl_alloc_stack_init(stacksize))
34896 + return 1;
34897 +
34898 + /* grab reference for the real root dentry and vfsmount */
34899 + read_lock(&reaper->fs->lock);
34900 + real_root_mnt = mntget(reaper->fs->root.mnt);
34901 + real_root = dget(reaper->fs->root.dentry);
34902 + read_unlock(&reaper->fs->lock);
34903 +
34904 + fakefs_obj = acl_alloc(sizeof(struct acl_object_label));
34905 + if (fakefs_obj == NULL)
34906 + return 1;
34907 + fakefs_obj->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
34908 +
34909 + subj_map_set.s_hash =
34910 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
34911 + acl_role_set.r_hash =
34912 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
34913 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
34914 + inodev_set.i_hash =
34915 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
34916 +
34917 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
34918 + !name_set.n_hash || !inodev_set.i_hash)
34919 + return 1;
34920 +
34921 + memset(subj_map_set.s_hash, 0,
34922 + sizeof(struct subject_map *) * subj_map_set.s_size);
34923 + memset(acl_role_set.r_hash, 0,
34924 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
34925 + memset(name_set.n_hash, 0,
34926 + sizeof (struct name_entry *) * name_set.n_size);
34927 + memset(inodev_set.i_hash, 0,
34928 + sizeof (struct inodev_entry *) * inodev_set.i_size);
34929 +
34930 + return 0;
34931 +}
34932 +
34933 +/* free information not needed after startup
34934 + currently contains user->kernel pointer mappings for subjects
34935 +*/
34936 +
34937 +static void
34938 +free_init_variables(void)
34939 +{
34940 + __u32 i;
34941 +
34942 + if (subj_map_set.s_hash) {
34943 + for (i = 0; i < subj_map_set.s_size; i++) {
34944 + if (subj_map_set.s_hash[i]) {
34945 + kfree(subj_map_set.s_hash[i]);
34946 + subj_map_set.s_hash[i] = NULL;
34947 + }
34948 + }
34949 +
34950 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
34951 + PAGE_SIZE)
34952 + kfree(subj_map_set.s_hash);
34953 + else
34954 + vfree(subj_map_set.s_hash);
34955 + }
34956 +
34957 + return;
34958 +}
34959 +
34960 +static void
34961 +free_variables(void)
34962 +{
34963 + struct acl_subject_label *s;
34964 + struct acl_role_label *r;
34965 + struct task_struct *task, *task2;
34966 + unsigned int x;
34967 +
34968 + gr_clear_learn_entries();
34969 +
34970 + read_lock(&tasklist_lock);
34971 + do_each_thread(task2, task) {
34972 + task->acl_sp_role = 0;
34973 + task->acl_role_id = 0;
34974 + task->acl = NULL;
34975 + task->role = NULL;
34976 + } while_each_thread(task2, task);
34977 + read_unlock(&tasklist_lock);
34978 +
34979 + /* release the reference to the real root dentry and vfsmount */
34980 + if (real_root)
34981 + dput(real_root);
34982 + real_root = NULL;
34983 + if (real_root_mnt)
34984 + mntput(real_root_mnt);
34985 + real_root_mnt = NULL;
34986 +
34987 + /* free all object hash tables */
34988 +
34989 + FOR_EACH_ROLE_START(r)
34990 + if (r->subj_hash == NULL)
34991 + goto next_role;
34992 + FOR_EACH_SUBJECT_START(r, s, x)
34993 + if (s->obj_hash == NULL)
34994 + break;
34995 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
34996 + kfree(s->obj_hash);
34997 + else
34998 + vfree(s->obj_hash);
34999 + FOR_EACH_SUBJECT_END(s, x)
35000 + FOR_EACH_NESTED_SUBJECT_START(r, s)
35001 + if (s->obj_hash == NULL)
35002 + break;
35003 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
35004 + kfree(s->obj_hash);
35005 + else
35006 + vfree(s->obj_hash);
35007 + FOR_EACH_NESTED_SUBJECT_END(s)
35008 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
35009 + kfree(r->subj_hash);
35010 + else
35011 + vfree(r->subj_hash);
35012 + r->subj_hash = NULL;
35013 +next_role:
35014 + FOR_EACH_ROLE_END(r)
35015 +
35016 + acl_free_all();
35017 +
35018 + if (acl_role_set.r_hash) {
35019 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
35020 + PAGE_SIZE)
35021 + kfree(acl_role_set.r_hash);
35022 + else
35023 + vfree(acl_role_set.r_hash);
35024 + }
35025 + if (name_set.n_hash) {
35026 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
35027 + PAGE_SIZE)
35028 + kfree(name_set.n_hash);
35029 + else
35030 + vfree(name_set.n_hash);
35031 + }
35032 +
35033 + if (inodev_set.i_hash) {
35034 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
35035 + PAGE_SIZE)
35036 + kfree(inodev_set.i_hash);
35037 + else
35038 + vfree(inodev_set.i_hash);
35039 + }
35040 +
35041 + gr_free_uidset();
35042 +
35043 + memset(&name_set, 0, sizeof (struct name_db));
35044 + memset(&inodev_set, 0, sizeof (struct inodev_db));
35045 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
35046 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
35047 +
35048 + default_role = NULL;
35049 + role_list = NULL;
35050 +
35051 + return;
35052 +}
35053 +
35054 +static __u32
35055 +count_user_objs(struct acl_object_label *userp)
35056 +{
35057 + struct acl_object_label o_tmp;
35058 + __u32 num = 0;
35059 +
35060 + while (userp) {
35061 + if (copy_from_user(&o_tmp, userp,
35062 + sizeof (struct acl_object_label)))
35063 + break;
35064 +
35065 + userp = o_tmp.prev;
35066 + num++;
35067 + }
35068 +
35069 + return num;
35070 +}
35071 +
35072 +static struct acl_subject_label *
35073 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
35074 +
35075 +static int
35076 +copy_user_glob(struct acl_object_label *obj)
35077 +{
35078 + struct acl_object_label *g_tmp, **guser;
35079 + unsigned int len;
35080 + char *tmp;
35081 +
35082 + if (obj->globbed == NULL)
35083 + return 0;
35084 +
35085 + guser = &obj->globbed;
35086 + while (*guser) {
35087 + g_tmp = (struct acl_object_label *)
35088 + acl_alloc(sizeof (struct acl_object_label));
35089 + if (g_tmp == NULL)
35090 + return -ENOMEM;
35091 +
35092 + if (copy_from_user(g_tmp, *guser,
35093 + sizeof (struct acl_object_label)))
35094 + return -EFAULT;
35095 +
35096 + len = strnlen_user(g_tmp->filename, PATH_MAX);
35097 +
35098 + if (!len || len >= PATH_MAX)
35099 + return -EINVAL;
35100 +
35101 + if ((tmp = (char *) acl_alloc(len)) == NULL)
35102 + return -ENOMEM;
35103 +
35104 + if (copy_from_user(tmp, g_tmp->filename, len))
35105 + return -EFAULT;
35106 + tmp[len-1] = '\0';
35107 + g_tmp->filename = tmp;
35108 +
35109 + *guser = g_tmp;
35110 + guser = &(g_tmp->next);
35111 + }
35112 +
35113 + return 0;
35114 +}
35115 +
35116 +static int
35117 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
35118 + struct acl_role_label *role)
35119 +{
35120 + struct acl_object_label *o_tmp;
35121 + unsigned int len;
35122 + int ret;
35123 + char *tmp;
35124 +
35125 + while (userp) {
35126 + if ((o_tmp = (struct acl_object_label *)
35127 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
35128 + return -ENOMEM;
35129 +
35130 + if (copy_from_user(o_tmp, userp,
35131 + sizeof (struct acl_object_label)))
35132 + return -EFAULT;
35133 +
35134 + userp = o_tmp->prev;
35135 +
35136 + len = strnlen_user(o_tmp->filename, PATH_MAX);
35137 +
35138 + if (!len || len >= PATH_MAX)
35139 + return -EINVAL;
35140 +
35141 + if ((tmp = (char *) acl_alloc(len)) == NULL)
35142 + return -ENOMEM;
35143 +
35144 + if (copy_from_user(tmp, o_tmp->filename, len))
35145 + return -EFAULT;
35146 + tmp[len-1] = '\0';
35147 + o_tmp->filename = tmp;
35148 +
35149 + insert_acl_obj_label(o_tmp, subj);
35150 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
35151 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
35152 + return -ENOMEM;
35153 +
35154 + ret = copy_user_glob(o_tmp);
35155 + if (ret)
35156 + return ret;
35157 +
35158 + if (o_tmp->nested) {
35159 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
35160 + if (IS_ERR(o_tmp->nested))
35161 + return PTR_ERR(o_tmp->nested);
35162 +
35163 + /* insert into nested subject list */
35164 + o_tmp->nested->next = role->hash->first;
35165 + role->hash->first = o_tmp->nested;
35166 + }
35167 + }
35168 +
35169 + return 0;
35170 +}
35171 +
35172 +static __u32
35173 +count_user_subjs(struct acl_subject_label *userp)
35174 +{
35175 + struct acl_subject_label s_tmp;
35176 + __u32 num = 0;
35177 +
35178 + while (userp) {
35179 + if (copy_from_user(&s_tmp, userp,
35180 + sizeof (struct acl_subject_label)))
35181 + break;
35182 +
35183 + userp = s_tmp.prev;
35184 + /* do not count nested subjects against this count, since
35185 + they are not included in the hash table, but are
35186 + attached to objects. We have already counted
35187 + the subjects in userspace for the allocation
35188 + stack
35189 + */
35190 + if (!(s_tmp.mode & GR_NESTED))
35191 + num++;
35192 + }
35193 +
35194 + return num;
35195 +}
35196 +
35197 +static int
35198 +copy_user_allowedips(struct acl_role_label *rolep)
35199 +{
35200 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
35201 +
35202 + ruserip = rolep->allowed_ips;
35203 +
35204 + while (ruserip) {
35205 + rlast = rtmp;
35206 +
35207 + if ((rtmp = (struct role_allowed_ip *)
35208 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
35209 + return -ENOMEM;
35210 +
35211 + if (copy_from_user(rtmp, ruserip,
35212 + sizeof (struct role_allowed_ip)))
35213 + return -EFAULT;
35214 +
35215 + ruserip = rtmp->prev;
35216 +
35217 + if (!rlast) {
35218 + rtmp->prev = NULL;
35219 + rolep->allowed_ips = rtmp;
35220 + } else {
35221 + rlast->next = rtmp;
35222 + rtmp->prev = rlast;
35223 + }
35224 +
35225 + if (!ruserip)
35226 + rtmp->next = NULL;
35227 + }
35228 +
35229 + return 0;
35230 +}
35231 +
35232 +static int
35233 +copy_user_transitions(struct acl_role_label *rolep)
35234 +{
35235 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
35236 +
35237 + unsigned int len;
35238 + char *tmp;
35239 +
35240 + rusertp = rolep->transitions;
35241 +
35242 + while (rusertp) {
35243 + rlast = rtmp;
35244 +
35245 + if ((rtmp = (struct role_transition *)
35246 + acl_alloc(sizeof (struct role_transition))) == NULL)
35247 + return -ENOMEM;
35248 +
35249 + if (copy_from_user(rtmp, rusertp,
35250 + sizeof (struct role_transition)))
35251 + return -EFAULT;
35252 +
35253 + rusertp = rtmp->prev;
35254 +
35255 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
35256 +
35257 + if (!len || len >= GR_SPROLE_LEN)
35258 + return -EINVAL;
35259 +
35260 + if ((tmp = (char *) acl_alloc(len)) == NULL)
35261 + return -ENOMEM;
35262 +
35263 + if (copy_from_user(tmp, rtmp->rolename, len))
35264 + return -EFAULT;
35265 + tmp[len-1] = '\0';
35266 + rtmp->rolename = tmp;
35267 +
35268 + if (!rlast) {
35269 + rtmp->prev = NULL;
35270 + rolep->transitions = rtmp;
35271 + } else {
35272 + rlast->next = rtmp;
35273 + rtmp->prev = rlast;
35274 + }
35275 +
35276 + if (!rusertp)
35277 + rtmp->next = NULL;
35278 + }
35279 +
35280 + return 0;
35281 +}
35282 +
35283 +static struct acl_subject_label *
35284 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
35285 +{
35286 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
35287 + unsigned int len;
35288 + char *tmp;
35289 + __u32 num_objs;
35290 + struct acl_ip_label **i_tmp, *i_utmp2;
35291 + struct gr_hash_struct ghash;
35292 + struct subject_map *subjmap;
35293 + unsigned int i_num;
35294 + int err;
35295 +
35296 + s_tmp = lookup_subject_map(userp);
35297 +
35298 + /* we've already copied this subject into the kernel, just return
35299 + the reference to it, and don't copy it over again
35300 + */
35301 + if (s_tmp)
35302 + return(s_tmp);
35303 +
35304 + if ((s_tmp = (struct acl_subject_label *)
35305 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
35306 + return ERR_PTR(-ENOMEM);
35307 +
35308 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
35309 + if (subjmap == NULL)
35310 + return ERR_PTR(-ENOMEM);
35311 +
35312 + subjmap->user = userp;
35313 + subjmap->kernel = s_tmp;
35314 + insert_subj_map_entry(subjmap);
35315 +
35316 + if (copy_from_user(s_tmp, userp,
35317 + sizeof (struct acl_subject_label)))
35318 + return ERR_PTR(-EFAULT);
35319 +
35320 + len = strnlen_user(s_tmp->filename, PATH_MAX);
35321 +
35322 + if (!len || len >= PATH_MAX)
35323 + return ERR_PTR(-EINVAL);
35324 +
35325 + if ((tmp = (char *) acl_alloc(len)) == NULL)
35326 + return ERR_PTR(-ENOMEM);
35327 +
35328 + if (copy_from_user(tmp, s_tmp->filename, len))
35329 + return ERR_PTR(-EFAULT);
35330 + tmp[len-1] = '\0';
35331 + s_tmp->filename = tmp;
35332 +
35333 + if (!strcmp(s_tmp->filename, "/"))
35334 + role->root_label = s_tmp;
35335 +
35336 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
35337 + return ERR_PTR(-EFAULT);
35338 +
35339 + /* copy user and group transition tables */
35340 +
35341 + if (s_tmp->user_trans_num) {
35342 + uid_t *uidlist;
35343 +
35344 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
35345 + if (uidlist == NULL)
35346 + return ERR_PTR(-ENOMEM);
35347 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
35348 + return ERR_PTR(-EFAULT);
35349 +
35350 + s_tmp->user_transitions = uidlist;
35351 + }
35352 +
35353 + if (s_tmp->group_trans_num) {
35354 + gid_t *gidlist;
35355 +
35356 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
35357 + if (gidlist == NULL)
35358 + return ERR_PTR(-ENOMEM);
35359 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
35360 + return ERR_PTR(-EFAULT);
35361 +
35362 + s_tmp->group_transitions = gidlist;
35363 + }
35364 +
35365 + /* set up object hash table */
35366 + num_objs = count_user_objs(ghash.first);
35367 +
35368 + s_tmp->obj_hash_size = num_objs;
35369 + s_tmp->obj_hash =
35370 + (struct acl_object_label **)
35371 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
35372 +
35373 + if (!s_tmp->obj_hash)
35374 + return ERR_PTR(-ENOMEM);
35375 +
35376 + memset(s_tmp->obj_hash, 0,
35377 + s_tmp->obj_hash_size *
35378 + sizeof (struct acl_object_label *));
35379 +
35380 + /* add in objects */
35381 + err = copy_user_objs(ghash.first, s_tmp, role);
35382 +
35383 + if (err)
35384 + return ERR_PTR(err);
35385 +
35386 + /* set pointer for parent subject */
35387 + if (s_tmp->parent_subject) {
35388 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
35389 +
35390 + if (IS_ERR(s_tmp2))
35391 + return s_tmp2;
35392 +
35393 + s_tmp->parent_subject = s_tmp2;
35394 + }
35395 +
35396 + /* add in ip acls */
35397 +
35398 + if (!s_tmp->ip_num) {
35399 + s_tmp->ips = NULL;
35400 + goto insert;
35401 + }
35402 +
35403 + i_tmp =
35404 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
35405 + sizeof (struct acl_ip_label *));
35406 +
35407 + if (!i_tmp)
35408 + return ERR_PTR(-ENOMEM);
35409 +
35410 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
35411 + *(i_tmp + i_num) =
35412 + (struct acl_ip_label *)
35413 + acl_alloc(sizeof (struct acl_ip_label));
35414 + if (!*(i_tmp + i_num))
35415 + return ERR_PTR(-ENOMEM);
35416 +
35417 + if (copy_from_user
35418 + (&i_utmp2, s_tmp->ips + i_num,
35419 + sizeof (struct acl_ip_label *)))
35420 + return ERR_PTR(-EFAULT);
35421 +
35422 + if (copy_from_user
35423 + (*(i_tmp + i_num), i_utmp2,
35424 + sizeof (struct acl_ip_label)))
35425 + return ERR_PTR(-EFAULT);
35426 +
35427 + if ((*(i_tmp + i_num))->iface == NULL)
35428 + continue;
35429 +
35430 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
35431 + if (!len || len >= IFNAMSIZ)
35432 + return ERR_PTR(-EINVAL);
35433 + tmp = acl_alloc(len);
35434 + if (tmp == NULL)
35435 + return ERR_PTR(-ENOMEM);
35436 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
35437 + return ERR_PTR(-EFAULT);
35438 + (*(i_tmp + i_num))->iface = tmp;
35439 + }
35440 +
35441 + s_tmp->ips = i_tmp;
35442 +
35443 +insert:
35444 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
35445 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
35446 + return ERR_PTR(-ENOMEM);
35447 +
35448 + return s_tmp;
35449 +}
35450 +
35451 +static int
35452 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
35453 +{
35454 + struct acl_subject_label s_pre;
35455 + struct acl_subject_label * ret;
35456 + int err;
35457 +
35458 + while (userp) {
35459 + if (copy_from_user(&s_pre, userp,
35460 + sizeof (struct acl_subject_label)))
35461 + return -EFAULT;
35462 +
35463 + /* do not add nested subjects here, add
35464 + while parsing objects
35465 + */
35466 +
35467 + if (s_pre.mode & GR_NESTED) {
35468 + userp = s_pre.prev;
35469 + continue;
35470 + }
35471 +
35472 + ret = do_copy_user_subj(userp, role);
35473 +
35474 + err = PTR_ERR(ret);
35475 + if (IS_ERR(ret))
35476 + return err;
35477 +
35478 + insert_acl_subj_label(ret, role);
35479 +
35480 + userp = s_pre.prev;
35481 + }
35482 +
35483 + return 0;
35484 +}
35485 +
35486 +static int
35487 +copy_user_acl(struct gr_arg *arg)
35488 +{
35489 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
35490 + struct sprole_pw *sptmp;
35491 + struct gr_hash_struct *ghash;
35492 + uid_t *domainlist;
35493 + unsigned int r_num;
35494 + unsigned int len;
35495 + char *tmp;
35496 + int err = 0;
35497 + __u16 i;
35498 + __u32 num_subjs;
35499 +
35500 + /* we need a default and kernel role */
35501 + if (arg->role_db.num_roles < 2)
35502 + return -EINVAL;
35503 +
35504 + /* copy special role authentication info from userspace */
35505 +
35506 + num_sprole_pws = arg->num_sprole_pws;
35507 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
35508 +
35509 + if (!acl_special_roles) {
35510 + err = -ENOMEM;
35511 + goto cleanup;
35512 + }
35513 +
35514 + for (i = 0; i < num_sprole_pws; i++) {
35515 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
35516 + if (!sptmp) {
35517 + err = -ENOMEM;
35518 + goto cleanup;
35519 + }
35520 + if (copy_from_user(sptmp, arg->sprole_pws + i,
35521 + sizeof (struct sprole_pw))) {
35522 + err = -EFAULT;
35523 + goto cleanup;
35524 + }
35525 +
35526 + len =
35527 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
35528 +
35529 + if (!len || len >= GR_SPROLE_LEN) {
35530 + err = -EINVAL;
35531 + goto cleanup;
35532 + }
35533 +
35534 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
35535 + err = -ENOMEM;
35536 + goto cleanup;
35537 + }
35538 +
35539 + if (copy_from_user(tmp, sptmp->rolename, len)) {
35540 + err = -EFAULT;
35541 + goto cleanup;
35542 + }
35543 + tmp[len-1] = '\0';
35544 +#ifdef CONFIG_GRKERNSEC_ACL_DEBUG
35545 + printk(KERN_ALERT "Copying special role %s\n", tmp);
35546 +#endif
35547 + sptmp->rolename = tmp;
35548 + acl_special_roles[i] = sptmp;
35549 + }
35550 +
35551 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
35552 +
35553 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
35554 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
35555 +
35556 + if (!r_tmp) {
35557 + err = -ENOMEM;
35558 + goto cleanup;
35559 + }
35560 +
35561 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
35562 + sizeof (struct acl_role_label *))) {
35563 + err = -EFAULT;
35564 + goto cleanup;
35565 + }
35566 +
35567 + if (copy_from_user(r_tmp, r_utmp2,
35568 + sizeof (struct acl_role_label))) {
35569 + err = -EFAULT;
35570 + goto cleanup;
35571 + }
35572 +
35573 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
35574 +
35575 + if (!len || len >= PATH_MAX) {
35576 + err = -EINVAL;
35577 + goto cleanup;
35578 + }
35579 +
35580 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
35581 + err = -ENOMEM;
35582 + goto cleanup;
35583 + }
35584 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
35585 + err = -EFAULT;
35586 + goto cleanup;
35587 + }
35588 + tmp[len-1] = '\0';
35589 + r_tmp->rolename = tmp;
35590 +
35591 + if (!strcmp(r_tmp->rolename, "default")
35592 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
35593 + default_role = r_tmp;
35594 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
35595 + kernel_role = r_tmp;
35596 + }
35597 +
35598 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
35599 + err = -ENOMEM;
35600 + goto cleanup;
35601 + }
35602 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
35603 + err = -EFAULT;
35604 + goto cleanup;
35605 + }
35606 +
35607 + r_tmp->hash = ghash;
35608 +
35609 + num_subjs = count_user_subjs(r_tmp->hash->first);
35610 +
35611 + r_tmp->subj_hash_size = num_subjs;
35612 + r_tmp->subj_hash =
35613 + (struct acl_subject_label **)
35614 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
35615 +
35616 + if (!r_tmp->subj_hash) {
35617 + err = -ENOMEM;
35618 + goto cleanup;
35619 + }
35620 +
35621 + err = copy_user_allowedips(r_tmp);
35622 + if (err)
35623 + goto cleanup;
35624 +
35625 + /* copy domain info */
35626 + if (r_tmp->domain_children != NULL) {
35627 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
35628 + if (domainlist == NULL) {
35629 + err = -ENOMEM;
35630 + goto cleanup;
35631 + }
35632 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
35633 + err = -EFAULT;
35634 + goto cleanup;
35635 + }
35636 + r_tmp->domain_children = domainlist;
35637 + }
35638 +
35639 + err = copy_user_transitions(r_tmp);
35640 + if (err)
35641 + goto cleanup;
35642 +
35643 + memset(r_tmp->subj_hash, 0,
35644 + r_tmp->subj_hash_size *
35645 + sizeof (struct acl_subject_label *));
35646 +
35647 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
35648 +
35649 + if (err)
35650 + goto cleanup;
35651 +
35652 + /* set nested subject list to null */
35653 + r_tmp->hash->first = NULL;
35654 +
35655 + insert_acl_role_label(r_tmp);
35656 + }
35657 +
35658 + goto return_err;
35659 + cleanup:
35660 + free_variables();
35661 + return_err:
35662 + return err;
35663 +
35664 +}
35665 +
35666 +static int
35667 +gracl_init(struct gr_arg *args)
35668 +{
35669 + int error = 0;
35670 +
35671 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
35672 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
35673 +
35674 + if (init_variables(args)) {
35675 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
35676 + error = -ENOMEM;
35677 + free_variables();
35678 + goto out;
35679 + }
35680 +
35681 + error = copy_user_acl(args);
35682 + free_init_variables();
35683 + if (error) {
35684 + free_variables();
35685 + goto out;
35686 + }
35687 +
35688 + if ((error = gr_set_acls(0))) {
35689 + free_variables();
35690 + goto out;
35691 + }
35692 +
35693 + pax_open_kernel();
35694 + gr_status |= GR_READY;
35695 + pax_close_kernel();
35696 +
35697 + out:
35698 + return error;
35699 +}
35700 +
35701 +/* derived from glibc fnmatch() 0: match, 1: no match*/
35702 +
35703 +static int
35704 +glob_match(const char *p, const char *n)
35705 +{
35706 + char c;
35707 +
35708 + while ((c = *p++) != '\0') {
35709 + switch (c) {
35710 + case '?':
35711 + if (*n == '\0')
35712 + return 1;
35713 + else if (*n == '/')
35714 + return 1;
35715 + break;
35716 + case '\\':
35717 + if (*n != c)
35718 + return 1;
35719 + break;
35720 + case '*':
35721 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
35722 + if (*n == '/')
35723 + return 1;
35724 + else if (c == '?') {
35725 + if (*n == '\0')
35726 + return 1;
35727 + else
35728 + ++n;
35729 + }
35730 + }
35731 + if (c == '\0') {
35732 + return 0;
35733 + } else {
35734 + const char *endp;
35735 +
35736 + if ((endp = strchr(n, '/')) == NULL)
35737 + endp = n + strlen(n);
35738 +
35739 + if (c == '[') {
35740 + for (--p; n < endp; ++n)
35741 + if (!glob_match(p, n))
35742 + return 0;
35743 + } else if (c == '/') {
35744 + while (*n != '\0' && *n != '/')
35745 + ++n;
35746 + if (*n == '/' && !glob_match(p, n + 1))
35747 + return 0;
35748 + } else {
35749 + for (--p; n < endp; ++n)
35750 + if (*n == c && !glob_match(p, n))
35751 + return 0;
35752 + }
35753 +
35754 + return 1;
35755 + }
35756 + case '[':
35757 + {
35758 + int not;
35759 + char cold;
35760 +
35761 + if (*n == '\0' || *n == '/')
35762 + return 1;
35763 +
35764 + not = (*p == '!' || *p == '^');
35765 + if (not)
35766 + ++p;
35767 +
35768 + c = *p++;
35769 + for (;;) {
35770 + unsigned char fn = (unsigned char)*n;
35771 +
35772 + if (c == '\0')
35773 + return 1;
35774 + else {
35775 + if (c == fn)
35776 + goto matched;
35777 + cold = c;
35778 + c = *p++;
35779 +
35780 + if (c == '-' && *p != ']') {
35781 + unsigned char cend = *p++;
35782 +
35783 + if (cend == '\0')
35784 + return 1;
35785 +
35786 + if (cold <= fn && fn <= cend)
35787 + goto matched;
35788 +
35789 + c = *p++;
35790 + }
35791 + }
35792 +
35793 + if (c == ']')
35794 + break;
35795 + }
35796 + if (!not)
35797 + return 1;
35798 + break;
35799 + matched:
35800 + while (c != ']') {
35801 + if (c == '\0')
35802 + return 1;
35803 +
35804 + c = *p++;
35805 + }
35806 + if (not)
35807 + return 1;
35808 + }
35809 + break;
35810 + default:
35811 + if (c != *n)
35812 + return 1;
35813 + }
35814 +
35815 + ++n;
35816 + }
35817 +
35818 + if (*n == '\0')
35819 + return 0;
35820 +
35821 + if (*n == '/')
35822 + return 0;
35823 +
35824 + return 1;
35825 +}
35826 +
35827 +static struct acl_object_label *
35828 +chk_glob_label(struct acl_object_label *globbed,
35829 + struct dentry *dentry, struct vfsmount *mnt, char **path)
35830 +{
35831 + struct acl_object_label *tmp;
35832 +
35833 + if (*path == NULL)
35834 + *path = gr_to_filename_nolock(dentry, mnt);
35835 +
35836 + tmp = globbed;
35837 +
35838 + while (tmp) {
35839 + if (!glob_match(tmp->filename, *path))
35840 + return tmp;
35841 + tmp = tmp->next;
35842 + }
35843 +
35844 + return NULL;
35845 +}
35846 +
35847 +static struct acl_object_label *
35848 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
35849 + const ino_t curr_ino, const dev_t curr_dev,
35850 + const struct acl_subject_label *subj, char **path, const int checkglob)
35851 +{
35852 + struct acl_subject_label *tmpsubj;
35853 + struct acl_object_label *retval;
35854 + struct acl_object_label *retval2;
35855 +
35856 + tmpsubj = (struct acl_subject_label *) subj;
35857 + read_lock(&gr_inode_lock);
35858 + do {
35859 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
35860 + if (retval) {
35861 + if (checkglob && retval->globbed) {
35862 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
35863 + (struct vfsmount *)orig_mnt, path);
35864 + if (retval2)
35865 + retval = retval2;
35866 + }
35867 + break;
35868 + }
35869 + } while ((tmpsubj = tmpsubj->parent_subject));
35870 + read_unlock(&gr_inode_lock);
35871 +
35872 + return retval;
35873 +}
35874 +
35875 +static __inline__ struct acl_object_label *
35876 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
35877 + const struct dentry *curr_dentry,
35878 + const struct acl_subject_label *subj, char **path, const int checkglob)
35879 +{
35880 + return __full_lookup(orig_dentry, orig_mnt,
35881 + curr_dentry->d_inode->i_ino,
35882 + curr_dentry->d_inode->i_sb->s_dev, subj, path, checkglob);
35883 +}
35884 +
35885 +static struct acl_object_label *
35886 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
35887 + const struct acl_subject_label *subj, char *path, const int checkglob)
35888 +{
35889 + struct dentry *dentry = (struct dentry *) l_dentry;
35890 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
35891 + struct acl_object_label *retval;
35892 +
35893 + spin_lock(&dcache_lock);
35894 +
35895 + if (unlikely(mnt == shm_mnt || mnt == pipe_mnt || mnt == sock_mnt ||
35896 +#ifdef CONFIG_HUGETLBFS
35897 + mnt == hugetlbfs_vfsmount ||
35898 +#endif
35899 + /* ignore Eric Biederman */
35900 + IS_PRIVATE(l_dentry->d_inode))) {
35901 + retval = fakefs_obj;
35902 + goto out;
35903 + }
35904 +
35905 + for (;;) {
35906 + if (dentry == real_root && mnt == real_root_mnt)
35907 + break;
35908 +
35909 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
35910 + if (mnt->mnt_parent == mnt)
35911 + break;
35912 +
35913 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
35914 + if (retval != NULL)
35915 + goto out;
35916 +
35917 + dentry = mnt->mnt_mountpoint;
35918 + mnt = mnt->mnt_parent;
35919 + continue;
35920 + }
35921 +
35922 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
35923 + if (retval != NULL)
35924 + goto out;
35925 +
35926 + dentry = dentry->d_parent;
35927 + }
35928 +
35929 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
35930 +
35931 + if (retval == NULL)
35932 + retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
35933 +out:
35934 + spin_unlock(&dcache_lock);
35935 + return retval;
35936 +}
35937 +
35938 +static __inline__ struct acl_object_label *
35939 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
35940 + const struct acl_subject_label *subj)
35941 +{
35942 + char *path = NULL;
35943 + return __chk_obj_label(l_dentry, l_mnt, subj, path, 1);
35944 +}
35945 +
35946 +static __inline__ struct acl_object_label *
35947 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
35948 + const struct acl_subject_label *subj)
35949 +{
35950 + char *path = NULL;
35951 + return __chk_obj_label(l_dentry, l_mnt, subj, path, 0);
35952 +}
35953 +
35954 +static __inline__ struct acl_object_label *
35955 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
35956 + const struct acl_subject_label *subj, char *path)
35957 +{
35958 + return __chk_obj_label(l_dentry, l_mnt, subj, path, 1);
35959 +}
35960 +
35961 +static struct acl_subject_label *
35962 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
35963 + const struct acl_role_label *role)
35964 +{
35965 + struct dentry *dentry = (struct dentry *) l_dentry;
35966 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
35967 + struct acl_subject_label *retval;
35968 +
35969 + spin_lock(&dcache_lock);
35970 +
35971 + for (;;) {
35972 + if (dentry == real_root && mnt == real_root_mnt)
35973 + break;
35974 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
35975 + if (mnt->mnt_parent == mnt)
35976 + break;
35977 +
35978 + read_lock(&gr_inode_lock);
35979 + retval =
35980 + lookup_acl_subj_label(dentry->d_inode->i_ino,
35981 + dentry->d_inode->i_sb->s_dev, role);
35982 + read_unlock(&gr_inode_lock);
35983 + if (retval != NULL)
35984 + goto out;
35985 +
35986 + dentry = mnt->mnt_mountpoint;
35987 + mnt = mnt->mnt_parent;
35988 + continue;
35989 + }
35990 +
35991 + read_lock(&gr_inode_lock);
35992 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
35993 + dentry->d_inode->i_sb->s_dev, role);
35994 + read_unlock(&gr_inode_lock);
35995 + if (retval != NULL)
35996 + goto out;
35997 +
35998 + dentry = dentry->d_parent;
35999 + }
36000 +
36001 + read_lock(&gr_inode_lock);
36002 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
36003 + dentry->d_inode->i_sb->s_dev, role);
36004 + read_unlock(&gr_inode_lock);
36005 +
36006 + if (unlikely(retval == NULL)) {
36007 + read_lock(&gr_inode_lock);
36008 + retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
36009 + real_root->d_inode->i_sb->s_dev, role);
36010 + read_unlock(&gr_inode_lock);
36011 + }
36012 +out:
36013 + spin_unlock(&dcache_lock);
36014 +
36015 + return retval;
36016 +}
36017 +
36018 +static void
36019 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
36020 +{
36021 + struct task_struct *task = current;
36022 + const struct cred *cred = current_cred();
36023 +
36024 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
36025 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
36026 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
36027 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->curr_ip);
36028 +
36029 + return;
36030 +}
36031 +
36032 +static void
36033 +gr_log_learn_sysctl(const char *path, const __u32 mode)
36034 +{
36035 + struct task_struct *task = current;
36036 + const struct cred *cred = current_cred();
36037 +
36038 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
36039 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
36040 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
36041 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->curr_ip);
36042 +
36043 + return;
36044 +}
36045 +
36046 +static void
36047 +gr_log_learn_id_change(const char type, const unsigned int real,
36048 + const unsigned int effective, const unsigned int fs)
36049 +{
36050 + struct task_struct *task = current;
36051 + const struct cred *cred = current_cred();
36052 +
36053 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
36054 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
36055 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
36056 + type, real, effective, fs, &task->signal->curr_ip);
36057 +
36058 + return;
36059 +}
36060 +
36061 +__u32
36062 +gr_check_link(const struct dentry * new_dentry,
36063 + const struct dentry * parent_dentry,
36064 + const struct vfsmount * parent_mnt,
36065 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
36066 +{
36067 + struct acl_object_label *obj;
36068 + __u32 oldmode, newmode;
36069 + __u32 needmode;
36070 +
36071 + if (unlikely(!(gr_status & GR_READY)))
36072 + return (GR_CREATE | GR_LINK);
36073 +
36074 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
36075 + oldmode = obj->mode;
36076 +
36077 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
36078 + oldmode |= (GR_CREATE | GR_LINK);
36079 +
36080 + needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
36081 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
36082 + needmode |= GR_SETID | GR_AUDIT_SETID;
36083 +
36084 + newmode =
36085 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
36086 + oldmode | needmode);
36087 +
36088 + needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
36089 + GR_SETID | GR_READ | GR_FIND | GR_DELETE |
36090 + GR_INHERIT | GR_AUDIT_INHERIT);
36091 +
36092 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
36093 + goto bad;
36094 +
36095 + if ((oldmode & needmode) != needmode)
36096 + goto bad;
36097 +
36098 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
36099 + if ((newmode & needmode) != needmode)
36100 + goto bad;
36101 +
36102 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
36103 + return newmode;
36104 +bad:
36105 + needmode = oldmode;
36106 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
36107 + needmode |= GR_SETID;
36108 +
36109 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
36110 + gr_log_learn(old_dentry, old_mnt, needmode);
36111 + return (GR_CREATE | GR_LINK);
36112 + } else if (newmode & GR_SUPPRESS)
36113 + return GR_SUPPRESS;
36114 + else
36115 + return 0;
36116 +}
36117 +
36118 +__u32
36119 +gr_search_file(const struct dentry * dentry, const __u32 mode,
36120 + const struct vfsmount * mnt)
36121 +{
36122 + __u32 retval = mode;
36123 + struct acl_subject_label *curracl;
36124 + struct acl_object_label *currobj;
36125 +
36126 + if (unlikely(!(gr_status & GR_READY)))
36127 + return (mode & ~GR_AUDITS);
36128 +
36129 + curracl = current->acl;
36130 +
36131 + currobj = chk_obj_label(dentry, mnt, curracl);
36132 + retval = currobj->mode & mode;
36133 +
36134 + if (unlikely
36135 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
36136 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
36137 + __u32 new_mode = mode;
36138 +
36139 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
36140 +
36141 + retval = new_mode;
36142 +
36143 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
36144 + new_mode |= GR_INHERIT;
36145 +
36146 + if (!(mode & GR_NOLEARN))
36147 + gr_log_learn(dentry, mnt, new_mode);
36148 + }
36149 +
36150 + return retval;
36151 +}
36152 +
36153 +__u32
36154 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
36155 + const struct vfsmount * mnt, const __u32 mode)
36156 +{
36157 + struct name_entry *match;
36158 + struct acl_object_label *matchpo;
36159 + struct acl_subject_label *curracl;
36160 + char *path;
36161 + __u32 retval;
36162 +
36163 + if (unlikely(!(gr_status & GR_READY)))
36164 + return (mode & ~GR_AUDITS);
36165 +
36166 + preempt_disable();
36167 + path = gr_to_filename_rbac(new_dentry, mnt);
36168 + match = lookup_name_entry_create(path);
36169 +
36170 + if (!match)
36171 + goto check_parent;
36172 +
36173 + curracl = current->acl;
36174 +
36175 + read_lock(&gr_inode_lock);
36176 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
36177 + read_unlock(&gr_inode_lock);
36178 +
36179 + if (matchpo) {
36180 + if ((matchpo->mode & mode) !=
36181 + (mode & ~(GR_AUDITS | GR_SUPPRESS))
36182 + && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
36183 + __u32 new_mode = mode;
36184 +
36185 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
36186 +
36187 + gr_log_learn(new_dentry, mnt, new_mode);
36188 +
36189 + preempt_enable();
36190 + return new_mode;
36191 + }
36192 + preempt_enable();
36193 + return (matchpo->mode & mode);
36194 + }
36195 +
36196 + check_parent:
36197 + curracl = current->acl;
36198 +
36199 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
36200 + retval = matchpo->mode & mode;
36201 +
36202 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
36203 + && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
36204 + __u32 new_mode = mode;
36205 +
36206 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
36207 +
36208 + gr_log_learn(new_dentry, mnt, new_mode);
36209 + preempt_enable();
36210 + return new_mode;
36211 + }
36212 +
36213 + preempt_enable();
36214 + return retval;
36215 +}
36216 +
36217 +int
36218 +gr_check_hidden_task(const struct task_struct *task)
36219 +{
36220 + if (unlikely(!(gr_status & GR_READY)))
36221 + return 0;
36222 +
36223 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
36224 + return 1;
36225 +
36226 + return 0;
36227 +}
36228 +
36229 +int
36230 +gr_check_protected_task(const struct task_struct *task)
36231 +{
36232 + if (unlikely(!(gr_status & GR_READY) || !task))
36233 + return 0;
36234 +
36235 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
36236 + task->acl != current->acl)
36237 + return 1;
36238 +
36239 + return 0;
36240 +}
36241 +
36242 +int
36243 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
36244 +{
36245 + struct task_struct *p;
36246 + int ret = 0;
36247 +
36248 + if (unlikely(!(gr_status & GR_READY) || !pid))
36249 + return ret;
36250 +
36251 + read_lock(&tasklist_lock);
36252 + do_each_pid_task(pid, type, p) {
36253 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
36254 + p->acl != current->acl) {
36255 + ret = 1;
36256 + goto out;
36257 + }
36258 + } while_each_pid_task(pid, type, p);
36259 +out:
36260 + read_unlock(&tasklist_lock);
36261 +
36262 + return ret;
36263 +}
36264 +
36265 +void
36266 +gr_copy_label(struct task_struct *tsk)
36267 +{
36268 + tsk->signal->used_accept = 0;
36269 + tsk->acl_sp_role = 0;
36270 + tsk->acl_role_id = current->acl_role_id;
36271 + tsk->acl = current->acl;
36272 + tsk->role = current->role;
36273 + tsk->signal->curr_ip = current->signal->curr_ip;
36274 + if (current->exec_file)
36275 + get_file(current->exec_file);
36276 + tsk->exec_file = current->exec_file;
36277 + tsk->is_writable = current->is_writable;
36278 + if (unlikely(current->signal->used_accept))
36279 + current->signal->curr_ip = 0;
36280 +
36281 + return;
36282 +}
36283 +
36284 +static void
36285 +gr_set_proc_res(struct task_struct *task)
36286 +{
36287 + struct acl_subject_label *proc;
36288 + unsigned short i;
36289 +
36290 + proc = task->acl;
36291 +
36292 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
36293 + return;
36294 +
36295 + for (i = 0; i < RLIM_NLIMITS; i++) {
36296 + if (!(proc->resmask & (1 << i)))
36297 + continue;
36298 +
36299 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
36300 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
36301 + }
36302 +
36303 + return;
36304 +}
36305 +
36306 +int
36307 +gr_check_user_change(int real, int effective, int fs)
36308 +{
36309 + unsigned int i;
36310 + __u16 num;
36311 + uid_t *uidlist;
36312 + int curuid;
36313 + int realok = 0;
36314 + int effectiveok = 0;
36315 + int fsok = 0;
36316 +
36317 + if (unlikely(!(gr_status & GR_READY)))
36318 + return 0;
36319 +
36320 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
36321 + gr_log_learn_id_change('u', real, effective, fs);
36322 +
36323 + num = current->acl->user_trans_num;
36324 + uidlist = current->acl->user_transitions;
36325 +
36326 + if (uidlist == NULL)
36327 + return 0;
36328 +
36329 + if (real == -1)
36330 + realok = 1;
36331 + if (effective == -1)
36332 + effectiveok = 1;
36333 + if (fs == -1)
36334 + fsok = 1;
36335 +
36336 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
36337 + for (i = 0; i < num; i++) {
36338 + curuid = (int)uidlist[i];
36339 + if (real == curuid)
36340 + realok = 1;
36341 + if (effective == curuid)
36342 + effectiveok = 1;
36343 + if (fs == curuid)
36344 + fsok = 1;
36345 + }
36346 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
36347 + for (i = 0; i < num; i++) {
36348 + curuid = (int)uidlist[i];
36349 + if (real == curuid)
36350 + break;
36351 + if (effective == curuid)
36352 + break;
36353 + if (fs == curuid)
36354 + break;
36355 + }
36356 + /* not in deny list */
36357 + if (i == num) {
36358 + realok = 1;
36359 + effectiveok = 1;
36360 + fsok = 1;
36361 + }
36362 + }
36363 +
36364 + if (realok && effectiveok && fsok)
36365 + return 0;
36366 + else {
36367 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
36368 + return 1;
36369 + }
36370 +}
36371 +
36372 +int
36373 +gr_check_group_change(int real, int effective, int fs)
36374 +{
36375 + unsigned int i;
36376 + __u16 num;
36377 + gid_t *gidlist;
36378 + int curgid;
36379 + int realok = 0;
36380 + int effectiveok = 0;
36381 + int fsok = 0;
36382 +
36383 + if (unlikely(!(gr_status & GR_READY)))
36384 + return 0;
36385 +
36386 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
36387 + gr_log_learn_id_change('g', real, effective, fs);
36388 +
36389 + num = current->acl->group_trans_num;
36390 + gidlist = current->acl->group_transitions;
36391 +
36392 + if (gidlist == NULL)
36393 + return 0;
36394 +
36395 + if (real == -1)
36396 + realok = 1;
36397 + if (effective == -1)
36398 + effectiveok = 1;
36399 + if (fs == -1)
36400 + fsok = 1;
36401 +
36402 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
36403 + for (i = 0; i < num; i++) {
36404 + curgid = (int)gidlist[i];
36405 + if (real == curgid)
36406 + realok = 1;
36407 + if (effective == curgid)
36408 + effectiveok = 1;
36409 + if (fs == curgid)
36410 + fsok = 1;
36411 + }
36412 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
36413 + for (i = 0; i < num; i++) {
36414 + curgid = (int)gidlist[i];
36415 + if (real == curgid)
36416 + break;
36417 + if (effective == curgid)
36418 + break;
36419 + if (fs == curgid)
36420 + break;
36421 + }
36422 + /* not in deny list */
36423 + if (i == num) {
36424 + realok = 1;
36425 + effectiveok = 1;
36426 + fsok = 1;
36427 + }
36428 + }
36429 +
36430 + if (realok && effectiveok && fsok)
36431 + return 0;
36432 + else {
36433 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
36434 + return 1;
36435 + }
36436 +}
36437 +
36438 +void
36439 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
36440 +{
36441 + struct acl_role_label *role = task->role;
36442 + struct acl_subject_label *subj = NULL;
36443 + struct acl_object_label *obj;
36444 + struct file *filp;
36445 +
36446 + if (unlikely(!(gr_status & GR_READY)))
36447 + return;
36448 +
36449 + filp = task->exec_file;
36450 +
36451 + /* kernel process, we'll give them the kernel role */
36452 + if (unlikely(!filp)) {
36453 + task->role = kernel_role;
36454 + task->acl = kernel_role->root_label;
36455 + return;
36456 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
36457 + role = lookup_acl_role_label(task, uid, gid);
36458 +
36459 + /* perform subject lookup in possibly new role
36460 + we can use this result below in the case where role == task->role
36461 + */
36462 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
36463 +
36464 + /* if we changed uid/gid, but result in the same role
36465 + and are using inheritance, don't lose the inherited subject
36466 + if current subject is other than what normal lookup
36467 + would result in, we arrived via inheritance, don't
36468 + lose subject
36469 + */
36470 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
36471 + (subj == task->acl)))
36472 + task->acl = subj;
36473 +
36474 + task->role = role;
36475 +
36476 + task->is_writable = 0;
36477 +
36478 + /* ignore additional mmap checks for processes that are writable
36479 + by the default ACL */
36480 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
36481 + if (unlikely(obj->mode & GR_WRITE))
36482 + task->is_writable = 1;
36483 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
36484 + if (unlikely(obj->mode & GR_WRITE))
36485 + task->is_writable = 1;
36486 +
36487 +#ifdef CONFIG_GRKERNSEC_ACL_DEBUG
36488 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
36489 +#endif
36490 +
36491 + gr_set_proc_res(task);
36492 +
36493 + return;
36494 +}
36495 +
36496 +int
36497 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
36498 + const int unsafe_share)
36499 +{
36500 + struct task_struct *task = current;
36501 + struct acl_subject_label *newacl;
36502 + struct acl_object_label *obj;
36503 + __u32 retmode;
36504 +
36505 + if (unlikely(!(gr_status & GR_READY)))
36506 + return 0;
36507 +
36508 + newacl = chk_subj_label(dentry, mnt, task->role);
36509 +
36510 + task_lock(task);
36511 + if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
36512 + !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
36513 + !(task->role->roletype & GR_ROLE_GOD) &&
36514 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
36515 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
36516 + task_unlock(task);
36517 + if (unsafe_share)
36518 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
36519 + else
36520 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
36521 + return -EACCES;
36522 + }
36523 + task_unlock(task);
36524 +
36525 + obj = chk_obj_label(dentry, mnt, task->acl);
36526 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
36527 +
36528 + if (!(task->acl->mode & GR_INHERITLEARN) &&
36529 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
36530 + if (obj->nested)
36531 + task->acl = obj->nested;
36532 + else
36533 + task->acl = newacl;
36534 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
36535 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
36536 +
36537 + task->is_writable = 0;
36538 +
36539 + /* ignore additional mmap checks for processes that are writable
36540 + by the default ACL */
36541 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
36542 + if (unlikely(obj->mode & GR_WRITE))
36543 + task->is_writable = 1;
36544 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
36545 + if (unlikely(obj->mode & GR_WRITE))
36546 + task->is_writable = 1;
36547 +
36548 + gr_set_proc_res(task);
36549 +
36550 +#ifdef CONFIG_GRKERNSEC_ACL_DEBUG
36551 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
36552 +#endif
36553 + return 0;
36554 +}
36555 +
36556 +/* always called with valid inodev ptr */
36557 +static void
36558 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
36559 +{
36560 + struct acl_object_label *matchpo;
36561 + struct acl_subject_label *matchps;
36562 + struct acl_subject_label *subj;
36563 + struct acl_role_label *role;
36564 + unsigned int x;
36565 +
36566 + FOR_EACH_ROLE_START(role)
36567 + FOR_EACH_SUBJECT_START(role, subj, x)
36568 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
36569 + matchpo->mode |= GR_DELETED;
36570 + FOR_EACH_SUBJECT_END(subj,x)
36571 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
36572 + if (subj->inode == ino && subj->device == dev)
36573 + subj->mode |= GR_DELETED;
36574 + FOR_EACH_NESTED_SUBJECT_END(subj)
36575 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
36576 + matchps->mode |= GR_DELETED;
36577 + FOR_EACH_ROLE_END(role)
36578 +
36579 + inodev->nentry->deleted = 1;
36580 +
36581 + return;
36582 +}
36583 +
36584 +void
36585 +gr_handle_delete(const ino_t ino, const dev_t dev)
36586 +{
36587 + struct inodev_entry *inodev;
36588 +
36589 + if (unlikely(!(gr_status & GR_READY)))
36590 + return;
36591 +
36592 + write_lock(&gr_inode_lock);
36593 + inodev = lookup_inodev_entry(ino, dev);
36594 + if (inodev != NULL)
36595 + do_handle_delete(inodev, ino, dev);
36596 + write_unlock(&gr_inode_lock);
36597 +
36598 + return;
36599 +}
36600 +
36601 +static void
36602 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
36603 + const ino_t newinode, const dev_t newdevice,
36604 + struct acl_subject_label *subj)
36605 +{
36606 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
36607 + struct acl_object_label *match;
36608 +
36609 + match = subj->obj_hash[index];
36610 +
36611 + while (match && (match->inode != oldinode ||
36612 + match->device != olddevice ||
36613 + !(match->mode & GR_DELETED)))
36614 + match = match->next;
36615 +
36616 + if (match && (match->inode == oldinode)
36617 + && (match->device == olddevice)
36618 + && (match->mode & GR_DELETED)) {
36619 + if (match->prev == NULL) {
36620 + subj->obj_hash[index] = match->next;
36621 + if (match->next != NULL)
36622 + match->next->prev = NULL;
36623 + } else {
36624 + match->prev->next = match->next;
36625 + if (match->next != NULL)
36626 + match->next->prev = match->prev;
36627 + }
36628 + match->prev = NULL;
36629 + match->next = NULL;
36630 + match->inode = newinode;
36631 + match->device = newdevice;
36632 + match->mode &= ~GR_DELETED;
36633 +
36634 + insert_acl_obj_label(match, subj);
36635 + }
36636 +
36637 + return;
36638 +}
36639 +
36640 +static void
36641 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
36642 + const ino_t newinode, const dev_t newdevice,
36643 + struct acl_role_label *role)
36644 +{
36645 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
36646 + struct acl_subject_label *match;
36647 +
36648 + match = role->subj_hash[index];
36649 +
36650 + while (match && (match->inode != oldinode ||
36651 + match->device != olddevice ||
36652 + !(match->mode & GR_DELETED)))
36653 + match = match->next;
36654 +
36655 + if (match && (match->inode == oldinode)
36656 + && (match->device == olddevice)
36657 + && (match->mode & GR_DELETED)) {
36658 + if (match->prev == NULL) {
36659 + role->subj_hash[index] = match->next;
36660 + if (match->next != NULL)
36661 + match->next->prev = NULL;
36662 + } else {
36663 + match->prev->next = match->next;
36664 + if (match->next != NULL)
36665 + match->next->prev = match->prev;
36666 + }
36667 + match->prev = NULL;
36668 + match->next = NULL;
36669 + match->inode = newinode;
36670 + match->device = newdevice;
36671 + match->mode &= ~GR_DELETED;
36672 +
36673 + insert_acl_subj_label(match, role);
36674 + }
36675 +
36676 + return;
36677 +}
36678 +
36679 +static void
36680 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
36681 + const ino_t newinode, const dev_t newdevice)
36682 +{
36683 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
36684 + struct inodev_entry *match;
36685 +
36686 + match = inodev_set.i_hash[index];
36687 +
36688 + while (match && (match->nentry->inode != oldinode ||
36689 + match->nentry->device != olddevice || !match->nentry->deleted))
36690 + match = match->next;
36691 +
36692 + if (match && (match->nentry->inode == oldinode)
36693 + && (match->nentry->device == olddevice) &&
36694 + match->nentry->deleted) {
36695 + if (match->prev == NULL) {
36696 + inodev_set.i_hash[index] = match->next;
36697 + if (match->next != NULL)
36698 + match->next->prev = NULL;
36699 + } else {
36700 + match->prev->next = match->next;
36701 + if (match->next != NULL)
36702 + match->next->prev = match->prev;
36703 + }
36704 + match->prev = NULL;
36705 + match->next = NULL;
36706 + match->nentry->inode = newinode;
36707 + match->nentry->device = newdevice;
36708 + match->nentry->deleted = 0;
36709 +
36710 + insert_inodev_entry(match);
36711 + }
36712 +
36713 + return;
36714 +}
36715 +
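+/* called with gr_inode_lock held for writing: for every role, re-bind the
+ * subject and object labels that were declared against the policy-time
+ * inode/device of matchn's pathname to the inode/device of the file just
+ * created at that path, then update the inodev table entry itself.
+ */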
36716 +static void
36717 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
36718 + const struct vfsmount *mnt)
36719 +{
36720 + struct acl_subject_label *subj;
36721 + struct acl_role_label *role;
36722 + unsigned int x;
36723 +
36724 + FOR_EACH_ROLE_START(role)
36725 + update_acl_subj_label(matchn->inode, matchn->device,
36726 + dentry->d_inode->i_ino,
36727 + dentry->d_inode->i_sb->s_dev, role);
36728 +
36729 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
36730 + if ((subj->inode == dentry->d_inode->i_ino) &&
36731 + (subj->device == dentry->d_inode->i_sb->s_dev)) {
36732 + subj->inode = dentry->d_inode->i_ino;
36733 + subj->device = dentry->d_inode->i_sb->s_dev;
36734 + }
36735 + FOR_EACH_NESTED_SUBJECT_END(subj)
36736 + FOR_EACH_SUBJECT_START(role, subj, x)
36737 + update_acl_obj_label(matchn->inode, matchn->device,
36738 + dentry->d_inode->i_ino,
36739 + dentry->d_inode->i_sb->s_dev, subj);
36740 + FOR_EACH_SUBJECT_END(subj,x)
36741 + FOR_EACH_ROLE_END(role)
36742 +
36743 + update_inodev_entry(matchn->inode, matchn->device,
36744 + dentry->d_inode->i_ino, dentry->d_inode->i_sb->s_dev);
36745 +
36746 + return;
36747 +}
36748 +
36749 +void
36750 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
36751 +{
36752 + struct name_entry *matchn;
36753 +
36754 + if (unlikely(!(gr_status & GR_READY)))
36755 + return;
36756 +
36757 + preempt_disable();
36758 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
36759 +
36760 + if (unlikely((unsigned long)matchn)) {
36761 + write_lock(&gr_inode_lock);
36762 + do_handle_create(matchn, dentry, mnt);
36763 + write_unlock(&gr_inode_lock);
36764 + }
36765 + preempt_enable();
36766 +
36767 + return;
36768 +}
36769 +
36770 +void
36771 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
36772 + struct dentry *old_dentry,
36773 + struct dentry *new_dentry,
36774 + struct vfsmount *mnt, const __u8 replace)
36775 +{
36776 + struct name_entry *matchn;
36777 + struct inodev_entry *inodev;
36778 +
36779 +	/* vfs_rename has already swapped the name and parent link for
36780 +	   old_dentry and new_dentry.
36781 +	   At this point, old_dentry holds the new name, parent link, and
36782 +	   inode of the renamed file.
36783 +	   If a file is being replaced by the rename, new_dentry holds the
36784 +	   inode and name of the replaced file.
36785 +	*/
36786 +
36787 + if (unlikely(!(gr_status & GR_READY)))
36788 + return;
36789 +
36790 + preempt_disable();
36791 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
36792 +
36793 + /* we wouldn't have to check d_inode if it weren't for
36794 + NFS silly-renaming
36795 + */
36796 +
36797 + write_lock(&gr_inode_lock);
36798 + if (unlikely(replace && new_dentry->d_inode)) {
36799 + inodev = lookup_inodev_entry(new_dentry->d_inode->i_ino,
36800 + new_dentry->d_inode->i_sb->s_dev);
36801 + if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
36802 + do_handle_delete(inodev, new_dentry->d_inode->i_ino,
36803 + new_dentry->d_inode->i_sb->s_dev);
36804 + }
36805 +
36806 + inodev = lookup_inodev_entry(old_dentry->d_inode->i_ino,
36807 + old_dentry->d_inode->i_sb->s_dev);
36808 + if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
36809 + do_handle_delete(inodev, old_dentry->d_inode->i_ino,
36810 + old_dentry->d_inode->i_sb->s_dev);
36811 +
36812 + if (unlikely((unsigned long)matchn))
36813 + do_handle_create(matchn, old_dentry, mnt);
36814 +
36815 + write_unlock(&gr_inode_lock);
36816 + preempt_enable();
36817 +
36818 + return;
36819 +}
36820 +
36821 +static int
36822 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
36823 + unsigned char **sum)
36824 +{
36825 + struct acl_role_label *r;
36826 + struct role_allowed_ip *ipp;
36827 + struct role_transition *trans;
36828 + unsigned int i;
36829 + int found = 0;
36830 +
36831 + /* check transition table */
36832 +
36833 + for (trans = current->role->transitions; trans; trans = trans->next) {
36834 + if (!strcmp(rolename, trans->rolename)) {
36835 + found = 1;
36836 + break;
36837 + }
36838 + }
36839 +
36840 + if (!found)
36841 + return 0;
36842 +
36843 + /* handle special roles that do not require authentication
36844 + and check ip */
36845 +
36846 + FOR_EACH_ROLE_START(r)
36847 + if (!strcmp(rolename, r->rolename) &&
36848 + (r->roletype & GR_ROLE_SPECIAL)) {
36849 + found = 0;
36850 + if (r->allowed_ips != NULL) {
36851 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
36852 + if ((ntohl(current->signal->curr_ip) & ipp->netmask) ==
36853 + (ntohl(ipp->addr) & ipp->netmask))
36854 + found = 1;
36855 + }
36856 + } else
36857 + found = 2;
36858 + if (!found)
36859 + return 0;
36860 +
36861 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
36862 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
36863 + *salt = NULL;
36864 + *sum = NULL;
36865 + return 1;
36866 + }
36867 + }
36868 + FOR_EACH_ROLE_END(r)
36869 +
36870 + for (i = 0; i < num_sprole_pws; i++) {
36871 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
36872 + *salt = acl_special_roles[i]->salt;
36873 + *sum = acl_special_roles[i]->sum;
36874 + return 1;
36875 + }
36876 + }
36877 +
36878 + return 0;
36879 +}
36880 +
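+/* grant a special role to the parent of the current task (typically the
+ * shell that invoked the userspace policy tool): bump the global role id,
+ * switch the parent's role, re-resolve its subject from its exec_file, and
+ * recompute whether that binary is writable under the default or new role.
+ */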
36881 +static void
36882 +assign_special_role(char *rolename)
36883 +{
36884 + struct acl_object_label *obj;
36885 + struct acl_role_label *r;
36886 + struct acl_role_label *assigned = NULL;
36887 + struct task_struct *tsk;
36888 + struct file *filp;
36889 +
36890 + FOR_EACH_ROLE_START(r)
36891 + if (!strcmp(rolename, r->rolename) &&
36892 + (r->roletype & GR_ROLE_SPECIAL)) {
36893 + assigned = r;
36894 + break;
36895 + }
36896 + FOR_EACH_ROLE_END(r)
36897 +
36898 + if (!assigned)
36899 + return;
36900 +
36901 + read_lock(&tasklist_lock);
36902 + read_lock(&grsec_exec_file_lock);
36903 +
36904 + tsk = current->parent;
36905 + if (tsk == NULL)
36906 + goto out_unlock;
36907 +
36908 + filp = tsk->exec_file;
36909 + if (filp == NULL)
36910 + goto out_unlock;
36911 +
36912 + tsk->is_writable = 0;
36913 +
36914 + tsk->acl_sp_role = 1;
36915 + tsk->acl_role_id = ++acl_sp_role_value;
36916 + tsk->role = assigned;
36917 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
36918 +
36919 + /* ignore additional mmap checks for processes that are writable
36920 + by the default ACL */
36921 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
36922 + if (unlikely(obj->mode & GR_WRITE))
36923 + tsk->is_writable = 1;
36924 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
36925 + if (unlikely(obj->mode & GR_WRITE))
36926 + tsk->is_writable = 1;
36927 +
36928 +#ifdef CONFIG_GRKERNSEC_ACL_DEBUG
36929 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
36930 +#endif
36931 +
36932 +out_unlock:
36933 + read_unlock(&grsec_exec_file_lock);
36934 + read_unlock(&tasklist_lock);
36935 + return;
36936 +}
36937 +
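+/* scan all other processes (excluding those sharing the task's tty) for an
+ * open file referring to the character device of the task's controlling
+ * tty: if such a process exists and is not an ancestor of the task, log it
+ * as a potential tty sniffer, hand it to gr_handle_alertkill(), and return
+ * 0; otherwise return 1 (the terminal is considered secure).
+ */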
36938 +int gr_check_secure_terminal(struct task_struct *task)
36939 +{
36940 + struct task_struct *p, *p2, *p3;
36941 + struct files_struct *files;
36942 + struct fdtable *fdt;
36943 + struct file *our_file = NULL, *file;
36944 + int i;
36945 +
36946 + if (task->signal->tty == NULL)
36947 + return 1;
36948 +
36949 + files = get_files_struct(task);
36950 + if (files != NULL) {
36951 + rcu_read_lock();
36952 + fdt = files_fdtable(files);
36953 + for (i=0; i < fdt->max_fds; i++) {
36954 + file = fcheck_files(files, i);
36955 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
36956 + get_file(file);
36957 + our_file = file;
36958 + }
36959 + }
36960 + rcu_read_unlock();
36961 + put_files_struct(files);
36962 + }
36963 +
36964 + if (our_file == NULL)
36965 + return 1;
36966 +
36967 + read_lock(&tasklist_lock);
36968 + do_each_thread(p2, p) {
36969 + files = get_files_struct(p);
36970 + if (files == NULL ||
36971 + (p->signal && p->signal->tty == task->signal->tty)) {
36972 + if (files != NULL)
36973 + put_files_struct(files);
36974 + continue;
36975 + }
36976 + rcu_read_lock();
36977 + fdt = files_fdtable(files);
36978 + for (i=0; i < fdt->max_fds; i++) {
36979 + file = fcheck_files(files, i);
36980 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
36981 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
36982 + p3 = task;
36983 + while (p3->pid > 0) {
36984 + if (p3 == p)
36985 + break;
36986 + p3 = p3->parent;
36987 + }
36988 + if (p3 == p)
36989 + break;
36990 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
36991 + gr_handle_alertkill(p);
36992 + rcu_read_unlock();
36993 + put_files_struct(files);
36994 + read_unlock(&tasklist_lock);
36995 + fput(our_file);
36996 + return 0;
36997 + }
36998 + }
36999 + rcu_read_unlock();
37000 + put_files_struct(files);
37001 + } while_each_thread(p2, p);
37002 + read_unlock(&tasklist_lock);
37003 +
37004 + fput(our_file);
37005 + return 1;
37006 +}
37007 +
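+/* handles a control request written from userspace: copies in a
+ * gr_arg_wrapper, enforces the authentication attempt limit and lockout
+ * window, then dispatches on the requested mode (status query, enable,
+ * shutdown, reload, segvmod reset, special role assumption/unassumption).
+ */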
37008 +ssize_t
37009 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
37010 +{
37011 + struct gr_arg_wrapper uwrap;
37012 + unsigned char *sprole_salt = NULL;
37013 + unsigned char *sprole_sum = NULL;
37014 + int error = sizeof (struct gr_arg_wrapper);
37015 + int error2 = 0;
37016 +
37017 + down(&gr_dev_sem);
37018 +
37019 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
37020 + error = -EPERM;
37021 + goto out;
37022 + }
37023 +
37024 + if (count != sizeof (struct gr_arg_wrapper)) {
37025 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
37026 + error = -EINVAL;
37027 + goto out;
37028 + }
37029 +
37030 +
37031 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
37032 + gr_auth_expires = 0;
37033 + gr_auth_attempts = 0;
37034 + }
37035 +
37036 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
37037 + error = -EFAULT;
37038 + goto out;
37039 + }
37040 +
37041 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
37042 + error = -EINVAL;
37043 + goto out;
37044 + }
37045 +
37046 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
37047 + error = -EFAULT;
37048 + goto out;
37049 + }
37050 +
37051 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
37052 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
37053 + time_after(gr_auth_expires, get_seconds())) {
37054 + error = -EBUSY;
37055 + goto out;
37056 + }
37057 +
37058 +	/* if a non-root user is trying to do anything other than use a
37059 +	   special role, do not attempt authentication and do not count the
37060 +	   attempt towards authentication lockout
37061 +	 */
37062 +
37063 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
37064 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
37065 + current_uid()) {
37066 + error = -EPERM;
37067 + goto out;
37068 + }
37069 +
37070 + /* ensure pw and special role name are null terminated */
37071 +
37072 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
37073 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
37074 +
37075 +	/* Okay.
37076 +	 * We have enough of the argument structure (we have yet to
37077 +	 * copy_from_user the tables themselves).  Copy the tables
37078 +	 * only if we need them, i.e. for loading operations. */
37079 +
37080 + switch (gr_usermode->mode) {
37081 + case GR_STATUS:
37082 + if (gr_status & GR_READY) {
37083 + error = 1;
37084 + if (!gr_check_secure_terminal(current))
37085 + error = 3;
37086 + } else
37087 + error = 2;
37088 + goto out;
37089 + case GR_SHUTDOWN:
37090 + if ((gr_status & GR_READY)
37091 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
37092 + pax_open_kernel();
37093 + gr_status &= ~GR_READY;
37094 + pax_close_kernel();
37095 +
37096 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
37097 + free_variables();
37098 + memset(gr_usermode, 0, sizeof (struct gr_arg));
37099 + memset(gr_system_salt, 0, GR_SALT_LEN);
37100 + memset(gr_system_sum, 0, GR_SHA_LEN);
37101 + } else if (gr_status & GR_READY) {
37102 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
37103 + error = -EPERM;
37104 + } else {
37105 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
37106 + error = -EAGAIN;
37107 + }
37108 + break;
37109 + case GR_ENABLE:
37110 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
37111 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
37112 + else {
37113 + if (gr_status & GR_READY)
37114 + error = -EAGAIN;
37115 + else
37116 + error = error2;
37117 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
37118 + }
37119 + break;
37120 + case GR_RELOAD:
37121 + if (!(gr_status & GR_READY)) {
37122 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
37123 + error = -EAGAIN;
37124 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
37125 + lock_kernel();
37126 +
37127 + pax_open_kernel();
37128 + gr_status &= ~GR_READY;
37129 + pax_close_kernel();
37130 +
37131 + free_variables();
37132 + if (!(error2 = gracl_init(gr_usermode))) {
37133 + unlock_kernel();
37134 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
37135 + } else {
37136 + unlock_kernel();
37137 + error = error2;
37138 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
37139 + }
37140 + } else {
37141 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
37142 + error = -EPERM;
37143 + }
37144 + break;
37145 + case GR_SEGVMOD:
37146 + if (unlikely(!(gr_status & GR_READY))) {
37147 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
37148 + error = -EAGAIN;
37149 + break;
37150 + }
37151 +
37152 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
37153 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
37154 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
37155 + struct acl_subject_label *segvacl;
37156 + segvacl =
37157 + lookup_acl_subj_label(gr_usermode->segv_inode,
37158 + gr_usermode->segv_device,
37159 + current->role);
37160 + if (segvacl) {
37161 + segvacl->crashes = 0;
37162 + segvacl->expires = 0;
37163 + }
37164 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
37165 + gr_remove_uid(gr_usermode->segv_uid);
37166 + }
37167 + } else {
37168 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
37169 + error = -EPERM;
37170 + }
37171 + break;
37172 + case GR_SPROLE:
37173 + case GR_SPROLEPAM:
37174 + if (unlikely(!(gr_status & GR_READY))) {
37175 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
37176 + error = -EAGAIN;
37177 + break;
37178 + }
37179 +
37180 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
37181 + current->role->expires = 0;
37182 + current->role->auth_attempts = 0;
37183 + }
37184 +
37185 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
37186 + time_after(current->role->expires, get_seconds())) {
37187 + error = -EBUSY;
37188 + goto out;
37189 + }
37190 +
37191 + if (lookup_special_role_auth
37192 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
37193 + && ((!sprole_salt && !sprole_sum)
37194 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
37195 + char *p = "";
37196 + assign_special_role(gr_usermode->sp_role);
37197 + read_lock(&tasklist_lock);
37198 + if (current->parent)
37199 + p = current->parent->role->rolename;
37200 + read_unlock(&tasklist_lock);
37201 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
37202 + p, acl_sp_role_value);
37203 + } else {
37204 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
37205 + error = -EPERM;
37206 + if(!(current->role->auth_attempts++))
37207 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
37208 +
37209 + goto out;
37210 + }
37211 + break;
37212 + case GR_UNSPROLE:
37213 + if (unlikely(!(gr_status & GR_READY))) {
37214 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
37215 + error = -EAGAIN;
37216 + break;
37217 + }
37218 +
37219 + if (current->role->roletype & GR_ROLE_SPECIAL) {
37220 + char *p = "";
37221 + int i = 0;
37222 +
37223 + read_lock(&tasklist_lock);
37224 + if (current->parent) {
37225 + p = current->parent->role->rolename;
37226 + i = current->parent->acl_role_id;
37227 + }
37228 + read_unlock(&tasklist_lock);
37229 +
37230 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
37231 + gr_set_acls(1);
37232 + } else {
37233 + error = -EPERM;
37234 + goto out;
37235 + }
37236 + break;
37237 + default:
37238 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
37239 + error = -EINVAL;
37240 + break;
37241 + }
37242 +
37243 + if (error != -EPERM)
37244 + goto out;
37245 +
37246 + if(!(gr_auth_attempts++))
37247 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
37248 +
37249 + out:
37250 + up(&gr_dev_sem);
37251 + return error;
37252 +}
37253 +
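+/* re-resolve the role and subject of every task on the system.  With a
+ * non-zero type (the exit-handler case described in the comment below),
+ * only tasks still bound to the caller's current role and role id are
+ * updated.  Binaries replaced or deleted since exec are matched via the
+ * deleted-subject lookup; kernel threads fall back to the kernel role.
+ * Returns 1 (and logs) if a task cannot be assigned a subject, 0 otherwise.
+ */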
37254 +int
37255 +gr_set_acls(const int type)
37256 +{
37257 + struct acl_object_label *obj;
37258 + struct task_struct *task, *task2;
37259 + struct file *filp;
37260 + struct acl_role_label *role = current->role;
37261 + __u16 acl_role_id = current->acl_role_id;
37262 + const struct cred *cred;
37263 + char *tmpname;
37264 + struct name_entry *nmatch;
37265 + struct acl_subject_label *tmpsubj;
37266 +
37267 + rcu_read_lock();
37268 + read_lock(&tasklist_lock);
37269 + read_lock(&grsec_exec_file_lock);
37270 + do_each_thread(task2, task) {
37271 + /* check to see if we're called from the exit handler,
37272 + if so, only replace ACLs that have inherited the admin
37273 + ACL */
37274 +
37275 + if (type && (task->role != role ||
37276 + task->acl_role_id != acl_role_id))
37277 + continue;
37278 +
37279 + task->acl_role_id = 0;
37280 + task->acl_sp_role = 0;
37281 +
37282 + if ((filp = task->exec_file)) {
37283 + cred = __task_cred(task);
37284 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
37285 +
37286 +			/* the following applies the correct subject to
37287 +			   binaries that were already running when the RBAC
37288 +			   system was enabled and that have been replaced or
37289 +			   deleted since they were executed
37290 +			   -----
37291 +			   when the RBAC system starts, the inode/dev taken
37292 +			   from exec_file will be one the RBAC system is
37293 +			   unaware of; it only knows the inode/dev of the
37294 +			   file currently present on disk, or the absence
37295 +			   of it.
37296 +			*/
37297 + preempt_disable();
37298 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
37299 +
37300 + nmatch = lookup_name_entry(tmpname);
37301 + preempt_enable();
37302 + tmpsubj = NULL;
37303 + if (nmatch) {
37304 + if (nmatch->deleted)
37305 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
37306 + else
37307 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
37308 + if (tmpsubj != NULL)
37309 + task->acl = tmpsubj;
37310 + }
37311 + if (tmpsubj == NULL)
37312 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
37313 + task->role);
37314 + if (task->acl) {
37315 + struct acl_subject_label *curr;
37316 + curr = task->acl;
37317 +
37318 + task->is_writable = 0;
37319 + /* ignore additional mmap checks for processes that are writable
37320 + by the default ACL */
37321 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
37322 + if (unlikely(obj->mode & GR_WRITE))
37323 + task->is_writable = 1;
37324 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
37325 + if (unlikely(obj->mode & GR_WRITE))
37326 + task->is_writable = 1;
37327 +
37328 + gr_set_proc_res(task);
37329 +
37330 +#ifdef CONFIG_GRKERNSEC_ACL_DEBUG
37331 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
37332 +#endif
37333 + } else {
37334 + read_unlock(&grsec_exec_file_lock);
37335 + read_unlock(&tasklist_lock);
37336 + rcu_read_unlock();
37337 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
37338 + return 1;
37339 + }
37340 + } else {
37341 + // it's a kernel process
37342 + task->role = kernel_role;
37343 + task->acl = kernel_role->root_label;
37344 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
37345 + task->acl->mode &= ~GR_PROCFIND;
37346 +#endif
37347 + }
37348 + } while_each_thread(task2, task);
37349 + read_unlock(&grsec_exec_file_lock);
37350 + read_unlock(&tasklist_lock);
37351 + rcu_read_unlock();
37352 +
37353 + return 0;
37354 +}
37355 +
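+/* resource-limit learning hook: unless the subject is in learning mode,
+ * requests are passed to gr_log_resource() when CONFIG_GRKERNSEC_RESLOG is
+ * enabled; for a learning-mode subject whose resmask covers this resource,
+ * a request above the current soft limit raises the recorded limits by a
+ * per-resource slack (GR_RLIM_*_BUMP) and emits a learning audit record.
+ */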
37356 +void
37357 +gr_learn_resource(const struct task_struct *task,
37358 + const int res, const unsigned long wanted, const int gt)
37359 +{
37360 + struct acl_subject_label *acl;
37361 + const struct cred *cred;
37362 +
37363 + if (unlikely((gr_status & GR_READY) &&
37364 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
37365 + goto skip_reslog;
37366 +
37367 +#ifdef CONFIG_GRKERNSEC_RESLOG
37368 + gr_log_resource(task, res, wanted, gt);
37369 +#endif
37370 + skip_reslog:
37371 +
37372 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
37373 + return;
37374 +
37375 + acl = task->acl;
37376 +
37377 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
37378 + !(acl->resmask & (1 << (unsigned short) res))))
37379 + return;
37380 +
37381 + if (wanted >= acl->res[res].rlim_cur) {
37382 + unsigned long res_add;
37383 +
37384 + res_add = wanted;
37385 + switch (res) {
37386 + case RLIMIT_CPU:
37387 + res_add += GR_RLIM_CPU_BUMP;
37388 + break;
37389 + case RLIMIT_FSIZE:
37390 + res_add += GR_RLIM_FSIZE_BUMP;
37391 + break;
37392 + case RLIMIT_DATA:
37393 + res_add += GR_RLIM_DATA_BUMP;
37394 + break;
37395 + case RLIMIT_STACK:
37396 + res_add += GR_RLIM_STACK_BUMP;
37397 + break;
37398 + case RLIMIT_CORE:
37399 + res_add += GR_RLIM_CORE_BUMP;
37400 + break;
37401 + case RLIMIT_RSS:
37402 + res_add += GR_RLIM_RSS_BUMP;
37403 + break;
37404 + case RLIMIT_NPROC:
37405 + res_add += GR_RLIM_NPROC_BUMP;
37406 + break;
37407 + case RLIMIT_NOFILE:
37408 + res_add += GR_RLIM_NOFILE_BUMP;
37409 + break;
37410 + case RLIMIT_MEMLOCK:
37411 + res_add += GR_RLIM_MEMLOCK_BUMP;
37412 + break;
37413 + case RLIMIT_AS:
37414 + res_add += GR_RLIM_AS_BUMP;
37415 + break;
37416 + case RLIMIT_LOCKS:
37417 + res_add += GR_RLIM_LOCKS_BUMP;
37418 + break;
37419 + case RLIMIT_SIGPENDING:
37420 + res_add += GR_RLIM_SIGPENDING_BUMP;
37421 + break;
37422 + case RLIMIT_MSGQUEUE:
37423 + res_add += GR_RLIM_MSGQUEUE_BUMP;
37424 + break;
37425 + case RLIMIT_NICE:
37426 + res_add += GR_RLIM_NICE_BUMP;
37427 + break;
37428 + case RLIMIT_RTPRIO:
37429 + res_add += GR_RLIM_RTPRIO_BUMP;
37430 + break;
37431 + case RLIMIT_RTTIME:
37432 + res_add += GR_RLIM_RTTIME_BUMP;
37433 + break;
37434 + }
37435 +
37436 + acl->res[res].rlim_cur = res_add;
37437 +
37438 + if (wanted > acl->res[res].rlim_max)
37439 + acl->res[res].rlim_max = res_add;
37440 +
37441 + /* only log the subject filename, since resource logging is supported for
37442 + single-subject learning only */
37443 + rcu_read_lock();
37444 + cred = __task_cred(task);
37445 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
37446 + task->role->roletype, cred->uid, cred->gid, acl->filename,
37447 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
37448 + "", (unsigned long) res, &task->signal->curr_ip);
37449 + rcu_read_unlock();
37450 + }
37451 +
37452 + return;
37453 +}
37454 +
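+/* when the RBAC system is active, let the task's subject override the PaX
+ * flags chosen at exec time: each GR_PAX_DISABLE_* bit clears the matching
+ * MF_PAX_* flag, each GR_PAX_ENABLE_* bit sets it, and the result is
+ * written back with pax_set_flags().
+ */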
37455 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
37456 +void
37457 +pax_set_initial_flags(struct linux_binprm *bprm)
37458 +{
37459 + struct task_struct *task = current;
37460 + struct acl_subject_label *proc;
37461 + unsigned long flags;
37462 +
37463 + if (unlikely(!(gr_status & GR_READY)))
37464 + return;
37465 +
37466 + flags = pax_get_flags(task);
37467 +
37468 + proc = task->acl;
37469 +
37470 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
37471 + flags &= ~MF_PAX_PAGEEXEC;
37472 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
37473 + flags &= ~MF_PAX_SEGMEXEC;
37474 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
37475 + flags &= ~MF_PAX_RANDMMAP;
37476 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
37477 + flags &= ~MF_PAX_EMUTRAMP;
37478 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
37479 + flags &= ~MF_PAX_MPROTECT;
37480 +
37481 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
37482 + flags |= MF_PAX_PAGEEXEC;
37483 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
37484 + flags |= MF_PAX_SEGMEXEC;
37485 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
37486 + flags |= MF_PAX_RANDMMAP;
37487 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
37488 + flags |= MF_PAX_EMUTRAMP;
37489 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
37490 + flags |= MF_PAX_MPROTECT;
37491 +
37492 + pax_set_flags(task, flags);
37493 +
37494 + return;
37495 +}
37496 +#endif
37497 +
37498 +#ifdef CONFIG_SYSCTL
37499 +/* Eric Biederman likes breaking userland ABI and every inode-based security
37500 + system to save 35kb of memory */
37501 +
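+/* look up the object label for a path by longest matching prefix: try the
+ * full name first, then strip one trailing component at a time (restoring
+ * the string before returning) until a name_entry with an object label
+ * reachable from the current subject chain is found; attached globbed
+ * objects are matched against the full path and take precedence.
+ */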
37502 +/* we modify the passed in filename, but adjust it back before returning */
37503 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
37504 +{
37505 + struct name_entry *nmatch;
37506 + char *p, *lastp = NULL;
37507 + struct acl_object_label *obj = NULL, *tmp;
37508 + struct acl_subject_label *tmpsubj;
37509 + char c = '\0';
37510 +
37511 + read_lock(&gr_inode_lock);
37512 +
37513 + p = name + len - 1;
37514 + do {
37515 + nmatch = lookup_name_entry(name);
37516 + if (lastp != NULL)
37517 + *lastp = c;
37518 +
37519 + if (nmatch == NULL)
37520 + goto next_component;
37521 + tmpsubj = current->acl;
37522 + do {
37523 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
37524 + if (obj != NULL) {
37525 + tmp = obj->globbed;
37526 + while (tmp) {
37527 + if (!glob_match(tmp->filename, name)) {
37528 + obj = tmp;
37529 + goto found_obj;
37530 + }
37531 + tmp = tmp->next;
37532 + }
37533 + goto found_obj;
37534 + }
37535 + } while ((tmpsubj = tmpsubj->parent_subject));
37536 +next_component:
37537 + /* end case */
37538 + if (p == name)
37539 + break;
37540 +
37541 + while (*p != '/')
37542 + p--;
37543 + if (p == name)
37544 + lastp = p + 1;
37545 + else {
37546 + lastp = p;
37547 + p--;
37548 + }
37549 + c = *lastp;
37550 + *lastp = '\0';
37551 + } while (1);
37552 +found_obj:
37553 + read_unlock(&gr_inode_lock);
37554 + /* obj returned will always be non-null */
37555 + return obj;
37556 +}
37557 +
37558 +/* returns 0 when allowing, non-zero on error
37559 + op of 0 is used for readdir, so we don't log the names of hidden files
37560 +*/
37561 +__u32
37562 +gr_handle_sysctl(const struct ctl_table *table, const int op)
37563 +{
37564 + struct ctl_table *tmp;
37565 + const char *proc_sys = "/proc/sys";
37566 + char *path;
37567 + struct acl_object_label *obj;
37568 + unsigned short len = 0, pos = 0, depth = 0, i;
37569 + __u32 err = 0;
37570 + __u32 mode = 0;
37571 +
37572 + if (unlikely(!(gr_status & GR_READY)))
37573 + return 0;
37574 +
37575 + /* for now, ignore operations on non-sysctl entries if it's not a
37576 +	   readdir */
37577 + if (table->child != NULL && op != 0)
37578 + return 0;
37579 +
37580 + mode |= GR_FIND;
37581 + /* it's only a read if it's an entry, read on dirs is for readdir */
37582 + if (op & MAY_READ)
37583 + mode |= GR_READ;
37584 + if (op & MAY_WRITE)
37585 + mode |= GR_WRITE;
37586 +
37587 + preempt_disable();
37588 +
37589 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
37590 +
37591 + /* it's only a read/write if it's an actual entry, not a dir
37592 + (which are opened for readdir)
37593 + */
37594 +
37595 + /* convert the requested sysctl entry into a pathname */
37596 +
37597 + for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
37598 + len += strlen(tmp->procname);
37599 + len++;
37600 + depth++;
37601 + }
37602 +
37603 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
37604 + /* deny */
37605 + goto out;
37606 + }
37607 +
37608 + memset(path, 0, PAGE_SIZE);
37609 +
37610 + memcpy(path, proc_sys, strlen(proc_sys));
37611 +
37612 + pos += strlen(proc_sys);
37613 +
37614 + for (; depth > 0; depth--) {
37615 + path[pos] = '/';
37616 + pos++;
37617 + for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
37618 + if (depth == i) {
37619 + memcpy(path + pos, tmp->procname,
37620 + strlen(tmp->procname));
37621 + pos += strlen(tmp->procname);
37622 + }
37623 + i++;
37624 + }
37625 + }
37626 +
37627 + obj = gr_lookup_by_name(path, pos);
37628 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
37629 +
37630 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
37631 + ((err & mode) != mode))) {
37632 + __u32 new_mode = mode;
37633 +
37634 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
37635 +
37636 + err = 0;
37637 + gr_log_learn_sysctl(path, new_mode);
37638 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
37639 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
37640 + err = -ENOENT;
37641 + } else if (!(err & GR_FIND)) {
37642 + err = -ENOENT;
37643 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
37644 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
37645 + path, (mode & GR_READ) ? " reading" : "",
37646 + (mode & GR_WRITE) ? " writing" : "");
37647 + err = -EACCES;
37648 + } else if ((err & mode) != mode) {
37649 + err = -EACCES;
37650 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
37651 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
37652 + path, (mode & GR_READ) ? " reading" : "",
37653 + (mode & GR_WRITE) ? " writing" : "");
37654 + err = 0;
37655 + } else
37656 + err = 0;
37657 +
37658 + out:
37659 + preempt_enable();
37660 +
37661 + return err;
37662 +}
37663 +#endif
37664 +
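+/* policy check for ptrace-style access via /proc: returns 1 (deny) when the
+ * target has no exec_file, when non-ancestor access is blocked by
+ * HARDEN_PTRACE or by an RBAC subject lacking GR_RELAXPTRACE, when the
+ * target's binary carries GR_NOPTRACE, or when the caller lacks
+ * GR_POVERRIDE / GR_ROLE_GOD and fails the subject-confinement test below.
+ */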
37665 +int
37666 +gr_handle_proc_ptrace(struct task_struct *task)
37667 +{
37668 + struct file *filp;
37669 + struct task_struct *tmp = task;
37670 + struct task_struct *curtemp = current;
37671 + __u32 retmode;
37672 +
37673 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
37674 + if (unlikely(!(gr_status & GR_READY)))
37675 + return 0;
37676 +#endif
37677 +
37678 + read_lock(&tasklist_lock);
37679 + read_lock(&grsec_exec_file_lock);
37680 + filp = task->exec_file;
37681 +
37682 + while (tmp->pid > 0) {
37683 + if (tmp == curtemp)
37684 + break;
37685 + tmp = tmp->parent;
37686 + }
37687 +
37688 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
37689 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
37690 + read_unlock(&grsec_exec_file_lock);
37691 + read_unlock(&tasklist_lock);
37692 + return 1;
37693 + }
37694 +
37695 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
37696 + if (!(gr_status & GR_READY)) {
37697 + read_unlock(&grsec_exec_file_lock);
37698 + read_unlock(&tasklist_lock);
37699 + return 0;
37700 + }
37701 +#endif
37702 +
37703 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
37704 + read_unlock(&grsec_exec_file_lock);
37705 + read_unlock(&tasklist_lock);
37706 +
37707 + if (retmode & GR_NOPTRACE)
37708 + return 1;
37709 +
37710 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
37711 + && (current->acl != task->acl || (current->acl != current->role->root_label
37712 + && current->pid != task->pid)))
37713 + return 1;
37714 +
37715 + return 0;
37716 +}
37717 +
37718 +int
37719 +gr_handle_ptrace(struct task_struct *task, const long request)
37720 +{
37721 + struct task_struct *tmp = task;
37722 + struct task_struct *curtemp = current;
37723 + __u32 retmode;
37724 +
37725 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
37726 + if (unlikely(!(gr_status & GR_READY)))
37727 + return 0;
37728 +#endif
37729 +
37730 + read_lock(&tasklist_lock);
37731 + while (tmp->pid > 0) {
37732 + if (tmp == curtemp)
37733 + break;
37734 + tmp = tmp->parent;
37735 + }
37736 +
37737 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
37738 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
37739 + read_unlock(&tasklist_lock);
37740 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
37741 + return 1;
37742 + }
37743 + read_unlock(&tasklist_lock);
37744 +
37745 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
37746 + if (!(gr_status & GR_READY))
37747 + return 0;
37748 +#endif
37749 +
37750 + read_lock(&grsec_exec_file_lock);
37751 + if (unlikely(!task->exec_file)) {
37752 + read_unlock(&grsec_exec_file_lock);
37753 + return 0;
37754 + }
37755 +
37756 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
37757 + read_unlock(&grsec_exec_file_lock);
37758 +
37759 + if (retmode & GR_NOPTRACE) {
37760 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
37761 + return 1;
37762 + }
37763 +
37764 + if (retmode & GR_PTRACERD) {
37765 + switch (request) {
37766 + case PTRACE_POKETEXT:
37767 + case PTRACE_POKEDATA:
37768 + case PTRACE_POKEUSR:
37769 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
37770 + case PTRACE_SETREGS:
37771 + case PTRACE_SETFPREGS:
37772 +#endif
37773 +#ifdef CONFIG_X86
37774 + case PTRACE_SETFPXREGS:
37775 +#endif
37776 +#ifdef CONFIG_ALTIVEC
37777 + case PTRACE_SETVRREGS:
37778 +#endif
37779 + return 1;
37780 + default:
37781 + return 0;
37782 + }
37783 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
37784 + !(current->role->roletype & GR_ROLE_GOD) &&
37785 + (current->acl != task->acl)) {
37786 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
37787 + return 1;
37788 + }
37789 +
37790 + return 0;
37791 +}
37792 +
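+/* returns 1 (and logs GR_WRITLIB_ACL_MSG) when an executable mapping is
+ * requested on a regular file that the default role's or the task's role's
+ * root subject may write to, unless the subject has GR_OVERRIDE or the
+ * task's own binary is already writable; the mmap/mprotect PROT_EXEC
+ * handlers below refuse the mapping in that case.
+ */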
37793 +static int is_writable_mmap(const struct file *filp)
37794 +{
37795 + struct task_struct *task = current;
37796 + struct acl_object_label *obj, *obj2;
37797 +
37798 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
37799 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && filp->f_path.mnt != shm_mnt) {
37800 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
37801 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
37802 + task->role->root_label);
37803 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
37804 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
37805 + return 1;
37806 + }
37807 + }
37808 + return 0;
37809 +}
37810 +
37811 +int
37812 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
37813 +{
37814 + __u32 mode;
37815 +
37816 + if (unlikely(!file || !(prot & PROT_EXEC)))
37817 + return 1;
37818 +
37819 + if (is_writable_mmap(file))
37820 + return 0;
37821 +
37822 + mode =
37823 + gr_search_file(file->f_path.dentry,
37824 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
37825 + file->f_path.mnt);
37826 +
37827 + if (!gr_tpe_allow(file))
37828 + return 0;
37829 +
37830 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
37831 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
37832 + return 0;
37833 + } else if (unlikely(!(mode & GR_EXEC))) {
37834 + return 0;
37835 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
37836 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
37837 + return 1;
37838 + }
37839 +
37840 + return 1;
37841 +}
37842 +
37843 +int
37844 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
37845 +{
37846 + __u32 mode;
37847 +
37848 + if (unlikely(!file || !(prot & PROT_EXEC)))
37849 + return 1;
37850 +
37851 + if (is_writable_mmap(file))
37852 + return 0;
37853 +
37854 + mode =
37855 + gr_search_file(file->f_path.dentry,
37856 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
37857 + file->f_path.mnt);
37858 +
37859 + if (!gr_tpe_allow(file))
37860 + return 0;
37861 +
37862 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
37863 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
37864 + return 0;
37865 + } else if (unlikely(!(mode & GR_EXEC))) {
37866 + return 0;
37867 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
37868 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
37869 + return 1;
37870 + }
37871 +
37872 + return 1;
37873 +}
37874 +
37875 +void
37876 +gr_acl_handle_psacct(struct task_struct *task, const long code)
37877 +{
37878 + unsigned long runtime;
37879 + unsigned long cputime;
37880 + unsigned int wday, cday;
37881 + __u8 whr, chr;
37882 + __u8 wmin, cmin;
37883 + __u8 wsec, csec;
37884 + struct timespec timeval;
37885 +
37886 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
37887 + !(task->acl->mode & GR_PROCACCT)))
37888 + return;
37889 +
37890 + do_posix_clock_monotonic_gettime(&timeval);
37891 + runtime = timeval.tv_sec - task->start_time.tv_sec;
37892 + wday = runtime / (3600 * 24);
37893 + runtime -= wday * (3600 * 24);
37894 + whr = runtime / 3600;
37895 + runtime -= whr * 3600;
37896 + wmin = runtime / 60;
37897 + runtime -= wmin * 60;
37898 + wsec = runtime;
37899 +
37900 + cputime = (task->utime + task->stime) / HZ;
37901 + cday = cputime / (3600 * 24);
37902 + cputime -= cday * (3600 * 24);
37903 + chr = cputime / 3600;
37904 + cputime -= chr * 3600;
37905 + cmin = cputime / 60;
37906 + cputime -= cmin * 60;
37907 + csec = cputime;
37908 +
37909 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
37910 +
37911 + return;
37912 +}
37913 +
37914 +void gr_set_kernel_label(struct task_struct *task)
37915 +{
37916 + if (gr_status & GR_READY) {
37917 + task->role = kernel_role;
37918 + task->acl = kernel_role->root_label;
37919 + }
37920 + return;
37921 +}
37922 +
37923 +#ifdef CONFIG_TASKSTATS
37924 +int gr_is_taskstats_denied(int pid)
37925 +{
37926 + struct task_struct *task;
37927 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
37928 + const struct cred *cred;
37929 +#endif
37930 + int ret = 0;
37931 +
37932 + /* restrict taskstats viewing to un-chrooted root users
37933 + who have the 'view' subject flag if the RBAC system is enabled
37934 + */
37935 +
37936 + rcu_read_lock();
37937 + read_lock(&tasklist_lock);
37938 + task = find_task_by_vpid(pid);
37939 + if (task) {
37940 +#ifdef CONFIG_GRKERNSEC_CHROOT
37941 + if (proc_is_chrooted(task))
37942 + ret = -EACCES;
37943 +#endif
37944 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
37945 + cred = __task_cred(task);
37946 +#ifdef CONFIG_GRKERNSEC_PROC_USER
37947 + if (cred->uid != 0)
37948 + ret = -EACCES;
37949 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
37950 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
37951 + ret = -EACCES;
37952 +#endif
37953 +#endif
37954 + if (gr_status & GR_READY) {
37955 + if (!(task->acl->mode & GR_VIEW))
37956 + ret = -EACCES;
37957 + }
37958 + } else
37959 + ret = -ENOENT;
37960 +
37961 + read_unlock(&tasklist_lock);
37962 + rcu_read_unlock();
37963 +
37964 + return ret;
37965 +}
37966 +#endif
37967 +
37968 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
37969 +{
37970 + struct task_struct *task = current;
37971 + struct dentry *dentry = file->f_path.dentry;
37972 + struct vfsmount *mnt = file->f_path.mnt;
37973 + struct acl_object_label *obj, *tmp;
37974 + struct acl_subject_label *subj;
37975 + unsigned int bufsize;
37976 + int is_not_root;
37977 + char *path;
37978 +
37979 + if (unlikely(!(gr_status & GR_READY)))
37980 + return 1;
37981 +
37982 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
37983 + return 1;
37984 +
37985 + /* ignore Eric Biederman */
37986 + if (IS_PRIVATE(dentry->d_inode))
37987 + return 1;
37988 +
37989 + subj = task->acl;
37990 + do {
37991 + obj = lookup_acl_obj_label(ino, dentry->d_inode->i_sb->s_dev, subj);
37992 + if (obj != NULL)
37993 + return (obj->mode & GR_FIND) ? 1 : 0;
37994 + } while ((subj = subj->parent_subject));
37995 +
37996 +	/* this is purely an optimization: we look up the object for the
37997 +	   directory we're doing the readdir on.
37998 +	   If it's possible for any globbed object to match the entry we're
37999 +	   filling into the directory, then the object found here will be
38000 +	   an anchor point with the globbed objects attached to it.
38001 +	*/
38002 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
38003 + if (obj->globbed == NULL)
38004 + return (obj->mode & GR_FIND) ? 1 : 0;
38005 +
38006 + is_not_root = ((obj->filename[0] == '/') &&
38007 + (obj->filename[1] == '\0')) ? 0 : 1;
38008 + bufsize = PAGE_SIZE - namelen - is_not_root;
38009 +
38010 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
38011 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
38012 + return 1;
38013 +
38014 + preempt_disable();
38015 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
38016 + bufsize);
38017 +
38018 + bufsize = strlen(path);
38019 +
38020 + /* if base is "/", don't append an additional slash */
38021 + if (is_not_root)
38022 + *(path + bufsize) = '/';
38023 + memcpy(path + bufsize + is_not_root, name, namelen);
38024 + *(path + bufsize + namelen + is_not_root) = '\0';
38025 +
38026 + tmp = obj->globbed;
38027 + while (tmp) {
38028 + if (!glob_match(tmp->filename, path)) {
38029 + preempt_enable();
38030 + return (tmp->mode & GR_FIND) ? 1 : 0;
38031 + }
38032 + tmp = tmp->next;
38033 + }
38034 + preempt_enable();
38035 + return (obj->mode & GR_FIND) ? 1 : 0;
38036 +}
38037 +
38038 +EXPORT_SYMBOL(gr_learn_resource);
38039 +EXPORT_SYMBOL(gr_set_kernel_label);
38040 +#ifdef CONFIG_SECURITY
38041 +EXPORT_SYMBOL(gr_check_user_change);
38042 +EXPORT_SYMBOL(gr_check_group_change);
38043 +#endif
38044 +
38045 diff -urNp linux-2.6.35.4/grsecurity/gracl_cap.c linux-2.6.35.4/grsecurity/gracl_cap.c
38046 --- linux-2.6.35.4/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
38047 +++ linux-2.6.35.4/grsecurity/gracl_cap.c 2010-09-17 20:12:37.000000000 -0400
38048 @@ -0,0 +1,138 @@
38049 +#include <linux/kernel.h>
38050 +#include <linux/module.h>
38051 +#include <linux/sched.h>
38052 +#include <linux/gracl.h>
38053 +#include <linux/grsecurity.h>
38054 +#include <linux/grinternal.h>
38055 +
38056 +static const char *captab_log[] = {
38057 + "CAP_CHOWN",
38058 + "CAP_DAC_OVERRIDE",
38059 + "CAP_DAC_READ_SEARCH",
38060 + "CAP_FOWNER",
38061 + "CAP_FSETID",
38062 + "CAP_KILL",
38063 + "CAP_SETGID",
38064 + "CAP_SETUID",
38065 + "CAP_SETPCAP",
38066 + "CAP_LINUX_IMMUTABLE",
38067 + "CAP_NET_BIND_SERVICE",
38068 + "CAP_NET_BROADCAST",
38069 + "CAP_NET_ADMIN",
38070 + "CAP_NET_RAW",
38071 + "CAP_IPC_LOCK",
38072 + "CAP_IPC_OWNER",
38073 + "CAP_SYS_MODULE",
38074 + "CAP_SYS_RAWIO",
38075 + "CAP_SYS_CHROOT",
38076 + "CAP_SYS_PTRACE",
38077 + "CAP_SYS_PACCT",
38078 + "CAP_SYS_ADMIN",
38079 + "CAP_SYS_BOOT",
38080 + "CAP_SYS_NICE",
38081 + "CAP_SYS_RESOURCE",
38082 + "CAP_SYS_TIME",
38083 + "CAP_SYS_TTY_CONFIG",
38084 + "CAP_MKNOD",
38085 + "CAP_LEASE",
38086 + "CAP_AUDIT_WRITE",
38087 + "CAP_AUDIT_CONTROL",
38088 + "CAP_SETFCAP",
38089 + "CAP_MAC_OVERRIDE",
38090 + "CAP_MAC_ADMIN"
38091 +};
38092 +
38093 +EXPORT_SYMBOL(gr_is_capable);
38094 +EXPORT_SYMBOL(gr_is_capable_nolog);
38095 +
38096 +int
38097 +gr_is_capable(const int cap)
38098 +{
38099 + struct task_struct *task = current;
38100 + const struct cred *cred = current_cred();
38101 + struct acl_subject_label *curracl;
38102 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
38103 + kernel_cap_t cap_audit = __cap_empty_set;
38104 +
38105 + if (!gr_acl_is_enabled())
38106 + return 1;
38107 +
38108 + curracl = task->acl;
38109 +
38110 + cap_drop = curracl->cap_lower;
38111 + cap_mask = curracl->cap_mask;
38112 + cap_audit = curracl->cap_invert_audit;
38113 +
38114 + while ((curracl = curracl->parent_subject)) {
38115 +		/* if the cap hasn't been specified by a more specific (child) subject yet
38116 +		   but is specified by this ancestor subject, take this level's setting:
38117 +		   mark the cap as specified in the computed mask, and add it to the
38118 +		   dropped (and audited) sets if this level lowers or audit-inverts it
38119 +		*/
38120 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
38121 + cap_raise(cap_mask, cap);
38122 + if (cap_raised(curracl->cap_lower, cap))
38123 + cap_raise(cap_drop, cap);
38124 + if (cap_raised(curracl->cap_invert_audit, cap))
38125 + cap_raise(cap_audit, cap);
38126 + }
38127 + }
38128 +
38129 + if (!cap_raised(cap_drop, cap)) {
38130 + if (cap_raised(cap_audit, cap))
38131 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
38132 + return 1;
38133 + }
38134 +
38135 + curracl = task->acl;
38136 +
38137 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
38138 + && cap_raised(cred->cap_effective, cap)) {
38139 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
38140 + task->role->roletype, cred->uid,
38141 + cred->gid, task->exec_file ?
38142 + gr_to_filename(task->exec_file->f_path.dentry,
38143 + task->exec_file->f_path.mnt) : curracl->filename,
38144 + curracl->filename, 0UL,
38145 + 0UL, "", (unsigned long) cap, &task->signal->curr_ip);
38146 + return 1;
38147 + }
38148 +
38149 + if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
38150 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
38151 + return 0;
38152 +}
38153 +
38154 +int
38155 +gr_is_capable_nolog(const int cap)
38156 +{
38157 + struct acl_subject_label *curracl;
38158 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
38159 +
38160 + if (!gr_acl_is_enabled())
38161 + return 1;
38162 +
38163 + curracl = current->acl;
38164 +
38165 + cap_drop = curracl->cap_lower;
38166 + cap_mask = curracl->cap_mask;
38167 +
38168 + while ((curracl = curracl->parent_subject)) {
38169 +		/* if the cap hasn't been specified by a more specific (child) subject yet
38170 +		   but is specified by this ancestor subject, take this level's setting:
38171 +		   mark the cap as specified in the computed mask, and add it to the
38172 +		   dropped set if this level lowers it
38173 +		*/
38174 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
38175 + cap_raise(cap_mask, cap);
38176 + if (cap_raised(curracl->cap_lower, cap))
38177 + cap_raise(cap_drop, cap);
38178 + }
38179 + }
38180 +
38181 + if (!cap_raised(cap_drop, cap))
38182 + return 1;
38183 +
38184 + return 0;
38185 +}
38186 +
38187 diff -urNp linux-2.6.35.4/grsecurity/gracl_fs.c linux-2.6.35.4/grsecurity/gracl_fs.c
38188 --- linux-2.6.35.4/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
38189 +++ linux-2.6.35.4/grsecurity/gracl_fs.c 2010-09-17 20:12:37.000000000 -0400
38190 @@ -0,0 +1,424 @@
38191 +#include <linux/kernel.h>
38192 +#include <linux/sched.h>
38193 +#include <linux/types.h>
38194 +#include <linux/fs.h>
38195 +#include <linux/file.h>
38196 +#include <linux/stat.h>
38197 +#include <linux/grsecurity.h>
38198 +#include <linux/grinternal.h>
38199 +#include <linux/gracl.h>
38200 +
38201 +__u32
38202 +gr_acl_handle_hidden_file(const struct dentry * dentry,
38203 + const struct vfsmount * mnt)
38204 +{
38205 + __u32 mode;
38206 +
38207 + if (unlikely(!dentry->d_inode))
38208 + return GR_FIND;
38209 +
38210 + mode =
38211 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
38212 +
38213 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
38214 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
38215 + return mode;
38216 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
38217 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
38218 + return 0;
38219 + } else if (unlikely(!(mode & GR_FIND)))
38220 + return 0;
38221 +
38222 + return GR_FIND;
38223 +}
38224 +
38225 +__u32
38226 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
38227 + const int fmode)
38228 +{
38229 + __u32 reqmode = GR_FIND;
38230 + __u32 mode;
38231 +
38232 + if (unlikely(!dentry->d_inode))
38233 + return reqmode;
38234 +
38235 + if (unlikely(fmode & O_APPEND))
38236 + reqmode |= GR_APPEND;
38237 + else if (unlikely(fmode & FMODE_WRITE))
38238 + reqmode |= GR_WRITE;
38239 + if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
38240 + reqmode |= GR_READ;
38241 + if ((fmode & FMODE_GREXEC) && (fmode & FMODE_EXEC))
38242 + reqmode &= ~GR_READ;
38243 + mode =
38244 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
38245 + mnt);
38246 +
38247 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
38248 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
38249 + reqmode & GR_READ ? " reading" : "",
38250 + reqmode & GR_WRITE ? " writing" : reqmode &
38251 + GR_APPEND ? " appending" : "");
38252 + return reqmode;
38253 + } else
38254 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
38255 + {
38256 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
38257 + reqmode & GR_READ ? " reading" : "",
38258 + reqmode & GR_WRITE ? " writing" : reqmode &
38259 + GR_APPEND ? " appending" : "");
38260 + return 0;
38261 + } else if (unlikely((mode & reqmode) != reqmode))
38262 + return 0;
38263 +
38264 + return reqmode;
38265 +}
38266 +
38267 +__u32
38268 +gr_acl_handle_creat(const struct dentry * dentry,
38269 + const struct dentry * p_dentry,
38270 + const struct vfsmount * p_mnt, const int fmode,
38271 + const int imode)
38272 +{
38273 + __u32 reqmode = GR_WRITE | GR_CREATE;
38274 + __u32 mode;
38275 +
38276 + if (unlikely(fmode & O_APPEND))
38277 + reqmode |= GR_APPEND;
38278 + if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
38279 + reqmode |= GR_READ;
38280 + if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
38281 + reqmode |= GR_SETID;
38282 +
38283 + mode =
38284 + gr_check_create(dentry, p_dentry, p_mnt,
38285 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
38286 +
38287 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
38288 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
38289 + reqmode & GR_READ ? " reading" : "",
38290 + reqmode & GR_WRITE ? " writing" : reqmode &
38291 + GR_APPEND ? " appending" : "");
38292 + return reqmode;
38293 + } else
38294 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
38295 + {
38296 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
38297 + reqmode & GR_READ ? " reading" : "",
38298 + reqmode & GR_WRITE ? " writing" : reqmode &
38299 + GR_APPEND ? " appending" : "");
38300 + return 0;
38301 + } else if (unlikely((mode & reqmode) != reqmode))
38302 + return 0;
38303 +
38304 + return reqmode;
38305 +}
38306 +
38307 +__u32
38308 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
38309 + const int fmode)
38310 +{
38311 + __u32 mode, reqmode = GR_FIND;
38312 +
38313 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
38314 + reqmode |= GR_EXEC;
38315 + if (fmode & S_IWOTH)
38316 + reqmode |= GR_WRITE;
38317 + if (fmode & S_IROTH)
38318 + reqmode |= GR_READ;
38319 +
38320 + mode =
38321 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
38322 + mnt);
38323 +
38324 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
38325 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
38326 + reqmode & GR_READ ? " reading" : "",
38327 + reqmode & GR_WRITE ? " writing" : "",
38328 + reqmode & GR_EXEC ? " executing" : "");
38329 + return reqmode;
38330 + } else
38331 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
38332 + {
38333 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
38334 + reqmode & GR_READ ? " reading" : "",
38335 + reqmode & GR_WRITE ? " writing" : "",
38336 + reqmode & GR_EXEC ? " executing" : "");
38337 + return 0;
38338 + } else if (unlikely((mode & reqmode) != reqmode))
38339 + return 0;
38340 +
38341 + return reqmode;
38342 +}
38343 +
38344 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
38345 +{
38346 + __u32 mode;
38347 +
38348 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
38349 +
38350 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
38351 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
38352 + return mode;
38353 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
38354 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
38355 + return 0;
38356 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
38357 + return 0;
38358 +
38359 + return (reqmode);
38360 +}
38361 +
38362 +__u32
38363 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
38364 +{
38365 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
38366 +}
38367 +
38368 +__u32
38369 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
38370 +{
38371 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
38372 +}
38373 +
38374 +__u32
38375 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
38376 +{
38377 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
38378 +}
38379 +
38380 +__u32
38381 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
38382 +{
38383 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
38384 +}
38385 +
38386 +__u32
38387 +gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
38388 + mode_t mode)
38389 +{
38390 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
38391 + return 1;
38392 +
38393 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
38394 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
38395 + GR_FCHMOD_ACL_MSG);
38396 + } else {
38397 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
38398 + }
38399 +}
38400 +
38401 +__u32
38402 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
38403 + mode_t mode)
38404 +{
38405 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
38406 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
38407 + GR_CHMOD_ACL_MSG);
38408 + } else {
38409 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
38410 + }
38411 +}
38412 +
38413 +__u32
38414 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
38415 +{
38416 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
38417 +}
38418 +
38419 +__u32
38420 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
38421 +{
38422 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
38423 +}
38424 +
38425 +__u32
38426 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
38427 +{
38428 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
38429 + GR_UNIXCONNECT_ACL_MSG);
38430 +}
38431 +
38432 +/* hardlinks require at minimum create permission,
38433 + any additional privilege required is based on the
38434 + privilege of the file being linked to
38435 +*/
38436 +__u32
38437 +gr_acl_handle_link(const struct dentry * new_dentry,
38438 + const struct dentry * parent_dentry,
38439 + const struct vfsmount * parent_mnt,
38440 + const struct dentry * old_dentry,
38441 + const struct vfsmount * old_mnt, const char *to)
38442 +{
38443 + __u32 mode;
38444 + __u32 needmode = GR_CREATE | GR_LINK;
38445 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
38446 +
38447 + mode =
38448 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
38449 + old_mnt);
38450 +
38451 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
38452 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
38453 + return mode;
38454 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
38455 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
38456 + return 0;
38457 + } else if (unlikely((mode & needmode) != needmode))
38458 + return 0;
38459 +
38460 + return 1;
38461 +}
38462 +
38463 +__u32
38464 +gr_acl_handle_symlink(const struct dentry * new_dentry,
38465 + const struct dentry * parent_dentry,
38466 + const struct vfsmount * parent_mnt, const char *from)
38467 +{
38468 + __u32 needmode = GR_WRITE | GR_CREATE;
38469 + __u32 mode;
38470 +
38471 + mode =
38472 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
38473 + GR_CREATE | GR_AUDIT_CREATE |
38474 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
38475 +
38476 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
38477 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
38478 + return mode;
38479 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
38480 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
38481 + return 0;
38482 + } else if (unlikely((mode & needmode) != needmode))
38483 + return 0;
38484 +
38485 + return (GR_WRITE | GR_CREATE);
38486 +}
38487 +
38488 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
38489 +{
38490 + __u32 mode;
38491 +
38492 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
38493 +
38494 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
38495 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
38496 + return mode;
38497 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
38498 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
38499 + return 0;
38500 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
38501 + return 0;
38502 +
38503 + return (reqmode);
38504 +}
38505 +
38506 +__u32
38507 +gr_acl_handle_mknod(const struct dentry * new_dentry,
38508 + const struct dentry * parent_dentry,
38509 + const struct vfsmount * parent_mnt,
38510 + const int mode)
38511 +{
38512 + __u32 reqmode = GR_WRITE | GR_CREATE;
38513 + if (unlikely(mode & (S_ISUID | S_ISGID)))
38514 + reqmode |= GR_SETID;
38515 +
38516 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
38517 + reqmode, GR_MKNOD_ACL_MSG);
38518 +}
38519 +
38520 +__u32
38521 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
38522 + const struct dentry *parent_dentry,
38523 + const struct vfsmount *parent_mnt)
38524 +{
38525 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
38526 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
38527 +}
38528 +
38529 +#define RENAME_CHECK_SUCCESS(old, new) \
38530 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
38531 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
38532 +
38533 +int
38534 +gr_acl_handle_rename(struct dentry *new_dentry,
38535 + struct dentry *parent_dentry,
38536 + const struct vfsmount *parent_mnt,
38537 + struct dentry *old_dentry,
38538 + struct inode *old_parent_inode,
38539 + struct vfsmount *old_mnt, const char *newname)
38540 +{
38541 + __u32 comp1, comp2;
38542 + int error = 0;
38543 +
38544 + if (unlikely(!gr_acl_is_enabled()))
38545 + return 0;
38546 +
38547 + if (!new_dentry->d_inode) {
38548 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
38549 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
38550 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
38551 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
38552 + GR_DELETE | GR_AUDIT_DELETE |
38553 + GR_AUDIT_READ | GR_AUDIT_WRITE |
38554 + GR_SUPPRESS, old_mnt);
38555 + } else {
38556 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
38557 + GR_CREATE | GR_DELETE |
38558 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
38559 + GR_AUDIT_READ | GR_AUDIT_WRITE |
38560 + GR_SUPPRESS, parent_mnt);
38561 + comp2 =
38562 + gr_search_file(old_dentry,
38563 + GR_READ | GR_WRITE | GR_AUDIT_READ |
38564 + GR_DELETE | GR_AUDIT_DELETE |
38565 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
38566 + }
38567 +
38568 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
38569 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
38570 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
38571 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
38572 + && !(comp2 & GR_SUPPRESS)) {
38573 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
38574 + error = -EACCES;
38575 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
38576 + error = -EACCES;
38577 +
38578 + return error;
38579 +}
38580 +
38581 +void
38582 +gr_acl_handle_exit(void)
38583 +{
38584 + u16 id;
38585 + char *rolename;
38586 + struct file *exec_file;
38587 +
38588 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled())) {
38589 + id = current->acl_role_id;
38590 + rolename = current->role->rolename;
38591 + gr_set_acls(1);
38592 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
38593 + }
38594 +
38595 + write_lock(&grsec_exec_file_lock);
38596 + exec_file = current->exec_file;
38597 + current->exec_file = NULL;
38598 + write_unlock(&grsec_exec_file_lock);
38599 +
38600 + if (exec_file)
38601 + fput(exec_file);
38602 +}
38603 +
38604 +int
38605 +gr_acl_handle_procpidmem(const struct task_struct *task)
38606 +{
38607 + if (unlikely(!gr_acl_is_enabled()))
38608 + return 0;
38609 +
38610 + if (task != current && task->acl->mode & GR_PROTPROCFD)
38611 + return -EACCES;
38612 +
38613 + return 0;
38614 +}
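
The ACL handlers above all funnel into generic_fs_handler(), which turns a looked-up subject mode into one of three outcomes: grant (optionally audited), deny with an alert, or deny silently when GR_SUPPRESS is set on the matching object. A minimal userspace sketch of just that decision table follows; the flag values are placeholders chosen for the example and do not match the real definitions in include/linux/gracl.h.

#include <stdio.h>

/* Placeholder flag values -- the real ones live in include/linux/gracl.h
 * and differ from these. */
#define GR_READ     0x01
#define GR_WRITE    0x02
#define GR_AUDITS   0x40	/* "some audit bit was set on the matching object" */
#define GR_SUPPRESS 0x80	/* "suppress logging of denials" */

/* Condensed decision table of generic_fs_handler(): returns the granted mode,
 * or 0 on denial; denials are only reported when GR_SUPPRESS is not set. */
static unsigned int decide(unsigned int mode, unsigned int reqmode)
{
	if ((mode & reqmode) == reqmode && (mode & GR_AUDITS)) {
		printf("audit: access granted\n");
		return mode;
	}
	if ((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)) {
		printf("alert: access denied\n");
		return 0;
	}
	if ((mode & reqmode) != reqmode)
		return 0;		/* denied, but logging suppressed */
	return reqmode;			/* granted, nothing to log */
}

int main(void)
{
	printf("-> %u\n", decide(GR_READ | GR_WRITE, GR_READ | GR_WRITE));	/* granted silently */
	printf("-> %u\n", decide(GR_READ | GR_SUPPRESS, GR_READ | GR_WRITE));	/* denied, suppressed */
	printf("-> %u\n", decide(GR_READ | GR_WRITE | GR_AUDITS, GR_WRITE));	/* granted and audited */
	return 0;
}
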
38615 diff -urNp linux-2.6.35.4/grsecurity/gracl_ip.c linux-2.6.35.4/grsecurity/gracl_ip.c
38616 --- linux-2.6.35.4/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
38617 +++ linux-2.6.35.4/grsecurity/gracl_ip.c 2010-09-17 20:12:37.000000000 -0400
38618 @@ -0,0 +1,339 @@
38619 +#include <linux/kernel.h>
38620 +#include <asm/uaccess.h>
38621 +#include <asm/errno.h>
38622 +#include <net/sock.h>
38623 +#include <linux/file.h>
38624 +#include <linux/fs.h>
38625 +#include <linux/net.h>
38626 +#include <linux/in.h>
38627 +#include <linux/skbuff.h>
38628 +#include <linux/ip.h>
38629 +#include <linux/udp.h>
38630 +#include <linux/smp_lock.h>
38631 +#include <linux/types.h>
38632 +#include <linux/sched.h>
38633 +#include <linux/netdevice.h>
38634 +#include <linux/inetdevice.h>
38635 +#include <linux/gracl.h>
38636 +#include <linux/grsecurity.h>
38637 +#include <linux/grinternal.h>
38638 +
38639 +#define GR_BIND 0x01
38640 +#define GR_CONNECT 0x02
38641 +#define GR_INVERT 0x04
38642 +#define GR_BINDOVERRIDE 0x08
38643 +#define GR_CONNECTOVERRIDE 0x10
38644 +
38645 +static const char * gr_protocols[256] = {
38646 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
38647 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
38648 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
38649 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
38650 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
38651 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
38652 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
38653 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
38654 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
38655 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
38656 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
38657 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
38658 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
38659 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
38660 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
38661 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
38662 +	"sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
38663 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
38664 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
38665 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
38666 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
38667 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
38668 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
38669 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
38670 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
38671 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
38672 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
38673 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
38674 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
38675 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
38676 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
38677 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
38678 + };
38679 +
38680 +static const char * gr_socktypes[11] = {
38681 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
38682 + "unknown:7", "unknown:8", "unknown:9", "packet"
38683 + };
38684 +
38685 +const char *
38686 +gr_proto_to_name(unsigned char proto)
38687 +{
38688 + return gr_protocols[proto];
38689 +}
38690 +
38691 +const char *
38692 +gr_socktype_to_name(unsigned char type)
38693 +{
38694 + return gr_socktypes[type];
38695 +}
38696 +
38697 +int
38698 +gr_search_socket(const int domain, const int type, const int protocol)
38699 +{
38700 + struct acl_subject_label *curr;
38701 + const struct cred *cred = current_cred();
38702 +
38703 + if (unlikely(!gr_acl_is_enabled()))
38704 + goto exit;
38705 +
38706 + if ((domain < 0) || (type < 0) || (protocol < 0) || (domain != PF_INET)
38707 + || (domain >= NPROTO) || (type >= SOCK_MAX) || (protocol > 255))
38708 + goto exit; // let the kernel handle it
38709 +
38710 + curr = current->acl;
38711 +
38712 + if (!curr->ips)
38713 + goto exit;
38714 +
38715 + if ((curr->ip_type & (1 << type)) &&
38716 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
38717 + goto exit;
38718 +
38719 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
38720 +		/* we don't place acls on raw sockets, and sometimes
38721 + dgram/ip sockets are opened for ioctl and not
38722 + bind/connect, so we'll fake a bind learn log */
38723 + if (type == SOCK_RAW || type == SOCK_PACKET) {
38724 + __u32 fakeip = 0;
38725 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
38726 + current->role->roletype, cred->uid,
38727 + cred->gid, current->exec_file ?
38728 + gr_to_filename(current->exec_file->f_path.dentry,
38729 + current->exec_file->f_path.mnt) :
38730 + curr->filename, curr->filename,
38731 + &fakeip, 0, type,
38732 + protocol, GR_CONNECT, &current->signal->curr_ip);
38733 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
38734 + __u32 fakeip = 0;
38735 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
38736 + current->role->roletype, cred->uid,
38737 + cred->gid, current->exec_file ?
38738 + gr_to_filename(current->exec_file->f_path.dentry,
38739 + current->exec_file->f_path.mnt) :
38740 + curr->filename, curr->filename,
38741 + &fakeip, 0, type,
38742 + protocol, GR_BIND, &current->signal->curr_ip);
38743 + }
38744 + /* we'll log when they use connect or bind */
38745 + goto exit;
38746 + }
38747 +
38748 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, "inet",
38749 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
38750 +
38751 + return 0;
38752 + exit:
38753 + return 1;
38754 +}
38755 +
38756 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
38757 +{
38758 + if ((ip->mode & mode) &&
38759 + (ip_port >= ip->low) &&
38760 + (ip_port <= ip->high) &&
38761 + ((ntohl(ip_addr) & our_netmask) ==
38762 + (ntohl(our_addr) & our_netmask))
38763 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
38764 + && (ip->type & (1 << type))) {
38765 + if (ip->mode & GR_INVERT)
38766 + return 2; // specifically denied
38767 + else
38768 + return 1; // allowed
38769 + }
38770 +
38771 + return 0; // not specifically allowed, may continue parsing
38772 +}
38773 +
38774 +static int
38775 +gr_search_connectbind(const int full_mode, struct sock *sk,
38776 + struct sockaddr_in *addr, const int type)
38777 +{
38778 + char iface[IFNAMSIZ] = {0};
38779 + struct acl_subject_label *curr;
38780 + struct acl_ip_label *ip;
38781 + struct inet_sock *isk;
38782 + struct net_device *dev;
38783 + struct in_device *idev;
38784 + unsigned long i;
38785 + int ret;
38786 + int mode = full_mode & (GR_BIND | GR_CONNECT);
38787 + __u32 ip_addr = 0;
38788 + __u32 our_addr;
38789 + __u32 our_netmask;
38790 + char *p;
38791 + __u16 ip_port = 0;
38792 + const struct cred *cred = current_cred();
38793 +
38794 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
38795 + return 0;
38796 +
38797 + curr = current->acl;
38798 + isk = inet_sk(sk);
38799 +
38800 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
38801 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
38802 + addr->sin_addr.s_addr = curr->inaddr_any_override;
38803 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
38804 + struct sockaddr_in saddr;
38805 + int err;
38806 +
38807 + saddr.sin_family = AF_INET;
38808 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
38809 + saddr.sin_port = isk->inet_sport;
38810 +
38811 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
38812 + if (err)
38813 + return err;
38814 +
38815 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
38816 + if (err)
38817 + return err;
38818 + }
38819 +
38820 + if (!curr->ips)
38821 + return 0;
38822 +
38823 + ip_addr = addr->sin_addr.s_addr;
38824 + ip_port = ntohs(addr->sin_port);
38825 +
38826 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
38827 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
38828 + current->role->roletype, cred->uid,
38829 + cred->gid, current->exec_file ?
38830 + gr_to_filename(current->exec_file->f_path.dentry,
38831 + current->exec_file->f_path.mnt) :
38832 + curr->filename, curr->filename,
38833 + &ip_addr, ip_port, type,
38834 + sk->sk_protocol, mode, &current->signal->curr_ip);
38835 + return 0;
38836 + }
38837 +
38838 + for (i = 0; i < curr->ip_num; i++) {
38839 + ip = *(curr->ips + i);
38840 + if (ip->iface != NULL) {
38841 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
38842 + p = strchr(iface, ':');
38843 + if (p != NULL)
38844 + *p = '\0';
38845 + dev = dev_get_by_name(sock_net(sk), iface);
38846 + if (dev == NULL)
38847 + continue;
38848 + idev = in_dev_get(dev);
38849 + if (idev == NULL) {
38850 + dev_put(dev);
38851 + continue;
38852 + }
38853 + rcu_read_lock();
38854 + for_ifa(idev) {
38855 + if (!strcmp(ip->iface, ifa->ifa_label)) {
38856 + our_addr = ifa->ifa_address;
38857 + our_netmask = 0xffffffff;
38858 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
38859 + if (ret == 1) {
38860 + rcu_read_unlock();
38861 + in_dev_put(idev);
38862 + dev_put(dev);
38863 + return 0;
38864 + } else if (ret == 2) {
38865 + rcu_read_unlock();
38866 + in_dev_put(idev);
38867 + dev_put(dev);
38868 + goto denied;
38869 + }
38870 + }
38871 + } endfor_ifa(idev);
38872 + rcu_read_unlock();
38873 + in_dev_put(idev);
38874 + dev_put(dev);
38875 + } else {
38876 + our_addr = ip->addr;
38877 + our_netmask = ip->netmask;
38878 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
38879 + if (ret == 1)
38880 + return 0;
38881 + else if (ret == 2)
38882 + goto denied;
38883 + }
38884 + }
38885 +
38886 +denied:
38887 + if (mode == GR_BIND)
38888 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
38889 + else if (mode == GR_CONNECT)
38890 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
38891 +
38892 + return -EACCES;
38893 +}
38894 +
38895 +int
38896 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
38897 +{
38898 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
38899 +}
38900 +
38901 +int
38902 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
38903 +{
38904 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
38905 +}
38906 +
38907 +int gr_search_listen(struct socket *sock)
38908 +{
38909 + struct sock *sk = sock->sk;
38910 + struct sockaddr_in addr;
38911 +
38912 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
38913 + addr.sin_port = inet_sk(sk)->inet_sport;
38914 +
38915 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
38916 +}
38917 +
38918 +int gr_search_accept(struct socket *sock)
38919 +{
38920 + struct sock *sk = sock->sk;
38921 + struct sockaddr_in addr;
38922 +
38923 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
38924 + addr.sin_port = inet_sk(sk)->inet_sport;
38925 +
38926 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
38927 +}
38928 +
38929 +int
38930 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
38931 +{
38932 + if (addr)
38933 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
38934 + else {
38935 + struct sockaddr_in sin;
38936 + const struct inet_sock *inet = inet_sk(sk);
38937 +
38938 + sin.sin_addr.s_addr = inet->inet_daddr;
38939 + sin.sin_port = inet->inet_dport;
38940 +
38941 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
38942 + }
38943 +}
38944 +
38945 +int
38946 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
38947 +{
38948 + struct sockaddr_in sin;
38949 +
38950 + if (unlikely(skb->len < sizeof (struct udphdr)))
38951 + return 0; // skip this packet
38952 +
38953 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
38954 + sin.sin_port = udp_hdr(skb)->source;
38955 +
38956 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
38957 +}
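
check_ip_policy() above is the heart of the per-subject socket policy: a rule applies when its mode, port range, protocol/type bitmaps and masked address all match, and a matching rule marked GR_INVERT becomes an explicit deny. A stripped-down userspace sketch of the address/port part of that match follows (the protocol and socket-type bitmaps are omitted); struct ip_rule and its layout are invented for the illustration.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define GR_BIND    0x01
#define GR_CONNECT 0x02
#define GR_INVERT  0x04

/* Invented stand-in for the relevant fields of struct acl_ip_label. */
struct ip_rule {
	uint32_t addr;		/* rule address, network byte order */
	uint32_t netmask;	/* mask applied in host byte order, as above */
	uint16_t low, high;	/* allowed port range */
	uint8_t  mode;		/* GR_BIND / GR_CONNECT, optionally GR_INVERT */
};

/* 0 = no match, 1 = allowed, 2 = explicitly denied (inverted rule),
 * mirroring the return values of check_ip_policy(). */
static int match_rule(const struct ip_rule *r, uint32_t ip, uint16_t port, int mode)
{
	if ((r->mode & mode) &&
	    port >= r->low && port <= r->high &&
	    (ntohl(ip) & r->netmask) == (ntohl(r->addr) & r->netmask))
		return (r->mode & GR_INVERT) ? 2 : 1;
	return 0;
}

int main(void)
{
	struct ip_rule lan = { inet_addr("192.168.1.0"), 0xffffff00, 1024, 65535, GR_CONNECT };
	uint32_t target = inet_addr("192.168.1.10");

	printf("connect :8080 -> %d\n", match_rule(&lan, target, 8080, GR_CONNECT));	/* 1: allowed */
	printf("connect :80   -> %d\n", match_rule(&lan, target, 80, GR_CONNECT));	/* 0: no rule matched */
	return 0;
}
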
38958 diff -urNp linux-2.6.35.4/grsecurity/gracl_learn.c linux-2.6.35.4/grsecurity/gracl_learn.c
38959 --- linux-2.6.35.4/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
38960 +++ linux-2.6.35.4/grsecurity/gracl_learn.c 2010-09-17 20:12:37.000000000 -0400
38961 @@ -0,0 +1,211 @@
38962 +#include <linux/kernel.h>
38963 +#include <linux/mm.h>
38964 +#include <linux/sched.h>
38965 +#include <linux/poll.h>
38966 +#include <linux/smp_lock.h>
38967 +#include <linux/string.h>
38968 +#include <linux/file.h>
38969 +#include <linux/types.h>
38970 +#include <linux/vmalloc.h>
38971 +#include <linux/grinternal.h>
38972 +
38973 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
38974 + size_t count, loff_t *ppos);
38975 +extern int gr_acl_is_enabled(void);
38976 +
38977 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
38978 +static int gr_learn_attached;
38979 +
38980 +/* use a 512k buffer */
38981 +#define LEARN_BUFFER_SIZE (512 * 1024)
38982 +
38983 +static DEFINE_SPINLOCK(gr_learn_lock);
38984 +static DECLARE_MUTEX(gr_learn_user_sem);
38985 +
38986 +/* we need to maintain two buffers, so that the kernel context of grlearn
38987 + uses a semaphore around the userspace copying, and the other kernel contexts
38988 + use a spinlock when copying into the buffer, since they cannot sleep
38989 +*/
38990 +static char *learn_buffer;
38991 +static char *learn_buffer_user;
38992 +static int learn_buffer_len;
38993 +static int learn_buffer_user_len;
38994 +
38995 +static ssize_t
38996 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
38997 +{
38998 + DECLARE_WAITQUEUE(wait, current);
38999 + ssize_t retval = 0;
39000 +
39001 + add_wait_queue(&learn_wait, &wait);
39002 + set_current_state(TASK_INTERRUPTIBLE);
39003 + do {
39004 + down(&gr_learn_user_sem);
39005 + spin_lock(&gr_learn_lock);
39006 + if (learn_buffer_len)
39007 + break;
39008 + spin_unlock(&gr_learn_lock);
39009 + up(&gr_learn_user_sem);
39010 + if (file->f_flags & O_NONBLOCK) {
39011 + retval = -EAGAIN;
39012 + goto out;
39013 + }
39014 + if (signal_pending(current)) {
39015 + retval = -ERESTARTSYS;
39016 + goto out;
39017 + }
39018 +
39019 + schedule();
39020 + } while (1);
39021 +
39022 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
39023 + learn_buffer_user_len = learn_buffer_len;
39024 + retval = learn_buffer_len;
39025 + learn_buffer_len = 0;
39026 +
39027 + spin_unlock(&gr_learn_lock);
39028 +
39029 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
39030 + retval = -EFAULT;
39031 +
39032 + up(&gr_learn_user_sem);
39033 +out:
39034 + set_current_state(TASK_RUNNING);
39035 + remove_wait_queue(&learn_wait, &wait);
39036 + return retval;
39037 +}
39038 +
39039 +static unsigned int
39040 +poll_learn(struct file * file, poll_table * wait)
39041 +{
39042 + poll_wait(file, &learn_wait, wait);
39043 +
39044 + if (learn_buffer_len)
39045 + return (POLLIN | POLLRDNORM);
39046 +
39047 + return 0;
39048 +}
39049 +
39050 +void
39051 +gr_clear_learn_entries(void)
39052 +{
39053 + char *tmp;
39054 +
39055 + down(&gr_learn_user_sem);
39056 + if (learn_buffer != NULL) {
39057 + spin_lock(&gr_learn_lock);
39058 + tmp = learn_buffer;
39059 + learn_buffer = NULL;
39060 + spin_unlock(&gr_learn_lock);
39061 +		vfree(tmp);
39062 + }
39063 + if (learn_buffer_user != NULL) {
39064 + vfree(learn_buffer_user);
39065 + learn_buffer_user = NULL;
39066 + }
39067 + learn_buffer_len = 0;
39068 + up(&gr_learn_user_sem);
39069 +
39070 + return;
39071 +}
39072 +
39073 +void
39074 +gr_add_learn_entry(const char *fmt, ...)
39075 +{
39076 + va_list args;
39077 + unsigned int len;
39078 +
39079 + if (!gr_learn_attached)
39080 + return;
39081 +
39082 + spin_lock(&gr_learn_lock);
39083 +
39084 + /* leave a gap at the end so we know when it's "full" but don't have to
39085 + compute the exact length of the string we're trying to append
39086 + */
39087 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
39088 + spin_unlock(&gr_learn_lock);
39089 + wake_up_interruptible(&learn_wait);
39090 + return;
39091 + }
39092 + if (learn_buffer == NULL) {
39093 + spin_unlock(&gr_learn_lock);
39094 + return;
39095 + }
39096 +
39097 + va_start(args, fmt);
39098 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
39099 + va_end(args);
39100 +
39101 + learn_buffer_len += len + 1;
39102 +
39103 + spin_unlock(&gr_learn_lock);
39104 + wake_up_interruptible(&learn_wait);
39105 +
39106 + return;
39107 +}
39108 +
39109 +static int
39110 +open_learn(struct inode *inode, struct file *file)
39111 +{
39112 + if (file->f_mode & FMODE_READ && gr_learn_attached)
39113 + return -EBUSY;
39114 + if (file->f_mode & FMODE_READ) {
39115 + int retval = 0;
39116 + down(&gr_learn_user_sem);
39117 + if (learn_buffer == NULL)
39118 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
39119 + if (learn_buffer_user == NULL)
39120 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
39121 + if (learn_buffer == NULL) {
39122 + retval = -ENOMEM;
39123 + goto out_error;
39124 + }
39125 + if (learn_buffer_user == NULL) {
39126 + retval = -ENOMEM;
39127 + goto out_error;
39128 + }
39129 + learn_buffer_len = 0;
39130 + learn_buffer_user_len = 0;
39131 + gr_learn_attached = 1;
39132 +out_error:
39133 + up(&gr_learn_user_sem);
39134 + return retval;
39135 + }
39136 + return 0;
39137 +}
39138 +
39139 +static int
39140 +close_learn(struct inode *inode, struct file *file)
39141 +{
39142 + char *tmp;
39143 +
39144 + if (file->f_mode & FMODE_READ) {
39145 + down(&gr_learn_user_sem);
39146 + if (learn_buffer != NULL) {
39147 + spin_lock(&gr_learn_lock);
39148 + tmp = learn_buffer;
39149 + learn_buffer = NULL;
39150 + spin_unlock(&gr_learn_lock);
39151 + vfree(tmp);
39152 + }
39153 + if (learn_buffer_user != NULL) {
39154 + vfree(learn_buffer_user);
39155 + learn_buffer_user = NULL;
39156 + }
39157 + learn_buffer_len = 0;
39158 + learn_buffer_user_len = 0;
39159 + gr_learn_attached = 0;
39160 + up(&gr_learn_user_sem);
39161 + }
39162 +
39163 + return 0;
39164 +}
39165 +
39166 +const struct file_operations grsec_fops = {
39167 + .read = read_learn,
39168 + .write = write_grsec_handler,
39169 + .open = open_learn,
39170 + .release = close_learn,
39171 + .poll = poll_learn,
39172 +};
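
gracl_learn.c above deliberately keeps two buffers: writers in arbitrary kernel contexts append under a spinlock and never sleep, while the single reader copies to userspace under a semaphore so the slow copy never blocks the writers. The same hand-off can be sketched in ordinary userspace C with pthread mutexes standing in for the kernel spinlock and semaphore; every name below is invented for the illustration.

#include <pthread.h>
#include <stdarg.h>
#include <stdio.h>
#include <string.h>

#define BUF_SIZE (512 * 1024)

static pthread_mutex_t produce_lock = PTHREAD_MUTEX_INITIALIZER;	/* plays the role of gr_learn_lock */
static pthread_mutex_t reader_lock  = PTHREAD_MUTEX_INITIALIZER;	/* plays the role of gr_learn_user_sem */
static char buf[BUF_SIZE];		/* plays the role of learn_buffer */
static char user_buf[BUF_SIZE];		/* plays the role of learn_buffer_user */
static size_t buf_len;

/* Producer side: short, non-blocking append, like gr_add_learn_entry(). */
static void add_entry(const char *fmt, ...)
{
	va_list args;

	pthread_mutex_lock(&produce_lock);
	if (buf_len < BUF_SIZE - 16384) {	/* keep the same slack gap at the end */
		va_start(args, fmt);
		buf_len += vsnprintf(buf + buf_len, BUF_SIZE - buf_len, fmt, args) + 1;
		va_end(args);
	}
	pthread_mutex_unlock(&produce_lock);
}

/* Consumer side: may take its time copying out, like read_learn(). */
static size_t drain_entries(char *out, size_t outsz)
{
	size_t n;

	pthread_mutex_lock(&reader_lock);	/* serialise readers */
	pthread_mutex_lock(&produce_lock);	/* briefly stop producers */
	n = buf_len < outsz ? buf_len : outsz;
	memcpy(user_buf, buf, n);
	buf_len = 0;
	pthread_mutex_unlock(&produce_lock);	/* producers may continue... */
	memcpy(out, user_buf, n);		/* ...while the slow copy finishes */
	pthread_mutex_unlock(&reader_lock);
	return n;
}

int main(void)
{
	char out[256];

	add_entry("role %s uid %d", "admin", 0);
	printf("drained %zu bytes\n", drain_entries(out, sizeof(out)));
	return 0;
}
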
39173 diff -urNp linux-2.6.35.4/grsecurity/gracl_res.c linux-2.6.35.4/grsecurity/gracl_res.c
39174 --- linux-2.6.35.4/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
39175 +++ linux-2.6.35.4/grsecurity/gracl_res.c 2010-09-17 20:12:37.000000000 -0400
39176 @@ -0,0 +1,68 @@
39177 +#include <linux/kernel.h>
39178 +#include <linux/sched.h>
39179 +#include <linux/gracl.h>
39180 +#include <linux/grinternal.h>
39181 +
39182 +static const char *restab_log[] = {
39183 + [RLIMIT_CPU] = "RLIMIT_CPU",
39184 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
39185 + [RLIMIT_DATA] = "RLIMIT_DATA",
39186 + [RLIMIT_STACK] = "RLIMIT_STACK",
39187 + [RLIMIT_CORE] = "RLIMIT_CORE",
39188 + [RLIMIT_RSS] = "RLIMIT_RSS",
39189 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
39190 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
39191 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
39192 + [RLIMIT_AS] = "RLIMIT_AS",
39193 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
39194 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
39195 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
39196 + [RLIMIT_NICE] = "RLIMIT_NICE",
39197 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
39198 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
39199 + [GR_CRASH_RES] = "RLIMIT_CRASH"
39200 +};
39201 +
39202 +void
39203 +gr_log_resource(const struct task_struct *task,
39204 + const int res, const unsigned long wanted, const int gt)
39205 +{
39206 + const struct cred *cred;
39207 + unsigned long rlim;
39208 +
39209 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
39210 + return;
39211 +
39212 + // not yet supported resource
39213 + if (unlikely(!restab_log[res]))
39214 + return;
39215 +
39216 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
39217 + rlim = task_rlimit_max(task, res);
39218 + else
39219 + rlim = task_rlimit(task, res);
39220 +
39221 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
39222 + return;
39223 +
39224 + rcu_read_lock();
39225 + cred = __task_cred(task);
39226 +
39227 + if (res == RLIMIT_NPROC &&
39228 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
39229 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
39230 + goto out_rcu_unlock;
39231 + else if (res == RLIMIT_MEMLOCK &&
39232 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
39233 + goto out_rcu_unlock;
39234 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
39235 + goto out_rcu_unlock;
39236 + rcu_read_unlock();
39237 +
39238 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
39239 +
39240 + return;
39241 +out_rcu_unlock:
39242 + rcu_read_unlock();
39243 + return;
39244 +}
39245 diff -urNp linux-2.6.35.4/grsecurity/gracl_segv.c linux-2.6.35.4/grsecurity/gracl_segv.c
39246 --- linux-2.6.35.4/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
39247 +++ linux-2.6.35.4/grsecurity/gracl_segv.c 2010-09-17 20:12:37.000000000 -0400
39248 @@ -0,0 +1,310 @@
39249 +#include <linux/kernel.h>
39250 +#include <linux/mm.h>
39251 +#include <asm/uaccess.h>
39252 +#include <asm/errno.h>
39253 +#include <asm/mman.h>
39254 +#include <net/sock.h>
39255 +#include <linux/file.h>
39256 +#include <linux/fs.h>
39257 +#include <linux/net.h>
39258 +#include <linux/in.h>
39259 +#include <linux/smp_lock.h>
39260 +#include <linux/slab.h>
39261 +#include <linux/types.h>
39262 +#include <linux/sched.h>
39263 +#include <linux/timer.h>
39264 +#include <linux/gracl.h>
39265 +#include <linux/grsecurity.h>
39266 +#include <linux/grinternal.h>
39267 +
39268 +static struct crash_uid *uid_set;
39269 +static unsigned short uid_used;
39270 +static DEFINE_SPINLOCK(gr_uid_lock);
39271 +extern rwlock_t gr_inode_lock;
39272 +extern struct acl_subject_label *
39273 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
39274 + struct acl_role_label *role);
39275 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
39276 +
39277 +int
39278 +gr_init_uidset(void)
39279 +{
39280 + uid_set =
39281 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
39282 + uid_used = 0;
39283 +
39284 + return uid_set ? 1 : 0;
39285 +}
39286 +
39287 +void
39288 +gr_free_uidset(void)
39289 +{
39290 + if (uid_set)
39291 + kfree(uid_set);
39292 +
39293 + return;
39294 +}
39295 +
39296 +int
39297 +gr_find_uid(const uid_t uid)
39298 +{
39299 + struct crash_uid *tmp = uid_set;
39300 + uid_t buid;
39301 + int low = 0, high = uid_used - 1, mid;
39302 +
39303 + while (high >= low) {
39304 + mid = (low + high) >> 1;
39305 + buid = tmp[mid].uid;
39306 + if (buid == uid)
39307 + return mid;
39308 + if (buid > uid)
39309 + high = mid - 1;
39310 + if (buid < uid)
39311 + low = mid + 1;
39312 + }
39313 +
39314 + return -1;
39315 +}
39316 +
39317 +static __inline__ void
39318 +gr_insertsort(void)
39319 +{
39320 + unsigned short i, j;
39321 + struct crash_uid index;
39322 +
39323 + for (i = 1; i < uid_used; i++) {
39324 + index = uid_set[i];
39325 + j = i;
39326 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
39327 + uid_set[j] = uid_set[j - 1];
39328 + j--;
39329 + }
39330 + uid_set[j] = index;
39331 + }
39332 +
39333 + return;
39334 +}
39335 +
39336 +static __inline__ void
39337 +gr_insert_uid(const uid_t uid, const unsigned long expires)
39338 +{
39339 + int loc;
39340 +
39341 + if (uid_used == GR_UIDTABLE_MAX)
39342 + return;
39343 +
39344 + loc = gr_find_uid(uid);
39345 +
39346 + if (loc >= 0) {
39347 + uid_set[loc].expires = expires;
39348 + return;
39349 + }
39350 +
39351 + uid_set[uid_used].uid = uid;
39352 + uid_set[uid_used].expires = expires;
39353 + uid_used++;
39354 +
39355 + gr_insertsort();
39356 +
39357 + return;
39358 +}
39359 +
39360 +void
39361 +gr_remove_uid(const unsigned short loc)
39362 +{
39363 + unsigned short i;
39364 +
39365 + for (i = loc + 1; i < uid_used; i++)
39366 + uid_set[i - 1] = uid_set[i];
39367 +
39368 + uid_used--;
39369 +
39370 + return;
39371 +}
39372 +
39373 +int
39374 +gr_check_crash_uid(const uid_t uid)
39375 +{
39376 + int loc;
39377 + int ret = 0;
39378 +
39379 + if (unlikely(!gr_acl_is_enabled()))
39380 + return 0;
39381 +
39382 + spin_lock(&gr_uid_lock);
39383 + loc = gr_find_uid(uid);
39384 +
39385 + if (loc < 0)
39386 + goto out_unlock;
39387 +
39388 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
39389 + gr_remove_uid(loc);
39390 + else
39391 + ret = 1;
39392 +
39393 +out_unlock:
39394 + spin_unlock(&gr_uid_lock);
39395 + return ret;
39396 +}
39397 +
39398 +static __inline__ int
39399 +proc_is_setxid(const struct cred *cred)
39400 +{
39401 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
39402 + cred->uid != cred->fsuid)
39403 + return 1;
39404 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
39405 + cred->gid != cred->fsgid)
39406 + return 1;
39407 +
39408 + return 0;
39409 +}
39410 +static __inline__ int
39411 +gr_fake_force_sig(int sig, struct task_struct *t)
39412 +{
39413 + unsigned long int flags;
39414 + int ret, blocked, ignored;
39415 + struct k_sigaction *action;
39416 +
39417 + spin_lock_irqsave(&t->sighand->siglock, flags);
39418 + action = &t->sighand->action[sig-1];
39419 + ignored = action->sa.sa_handler == SIG_IGN;
39420 + blocked = sigismember(&t->blocked, sig);
39421 + if (blocked || ignored) {
39422 + action->sa.sa_handler = SIG_DFL;
39423 + if (blocked) {
39424 + sigdelset(&t->blocked, sig);
39425 + recalc_sigpending_and_wake(t);
39426 + }
39427 + }
39428 + if (action->sa.sa_handler == SIG_DFL)
39429 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
39430 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
39431 +
39432 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
39433 +
39434 + return ret;
39435 +}
39436 +
39437 +void
39438 +gr_handle_crash(struct task_struct *task, const int sig)
39439 +{
39440 + struct acl_subject_label *curr;
39441 + struct acl_subject_label *curr2;
39442 + struct task_struct *tsk, *tsk2;
39443 + const struct cred *cred;
39444 + const struct cred *cred2;
39445 +
39446 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
39447 + return;
39448 +
39449 + if (unlikely(!gr_acl_is_enabled()))
39450 + return;
39451 +
39452 + curr = task->acl;
39453 +
39454 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
39455 + return;
39456 +
39457 + if (time_before_eq(curr->expires, get_seconds())) {
39458 + curr->expires = 0;
39459 + curr->crashes = 0;
39460 + }
39461 +
39462 + curr->crashes++;
39463 +
39464 + if (!curr->expires)
39465 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
39466 +
39467 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
39468 + time_after(curr->expires, get_seconds())) {
39469 + rcu_read_lock();
39470 + cred = __task_cred(task);
39471 + if (cred->uid && proc_is_setxid(cred)) {
39472 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
39473 + spin_lock(&gr_uid_lock);
39474 + gr_insert_uid(cred->uid, curr->expires);
39475 + spin_unlock(&gr_uid_lock);
39476 + curr->expires = 0;
39477 + curr->crashes = 0;
39478 + read_lock(&tasklist_lock);
39479 + do_each_thread(tsk2, tsk) {
39480 + cred2 = __task_cred(tsk);
39481 + if (tsk != task && cred2->uid == cred->uid)
39482 + gr_fake_force_sig(SIGKILL, tsk);
39483 + } while_each_thread(tsk2, tsk);
39484 + read_unlock(&tasklist_lock);
39485 + } else {
39486 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
39487 + read_lock(&tasklist_lock);
39488 + do_each_thread(tsk2, tsk) {
39489 + if (likely(tsk != task)) {
39490 + curr2 = tsk->acl;
39491 +
39492 + if (curr2->device == curr->device &&
39493 + curr2->inode == curr->inode)
39494 + gr_fake_force_sig(SIGKILL, tsk);
39495 + }
39496 + } while_each_thread(tsk2, tsk);
39497 + read_unlock(&tasklist_lock);
39498 + }
39499 + rcu_read_unlock();
39500 + }
39501 +
39502 + return;
39503 +}
39504 +
39505 +int
39506 +gr_check_crash_exec(const struct file *filp)
39507 +{
39508 + struct acl_subject_label *curr;
39509 +
39510 + if (unlikely(!gr_acl_is_enabled()))
39511 + return 0;
39512 +
39513 + read_lock(&gr_inode_lock);
39514 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
39515 + filp->f_path.dentry->d_inode->i_sb->s_dev,
39516 + current->role);
39517 + read_unlock(&gr_inode_lock);
39518 +
39519 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
39520 + (!curr->crashes && !curr->expires))
39521 + return 0;
39522 +
39523 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
39524 + time_after(curr->expires, get_seconds()))
39525 + return 1;
39526 + else if (time_before_eq(curr->expires, get_seconds())) {
39527 + curr->crashes = 0;
39528 + curr->expires = 0;
39529 + }
39530 +
39531 + return 0;
39532 +}
39533 +
39534 +void
39535 +gr_handle_alertkill(struct task_struct *task)
39536 +{
39537 + struct acl_subject_label *curracl;
39538 + __u32 curr_ip;
39539 + struct task_struct *p, *p2;
39540 +
39541 + if (unlikely(!gr_acl_is_enabled()))
39542 + return;
39543 +
39544 + curracl = task->acl;
39545 + curr_ip = task->signal->curr_ip;
39546 +
39547 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
39548 + read_lock(&tasklist_lock);
39549 + do_each_thread(p2, p) {
39550 + if (p->signal->curr_ip == curr_ip)
39551 + gr_fake_force_sig(SIGKILL, p);
39552 + } while_each_thread(p2, p);
39553 + read_unlock(&tasklist_lock);
39554 + } else if (curracl->mode & GR_KILLPROC)
39555 + gr_fake_force_sig(SIGKILL, task);
39556 +
39557 + return;
39558 +}
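
The crash-tracking code above stores temporarily banned UIDs in a small array kept sorted by UID so gr_check_crash_uid() can binary-search it cheaply on every check. A compact userspace model of that table follows, folding gr_insert_uid() and gr_insertsort() into one sorted insertion; the size limit and helper names are invented for the example.

#include <stdio.h>
#include <time.h>

/* Invented userspace stand-in for struct crash_uid and GR_UIDTABLE_MAX. */
struct banned_uid { unsigned int uid; unsigned long expires; };

#define MAX_UIDS 64
static struct banned_uid uid_set[MAX_UIDS];
static unsigned short uid_used;

/* Binary search over the sorted table, as in gr_find_uid(). */
static int find_uid(unsigned int uid)
{
	int low = 0, high = uid_used - 1, mid;

	while (high >= low) {
		mid = (low + high) / 2;
		if (uid_set[mid].uid == uid)
			return mid;
		if (uid_set[mid].uid > uid)
			high = mid - 1;
		else
			low = mid + 1;
	}
	return -1;
}

/* gr_insert_uid() + gr_insertsort() folded into a single sorted insertion. */
static void insert_uid(unsigned int uid, unsigned long expires)
{
	int loc = find_uid(uid);
	unsigned short j;

	if (loc >= 0) {			/* already banned: just refresh the expiry */
		uid_set[loc].expires = expires;
		return;
	}
	if (uid_used == MAX_UIDS)
		return;

	j = uid_used++;
	while (j > 0 && uid_set[j - 1].uid > uid) {
		uid_set[j] = uid_set[j - 1];
		j--;
	}
	uid_set[j].uid = uid;
	uid_set[j].expires = expires;
}

/* A uid counts as banned while its expiry lies in the future, as in gr_check_crash_uid(). */
static int uid_banned(unsigned int uid, unsigned long now)
{
	int loc = find_uid(uid);

	return loc >= 0 && uid_set[loc].expires > now;
}

int main(void)
{
	unsigned long now = (unsigned long)time(NULL);

	insert_uid(1000, now + 30);
	insert_uid(33, now + 30);
	printf("uid 1000 banned: %d\n", uid_banned(1000, now));	/* 1 */
	printf("uid 42 banned:   %d\n", uid_banned(42, now));	/* 0 */
	return 0;
}
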
39559 diff -urNp linux-2.6.35.4/grsecurity/gracl_shm.c linux-2.6.35.4/grsecurity/gracl_shm.c
39560 --- linux-2.6.35.4/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
39561 +++ linux-2.6.35.4/grsecurity/gracl_shm.c 2010-09-17 20:12:37.000000000 -0400
39562 @@ -0,0 +1,40 @@
39563 +#include <linux/kernel.h>
39564 +#include <linux/mm.h>
39565 +#include <linux/sched.h>
39566 +#include <linux/file.h>
39567 +#include <linux/ipc.h>
39568 +#include <linux/gracl.h>
39569 +#include <linux/grsecurity.h>
39570 +#include <linux/grinternal.h>
39571 +
39572 +int
39573 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
39574 + const time_t shm_createtime, const uid_t cuid, const int shmid)
39575 +{
39576 + struct task_struct *task;
39577 +
39578 + if (!gr_acl_is_enabled())
39579 + return 1;
39580 +
39581 + rcu_read_lock();
39582 + read_lock(&tasklist_lock);
39583 +
39584 + task = find_task_by_vpid(shm_cprid);
39585 +
39586 + if (unlikely(!task))
39587 + task = find_task_by_vpid(shm_lapid);
39588 +
39589 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
39590 + (task->pid == shm_lapid)) &&
39591 + (task->acl->mode & GR_PROTSHM) &&
39592 + (task->acl != current->acl))) {
39593 + read_unlock(&tasklist_lock);
39594 + rcu_read_unlock();
39595 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
39596 + return 0;
39597 + }
39598 + read_unlock(&tasklist_lock);
39599 + rcu_read_unlock();
39600 +
39601 + return 1;
39602 +}
39603 diff -urNp linux-2.6.35.4/grsecurity/grsec_chdir.c linux-2.6.35.4/grsecurity/grsec_chdir.c
39604 --- linux-2.6.35.4/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
39605 +++ linux-2.6.35.4/grsecurity/grsec_chdir.c 2010-09-17 20:12:37.000000000 -0400
39606 @@ -0,0 +1,19 @@
39607 +#include <linux/kernel.h>
39608 +#include <linux/sched.h>
39609 +#include <linux/fs.h>
39610 +#include <linux/file.h>
39611 +#include <linux/grsecurity.h>
39612 +#include <linux/grinternal.h>
39613 +
39614 +void
39615 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
39616 +{
39617 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
39618 + if ((grsec_enable_chdir && grsec_enable_group &&
39619 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
39620 + !grsec_enable_group)) {
39621 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
39622 + }
39623 +#endif
39624 + return;
39625 +}
39626 diff -urNp linux-2.6.35.4/grsecurity/grsec_chroot.c linux-2.6.35.4/grsecurity/grsec_chroot.c
39627 --- linux-2.6.35.4/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
39628 +++ linux-2.6.35.4/grsecurity/grsec_chroot.c 2010-09-17 20:12:37.000000000 -0400
39629 @@ -0,0 +1,389 @@
39630 +#include <linux/kernel.h>
39631 +#include <linux/module.h>
39632 +#include <linux/sched.h>
39633 +#include <linux/file.h>
39634 +#include <linux/fs.h>
39635 +#include <linux/mount.h>
39636 +#include <linux/types.h>
39637 +#include <linux/pid_namespace.h>
39638 +#include <linux/grsecurity.h>
39639 +#include <linux/grinternal.h>
39640 +
39641 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
39642 +{
39643 +#ifdef CONFIG_GRKERNSEC
39644 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
39645 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
39646 + task->gr_is_chrooted = 1;
39647 + else
39648 + task->gr_is_chrooted = 0;
39649 +
39650 + task->gr_chroot_dentry = path->dentry;
39651 +#endif
39652 + return;
39653 +}
39654 +
39655 +void gr_clear_chroot_entries(struct task_struct *task)
39656 +{
39657 +#ifdef CONFIG_GRKERNSEC
39658 + task->gr_is_chrooted = 0;
39659 + task->gr_chroot_dentry = NULL;
39660 +#endif
39661 + return;
39662 +}
39663 +
39664 +int
39665 +gr_handle_chroot_unix(const pid_t pid)
39666 +{
39667 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
39668 + struct pid *spid = NULL;
39669 +
39670 + if (unlikely(!grsec_enable_chroot_unix))
39671 + return 1;
39672 +
39673 + if (likely(!proc_is_chrooted(current)))
39674 + return 1;
39675 +
39676 + rcu_read_lock();
39677 + read_lock(&tasklist_lock);
39678 +
39679 + spid = find_vpid(pid);
39680 + if (spid) {
39681 + struct task_struct *p;
39682 + p = pid_task(spid, PIDTYPE_PID);
39683 + if (unlikely(!have_same_root(current, p))) {
39684 + read_unlock(&tasklist_lock);
39685 + rcu_read_unlock();
39686 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
39687 + return 0;
39688 + }
39689 + }
39690 + read_unlock(&tasklist_lock);
39691 + rcu_read_unlock();
39692 +#endif
39693 + return 1;
39694 +}
39695 +
39696 +int
39697 +gr_handle_chroot_nice(void)
39698 +{
39699 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
39700 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
39701 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
39702 + return -EPERM;
39703 + }
39704 +#endif
39705 + return 0;
39706 +}
39707 +
39708 +int
39709 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
39710 +{
39711 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
39712 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
39713 + && proc_is_chrooted(current)) {
39714 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
39715 + return -EACCES;
39716 + }
39717 +#endif
39718 + return 0;
39719 +}
39720 +
39721 +int
39722 +gr_handle_chroot_rawio(const struct inode *inode)
39723 +{
39724 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
39725 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
39726 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
39727 + return 1;
39728 +#endif
39729 + return 0;
39730 +}
39731 +
39732 +int
39733 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
39734 +{
39735 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
39736 + struct task_struct *p;
39737 + int ret = 0;
39738 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
39739 + return ret;
39740 +
39741 + read_lock(&tasklist_lock);
39742 + do_each_pid_task(pid, type, p) {
39743 + if (!have_same_root(current, p)) {
39744 + ret = 1;
39745 + goto out;
39746 + }
39747 + } while_each_pid_task(pid, type, p);
39748 +out:
39749 + read_unlock(&tasklist_lock);
39750 + return ret;
39751 +#endif
39752 + return 0;
39753 +}
39754 +
39755 +int
39756 +gr_pid_is_chrooted(struct task_struct *p)
39757 +{
39758 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
39759 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
39760 + return 0;
39761 +
39762 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
39763 + !have_same_root(current, p)) {
39764 + return 1;
39765 + }
39766 +#endif
39767 + return 0;
39768 +}
39769 +
39770 +EXPORT_SYMBOL(gr_pid_is_chrooted);
39771 +
39772 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
39773 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
39774 +{
39775 + struct dentry *dentry = (struct dentry *)u_dentry;
39776 + struct vfsmount *mnt = (struct vfsmount *)u_mnt;
39777 + struct dentry *realroot;
39778 + struct vfsmount *realrootmnt;
39779 + struct dentry *currentroot;
39780 + struct vfsmount *currentmnt;
39781 + struct task_struct *reaper = &init_task;
39782 + int ret = 1;
39783 +
39784 + read_lock(&reaper->fs->lock);
39785 + realrootmnt = mntget(reaper->fs->root.mnt);
39786 + realroot = dget(reaper->fs->root.dentry);
39787 + read_unlock(&reaper->fs->lock);
39788 +
39789 + read_lock(&current->fs->lock);
39790 + currentmnt = mntget(current->fs->root.mnt);
39791 + currentroot = dget(current->fs->root.dentry);
39792 + read_unlock(&current->fs->lock);
39793 +
39794 + spin_lock(&dcache_lock);
39795 + for (;;) {
39796 + if (unlikely((dentry == realroot && mnt == realrootmnt)
39797 + || (dentry == currentroot && mnt == currentmnt)))
39798 + break;
39799 + if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
39800 + if (mnt->mnt_parent == mnt)
39801 + break;
39802 + dentry = mnt->mnt_mountpoint;
39803 + mnt = mnt->mnt_parent;
39804 + continue;
39805 + }
39806 + dentry = dentry->d_parent;
39807 + }
39808 + spin_unlock(&dcache_lock);
39809 +
39810 + dput(currentroot);
39811 + mntput(currentmnt);
39812 +
39813 + /* access is outside of chroot */
39814 + if (dentry == realroot && mnt == realrootmnt)
39815 + ret = 0;
39816 +
39817 + dput(realroot);
39818 + mntput(realrootmnt);
39819 + return ret;
39820 +}
39821 +#endif
39822 +
39823 +int
39824 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
39825 +{
39826 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
39827 + if (!grsec_enable_chroot_fchdir)
39828 + return 1;
39829 +
39830 + if (!proc_is_chrooted(current))
39831 + return 1;
39832 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
39833 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
39834 + return 0;
39835 + }
39836 +#endif
39837 + return 1;
39838 +}
39839 +
39840 +int
39841 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
39842 + const time_t shm_createtime)
39843 +{
39844 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
39845 + struct pid *pid = NULL;
39846 + time_t starttime;
39847 +
39848 + if (unlikely(!grsec_enable_chroot_shmat))
39849 + return 1;
39850 +
39851 + if (likely(!proc_is_chrooted(current)))
39852 + return 1;
39853 +
39854 + rcu_read_lock();
39855 + read_lock(&tasklist_lock);
39856 +
39857 + pid = find_vpid(shm_cprid);
39858 + if (pid) {
39859 + struct task_struct *p;
39860 + p = pid_task(pid, PIDTYPE_PID);
39861 + starttime = p->start_time.tv_sec;
39862 + if (unlikely(!have_same_root(current, p) &&
39863 + time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime))) {
39864 + read_unlock(&tasklist_lock);
39865 + rcu_read_unlock();
39866 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
39867 + return 0;
39868 + }
39869 + } else {
39870 + pid = find_vpid(shm_lapid);
39871 + if (pid) {
39872 + struct task_struct *p;
39873 + p = pid_task(pid, PIDTYPE_PID);
39874 + if (unlikely(!have_same_root(current, p))) {
39875 + read_unlock(&tasklist_lock);
39876 + rcu_read_unlock();
39877 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
39878 + return 0;
39879 + }
39880 + }
39881 + }
39882 +
39883 + read_unlock(&tasklist_lock);
39884 + rcu_read_unlock();
39885 +#endif
39886 + return 1;
39887 +}
39888 +
39889 +void
39890 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
39891 +{
39892 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
39893 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
39894 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
39895 +#endif
39896 + return;
39897 +}
39898 +
39899 +int
39900 +gr_handle_chroot_mknod(const struct dentry *dentry,
39901 + const struct vfsmount *mnt, const int mode)
39902 +{
39903 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
39904 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
39905 + proc_is_chrooted(current)) {
39906 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
39907 + return -EPERM;
39908 + }
39909 +#endif
39910 + return 0;
39911 +}
39912 +
39913 +int
39914 +gr_handle_chroot_mount(const struct dentry *dentry,
39915 + const struct vfsmount *mnt, const char *dev_name)
39916 +{
39917 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
39918 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
39919 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name, dentry, mnt);
39920 + return -EPERM;
39921 + }
39922 +#endif
39923 + return 0;
39924 +}
39925 +
39926 +int
39927 +gr_handle_chroot_pivot(void)
39928 +{
39929 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
39930 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
39931 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
39932 + return -EPERM;
39933 + }
39934 +#endif
39935 + return 0;
39936 +}
39937 +
39938 +int
39939 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
39940 +{
39941 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
39942 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
39943 + !gr_is_outside_chroot(dentry, mnt)) {
39944 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
39945 + return -EPERM;
39946 + }
39947 +#endif
39948 + return 0;
39949 +}
39950 +
39951 +int
39952 +gr_handle_chroot_caps(struct path *path)
39953 +{
39954 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
39955 + if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
39956 + (init_task.fs->root.dentry != path->dentry) &&
39957 + (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
39958 +
39959 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
39960 + const struct cred *old = current_cred();
39961 + struct cred *new = prepare_creds();
39962 + if (new == NULL)
39963 + return 1;
39964 +
39965 + new->cap_permitted = cap_drop(old->cap_permitted,
39966 + chroot_caps);
39967 + new->cap_inheritable = cap_drop(old->cap_inheritable,
39968 + chroot_caps);
39969 + new->cap_effective = cap_drop(old->cap_effective,
39970 + chroot_caps);
39971 +
39972 + commit_creds(new);
39973 +
39974 + return 0;
39975 + }
39976 +#endif
39977 + return 0;
39978 +}
39979 +
39980 +int
39981 +gr_handle_chroot_sysctl(const int op)
39982 +{
39983 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
39984 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
39985 + proc_is_chrooted(current))
39986 + return -EACCES;
39987 +#endif
39988 + return 0;
39989 +}
39990 +
39991 +void
39992 +gr_handle_chroot_chdir(struct path *path)
39993 +{
39994 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
39995 + if (grsec_enable_chroot_chdir)
39996 + set_fs_pwd(current->fs, path);
39997 +#endif
39998 + return;
39999 +}
40000 +
40001 +int
40002 +gr_handle_chroot_chmod(const struct dentry *dentry,
40003 + const struct vfsmount *mnt, const int mode)
40004 +{
40005 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
40006 + if (grsec_enable_chroot_chmod &&
40007 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
40008 + proc_is_chrooted(current)) {
40009 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
40010 + return -EPERM;
40011 + }
40012 +#endif
40013 + return 0;
40014 +}
40015 +
40016 +#ifdef CONFIG_SECURITY
40017 +EXPORT_SYMBOL(gr_handle_chroot_caps);
40018 +#endif
40019 diff -urNp linux-2.6.35.4/grsecurity/grsec_disabled.c linux-2.6.35.4/grsecurity/grsec_disabled.c
40020 --- linux-2.6.35.4/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
40021 +++ linux-2.6.35.4/grsecurity/grsec_disabled.c 2010-09-17 20:12:37.000000000 -0400
40022 @@ -0,0 +1,431 @@
40023 +#include <linux/kernel.h>
40024 +#include <linux/module.h>
40025 +#include <linux/sched.h>
40026 +#include <linux/file.h>
40027 +#include <linux/fs.h>
40028 +#include <linux/kdev_t.h>
40029 +#include <linux/net.h>
40030 +#include <linux/in.h>
40031 +#include <linux/ip.h>
40032 +#include <linux/skbuff.h>
40033 +#include <linux/sysctl.h>
40034 +
40035 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
40036 +void
40037 +pax_set_initial_flags(struct linux_binprm *bprm)
40038 +{
40039 + return;
40040 +}
40041 +#endif
40042 +
40043 +#ifdef CONFIG_SYSCTL
40044 +__u32
40045 +gr_handle_sysctl(const struct ctl_table * table, const int op)
40046 +{
40047 + return 0;
40048 +}
40049 +#endif
40050 +
40051 +#ifdef CONFIG_TASKSTATS
40052 +int gr_is_taskstats_denied(int pid)
40053 +{
40054 + return 0;
40055 +}
40056 +#endif
40057 +
40058 +int
40059 +gr_acl_is_enabled(void)
40060 +{
40061 + return 0;
40062 +}
40063 +
40064 +int
40065 +gr_handle_rawio(const struct inode *inode)
40066 +{
40067 + return 0;
40068 +}
40069 +
40070 +void
40071 +gr_acl_handle_psacct(struct task_struct *task, const long code)
40072 +{
40073 + return;
40074 +}
40075 +
40076 +int
40077 +gr_handle_ptrace(struct task_struct *task, const long request)
40078 +{
40079 + return 0;
40080 +}
40081 +
40082 +int
40083 +gr_handle_proc_ptrace(struct task_struct *task)
40084 +{
40085 + return 0;
40086 +}
40087 +
40088 +void
40089 +gr_learn_resource(const struct task_struct *task,
40090 + const int res, const unsigned long wanted, const int gt)
40091 +{
40092 + return;
40093 +}
40094 +
40095 +int
40096 +gr_set_acls(const int type)
40097 +{
40098 + return 0;
40099 +}
40100 +
40101 +int
40102 +gr_check_hidden_task(const struct task_struct *tsk)
40103 +{
40104 + return 0;
40105 +}
40106 +
40107 +int
40108 +gr_check_protected_task(const struct task_struct *task)
40109 +{
40110 + return 0;
40111 +}
40112 +
40113 +int
40114 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
40115 +{
40116 + return 0;
40117 +}
40118 +
40119 +void
40120 +gr_copy_label(struct task_struct *tsk)
40121 +{
40122 + return;
40123 +}
40124 +
40125 +void
40126 +gr_set_pax_flags(struct task_struct *task)
40127 +{
40128 + return;
40129 +}
40130 +
40131 +int
40132 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
40133 + const int unsafe_share)
40134 +{
40135 + return 0;
40136 +}
40137 +
40138 +void
40139 +gr_handle_delete(const ino_t ino, const dev_t dev)
40140 +{
40141 + return;
40142 +}
40143 +
40144 +void
40145 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
40146 +{
40147 + return;
40148 +}
40149 +
40150 +void
40151 +gr_handle_crash(struct task_struct *task, const int sig)
40152 +{
40153 + return;
40154 +}
40155 +
40156 +int
40157 +gr_check_crash_exec(const struct file *filp)
40158 +{
40159 + return 0;
40160 +}
40161 +
40162 +int
40163 +gr_check_crash_uid(const uid_t uid)
40164 +{
40165 + return 0;
40166 +}
40167 +
40168 +void
40169 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
40170 + struct dentry *old_dentry,
40171 + struct dentry *new_dentry,
40172 + struct vfsmount *mnt, const __u8 replace)
40173 +{
40174 + return;
40175 +}
40176 +
40177 +int
40178 +gr_search_socket(const int family, const int type, const int protocol)
40179 +{
40180 + return 1;
40181 +}
40182 +
40183 +int
40184 +gr_search_connectbind(const int mode, const struct socket *sock,
40185 + const struct sockaddr_in *addr)
40186 +{
40187 + return 0;
40188 +}
40189 +
40190 +int
40191 +gr_is_capable(const int cap)
40192 +{
40193 + return 1;
40194 +}
40195 +
40196 +int
40197 +gr_is_capable_nolog(const int cap)
40198 +{
40199 + return 1;
40200 +}
40201 +
40202 +void
40203 +gr_handle_alertkill(struct task_struct *task)
40204 +{
40205 + return;
40206 +}
40207 +
40208 +__u32
40209 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
40210 +{
40211 + return 1;
40212 +}
40213 +
40214 +__u32
40215 +gr_acl_handle_hidden_file(const struct dentry * dentry,
40216 + const struct vfsmount * mnt)
40217 +{
40218 + return 1;
40219 +}
40220 +
40221 +__u32
40222 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
40223 + const int fmode)
40224 +{
40225 + return 1;
40226 +}
40227 +
40228 +__u32
40229 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
40230 +{
40231 + return 1;
40232 +}
40233 +
40234 +__u32
40235 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
40236 +{
40237 + return 1;
40238 +}
40239 +
40240 +int
40241 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
40242 + unsigned int *vm_flags)
40243 +{
40244 + return 1;
40245 +}
40246 +
40247 +__u32
40248 +gr_acl_handle_truncate(const struct dentry * dentry,
40249 + const struct vfsmount * mnt)
40250 +{
40251 + return 1;
40252 +}
40253 +
40254 +__u32
40255 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
40256 +{
40257 + return 1;
40258 +}
40259 +
40260 +__u32
40261 +gr_acl_handle_access(const struct dentry * dentry,
40262 + const struct vfsmount * mnt, const int fmode)
40263 +{
40264 + return 1;
40265 +}
40266 +
40267 +__u32
40268 +gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
40269 + mode_t mode)
40270 +{
40271 + return 1;
40272 +}
40273 +
40274 +__u32
40275 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
40276 + mode_t mode)
40277 +{
40278 + return 1;
40279 +}
40280 +
40281 +__u32
40282 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
40283 +{
40284 + return 1;
40285 +}
40286 +
40287 +void
40288 +grsecurity_init(void)
40289 +{
40290 + return;
40291 +}
40292 +
40293 +__u32
40294 +gr_acl_handle_mknod(const struct dentry * new_dentry,
40295 + const struct dentry * parent_dentry,
40296 + const struct vfsmount * parent_mnt,
40297 + const int mode)
40298 +{
40299 + return 1;
40300 +}
40301 +
40302 +__u32
40303 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
40304 + const struct dentry * parent_dentry,
40305 + const struct vfsmount * parent_mnt)
40306 +{
40307 + return 1;
40308 +}
40309 +
40310 +__u32
40311 +gr_acl_handle_symlink(const struct dentry * new_dentry,
40312 + const struct dentry * parent_dentry,
40313 + const struct vfsmount * parent_mnt, const char *from)
40314 +{
40315 + return 1;
40316 +}
40317 +
40318 +__u32
40319 +gr_acl_handle_link(const struct dentry * new_dentry,
40320 + const struct dentry * parent_dentry,
40321 + const struct vfsmount * parent_mnt,
40322 + const struct dentry * old_dentry,
40323 + const struct vfsmount * old_mnt, const char *to)
40324 +{
40325 + return 1;
40326 +}
40327 +
40328 +int
40329 +gr_acl_handle_rename(const struct dentry *new_dentry,
40330 + const struct dentry *parent_dentry,
40331 + const struct vfsmount *parent_mnt,
40332 + const struct dentry *old_dentry,
40333 + const struct inode *old_parent_inode,
40334 + const struct vfsmount *old_mnt, const char *newname)
40335 +{
40336 + return 0;
40337 +}
40338 +
40339 +int
40340 +gr_acl_handle_filldir(const struct file *file, const char *name,
40341 + const int namelen, const ino_t ino)
40342 +{
40343 + return 1;
40344 +}
40345 +
40346 +int
40347 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
40348 + const time_t shm_createtime, const uid_t cuid, const int shmid)
40349 +{
40350 + return 1;
40351 +}
40352 +
40353 +int
40354 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
40355 +{
40356 + return 0;
40357 +}
40358 +
40359 +int
40360 +gr_search_accept(const struct socket *sock)
40361 +{
40362 + return 0;
40363 +}
40364 +
40365 +int
40366 +gr_search_listen(const struct socket *sock)
40367 +{
40368 + return 0;
40369 +}
40370 +
40371 +int
40372 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
40373 +{
40374 + return 0;
40375 +}
40376 +
40377 +__u32
40378 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
40379 +{
40380 + return 1;
40381 +}
40382 +
40383 +__u32
40384 +gr_acl_handle_creat(const struct dentry * dentry,
40385 + const struct dentry * p_dentry,
40386 + const struct vfsmount * p_mnt, const int fmode,
40387 + const int imode)
40388 +{
40389 + return 1;
40390 +}
40391 +
40392 +void
40393 +gr_acl_handle_exit(void)
40394 +{
40395 + return;
40396 +}
40397 +
40398 +int
40399 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
40400 +{
40401 + return 1;
40402 +}
40403 +
40404 +void
40405 +gr_set_role_label(const uid_t uid, const gid_t gid)
40406 +{
40407 + return;
40408 +}
40409 +
40410 +int
40411 +gr_acl_handle_procpidmem(const struct task_struct *task)
40412 +{
40413 + return 0;
40414 +}
40415 +
40416 +int
40417 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
40418 +{
40419 + return 0;
40420 +}
40421 +
40422 +int
40423 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
40424 +{
40425 + return 0;
40426 +}
40427 +
40428 +void
40429 +gr_set_kernel_label(struct task_struct *task)
40430 +{
40431 + return;
40432 +}
40433 +
40434 +int
40435 +gr_check_user_change(int real, int effective, int fs)
40436 +{
40437 + return 0;
40438 +}
40439 +
40440 +int
40441 +gr_check_group_change(int real, int effective, int fs)
40442 +{
40443 + return 0;
40444 +}
40445 +
40446 +EXPORT_SYMBOL(gr_is_capable);
40447 +EXPORT_SYMBOL(gr_is_capable_nolog);
40448 +EXPORT_SYMBOL(gr_learn_resource);
40449 +EXPORT_SYMBOL(gr_set_kernel_label);
40450 +#ifdef CONFIG_SECURITY
40451 +EXPORT_SYMBOL(gr_check_user_change);
40452 +EXPORT_SYMBOL(gr_check_group_change);
40453 +#endif
40454 diff -urNp linux-2.6.35.4/grsecurity/grsec_exec.c linux-2.6.35.4/grsecurity/grsec_exec.c
40455 --- linux-2.6.35.4/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
40456 +++ linux-2.6.35.4/grsecurity/grsec_exec.c 2010-09-17 20:12:37.000000000 -0400
40457 @@ -0,0 +1,88 @@
40458 +#include <linux/kernel.h>
40459 +#include <linux/sched.h>
40460 +#include <linux/file.h>
40461 +#include <linux/binfmts.h>
40462 +#include <linux/smp_lock.h>
40463 +#include <linux/fs.h>
40464 +#include <linux/types.h>
40465 +#include <linux/grdefs.h>
40466 +#include <linux/grinternal.h>
40467 +#include <linux/capability.h>
40468 +
40469 +#include <asm/uaccess.h>
40470 +
40471 +#ifdef CONFIG_GRKERNSEC_EXECLOG
40472 +static char gr_exec_arg_buf[132];
40473 +static DECLARE_MUTEX(gr_exec_arg_sem);
40474 +#endif
40475 +
40476 +int
40477 +gr_handle_nproc(void)
40478 +{
40479 +#ifdef CONFIG_GRKERNSEC_EXECVE
40480 + const struct cred *cred = current_cred();
40481 + if (grsec_enable_execve && cred->user &&
40482 + (atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) &&
40483 + !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE)) {
40484 + gr_log_noargs(GR_DONT_AUDIT, GR_NPROC_MSG);
40485 + return -EAGAIN;
40486 + }
40487 +#endif
40488 + return 0;
40489 +}
40490 +
40491 +void
40492 +gr_handle_exec_args(struct linux_binprm *bprm, const char __user *__user *argv)
40493 +{
40494 +#ifdef CONFIG_GRKERNSEC_EXECLOG
40495 + char *grarg = gr_exec_arg_buf;
40496 + unsigned int i, x, execlen = 0;
40497 + char c;
40498 +
40499 + if (!((grsec_enable_execlog && grsec_enable_group &&
40500 + in_group_p(grsec_audit_gid))
40501 + || (grsec_enable_execlog && !grsec_enable_group)))
40502 + return;
40503 +
40504 + down(&gr_exec_arg_sem);
40505 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
40506 +
40507 + if (unlikely(argv == NULL))
40508 + goto log;
40509 +
40510 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
40511 + const char __user *p;
40512 + unsigned int len;
40513 +
40514 + if (copy_from_user(&p, argv + i, sizeof(p)))
40515 + goto log;
40516 + if (!p)
40517 + goto log;
40518 + len = strnlen_user(p, 128 - execlen);
40519 + if (len > 128 - execlen)
40520 + len = 128 - execlen;
40521 + else if (len > 0)
40522 + len--;
40523 + if (copy_from_user(grarg + execlen, p, len))
40524 + goto log;
40525 +
40526 + /* rewrite unprintable characters */
40527 + for (x = 0; x < len; x++) {
40528 + c = *(grarg + execlen + x);
40529 + if (c < 32 || c > 126)
40530 + *(grarg + execlen + x) = ' ';
40531 + }
40532 +
40533 + execlen += len;
40534 + *(grarg + execlen) = ' ';
40535 + *(grarg + execlen + 1) = '\0';
40536 + execlen++;
40537 + }
40538 +
40539 + log:
40540 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
40541 + bprm->file->f_path.mnt, grarg);
40542 + up(&gr_exec_arg_sem);
40543 +#endif
40544 + return;
40545 +}
40546 diff -urNp linux-2.6.35.4/grsecurity/grsec_fifo.c linux-2.6.35.4/grsecurity/grsec_fifo.c
40547 --- linux-2.6.35.4/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
40548 +++ linux-2.6.35.4/grsecurity/grsec_fifo.c 2010-09-17 20:12:37.000000000 -0400
40549 @@ -0,0 +1,24 @@
40550 +#include <linux/kernel.h>
40551 +#include <linux/sched.h>
40552 +#include <linux/fs.h>
40553 +#include <linux/file.h>
40554 +#include <linux/grinternal.h>
40555 +
40556 +int
40557 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
40558 + const struct dentry *dir, const int flag, const int acc_mode)
40559 +{
40560 +#ifdef CONFIG_GRKERNSEC_FIFO
40561 + const struct cred *cred = current_cred();
40562 +
40563 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
40564 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
40565 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
40566 + (cred->fsuid != dentry->d_inode->i_uid)) {
40567 + if (!generic_permission(dentry->d_inode, acc_mode, NULL))
40568 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
40569 + return -EACCES;
40570 + }
40571 +#endif
40572 + return 0;
40573 +}
40574 diff -urNp linux-2.6.35.4/grsecurity/grsec_fork.c linux-2.6.35.4/grsecurity/grsec_fork.c
40575 --- linux-2.6.35.4/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
40576 +++ linux-2.6.35.4/grsecurity/grsec_fork.c 2010-09-17 20:12:37.000000000 -0400
40577 @@ -0,0 +1,15 @@
40578 +#include <linux/kernel.h>
40579 +#include <linux/sched.h>
40580 +#include <linux/grsecurity.h>
40581 +#include <linux/grinternal.h>
40582 +#include <linux/errno.h>
40583 +
40584 +void
40585 +gr_log_forkfail(const int retval)
40586 +{
40587 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
40588 + if (grsec_enable_forkfail && retval != -ERESTARTNOINTR)
40589 + gr_log_int(GR_DONT_AUDIT, GR_FAILFORK_MSG, retval);
40590 +#endif
40591 + return;
40592 +}
40593 diff -urNp linux-2.6.35.4/grsecurity/grsec_init.c linux-2.6.35.4/grsecurity/grsec_init.c
40594 --- linux-2.6.35.4/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
40595 +++ linux-2.6.35.4/grsecurity/grsec_init.c 2010-09-17 20:12:37.000000000 -0400
40596 @@ -0,0 +1,266 @@
40597 +#include <linux/kernel.h>
40598 +#include <linux/sched.h>
40599 +#include <linux/mm.h>
40600 +#include <linux/smp_lock.h>
40601 +#include <linux/gracl.h>
40602 +#include <linux/slab.h>
40603 +#include <linux/vmalloc.h>
40604 +#include <linux/percpu.h>
40605 +#include <linux/module.h>
40606 +
40607 +int grsec_enable_link;
40608 +int grsec_enable_dmesg;
40609 +int grsec_enable_harden_ptrace;
40610 +int grsec_enable_fifo;
40611 +int grsec_enable_execve;
40612 +int grsec_enable_execlog;
40613 +int grsec_enable_signal;
40614 +int grsec_enable_forkfail;
40615 +int grsec_enable_audit_ptrace;
40616 +int grsec_enable_time;
40617 +int grsec_enable_audit_textrel;
40618 +int grsec_enable_group;
40619 +int grsec_audit_gid;
40620 +int grsec_enable_chdir;
40621 +int grsec_enable_mount;
40622 +int grsec_enable_rofs;
40623 +int grsec_enable_chroot_findtask;
40624 +int grsec_enable_chroot_mount;
40625 +int grsec_enable_chroot_shmat;
40626 +int grsec_enable_chroot_fchdir;
40627 +int grsec_enable_chroot_double;
40628 +int grsec_enable_chroot_pivot;
40629 +int grsec_enable_chroot_chdir;
40630 +int grsec_enable_chroot_chmod;
40631 +int grsec_enable_chroot_mknod;
40632 +int grsec_enable_chroot_nice;
40633 +int grsec_enable_chroot_execlog;
40634 +int grsec_enable_chroot_caps;
40635 +int grsec_enable_chroot_sysctl;
40636 +int grsec_enable_chroot_unix;
40637 +int grsec_enable_tpe;
40638 +int grsec_tpe_gid;
40639 +int grsec_enable_blackhole;
40640 +#ifdef CONFIG_IPV6_MODULE
40641 +EXPORT_SYMBOL(grsec_enable_blackhole);
40642 +#endif
40643 +int grsec_lastack_retries;
40644 +int grsec_enable_tpe_all;
40645 +int grsec_enable_tpe_invert;
40646 +int grsec_enable_socket_all;
40647 +int grsec_socket_all_gid;
40648 +int grsec_enable_socket_client;
40649 +int grsec_socket_client_gid;
40650 +int grsec_enable_socket_server;
40651 +int grsec_socket_server_gid;
40652 +int grsec_resource_logging;
40653 +int grsec_disable_privio;
40654 +int grsec_lock;
40655 +
40656 +DEFINE_SPINLOCK(grsec_alert_lock);
40657 +unsigned long grsec_alert_wtime = 0;
40658 +unsigned long grsec_alert_fyet = 0;
40659 +
40660 +DEFINE_SPINLOCK(grsec_audit_lock);
40661 +
40662 +DEFINE_RWLOCK(grsec_exec_file_lock);
40663 +
40664 +char *gr_shared_page[4];
40665 +
40666 +char *gr_alert_log_fmt;
40667 +char *gr_audit_log_fmt;
40668 +char *gr_alert_log_buf;
40669 +char *gr_audit_log_buf;
40670 +
40671 +extern struct gr_arg *gr_usermode;
40672 +extern unsigned char *gr_system_salt;
40673 +extern unsigned char *gr_system_sum;
40674 +
40675 +void __init
40676 +grsecurity_init(void)
40677 +{
40678 + int j;
40679 + /* create the per-cpu shared pages */
40680 +
40681 +#ifdef CONFIG_X86
40682 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
40683 +#endif
40684 +
40685 + for (j = 0; j < 4; j++) {
40686 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
40687 + if (gr_shared_page[j] == NULL) {
40688 + panic("Unable to allocate grsecurity shared page");
40689 + return;
40690 + }
40691 + }
40692 +
40693 + /* allocate log buffers */
40694 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
40695 + if (!gr_alert_log_fmt) {
40696 + panic("Unable to allocate grsecurity alert log format buffer");
40697 + return;
40698 + }
40699 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
40700 + if (!gr_audit_log_fmt) {
40701 + panic("Unable to allocate grsecurity audit log format buffer");
40702 + return;
40703 + }
40704 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
40705 + if (!gr_alert_log_buf) {
40706 + panic("Unable to allocate grsecurity alert log buffer");
40707 + return;
40708 + }
40709 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
40710 + if (!gr_audit_log_buf) {
40711 + panic("Unable to allocate grsecurity audit log buffer");
40712 + return;
40713 + }
40714 +
40715 + /* allocate memory for authentication structure */
40716 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
40717 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
40718 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
40719 +
40720 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
40721 + panic("Unable to allocate grsecurity authentication structure");
40722 + return;
40723 + }
40724 +
40725 +
40726 +#ifdef CONFIG_GRKERNSEC_IO
40727 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
40728 + grsec_disable_privio = 1;
40729 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
40730 + grsec_disable_privio = 1;
40731 +#else
40732 + grsec_disable_privio = 0;
40733 +#endif
40734 +#endif
40735 +
40736 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
40737 + /* for backward compatibility, tpe_invert always defaults to on if
40738 + enabled in the kernel
40739 + */
40740 + grsec_enable_tpe_invert = 1;
40741 +#endif
40742 +
40743 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
40744 +#ifndef CONFIG_GRKERNSEC_SYSCTL
40745 + grsec_lock = 1;
40746 +#endif
40747 +
40748 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
40749 + grsec_enable_audit_textrel = 1;
40750 +#endif
40751 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
40752 + grsec_enable_group = 1;
40753 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
40754 +#endif
40755 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
40756 + grsec_enable_chdir = 1;
40757 +#endif
40758 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
40759 + grsec_enable_harden_ptrace = 1;
40760 +#endif
40761 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
40762 + grsec_enable_mount = 1;
40763 +#endif
40764 +#ifdef CONFIG_GRKERNSEC_LINK
40765 + grsec_enable_link = 1;
40766 +#endif
40767 +#ifdef CONFIG_GRKERNSEC_DMESG
40768 + grsec_enable_dmesg = 1;
40769 +#endif
40770 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
40771 + grsec_enable_blackhole = 1;
40772 + grsec_lastack_retries = 4;
40773 +#endif
40774 +#ifdef CONFIG_GRKERNSEC_FIFO
40775 + grsec_enable_fifo = 1;
40776 +#endif
40777 +#ifdef CONFIG_GRKERNSEC_EXECVE
40778 + grsec_enable_execve = 1;
40779 +#endif
40780 +#ifdef CONFIG_GRKERNSEC_EXECLOG
40781 + grsec_enable_execlog = 1;
40782 +#endif
40783 +#ifdef CONFIG_GRKERNSEC_SIGNAL
40784 + grsec_enable_signal = 1;
40785 +#endif
40786 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
40787 + grsec_enable_forkfail = 1;
40788 +#endif
40789 +#ifdef CONFIG_GRKERNSEC_TIME
40790 + grsec_enable_time = 1;
40791 +#endif
40792 +#ifdef CONFIG_GRKERNSEC_RESLOG
40793 + grsec_resource_logging = 1;
40794 +#endif
40795 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
40796 + grsec_enable_chroot_findtask = 1;
40797 +#endif
40798 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
40799 + grsec_enable_chroot_unix = 1;
40800 +#endif
40801 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
40802 + grsec_enable_chroot_mount = 1;
40803 +#endif
40804 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
40805 + grsec_enable_chroot_fchdir = 1;
40806 +#endif
40807 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
40808 + grsec_enable_chroot_shmat = 1;
40809 +#endif
40810 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
40811 + grsec_enable_audit_ptrace = 1;
40812 +#endif
40813 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
40814 + grsec_enable_chroot_double = 1;
40815 +#endif
40816 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
40817 + grsec_enable_chroot_pivot = 1;
40818 +#endif
40819 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
40820 + grsec_enable_chroot_chdir = 1;
40821 +#endif
40822 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
40823 + grsec_enable_chroot_chmod = 1;
40824 +#endif
40825 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
40826 + grsec_enable_chroot_mknod = 1;
40827 +#endif
40828 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
40829 + grsec_enable_chroot_nice = 1;
40830 +#endif
40831 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
40832 + grsec_enable_chroot_execlog = 1;
40833 +#endif
40834 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
40835 + grsec_enable_chroot_caps = 1;
40836 +#endif
40837 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
40838 + grsec_enable_chroot_sysctl = 1;
40839 +#endif
40840 +#ifdef CONFIG_GRKERNSEC_TPE
40841 + grsec_enable_tpe = 1;
40842 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
40843 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
40844 + grsec_enable_tpe_all = 1;
40845 +#endif
40846 +#endif
40847 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
40848 + grsec_enable_socket_all = 1;
40849 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
40850 +#endif
40851 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
40852 + grsec_enable_socket_client = 1;
40853 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
40854 +#endif
40855 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
40856 + grsec_enable_socket_server = 1;
40857 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
40858 +#endif
40859 +#endif
40860 +
40861 + return;
40862 +}
40863 diff -urNp linux-2.6.35.4/grsecurity/grsec_link.c linux-2.6.35.4/grsecurity/grsec_link.c
40864 --- linux-2.6.35.4/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
40865 +++ linux-2.6.35.4/grsecurity/grsec_link.c 2010-09-17 20:12:37.000000000 -0400
40866 @@ -0,0 +1,43 @@
40867 +#include <linux/kernel.h>
40868 +#include <linux/sched.h>
40869 +#include <linux/fs.h>
40870 +#include <linux/file.h>
40871 +#include <linux/grinternal.h>
40872 +
40873 +int
40874 +gr_handle_follow_link(const struct inode *parent,
40875 + const struct inode *inode,
40876 + const struct dentry *dentry, const struct vfsmount *mnt)
40877 +{
40878 +#ifdef CONFIG_GRKERNSEC_LINK
40879 + const struct cred *cred = current_cred();
40880 +
40881 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
40882 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
40883 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
40884 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
40885 + return -EACCES;
40886 + }
40887 +#endif
40888 + return 0;
40889 +}
40890 +
40891 +int
40892 +gr_handle_hardlink(const struct dentry *dentry,
40893 + const struct vfsmount *mnt,
40894 + struct inode *inode, const int mode, const char *to)
40895 +{
40896 +#ifdef CONFIG_GRKERNSEC_LINK
40897 + const struct cred *cred = current_cred();
40898 +
40899 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
40900 + (!S_ISREG(mode) || (mode & S_ISUID) ||
40901 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
40902 + (generic_permission(inode, MAY_READ | MAY_WRITE, NULL))) &&
40903 + !capable(CAP_FOWNER) && cred->uid) {
40904 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
40905 + return -EPERM;
40906 + }
40907 +#endif
40908 + return 0;
40909 +}
40910 diff -urNp linux-2.6.35.4/grsecurity/grsec_log.c linux-2.6.35.4/grsecurity/grsec_log.c
40911 --- linux-2.6.35.4/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
40912 +++ linux-2.6.35.4/grsecurity/grsec_log.c 2010-09-17 20:12:37.000000000 -0400
40913 @@ -0,0 +1,306 @@
40914 +#include <linux/kernel.h>
40915 +#include <linux/sched.h>
40916 +#include <linux/file.h>
40917 +#include <linux/tty.h>
40918 +#include <linux/fs.h>
40919 +#include <linux/grinternal.h>
40920 +
40921 +#ifdef CONFIG_TREE_PREEMPT_RCU
40922 +#define DISABLE_PREEMPT() preempt_disable()
40923 +#define ENABLE_PREEMPT() preempt_enable()
40924 +#else
40925 +#define DISABLE_PREEMPT()
40926 +#define ENABLE_PREEMPT()
40927 +#endif
40928 +
40929 +#define BEGIN_LOCKS(x) \
40930 + DISABLE_PREEMPT(); \
40931 + rcu_read_lock(); \
40932 + read_lock(&tasklist_lock); \
40933 + read_lock(&grsec_exec_file_lock); \
40934 + if (x != GR_DO_AUDIT) \
40935 + spin_lock(&grsec_alert_lock); \
40936 + else \
40937 + spin_lock(&grsec_audit_lock)
40938 +
40939 +#define END_LOCKS(x) \
40940 + if (x != GR_DO_AUDIT) \
40941 + spin_unlock(&grsec_alert_lock); \
40942 + else \
40943 + spin_unlock(&grsec_audit_lock); \
40944 + read_unlock(&grsec_exec_file_lock); \
40945 + read_unlock(&tasklist_lock); \
40946 + rcu_read_unlock(); \
40947 + ENABLE_PREEMPT(); \
40948 + if (x == GR_DONT_AUDIT) \
40949 + gr_handle_alertkill(current)
40950 +
40951 +enum {
40952 + FLOODING,
40953 + NO_FLOODING
40954 +};
40955 +
40956 +extern char *gr_alert_log_fmt;
40957 +extern char *gr_audit_log_fmt;
40958 +extern char *gr_alert_log_buf;
40959 +extern char *gr_audit_log_buf;
40960 +
40961 +static int gr_log_start(int audit)
40962 +{
40963 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
40964 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
40965 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
40966 +
40967 + if (audit == GR_DO_AUDIT)
40968 + goto set_fmt;
40969 +
40970 + if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
40971 + grsec_alert_wtime = jiffies;
40972 + grsec_alert_fyet = 0;
40973 + } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
40974 + grsec_alert_fyet++;
40975 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
40976 + grsec_alert_wtime = jiffies;
40977 + grsec_alert_fyet++;
40978 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
40979 + return FLOODING;
40980 + } else return FLOODING;
40981 +
40982 +set_fmt:
40983 + memset(buf, 0, PAGE_SIZE);
40984 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
40985 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
40986 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
40987 + } else if (current->signal->curr_ip) {
40988 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
40989 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
40990 + } else if (gr_acl_is_enabled()) {
40991 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
40992 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
40993 + } else {
40994 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
40995 + strcpy(buf, fmt);
40996 + }
40997 +
40998 + return NO_FLOODING;
40999 +}
41000 +
41001 +static void gr_log_middle(int audit, const char *msg, va_list ap)
41002 + __attribute__ ((format (printf, 2, 0)));
41003 +
41004 +static void gr_log_middle(int audit, const char *msg, va_list ap)
41005 +{
41006 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
41007 + unsigned int len = strlen(buf);
41008 +
41009 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
41010 +
41011 + return;
41012 +}
41013 +
41014 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
41015 + __attribute__ ((format (printf, 2, 3)));
41016 +
41017 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
41018 +{
41019 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
41020 + unsigned int len = strlen(buf);
41021 + va_list ap;
41022 +
41023 + va_start(ap, msg);
41024 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
41025 + va_end(ap);
41026 +
41027 + return;
41028 +}
41029 +
41030 +static void gr_log_end(int audit)
41031 +{
41032 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
41033 + unsigned int len = strlen(buf);
41034 +
41035 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->parent)));
41036 + printk("%s\n", buf);
41037 +
41038 + return;
41039 +}
41040 +
41041 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
41042 +{
41043 + int logtype;
41044 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
41045 + char *str1, *str2, *str3;
41046 + void *voidptr;
41047 + int num1, num2;
41048 + unsigned long ulong1, ulong2;
41049 + struct dentry *dentry;
41050 + struct vfsmount *mnt;
41051 + struct file *file;
41052 + struct task_struct *task;
41053 + const struct cred *cred, *pcred;
41054 + va_list ap;
41055 +
41056 + BEGIN_LOCKS(audit);
41057 + logtype = gr_log_start(audit);
41058 + if (logtype == FLOODING) {
41059 + END_LOCKS(audit);
41060 + return;
41061 + }
41062 + va_start(ap, argtypes);
41063 + switch (argtypes) {
41064 + case GR_TTYSNIFF:
41065 + task = va_arg(ap, struct task_struct *);
41066 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->parent->comm, task->parent->pid);
41067 + break;
41068 + case GR_SYSCTL_HIDDEN:
41069 + str1 = va_arg(ap, char *);
41070 + gr_log_middle_varargs(audit, msg, result, str1);
41071 + break;
41072 + case GR_RBAC:
41073 + dentry = va_arg(ap, struct dentry *);
41074 + mnt = va_arg(ap, struct vfsmount *);
41075 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
41076 + break;
41077 + case GR_RBAC_STR:
41078 + dentry = va_arg(ap, struct dentry *);
41079 + mnt = va_arg(ap, struct vfsmount *);
41080 + str1 = va_arg(ap, char *);
41081 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
41082 + break;
41083 + case GR_STR_RBAC:
41084 + str1 = va_arg(ap, char *);
41085 + dentry = va_arg(ap, struct dentry *);
41086 + mnt = va_arg(ap, struct vfsmount *);
41087 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
41088 + break;
41089 + case GR_RBAC_MODE2:
41090 + dentry = va_arg(ap, struct dentry *);
41091 + mnt = va_arg(ap, struct vfsmount *);
41092 + str1 = va_arg(ap, char *);
41093 + str2 = va_arg(ap, char *);
41094 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
41095 + break;
41096 + case GR_RBAC_MODE3:
41097 + dentry = va_arg(ap, struct dentry *);
41098 + mnt = va_arg(ap, struct vfsmount *);
41099 + str1 = va_arg(ap, char *);
41100 + str2 = va_arg(ap, char *);
41101 + str3 = va_arg(ap, char *);
41102 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
41103 + break;
41104 + case GR_FILENAME:
41105 + dentry = va_arg(ap, struct dentry *);
41106 + mnt = va_arg(ap, struct vfsmount *);
41107 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
41108 + break;
41109 + case GR_STR_FILENAME:
41110 + str1 = va_arg(ap, char *);
41111 + dentry = va_arg(ap, struct dentry *);
41112 + mnt = va_arg(ap, struct vfsmount *);
41113 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
41114 + break;
41115 + case GR_FILENAME_STR:
41116 + dentry = va_arg(ap, struct dentry *);
41117 + mnt = va_arg(ap, struct vfsmount *);
41118 + str1 = va_arg(ap, char *);
41119 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
41120 + break;
41121 + case GR_FILENAME_TWO_INT:
41122 + dentry = va_arg(ap, struct dentry *);
41123 + mnt = va_arg(ap, struct vfsmount *);
41124 + num1 = va_arg(ap, int);
41125 + num2 = va_arg(ap, int);
41126 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
41127 + break;
41128 + case GR_FILENAME_TWO_INT_STR:
41129 + dentry = va_arg(ap, struct dentry *);
41130 + mnt = va_arg(ap, struct vfsmount *);
41131 + num1 = va_arg(ap, int);
41132 + num2 = va_arg(ap, int);
41133 + str1 = va_arg(ap, char *);
41134 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
41135 + break;
41136 + case GR_TEXTREL:
41137 + file = va_arg(ap, struct file *);
41138 + ulong1 = va_arg(ap, unsigned long);
41139 + ulong2 = va_arg(ap, unsigned long);
41140 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
41141 + break;
41142 + case GR_PTRACE:
41143 + task = va_arg(ap, struct task_struct *);
41144 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
41145 + break;
41146 + case GR_RESOURCE:
41147 + task = va_arg(ap, struct task_struct *);
41148 + cred = __task_cred(task);
41149 + pcred = __task_cred(task->parent);
41150 + ulong1 = va_arg(ap, unsigned long);
41151 + str1 = va_arg(ap, char *);
41152 + ulong2 = va_arg(ap, unsigned long);
41153 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->parent->comm, task->parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
41154 + break;
41155 + case GR_CAP:
41156 + task = va_arg(ap, struct task_struct *);
41157 + cred = __task_cred(task);
41158 + pcred = __task_cred(task->parent);
41159 + str1 = va_arg(ap, char *);
41160 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->parent->comm, task->parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
41161 + break;
41162 + case GR_SIG:
41163 + str1 = va_arg(ap, char *);
41164 + voidptr = va_arg(ap, void *);
41165 + gr_log_middle_varargs(audit, msg, str1, voidptr);
41166 + break;
41167 + case GR_SIG2:
41168 + task = va_arg(ap, struct task_struct *);
41169 + cred = __task_cred(task);
41170 + pcred = __task_cred(task->parent);
41171 + num1 = va_arg(ap, int);
41172 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->parent->comm, task->parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
41173 + break;
41174 + case GR_CRASH1:
41175 + task = va_arg(ap, struct task_struct *);
41176 + cred = __task_cred(task);
41177 + pcred = __task_cred(task->parent);
41178 + ulong1 = va_arg(ap, unsigned long);
41179 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->parent->comm, task->parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
41180 + break;
41181 + case GR_CRASH2:
41182 + task = va_arg(ap, struct task_struct *);
41183 + cred = __task_cred(task);
41184 + pcred = __task_cred(task->parent);
41185 + ulong1 = va_arg(ap, unsigned long);
41186 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->parent->comm, task->parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
41187 + break;
41188 + case GR_PSACCT:
41189 + {
41190 + unsigned int wday, cday;
41191 + __u8 whr, chr;
41192 + __u8 wmin, cmin;
41193 + __u8 wsec, csec;
41194 + char cur_tty[64] = { 0 };
41195 + char parent_tty[64] = { 0 };
41196 +
41197 + task = va_arg(ap, struct task_struct *);
41198 + wday = va_arg(ap, unsigned int);
41199 + cday = va_arg(ap, unsigned int);
41200 + whr = va_arg(ap, int);
41201 + chr = va_arg(ap, int);
41202 + wmin = va_arg(ap, int);
41203 + cmin = va_arg(ap, int);
41204 + wsec = va_arg(ap, int);
41205 + csec = va_arg(ap, int);
41206 + ulong1 = va_arg(ap, unsigned long);
41207 + cred = __task_cred(task);
41208 + pcred = __task_cred(task->parent);
41209 +
41210 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->parent->comm, task->parent->pid, &task->parent->signal->curr_ip, tty_name(task->parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
41211 + }
41212 + break;
41213 + default:
41214 + gr_log_middle(audit, msg, ap);
41215 + }
41216 + va_end(ap);
41217 + gr_log_end(audit);
41218 + END_LOCKS(audit);
41219 +}
41220 diff -urNp linux-2.6.35.4/grsecurity/grsec_mem.c linux-2.6.35.4/grsecurity/grsec_mem.c
41221 --- linux-2.6.35.4/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
41222 +++ linux-2.6.35.4/grsecurity/grsec_mem.c 2010-09-17 20:12:37.000000000 -0400
41223 @@ -0,0 +1,85 @@
41224 +#include <linux/kernel.h>
41225 +#include <linux/sched.h>
41226 +#include <linux/mm.h>
41227 +#include <linux/mman.h>
41228 +#include <linux/grinternal.h>
41229 +
41230 +void
41231 +gr_handle_ioperm(void)
41232 +{
41233 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
41234 + return;
41235 +}
41236 +
41237 +void
41238 +gr_handle_iopl(void)
41239 +{
41240 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
41241 + return;
41242 +}
41243 +
41244 +void
41245 +gr_handle_mem_write(void)
41246 +{
41247 + gr_log_noargs(GR_DONT_AUDIT, GR_MEM_WRITE_MSG);
41248 + return;
41249 +}
41250 +
41251 +void
41252 +gr_handle_kmem_write(void)
41253 +{
41254 + gr_log_noargs(GR_DONT_AUDIT, GR_KMEM_MSG);
41255 + return;
41256 +}
41257 +
41258 +void
41259 +gr_handle_open_port(void)
41260 +{
41261 + gr_log_noargs(GR_DONT_AUDIT, GR_PORT_OPEN_MSG);
41262 + return;
41263 +}
41264 +
41265 +int
41266 +gr_handle_mem_mmap(const unsigned long offset, struct vm_area_struct *vma)
41267 +{
41268 + unsigned long start, end;
41269 +
41270 + start = offset;
41271 + end = start + vma->vm_end - vma->vm_start;
41272 +
41273 + if (start > end) {
41274 + gr_log_noargs(GR_DONT_AUDIT, GR_MEM_MMAP_MSG);
41275 + return -EPERM;
41276 + }
41277 +
41278 + /* allowed ranges : ISA I/O BIOS */
41279 + if ((start >= __pa(high_memory))
41280 +#if defined(CONFIG_X86) || defined(CONFIG_PPC)
41281 + || (start >= 0x000a0000 && end <= 0x00100000)
41282 + || (start >= 0x00000000 && end <= 0x00001000)
41283 +#endif
41284 + )
41285 + return 0;
41286 +
41287 + if (vma->vm_flags & VM_WRITE) {
41288 + gr_log_noargs(GR_DONT_AUDIT, GR_MEM_MMAP_MSG);
41289 + return -EPERM;
41290 + } else
41291 + vma->vm_flags &= ~VM_MAYWRITE;
41292 +
41293 + return 0;
41294 +}
41295 +
41296 +void
41297 +gr_log_nonroot_mod_load(const char *modname)
41298 +{
41299 + gr_log_str(GR_DONT_AUDIT, GR_NONROOT_MODLOAD_MSG, modname);
41300 + return;
41301 +}
41302 +
41303 +void
41304 +gr_handle_vm86(void)
41305 +{
41306 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
41307 + return;
41308 +}
41309 diff -urNp linux-2.6.35.4/grsecurity/grsec_mount.c linux-2.6.35.4/grsecurity/grsec_mount.c
41310 --- linux-2.6.35.4/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
41311 +++ linux-2.6.35.4/grsecurity/grsec_mount.c 2010-09-17 20:12:37.000000000 -0400
41312 @@ -0,0 +1,62 @@
41313 +#include <linux/kernel.h>
41314 +#include <linux/sched.h>
41315 +#include <linux/mount.h>
41316 +#include <linux/grsecurity.h>
41317 +#include <linux/grinternal.h>
41318 +
41319 +void
41320 +gr_log_remount(const char *devname, const int retval)
41321 +{
41322 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
41323 + if (grsec_enable_mount && (retval >= 0))
41324 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
41325 +#endif
41326 + return;
41327 +}
41328 +
41329 +void
41330 +gr_log_unmount(const char *devname, const int retval)
41331 +{
41332 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
41333 + if (grsec_enable_mount && (retval >= 0))
41334 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
41335 +#endif
41336 + return;
41337 +}
41338 +
41339 +void
41340 +gr_log_mount(const char *from, const char *to, const int retval)
41341 +{
41342 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
41343 + if (grsec_enable_mount && (retval >= 0))
41344 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from, to);
41345 +#endif
41346 + return;
41347 +}
41348 +
41349 +int
41350 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
41351 +{
41352 +#ifdef CONFIG_GRKERNSEC_ROFS
41353 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
41354 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
41355 + return -EPERM;
41356 + } else
41357 + return 0;
41358 +#endif
41359 + return 0;
41360 +}
41361 +
41362 +int
41363 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
41364 +{
41365 +#ifdef CONFIG_GRKERNSEC_ROFS
41366 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
41367 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
41368 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
41369 + return -EPERM;
41370 + } else
41371 + return 0;
41372 +#endif
41373 + return 0;
41374 +}
41375 diff -urNp linux-2.6.35.4/grsecurity/grsec_ptrace.c linux-2.6.35.4/grsecurity/grsec_ptrace.c
41376 --- linux-2.6.35.4/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
41377 +++ linux-2.6.35.4/grsecurity/grsec_ptrace.c 2010-09-17 20:12:37.000000000 -0400
41378 @@ -0,0 +1,14 @@
41379 +#include <linux/kernel.h>
41380 +#include <linux/sched.h>
41381 +#include <linux/grinternal.h>
41382 +#include <linux/grsecurity.h>
41383 +
41384 +void
41385 +gr_audit_ptrace(struct task_struct *task)
41386 +{
41387 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
41388 + if (grsec_enable_audit_ptrace)
41389 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
41390 +#endif
41391 + return;
41392 +}
41393 diff -urNp linux-2.6.35.4/grsecurity/grsec_sig.c linux-2.6.35.4/grsecurity/grsec_sig.c
41394 --- linux-2.6.35.4/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
41395 +++ linux-2.6.35.4/grsecurity/grsec_sig.c 2010-09-17 20:12:37.000000000 -0400
41396 @@ -0,0 +1,65 @@
41397 +#include <linux/kernel.h>
41398 +#include <linux/sched.h>
41399 +#include <linux/delay.h>
41400 +#include <linux/grsecurity.h>
41401 +#include <linux/grinternal.h>
41402 +
41403 +char *signames[] = {
41404 + [SIGSEGV] = "Segmentation fault",
41405 + [SIGILL] = "Illegal instruction",
41406 + [SIGABRT] = "Abort",
41407 + [SIGBUS] = "Invalid alignment/Bus error"
41408 +};
41409 +
41410 +void
41411 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
41412 +{
41413 +#ifdef CONFIG_GRKERNSEC_SIGNAL
41414 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
41415 + (sig == SIGABRT) || (sig == SIGBUS))) {
41416 + if (t->pid == current->pid) {
41417 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
41418 + } else {
41419 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
41420 + }
41421 + }
41422 +#endif
41423 + return;
41424 +}
41425 +
41426 +int
41427 +gr_handle_signal(const struct task_struct *p, const int sig)
41428 +{
41429 +#ifdef CONFIG_GRKERNSEC
41430 + if (current->pid > 1 && gr_check_protected_task(p)) {
41431 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
41432 + return -EPERM;
41433 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
41434 + return -EPERM;
41435 + }
41436 +#endif
41437 + return 0;
41438 +}
41439 +
41440 +void gr_handle_brute_attach(struct task_struct *p)
41441 +{
41442 +#ifdef CONFIG_GRKERNSEC_BRUTE
41443 + read_lock(&tasklist_lock);
41444 + read_lock(&grsec_exec_file_lock);
41445 + if (p->parent && p->parent->exec_file == p->exec_file)
41446 + p->parent->brute = 1;
41447 + read_unlock(&grsec_exec_file_lock);
41448 + read_unlock(&tasklist_lock);
41449 +#endif
41450 + return;
41451 +}
41452 +
41453 +void gr_handle_brute_check(void)
41454 +{
41455 +#ifdef CONFIG_GRKERNSEC_BRUTE
41456 + if (current->brute)
41457 + msleep(30 * 1000);
41458 +#endif
41459 + return;
41460 +}
41461 +
41462 diff -urNp linux-2.6.35.4/grsecurity/grsec_sock.c linux-2.6.35.4/grsecurity/grsec_sock.c
41463 --- linux-2.6.35.4/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
41464 +++ linux-2.6.35.4/grsecurity/grsec_sock.c 2010-09-17 20:12:37.000000000 -0400
41465 @@ -0,0 +1,271 @@
41466 +#include <linux/kernel.h>
41467 +#include <linux/module.h>
41468 +#include <linux/sched.h>
41469 +#include <linux/file.h>
41470 +#include <linux/net.h>
41471 +#include <linux/in.h>
41472 +#include <linux/ip.h>
41473 +#include <net/sock.h>
41474 +#include <net/inet_sock.h>
41475 +#include <linux/grsecurity.h>
41476 +#include <linux/grinternal.h>
41477 +#include <linux/gracl.h>
41478 +
41479 +kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
41480 +EXPORT_SYMBOL(gr_cap_rtnetlink);
41481 +
41482 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
41483 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
41484 +
41485 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
41486 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
41487 +
41488 +#ifdef CONFIG_UNIX_MODULE
41489 +EXPORT_SYMBOL(gr_acl_handle_unix);
41490 +EXPORT_SYMBOL(gr_acl_handle_mknod);
41491 +EXPORT_SYMBOL(gr_handle_chroot_unix);
41492 +EXPORT_SYMBOL(gr_handle_create);
41493 +#endif
41494 +
41495 +#ifdef CONFIG_GRKERNSEC
41496 +#define gr_conn_table_size 32749
41497 +struct conn_table_entry {
41498 + struct conn_table_entry *next;
41499 + struct signal_struct *sig;
41500 +};
41501 +
41502 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
41503 +DEFINE_SPINLOCK(gr_conn_table_lock);
41504 +
41505 +extern const char * gr_socktype_to_name(unsigned char type);
41506 +extern const char * gr_proto_to_name(unsigned char proto);
41507 +
41508 +static __inline__ int
41509 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
41510 +{
41511 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
41512 +}
41513 +
41514 +static __inline__ int
41515 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
41516 + __u16 sport, __u16 dport)
41517 +{
41518 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
41519 + sig->gr_sport == sport && sig->gr_dport == dport))
41520 + return 1;
41521 + else
41522 + return 0;
41523 +}
41524 +
41525 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
41526 +{
41527 + struct conn_table_entry **match;
41528 + unsigned int index;
41529 +
41530 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
41531 + sig->gr_sport, sig->gr_dport,
41532 + gr_conn_table_size);
41533 +
41534 + newent->sig = sig;
41535 +
41536 + match = &gr_conn_table[index];
41537 + newent->next = *match;
41538 + *match = newent;
41539 +
41540 + return;
41541 +}
41542 +
41543 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
41544 +{
41545 + struct conn_table_entry *match, *last = NULL;
41546 + unsigned int index;
41547 +
41548 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
41549 + sig->gr_sport, sig->gr_dport,
41550 + gr_conn_table_size);
41551 +
41552 + match = gr_conn_table[index];
41553 + while (match && !conn_match(match->sig,
41554 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
41555 + sig->gr_dport)) {
41556 + last = match;
41557 + match = match->next;
41558 + }
41559 +
41560 + if (match) {
41561 + if (last)
41562 + last->next = match->next;
41563 + else
41564 + gr_conn_table[index] = NULL;
41565 + kfree(match);
41566 + }
41567 +
41568 + return;
41569 +}
41570 +
41571 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
41572 + __u16 sport, __u16 dport)
41573 +{
41574 + struct conn_table_entry *match;
41575 + unsigned int index;
41576 +
41577 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
41578 +
41579 + match = gr_conn_table[index];
41580 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
41581 + match = match->next;
41582 +
41583 + if (match)
41584 + return match->sig;
41585 + else
41586 + return NULL;
41587 +}
41588 +
41589 +#endif
41590 +
41591 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
41592 +{
41593 +#ifdef CONFIG_GRKERNSEC
41594 + struct signal_struct *sig = task->signal;
41595 + struct conn_table_entry *newent;
41596 +
41597 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
41598 + if (newent == NULL)
41599 + return;
41600 + /* no bh lock needed since we are called with bh disabled */
41601 + spin_lock(&gr_conn_table_lock);
41602 + gr_del_task_from_ip_table_nolock(sig);
41603 + sig->gr_saddr = inet->inet_rcv_saddr;
41604 + sig->gr_daddr = inet->inet_daddr;
41605 + sig->gr_sport = inet->inet_sport;
41606 + sig->gr_dport = inet->inet_dport;
41607 + gr_add_to_task_ip_table_nolock(sig, newent);
41608 + spin_unlock(&gr_conn_table_lock);
41609 +#endif
41610 + return;
41611 +}
41612 +
41613 +void gr_del_task_from_ip_table(struct task_struct *task)
41614 +{
41615 +#ifdef CONFIG_GRKERNSEC
41616 + spin_lock_bh(&gr_conn_table_lock);
41617 + gr_del_task_from_ip_table_nolock(task->signal);
41618 + spin_unlock_bh(&gr_conn_table_lock);
41619 +#endif
41620 + return;
41621 +}
41622 +
41623 +void
41624 +gr_attach_curr_ip(const struct sock *sk)
41625 +{
41626 +#ifdef CONFIG_GRKERNSEC
41627 + struct signal_struct *p, *set;
41628 + const struct inet_sock *inet = inet_sk(sk);
41629 +
41630 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
41631 + return;
41632 +
41633 + set = current->signal;
41634 +
41635 + spin_lock_bh(&gr_conn_table_lock);
41636 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
41637 + inet->inet_dport, inet->inet_sport);
41638 + if (unlikely(p != NULL)) {
41639 + set->curr_ip = p->curr_ip;
41640 + set->used_accept = 1;
41641 + gr_del_task_from_ip_table_nolock(p);
41642 + spin_unlock_bh(&gr_conn_table_lock);
41643 + return;
41644 + }
41645 + spin_unlock_bh(&gr_conn_table_lock);
41646 +
41647 + set->curr_ip = inet->inet_daddr;
41648 + set->used_accept = 1;
41649 +#endif
41650 + return;
41651 +}
41652 +
41653 +int
41654 +gr_handle_sock_all(const int family, const int type, const int protocol)
41655 +{
41656 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
41657 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
41658 + (family != AF_UNIX) && (family != AF_LOCAL)) {
41659 + gr_log_int_str2(GR_DONT_AUDIT, GR_SOCK2_MSG, family, gr_socktype_to_name(type), gr_proto_to_name(protocol));
41660 + return -EACCES;
41661 + }
41662 +#endif
41663 + return 0;
41664 +}
41665 +
41666 +int
41667 +gr_handle_sock_server(const struct sockaddr *sck)
41668 +{
41669 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
41670 + if (grsec_enable_socket_server &&
41671 + in_group_p(grsec_socket_server_gid) &&
41672 + sck && (sck->sa_family != AF_UNIX) &&
41673 + (sck->sa_family != AF_LOCAL)) {
41674 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
41675 + return -EACCES;
41676 + }
41677 +#endif
41678 + return 0;
41679 +}
41680 +
41681 +int
41682 +gr_handle_sock_server_other(const struct sock *sck)
41683 +{
41684 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
41685 + if (grsec_enable_socket_server &&
41686 + in_group_p(grsec_socket_server_gid) &&
41687 + sck && (sck->sk_family != AF_UNIX) &&
41688 + (sck->sk_family != AF_LOCAL)) {
41689 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
41690 + return -EACCES;
41691 + }
41692 +#endif
41693 + return 0;
41694 +}
41695 +
41696 +int
41697 +gr_handle_sock_client(const struct sockaddr *sck)
41698 +{
41699 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
41700 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
41701 + sck && (sck->sa_family != AF_UNIX) &&
41702 + (sck->sa_family != AF_LOCAL)) {
41703 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
41704 + return -EACCES;
41705 + }
41706 +#endif
41707 + return 0;
41708 +}
41709 +
41710 +kernel_cap_t
41711 +gr_cap_rtnetlink(struct sock *sock)
41712 +{
41713 +#ifdef CONFIG_GRKERNSEC
41714 + if (!gr_acl_is_enabled())
41715 + return current_cap();
41716 + else if (sock->sk_protocol == NETLINK_ISCSI &&
41717 + cap_raised(current_cap(), CAP_SYS_ADMIN) &&
41718 + gr_is_capable(CAP_SYS_ADMIN))
41719 + return current_cap();
41720 + else if (sock->sk_protocol == NETLINK_AUDIT &&
41721 + cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
41722 + gr_is_capable(CAP_AUDIT_WRITE) &&
41723 + cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
41724 + gr_is_capable(CAP_AUDIT_CONTROL))
41725 + return current_cap();
41726 + else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
41727 + ((sock->sk_protocol == NETLINK_ROUTE) ?
41728 + gr_is_capable_nolog(CAP_NET_ADMIN) :
41729 + gr_is_capable(CAP_NET_ADMIN)))
41730 + return current_cap();
41731 + else
41732 + return __cap_empty_set;
41733 +#else
41734 + return current_cap();
41735 +#endif
41736 +}
41737 diff -urNp linux-2.6.35.4/grsecurity/grsec_sysctl.c linux-2.6.35.4/grsecurity/grsec_sysctl.c
41738 --- linux-2.6.35.4/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
41739 +++ linux-2.6.35.4/grsecurity/grsec_sysctl.c 2010-09-17 20:18:57.000000000 -0400
41740 @@ -0,0 +1,424 @@
41741 +#include <linux/kernel.h>
41742 +#include <linux/sched.h>
41743 +#include <linux/sysctl.h>
41744 +#include <linux/grsecurity.h>
41745 +#include <linux/grinternal.h>
41746 +
41747 +int
41748 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
41749 +{
41750 +#ifdef CONFIG_GRKERNSEC_SYSCTL
41751 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
41752 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
41753 + return -EACCES;
41754 + }
41755 +#endif
41756 + return 0;
41757 +}
41758 +
41759 +#ifdef CONFIG_GRKERNSEC_ROFS
41760 +static int __maybe_unused one = 1;
41761 +#endif
41762 +
41763 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
41764 +struct ctl_table grsecurity_table[] = {
41765 +#ifdef CONFIG_GRKERNSEC_SYSCTL
41766 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
41767 +#ifdef CONFIG_GRKERNSEC_IO
41768 + {
41769 + .procname = "disable_priv_io",
41770 + .data = &grsec_disable_privio,
41771 + .maxlen = sizeof(int),
41772 + .mode = 0600,
41773 + .proc_handler = &proc_dointvec,
41774 + },
41775 +#endif
41776 +#endif
41777 +#ifdef CONFIG_GRKERNSEC_LINK
41778 + {
41779 + .procname = "linking_restrictions",
41780 + .data = &grsec_enable_link,
41781 + .maxlen = sizeof(int),
41782 + .mode = 0600,
41783 + .proc_handler = &proc_dointvec,
41784 + },
41785 +#endif
41786 +#ifdef CONFIG_GRKERNSEC_FIFO
41787 + {
41788 + .procname = "fifo_restrictions",
41789 + .data = &grsec_enable_fifo,
41790 + .maxlen = sizeof(int),
41791 + .mode = 0600,
41792 + .proc_handler = &proc_dointvec,
41793 + },
41794 +#endif
41795 +#ifdef CONFIG_GRKERNSEC_EXECVE
41796 + {
41797 + .procname = "execve_limiting",
41798 + .data = &grsec_enable_execve,
41799 + .maxlen = sizeof(int),
41800 + .mode = 0600,
41801 + .proc_handler = &proc_dointvec,
41802 + },
41803 +#endif
41804 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
41805 + {
41806 + .procname = "ip_blackhole",
41807 + .data = &grsec_enable_blackhole,
41808 + .maxlen = sizeof(int),
41809 + .mode = 0600,
41810 + .proc_handler = &proc_dointvec,
41811 + },
41812 + {
41813 + .procname = "lastack_retries",
41814 + .data = &grsec_lastack_retries,
41815 + .maxlen = sizeof(int),
41816 + .mode = 0600,
41817 + .proc_handler = &proc_dointvec,
41818 + },
41819 +#endif
41820 +#ifdef CONFIG_GRKERNSEC_EXECLOG
41821 + {
41822 + .procname = "exec_logging",
41823 + .data = &grsec_enable_execlog,
41824 + .maxlen = sizeof(int),
41825 + .mode = 0600,
41826 + .proc_handler = &proc_dointvec,
41827 + },
41828 +#endif
41829 +#ifdef CONFIG_GRKERNSEC_SIGNAL
41830 + {
41831 + .procname = "signal_logging",
41832 + .data = &grsec_enable_signal,
41833 + .maxlen = sizeof(int),
41834 + .mode = 0600,
41835 + .proc_handler = &proc_dointvec,
41836 + },
41837 +#endif
41838 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
41839 + {
41840 + .procname = "forkfail_logging",
41841 + .data = &grsec_enable_forkfail,
41842 + .maxlen = sizeof(int),
41843 + .mode = 0600,
41844 + .proc_handler = &proc_dointvec,
41845 + },
41846 +#endif
41847 +#ifdef CONFIG_GRKERNSEC_TIME
41848 + {
41849 + .procname = "timechange_logging",
41850 + .data = &grsec_enable_time,
41851 + .maxlen = sizeof(int),
41852 + .mode = 0600,
41853 + .proc_handler = &proc_dointvec,
41854 + },
41855 +#endif
41856 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
41857 + {
41858 + .procname = "chroot_deny_shmat",
41859 + .data = &grsec_enable_chroot_shmat,
41860 + .maxlen = sizeof(int),
41861 + .mode = 0600,
41862 + .proc_handler = &proc_dointvec,
41863 + },
41864 +#endif
41865 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
41866 + {
41867 + .procname = "chroot_deny_unix",
41868 + .data = &grsec_enable_chroot_unix,
41869 + .maxlen = sizeof(int),
41870 + .mode = 0600,
41871 + .proc_handler = &proc_dointvec,
41872 + },
41873 +#endif
41874 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
41875 + {
41876 + .procname = "chroot_deny_mount",
41877 + .data = &grsec_enable_chroot_mount,
41878 + .maxlen = sizeof(int),
41879 + .mode = 0600,
41880 + .proc_handler = &proc_dointvec,
41881 + },
41882 +#endif
41883 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
41884 + {
41885 + .procname = "chroot_deny_fchdir",
41886 + .data = &grsec_enable_chroot_fchdir,
41887 + .maxlen = sizeof(int),
41888 + .mode = 0600,
41889 + .proc_handler = &proc_dointvec,
41890 + },
41891 +#endif
41892 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
41893 + {
41894 + .procname = "chroot_deny_chroot",
41895 + .data = &grsec_enable_chroot_double,
41896 + .maxlen = sizeof(int),
41897 + .mode = 0600,
41898 + .proc_handler = &proc_dointvec,
41899 + },
41900 +#endif
41901 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
41902 + {
41903 + .procname = "chroot_deny_pivot",
41904 + .data = &grsec_enable_chroot_pivot,
41905 + .maxlen = sizeof(int),
41906 + .mode = 0600,
41907 + .proc_handler = &proc_dointvec,
41908 + },
41909 +#endif
41910 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
41911 + {
41912 + .procname = "chroot_enforce_chdir",
41913 + .data = &grsec_enable_chroot_chdir,
41914 + .maxlen = sizeof(int),
41915 + .mode = 0600,
41916 + .proc_handler = &proc_dointvec,
41917 + },
41918 +#endif
41919 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
41920 + {
41921 + .procname = "chroot_deny_chmod",
41922 + .data = &grsec_enable_chroot_chmod,
41923 + .maxlen = sizeof(int),
41924 + .mode = 0600,
41925 + .proc_handler = &proc_dointvec,
41926 + },
41927 +#endif
41928 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
41929 + {
41930 + .procname = "chroot_deny_mknod",
41931 + .data = &grsec_enable_chroot_mknod,
41932 + .maxlen = sizeof(int),
41933 + .mode = 0600,
41934 + .proc_handler = &proc_dointvec,
41935 + },
41936 +#endif
41937 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
41938 + {
41939 + .procname = "chroot_restrict_nice",
41940 + .data = &grsec_enable_chroot_nice,
41941 + .maxlen = sizeof(int),
41942 + .mode = 0600,
41943 + .proc_handler = &proc_dointvec,
41944 + },
41945 +#endif
41946 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
41947 + {
41948 + .procname = "chroot_execlog",
41949 + .data = &grsec_enable_chroot_execlog,
41950 + .maxlen = sizeof(int),
41951 + .mode = 0600,
41952 + .proc_handler = &proc_dointvec,
41953 + },
41954 +#endif
41955 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
41956 + {
41957 + .procname = "chroot_caps",
41958 + .data = &grsec_enable_chroot_caps,
41959 + .maxlen = sizeof(int),
41960 + .mode = 0600,
41961 + .proc_handler = &proc_dointvec,
41962 + },
41963 +#endif
41964 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
41965 + {
41966 + .procname = "chroot_deny_sysctl",
41967 + .data = &grsec_enable_chroot_sysctl,
41968 + .maxlen = sizeof(int),
41969 + .mode = 0600,
41970 + .proc_handler = &proc_dointvec,
41971 + },
41972 +#endif
41973 +#ifdef CONFIG_GRKERNSEC_TPE
41974 + {
41975 + .procname = "tpe",
41976 + .data = &grsec_enable_tpe,
41977 + .maxlen = sizeof(int),
41978 + .mode = 0600,
41979 + .proc_handler = &proc_dointvec,
41980 + },
41981 + {
41982 + .procname = "tpe_gid",
41983 + .data = &grsec_tpe_gid,
41984 + .maxlen = sizeof(int),
41985 + .mode = 0600,
41986 + .proc_handler = &proc_dointvec,
41987 + },
41988 +#endif
41989 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
41990 + {
41991 + .procname = "tpe_invert",
41992 + .data = &grsec_enable_tpe_invert,
41993 + .maxlen = sizeof(int),
41994 + .mode = 0600,
41995 + .proc_handler = &proc_dointvec,
41996 + },
41997 +#endif
41998 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
41999 + {
42000 + .procname = "tpe_restrict_all",
42001 + .data = &grsec_enable_tpe_all,
42002 + .maxlen = sizeof(int),
42003 + .mode = 0600,
42004 + .proc_handler = &proc_dointvec,
42005 + },
42006 +#endif
42007 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
42008 + {
42009 + .procname = "socket_all",
42010 + .data = &grsec_enable_socket_all,
42011 + .maxlen = sizeof(int),
42012 + .mode = 0600,
42013 + .proc_handler = &proc_dointvec,
42014 + },
42015 + {
42016 + .procname = "socket_all_gid",
42017 + .data = &grsec_socket_all_gid,
42018 + .maxlen = sizeof(int),
42019 + .mode = 0600,
42020 + .proc_handler = &proc_dointvec,
42021 + },
42022 +#endif
42023 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
42024 + {
42025 + .procname = "socket_client",
42026 + .data = &grsec_enable_socket_client,
42027 + .maxlen = sizeof(int),
42028 + .mode = 0600,
42029 + .proc_handler = &proc_dointvec,
42030 + },
42031 + {
42032 + .procname = "socket_client_gid",
42033 + .data = &grsec_socket_client_gid,
42034 + .maxlen = sizeof(int),
42035 + .mode = 0600,
42036 + .proc_handler = &proc_dointvec,
42037 + },
42038 +#endif
42039 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
42040 + {
42041 + .procname = "socket_server",
42042 + .data = &grsec_enable_socket_server,
42043 + .maxlen = sizeof(int),
42044 + .mode = 0600,
42045 + .proc_handler = &proc_dointvec,
42046 + },
42047 + {
42048 + .procname = "socket_server_gid",
42049 + .data = &grsec_socket_server_gid,
42050 + .maxlen = sizeof(int),
42051 + .mode = 0600,
42052 + .proc_handler = &proc_dointvec,
42053 + },
42054 +#endif
42055 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
42056 + {
42057 + .procname = "audit_group",
42058 + .data = &grsec_enable_group,
42059 + .maxlen = sizeof(int),
42060 + .mode = 0600,
42061 + .proc_handler = &proc_dointvec,
42062 + },
42063 + {
42064 + .procname = "audit_gid",
42065 + .data = &grsec_audit_gid,
42066 + .maxlen = sizeof(int),
42067 + .mode = 0600,
42068 + .proc_handler = &proc_dointvec,
42069 + },
42070 +#endif
42071 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
42072 + {
42073 + .procname = "audit_chdir",
42074 + .data = &grsec_enable_chdir,
42075 + .maxlen = sizeof(int),
42076 + .mode = 0600,
42077 + .proc_handler = &proc_dointvec,
42078 + },
42079 +#endif
42080 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
42081 + {
42082 + .procname = "audit_mount",
42083 + .data = &grsec_enable_mount,
42084 + .maxlen = sizeof(int),
42085 + .mode = 0600,
42086 + .proc_handler = &proc_dointvec,
42087 + },
42088 +#endif
42089 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
42090 + {
42091 + .procname = "audit_textrel",
42092 + .data = &grsec_enable_audit_textrel,
42093 + .maxlen = sizeof(int),
42094 + .mode = 0600,
42095 + .proc_handler = &proc_dointvec,
42096 + },
42097 +#endif
42098 +#ifdef CONFIG_GRKERNSEC_DMESG
42099 + {
42100 + .procname = "dmesg",
42101 + .data = &grsec_enable_dmesg,
42102 + .maxlen = sizeof(int),
42103 + .mode = 0600,
42104 + .proc_handler = &proc_dointvec,
42105 + },
42106 +#endif
42107 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
42108 + {
42109 + .procname = "chroot_findtask",
42110 + .data = &grsec_enable_chroot_findtask,
42111 + .maxlen = sizeof(int),
42112 + .mode = 0600,
42113 + .proc_handler = &proc_dointvec,
42114 + },
42115 +#endif
42116 +#ifdef CONFIG_GRKERNSEC_RESLOG
42117 + {
42118 + .procname = "resource_logging",
42119 + .data = &grsec_resource_logging,
42120 + .maxlen = sizeof(int),
42121 + .mode = 0600,
42122 + .proc_handler = &proc_dointvec,
42123 + },
42124 +#endif
42125 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
42126 + {
42127 + .procname = "audit_ptrace",
42128 + .data = &grsec_enable_audit_ptrace,
42129 + .maxlen = sizeof(int),
42130 + .mode = 0600,
42131 + .proc_handler = &proc_dointvec,
42132 + },
42133 +#endif
42134 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
42135 + {
42136 + .procname = "harden_ptrace",
42137 + .data = &grsec_enable_harden_ptrace,
42138 + .maxlen = sizeof(int),
42139 + .mode = 0600,
42140 + .proc_handler = &proc_dointvec,
42141 + },
42142 +#endif
42143 + {
42144 + .procname = "grsec_lock",
42145 + .data = &grsec_lock,
42146 + .maxlen = sizeof(int),
42147 + .mode = 0600,
42148 + .proc_handler = &proc_dointvec,
42149 + },
42150 +#endif
42151 +#ifdef CONFIG_GRKERNSEC_ROFS
42152 + {
42153 + .procname = "romount_protect",
42154 + .data = &grsec_enable_rofs,
42155 + .maxlen = sizeof(int),
42156 + .mode = 0600,
42157 + .proc_handler = &proc_dointvec_minmax,
42158 + .extra1 = &one,
42159 + .extra2 = &one,
42160 + },
42161 +#endif
42162 + { }
42163 +};
42164 +#endif
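
The table above only defines the entries; for orientation, here is a minimal
userspace sketch of how an init script might flip one of these toggles and then
set grsec_lock so that later writes are refused. It assumes a kernel built with
the sysctl support added by this patch, with the entries visible under
/proc/sys/kernel/grsecurity/; the entry names come from the table above, the
rest is illustrative.

/* Illustrative only: enable one grsecurity toggle, then lock the table.
 * Assumes CONFIG_GRKERNSEC_SYSCTL and that the entries from the table above
 * appear under /proc/sys/kernel/grsecurity/. */
#include <stdio.h>

static int write_grsec_sysctl(const char *name, const char *value)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", name);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return -1;
	}
	fputs(value, f);
	fclose(f);
	return 0;
}

int main(void)
{
	write_grsec_sysctl("chroot_deny_mount", "1"); /* any toggle from the table */
	write_grsec_sysctl("grsec_lock", "1");        /* afterwards, writes are refused */
	return 0;
}
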
42165 diff -urNp linux-2.6.35.4/grsecurity/grsec_textrel.c linux-2.6.35.4/grsecurity/grsec_textrel.c
42166 --- linux-2.6.35.4/grsecurity/grsec_textrel.c 1969-12-31 19:00:00.000000000 -0500
42167 +++ linux-2.6.35.4/grsecurity/grsec_textrel.c 2010-09-17 20:12:37.000000000 -0400
42168 @@ -0,0 +1,16 @@
42169 +#include <linux/kernel.h>
42170 +#include <linux/sched.h>
42171 +#include <linux/mm.h>
42172 +#include <linux/file.h>
42173 +#include <linux/grinternal.h>
42174 +#include <linux/grsecurity.h>
42175 +
42176 +void
42177 +gr_log_textrel(struct vm_area_struct * vma)
42178 +{
42179 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
42180 + if (grsec_enable_audit_textrel)
42181 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
42182 +#endif
42183 + return;
42184 +}
42185 diff -urNp linux-2.6.35.4/grsecurity/grsec_time.c linux-2.6.35.4/grsecurity/grsec_time.c
42186 --- linux-2.6.35.4/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
42187 +++ linux-2.6.35.4/grsecurity/grsec_time.c 2010-09-17 20:12:37.000000000 -0400
42188 @@ -0,0 +1,13 @@
42189 +#include <linux/kernel.h>
42190 +#include <linux/sched.h>
42191 +#include <linux/grinternal.h>
42192 +
42193 +void
42194 +gr_log_timechange(void)
42195 +{
42196 +#ifdef CONFIG_GRKERNSEC_TIME
42197 + if (grsec_enable_time)
42198 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
42199 +#endif
42200 + return;
42201 +}
42202 diff -urNp linux-2.6.35.4/grsecurity/grsec_tpe.c linux-2.6.35.4/grsecurity/grsec_tpe.c
42203 --- linux-2.6.35.4/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
42204 +++ linux-2.6.35.4/grsecurity/grsec_tpe.c 2010-09-17 20:12:37.000000000 -0400
42205 @@ -0,0 +1,39 @@
42206 +#include <linux/kernel.h>
42207 +#include <linux/sched.h>
42208 +#include <linux/file.h>
42209 +#include <linux/fs.h>
42210 +#include <linux/grinternal.h>
42211 +
42212 +extern int gr_acl_tpe_check(void);
42213 +
42214 +int
42215 +gr_tpe_allow(const struct file *file)
42216 +{
42217 +#ifdef CONFIG_GRKERNSEC
42218 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
42219 + const struct cred *cred = current_cred();
42220 +
42221 + if (cred->uid && ((grsec_enable_tpe &&
42222 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
42223 + ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
42224 + (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
42225 +#else
42226 + in_group_p(grsec_tpe_gid)
42227 +#endif
42228 + ) || gr_acl_tpe_check()) &&
42229 + (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
42230 + (inode->i_mode & S_IWOTH))))) {
42231 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
42232 + return 0;
42233 + }
42234 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
42235 + if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
42236 + ((inode->i_uid && (inode->i_uid != cred->uid)) ||
42237 + (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
42238 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
42239 + return 0;
42240 + }
42241 +#endif
42242 +#endif
42243 + return 1;
42244 +}
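
The trust decision above is spread across nested #ifdef branches, so as a
reading aid only, the same policy can be restated as a standalone predicate.
This is not kernel code: the names untrusted, tpe_all, dir_uid and dir_mode are
invented for the sketch (untrusted stands for the gid/invert test or the RBAC
TPE flag, dir_uid/dir_mode describe the parent directory of the file being
executed).

/* Illustrative restatement of gr_tpe_allow()'s policy; returns 1 when
 * execution would be allowed, 0 when TPE would deny it. */
#include <sys/stat.h>

static int tpe_would_allow(unsigned int uid, int untrusted, int tpe_all,
			   unsigned int dir_uid, mode_t dir_mode)
{
	/* root is never restricted */
	if (uid == 0)
		return 1;

	/* main TPE rule: untrusted users may only execute files whose parent
	 * directory is owned by root and not group- or world-writable */
	if (untrusted &&
	    (dir_uid != 0 || (dir_mode & (S_IWGRP | S_IWOTH))))
		return 0;

	/* CONFIG_GRKERNSEC_TPE_ALL: every non-root user is additionally limited
	 * to directories owned by root or by themselves that are not group- or
	 * world-writable */
	if (tpe_all &&
	    ((dir_uid != 0 && dir_uid != uid) ||
	     (dir_mode & (S_IWGRP | S_IWOTH))))
		return 0;

	return 1;
}
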
42245 diff -urNp linux-2.6.35.4/grsecurity/grsum.c linux-2.6.35.4/grsecurity/grsum.c
42246 --- linux-2.6.35.4/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
42247 +++ linux-2.6.35.4/grsecurity/grsum.c 2010-09-17 20:12:37.000000000 -0400
42248 @@ -0,0 +1,61 @@
42249 +#include <linux/err.h>
42250 +#include <linux/kernel.h>
42251 +#include <linux/sched.h>
42252 +#include <linux/mm.h>
42253 +#include <linux/scatterlist.h>
42254 +#include <linux/crypto.h>
42255 +#include <linux/gracl.h>
42256 +
42257 +
42258 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
42259 +#error "crypto and sha256 must be built into the kernel"
42260 +#endif
42261 +
42262 +int
42263 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
42264 +{
42265 + char *p;
42266 + struct crypto_hash *tfm;
42267 + struct hash_desc desc;
42268 + struct scatterlist sg;
42269 + unsigned char temp_sum[GR_SHA_LEN];
42270 + volatile int retval = 0;
42271 + volatile int dummy = 0;
42272 + unsigned int i;
42273 +
42274 + sg_init_table(&sg, 1);
42275 +
42276 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
42277 + if (IS_ERR(tfm)) {
42278 + /* should never happen, since sha256 should be built in */
42279 + return 1;
42280 + }
42281 +
42282 + desc.tfm = tfm;
42283 + desc.flags = 0;
42284 +
42285 + crypto_hash_init(&desc);
42286 +
42287 + p = salt;
42288 + sg_set_buf(&sg, p, GR_SALT_LEN);
42289 + crypto_hash_update(&desc, &sg, sg.length);
42290 +
42291 + p = entry->pw;
42292 + sg_set_buf(&sg, p, strlen(p));
42293 +
42294 + crypto_hash_update(&desc, &sg, sg.length);
42295 +
42296 + crypto_hash_final(&desc, temp_sum);
42297 +
42298 + memset(entry->pw, 0, GR_PW_LEN);
42299 +
42300 + for (i = 0; i < GR_SHA_LEN; i++)
42301 + if (sum[i] != temp_sum[i])
42302 + retval = 1;
42303 + else
42304 + dummy = 1; // waste a cycle
42305 +
42306 + crypto_free_hash(tfm);
42307 +
42308 + return retval;
42309 +}
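
The comparison loop above intentionally does the same amount of work whether or
not a byte matches (the dummy assignment), so the time chkpw() takes does not
reveal how many leading bytes of a guessed password hash were correct. The same
idea is commonly written by folding the differences together; the following
standalone sketch only illustrates that pattern and is not code from the patch.

/* Constant-time buffer comparison sketch: returns 0 only when the buffers are
 * equal, and performs the same number of iterations either way. */
#include <stddef.h>

static int const_time_memcmp(const unsigned char *a,
			     const unsigned char *b, size_t len)
{
	unsigned char diff = 0;
	size_t i;

	for (i = 0; i < len; i++)
		diff |= a[i] ^ b[i];	/* no early exit on a mismatch */

	return diff;
}
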
42310 diff -urNp linux-2.6.35.4/grsecurity/Kconfig linux-2.6.35.4/grsecurity/Kconfig
42311 --- linux-2.6.35.4/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
42312 +++ linux-2.6.35.4/grsecurity/Kconfig 2010-09-17 20:12:37.000000000 -0400
42313 @@ -0,0 +1,986 @@
42314 +#
42315 +# grsecurity configuration
42316 +#
42317 +
42318 +menu "Grsecurity"
42319 +
42320 +config GRKERNSEC
42321 + bool "Grsecurity"
42322 + select CRYPTO
42323 + select CRYPTO_SHA256
42324 + help
42325 + If you say Y here, you will be able to configure many features
42326 + that will enhance the security of your system. It is highly
42327 + recommended that you say Y here and read through the help
42328 + for each option so that you fully understand the features and
42329 + can evaluate their usefulness for your machine.
42330 +
42331 +choice
42332 + prompt "Security Level"
42333 + depends on GRKERNSEC
42334 + default GRKERNSEC_CUSTOM
42335 +
42336 +config GRKERNSEC_LOW
42337 + bool "Low"
42338 + select GRKERNSEC_LINK
42339 + select GRKERNSEC_FIFO
42340 + select GRKERNSEC_EXECVE
42341 + select GRKERNSEC_RANDNET
42342 + select GRKERNSEC_DMESG
42343 + select GRKERNSEC_CHROOT
42344 + select GRKERNSEC_CHROOT_CHDIR
42345 +
42346 + help
42347 + If you choose this option, several of the grsecurity options will
42348 + be enabled that will give you greater protection against a number
42349 + of attacks, while assuring that none of your software will have any
42350 + conflicts with the additional security measures. If you run a lot
42351 + of unusual software, or you are having problems with the higher
42352 + security levels, you should say Y here. With this option, the
42353 + following features are enabled:
42354 +
42355 + - Linking restrictions
42356 + - FIFO restrictions
42357 + - Enforcing RLIMIT_NPROC on execve
42358 + - Restricted dmesg
42359 + - Enforced chdir("/") on chroot
42360 + - Runtime module disabling
42361 +
42362 +config GRKERNSEC_MEDIUM
42363 + bool "Medium"
42364 + select PAX
42365 + select PAX_EI_PAX
42366 + select PAX_PT_PAX_FLAGS
42367 + select PAX_HAVE_ACL_FLAGS
42368 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
42369 + select GRKERNSEC_CHROOT
42370 + select GRKERNSEC_CHROOT_SYSCTL
42371 + select GRKERNSEC_LINK
42372 + select GRKERNSEC_FIFO
42373 + select GRKERNSEC_EXECVE
42374 + select GRKERNSEC_DMESG
42375 + select GRKERNSEC_RANDNET
42376 + select GRKERNSEC_FORKFAIL
42377 + select GRKERNSEC_TIME
42378 + select GRKERNSEC_SIGNAL
42379 + select GRKERNSEC_CHROOT
42380 + select GRKERNSEC_CHROOT_UNIX
42381 + select GRKERNSEC_CHROOT_MOUNT
42382 + select GRKERNSEC_CHROOT_PIVOT
42383 + select GRKERNSEC_CHROOT_DOUBLE
42384 + select GRKERNSEC_CHROOT_CHDIR
42385 + select GRKERNSEC_CHROOT_MKNOD
42386 + select GRKERNSEC_PROC
42387 + select GRKERNSEC_PROC_USERGROUP
42388 + select PAX_RANDUSTACK
42389 + select PAX_ASLR
42390 + select PAX_RANDMMAP
42391 + select PAX_REFCOUNT if (X86 || SPARC64)
42392 + select PAX_USERCOPY if ((X86 || SPARC32 || SPARC64 || PPC) && (SLAB || SLUB || SLOB))
42393 +
42394 + help
42395 + If you say Y here, several features in addition to those included
42396 + in the low additional security level will be enabled. These
42397 + features provide even more security to your system, though in rare
42398 + cases they may be incompatible with very old or poorly written
42399 + software. If you enable this option, make sure that your auth
42400 + service (identd) is running as gid 1001. With this option,
42401 + the following features (in addition to those provided in the
42402 + low additional security level) will be enabled:
42403 +
42404 + - Failed fork logging
42405 + - Time change logging
42406 + - Signal logging
42407 + - Deny mounts in chroot
42408 + - Deny double chrooting
42409 + - Deny sysctl writes in chroot
42410 + - Deny mknod in chroot
42411 + - Deny access to abstract AF_UNIX sockets out of chroot
42412 + - Deny pivot_root in chroot
42413 + - Denied writes of /dev/kmem, /dev/mem, and /dev/port
42414 + - /proc restrictions with special GID set to 10 (usually wheel)
42415 + - Address Space Layout Randomization (ASLR)
42416 + - Prevent exploitation of most refcount overflows
42417 + - Bounds checking of copying between the kernel and userland
42418 +
42419 +config GRKERNSEC_HIGH
42420 + bool "High"
42421 + select GRKERNSEC_LINK
42422 + select GRKERNSEC_FIFO
42423 + select GRKERNSEC_EXECVE
42424 + select GRKERNSEC_DMESG
42425 + select GRKERNSEC_FORKFAIL
42426 + select GRKERNSEC_TIME
42427 + select GRKERNSEC_SIGNAL
42428 + select GRKERNSEC_CHROOT
42429 + select GRKERNSEC_CHROOT_SHMAT
42430 + select GRKERNSEC_CHROOT_UNIX
42431 + select GRKERNSEC_CHROOT_MOUNT
42432 + select GRKERNSEC_CHROOT_FCHDIR
42433 + select GRKERNSEC_CHROOT_PIVOT
42434 + select GRKERNSEC_CHROOT_DOUBLE
42435 + select GRKERNSEC_CHROOT_CHDIR
42436 + select GRKERNSEC_CHROOT_MKNOD
42437 + select GRKERNSEC_CHROOT_CAPS
42438 + select GRKERNSEC_CHROOT_SYSCTL
42439 + select GRKERNSEC_CHROOT_FINDTASK
42440 + select GRKERNSEC_PROC
42441 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
42442 + select GRKERNSEC_HIDESYM
42443 + select GRKERNSEC_BRUTE
42444 + select GRKERNSEC_PROC_USERGROUP
42445 + select GRKERNSEC_KMEM
42446 + select GRKERNSEC_RESLOG
42447 + select GRKERNSEC_RANDNET
42448 + select GRKERNSEC_PROC_ADD
42449 + select GRKERNSEC_CHROOT_CHMOD
42450 + select GRKERNSEC_CHROOT_NICE
42451 + select GRKERNSEC_AUDIT_MOUNT
42452 + select GRKERNSEC_MODHARDEN if (MODULES)
42453 + select GRKERNSEC_HARDEN_PTRACE
42454 + select GRKERNSEC_VM86 if (X86_32)
42455 + select PAX
42456 + select PAX_RANDUSTACK
42457 + select PAX_ASLR
42458 + select PAX_RANDMMAP
42459 + select PAX_NOEXEC
42460 + select PAX_MPROTECT
42461 + select PAX_EI_PAX
42462 + select PAX_PT_PAX_FLAGS
42463 + select PAX_HAVE_ACL_FLAGS
42464 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
42465 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
42466 + select PAX_RANDKSTACK if (X86_TSC && !X86_64)
42467 + select PAX_SEGMEXEC if (X86_32)
42468 + select PAX_PAGEEXEC
42469 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC32 || SPARC64)
42470 + select PAX_EMUTRAMP if (PARISC)
42471 + select PAX_EMUSIGRT if (PARISC)
42472 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
42473 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
42474 + select PAX_REFCOUNT if (X86 || SPARC64)
42475 + select PAX_USERCOPY if ((X86 || PPC || SPARC32 || SPARC64) && (SLAB || SLUB || SLOB))
42476 + help
42477 + If you say Y here, many of the features of grsecurity will be
42478 + enabled, which will protect you against many kinds of attacks
42479 + against your system. The heightened security comes at a cost
42480 + of an increased chance of incompatibilities with rare software
42481 + on your machine. Since this security level enables PaX, you should
42482 + view <http://pax.grsecurity.net> and read about the PaX
42483 + project. While you are there, download chpax and run it on
42484 + binaries that cause problems with PaX. Also remember that
42485 + since the /proc restrictions are enabled, you must run your
42486 + identd as gid 1001. This security level enables the following
42487 + features in addition to those listed in the low and medium
42488 + security levels:
42489 +
42490 + - Additional /proc restrictions
42491 + - Chmod restrictions in chroot
42492 + - No signals, ptrace, or viewing of processes outside of chroot
42493 + - Capability restrictions in chroot
42494 + - Deny fchdir out of chroot
42495 + - Priority restrictions in chroot
42496 + - Segmentation-based implementation of PaX
42497 + - Mprotect restrictions
42498 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
42499 + - Kernel stack randomization
42500 + - Mount/unmount/remount logging
42501 + - Kernel symbol hiding
42502 + - Prevention of memory exhaustion-based exploits
42503 + - Hardening of module auto-loading
42504 + - Ptrace restrictions
42505 + - Restricted vm86 mode
42506 +
42507 +config GRKERNSEC_CUSTOM
42508 + bool "Custom"
42509 + help
42510 + If you say Y here, you will be able to configure every grsecurity
42511 + option, which allows you to enable many more features that aren't
42512 + covered in the basic security levels. These additional features
42513 + include TPE, socket restrictions, and the sysctl system for
42514 + grsecurity. It is advised that you read through the help for
42515 + each option to determine its usefulness in your situation.
42516 +
42517 +endchoice
42518 +
42519 +menu "Address Space Protection"
42520 +depends on GRKERNSEC
42521 +
42522 +config GRKERNSEC_KMEM
42523 + bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
42524 + help
42525 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
42526 + be written to via mmap or otherwise to modify the running kernel.
42527 + /dev/port will also not be allowed to be opened. If you have module
42528 + support disabled, enabling this will close up four ways that are
42529 + currently used to insert malicious code into the running kernel.
42530 + Even with all these features enabled, we still highly recommend that
42531 + you use the RBAC system, as it is still possible for an attacker to
42532 + modify the running kernel through privileged I/O granted by ioperm/iopl.
42533 + If you are not using XFree86, you may be able to stop this additional
42534 + case by enabling the 'Disable privileged I/O' option. Though nothing
42535 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
42536 + but only to video memory, which is the only writing we allow in this
42537 + case. If /dev/kmem or /dev/mem is mmapped without PROT_WRITE, the
42538 + mapping will not be allowed to be mprotected with PROT_WRITE later.
42539 + It is highly recommended that you say Y here if you meet all the
42540 + conditions above.
42541 +
42542 +config GRKERNSEC_VM86
42543 + bool "Restrict VM86 mode"
42544 + depends on X86_32
42545 +
42546 + help
42547 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
42548 + make use of a special execution mode on 32bit x86 processors called
42549 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
42550 + video cards and will still work with this option enabled. The purpose
42551 + of the option is to prevent exploitation of emulation errors in
42552 + virtualization of vm86 mode, like the one discovered in VMware in 2009.
42553 + Nearly all users should be able to enable this option.
42554 +
42555 +config GRKERNSEC_IO
42556 + bool "Disable privileged I/O"
42557 + depends on X86
42558 + select RTC_CLASS
42559 + select RTC_INTF_DEV
42560 + select RTC_DRV_CMOS
42561 +
42562 + help
42563 + If you say Y here, all ioperm and iopl calls will return an error.
42564 + Ioperm and iopl can be used to modify the running kernel.
42565 + Unfortunately, some programs need this access to operate properly,
42566 + the most notable of which are XFree86 and hwclock. hwclock can be
42567 + remedied by having RTC support in the kernel, so real-time
42568 + clock support is enabled if this option is enabled, to ensure
42569 + that hwclock operates correctly. XFree86 still will not
42570 + operate correctly with this option enabled, so DO NOT CHOOSE Y
42571 + IF YOU USE XFree86. If you use XFree86 and you still want to
42572 + protect your kernel against modification, use the RBAC system.
42573 +
42574 +config GRKERNSEC_PROC_MEMMAP
42575 + bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
42576 + default y if (PAX_NOEXEC || PAX_ASLR)
42577 + depends on PAX_NOEXEC || PAX_ASLR
42578 + help
42579 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
42580 + give no information about the addresses of the process's mappings if
42581 + PaX features that rely on random addresses are enabled on the task.
42582 + If you use PaX it is greatly recommended that you say Y here as it
42583 + closes up a hole that makes the full ASLR useless for suid
42584 + binaries.
42585 +
42586 +config GRKERNSEC_BRUTE
42587 + bool "Deter exploit bruteforcing"
42588 + help
42589 + If you say Y here, attempts to bruteforce exploits against forking
42590 + daemons such as apache or sshd will be deterred. When a child of a
42591 + forking daemon is killed by PaX or crashes due to an illegal
42592 + instruction, the parent process will be delayed 30 seconds upon every
42593 + subsequent fork until the administrator is able to assess the
42594 + situation and restart the daemon. It is recommended that you also
42595 + enable signal logging in the auditing section so that logs are
42596 + generated when a process performs an illegal instruction.
42597 +
42598 +config GRKERNSEC_MODHARDEN
42599 + bool "Harden module auto-loading"
42600 + depends on MODULES
42601 + help
42602 + If you say Y here, module auto-loading in response to use of some
42603 + feature implemented by an unloaded module will be restricted to
42604 + root users. Enabling this option helps defend against attacks
42605 + by unprivileged users who abuse the auto-loading behavior to
42606 + cause a vulnerable module to load that is then exploited.
42607 +
42608 + If this option prevents a legitimate use of auto-loading for a
42609 + non-root user, the administrator can execute modprobe manually
42610 + with the exact name of the module mentioned in the alert log.
42611 + Alternatively, the administrator can add the module to the list
42612 + of modules loaded at boot by modifying init scripts.
42613 +
42614 + Modification of init scripts will most likely be needed on
42615 + Ubuntu servers with encrypted home directory support enabled,
42616 + as the first non-root user logging in will cause the ecb(aes),
42617 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
42618 +
42619 +config GRKERNSEC_HIDESYM
42620 + bool "Hide kernel symbols"
42621 + help
42622 + If you say Y here, getting information on loaded modules, and
42623 + displaying all kernel symbols through a syscall will be restricted
42624 + to users with CAP_SYS_MODULE. For software compatibility reasons,
42625 + /proc/kallsyms will be restricted to the root user. The RBAC
42626 + system can hide that entry even from root.
42627 +
42628 + This option also prevents leaking of kernel addresses through
42629 + several /proc entries.
42630 +
42631 + Note that this option is only effective provided the following
42632 + conditions are met:
42633 + 1) The kernel using grsecurity is not precompiled by some distribution
42634 + 2) You are using the RBAC system and hiding other files such as your
42635 + kernel image and System.map. Alternatively, enabling this option
42636 + causes the permissions on /boot, /lib/modules, and the kernel
42637 + source directory to change at compile time to prevent
42638 + reading by non-root users.
42639 + If the above conditions are met, this option will aid in providing a
42640 + useful protection against local kernel exploitation of overflows
42641 + and arbitrary read/write vulnerabilities.
42642 +
42643 +endmenu
42644 +menu "Role Based Access Control Options"
42645 +depends on GRKERNSEC
42646 +
42647 +config GRKERNSEC_NO_RBAC
42648 + bool "Disable RBAC system"
42649 + help
42650 + If you say Y here, the /dev/grsec device will be removed from the kernel,
42651 + preventing the RBAC system from being enabled. You should only say Y
42652 + here if you have no intention of using the RBAC system, so as to prevent
42653 + an attacker with root access from misusing the RBAC system to hide files
42654 + and processes when loadable module support and /dev/[k]mem have been
42655 + locked down.
42656 +
42657 +config GRKERNSEC_ACL_HIDEKERN
42658 + bool "Hide kernel processes"
42659 + help
42660 + If you say Y here, all kernel threads will be hidden from all
42661 + processes but those whose subject has the "view hidden processes"
42662 + flag.
42663 +
42664 +config GRKERNSEC_ACL_MAXTRIES
42665 + int "Maximum tries before password lockout"
42666 + default 3
42667 + help
42668 + This option enforces the maximum number of times a user can attempt
42669 + to authorize themselves with the grsecurity RBAC system before being
42670 + denied the ability to attempt authorization again for a specified time.
42671 + The lower the number, the harder it will be to brute-force a password.
42672 +
42673 +config GRKERNSEC_ACL_TIMEOUT
42674 + int "Time to wait after max password tries, in seconds"
42675 + default 30
42676 + help
42677 + This option specifies the time the user must wait after attempting to
42678 + authorize to the RBAC system with the maximum number of invalid
42679 + passwords. The higher the number, the harder it will be to brute-force
42680 + a password.
42681 +
42682 +endmenu
42683 +menu "Filesystem Protections"
42684 +depends on GRKERNSEC
42685 +
42686 +config GRKERNSEC_PROC
42687 + bool "Proc restrictions"
42688 + help
42689 + If you say Y here, the permissions of the /proc filesystem
42690 + will be altered to enhance system security and privacy. You MUST
42691 + choose either a user only restriction or a user and group restriction.
42692 + Depending upon the option you choose, you can either restrict users to
42693 + see only the processes they themselves run, or choose a group that can
42694 + view all processes and files normally restricted to root if you choose
42695 + the "restrict to user only" option. NOTE: If you're running identd as
42696 + a non-root user, you will have to run it as the group you specify here.
42697 +
42698 +config GRKERNSEC_PROC_USER
42699 + bool "Restrict /proc to user only"
42700 + depends on GRKERNSEC_PROC
42701 + help
42702 + If you say Y here, non-root users will only be able to view their own
42703 + processes; they will also be restricted from viewing network-related
42704 + information and kernel symbol and module information.
42705 +
42706 +config GRKERNSEC_PROC_USERGROUP
42707 + bool "Allow special group"
42708 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
42709 + help
42710 + If you say Y here, you will be able to select a group that will be
42711 + able to view all processes, network-related information, and
42712 + kernel and symbol information. This option is useful if you want
42713 + to run identd as a non-root user.
42714 +
42715 +config GRKERNSEC_PROC_GID
42716 + int "GID for special group"
42717 + depends on GRKERNSEC_PROC_USERGROUP
42718 + default 1001
42719 +
42720 +config GRKERNSEC_PROC_ADD
42721 + bool "Additional restrictions"
42722 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
42723 + help
42724 + If you say Y here, additional restrictions will be placed on
42725 + /proc that keep normal users from viewing device information and
42726 + slabinfo information that could be useful for exploits.
42727 +
42728 +config GRKERNSEC_LINK
42729 + bool "Linking restrictions"
42730 + help
42731 + If you say Y here, /tmp race exploits will be prevented, since users
42732 + will no longer be able to follow symlinks owned by other users in
42733 + world-writable +t directories (i.e. /tmp), unless the owner of the
42734 + symlink is the owner of the directory. Users will also not be
42735 + able to hardlink to files they do not own. If the sysctl option is
42736 + enabled, a sysctl option with name "linking_restrictions" is created.
42737 +
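
As an illustration of what the linking restriction just described means for a
running process (the path is made up for the example): with the option active,
an open() that would resolve through a symlink owned by another user in a
sticky, world-writable directory is refused rather than silently followed.

/* Sketch only: /tmp/report is assumed to be a symlink planted by another
 * user. With the linking restriction enabled the open() fails (typically
 * with EACCES) instead of following the link. */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>

int main(void)
{
	int fd = open("/tmp/report", O_WRONLY | O_CREAT, 0600);

	if (fd < 0)
		printf("open refused: %s\n", strerror(errno));
	else
		printf("open succeeded (restriction off, or the link is ours)\n");
	return 0;
}
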
42738 +config GRKERNSEC_FIFO
42739 + bool "FIFO restrictions"
42740 + help
42741 + If you say Y here, users will not be able to write to FIFOs they don't
42742 + own in world-writable +t directories (i.e. /tmp), unless the owner of
42743 + the FIFO is the same as the owner of the directory it's held in. If the sysctl
42744 + option is enabled, a sysctl option with name "fifo_restrictions" is
42745 + created.
42746 +
42747 +config GRKERNSEC_ROFS
42748 + bool "Runtime read-only mount protection"
42749 + help
42750 + If you say Y here, a sysctl option with name "romount_protect" will
42751 + be created. By setting this option to 1 at runtime, filesystems
42752 + will be protected in the following ways:
42753 + * No new writable mounts will be allowed
42754 + * Existing read-only mounts won't be able to be remounted read/write
42755 + * Write operations will be denied on all block devices
42756 + This option acts independently of grsec_lock: once it is set to 1,
42757 + it cannot be turned off. Therefore, please be mindful of the resulting
42758 + behavior if this option is enabled in an init script on a read-only
42759 + filesystem. This feature is mainly intended for secure embedded systems.
42760 +
42761 +config GRKERNSEC_CHROOT
42762 + bool "Chroot jail restrictions"
42763 + help
42764 + If you say Y here, you will be able to choose several options that will
42765 + make breaking out of a chrooted jail much more difficult. If you
42766 + encounter no software incompatibilities with the following options, it
42767 + is recommended that you enable each one.
42768 +
42769 +config GRKERNSEC_CHROOT_MOUNT
42770 + bool "Deny mounts"
42771 + depends on GRKERNSEC_CHROOT
42772 + help
42773 + If you say Y here, processes inside a chroot will not be able to
42774 + mount or remount filesystems. If the sysctl option is enabled, a
42775 + sysctl option with name "chroot_deny_mount" is created.
42776 +
42777 +config GRKERNSEC_CHROOT_DOUBLE
42778 + bool "Deny double-chroots"
42779 + depends on GRKERNSEC_CHROOT
42780 + help
42781 + If you say Y here, processes inside a chroot will not be able to chroot
42782 + again outside the chroot. This is a widely used method of breaking
42783 + out of a chroot jail and should not be allowed. If the sysctl
42784 + option is enabled, a sysctl option with name
42785 + "chroot_deny_chroot" is created.
42786 +
42787 +config GRKERNSEC_CHROOT_PIVOT
42788 + bool "Deny pivot_root in chroot"
42789 + depends on GRKERNSEC_CHROOT
42790 + help
42791 + If you say Y here, processes inside a chroot will not be able to use
42792 + a function called pivot_root() that was introduced in Linux 2.3.41. It
42793 + works similarly to chroot in that it changes the root filesystem. This
42794 + function could be misused in a chrooted process to attempt to break out
42795 + of the chroot, and therefore should not be allowed. If the sysctl
42796 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
42797 + created.
42798 +
42799 +config GRKERNSEC_CHROOT_CHDIR
42800 + bool "Enforce chdir(\"/\") on all chroots"
42801 + depends on GRKERNSEC_CHROOT
42802 + help
42803 + If you say Y here, the current working directory of all newly-chrooted
42804 + applications will be set to the root directory of the chroot.
42805 + The man page on chroot(2) states:
42806 + Note that this call does not change the current working
42807 + directory, so that `.' can be outside the tree rooted at
42808 + `/'. In particular, the super-user can escape from a
42809 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
42810 +
42811 + It is recommended that you say Y here, since it's not known to break
42812 + any software. If the sysctl option is enabled, a sysctl option with
42813 + name "chroot_enforce_chdir" is created.
42814 +
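
The man page excerpt above gives the escape in shell terms; written out as a
rough C sketch (directory name and loop count are arbitrary, and this is shown
only to make clear what the enforced chdir("/") closes off), the sequence is:

/* The classic breakout GRKERNSEC_CHROOT_CHDIR defends against: chroot()
 * without chdir() leaves the working directory outside the new root, so a
 * root process can climb back out. With the option enabled the kernel sets
 * the working directory to the new root, so chdir("..") cannot escape. */
#include <unistd.h>
#include <sys/stat.h>

int main(void)
{
	int i;

	mkdir("foo", 0700);
	chroot("foo");           /* cwd is still outside the new root */
	for (i = 0; i < 64; i++)
		chdir("..");     /* walk up past the jail's root */
	chroot(".");             /* re-root at the real / */
	execl("/bin/sh", "sh", (char *)NULL);
	return 1;
}
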
42815 +config GRKERNSEC_CHROOT_CHMOD
42816 + bool "Deny (f)chmod +s"
42817 + depends on GRKERNSEC_CHROOT
42818 + help
42819 + If you say Y here, processes inside a chroot will not be able to chmod
42820 + or fchmod files to make them have suid or sgid bits. This protects
42821 + against another published method of breaking a chroot. If the sysctl
42822 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
42823 + created.
42824 +
42825 +config GRKERNSEC_CHROOT_FCHDIR
42826 + bool "Deny fchdir out of chroot"
42827 + depends on GRKERNSEC_CHROOT
42828 + help
42829 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
42830 + to a file descriptor of the chrooting process that points to a directory
42831 + outside the chroot will be stopped. If the sysctl option
42832 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
42833 +
42834 +config GRKERNSEC_CHROOT_MKNOD
42835 + bool "Deny mknod"
42836 + depends on GRKERNSEC_CHROOT
42837 + help
42838 + If you say Y here, processes inside a chroot will not be allowed to
42839 + mknod. The problem with using mknod inside a chroot is that it
42840 + would allow an attacker to create a device entry that is the same
42841 + as one on the physical root of your system, which could be anything
42842 + from the console device to a device for your hard drive (which
42843 + they could then use to wipe the drive or steal data). It is recommended
42844 + that you say Y here, unless you run into software incompatibilities.
42845 + If the sysctl option is enabled, a sysctl option with name
42846 + "chroot_deny_mknod" is created.
42847 +
42848 +config GRKERNSEC_CHROOT_SHMAT
42849 + bool "Deny shmat() out of chroot"
42850 + depends on GRKERNSEC_CHROOT
42851 + help
42852 + If you say Y here, processes inside a chroot will not be able to attach
42853 + to shared memory segments that were created outside of the chroot jail.
42854 + It is recommended that you say Y here. If the sysctl option is enabled,
42855 + a sysctl option with name "chroot_deny_shmat" is created.
42856 +
42857 +config GRKERNSEC_CHROOT_UNIX
42858 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
42859 + depends on GRKERNSEC_CHROOT
42860 + help
42861 + If you say Y here, processes inside a chroot will not be able to
42862 + connect to abstract (meaning not belonging to a filesystem) Unix
42863 + domain sockets that were bound outside of a chroot. It is recommended
42864 + that you say Y here. If the sysctl option is enabled, a sysctl option
42865 + with name "chroot_deny_unix" is created.
42866 +
42867 +config GRKERNSEC_CHROOT_FINDTASK
42868 + bool "Protect outside processes"
42869 + depends on GRKERNSEC_CHROOT
42870 + help
42871 + If you say Y here, processes inside a chroot will not be able to
42872 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
42873 + getsid, or view any process outside of the chroot. If the sysctl
42874 + option is enabled, a sysctl option with name "chroot_findtask" is
42875 + created.
42876 +
42877 +config GRKERNSEC_CHROOT_NICE
42878 + bool "Restrict priority changes"
42879 + depends on GRKERNSEC_CHROOT
42880 + help
42881 + If you say Y here, processes inside a chroot will not be able to raise
42882 + the priority of processes in the chroot, or alter the priority of
42883 + processes outside the chroot. This provides more security than simply
42884 + removing CAP_SYS_NICE from the process' capability set. If the
42885 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
42886 + is created.
42887 +
42888 +config GRKERNSEC_CHROOT_SYSCTL
42889 + bool "Deny sysctl writes"
42890 + depends on GRKERNSEC_CHROOT
42891 + help
42892 + If you say Y here, an attacker in a chroot will not be able to
42893 + write to sysctl entries, either by sysctl(2) or through a /proc
42894 + interface. It is strongly recommended that you say Y here. If the
42895 + sysctl option is enabled, a sysctl option with name
42896 + "chroot_deny_sysctl" is created.
42897 +
42898 +config GRKERNSEC_CHROOT_CAPS
42899 + bool "Capability restrictions"
42900 + depends on GRKERNSEC_CHROOT
42901 + help
42902 + If you say Y here, the capabilities on all root processes within a
42903 + chroot jail will be lowered to stop module insertion, raw i/o,
42904 + system and net admin tasks, rebooting the system, modifying immutable
42905 + files, modifying IPC owned by another, and changing the system time.
42906 + This is left as an option because it can break some apps. Disable this
42907 + if your chrooted apps are having problems performing those kinds of
42908 + tasks. If the sysctl option is enabled, a sysctl option with
42909 + name "chroot_caps" is created.
42910 +
42911 +endmenu
42912 +menu "Kernel Auditing"
42913 +depends on GRKERNSEC
42914 +
42915 +config GRKERNSEC_AUDIT_GROUP
42916 + bool "Single group for auditing"
42917 + help
42918 + If you say Y here, the exec, chdir, and (un)mount logging features
42919 + will only operate on a group you specify. This option is recommended
42920 + if you only want to watch certain users instead of having a large
42921 + amount of logs from the entire system. If the sysctl option is enabled,
42922 + a sysctl option with name "audit_group" is created.
42923 +
42924 +config GRKERNSEC_AUDIT_GID
42925 + int "GID for auditing"
42926 + depends on GRKERNSEC_AUDIT_GROUP
42927 + default 1007
42928 +
42929 +config GRKERNSEC_EXECLOG
42930 + bool "Exec logging"
42931 + help
42932 + If you say Y here, all execve() calls will be logged (since the
42933 + other exec*() calls are frontends to execve(), all execution
42934 + will be logged). Useful for shell-servers that like to keep track
42935 + of their users. If the sysctl option is enabled, a sysctl option with
42936 + name "exec_logging" is created.
42937 + WARNING: This option when enabled will produce a LOT of logs, especially
42938 + on an active system.
42939 +
42940 +config GRKERNSEC_RESLOG
42941 + bool "Resource logging"
42942 + help
42943 + If you say Y here, all attempts to overstep resource limits will
42944 + be logged with the resource name, the requested size, and the current
42945 + limit. It is highly recommended that you say Y here. If the sysctl
42946 + option is enabled, a sysctl option with name "resource_logging" is
42947 + created. If the RBAC system is enabled, the sysctl value is ignored.
42948 +
42949 +config GRKERNSEC_CHROOT_EXECLOG
42950 + bool "Log execs within chroot"
42951 + help
42952 + If you say Y here, all executions inside a chroot jail will be logged
42953 + to syslog. This can cause a large amount of logs if certain
42954 + applications (e.g. djb's daemontools) are installed on the system, and
42955 + is therefore left as an option. If the sysctl option is enabled, a
42956 + sysctl option with name "chroot_execlog" is created.
42957 +
42958 +config GRKERNSEC_AUDIT_PTRACE
42959 + bool "Ptrace logging"
42960 + help
42961 + If you say Y here, all attempts to attach to a process via ptrace
42962 + will be logged. If the sysctl option is enabled, a sysctl option
42963 + with name "audit_ptrace" is created.
42964 +
42965 +config GRKERNSEC_AUDIT_CHDIR
42966 + bool "Chdir logging"
42967 + help
42968 + If you say Y here, all chdir() calls will be logged. If the sysctl
42969 + option is enabled, a sysctl option with name "audit_chdir" is created.
42970 +
42971 +config GRKERNSEC_AUDIT_MOUNT
42972 + bool "(Un)Mount logging"
42973 + help
42974 + If you say Y here, all mounts and unmounts will be logged. If the
42975 + sysctl option is enabled, a sysctl option with name "audit_mount" is
42976 + created.
42977 +
42978 +config GRKERNSEC_SIGNAL
42979 + bool "Signal logging"
42980 + help
42981 + If you say Y here, certain important signals will be logged, such as
42982 + SIGSEGV, which will as a result inform you when an error in a program
42983 + occurred, which in some cases could mean a possible exploit attempt.
42984 + If the sysctl option is enabled, a sysctl option with name
42985 + "signal_logging" is created.
42986 +
42987 +config GRKERNSEC_FORKFAIL
42988 + bool "Fork failure logging"
42989 + help
42990 + If you say Y here, all failed fork() attempts will be logged.
42991 + This could suggest a fork bomb, or someone attempting to overstep
42992 + their process limit. If the sysctl option is enabled, a sysctl option
42993 + with name "forkfail_logging" is created.
42994 +
42995 +config GRKERNSEC_TIME
42996 + bool "Time change logging"
42997 + help
42998 + If you say Y here, any changes of the system clock will be logged.
42999 + If the sysctl option is enabled, a sysctl option with name
43000 + "timechange_logging" is created.
43001 +
43002 +config GRKERNSEC_PROC_IPADDR
43003 + bool "/proc/<pid>/ipaddr support"
43004 + help
43005 + If you say Y here, a new entry will be added to each /proc/<pid>
43006 + directory that contains the IP address of the person using the task.
43007 + The IP is carried across local TCP and AF_UNIX stream sockets.
43008 + This information can be useful for IDS/IPSes to perform remote response
43009 + to a local attack. The entry is readable by only the owner of the
43010 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
43011 + the RBAC system), and thus does not create privacy concerns.
43012 +
43013 +config GRKERNSEC_AUDIT_TEXTREL
43014 + bool 'ELF text relocations logging (READ HELP)'
43015 + depends on PAX_MPROTECT
43016 + help
43017 + If you say Y here, text relocations will be logged with the filename
43018 + of the offending library or binary. The purpose of the feature is
43019 + to help Linux distribution developers get rid of libraries and
43020 + binaries that need text relocations which hinder the future progress
43021 + of PaX. Only Linux distribution developers should say Y here, and
43022 + never on a production machine, as this option creates an information
43023 + leak that could aid an attacker in defeating the randomization of
43024 + a single memory region. If the sysctl option is enabled, a sysctl
43025 + option with name "audit_textrel" is created.
43026 +
43027 +endmenu
43028 +
43029 +menu "Executable Protections"
43030 +depends on GRKERNSEC
43031 +
43032 +config GRKERNSEC_EXECVE
43033 + bool "Enforce RLIMIT_NPROC on execs"
43034 + help
43035 + If you say Y here, users with a resource limit on processes will
43036 + have the value checked during execve() calls. The current system
43037 + only checks the system limit during fork() calls. If the sysctl option
43038 + is enabled, a sysctl option with name "execve_limiting" is created.
43039 +
43040 +config GRKERNSEC_DMESG
43041 + bool "Dmesg(8) restriction"
43042 + help
43043 + If you say Y here, non-root users will not be able to use dmesg(8)
43044 + to view up to the last 4kb of messages in the kernel's log buffer.
43045 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
43046 + created.
43047 +
43048 +config GRKERNSEC_HARDEN_PTRACE
43049 + bool "Deter ptrace-based process snooping"
43050 + help
43051 + If you say Y here, TTY sniffers and other malicious monitoring
43052 + programs implemented through ptrace will be defeated. If you
43053 + have been using the RBAC system, this option has already been
43054 + enabled for several years for all users, with the ability to make
43055 + fine-grained exceptions.
43056 +
43057 + This option only affects the ability of non-root users to ptrace
43058 + processes that are not a descendant of the ptracing process.
43059 + This means that strace ./binary and gdb ./binary will still work,
43060 + but attaching to arbitrary processes will not. If the sysctl
43061 + option is enabled, a sysctl option with name "harden_ptrace" is
43062 + created.
43063 +
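
To make the distinction in the help text above concrete (the pid handling here
is invented for the example): tracing a process you did not spawn is what
harden_ptrace refuses for unprivileged users, while debugging your own child,
as strace ./binary or gdb ./binary do, keeps working.

/* Sketch: attempt to attach to an arbitrary pid given on the command line.
 * With harden_ptrace enabled and a non-root caller, the attach is refused
 * unless the target is a descendant of this process. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/ptrace.h>

int main(int argc, char **argv)
{
	pid_t target;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <pid>\n", argv[0]);
		return 1;
	}
	target = (pid_t)atoi(argv[1]);

	if (ptrace(PTRACE_ATTACH, target, NULL, NULL) == -1)
		printf("attach to %ld refused: %s\n", (long)target, strerror(errno));
	else
		printf("attached to %ld\n", (long)target);
	return 0;
}
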
43064 +config GRKERNSEC_TPE
43065 + bool "Trusted Path Execution (TPE)"
43066 + help
43067 + If you say Y here, you will be able to choose a gid to add to the
43068 + supplementary groups of users you want to mark as "untrusted."
43069 + These users will not be able to execute any files that are not in
43070 + root-owned directories writable only by root. If the sysctl option
43071 + is enabled, a sysctl option with name "tpe" is created.
43072 +
43073 +config GRKERNSEC_TPE_ALL
43074 + bool "Partially restrict all non-root users"
43075 + depends on GRKERNSEC_TPE
43076 + help
43077 + If you say Y here, all non-root users will be covered under
43078 + a weaker TPE restriction. This is separate from, and in addition to,
43079 + the main TPE options that you have selected elsewhere. Thus, if a
43080 + "trusted" GID is chosen, this restriction applies to even that GID.
43081 + Under this restriction, all non-root users will only be allowed to
43082 + execute files in directories they own that are not group or
43083 + world-writable, or in directories owned by root and writable only by
43084 + root. If the sysctl option is enabled, a sysctl option with name
43085 + "tpe_restrict_all" is created.
43086 +
43087 +config GRKERNSEC_TPE_INVERT
43088 + bool "Invert GID option"
43089 + depends on GRKERNSEC_TPE
43090 + help
43091 + If you say Y here, the group you specify in the TPE configuration will
43092 + decide what group TPE restrictions will be *disabled* for. This
43093 + option is useful if you want TPE restrictions to be applied to most
43094 + users on the system. If the sysctl option is enabled, a sysctl option
43095 + with name "tpe_invert" is created. Unlike other sysctl options, this
43096 + entry will default to on for backward-compatibility.
43097 +
43098 +config GRKERNSEC_TPE_GID
43099 + int "GID for untrusted users"
43100 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
43101 + default 1005
43102 + help
43103 + Setting this GID determines what group TPE restrictions will be
43104 + *enabled* for. If the sysctl option is enabled, a sysctl option
43105 + with name "tpe_gid" is created.
43106 +
43107 +config GRKERNSEC_TPE_GID
43108 + int "GID for trusted users"
43109 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
43110 + default 1005
43111 + help
43112 + Setting this GID determines what group TPE restrictions will be
43113 + *disabled* for. If the sysctl option is enabled, a sysctl option
43114 + with name "tpe_gid" is created.
43115 +
43116 +endmenu
43117 +menu "Network Protections"
43118 +depends on GRKERNSEC
43119 +
43120 +config GRKERNSEC_RANDNET
43121 + bool "Larger entropy pools"
43122 + help
43123 + If you say Y here, the entropy pools used for many features of Linux
43124 + and grsecurity will be doubled in size. Since several grsecurity
43125 + features use additional randomness, it is recommended that you say Y
43126 + here. Saying Y here has a similar effect to modifying
43127 + /proc/sys/kernel/random/poolsize.
43128 +
43129 +config GRKERNSEC_BLACKHOLE
43130 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
43131 + help
43132 + If you say Y here, neither TCP resets nor ICMP
43133 + destination-unreachable packets will be sent in response to packets
43134 + sent to ports for which no associated listening process exists.
43135 + This feature supports both IPv4 and IPv6 and exempts the
43136 + loopback interface from blackholing. Enabling this feature
43137 + makes a host more resilient to DoS attacks and reduces network
43138 + visibility against scanners.
43139 +
43140 + The blackhole feature as-implemented is equivalent to the FreeBSD
43141 + blackhole feature, as it prevents RST responses to all packets, not
43142 + just SYNs. Under most application behavior this causes no
43143 + problems, but applications (like haproxy) may not close certain
43144 + connections in a way that cleanly terminates them on the remote
43145 + end, leaving the remote host in LAST_ACK state. Because of this
43146 + side-effect and to prevent intentional LAST_ACK DoSes, this
43147 + feature also adds automatic mitigation against such attacks.
43148 + The mitigation drastically reduces the amount of time a socket
43149 + can spend in LAST_ACK state. If you're using haproxy and not
43150 + all servers it connects to have this option enabled, consider
43151 + disabling this feature on the haproxy host.
43152 +
43153 + If the sysctl option is enabled, two sysctl options with names
43154 + "ip_blackhole" and "lastack_retries" will be created.
43155 + While "ip_blackhole" takes the standard zero/non-zero on/off
43156 + toggle, "lastack_retries" uses the same kinds of values as
43157 + "tcp_retries1" and "tcp_retries2". The default value of 4
43158 + prevents a socket from lasting more than 45 seconds in LAST_ACK
43159 + state.
43160 +
43161 +config GRKERNSEC_SOCKET
43162 + bool "Socket restrictions"
43163 + help
43164 + If you say Y here, you will be able to choose from several options.
43165 + If you assign a GID on your system and add it to the supplementary
43166 + groups of users you want to restrict socket access to, this patch
43167 + will perform up to three things, based on the option(s) you choose.
43168 +
43169 +config GRKERNSEC_SOCKET_ALL
43170 + bool "Deny any sockets to group"
43171 + depends on GRKERNSEC_SOCKET
43172 + help
43173 + If you say Y here, you will be able to choose a GID whose users will
43174 + be unable to connect to other hosts from your machine or run server
43175 + applications from your machine. If the sysctl option is enabled, a
43176 + sysctl option with name "socket_all" is created.
43177 +
43178 +config GRKERNSEC_SOCKET_ALL_GID
43179 + int "GID to deny all sockets for"
43180 + depends on GRKERNSEC_SOCKET_ALL
43181 + default 1004
43182 + help
43183 + Here you can choose the GID to disable socket access for. Remember to
43184 + add the users you want socket access disabled for to the GID
43185 + specified here. If the sysctl option is enabled, a sysctl option
43186 + with name "socket_all_gid" is created.
43187 +
43188 +config GRKERNSEC_SOCKET_CLIENT
43189 + bool "Deny client sockets to group"
43190 + depends on GRKERNSEC_SOCKET
43191 + help
43192 + If you say Y here, you will be able to choose a GID whose users will
43193 + be unable to connect to other hosts from your machine, but will be
43194 + able to run servers. If this option is enabled, all users in the group
43195 + you specify will have to use passive mode when initiating ftp transfers
43196 + from the shell on your machine. If the sysctl option is enabled, a
43197 + sysctl option with name "socket_client" is created.
43198 +
43199 +config GRKERNSEC_SOCKET_CLIENT_GID
43200 + int "GID to deny client sockets for"
43201 + depends on GRKERNSEC_SOCKET_CLIENT
43202 + default 1003
43203 + help
43204 + Here you can choose the GID to disable client socket access for.
43205 + Remember to add the users you want client socket access disabled for to
43206 + the GID specified here. If the sysctl option is enabled, a sysctl
43207 + option with name "socket_client_gid" is created.
43208 +
43209 +config GRKERNSEC_SOCKET_SERVER
43210 + bool "Deny server sockets to group"
43211 + depends on GRKERNSEC_SOCKET
43212 + help
43213 + If you say Y here, you will be able to choose a GID whose users will
43214 + be unable to run server applications from your machine. If the sysctl
43215 + option is enabled, a sysctl option with name "socket_server" is created.
43216 +
43217 +config GRKERNSEC_SOCKET_SERVER_GID
43218 + int "GID to deny server sockets for"
43219 + depends on GRKERNSEC_SOCKET_SERVER
43220 + default 1002
43221 + help
43222 + Here you can choose the GID to disable server socket access for.
43223 + Remember to add the users you want server socket access disabled for to
43224 + the GID specified here. If the sysctl option is enabled, a sysctl
43225 + option with name "socket_server_gid" is created.
43226 +
43227 +endmenu
43228 +menu "Sysctl support"
43229 +depends on GRKERNSEC && SYSCTL
43230 +
43231 +config GRKERNSEC_SYSCTL
43232 + bool "Sysctl support"
43233 + help
43234 + If you say Y here, you will be able to change the options that
43235 + grsecurity runs with at bootup, without having to recompile your
43236 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
43237 + to enable (1) or disable (0) various features. All the sysctl entries
43238 + are mutable until the "grsec_lock" entry is set to a non-zero value.
43239 + All features enabled in the kernel configuration are disabled at boot
43240 + if you do not say Y to the "Turn on features by default" option.
43241 + All options should be set at startup, and the grsec_lock entry should
43242 + be set to a non-zero value after all the options are set.
43243 + *THIS IS EXTREMELY IMPORTANT*
43244 +
43245 +config GRKERNSEC_SYSCTL_DISTRO
43246 + bool "Extra sysctl support for distro makers (READ HELP)"
43247 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
43248 + help
43249 + If you say Y here, additional sysctl options will be created
43250 + for features that affect processes running as root. Therefore,
43251 + it is critical when using this option that the grsec_lock entry be
43252 + enabled after boot. Only distros with prebuilt kernel packages
43253 + with this option enabled that can ensure grsec_lock is enabled
43254 + after boot should use this option.
43255 + *Failure to set grsec_lock after boot makes all grsec features
43256 + this option covers useless*
43257 +
43258 + Currently this option creates the following sysctl entries:
43259 + "Disable Privileged I/O": "disable_priv_io"
43260 +
43261 +config GRKERNSEC_SYSCTL_ON
43262 + bool "Turn on features by default"
43263 + depends on GRKERNSEC_SYSCTL
43264 + help
43265 + If you say Y here, instead of having all features enabled in the
43266 + kernel configuration disabled at boot time, the features will be
43267 + enabled at boot time. It is recommended you say Y here unless
43268 + there is some reason you would want all sysctl-tunable features to
43269 + be disabled by default. As mentioned elsewhere, it is important
43270 + to enable the grsec_lock entry once you have finished modifying
43271 + the sysctl entries.
43272 +
43273 +endmenu
43274 +menu "Logging Options"
43275 +depends on GRKERNSEC
43276 +
43277 +config GRKERNSEC_FLOODTIME
43278 + int "Seconds in between log messages (minimum)"
43279 + default 10
43280 + help
43281 + This option allows you to enforce the number of seconds between
43282 + grsecurity log messages. The default should be suitable for most
43283 + people; however, if you choose to change it, choose a value small enough
43284 + to allow informative logs to be produced, but large enough to
43285 + prevent flooding.
43286 +
43287 +config GRKERNSEC_FLOODBURST
43288 + int "Number of messages in a burst (maximum)"
43289 + default 4
43290 + help
43291 + This option allows you to choose the maximum number of messages allowed
43292 + within the flood time interval you chose in a separate option. The
43293 + default should be suitable for most people; however, if you find that
43294 + many of your logs are being interpreted as flooding, you may want to
43295 + raise this value.
43296 +
43297 +endmenu
43298 +
43299 +endmenu
43300 diff -urNp linux-2.6.35.4/grsecurity/Makefile linux-2.6.35.4/grsecurity/Makefile
43301 --- linux-2.6.35.4/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
43302 +++ linux-2.6.35.4/grsecurity/Makefile 2010-09-17 20:12:37.000000000 -0400
43303 @@ -0,0 +1,29 @@
43304 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
43305 +# during 2001-2009 it was completely redesigned by Brad Spengler
43306 +# into an RBAC system
43307 +#
43308 +# All code in this directory and various hooks inserted throughout the kernel
43309 +# are copyright Brad Spengler - Open Source Security, Inc., and released
43310 +# under the GPL v2 or higher
43311 +
43312 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
43313 + grsec_mount.o grsec_sig.o grsec_sock.o grsec_sysctl.o \
43314 + grsec_time.o grsec_tpe.o grsec_link.o grsec_textrel.o grsec_ptrace.o
43315 +
43316 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_ip.o gracl_segv.o \
43317 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
43318 + gracl_learn.o grsec_log.o
43319 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
43320 +
43321 +ifndef CONFIG_GRKERNSEC
43322 +obj-y += grsec_disabled.o
43323 +endif
43324 +
43325 +ifdef CONFIG_GRKERNSEC_HIDESYM
43326 +extra-y := grsec_hidesym.o
43327 +$(obj)/grsec_hidesym.o:
43328 + @-chmod -f 500 /boot
43329 + @-chmod -f 500 /lib/modules
43330 + @-chmod -f 700 .
43331 + @echo ' grsec: protected kernel image paths'
43332 +endif
43333 diff -urNp linux-2.6.35.4/include/acpi/acoutput.h linux-2.6.35.4/include/acpi/acoutput.h
43334 --- linux-2.6.35.4/include/acpi/acoutput.h 2010-08-26 19:47:12.000000000 -0400
43335 +++ linux-2.6.35.4/include/acpi/acoutput.h 2010-09-17 20:12:09.000000000 -0400
43336 @@ -268,8 +268,8 @@
43337 * leaving no executable debug code!
43338 */
43339 #define ACPI_FUNCTION_NAME(a)
43340 -#define ACPI_DEBUG_PRINT(pl)
43341 -#define ACPI_DEBUG_PRINT_RAW(pl)
43342 +#define ACPI_DEBUG_PRINT(pl) do {} while (0)
43343 +#define ACPI_DEBUG_PRINT_RAW(pl) do {} while (0)
43344
43345 #endif /* ACPI_DEBUG_OUTPUT */
43346
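
The ACPI_DEBUG_PRINT change above replaces an empty macro body with do {} while (0). An empty expansion disappears entirely, so a forgotten semicolon can silently pull the following statement into an if() body, and a bare `if (cond) ACPI_DEBUG_PRINT(...);` collapses to an empty-body statement; the do/while form stays a single real statement that requires its semicolon. The DBG_* macros below are hypothetical stand-ins used only to show the pitfall.

/*
 * Why the empty debug macros become "do {} while (0)": a macro that expands
 * to nothing silently swallows a missing semicolon.
 */
#include <stdio.h>

#define DBG_EMPTY(x)			/* old style: expands to nothing      */
#define DBG_STMT(x)	do {} while (0)	/* new style: a real empty statement  */

int main(void)
{
	int cond = 0;

	if (cond)
		DBG_EMPTY(("debug"))	/* missing ';' goes unnoticed...      */
		puts("oops");		/* ...so this becomes the if() body   */

	if (cond)
		DBG_STMT(("debug"));	/* semicolon now required, and no bare
					   "if (cond);" empty-body warnings    */
	puts("done");
	return 0;
}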
43347 diff -urNp linux-2.6.35.4/include/acpi/acpi_drivers.h linux-2.6.35.4/include/acpi/acpi_drivers.h
43348 --- linux-2.6.35.4/include/acpi/acpi_drivers.h 2010-08-26 19:47:12.000000000 -0400
43349 +++ linux-2.6.35.4/include/acpi/acpi_drivers.h 2010-09-17 20:12:09.000000000 -0400
43350 @@ -121,8 +121,8 @@ int acpi_processor_set_thermal_limit(acp
43351 Dock Station
43352 -------------------------------------------------------------------------- */
43353 struct acpi_dock_ops {
43354 - acpi_notify_handler handler;
43355 - acpi_notify_handler uevent;
43356 + const acpi_notify_handler handler;
43357 + const acpi_notify_handler uevent;
43358 };
43359
43360 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
43361 @@ -130,7 +130,7 @@ extern int is_dock_device(acpi_handle ha
43362 extern int register_dock_notifier(struct notifier_block *nb);
43363 extern void unregister_dock_notifier(struct notifier_block *nb);
43364 extern int register_hotplug_dock_device(acpi_handle handle,
43365 - struct acpi_dock_ops *ops,
43366 + const struct acpi_dock_ops *ops,
43367 void *context);
43368 extern void unregister_hotplug_dock_device(acpi_handle handle);
43369 #else
43370 @@ -146,7 +146,7 @@ static inline void unregister_dock_notif
43371 {
43372 }
43373 static inline int register_hotplug_dock_device(acpi_handle handle,
43374 - struct acpi_dock_ops *ops,
43375 + const struct acpi_dock_ops *ops,
43376 void *context)
43377 {
43378 return -ENODEV;
43379 diff -urNp linux-2.6.35.4/include/asm-generic/atomic-long.h linux-2.6.35.4/include/asm-generic/atomic-long.h
43380 --- linux-2.6.35.4/include/asm-generic/atomic-long.h 2010-08-26 19:47:12.000000000 -0400
43381 +++ linux-2.6.35.4/include/asm-generic/atomic-long.h 2010-09-17 20:12:09.000000000 -0400
43382 @@ -22,6 +22,12 @@
43383
43384 typedef atomic64_t atomic_long_t;
43385
43386 +#ifdef CONFIG_PAX_REFCOUNT
43387 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
43388 +#else
43389 +typedef atomic64_t atomic_long_unchecked_t;
43390 +#endif
43391 +
43392 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
43393
43394 static inline long atomic_long_read(atomic_long_t *l)
43395 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
43396 return (long)atomic64_read(v);
43397 }
43398
43399 +#ifdef CONFIG_PAX_REFCOUNT
43400 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
43401 +{
43402 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
43403 +
43404 + return (long)atomic64_read_unchecked(v);
43405 +}
43406 +#endif
43407 +
43408 static inline void atomic_long_set(atomic_long_t *l, long i)
43409 {
43410 atomic64_t *v = (atomic64_t *)l;
43411 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
43412 atomic64_set(v, i);
43413 }
43414
43415 +#ifdef CONFIG_PAX_REFCOUNT
43416 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
43417 +{
43418 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
43419 +
43420 + atomic64_set_unchecked(v, i);
43421 +}
43422 +#endif
43423 +
43424 static inline void atomic_long_inc(atomic_long_t *l)
43425 {
43426 atomic64_t *v = (atomic64_t *)l;
43427 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
43428 atomic64_inc(v);
43429 }
43430
43431 +#ifdef CONFIG_PAX_REFCOUNT
43432 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
43433 +{
43434 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
43435 +
43436 + atomic64_inc_unchecked(v);
43437 +}
43438 +#endif
43439 +
43440 static inline void atomic_long_dec(atomic_long_t *l)
43441 {
43442 atomic64_t *v = (atomic64_t *)l;
43443 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
43444 atomic64_dec(v);
43445 }
43446
43447 +#ifdef CONFIG_PAX_REFCOUNT
43448 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
43449 +{
43450 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
43451 +
43452 + atomic64_dec_unchecked(v);
43453 +}
43454 +#endif
43455 +
43456 static inline void atomic_long_add(long i, atomic_long_t *l)
43457 {
43458 atomic64_t *v = (atomic64_t *)l;
43459 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long
43460 atomic64_add(i, v);
43461 }
43462
43463 +#ifdef CONFIG_PAX_REFCOUNT
43464 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
43465 +{
43466 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
43467 +
43468 + atomic64_add_unchecked(i, v);
43469 +}
43470 +#endif
43471 +
43472 static inline void atomic_long_sub(long i, atomic_long_t *l)
43473 {
43474 atomic64_t *v = (atomic64_t *)l;
43475 @@ -115,6 +166,15 @@ static inline long atomic_long_inc_retur
43476 return (long)atomic64_inc_return(v);
43477 }
43478
43479 +#ifdef CONFIG_PAX_REFCOUNT
43480 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
43481 +{
43482 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
43483 +
43484 + return (long)atomic64_inc_return_unchecked(v);
43485 +}
43486 +#endif
43487 +
43488 static inline long atomic_long_dec_return(atomic_long_t *l)
43489 {
43490 atomic64_t *v = (atomic64_t *)l;
43491 @@ -140,6 +200,12 @@ static inline long atomic_long_add_unles
43492
43493 typedef atomic_t atomic_long_t;
43494
43495 +#ifdef CONFIG_PAX_REFCOUNT
43496 +typedef atomic_unchecked_t atomic_long_unchecked_t;
43497 +#else
43498 +typedef atomic_t atomic_long_unchecked_t;
43499 +#endif
43500 +
43501 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
43502 static inline long atomic_long_read(atomic_long_t *l)
43503 {
43504 @@ -148,6 +214,15 @@ static inline long atomic_long_read(atom
43505 return (long)atomic_read(v);
43506 }
43507
43508 +#ifdef CONFIG_PAX_REFCOUNT
43509 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
43510 +{
43511 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
43512 +
43513 + return (long)atomic_read_unchecked(v);
43514 +}
43515 +#endif
43516 +
43517 static inline void atomic_long_set(atomic_long_t *l, long i)
43518 {
43519 atomic_t *v = (atomic_t *)l;
43520 @@ -155,6 +230,15 @@ static inline void atomic_long_set(atomi
43521 atomic_set(v, i);
43522 }
43523
43524 +#ifdef CONFIG_PAX_REFCOUNT
43525 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
43526 +{
43527 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
43528 +
43529 + atomic_set_unchecked(v, i);
43530 +}
43531 +#endif
43532 +
43533 static inline void atomic_long_inc(atomic_long_t *l)
43534 {
43535 atomic_t *v = (atomic_t *)l;
43536 @@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomi
43537 atomic_inc(v);
43538 }
43539
43540 +#ifdef CONFIG_PAX_REFCOUNT
43541 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
43542 +{
43543 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
43544 +
43545 + atomic_inc_unchecked(v);
43546 +}
43547 +#endif
43548 +
43549 static inline void atomic_long_dec(atomic_long_t *l)
43550 {
43551 atomic_t *v = (atomic_t *)l;
43552 @@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomi
43553 atomic_dec(v);
43554 }
43555
43556 +#ifdef CONFIG_PAX_REFCOUNT
43557 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
43558 +{
43559 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
43560 +
43561 + atomic_dec_unchecked(v);
43562 +}
43563 +#endif
43564 +
43565 static inline void atomic_long_add(long i, atomic_long_t *l)
43566 {
43567 atomic_t *v = (atomic_t *)l;
43568 @@ -176,6 +278,15 @@ static inline void atomic_long_add(long
43569 atomic_add(i, v);
43570 }
43571
43572 +#ifdef CONFIG_PAX_REFCOUNT
43573 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
43574 +{
43575 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
43576 +
43577 + atomic_add_unchecked(i, v);
43578 +}
43579 +#endif
43580 +
43581 static inline void atomic_long_sub(long i, atomic_long_t *l)
43582 {
43583 atomic_t *v = (atomic_t *)l;
43584 @@ -232,6 +343,15 @@ static inline long atomic_long_inc_retur
43585 return (long)atomic_inc_return(v);
43586 }
43587
43588 +#ifdef CONFIG_PAX_REFCOUNT
43589 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
43590 +{
43591 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
43592 +
43593 + return (long)atomic_inc_return_unchecked(v);
43594 +}
43595 +#endif
43596 +
43597 static inline long atomic_long_dec_return(atomic_long_t *l)
43598 {
43599 atomic_t *v = (atomic_t *)l;
43600 @@ -255,4 +375,37 @@ static inline long atomic_long_add_unles
43601
43602 #endif /* BITS_PER_LONG == 64 */
43603
43604 +#ifdef CONFIG_PAX_REFCOUNT
43605 +static inline void pax_refcount_needs_these_functions(void)
43606 +{
43607 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
43608 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
43609 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
43610 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
43611 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
43612 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
43613 +
43614 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
43615 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
43616 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
43617 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
43618 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
43619 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
43620 +}
43621 +#else
43622 +#define atomic_read_unchecked(v) atomic_read(v)
43623 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
43624 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
43625 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
43626 +#define atomic_inc_unchecked(v) atomic_inc(v)
43627 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
43628 +
43629 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
43630 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
43631 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
43632 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
43633 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
43634 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
43635 +#endif
43636 +
43637 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
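
The block above introduces atomic_long_unchecked_t and the *_unchecked helpers: under CONFIG_PAX_REFCOUNT the plain atomic operations gain overflow protection, and counters that are allowed to wrap (statistics such as the drm counts[] and gendisk sync_io fields converted later in this patch) are moved to the unchecked variants; without the option the unchecked names collapse back to the ordinary types and operations. The userspace sketch below mirrors only that aliasing pattern, with hypothetical my_* names and a plain ++ standing in for the real atomic operation.

/*
 * Sketch of the fallback pattern, assuming MY_REFCOUNT_CHECKING stands in
 * for CONFIG_PAX_REFCOUNT.  When checking is off, the *_unchecked type and
 * helper are aliases for the plain ones, so call sites need no #ifdefs.
 */
#include <stdio.h>

typedef struct { long counter; } my_atomic_long_t;

#ifdef MY_REFCOUNT_CHECKING
typedef struct { long counter; } my_atomic_long_unchecked_t;
static inline void my_inc_unchecked(my_atomic_long_unchecked_t *v) { v->counter++; }
#else
typedef my_atomic_long_t my_atomic_long_unchecked_t;
#define my_inc_unchecked(v)	((v)->counter++)	/* falls back to the plain op */
#endif

static my_atomic_long_unchecked_t stat_counter;		/* wrap-tolerant statistic */

int main(void)
{
	my_inc_unchecked(&stat_counter);
	printf("%ld\n", stat_counter.counter);
	return 0;
}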
43638 diff -urNp linux-2.6.35.4/include/asm-generic/dma-mapping-common.h linux-2.6.35.4/include/asm-generic/dma-mapping-common.h
43639 --- linux-2.6.35.4/include/asm-generic/dma-mapping-common.h 2010-08-26 19:47:12.000000000 -0400
43640 +++ linux-2.6.35.4/include/asm-generic/dma-mapping-common.h 2010-09-17 20:12:09.000000000 -0400
43641 @@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_
43642 enum dma_data_direction dir,
43643 struct dma_attrs *attrs)
43644 {
43645 - struct dma_map_ops *ops = get_dma_ops(dev);
43646 + const struct dma_map_ops *ops = get_dma_ops(dev);
43647 dma_addr_t addr;
43648
43649 kmemcheck_mark_initialized(ptr, size);
43650 @@ -30,7 +30,7 @@ static inline void dma_unmap_single_attr
43651 enum dma_data_direction dir,
43652 struct dma_attrs *attrs)
43653 {
43654 - struct dma_map_ops *ops = get_dma_ops(dev);
43655 + const struct dma_map_ops *ops = get_dma_ops(dev);
43656
43657 BUG_ON(!valid_dma_direction(dir));
43658 if (ops->unmap_page)
43659 @@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struc
43660 int nents, enum dma_data_direction dir,
43661 struct dma_attrs *attrs)
43662 {
43663 - struct dma_map_ops *ops = get_dma_ops(dev);
43664 + const struct dma_map_ops *ops = get_dma_ops(dev);
43665 int i, ents;
43666 struct scatterlist *s;
43667
43668 @@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(st
43669 int nents, enum dma_data_direction dir,
43670 struct dma_attrs *attrs)
43671 {
43672 - struct dma_map_ops *ops = get_dma_ops(dev);
43673 + const struct dma_map_ops *ops = get_dma_ops(dev);
43674
43675 BUG_ON(!valid_dma_direction(dir));
43676 debug_dma_unmap_sg(dev, sg, nents, dir);
43677 @@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(st
43678 size_t offset, size_t size,
43679 enum dma_data_direction dir)
43680 {
43681 - struct dma_map_ops *ops = get_dma_ops(dev);
43682 + const struct dma_map_ops *ops = get_dma_ops(dev);
43683 dma_addr_t addr;
43684
43685 kmemcheck_mark_initialized(page_address(page) + offset, size);
43686 @@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(st
43687 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
43688 size_t size, enum dma_data_direction dir)
43689 {
43690 - struct dma_map_ops *ops = get_dma_ops(dev);
43691 + const struct dma_map_ops *ops = get_dma_ops(dev);
43692
43693 BUG_ON(!valid_dma_direction(dir));
43694 if (ops->unmap_page)
43695 @@ -97,7 +97,7 @@ static inline void dma_sync_single_for_c
43696 size_t size,
43697 enum dma_data_direction dir)
43698 {
43699 - struct dma_map_ops *ops = get_dma_ops(dev);
43700 + const struct dma_map_ops *ops = get_dma_ops(dev);
43701
43702 BUG_ON(!valid_dma_direction(dir));
43703 if (ops->sync_single_for_cpu)
43704 @@ -109,7 +109,7 @@ static inline void dma_sync_single_for_d
43705 dma_addr_t addr, size_t size,
43706 enum dma_data_direction dir)
43707 {
43708 - struct dma_map_ops *ops = get_dma_ops(dev);
43709 + const struct dma_map_ops *ops = get_dma_ops(dev);
43710
43711 BUG_ON(!valid_dma_direction(dir));
43712 if (ops->sync_single_for_device)
43713 @@ -139,7 +139,7 @@ static inline void
43714 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
43715 int nelems, enum dma_data_direction dir)
43716 {
43717 - struct dma_map_ops *ops = get_dma_ops(dev);
43718 + const struct dma_map_ops *ops = get_dma_ops(dev);
43719
43720 BUG_ON(!valid_dma_direction(dir));
43721 if (ops->sync_sg_for_cpu)
43722 @@ -151,7 +151,7 @@ static inline void
43723 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
43724 int nelems, enum dma_data_direction dir)
43725 {
43726 - struct dma_map_ops *ops = get_dma_ops(dev);
43727 + const struct dma_map_ops *ops = get_dma_ops(dev);
43728
43729 BUG_ON(!valid_dma_direction(dir));
43730 if (ops->sync_sg_for_device)
43731 diff -urNp linux-2.6.35.4/include/asm-generic/futex.h linux-2.6.35.4/include/asm-generic/futex.h
43732 --- linux-2.6.35.4/include/asm-generic/futex.h 2010-08-26 19:47:12.000000000 -0400
43733 +++ linux-2.6.35.4/include/asm-generic/futex.h 2010-09-17 20:12:09.000000000 -0400
43734 @@ -6,7 +6,7 @@
43735 #include <asm/errno.h>
43736
43737 static inline int
43738 -futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
43739 +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
43740 {
43741 int op = (encoded_op >> 28) & 7;
43742 int cmp = (encoded_op >> 24) & 15;
43743 @@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op,
43744 }
43745
43746 static inline int
43747 -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
43748 +futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
43749 {
43750 return -ENOSYS;
43751 }
43752 diff -urNp linux-2.6.35.4/include/asm-generic/int-l64.h linux-2.6.35.4/include/asm-generic/int-l64.h
43753 --- linux-2.6.35.4/include/asm-generic/int-l64.h 2010-08-26 19:47:12.000000000 -0400
43754 +++ linux-2.6.35.4/include/asm-generic/int-l64.h 2010-09-17 20:12:09.000000000 -0400
43755 @@ -46,6 +46,8 @@ typedef unsigned int u32;
43756 typedef signed long s64;
43757 typedef unsigned long u64;
43758
43759 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
43760 +
43761 #define S8_C(x) x
43762 #define U8_C(x) x ## U
43763 #define S16_C(x) x
43764 diff -urNp linux-2.6.35.4/include/asm-generic/int-ll64.h linux-2.6.35.4/include/asm-generic/int-ll64.h
43765 --- linux-2.6.35.4/include/asm-generic/int-ll64.h 2010-08-26 19:47:12.000000000 -0400
43766 +++ linux-2.6.35.4/include/asm-generic/int-ll64.h 2010-09-17 20:12:09.000000000 -0400
43767 @@ -51,6 +51,8 @@ typedef unsigned int u32;
43768 typedef signed long long s64;
43769 typedef unsigned long long u64;
43770
43771 +typedef unsigned long long intoverflow_t;
43772 +
43773 #define S8_C(x) x
43774 #define U8_C(x) x ## U
43775 #define S16_C(x) x
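
Both int-l64.h and int-ll64.h gain an intoverflow_t typedef that is wider than the native size type (128-bit via mode(TI) on the l64 variant, unsigned long long on the ll64 variant used by 32-bit architectures). Its consumers are not in this part of the patch; presumably allocation-size arithmetic elsewhere is evaluated in this wider type so that a wrapped product can be detected. The sketch below only illustrates the widening trick, assuming the 32-bit case, and size_mul_overflows() is hypothetical.

/*
 * Overflow detection by computing in a wider type: the product is formed in
 * intoverflow_t and then compared against the maximum 32-bit size.
 */
#include <stdio.h>
#include <stdint.h>

typedef unsigned long long intoverflow_t;	/* the int-ll64.h definition above */

/* assume a 32-bit size type, as on the architectures int-ll64.h covers */
static int size_mul_overflows(uint32_t n, uint32_t size)
{
	intoverflow_t total = (intoverflow_t)n * size;

	/* if the product no longer fits in 32 bits, the allocation would wrap */
	return total > 0xffffffffULL;
}

int main(void)
{
	printf("%d\n", size_mul_overflows(1000, 1000));		/* 0: fits  */
	printf("%d\n", size_mul_overflows(0xffffffffu, 2));	/* 1: wraps */
	return 0;
}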
43776 diff -urNp linux-2.6.35.4/include/asm-generic/kmap_types.h linux-2.6.35.4/include/asm-generic/kmap_types.h
43777 --- linux-2.6.35.4/include/asm-generic/kmap_types.h 2010-08-26 19:47:12.000000000 -0400
43778 +++ linux-2.6.35.4/include/asm-generic/kmap_types.h 2010-09-17 20:12:09.000000000 -0400
43779 @@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
43780 KMAP_D(17) KM_NMI,
43781 KMAP_D(18) KM_NMI_PTE,
43782 KMAP_D(19) KM_KDB,
43783 +KMAP_D(20) KM_CLEARPAGE,
43784 /*
43785 * Remember to update debug_kmap_atomic() when adding new kmap types!
43786 */
43787 -KMAP_D(20) KM_TYPE_NR
43788 +KMAP_D(21) KM_TYPE_NR
43789 };
43790
43791 #undef KMAP_D
43792 diff -urNp linux-2.6.35.4/include/asm-generic/pgtable.h linux-2.6.35.4/include/asm-generic/pgtable.h
43793 --- linux-2.6.35.4/include/asm-generic/pgtable.h 2010-08-26 19:47:12.000000000 -0400
43794 +++ linux-2.6.35.4/include/asm-generic/pgtable.h 2010-09-17 20:12:09.000000000 -0400
43795 @@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_ar
43796 unsigned long size);
43797 #endif
43798
43799 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
43800 +static inline unsigned long pax_open_kernel(void) { return 0; }
43801 +#endif
43802 +
43803 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
43804 +static inline unsigned long pax_close_kernel(void) { return 0; }
43805 +#endif
43806 +
43807 #endif /* !__ASSEMBLY__ */
43808
43809 #endif /* _ASM_GENERIC_PGTABLE_H */
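
The default pax_open_kernel()/pax_close_kernel() stubs above do nothing and return 0; on architectures that define __HAVE_ARCH_PAX_OPEN_KERNEL they are presumably expected to temporarily permit writes to otherwise read-only kernel data. The intended call pattern is a bracketed write, sketched below using the no-op stubs and a hypothetical my_ops structure so the snippet compiles stand-alone.

/*
 * Usage sketch: writes to normally read-only data are bracketed by the
 * open/close pair; with the generic stubs the pair compiles to nothing.
 */
#include <stdio.h>

static inline unsigned long pax_open_kernel(void) { return 0; }
static inline unsigned long pax_close_kernel(void) { return 0; }

struct my_ops {
	void (*handler)(void);
};

static void greet(void) { puts("hello"); }

static void set_handler(struct my_ops *ops, void (*fn)(void))
{
	pax_open_kernel();	/* an arch version would lift write protection */
	ops->handler = fn;
	pax_close_kernel();	/* an arch version would restore it */
}

int main(void)
{
	struct my_ops ops = { 0 };

	set_handler(&ops, greet);
	ops.handler();
	return 0;
}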
43810 diff -urNp linux-2.6.35.4/include/asm-generic/pgtable-nopmd.h linux-2.6.35.4/include/asm-generic/pgtable-nopmd.h
43811 --- linux-2.6.35.4/include/asm-generic/pgtable-nopmd.h 2010-08-26 19:47:12.000000000 -0400
43812 +++ linux-2.6.35.4/include/asm-generic/pgtable-nopmd.h 2010-09-17 20:12:09.000000000 -0400
43813 @@ -1,14 +1,19 @@
43814 #ifndef _PGTABLE_NOPMD_H
43815 #define _PGTABLE_NOPMD_H
43816
43817 -#ifndef __ASSEMBLY__
43818 -
43819 #include <asm-generic/pgtable-nopud.h>
43820
43821 -struct mm_struct;
43822 -
43823 #define __PAGETABLE_PMD_FOLDED
43824
43825 +#define PMD_SHIFT PUD_SHIFT
43826 +#define PTRS_PER_PMD 1
43827 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
43828 +#define PMD_MASK (~(PMD_SIZE-1))
43829 +
43830 +#ifndef __ASSEMBLY__
43831 +
43832 +struct mm_struct;
43833 +
43834 /*
43835 * Having the pmd type consist of a pud gets the size right, and allows
43836 * us to conceptually access the pud entry that this pmd is folded into
43837 @@ -16,11 +21,6 @@ struct mm_struct;
43838 */
43839 typedef struct { pud_t pud; } pmd_t;
43840
43841 -#define PMD_SHIFT PUD_SHIFT
43842 -#define PTRS_PER_PMD 1
43843 -#define PMD_SIZE (1UL << PMD_SHIFT)
43844 -#define PMD_MASK (~(PMD_SIZE-1))
43845 -
43846 /*
43847 * The "pud_xxx()" functions here are trivial for a folded two-level
43848 * setup: the pmd is never bad, and a pmd always exists (as it's folded
43849 diff -urNp linux-2.6.35.4/include/asm-generic/pgtable-nopud.h linux-2.6.35.4/include/asm-generic/pgtable-nopud.h
43850 --- linux-2.6.35.4/include/asm-generic/pgtable-nopud.h 2010-08-26 19:47:12.000000000 -0400
43851 +++ linux-2.6.35.4/include/asm-generic/pgtable-nopud.h 2010-09-17 20:12:09.000000000 -0400
43852 @@ -1,10 +1,15 @@
43853 #ifndef _PGTABLE_NOPUD_H
43854 #define _PGTABLE_NOPUD_H
43855
43856 -#ifndef __ASSEMBLY__
43857 -
43858 #define __PAGETABLE_PUD_FOLDED
43859
43860 +#define PUD_SHIFT PGDIR_SHIFT
43861 +#define PTRS_PER_PUD 1
43862 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
43863 +#define PUD_MASK (~(PUD_SIZE-1))
43864 +
43865 +#ifndef __ASSEMBLY__
43866 +
43867 /*
43868 * Having the pud type consist of a pgd gets the size right, and allows
43869 * us to conceptually access the pgd entry that this pud is folded into
43870 @@ -12,11 +17,6 @@
43871 */
43872 typedef struct { pgd_t pgd; } pud_t;
43873
43874 -#define PUD_SHIFT PGDIR_SHIFT
43875 -#define PTRS_PER_PUD 1
43876 -#define PUD_SIZE (1UL << PUD_SHIFT)
43877 -#define PUD_MASK (~(PUD_SIZE-1))
43878 -
43879 /*
43880 * The "pgd_xxx()" functions here are trivial for a folded two-level
43881 * setup: the pud is never bad, and a pud always exists (as it's folded
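
The two folded-page-table hunks above move the PMD_*/PUD_* constants outside the #ifndef __ASSEMBLY__ guard and switch 1UL to _AC(1,UL). The point is that assembly code cannot parse the UL suffix: _AC() (from include/linux/const.h) keeps the suffix for C and drops it for the assembler, so the same #define works in both contexts. The condensed sketch below reproduces that mechanism with hypothetical DEMO_* names.

/*
 * _AC(): paste the type suffix only when compiling C, so the constant can
 * also be used from assembly.
 */
#ifdef __ASSEMBLY__
#define _AC(X, Y)	X		/* assembler sees a plain constant */
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)	/* C sees 1UL, 1ULL, ... */
#endif

#define DEMO_SHIFT	21
#define DEMO_SIZE	(_AC(1, UL) << DEMO_SHIFT)

#ifndef __ASSEMBLY__
#include <stdio.h>
int main(void)
{
	printf("%lu\n", DEMO_SIZE);	/* 2097152 */
	return 0;
}
#endif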
43882 diff -urNp linux-2.6.35.4/include/asm-generic/vmlinux.lds.h linux-2.6.35.4/include/asm-generic/vmlinux.lds.h
43883 --- linux-2.6.35.4/include/asm-generic/vmlinux.lds.h 2010-08-26 19:47:12.000000000 -0400
43884 +++ linux-2.6.35.4/include/asm-generic/vmlinux.lds.h 2010-09-17 20:12:09.000000000 -0400
43885 @@ -213,6 +213,7 @@
43886 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
43887 VMLINUX_SYMBOL(__start_rodata) = .; \
43888 *(.rodata) *(.rodata.*) \
43889 + *(.data..read_only) \
43890 *(__vermagic) /* Kernel version magic */ \
43891 *(__markers_strings) /* Markers: strings */ \
43892 *(__tracepoints_strings)/* Tracepoints: strings */ \
43893 @@ -670,22 +671,24 @@
43894 * section in the linker script will go there too. @phdr should have
43895 * a leading colon.
43896 *
43897 - * Note that this macros defines __per_cpu_load as an absolute symbol.
43898 + * Note that this macro defines per_cpu_load as an absolute symbol.
43899 * If there is no need to put the percpu section at a predetermined
43900 * address, use PERCPU().
43901 */
43902 #define PERCPU_VADDR(vaddr, phdr) \
43903 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
43904 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
43905 + per_cpu_load = .; \
43906 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
43907 - LOAD_OFFSET) { \
43908 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
43909 VMLINUX_SYMBOL(__per_cpu_start) = .; \
43910 *(.data..percpu..first) \
43911 - *(.data..percpu..page_aligned) \
43912 *(.data..percpu) \
43913 + . = ALIGN(PAGE_SIZE); \
43914 + *(.data..percpu..page_aligned) \
43915 *(.data..percpu..shared_aligned) \
43916 VMLINUX_SYMBOL(__per_cpu_end) = .; \
43917 } phdr \
43918 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
43919 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
43920
43921 /**
43922 * PERCPU - define output section for percpu area, simple version
43923 diff -urNp linux-2.6.35.4/include/drm/drm_pciids.h linux-2.6.35.4/include/drm/drm_pciids.h
43924 --- linux-2.6.35.4/include/drm/drm_pciids.h 2010-08-26 19:47:12.000000000 -0400
43925 +++ linux-2.6.35.4/include/drm/drm_pciids.h 2010-09-17 20:12:09.000000000 -0400
43926 @@ -419,7 +419,7 @@
43927 {0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
43928 {0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
43929 {0x1002, 0x9715, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
43930 - {0, 0, 0}
43931 + {0, 0, 0, 0, 0, 0}
43932
43933 #define r128_PCI_IDS \
43934 {0x1002, 0x4c45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
43935 @@ -459,14 +459,14 @@
43936 {0x1002, 0x5446, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
43937 {0x1002, 0x544C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
43938 {0x1002, 0x5452, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
43939 - {0, 0, 0}
43940 + {0, 0, 0, 0, 0, 0}
43941
43942 #define mga_PCI_IDS \
43943 {0x102b, 0x0520, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \
43944 {0x102b, 0x0521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \
43945 {0x102b, 0x0525, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G400}, \
43946 {0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G550}, \
43947 - {0, 0, 0}
43948 + {0, 0, 0, 0, 0, 0}
43949
43950 #define mach64_PCI_IDS \
43951 {0x1002, 0x4749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
43952 @@ -489,7 +489,7 @@
43953 {0x1002, 0x4c53, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
43954 {0x1002, 0x4c4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
43955 {0x1002, 0x4c4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
43956 - {0, 0, 0}
43957 + {0, 0, 0, 0, 0, 0}
43958
43959 #define sisdrv_PCI_IDS \
43960 {0x1039, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
43961 @@ -500,7 +500,7 @@
43962 {0x1039, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
43963 {0x18CA, 0x0040, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
43964 {0x18CA, 0x0042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \
43965 - {0, 0, 0}
43966 + {0, 0, 0, 0, 0, 0}
43967
43968 #define tdfx_PCI_IDS \
43969 {0x121a, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
43970 @@ -509,7 +509,7 @@
43971 {0x121a, 0x0007, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
43972 {0x121a, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
43973 {0x121a, 0x000b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
43974 - {0, 0, 0}
43975 + {0, 0, 0, 0, 0, 0}
43976
43977 #define viadrv_PCI_IDS \
43978 {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
43979 @@ -521,14 +521,14 @@
43980 {0x1106, 0x3343, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
43981 {0x1106, 0x3230, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_DX9_0}, \
43982 {0x1106, 0x3157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \
43983 - {0, 0, 0}
43984 + {0, 0, 0, 0, 0, 0}
43985
43986 #define i810_PCI_IDS \
43987 {0x8086, 0x7121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
43988 {0x8086, 0x7123, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
43989 {0x8086, 0x7125, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
43990 {0x8086, 0x1132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
43991 - {0, 0, 0}
43992 + {0, 0, 0, 0, 0, 0}
43993
43994 #define i830_PCI_IDS \
43995 {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
43996 @@ -536,11 +536,11 @@
43997 {0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
43998 {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
43999 {0x8086, 0x358e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
44000 - {0, 0, 0}
44001 + {0, 0, 0, 0, 0, 0}
44002
44003 #define gamma_PCI_IDS \
44004 {0x3d3d, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
44005 - {0, 0, 0}
44006 + {0, 0, 0, 0, 0, 0}
44007
44008 #define savage_PCI_IDS \
44009 {0x5333, 0x8a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
44010 @@ -566,10 +566,10 @@
44011 {0x5333, 0x8d02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \
44012 {0x5333, 0x8d03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
44013 {0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
44014 - {0, 0, 0}
44015 + {0, 0, 0, 0, 0, 0}
44016
44017 #define ffb_PCI_IDS \
44018 - {0, 0, 0}
44019 + {0, 0, 0, 0, 0, 0}
44020
44021 #define i915_PCI_IDS \
44022 {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
44023 @@ -603,4 +603,4 @@
44024 {0x8086, 0x0042, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
44025 {0x8086, 0x0046, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
44026 {0x8086, 0x0102, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
44027 - {0, 0, 0}
44028 + {0, 0, 0, 0, 0, 0}
44029 diff -urNp linux-2.6.35.4/include/drm/drmP.h linux-2.6.35.4/include/drm/drmP.h
44030 --- linux-2.6.35.4/include/drm/drmP.h 2010-08-26 19:47:12.000000000 -0400
44031 +++ linux-2.6.35.4/include/drm/drmP.h 2010-09-17 20:12:09.000000000 -0400
44032 @@ -808,7 +808,7 @@ struct drm_driver {
44033 void (*vgaarb_irq)(struct drm_device *dev, bool state);
44034
44035 /* Driver private ops for this object */
44036 - struct vm_operations_struct *gem_vm_ops;
44037 + const struct vm_operations_struct *gem_vm_ops;
44038
44039 int major;
44040 int minor;
44041 @@ -917,7 +917,7 @@ struct drm_device {
44042
44043 /** \name Usage Counters */
44044 /*@{ */
44045 - int open_count; /**< Outstanding files open */
44046 + atomic_t open_count; /**< Outstanding files open */
44047 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
44048 atomic_t vma_count; /**< Outstanding vma areas open */
44049 int buf_use; /**< Buffers in use -- cannot alloc */
44050 @@ -928,7 +928,7 @@ struct drm_device {
44051 /*@{ */
44052 unsigned long counters;
44053 enum drm_stat_type types[15];
44054 - atomic_t counts[15];
44055 + atomic_unchecked_t counts[15];
44056 /*@} */
44057
44058 struct list_head filelist;
44059 diff -urNp linux-2.6.35.4/include/linux/a.out.h linux-2.6.35.4/include/linux/a.out.h
44060 --- linux-2.6.35.4/include/linux/a.out.h 2010-08-26 19:47:12.000000000 -0400
44061 +++ linux-2.6.35.4/include/linux/a.out.h 2010-09-17 20:12:09.000000000 -0400
44062 @@ -39,6 +39,14 @@ enum machine_type {
44063 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
44064 };
44065
44066 +/* Constants for the N_FLAGS field */
44067 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
44068 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
44069 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
44070 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
44071 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
44072 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
44073 +
44074 #if !defined (N_MAGIC)
44075 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
44076 #endif
44077 diff -urNp linux-2.6.35.4/include/linux/atmdev.h linux-2.6.35.4/include/linux/atmdev.h
44078 --- linux-2.6.35.4/include/linux/atmdev.h 2010-08-26 19:47:12.000000000 -0400
44079 +++ linux-2.6.35.4/include/linux/atmdev.h 2010-09-17 20:12:09.000000000 -0400
44080 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
44081 #endif
44082
44083 struct k_atm_aal_stats {
44084 -#define __HANDLE_ITEM(i) atomic_t i
44085 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
44086 __AAL_STAT_ITEMS
44087 #undef __HANDLE_ITEM
44088 };
44089 diff -urNp linux-2.6.35.4/include/linux/binfmts.h linux-2.6.35.4/include/linux/binfmts.h
44090 --- linux-2.6.35.4/include/linux/binfmts.h 2010-08-26 19:47:12.000000000 -0400
44091 +++ linux-2.6.35.4/include/linux/binfmts.h 2010-09-17 20:12:09.000000000 -0400
44092 @@ -87,6 +87,7 @@ struct linux_binfmt {
44093 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
44094 int (*load_shlib)(struct file *);
44095 int (*core_dump)(struct coredump_params *cprm);
44096 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
44097 unsigned long min_coredump; /* minimal dump size */
44098 int hasvdso;
44099 };
44100 diff -urNp linux-2.6.35.4/include/linux/blkdev.h linux-2.6.35.4/include/linux/blkdev.h
44101 --- linux-2.6.35.4/include/linux/blkdev.h 2010-08-26 19:47:12.000000000 -0400
44102 +++ linux-2.6.35.4/include/linux/blkdev.h 2010-09-17 20:12:09.000000000 -0400
44103 @@ -1331,20 +1331,20 @@ static inline int blk_integrity_rq(struc
44104 #endif /* CONFIG_BLK_DEV_INTEGRITY */
44105
44106 struct block_device_operations {
44107 - int (*open) (struct block_device *, fmode_t);
44108 - int (*release) (struct gendisk *, fmode_t);
44109 - int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
44110 - int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
44111 - int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
44112 - int (*direct_access) (struct block_device *, sector_t,
44113 + int (* const open) (struct block_device *, fmode_t);
44114 + int (* const release) (struct gendisk *, fmode_t);
44115 + int (* const locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
44116 + int (* const ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
44117 + int (* const compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
44118 + int (* const direct_access) (struct block_device *, sector_t,
44119 void **, unsigned long *);
44120 - int (*media_changed) (struct gendisk *);
44121 - void (*unlock_native_capacity) (struct gendisk *);
44122 - int (*revalidate_disk) (struct gendisk *);
44123 - int (*getgeo)(struct block_device *, struct hd_geometry *);
44124 + int (* const media_changed) (struct gendisk *);
44125 + void (* const unlock_native_capacity) (struct gendisk *);
44126 + int (* const revalidate_disk) (struct gendisk *);
44127 + int (*const getgeo)(struct block_device *, struct hd_geometry *);
44128 /* this callback is with swap_lock and sometimes page table lock held */
44129 - void (*swap_slot_free_notify) (struct block_device *, unsigned long);
44130 - struct module *owner;
44131 + void (* const swap_slot_free_notify) (struct block_device *, unsigned long);
44132 + struct module * const owner;
44133 };
44134
44135 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
44136 diff -urNp linux-2.6.35.4/include/linux/cache.h linux-2.6.35.4/include/linux/cache.h
44137 --- linux-2.6.35.4/include/linux/cache.h 2010-08-26 19:47:12.000000000 -0400
44138 +++ linux-2.6.35.4/include/linux/cache.h 2010-09-17 20:12:09.000000000 -0400
44139 @@ -16,6 +16,10 @@
44140 #define __read_mostly
44141 #endif
44142
44143 +#ifndef __read_only
44144 +#define __read_only __read_mostly
44145 +#endif
44146 +
44147 #ifndef ____cacheline_aligned
44148 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
44149 #endif
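
cache.h now provides a __read_only annotation that falls back to __read_mostly when an architecture does not define its own, and the earlier vmlinux.lds.h hunk folds a .data..read_only input section into the read-only .rodata output section. The apparent intent is that arch code can define __read_only to place write-once variables in that section; the definition below is a hypothetical example of such an arch override, shown in a small stand-alone demo (in userspace the section is not actually write-protected).

/*
 * Hypothetical arch-level definition: put the variable in .data..read_only,
 * which the linker script change above gathers into the read-only segment.
 */
#include <stdio.h>

#define __read_only __attribute__((__section__(".data..read_only")))

static unsigned long demo_feature_mask __read_only = 0;	/* set once at init */

int main(void)
{
	demo_feature_mask = 0x1;	/* in the kernel this write would happen
					   before the section becomes read-only */
	printf("%lx\n", demo_feature_mask);
	return 0;
}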
44150 diff -urNp linux-2.6.35.4/include/linux/capability.h linux-2.6.35.4/include/linux/capability.h
44151 --- linux-2.6.35.4/include/linux/capability.h 2010-08-26 19:47:12.000000000 -0400
44152 +++ linux-2.6.35.4/include/linux/capability.h 2010-09-17 20:12:37.000000000 -0400
44153 @@ -561,6 +561,7 @@ extern const kernel_cap_t __cap_init_eff
44154 (security_real_capable_noaudit((t), (cap)) == 0)
44155
44156 extern int capable(int cap);
44157 +int capable_nolog(int cap);
44158
44159 /* audit system wants to get cap info from files as well */
44160 struct dentry;
44161 diff -urNp linux-2.6.35.4/include/linux/compat.h linux-2.6.35.4/include/linux/compat.h
44162 --- linux-2.6.35.4/include/linux/compat.h 2010-08-26 19:47:12.000000000 -0400
44163 +++ linux-2.6.35.4/include/linux/compat.h 2010-09-17 20:12:37.000000000 -0400
44164 @@ -360,5 +360,8 @@ extern ssize_t compat_rw_copy_check_uvec
44165 const struct compat_iovec __user *uvector, unsigned long nr_segs,
44166 unsigned long fast_segs, struct iovec *fast_pointer,
44167 struct iovec **ret_pointer);
44168 +
44169 +extern void __user *compat_alloc_user_space(unsigned long len);
44170 +
44171 #endif /* CONFIG_COMPAT */
44172 #endif /* _LINUX_COMPAT_H */
44173 diff -urNp linux-2.6.35.4/include/linux/compiler-gcc4.h linux-2.6.35.4/include/linux/compiler-gcc4.h
44174 --- linux-2.6.35.4/include/linux/compiler-gcc4.h 2010-08-26 19:47:12.000000000 -0400
44175 +++ linux-2.6.35.4/include/linux/compiler-gcc4.h 2010-09-17 20:12:09.000000000 -0400
44176 @@ -54,6 +54,10 @@
44177
44178 #endif
44179
44180 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
44181 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
44182 +#define __bos0(ptr) __bos((ptr), 0)
44183 +#define __bos1(ptr) __bos((ptr), 1)
44184 #endif
44185
44186 #if __GNUC_MINOR__ > 0
44187 diff -urNp linux-2.6.35.4/include/linux/compiler.h linux-2.6.35.4/include/linux/compiler.h
44188 --- linux-2.6.35.4/include/linux/compiler.h 2010-08-26 19:47:12.000000000 -0400
44189 +++ linux-2.6.35.4/include/linux/compiler.h 2010-09-17 20:12:09.000000000 -0400
44190 @@ -267,6 +267,22 @@ void ftrace_likely_update(struct ftrace_
44191 #define __cold
44192 #endif
44193
44194 +#ifndef __alloc_size
44195 +#define __alloc_size
44196 +#endif
44197 +
44198 +#ifndef __bos
44199 +#define __bos
44200 +#endif
44201 +
44202 +#ifndef __bos0
44203 +#define __bos0
44204 +#endif
44205 +
44206 +#ifndef __bos1
44207 +#define __bos1
44208 +#endif
44209 +
44210 /* Simple shorthand for a section definition */
44211 #ifndef __section
44212 # define __section(S) __attribute__ ((__section__(#S)))
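
compiler-gcc4.h defines __alloc_size() and the __bos*() wrappers around __builtin_object_size(), and compiler.h adds empty fallbacks so other code can use them unconditionally. Their consumers (presumably size-checked copy or allocation helpers) are elsewhere in the patch; the stand-alone demo below, with a hypothetical demo_alloc(), only shows what the two primitives give the compiler.

/*
 * alloc_size() tells gcc which argument carries the allocation size;
 * __builtin_object_size() lets later code ask how big an object is.
 */
#include <stdio.h>
#include <stdlib.h>

#define __alloc_size(...)	__attribute((alloc_size(__VA_ARGS__)))
#define __bos(ptr, arg)		__builtin_object_size((ptr), (arg))
#define __bos0(ptr)		__bos((ptr), 0)

static void *demo_alloc(size_t n) __alloc_size(1);
static void *demo_alloc(size_t n)
{
	return malloc(n);
}

int main(void)
{
	char buf[16];
	void *p = demo_alloc(32);

	/* with optimization, gcc can report 16 for buf; the heap object may
	   come back as (size_t)-1 if it cannot be tracked */
	printf("%zu %zu\n", __bos0(buf), __bos0(p));
	free(p);
	return 0;
}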
44213 diff -urNp linux-2.6.35.4/include/linux/decompress/mm.h linux-2.6.35.4/include/linux/decompress/mm.h
44214 --- linux-2.6.35.4/include/linux/decompress/mm.h 2010-08-26 19:47:12.000000000 -0400
44215 +++ linux-2.6.35.4/include/linux/decompress/mm.h 2010-09-17 20:12:09.000000000 -0400
44216 @@ -78,7 +78,7 @@ static void free(void *where)
44217 * warnings when not needed (indeed large_malloc / large_free are not
44218 * needed by inflate */
44219
44220 -#define malloc(a) kmalloc(a, GFP_KERNEL)
44221 +#define malloc(a) kmalloc((a), GFP_KERNEL)
44222 #define free(a) kfree(a)
44223
44224 #define large_malloc(a) vmalloc(a)
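
The malloc() wrapper change is pure macro hygiene: the parameter gains its own parentheses. In this particular wrapper the argument already lands in a function-argument slot, so behaviour does not change, but the convention guards against the classic precedence hazard shown below with hypothetical BYTES_* macros.

/* Why macro parameters get their own parentheses. */
#include <stdio.h>

#define BYTES_BAD(n)	n * 8		/* unparenthesized parameter        */
#define BYTES_GOOD(n)	((n) * 8)	/* parenthesized, precedence-proof  */

int main(void)
{
	printf("%d\n", BYTES_BAD(2 + 1));	/* 2 + 1*8 = 10, not 24 */
	printf("%d\n", BYTES_GOOD(2 + 1));	/* (2 + 1)*8 = 24       */
	return 0;
}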
44225 diff -urNp linux-2.6.35.4/include/linux/dma-mapping.h linux-2.6.35.4/include/linux/dma-mapping.h
44226 --- linux-2.6.35.4/include/linux/dma-mapping.h 2010-08-26 19:47:12.000000000 -0400
44227 +++ linux-2.6.35.4/include/linux/dma-mapping.h 2010-09-17 20:12:09.000000000 -0400
44228 @@ -16,40 +16,40 @@ enum dma_data_direction {
44229 };
44230
44231 struct dma_map_ops {
44232 - void* (*alloc_coherent)(struct device *dev, size_t size,
44233 + void* (* const alloc_coherent)(struct device *dev, size_t size,
44234 dma_addr_t *dma_handle, gfp_t gfp);
44235 - void (*free_coherent)(struct device *dev, size_t size,
44236 + void (* const free_coherent)(struct device *dev, size_t size,
44237 void *vaddr, dma_addr_t dma_handle);
44238 - dma_addr_t (*map_page)(struct device *dev, struct page *page,
44239 + dma_addr_t (* const map_page)(struct device *dev, struct page *page,
44240 unsigned long offset, size_t size,
44241 enum dma_data_direction dir,
44242 struct dma_attrs *attrs);
44243 - void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
44244 + void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
44245 size_t size, enum dma_data_direction dir,
44246 struct dma_attrs *attrs);
44247 - int (*map_sg)(struct device *dev, struct scatterlist *sg,
44248 + int (* const map_sg)(struct device *dev, struct scatterlist *sg,
44249 int nents, enum dma_data_direction dir,
44250 struct dma_attrs *attrs);
44251 - void (*unmap_sg)(struct device *dev,
44252 + void (* const unmap_sg)(struct device *dev,
44253 struct scatterlist *sg, int nents,
44254 enum dma_data_direction dir,
44255 struct dma_attrs *attrs);
44256 - void (*sync_single_for_cpu)(struct device *dev,
44257 + void (* const sync_single_for_cpu)(struct device *dev,
44258 dma_addr_t dma_handle, size_t size,
44259 enum dma_data_direction dir);
44260 - void (*sync_single_for_device)(struct device *dev,
44261 + void (* const sync_single_for_device)(struct device *dev,
44262 dma_addr_t dma_handle, size_t size,
44263 enum dma_data_direction dir);
44264 - void (*sync_sg_for_cpu)(struct device *dev,
44265 + void (* const sync_sg_for_cpu)(struct device *dev,
44266 struct scatterlist *sg, int nents,
44267 enum dma_data_direction dir);
44268 - void (*sync_sg_for_device)(struct device *dev,
44269 + void (* const sync_sg_for_device)(struct device *dev,
44270 struct scatterlist *sg, int nents,
44271 enum dma_data_direction dir);
44272 - int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
44273 - int (*dma_supported)(struct device *dev, u64 mask);
44274 - int (*set_dma_mask)(struct device *dev, u64 mask);
44275 - int is_phys;
44276 + int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
44277 + int (* const dma_supported)(struct device *dev, u64 mask);
44278 + int (* set_dma_mask)(struct device *dev, u64 mask);
44279 + const int is_phys;
44280 };
44281
44282 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
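
The dma_map_ops hunk above, like the block_device_operations, address_space_operations and super_operations hunks later in this file, rewrites every method pointer as (* const ...). The apparent hardening intent is that such ops tables can only be populated by an initializer and never reassigned afterwards, which the compiler then enforces. A miniature, hypothetical version of the pattern:

/*
 * A const function-pointer member may only be set in an initializer; any
 * later assignment is a compile-time error.
 */
#include <stdio.h>

struct demo_ops {
	int (* const open)(void);	/* can only be set by an initializer */
};

static int demo_open(void) { return 42; }

static const struct demo_ops ops = {
	.open = demo_open,
};

int main(void)
{
	/* ops.open = NULL; */	/* would not compile: assignment of read-only member */
	printf("%d\n", ops.open());
	return 0;
}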
44283 diff -urNp linux-2.6.35.4/include/linux/elf.h linux-2.6.35.4/include/linux/elf.h
44284 --- linux-2.6.35.4/include/linux/elf.h 2010-08-26 19:47:12.000000000 -0400
44285 +++ linux-2.6.35.4/include/linux/elf.h 2010-09-17 20:12:09.000000000 -0400
44286 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
44287 #define PT_GNU_EH_FRAME 0x6474e550
44288
44289 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
44290 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
44291 +
44292 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
44293 +
44294 +/* Constants for the e_flags field */
44295 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
44296 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
44297 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
44298 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
44299 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
44300 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
44301
44302 /*
44303 * Extended Numbering
44304 @@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
44305 #define DT_DEBUG 21
44306 #define DT_TEXTREL 22
44307 #define DT_JMPREL 23
44308 +#define DT_FLAGS 30
44309 + #define DF_TEXTREL 0x00000004
44310 #define DT_ENCODING 32
44311 #define OLD_DT_LOOS 0x60000000
44312 #define DT_LOOS 0x6000000d
44313 @@ -252,6 +265,19 @@ typedef struct elf64_hdr {
44314 #define PF_W 0x2
44315 #define PF_X 0x1
44316
44317 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
44318 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
44319 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
44320 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
44321 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
44322 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
44323 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
44324 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
44325 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
44326 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
44327 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
44328 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
44329 +
44330 typedef struct elf32_phdr{
44331 Elf32_Word p_type;
44332 Elf32_Off p_offset;
44333 @@ -344,6 +370,8 @@ typedef struct elf64_shdr {
44334 #define EI_OSABI 7
44335 #define EI_PAD 8
44336
44337 +#define EI_PAX 14
44338 +
44339 #define ELFMAG0 0x7f /* EI_MAG */
44340 #define ELFMAG1 'E'
44341 #define ELFMAG2 'L'
44342 @@ -421,6 +449,7 @@ extern Elf32_Dyn _DYNAMIC [];
44343 #define elf_note elf32_note
44344 #define elf_addr_t Elf32_Off
44345 #define Elf_Half Elf32_Half
44346 +#define elf_dyn Elf32_Dyn
44347
44348 #else
44349
44350 @@ -431,6 +460,7 @@ extern Elf64_Dyn _DYNAMIC [];
44351 #define elf_note elf64_note
44352 #define elf_addr_t Elf64_Off
44353 #define Elf_Half Elf64_Half
44354 +#define elf_dyn Elf64_Dyn
44355
44356 #endif
44357
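
elf.h gains the PT_PAX_FLAGS program-header type, the per-feature PF_*/PF_NO* flag bits, and the EI_PAX e_ident index, which is how PaX markings are carried inside a binary. The sketch below is a hedged userspace reader, roughly what a paxctl-style tool might do, not code from the patch; it handles 64-bit ELF only, tests just the MPROTECT pair of bits, and redefines them locally with the values given above.

/* Illustrative reader for the PT_PAX_FLAGS program header of a 64-bit ELF. */
#include <elf.h>
#include <stdio.h>

#define PT_PAX_FLAGS	(PT_LOOS + 0x5041580)
#define PF_MPROTECT	(1U << 8)
#define PF_NOMPROTECT	(1U << 9)

int main(int argc, char **argv)
{
	FILE *f;
	Elf64_Ehdr eh;
	Elf64_Phdr ph;
	int i;

	if (argc < 2 || !(f = fopen(argv[1], "rb")))
		return 1;
	if (fread(&eh, sizeof(eh), 1, f) != 1)
		return 1;
	if (fseek(f, (long)eh.e_phoff, SEEK_SET) != 0)
		return 1;
	for (i = 0; i < eh.e_phnum; i++) {
		if (fread(&ph, sizeof(ph), 1, f) != 1)
			return 1;
		if (ph.p_type != PT_PAX_FLAGS)
			continue;
		printf("PT_PAX_FLAGS: mprotect %s\n",
		       (ph.p_flags & PF_NOMPROTECT) ? "disabled" :
		       (ph.p_flags & PF_MPROTECT)   ? "enabled"  : "default");
	}
	fclose(f);
	return 0;
}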
44358 diff -urNp linux-2.6.35.4/include/linux/fs.h linux-2.6.35.4/include/linux/fs.h
44359 --- linux-2.6.35.4/include/linux/fs.h 2010-08-26 19:47:12.000000000 -0400
44360 +++ linux-2.6.35.4/include/linux/fs.h 2010-09-17 20:12:37.000000000 -0400
44361 @@ -90,6 +90,11 @@ struct inodes_stat_t {
44362 /* Expect random access pattern */
44363 #define FMODE_RANDOM ((__force fmode_t)0x1000)
44364
44365 +/* Hack for grsec so as not to require read permission simply to execute
44366 + * a binary
44367 + */
44368 +#define FMODE_GREXEC ((__force fmode_t)0x2000)
44369 +
44370 /*
44371 * The below are the various read and write types that we support. Some of
44372 * them include behavioral modifiers that send information down to the
44373 @@ -571,41 +576,41 @@ typedef int (*read_actor_t)(read_descrip
44374 unsigned long, unsigned long);
44375
44376 struct address_space_operations {
44377 - int (*writepage)(struct page *page, struct writeback_control *wbc);
44378 - int (*readpage)(struct file *, struct page *);
44379 - void (*sync_page)(struct page *);
44380 + int (* const writepage)(struct page *page, struct writeback_control *wbc);
44381 + int (* const readpage)(struct file *, struct page *);
44382 + void (* const sync_page)(struct page *);
44383
44384 /* Write back some dirty pages from this mapping. */
44385 - int (*writepages)(struct address_space *, struct writeback_control *);
44386 + int (* const writepages)(struct address_space *, struct writeback_control *);
44387
44388 /* Set a page dirty. Return true if this dirtied it */
44389 - int (*set_page_dirty)(struct page *page);
44390 + int (* const set_page_dirty)(struct page *page);
44391
44392 - int (*readpages)(struct file *filp, struct address_space *mapping,
44393 + int (* const readpages)(struct file *filp, struct address_space *mapping,
44394 struct list_head *pages, unsigned nr_pages);
44395
44396 - int (*write_begin)(struct file *, struct address_space *mapping,
44397 + int (* const write_begin)(struct file *, struct address_space *mapping,
44398 loff_t pos, unsigned len, unsigned flags,
44399 struct page **pagep, void **fsdata);
44400 - int (*write_end)(struct file *, struct address_space *mapping,
44401 + int (* const write_end)(struct file *, struct address_space *mapping,
44402 loff_t pos, unsigned len, unsigned copied,
44403 struct page *page, void *fsdata);
44404
44405 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
44406 - sector_t (*bmap)(struct address_space *, sector_t);
44407 - void (*invalidatepage) (struct page *, unsigned long);
44408 - int (*releasepage) (struct page *, gfp_t);
44409 - ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
44410 + sector_t (* const bmap)(struct address_space *, sector_t);
44411 + void (* const invalidatepage) (struct page *, unsigned long);
44412 + int (* const releasepage) (struct page *, gfp_t);
44413 + ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
44414 loff_t offset, unsigned long nr_segs);
44415 - int (*get_xip_mem)(struct address_space *, pgoff_t, int,
44416 + int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
44417 void **, unsigned long *);
44418 /* migrate the contents of a page to the specified target */
44419 - int (*migratepage) (struct address_space *,
44420 + int (* const migratepage) (struct address_space *,
44421 struct page *, struct page *);
44422 - int (*launder_page) (struct page *);
44423 - int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
44424 + int (* const launder_page) (struct page *);
44425 + int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
44426 unsigned long);
44427 - int (*error_remove_page)(struct address_space *, struct page *);
44428 + int (* const error_remove_page)(struct address_space *, struct page *);
44429 };
44430
44431 /*
44432 @@ -1035,19 +1040,19 @@ static inline int file_check_writeable(s
44433 typedef struct files_struct *fl_owner_t;
44434
44435 struct file_lock_operations {
44436 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
44437 - void (*fl_release_private)(struct file_lock *);
44438 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
44439 + void (* const fl_release_private)(struct file_lock *);
44440 };
44441
44442 struct lock_manager_operations {
44443 - int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
44444 - void (*fl_notify)(struct file_lock *); /* unblock callback */
44445 - int (*fl_grant)(struct file_lock *, struct file_lock *, int);
44446 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
44447 - void (*fl_release_private)(struct file_lock *);
44448 - void (*fl_break)(struct file_lock *);
44449 - int (*fl_mylease)(struct file_lock *, struct file_lock *);
44450 - int (*fl_change)(struct file_lock **, int);
44451 + int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
44452 + void (* const fl_notify)(struct file_lock *); /* unblock callback */
44453 + int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
44454 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
44455 + void (* const fl_release_private)(struct file_lock *);
44456 + void (* const fl_break)(struct file_lock *);
44457 + int (* const fl_mylease)(struct file_lock *, struct file_lock *);
44458 + int (* const fl_change)(struct file_lock **, int);
44459 };
44460
44461 struct lock_manager {
44462 @@ -1440,7 +1445,7 @@ struct fiemap_extent_info {
44463 unsigned int fi_flags; /* Flags as passed from user */
44464 unsigned int fi_extents_mapped; /* Number of mapped extents */
44465 unsigned int fi_extents_max; /* Size of fiemap_extent array */
44466 - struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
44467 + struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
44468 * array */
44469 };
44470 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
44471 @@ -1557,30 +1562,30 @@ extern ssize_t vfs_writev(struct file *,
44472 unsigned long, loff_t *);
44473
44474 struct super_operations {
44475 - struct inode *(*alloc_inode)(struct super_block *sb);
44476 - void (*destroy_inode)(struct inode *);
44477 + struct inode *(* const alloc_inode)(struct super_block *sb);
44478 + void (* const destroy_inode)(struct inode *);
44479
44480 - void (*dirty_inode) (struct inode *);
44481 - int (*write_inode) (struct inode *, struct writeback_control *wbc);
44482 - void (*drop_inode) (struct inode *);
44483 - void (*delete_inode) (struct inode *);
44484 - void (*put_super) (struct super_block *);
44485 - void (*write_super) (struct super_block *);
44486 - int (*sync_fs)(struct super_block *sb, int wait);
44487 - int (*freeze_fs) (struct super_block *);
44488 - int (*unfreeze_fs) (struct super_block *);
44489 - int (*statfs) (struct dentry *, struct kstatfs *);
44490 - int (*remount_fs) (struct super_block *, int *, char *);
44491 - void (*clear_inode) (struct inode *);
44492 - void (*umount_begin) (struct super_block *);
44493 + void (* const dirty_inode) (struct inode *);
44494 + int (* const write_inode) (struct inode *, struct writeback_control *wbc);
44495 + void (* const drop_inode) (struct inode *);
44496 + void (* const delete_inode) (struct inode *);
44497 + void (* const put_super) (struct super_block *);
44498 + void (* const write_super) (struct super_block *);
44499 + int (* const sync_fs)(struct super_block *sb, int wait);
44500 + int (* const freeze_fs) (struct super_block *);
44501 + int (* const unfreeze_fs) (struct super_block *);
44502 + int (* const statfs) (struct dentry *, struct kstatfs *);
44503 + int (* const remount_fs) (struct super_block *, int *, char *);
44504 + void (* const clear_inode) (struct inode *);
44505 + void (* const umount_begin) (struct super_block *);
44506
44507 - int (*show_options)(struct seq_file *, struct vfsmount *);
44508 - int (*show_stats)(struct seq_file *, struct vfsmount *);
44509 + int (* const show_options)(struct seq_file *, struct vfsmount *);
44510 + int (* const show_stats)(struct seq_file *, struct vfsmount *);
44511 #ifdef CONFIG_QUOTA
44512 - ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
44513 - ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
44514 + ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
44515 + ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
44516 #endif
44517 - int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
44518 + int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
44519 };
44520
44521 /*
44522 diff -urNp linux-2.6.35.4/include/linux/fs_struct.h linux-2.6.35.4/include/linux/fs_struct.h
44523 --- linux-2.6.35.4/include/linux/fs_struct.h 2010-08-26 19:47:12.000000000 -0400
44524 +++ linux-2.6.35.4/include/linux/fs_struct.h 2010-09-17 20:12:09.000000000 -0400
44525 @@ -4,7 +4,7 @@
44526 #include <linux/path.h>
44527
44528 struct fs_struct {
44529 - int users;
44530 + atomic_t users;
44531 rwlock_t lock;
44532 int umask;
44533 int in_exec;
44534 diff -urNp linux-2.6.35.4/include/linux/genhd.h linux-2.6.35.4/include/linux/genhd.h
44535 --- linux-2.6.35.4/include/linux/genhd.h 2010-08-26 19:47:12.000000000 -0400
44536 +++ linux-2.6.35.4/include/linux/genhd.h 2010-09-17 20:12:09.000000000 -0400
44537 @@ -162,7 +162,7 @@ struct gendisk {
44538
44539 struct timer_rand_state *random;
44540
44541 - atomic_t sync_io; /* RAID */
44542 + atomic_unchecked_t sync_io; /* RAID */
44543 struct work_struct async_notify;
44544 #ifdef CONFIG_BLK_DEV_INTEGRITY
44545 struct blk_integrity *integrity;
44546 diff -urNp linux-2.6.35.4/include/linux/gracl.h linux-2.6.35.4/include/linux/gracl.h
44547 --- linux-2.6.35.4/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
44548 +++ linux-2.6.35.4/include/linux/gracl.h 2010-09-17 20:12:37.000000000 -0400
44549 @@ -0,0 +1,310 @@
44550 +#ifndef GR_ACL_H
44551 +#define GR_ACL_H
44552 +
44553 +#include <linux/grdefs.h>
44554 +#include <linux/resource.h>
44555 +#include <linux/capability.h>
44556 +#include <linux/dcache.h>
44557 +#include <asm/resource.h>
44558 +
44559 +/* Major status information */
44560 +
44561 +#define GR_VERSION "grsecurity 2.2.0"
44562 +#define GRSECURITY_VERSION 0x2200
44563 +
44564 +enum {
44565 + GR_SHUTDOWN = 0,
44566 + GR_ENABLE = 1,
44567 + GR_SPROLE = 2,
44568 + GR_RELOAD = 3,
44569 + GR_SEGVMOD = 4,
44570 + GR_STATUS = 5,
44571 + GR_UNSPROLE = 6,
44572 + GR_PASSSET = 7,
44573 + GR_SPROLEPAM = 8,
44574 +};
44575 +
44576 +/* Password setup definitions
44577 + * kernel/grhash.c */
44578 +enum {
44579 + GR_PW_LEN = 128,
44580 + GR_SALT_LEN = 16,
44581 + GR_SHA_LEN = 32,
44582 +};
44583 +
44584 +enum {
44585 + GR_SPROLE_LEN = 64,
44586 +};
44587 +
44588 +#define GR_NLIMITS 32
44589 +
44590 +/* Begin Data Structures */
44591 +
44592 +struct sprole_pw {
44593 + unsigned char *rolename;
44594 + unsigned char salt[GR_SALT_LEN];
44595 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
44596 +};
44597 +
44598 +struct name_entry {
44599 + __u32 key;
44600 + ino_t inode;
44601 + dev_t device;
44602 + char *name;
44603 + __u16 len;
44604 + __u8 deleted;
44605 + struct name_entry *prev;
44606 + struct name_entry *next;
44607 +};
44608 +
44609 +struct inodev_entry {
44610 + struct name_entry *nentry;
44611 + struct inodev_entry *prev;
44612 + struct inodev_entry *next;
44613 +};
44614 +
44615 +struct acl_role_db {
44616 + struct acl_role_label **r_hash;
44617 + __u32 r_size;
44618 +};
44619 +
44620 +struct inodev_db {
44621 + struct inodev_entry **i_hash;
44622 + __u32 i_size;
44623 +};
44624 +
44625 +struct name_db {
44626 + struct name_entry **n_hash;
44627 + __u32 n_size;
44628 +};
44629 +
44630 +struct crash_uid {
44631 + uid_t uid;
44632 + unsigned long expires;
44633 +};
44634 +
44635 +struct gr_hash_struct {
44636 + void **table;
44637 + void **nametable;
44638 + void *first;
44639 + __u32 table_size;
44640 + __u32 used_size;
44641 + int type;
44642 +};
44643 +
44644 +/* Userspace Grsecurity ACL data structures */
44645 +
44646 +struct acl_subject_label {
44647 + char *filename;
44648 + ino_t inode;
44649 + dev_t device;
44650 + __u32 mode;
44651 + kernel_cap_t cap_mask;
44652 + kernel_cap_t cap_lower;
44653 + kernel_cap_t cap_invert_audit;
44654 +
44655 + struct rlimit res[GR_NLIMITS];
44656 + __u32 resmask;
44657 +
44658 + __u8 user_trans_type;
44659 + __u8 group_trans_type;
44660 + uid_t *user_transitions;
44661 + gid_t *group_transitions;
44662 + __u16 user_trans_num;
44663 + __u16 group_trans_num;
44664 +
44665 + __u32 ip_proto[8];
44666 + __u32 ip_type;
44667 + struct acl_ip_label **ips;
44668 + __u32 ip_num;
44669 + __u32 inaddr_any_override;
44670 +
44671 + __u32 crashes;
44672 + unsigned long expires;
44673 +
44674 + struct acl_subject_label *parent_subject;
44675 + struct gr_hash_struct *hash;
44676 + struct acl_subject_label *prev;
44677 + struct acl_subject_label *next;
44678 +
44679 + struct acl_object_label **obj_hash;
44680 + __u32 obj_hash_size;
44681 + __u16 pax_flags;
44682 +};
44683 +
44684 +struct role_allowed_ip {
44685 + __u32 addr;
44686 + __u32 netmask;
44687 +
44688 + struct role_allowed_ip *prev;
44689 + struct role_allowed_ip *next;
44690 +};
44691 +
44692 +struct role_transition {
44693 + char *rolename;
44694 +
44695 + struct role_transition *prev;
44696 + struct role_transition *next;
44697 +};
44698 +
44699 +struct acl_role_label {
44700 + char *rolename;
44701 + uid_t uidgid;
44702 + __u16 roletype;
44703 +
44704 + __u16 auth_attempts;
44705 + unsigned long expires;
44706 +
44707 + struct acl_subject_label *root_label;
44708 + struct gr_hash_struct *hash;
44709 +
44710 + struct acl_role_label *prev;
44711 + struct acl_role_label *next;
44712 +
44713 + struct role_transition *transitions;
44714 + struct role_allowed_ip *allowed_ips;
44715 + uid_t *domain_children;
44716 + __u16 domain_child_num;
44717 +
44718 + struct acl_subject_label **subj_hash;
44719 + __u32 subj_hash_size;
44720 +};
44721 +
44722 +struct user_acl_role_db {
44723 + struct acl_role_label **r_table;
44724 + __u32 num_pointers; /* Number of allocations to track */
44725 + __u32 num_roles; /* Number of roles */
44726 + __u32 num_domain_children; /* Number of domain children */
44727 + __u32 num_subjects; /* Number of subjects */
44728 + __u32 num_objects; /* Number of objects */
44729 +};
44730 +
44731 +struct acl_object_label {
44732 + char *filename;
44733 + ino_t inode;
44734 + dev_t device;
44735 + __u32 mode;
44736 +
44737 + struct acl_subject_label *nested;
44738 + struct acl_object_label *globbed;
44739 +
44740 + /* next two structures not used */
44741 +
44742 + struct acl_object_label *prev;
44743 + struct acl_object_label *next;
44744 +};
44745 +
44746 +struct acl_ip_label {
44747 + char *iface;
44748 + __u32 addr;
44749 + __u32 netmask;
44750 + __u16 low, high;
44751 + __u8 mode;
44752 + __u32 type;
44753 + __u32 proto[8];
44754 +
44755 + /* next two structures not used */
44756 +
44757 + struct acl_ip_label *prev;
44758 + struct acl_ip_label *next;
44759 +};
44760 +
44761 +struct gr_arg {
44762 + struct user_acl_role_db role_db;
44763 + unsigned char pw[GR_PW_LEN];
44764 + unsigned char salt[GR_SALT_LEN];
44765 + unsigned char sum[GR_SHA_LEN];
44766 + unsigned char sp_role[GR_SPROLE_LEN];
44767 + struct sprole_pw *sprole_pws;
44768 + dev_t segv_device;
44769 + ino_t segv_inode;
44770 + uid_t segv_uid;
44771 + __u16 num_sprole_pws;
44772 + __u16 mode;
44773 +};
44774 +
44775 +struct gr_arg_wrapper {
44776 + struct gr_arg *arg;
44777 + __u32 version;
44778 + __u32 size;
44779 +};
44780 +
44781 +struct subject_map {
44782 + struct acl_subject_label *user;
44783 + struct acl_subject_label *kernel;
44784 + struct subject_map *prev;
44785 + struct subject_map *next;
44786 +};
44787 +
44788 +struct acl_subj_map_db {
44789 + struct subject_map **s_hash;
44790 + __u32 s_size;
44791 +};
44792 +
44793 +/* End Data Structures Section */
44794 +
44795 +/* Hash functions generated by empirical testing by Brad Spengler
44796 + Makes good use of the low bits of the inode. Generally 0-1 times
44797 + in loop for successful match. 0-3 for unsuccessful match.
44798 + Shift/add algorithm with modulus of table size and an XOR*/
44799 +
44800 +static __inline__ unsigned int
44801 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
44802 +{
44803 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
44804 +}
44805 +
44806 + static __inline__ unsigned int
44807 +shash(const struct acl_subject_label *userp, const unsigned int sz)
44808 +{
44809 + return ((const unsigned long)userp % sz);
44810 +}
44811 +
44812 +static __inline__ unsigned int
44813 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
44814 +{
44815 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
44816 +}
44817 +
44818 +static __inline__ unsigned int
44819 +nhash(const char *name, const __u16 len, const unsigned int sz)
44820 +{
44821 + return full_name_hash((const unsigned char *)name, len) % sz;
44822 +}
44823 +
44824 +#define FOR_EACH_ROLE_START(role) \
44825 + role = role_list; \
44826 + while (role) {
44827 +
44828 +#define FOR_EACH_ROLE_END(role) \
44829 + role = role->prev; \
44830 + }
44831 +
44832 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
44833 + subj = NULL; \
44834 + iter = 0; \
44835 + while (iter < role->subj_hash_size) { \
44836 + if (subj == NULL) \
44837 + subj = role->subj_hash[iter]; \
44838 + if (subj == NULL) { \
44839 + iter++; \
44840 + continue; \
44841 + }
44842 +
44843 +#define FOR_EACH_SUBJECT_END(subj,iter) \
44844 + subj = subj->next; \
44845 + if (subj == NULL) \
44846 + iter++; \
44847 + }
44848 +
44849 +
44850 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
44851 + subj = role->hash->first; \
44852 + while (subj != NULL) {
44853 +
44854 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
44855 + subj = subj->next; \
44856 + }
44857 +
44858 +#endif
44859 +
44860 diff -urNp linux-2.6.35.4/include/linux/gralloc.h linux-2.6.35.4/include/linux/gralloc.h
44861 --- linux-2.6.35.4/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
44862 +++ linux-2.6.35.4/include/linux/gralloc.h 2010-09-17 20:12:37.000000000 -0400
44863 @@ -0,0 +1,9 @@
44864 +#ifndef __GRALLOC_H
44865 +#define __GRALLOC_H
44866 +
44867 +void acl_free_all(void);
44868 +int acl_alloc_stack_init(unsigned long size);
44869 +void *acl_alloc(unsigned long len);
44870 +void *acl_alloc_num(unsigned long num, unsigned long len);
44871 +
44872 +#endif
44873 diff -urNp linux-2.6.35.4/include/linux/grdefs.h linux-2.6.35.4/include/linux/grdefs.h
44874 --- linux-2.6.35.4/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
44875 +++ linux-2.6.35.4/include/linux/grdefs.h 2010-09-17 20:12:37.000000000 -0400
44876 @@ -0,0 +1,136 @@
44877 +#ifndef GRDEFS_H
44878 +#define GRDEFS_H
44879 +
44880 +/* Begin grsecurity status declarations */
44881 +
44882 +enum {
44883 + GR_READY = 0x01,
44884 + GR_STATUS_INIT = 0x00 // disabled state
44885 +};
44886 +
44887 +/* Begin ACL declarations */
44888 +
44889 +/* Role flags */
44890 +
44891 +enum {
44892 + GR_ROLE_USER = 0x0001,
44893 + GR_ROLE_GROUP = 0x0002,
44894 + GR_ROLE_DEFAULT = 0x0004,
44895 + GR_ROLE_SPECIAL = 0x0008,
44896 + GR_ROLE_AUTH = 0x0010,
44897 + GR_ROLE_NOPW = 0x0020,
44898 + GR_ROLE_GOD = 0x0040,
44899 + GR_ROLE_LEARN = 0x0080,
44900 + GR_ROLE_TPE = 0x0100,
44901 + GR_ROLE_DOMAIN = 0x0200,
44902 + GR_ROLE_PAM = 0x0400
44903 +};
44904 +
44905 +/* ACL Subject and Object mode flags */
44906 +enum {
44907 + GR_DELETED = 0x80000000
44908 +};
44909 +
44910 +/* ACL Object-only mode flags */
44911 +enum {
44912 + GR_READ = 0x00000001,
44913 + GR_APPEND = 0x00000002,
44914 + GR_WRITE = 0x00000004,
44915 + GR_EXEC = 0x00000008,
44916 + GR_FIND = 0x00000010,
44917 + GR_INHERIT = 0x00000020,
44918 + GR_SETID = 0x00000040,
44919 + GR_CREATE = 0x00000080,
44920 + GR_DELETE = 0x00000100,
44921 + GR_LINK = 0x00000200,
44922 + GR_AUDIT_READ = 0x00000400,
44923 + GR_AUDIT_APPEND = 0x00000800,
44924 + GR_AUDIT_WRITE = 0x00001000,
44925 + GR_AUDIT_EXEC = 0x00002000,
44926 + GR_AUDIT_FIND = 0x00004000,
44927 + GR_AUDIT_INHERIT= 0x00008000,
44928 + GR_AUDIT_SETID = 0x00010000,
44929 + GR_AUDIT_CREATE = 0x00020000,
44930 + GR_AUDIT_DELETE = 0x00040000,
44931 + GR_AUDIT_LINK = 0x00080000,
44932 + GR_PTRACERD = 0x00100000,
44933 + GR_NOPTRACE = 0x00200000,
44934 + GR_SUPPRESS = 0x00400000,
44935 + GR_NOLEARN = 0x00800000
44936 +};
44937 +
44938 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
44939 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
44940 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
44941 +
44942 +/* ACL subject-only mode flags */
44943 +enum {
44944 + GR_KILL = 0x00000001,
44945 + GR_VIEW = 0x00000002,
44946 + GR_PROTECTED = 0x00000004,
44947 + GR_LEARN = 0x00000008,
44948 + GR_OVERRIDE = 0x00000010,
44949 + /* just a placeholder, this mode is only used in userspace */
44950 + GR_DUMMY = 0x00000020,
44951 + GR_PROTSHM = 0x00000040,
44952 + GR_KILLPROC = 0x00000080,
44953 + GR_KILLIPPROC = 0x00000100,
44954 + /* just a placeholder, this mode is only used in userspace */
44955 + GR_NOTROJAN = 0x00000200,
44956 + GR_PROTPROCFD = 0x00000400,
44957 + GR_PROCACCT = 0x00000800,
44958 + GR_RELAXPTRACE = 0x00001000,
44959 + GR_NESTED = 0x00002000,
44960 + GR_INHERITLEARN = 0x00004000,
44961 + GR_PROCFIND = 0x00008000,
44962 + GR_POVERRIDE = 0x00010000,
44963 + GR_KERNELAUTH = 0x00020000,
44964 +};
44965 +
44966 +enum {
44967 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
44968 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
44969 + GR_PAX_ENABLE_MPROTECT = 0x0004,
44970 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
44971 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
44972 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
44973 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
44974 + GR_PAX_DISABLE_MPROTECT = 0x0400,
44975 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
44976 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
44977 +};
44978 +
44979 +enum {
44980 + GR_ID_USER = 0x01,
44981 + GR_ID_GROUP = 0x02,
44982 +};
44983 +
44984 +enum {
44985 + GR_ID_ALLOW = 0x01,
44986 + GR_ID_DENY = 0x02,
44987 +};
44988 +
44989 +#define GR_CRASH_RES 31
44990 +#define GR_UIDTABLE_MAX 500
44991 +
44992 +/* begin resource learning section */
44993 +enum {
44994 + GR_RLIM_CPU_BUMP = 60,
44995 + GR_RLIM_FSIZE_BUMP = 50000,
44996 + GR_RLIM_DATA_BUMP = 10000,
44997 + GR_RLIM_STACK_BUMP = 1000,
44998 + GR_RLIM_CORE_BUMP = 10000,
44999 + GR_RLIM_RSS_BUMP = 500000,
45000 + GR_RLIM_NPROC_BUMP = 1,
45001 + GR_RLIM_NOFILE_BUMP = 5,
45002 + GR_RLIM_MEMLOCK_BUMP = 50000,
45003 + GR_RLIM_AS_BUMP = 500000,
45004 + GR_RLIM_LOCKS_BUMP = 2,
45005 + GR_RLIM_SIGPENDING_BUMP = 5,
45006 + GR_RLIM_MSGQUEUE_BUMP = 10000,
45007 + GR_RLIM_NICE_BUMP = 1,
45008 + GR_RLIM_RTPRIO_BUMP = 1,
45009 + GR_RLIM_RTTIME_BUMP = 1000000
45010 +};
45011 +
45012 +#endif
45013 diff -urNp linux-2.6.35.4/include/linux/grinternal.h linux-2.6.35.4/include/linux/grinternal.h
45014 --- linux-2.6.35.4/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
45015 +++ linux-2.6.35.4/include/linux/grinternal.h 2010-09-17 20:12:37.000000000 -0400
45016 @@ -0,0 +1,211 @@
45017 +#ifndef __GRINTERNAL_H
45018 +#define __GRINTERNAL_H
45019 +
45020 +#ifdef CONFIG_GRKERNSEC
45021 +
45022 +#include <linux/fs.h>
45023 +#include <linux/mnt_namespace.h>
45024 +#include <linux/nsproxy.h>
45025 +#include <linux/gracl.h>
45026 +#include <linux/grdefs.h>
45027 +#include <linux/grmsg.h>
45028 +
45029 +void gr_add_learn_entry(const char *fmt, ...)
45030 + __attribute__ ((format (printf, 1, 2)));
45031 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
45032 + const struct vfsmount *mnt);
45033 +__u32 gr_check_create(const struct dentry *new_dentry,
45034 + const struct dentry *parent,
45035 + const struct vfsmount *mnt, const __u32 mode);
45036 +int gr_check_protected_task(const struct task_struct *task);
45037 +__u32 to_gr_audit(const __u32 reqmode);
45038 +int gr_set_acls(const int type);
45039 +
45040 +int gr_acl_is_enabled(void);
45041 +char gr_roletype_to_char(void);
45042 +
45043 +void gr_handle_alertkill(struct task_struct *task);
45044 +char *gr_to_filename(const struct dentry *dentry,
45045 + const struct vfsmount *mnt);
45046 +char *gr_to_filename1(const struct dentry *dentry,
45047 + const struct vfsmount *mnt);
45048 +char *gr_to_filename2(const struct dentry *dentry,
45049 + const struct vfsmount *mnt);
45050 +char *gr_to_filename3(const struct dentry *dentry,
45051 + const struct vfsmount *mnt);
45052 +
45053 +extern int grsec_enable_harden_ptrace;
45054 +extern int grsec_enable_link;
45055 +extern int grsec_enable_fifo;
45056 +extern int grsec_enable_execve;
45057 +extern int grsec_enable_shm;
45058 +extern int grsec_enable_execlog;
45059 +extern int grsec_enable_signal;
45060 +extern int grsec_enable_audit_ptrace;
45061 +extern int grsec_enable_forkfail;
45062 +extern int grsec_enable_time;
45063 +extern int grsec_enable_rofs;
45064 +extern int grsec_enable_chroot_shmat;
45065 +extern int grsec_enable_chroot_findtask;
45066 +extern int grsec_enable_chroot_mount;
45067 +extern int grsec_enable_chroot_double;
45068 +extern int grsec_enable_chroot_pivot;
45069 +extern int grsec_enable_chroot_chdir;
45070 +extern int grsec_enable_chroot_chmod;
45071 +extern int grsec_enable_chroot_mknod;
45072 +extern int grsec_enable_chroot_fchdir;
45073 +extern int grsec_enable_chroot_nice;
45074 +extern int grsec_enable_chroot_execlog;
45075 +extern int grsec_enable_chroot_caps;
45076 +extern int grsec_enable_chroot_sysctl;
45077 +extern int grsec_enable_chroot_unix;
45078 +extern int grsec_enable_tpe;
45079 +extern int grsec_tpe_gid;
45080 +extern int grsec_enable_tpe_all;
45081 +extern int grsec_enable_tpe_invert;
45082 +extern int grsec_enable_socket_all;
45083 +extern int grsec_socket_all_gid;
45084 +extern int grsec_enable_socket_client;
45085 +extern int grsec_socket_client_gid;
45086 +extern int grsec_enable_socket_server;
45087 +extern int grsec_socket_server_gid;
45088 +extern int grsec_audit_gid;
45089 +extern int grsec_enable_group;
45090 +extern int grsec_enable_audit_textrel;
45091 +extern int grsec_enable_mount;
45092 +extern int grsec_enable_chdir;
45093 +extern int grsec_resource_logging;
45094 +extern int grsec_enable_blackhole;
45095 +extern int grsec_lastack_retries;
45096 +extern int grsec_lock;
45097 +
45098 +extern spinlock_t grsec_alert_lock;
45099 +extern unsigned long grsec_alert_wtime;
45100 +extern unsigned long grsec_alert_fyet;
45101 +
45102 +extern spinlock_t grsec_audit_lock;
45103 +
45104 +extern rwlock_t grsec_exec_file_lock;
45105 +
45106 +#define gr_task_fullpath(tsk) (tsk->exec_file ? \
45107 + gr_to_filename2(tsk->exec_file->f_path.dentry, \
45108 + tsk->exec_file->f_vfsmnt) : "/")
45109 +
45110 +#define gr_parent_task_fullpath(tsk) (tsk->parent->exec_file ? \
45111 + gr_to_filename3(tsk->parent->exec_file->f_path.dentry, \
45112 + tsk->parent->exec_file->f_vfsmnt) : "/")
45113 +
45114 +#define gr_task_fullpath0(tsk) (tsk->exec_file ? \
45115 + gr_to_filename(tsk->exec_file->f_path.dentry, \
45116 + tsk->exec_file->f_vfsmnt) : "/")
45117 +
45118 +#define gr_parent_task_fullpath0(tsk) (tsk->parent->exec_file ? \
45119 + gr_to_filename1(tsk->parent->exec_file->f_path.dentry, \
45120 + tsk->parent->exec_file->f_vfsmnt) : "/")
45121 +
45122 +#define proc_is_chrooted(tsk_a) (tsk_a->gr_is_chrooted)
45123 +
45124 +#define have_same_root(tsk_a,tsk_b) (tsk_a->gr_chroot_dentry == tsk_b->gr_chroot_dentry)
45125 +
45126 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), task->comm, \
45127 + task->pid, cred->uid, \
45128 + cred->euid, cred->gid, cred->egid, \
45129 + gr_parent_task_fullpath(task), \
45130 + task->parent->comm, task->parent->pid, \
45131 + pcred->uid, pcred->euid, \
45132 + pcred->gid, pcred->egid
45133 +
45134 +#define GR_CHROOT_CAPS {{ \
45135 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
45136 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
45137 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
45138 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
45139 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
45140 + CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
45141 +
45142 +#define security_learn(normal_msg,args...) \
45143 +({ \
45144 + read_lock(&grsec_exec_file_lock); \
45145 + gr_add_learn_entry(normal_msg "\n", ## args); \
45146 + read_unlock(&grsec_exec_file_lock); \
45147 +})
45148 +
45149 +enum {
45150 + GR_DO_AUDIT,
45151 + GR_DONT_AUDIT,
45152 + GR_DONT_AUDIT_GOOD
45153 +};
45154 +
45155 +enum {
45156 + GR_TTYSNIFF,
45157 + GR_RBAC,
45158 + GR_RBAC_STR,
45159 + GR_STR_RBAC,
45160 + GR_RBAC_MODE2,
45161 + GR_RBAC_MODE3,
45162 + GR_FILENAME,
45163 + GR_SYSCTL_HIDDEN,
45164 + GR_NOARGS,
45165 + GR_ONE_INT,
45166 + GR_ONE_INT_TWO_STR,
45167 + GR_ONE_STR,
45168 + GR_STR_INT,
45169 + GR_TWO_INT,
45170 + GR_THREE_INT,
45171 + GR_FIVE_INT_TWO_STR,
45172 + GR_TWO_STR,
45173 + GR_THREE_STR,
45174 + GR_FOUR_STR,
45175 + GR_STR_FILENAME,
45176 + GR_FILENAME_STR,
45177 + GR_FILENAME_TWO_INT,
45178 + GR_FILENAME_TWO_INT_STR,
45179 + GR_TEXTREL,
45180 + GR_PTRACE,
45181 + GR_RESOURCE,
45182 + GR_CAP,
45183 + GR_SIG,
45184 + GR_SIG2,
45185 + GR_CRASH1,
45186 + GR_CRASH2,
45187 + GR_PSACCT
45188 +};
45189 +
45190 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
45191 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
45192 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
45193 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
45194 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
45195 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
45196 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
45197 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
45198 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
45199 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
45200 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
45201 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
45202 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
45203 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
45204 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
45205 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
45206 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
45207 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
45208 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
45209 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
45210 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
45211 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
45212 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
45213 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
45214 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
45215 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
45216 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
45217 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
45218 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
45219 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
45220 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
45221 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
45222 +
45223 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
45224 +
45225 +#endif
45226 +
45227 +#endif
45228 diff -urNp linux-2.6.35.4/include/linux/grmsg.h linux-2.6.35.4/include/linux/grmsg.h
45229 --- linux-2.6.35.4/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
45230 +++ linux-2.6.35.4/include/linux/grmsg.h 2010-09-17 20:12:37.000000000 -0400
45231 @@ -0,0 +1,108 @@
45232 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
45233 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
45234 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
45235 +#define GR_STOPMOD_MSG "denied modification of module state by "
45236 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
45237 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
45238 +#define GR_IOPERM_MSG "denied use of ioperm() by "
45239 +#define GR_IOPL_MSG "denied use of iopl() by "
45240 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
45241 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
45242 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
45243 +#define GR_KMEM_MSG "denied write of /dev/kmem by "
45244 +#define GR_PORT_OPEN_MSG "denied open of /dev/port by "
45245 +#define GR_MEM_WRITE_MSG "denied write of /dev/mem by "
45246 +#define GR_MEM_MMAP_MSG "denied mmap write of /dev/[k]mem by "
45247 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
45248 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
45249 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
45250 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
45251 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
45252 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
45253 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
45254 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
45255 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
45256 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
45257 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
45258 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
45259 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
45260 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
45261 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
45262 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
45263 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
45264 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
45265 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
45266 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
45267 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
45268 +#define GR_NPROC_MSG "denied overstep of process limit by "
45269 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
45270 +#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
45271 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
45272 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
45273 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
45274 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
45275 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
45276 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
45277 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
45278 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
45279 +#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
45280 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
45281 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
45282 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
45283 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
45284 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
45285 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
45286 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
45287 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
45288 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
45289 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
45290 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
45291 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
45292 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
45293 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
45294 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
45295 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
45296 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
45297 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
45298 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
45299 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
45300 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
45301 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
45302 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
45303 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
45304 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
45305 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
45306 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
45307 +#define GR_FAILFORK_MSG "failed fork with errno %d by "
45308 +#define GR_NICE_CHROOT_MSG "denied priority change by "
45309 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
45310 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
45311 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
45312 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
45313 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
45314 +#define GR_TIME_MSG "time set by "
45315 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
45316 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
45317 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
45318 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
45319 +#define GR_SOCK2_MSG "denied socket(%d,%.16s,%.16s) by "
45320 +#define GR_BIND_MSG "denied bind() by "
45321 +#define GR_CONNECT_MSG "denied connect() by "
45322 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
45323 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
45324 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
45325 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
45326 +#define GR_CAP_ACL_MSG "use of %s denied for "
45327 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
45328 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
45329 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
45330 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
45331 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
45332 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
45333 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
45334 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
45335 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
45336 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
45337 +#define GR_NONROOT_MODLOAD_MSG "denied kernel module auto-load of %.64s by "
45338 +#define GR_VM86_MSG "denied use of vm86 by "
45339 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
45340 diff -urNp linux-2.6.35.4/include/linux/grsecurity.h linux-2.6.35.4/include/linux/grsecurity.h
45341 --- linux-2.6.35.4/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
45342 +++ linux-2.6.35.4/include/linux/grsecurity.h 2010-09-17 20:12:37.000000000 -0400
45343 @@ -0,0 +1,203 @@
45344 +#ifndef GR_SECURITY_H
45345 +#define GR_SECURITY_H
45346 +#include <linux/fs.h>
45347 +#include <linux/fs_struct.h>
45348 +#include <linux/binfmts.h>
45349 +#include <linux/gracl.h>
45350 +
45351 +/* notify of brain-dead configs */
45352 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
45353 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
45354 +#endif
45355 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
45356 +#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
45357 +#endif
45358 +#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
45359 +#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
45360 +#endif
45361 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
45362 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
45363 +#endif
45364 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
45365 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
45366 +#endif
45367 +
45368 +void gr_handle_brute_attach(struct task_struct *p);
45369 +void gr_handle_brute_check(void);
45370 +
45371 +char gr_roletype_to_char(void);
45372 +
45373 +int gr_check_user_change(int real, int effective, int fs);
45374 +int gr_check_group_change(int real, int effective, int fs);
45375 +
45376 +void gr_del_task_from_ip_table(struct task_struct *p);
45377 +
45378 +int gr_pid_is_chrooted(struct task_struct *p);
45379 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
45380 +int gr_handle_chroot_nice(void);
45381 +int gr_handle_chroot_sysctl(const int op);
45382 +int gr_handle_chroot_setpriority(struct task_struct *p,
45383 + const int niceval);
45384 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
45385 +int gr_handle_chroot_chroot(const struct dentry *dentry,
45386 + const struct vfsmount *mnt);
45387 +int gr_handle_chroot_caps(struct path *path);
45388 +void gr_handle_chroot_chdir(struct path *path);
45389 +int gr_handle_chroot_chmod(const struct dentry *dentry,
45390 + const struct vfsmount *mnt, const int mode);
45391 +int gr_handle_chroot_mknod(const struct dentry *dentry,
45392 + const struct vfsmount *mnt, const int mode);
45393 +int gr_handle_chroot_mount(const struct dentry *dentry,
45394 + const struct vfsmount *mnt,
45395 + const char *dev_name);
45396 +int gr_handle_chroot_pivot(void);
45397 +int gr_handle_chroot_unix(const pid_t pid);
45398 +
45399 +int gr_handle_rawio(const struct inode *inode);
45400 +int gr_handle_nproc(void);
45401 +
45402 +void gr_handle_ioperm(void);
45403 +void gr_handle_iopl(void);
45404 +
45405 +int gr_tpe_allow(const struct file *file);
45406 +
45407 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
45408 +void gr_clear_chroot_entries(struct task_struct *task);
45409 +
45410 +void gr_log_forkfail(const int retval);
45411 +void gr_log_timechange(void);
45412 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
45413 +void gr_log_chdir(const struct dentry *dentry,
45414 + const struct vfsmount *mnt);
45415 +void gr_log_chroot_exec(const struct dentry *dentry,
45416 + const struct vfsmount *mnt);
45417 +void gr_handle_exec_args(struct linux_binprm *bprm, char **argv);
45418 +void gr_log_remount(const char *devname, const int retval);
45419 +void gr_log_unmount(const char *devname, const int retval);
45420 +void gr_log_mount(const char *from, const char *to, const int retval);
45421 +void gr_log_textrel(struct vm_area_struct *vma);
45422 +
45423 +int gr_handle_follow_link(const struct inode *parent,
45424 + const struct inode *inode,
45425 + const struct dentry *dentry,
45426 + const struct vfsmount *mnt);
45427 +int gr_handle_fifo(const struct dentry *dentry,
45428 + const struct vfsmount *mnt,
45429 + const struct dentry *dir, const int flag,
45430 + const int acc_mode);
45431 +int gr_handle_hardlink(const struct dentry *dentry,
45432 + const struct vfsmount *mnt,
45433 + struct inode *inode,
45434 + const int mode, const char *to);
45435 +
45436 +int gr_is_capable(const int cap);
45437 +int gr_is_capable_nolog(const int cap);
45438 +void gr_learn_resource(const struct task_struct *task, const int limit,
45439 + const unsigned long wanted, const int gt);
45440 +void gr_copy_label(struct task_struct *tsk);
45441 +void gr_handle_crash(struct task_struct *task, const int sig);
45442 +int gr_handle_signal(const struct task_struct *p, const int sig);
45443 +int gr_check_crash_uid(const uid_t uid);
45444 +int gr_check_protected_task(const struct task_struct *task);
45445 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
45446 +int gr_acl_handle_mmap(const struct file *file,
45447 + const unsigned long prot);
45448 +int gr_acl_handle_mprotect(const struct file *file,
45449 + const unsigned long prot);
45450 +int gr_check_hidden_task(const struct task_struct *tsk);
45451 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
45452 + const struct vfsmount *mnt);
45453 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
45454 + const struct vfsmount *mnt);
45455 +__u32 gr_acl_handle_access(const struct dentry *dentry,
45456 + const struct vfsmount *mnt, const int fmode);
45457 +__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
45458 + const struct vfsmount *mnt, mode_t mode);
45459 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
45460 + const struct vfsmount *mnt, mode_t mode);
45461 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
45462 + const struct vfsmount *mnt);
45463 +int gr_handle_ptrace(struct task_struct *task, const long request);
45464 +int gr_handle_proc_ptrace(struct task_struct *task);
45465 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
45466 + const struct vfsmount *mnt);
45467 +int gr_check_crash_exec(const struct file *filp);
45468 +int gr_acl_is_enabled(void);
45469 +void gr_set_kernel_label(struct task_struct *task);
45470 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
45471 + const gid_t gid);
45472 +int gr_set_proc_label(const struct dentry *dentry,
45473 + const struct vfsmount *mnt,
45474 + const int unsafe_share);
45475 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
45476 + const struct vfsmount *mnt);
45477 +__u32 gr_acl_handle_open(const struct dentry *dentry,
45478 + const struct vfsmount *mnt, const int fmode);
45479 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
45480 + const struct dentry *p_dentry,
45481 + const struct vfsmount *p_mnt, const int fmode,
45482 + const int imode);
45483 +void gr_handle_create(const struct dentry *dentry,
45484 + const struct vfsmount *mnt);
45485 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
45486 + const struct dentry *parent_dentry,
45487 + const struct vfsmount *parent_mnt,
45488 + const int mode);
45489 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
45490 + const struct dentry *parent_dentry,
45491 + const struct vfsmount *parent_mnt);
45492 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
45493 + const struct vfsmount *mnt);
45494 +void gr_handle_delete(const ino_t ino, const dev_t dev);
45495 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
45496 + const struct vfsmount *mnt);
45497 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
45498 + const struct dentry *parent_dentry,
45499 + const struct vfsmount *parent_mnt,
45500 + const char *from);
45501 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
45502 + const struct dentry *parent_dentry,
45503 + const struct vfsmount *parent_mnt,
45504 + const struct dentry *old_dentry,
45505 + const struct vfsmount *old_mnt, const char *to);
45506 +int gr_acl_handle_rename(struct dentry *new_dentry,
45507 + struct dentry *parent_dentry,
45508 + const struct vfsmount *parent_mnt,
45509 + struct dentry *old_dentry,
45510 + struct inode *old_parent_inode,
45511 + struct vfsmount *old_mnt, const char *newname);
45512 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
45513 + struct dentry *old_dentry,
45514 + struct dentry *new_dentry,
45515 + struct vfsmount *mnt, const __u8 replace);
45516 +__u32 gr_check_link(const struct dentry *new_dentry,
45517 + const struct dentry *parent_dentry,
45518 + const struct vfsmount *parent_mnt,
45519 + const struct dentry *old_dentry,
45520 + const struct vfsmount *old_mnt);
45521 +int gr_acl_handle_filldir(const struct file *file, const char *name,
45522 + const unsigned int namelen, const ino_t ino);
45523 +
45524 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
45525 + const struct vfsmount *mnt);
45526 +void gr_acl_handle_exit(void);
45527 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
45528 +int gr_acl_handle_procpidmem(const struct task_struct *task);
45529 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
45530 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
45531 +void gr_audit_ptrace(struct task_struct *task);
45532 +
45533 +#ifdef CONFIG_GRKERNSEC
45534 +void gr_log_nonroot_mod_load(const char *modname);
45535 +void gr_handle_vm86(void);
45536 +void gr_handle_mem_write(void);
45537 +void gr_handle_kmem_write(void);
45538 +void gr_handle_open_port(void);
45539 +int gr_handle_mem_mmap(const unsigned long offset,
45540 + struct vm_area_struct *vma);
45541 +
45542 +extern int grsec_enable_dmesg;
45543 +extern int grsec_disable_privio;
45544 +#endif
45545 +
45546 +#endif
45547 diff -urNp linux-2.6.35.4/include/linux/grsock.h linux-2.6.35.4/include/linux/grsock.h
45548 --- linux-2.6.35.4/include/linux/grsock.h 1969-12-31 19:00:00.000000000 -0500
45549 +++ linux-2.6.35.4/include/linux/grsock.h 2010-09-17 20:12:37.000000000 -0400
45550 @@ -0,0 +1,19 @@
45551 +#ifndef __GRSOCK_H
45552 +#define __GRSOCK_H
45553 +
45554 +extern void gr_attach_curr_ip(const struct sock *sk);
45555 +extern int gr_handle_sock_all(const int family, const int type,
45556 + const int protocol);
45557 +extern int gr_handle_sock_server(const struct sockaddr *sck);
45558 +extern int gr_handle_sock_server_other(const struct sock *sck);
45559 +extern int gr_handle_sock_client(const struct sockaddr *sck);
45560 +extern int gr_search_connect(struct socket * sock,
45561 + struct sockaddr_in * addr);
45562 +extern int gr_search_bind(struct socket * sock,
45563 + struct sockaddr_in * addr);
45564 +extern int gr_search_listen(struct socket * sock);
45565 +extern int gr_search_accept(struct socket * sock);
45566 +extern int gr_search_socket(const int domain, const int type,
45567 + const int protocol);
45568 +
45569 +#endif
45570 diff -urNp linux-2.6.35.4/include/linux/highmem.h linux-2.6.35.4/include/linux/highmem.h
45571 --- linux-2.6.35.4/include/linux/highmem.h 2010-08-26 19:47:12.000000000 -0400
45572 +++ linux-2.6.35.4/include/linux/highmem.h 2010-09-17 20:12:09.000000000 -0400
45573 @@ -143,6 +143,18 @@ static inline void clear_highpage(struct
45574 kunmap_atomic(kaddr, KM_USER0);
45575 }
45576
45577 +static inline void sanitize_highpage(struct page *page)
45578 +{
45579 + void *kaddr;
45580 + unsigned long flags;
45581 +
45582 + local_irq_save(flags);
45583 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
45584 + clear_page(kaddr);
45585 + kunmap_atomic(kaddr, KM_CLEARPAGE);
45586 + local_irq_restore(flags);
45587 +}
45588 +
45589 static inline void zero_user_segments(struct page *page,
45590 unsigned start1, unsigned end1,
45591 unsigned start2, unsigned end2)
45592 diff -urNp linux-2.6.35.4/include/linux/interrupt.h linux-2.6.35.4/include/linux/interrupt.h
45593 --- linux-2.6.35.4/include/linux/interrupt.h 2010-08-26 19:47:12.000000000 -0400
45594 +++ linux-2.6.35.4/include/linux/interrupt.h 2010-09-17 20:12:09.000000000 -0400
45595 @@ -392,7 +392,7 @@ enum
45596 /* map softirq index to softirq name. update 'softirq_to_name' in
45597 * kernel/softirq.c when adding a new softirq.
45598 */
45599 -extern char *softirq_to_name[NR_SOFTIRQS];
45600 +extern const char * const softirq_to_name[NR_SOFTIRQS];
45601
45602 /* softirq mask and active fields moved to irq_cpustat_t in
45603 * asm/hardirq.h to get better cache usage. KAO
45604 @@ -400,12 +400,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
45605
45606 struct softirq_action
45607 {
45608 - void (*action)(struct softirq_action *);
45609 + void (*action)(void);
45610 };
45611
45612 asmlinkage void do_softirq(void);
45613 asmlinkage void __do_softirq(void);
45614 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
45615 +extern void open_softirq(int nr, void (*action)(void));
45616 extern void softirq_init(void);
45617 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
45618 extern void raise_softirq_irqoff(unsigned int nr);
45619 diff -urNp linux-2.6.35.4/include/linux/jbd2.h linux-2.6.35.4/include/linux/jbd2.h
45620 --- linux-2.6.35.4/include/linux/jbd2.h 2010-08-26 19:47:12.000000000 -0400
45621 +++ linux-2.6.35.4/include/linux/jbd2.h 2010-09-17 20:12:09.000000000 -0400
45622 @@ -67,7 +67,7 @@ extern u8 jbd2_journal_enable_debug;
45623 } \
45624 } while (0)
45625 #else
45626 -#define jbd_debug(f, a...) /**/
45627 +#define jbd_debug(f, a...) do {} while (0)
45628 #endif
45629
45630 extern void *jbd2_alloc(size_t size, gfp_t flags);
45631 diff -urNp linux-2.6.35.4/include/linux/jbd.h linux-2.6.35.4/include/linux/jbd.h
45632 --- linux-2.6.35.4/include/linux/jbd.h 2010-08-26 19:47:12.000000000 -0400
45633 +++ linux-2.6.35.4/include/linux/jbd.h 2010-09-17 20:12:09.000000000 -0400
45634 @@ -67,7 +67,7 @@ extern u8 journal_enable_debug;
45635 } \
45636 } while (0)
45637 #else
45638 -#define jbd_debug(f, a...) /**/
45639 +#define jbd_debug(f, a...) do {} while (0)
45640 #endif
45641
45642 static inline void *jbd_alloc(size_t size, gfp_t flags)
45643 diff -urNp linux-2.6.35.4/include/linux/kallsyms.h linux-2.6.35.4/include/linux/kallsyms.h
45644 --- linux-2.6.35.4/include/linux/kallsyms.h 2010-08-26 19:47:12.000000000 -0400
45645 +++ linux-2.6.35.4/include/linux/kallsyms.h 2010-09-17 20:12:37.000000000 -0400
45646 @@ -15,7 +15,8 @@
45647
45648 struct module;
45649
45650 -#ifdef CONFIG_KALLSYMS
45651 +#ifndef __INCLUDED_BY_HIDESYM
45652 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45653 /* Lookup the address for a symbol. Returns 0 if not found. */
45654 unsigned long kallsyms_lookup_name(const char *name);
45655
45656 @@ -92,6 +93,9 @@ static inline int lookup_symbol_attrs(un
45657 /* Stupid that this does nothing, but I didn't create this mess. */
45658 #define __print_symbol(fmt, addr)
45659 #endif /*CONFIG_KALLSYMS*/
45660 +#else /* when included by kallsyms.c, with HIDESYM enabled */
45661 +extern void __print_symbol(const char *fmt, unsigned long address);
45662 +#endif
45663
45664 /* This macro allows us to keep printk typechecking */
45665 static void __check_printsym_format(const char *fmt, ...)
45666 diff -urNp linux-2.6.35.4/include/linux/kgdb.h linux-2.6.35.4/include/linux/kgdb.h
45667 --- linux-2.6.35.4/include/linux/kgdb.h 2010-08-26 19:47:12.000000000 -0400
45668 +++ linux-2.6.35.4/include/linux/kgdb.h 2010-09-17 20:12:09.000000000 -0400
45669 @@ -263,22 +263,22 @@ struct kgdb_arch {
45670 */
45671 struct kgdb_io {
45672 const char *name;
45673 - int (*read_char) (void);
45674 - void (*write_char) (u8);
45675 - void (*flush) (void);
45676 - int (*init) (void);
45677 - void (*pre_exception) (void);
45678 - void (*post_exception) (void);
45679 + int (* const read_char) (void);
45680 + void (* const write_char) (u8);
45681 + void (* const flush) (void);
45682 + int (* const init) (void);
45683 + void (* const pre_exception) (void);
45684 + void (* const post_exception) (void);
45685 int is_console;
45686 };
45687
45688 -extern struct kgdb_arch arch_kgdb_ops;
45689 +extern const struct kgdb_arch arch_kgdb_ops;
45690
45691 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
45692
45693 -extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
45694 -extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
45695 -extern struct kgdb_io *dbg_io_ops;
45696 +extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
45697 +extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
45698 +extern const struct kgdb_io *dbg_io_ops;
45699
45700 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
45701 extern int kgdb_mem2hex(char *mem, char *buf, int count);
45702 diff -urNp linux-2.6.35.4/include/linux/kvm_host.h linux-2.6.35.4/include/linux/kvm_host.h
45703 --- linux-2.6.35.4/include/linux/kvm_host.h 2010-08-26 19:47:12.000000000 -0400
45704 +++ linux-2.6.35.4/include/linux/kvm_host.h 2010-09-17 20:12:09.000000000 -0400
45705 @@ -243,7 +243,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
45706 void vcpu_load(struct kvm_vcpu *vcpu);
45707 void vcpu_put(struct kvm_vcpu *vcpu);
45708
45709 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
45710 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
45711 struct module *module);
45712 void kvm_exit(void);
45713
45714 @@ -367,7 +367,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
45715 struct kvm_guest_debug *dbg);
45716 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
45717
45718 -int kvm_arch_init(void *opaque);
45719 +int kvm_arch_init(const void *opaque);
45720 void kvm_arch_exit(void);
45721
45722 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
45723 diff -urNp linux-2.6.35.4/include/linux/libata.h linux-2.6.35.4/include/linux/libata.h
45724 --- linux-2.6.35.4/include/linux/libata.h 2010-08-26 19:47:12.000000000 -0400
45725 +++ linux-2.6.35.4/include/linux/libata.h 2010-09-17 20:12:09.000000000 -0400
45726 @@ -64,11 +64,11 @@
45727 #ifdef ATA_VERBOSE_DEBUG
45728 #define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
45729 #else
45730 -#define VPRINTK(fmt, args...)
45731 +#define VPRINTK(fmt, args...) do {} while (0)
45732 #endif /* ATA_VERBOSE_DEBUG */
45733 #else
45734 -#define DPRINTK(fmt, args...)
45735 -#define VPRINTK(fmt, args...)
45736 +#define DPRINTK(fmt, args...) do {} while (0)
45737 +#define VPRINTK(fmt, args...) do {} while (0)
45738 #endif /* ATA_DEBUG */
45739
45740 #define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __func__, ## args)
45741 @@ -523,11 +523,11 @@ struct ata_ioports {
45742
45743 struct ata_host {
45744 spinlock_t lock;
45745 - struct device *dev;
45746 + struct device *dev;
45747 void __iomem * const *iomap;
45748 unsigned int n_ports;
45749 void *private_data;
45750 - struct ata_port_operations *ops;
45751 + const struct ata_port_operations *ops;
45752 unsigned long flags;
45753 #ifdef CONFIG_ATA_ACPI
45754 acpi_handle acpi_handle;
45755 @@ -709,7 +709,7 @@ struct ata_link {
45756
45757 struct ata_port {
45758 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
45759 - struct ata_port_operations *ops;
45760 + const struct ata_port_operations *ops;
45761 spinlock_t *lock;
45762 /* Flags owned by the EH context. Only EH should touch these once the
45763 port is active */
45764 @@ -894,7 +894,7 @@ struct ata_port_info {
45765 unsigned long pio_mask;
45766 unsigned long mwdma_mask;
45767 unsigned long udma_mask;
45768 - struct ata_port_operations *port_ops;
45769 + const struct ata_port_operations *port_ops;
45770 void *private_data;
45771 };
45772
45773 @@ -918,7 +918,7 @@ extern const unsigned long sata_deb_timi
45774 extern const unsigned long sata_deb_timing_hotplug[];
45775 extern const unsigned long sata_deb_timing_long[];
45776
45777 -extern struct ata_port_operations ata_dummy_port_ops;
45778 +extern const struct ata_port_operations ata_dummy_port_ops;
45779 extern const struct ata_port_info ata_dummy_port_info;
45780
45781 static inline const unsigned long *
45782 @@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_
45783 struct scsi_host_template *sht);
45784 extern void ata_host_detach(struct ata_host *host);
45785 extern void ata_host_init(struct ata_host *, struct device *,
45786 - unsigned long, struct ata_port_operations *);
45787 + unsigned long, const struct ata_port_operations *);
45788 extern int ata_scsi_detect(struct scsi_host_template *sht);
45789 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
45790 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
45791 diff -urNp linux-2.6.35.4/include/linux/lockd/bind.h linux-2.6.35.4/include/linux/lockd/bind.h
45792 --- linux-2.6.35.4/include/linux/lockd/bind.h 2010-08-26 19:47:12.000000000 -0400
45793 +++ linux-2.6.35.4/include/linux/lockd/bind.h 2010-09-17 20:12:09.000000000 -0400
45794 @@ -23,13 +23,13 @@ struct svc_rqst;
45795 * This is the set of functions for lockd->nfsd communication
45796 */
45797 struct nlmsvc_binding {
45798 - __be32 (*fopen)(struct svc_rqst *,
45799 + __be32 (* const fopen)(struct svc_rqst *,
45800 struct nfs_fh *,
45801 struct file **);
45802 - void (*fclose)(struct file *);
45803 + void (* const fclose)(struct file *);
45804 };
45805
45806 -extern struct nlmsvc_binding * nlmsvc_ops;
45807 +extern const struct nlmsvc_binding * nlmsvc_ops;
45808
45809 /*
45810 * Similar to nfs_client_initdata, but without the NFS-specific
45811 diff -urNp linux-2.6.35.4/include/linux/mm.h linux-2.6.35.4/include/linux/mm.h
45812 --- linux-2.6.35.4/include/linux/mm.h 2010-08-26 19:47:12.000000000 -0400
45813 +++ linux-2.6.35.4/include/linux/mm.h 2010-09-17 20:12:09.000000000 -0400
45814 @@ -103,7 +103,14 @@ extern unsigned int kobjsize(const void
45815
45816 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
45817 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
45818 +
45819 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
45820 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
45821 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
45822 +#else
45823 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
45824 +#endif
45825 +
45826 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
45827 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
45828
45829 @@ -1010,6 +1017,8 @@ struct shrinker {
45830 extern void register_shrinker(struct shrinker *);
45831 extern void unregister_shrinker(struct shrinker *);
45832
45833 +pgprot_t vm_get_page_prot(unsigned long vm_flags);
45834 +
45835 int vma_wants_writenotify(struct vm_area_struct *vma);
45836
45837 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
45838 @@ -1286,6 +1295,7 @@ out:
45839 }
45840
45841 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
45842 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
45843
45844 extern unsigned long do_brk(unsigned long, unsigned long);
45845
45846 @@ -1340,6 +1350,10 @@ extern struct vm_area_struct * find_vma(
45847 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
45848 struct vm_area_struct **pprev);
45849
45850 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
45851 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
45852 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
45853 +
45854 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
45855 NULL if none. Assume start_addr < end_addr. */
45856 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
45857 @@ -1356,7 +1370,6 @@ static inline unsigned long vma_pages(st
45858 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
45859 }
45860
45861 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
45862 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
45863 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
45864 unsigned long pfn, unsigned long size, pgprot_t);
45865 @@ -1463,10 +1476,16 @@ extern int unpoison_memory(unsigned long
45866 extern int sysctl_memory_failure_early_kill;
45867 extern int sysctl_memory_failure_recovery;
45868 extern void shake_page(struct page *p, int access);
45869 -extern atomic_long_t mce_bad_pages;
45870 +extern atomic_long_unchecked_t mce_bad_pages;
45871 extern int soft_offline_page(struct page *page, int flags);
45872
45873 extern void dump_page(struct page *page);
45874
45875 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
45876 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
45877 +#else
45878 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
45879 +#endif
45880 +
45881 #endif /* __KERNEL__ */
45882 #endif /* _LINUX_MM_H */
45883 diff -urNp linux-2.6.35.4/include/linux/mm_types.h linux-2.6.35.4/include/linux/mm_types.h
45884 --- linux-2.6.35.4/include/linux/mm_types.h 2010-08-26 19:47:12.000000000 -0400
45885 +++ linux-2.6.35.4/include/linux/mm_types.h 2010-09-17 20:12:09.000000000 -0400
45886 @@ -183,6 +183,8 @@ struct vm_area_struct {
45887 #ifdef CONFIG_NUMA
45888 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
45889 #endif
45890 +
45891 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
45892 };
45893
45894 struct core_thread {
45895 @@ -310,6 +312,24 @@ struct mm_struct {
45896 #ifdef CONFIG_MMU_NOTIFIER
45897 struct mmu_notifier_mm *mmu_notifier_mm;
45898 #endif
45899 +
45900 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45901 + unsigned long pax_flags;
45902 +#endif
45903 +
45904 +#ifdef CONFIG_PAX_DLRESOLVE
45905 + unsigned long call_dl_resolve;
45906 +#endif
45907 +
45908 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
45909 + unsigned long call_syscall;
45910 +#endif
45911 +
45912 +#ifdef CONFIG_PAX_ASLR
45913 + unsigned long delta_mmap; /* randomized offset */
45914 + unsigned long delta_stack; /* randomized offset */
45915 +#endif
45916 +
45917 };
45918
45919 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
45920 diff -urNp linux-2.6.35.4/include/linux/mmu_notifier.h linux-2.6.35.4/include/linux/mmu_notifier.h
45921 --- linux-2.6.35.4/include/linux/mmu_notifier.h 2010-08-26 19:47:12.000000000 -0400
45922 +++ linux-2.6.35.4/include/linux/mmu_notifier.h 2010-09-17 20:12:09.000000000 -0400
45923 @@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destr
45924 */
45925 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
45926 ({ \
45927 - pte_t __pte; \
45928 + pte_t ___pte; \
45929 struct vm_area_struct *___vma = __vma; \
45930 unsigned long ___address = __address; \
45931 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
45932 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
45933 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
45934 - __pte; \
45935 + ___pte; \
45936 })
45937
45938 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
45939 diff -urNp linux-2.6.35.4/include/linux/mmzone.h linux-2.6.35.4/include/linux/mmzone.h
45940 --- linux-2.6.35.4/include/linux/mmzone.h 2010-08-26 19:47:12.000000000 -0400
45941 +++ linux-2.6.35.4/include/linux/mmzone.h 2010-09-17 20:12:09.000000000 -0400
45942 @@ -345,7 +345,7 @@ struct zone {
45943 unsigned long flags; /* zone flags, see below */
45944
45945 /* Zone statistics */
45946 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
45947 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
45948
45949 /*
45950 * prev_priority holds the scanning priority for this zone. It is
45951 diff -urNp linux-2.6.35.4/include/linux/mod_devicetable.h linux-2.6.35.4/include/linux/mod_devicetable.h
45952 --- linux-2.6.35.4/include/linux/mod_devicetable.h 2010-08-26 19:47:12.000000000 -0400
45953 +++ linux-2.6.35.4/include/linux/mod_devicetable.h 2010-09-17 20:12:09.000000000 -0400
45954 @@ -12,7 +12,7 @@
45955 typedef unsigned long kernel_ulong_t;
45956 #endif
45957
45958 -#define PCI_ANY_ID (~0)
45959 +#define PCI_ANY_ID ((__u16)~0)
45960
45961 struct pci_device_id {
45962 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
45963 @@ -131,7 +131,7 @@ struct usb_device_id {
45964 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
45965 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
45966
45967 -#define HID_ANY_ID (~0)
45968 +#define HID_ANY_ID (~0U)
45969
45970 struct hid_device_id {
45971 __u16 bus;
45972 diff -urNp linux-2.6.35.4/include/linux/module.h linux-2.6.35.4/include/linux/module.h
45973 --- linux-2.6.35.4/include/linux/module.h 2010-08-26 19:47:12.000000000 -0400
45974 +++ linux-2.6.35.4/include/linux/module.h 2010-09-17 20:12:09.000000000 -0400
45975 @@ -297,16 +297,16 @@ struct module
45976 int (*init)(void);
45977
45978 /* If this is non-NULL, vfree after init() returns */
45979 - void *module_init;
45980 + void *module_init_rx, *module_init_rw;
45981
45982 /* Here is the actual code + data, vfree'd on unload. */
45983 - void *module_core;
45984 + void *module_core_rx, *module_core_rw;
45985
45986 /* Here are the sizes of the init and core sections */
45987 - unsigned int init_size, core_size;
45988 + unsigned int init_size_rw, core_size_rw;
45989
45990 /* The size of the executable code in each section. */
45991 - unsigned int init_text_size, core_text_size;
45992 + unsigned int init_size_rx, core_size_rx;
45993
45994 /* Arch-specific module values */
45995 struct mod_arch_specific arch;
45996 @@ -408,16 +408,46 @@ bool is_module_address(unsigned long add
45997 bool is_module_percpu_address(unsigned long addr);
45998 bool is_module_text_address(unsigned long addr);
45999
46000 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
46001 +{
46002 +
46003 +#ifdef CONFIG_PAX_KERNEXEC
46004 + if (ktla_ktva(addr) >= (unsigned long)start &&
46005 + ktla_ktva(addr) < (unsigned long)start + size)
46006 + return 1;
46007 +#endif
46008 +
46009 + return ((void *)addr >= start && (void *)addr < start + size);
46010 +}
46011 +
46012 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
46013 +{
46014 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
46015 +}
46016 +
46017 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
46018 +{
46019 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
46020 +}
46021 +
46022 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
46023 +{
46024 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
46025 +}
46026 +
46027 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
46028 +{
46029 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
46030 +}
46031 +
46032 static inline int within_module_core(unsigned long addr, struct module *mod)
46033 {
46034 - return (unsigned long)mod->module_core <= addr &&
46035 - addr < (unsigned long)mod->module_core + mod->core_size;
46036 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
46037 }
46038
46039 static inline int within_module_init(unsigned long addr, struct module *mod)
46040 {
46041 - return (unsigned long)mod->module_init <= addr &&
46042 - addr < (unsigned long)mod->module_init + mod->init_size;
46043 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
46044 }
46045
46046 /* Search for module by name: must hold module_mutex. */
46047 diff -urNp linux-2.6.35.4/include/linux/moduleloader.h linux-2.6.35.4/include/linux/moduleloader.h
46048 --- linux-2.6.35.4/include/linux/moduleloader.h 2010-08-26 19:47:12.000000000 -0400
46049 +++ linux-2.6.35.4/include/linux/moduleloader.h 2010-09-17 20:12:09.000000000 -0400
46050 @@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
46051 sections. Returns NULL on failure. */
46052 void *module_alloc(unsigned long size);
46053
46054 +#ifdef CONFIG_PAX_KERNEXEC
46055 +void *module_alloc_exec(unsigned long size);
46056 +#else
46057 +#define module_alloc_exec(x) module_alloc(x)
46058 +#endif
46059 +
46060 /* Free memory returned from module_alloc. */
46061 void module_free(struct module *mod, void *module_region);
46062
46063 +#ifdef CONFIG_PAX_KERNEXEC
46064 +void module_free_exec(struct module *mod, void *module_region);
46065 +#else
46066 +#define module_free_exec(x, y) module_free((x), (y))
46067 +#endif
46068 +
46069 /* Apply the given relocation to the (simplified) ELF. Return -error
46070 or 0. */
46071 int apply_relocate(Elf_Shdr *sechdrs,
46072 diff -urNp linux-2.6.35.4/include/linux/namei.h linux-2.6.35.4/include/linux/namei.h
46073 --- linux-2.6.35.4/include/linux/namei.h 2010-08-26 19:47:12.000000000 -0400
46074 +++ linux-2.6.35.4/include/linux/namei.h 2010-09-17 20:12:09.000000000 -0400
46075 @@ -22,7 +22,7 @@ struct nameidata {
46076 unsigned int flags;
46077 int last_type;
46078 unsigned depth;
46079 - char *saved_names[MAX_NESTED_LINKS + 1];
46080 + const char *saved_names[MAX_NESTED_LINKS + 1];
46081
46082 /* Intent data */
46083 union {
46084 @@ -81,12 +81,12 @@ extern int follow_up(struct path *);
46085 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
46086 extern void unlock_rename(struct dentry *, struct dentry *);
46087
46088 -static inline void nd_set_link(struct nameidata *nd, char *path)
46089 +static inline void nd_set_link(struct nameidata *nd, const char *path)
46090 {
46091 nd->saved_names[nd->depth] = path;
46092 }
46093
46094 -static inline char *nd_get_link(struct nameidata *nd)
46095 +static inline const char *nd_get_link(const struct nameidata *nd)
46096 {
46097 return nd->saved_names[nd->depth];
46098 }
46099 diff -urNp linux-2.6.35.4/include/linux/oprofile.h linux-2.6.35.4/include/linux/oprofile.h
46100 --- linux-2.6.35.4/include/linux/oprofile.h 2010-08-26 19:47:12.000000000 -0400
46101 +++ linux-2.6.35.4/include/linux/oprofile.h 2010-09-17 20:12:09.000000000 -0400
46102 @@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super
46103 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
46104 char const * name, ulong * val);
46105
46106 -/** Create a file for read-only access to an atomic_t. */
46107 +/** Create a file for read-only access to an atomic_unchecked_t. */
46108 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
46109 - char const * name, atomic_t * val);
46110 + char const * name, atomic_unchecked_t * val);
46111
46112 /** create a directory */
46113 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
46114 diff -urNp linux-2.6.35.4/include/linux/pipe_fs_i.h linux-2.6.35.4/include/linux/pipe_fs_i.h
46115 --- linux-2.6.35.4/include/linux/pipe_fs_i.h 2010-08-26 19:47:12.000000000 -0400
46116 +++ linux-2.6.35.4/include/linux/pipe_fs_i.h 2010-09-17 20:12:09.000000000 -0400
46117 @@ -45,9 +45,9 @@ struct pipe_buffer {
46118 struct pipe_inode_info {
46119 wait_queue_head_t wait;
46120 unsigned int nrbufs, curbuf, buffers;
46121 - unsigned int readers;
46122 - unsigned int writers;
46123 - unsigned int waiting_writers;
46124 + atomic_t readers;
46125 + atomic_t writers;
46126 + atomic_t waiting_writers;
46127 unsigned int r_counter;
46128 unsigned int w_counter;
46129 struct page *tmp_page;
46130 diff -urNp linux-2.6.35.4/include/linux/poison.h linux-2.6.35.4/include/linux/poison.h
46131 --- linux-2.6.35.4/include/linux/poison.h 2010-08-26 19:47:12.000000000 -0400
46132 +++ linux-2.6.35.4/include/linux/poison.h 2010-09-17 20:12:09.000000000 -0400
46133 @@ -19,8 +19,8 @@
46134 * under normal circumstances, used to verify that nobody uses
46135 * non-initialized list entries.
46136 */
46137 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
46138 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
46139 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
46140 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
46141
46142 /********** include/linux/timer.h **********/
46143 /*
46144 diff -urNp linux-2.6.35.4/include/linux/proc_fs.h linux-2.6.35.4/include/linux/proc_fs.h
46145 --- linux-2.6.35.4/include/linux/proc_fs.h 2010-08-26 19:47:12.000000000 -0400
46146 +++ linux-2.6.35.4/include/linux/proc_fs.h 2010-09-17 20:12:37.000000000 -0400
46147 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
46148 return proc_create_data(name, mode, parent, proc_fops, NULL);
46149 }
46150
46151 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
46152 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
46153 +{
46154 +#ifdef CONFIG_GRKERNSEC_PROC_USER
46155 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
46156 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46157 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
46158 +#else
46159 + return proc_create_data(name, mode, parent, proc_fops, NULL);
46160 +#endif
46161 +}
46162 +
46163 +
46164 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
46165 mode_t mode, struct proc_dir_entry *base,
46166 read_proc_t *read_proc, void * data)
46167 diff -urNp linux-2.6.35.4/include/linux/random.h linux-2.6.35.4/include/linux/random.h
46168 --- linux-2.6.35.4/include/linux/random.h 2010-08-26 19:47:12.000000000 -0400
46169 +++ linux-2.6.35.4/include/linux/random.h 2010-09-17 20:12:09.000000000 -0400
46170 @@ -80,12 +80,17 @@ void srandom32(u32 seed);
46171
46172 u32 prandom32(struct rnd_state *);
46173
46174 +static inline unsigned long pax_get_random_long(void)
46175 +{
46176 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
46177 +}
46178 +
46179 /*
46180 * Handle minimum values for seeds
46181 */
46182 static inline u32 __seed(u32 x, u32 m)
46183 {
46184 - return (x < m) ? x + m : x;
46185 + return (x <= m) ? x + m + 1 : x;
46186 }
46187
46188 /**
46189 diff -urNp linux-2.6.35.4/include/linux/reiserfs_fs.h linux-2.6.35.4/include/linux/reiserfs_fs.h
46190 --- linux-2.6.35.4/include/linux/reiserfs_fs.h 2010-08-26 19:47:12.000000000 -0400
46191 +++ linux-2.6.35.4/include/linux/reiserfs_fs.h 2010-09-17 20:12:09.000000000 -0400
46192 @@ -1404,7 +1404,7 @@ static inline loff_t max_reiserfs_offset
46193 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
46194
46195 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
46196 -#define get_generation(s) atomic_read (&fs_generation(s))
46197 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
46198 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
46199 #define __fs_changed(gen,s) (gen != get_generation (s))
46200 #define fs_changed(gen,s) \
46201 @@ -1616,24 +1616,24 @@ static inline struct super_block *sb_fro
46202 */
46203
46204 struct item_operations {
46205 - int (*bytes_number) (struct item_head * ih, int block_size);
46206 - void (*decrement_key) (struct cpu_key *);
46207 - int (*is_left_mergeable) (struct reiserfs_key * ih,
46208 + int (* const bytes_number) (struct item_head * ih, int block_size);
46209 + void (* const decrement_key) (struct cpu_key *);
46210 + int (* const is_left_mergeable) (struct reiserfs_key * ih,
46211 unsigned long bsize);
46212 - void (*print_item) (struct item_head *, char *item);
46213 - void (*check_item) (struct item_head *, char *item);
46214 + void (* const print_item) (struct item_head *, char *item);
46215 + void (* const check_item) (struct item_head *, char *item);
46216
46217 - int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
46218 + int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
46219 int is_affected, int insert_size);
46220 - int (*check_left) (struct virtual_item * vi, int free,
46221 + int (* const check_left) (struct virtual_item * vi, int free,
46222 int start_skip, int end_skip);
46223 - int (*check_right) (struct virtual_item * vi, int free);
46224 - int (*part_size) (struct virtual_item * vi, int from, int to);
46225 - int (*unit_num) (struct virtual_item * vi);
46226 - void (*print_vi) (struct virtual_item * vi);
46227 + int (* const check_right) (struct virtual_item * vi, int free);
46228 + int (* const part_size) (struct virtual_item * vi, int from, int to);
46229 + int (* const unit_num) (struct virtual_item * vi);
46230 + void (* const print_vi) (struct virtual_item * vi);
46231 };
46232
46233 -extern struct item_operations *item_ops[TYPE_ANY + 1];
46234 +extern const struct item_operations * const item_ops[TYPE_ANY + 1];
46235
46236 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
46237 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
46238 diff -urNp linux-2.6.35.4/include/linux/reiserfs_fs_sb.h linux-2.6.35.4/include/linux/reiserfs_fs_sb.h
46239 --- linux-2.6.35.4/include/linux/reiserfs_fs_sb.h 2010-08-26 19:47:12.000000000 -0400
46240 +++ linux-2.6.35.4/include/linux/reiserfs_fs_sb.h 2010-09-17 20:12:09.000000000 -0400
46241 @@ -386,7 +386,7 @@ struct reiserfs_sb_info {
46242 /* Comment? -Hans */
46243 wait_queue_head_t s_wait;
46244 /* To be obsoleted soon by per buffer seals.. -Hans */
46245 - atomic_t s_generation_counter; // increased by one every time the
46246 + atomic_unchecked_t s_generation_counter; // increased by one every time the
46247 // tree gets re-balanced
46248 unsigned long s_properties; /* File system properties. Currently holds
46249 on-disk FS format */
46250 diff -urNp linux-2.6.35.4/include/linux/rmap.h linux-2.6.35.4/include/linux/rmap.h
46251 --- linux-2.6.35.4/include/linux/rmap.h 2010-08-26 19:47:12.000000000 -0400
46252 +++ linux-2.6.35.4/include/linux/rmap.h 2010-09-17 20:12:09.000000000 -0400
46253 @@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struc
46254 void anon_vma_init(void); /* create anon_vma_cachep */
46255 int anon_vma_prepare(struct vm_area_struct *);
46256 void unlink_anon_vmas(struct vm_area_struct *);
46257 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
46258 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
46259 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
46260 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
46261 void __anon_vma_link(struct vm_area_struct *);
46262 void anon_vma_free(struct anon_vma *);
46263
46264 diff -urNp linux-2.6.35.4/include/linux/sched.h linux-2.6.35.4/include/linux/sched.h
46265 --- linux-2.6.35.4/include/linux/sched.h 2010-08-26 19:47:12.000000000 -0400
46266 +++ linux-2.6.35.4/include/linux/sched.h 2010-09-17 20:12:37.000000000 -0400
46267 @@ -100,6 +100,7 @@ struct robust_list_head;
46268 struct bio_list;
46269 struct fs_struct;
46270 struct perf_event_context;
46271 +struct linux_binprm;
46272
46273 /*
46274 * List of flags we want to share for kernel threads,
46275 @@ -381,10 +382,12 @@ struct user_namespace;
46276 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
46277
46278 extern int sysctl_max_map_count;
46279 +extern unsigned long sysctl_heap_stack_gap;
46280
46281 #include <linux/aio.h>
46282
46283 #ifdef CONFIG_MMU
46284 +extern bool check_heap_stack_gap(struct vm_area_struct *vma, unsigned long addr, unsigned long len);
46285 extern void arch_pick_mmap_layout(struct mm_struct *mm);
46286 extern unsigned long
46287 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
46288 @@ -628,6 +631,15 @@ struct signal_struct {
46289 struct tty_audit_buf *tty_audit_buf;
46290 #endif
46291
46292 +#ifdef CONFIG_GRKERNSEC
46293 + u32 curr_ip;
46294 + u32 gr_saddr;
46295 + u32 gr_daddr;
46296 + u16 gr_sport;
46297 + u16 gr_dport;
46298 + u8 used_accept:1;
46299 +#endif
46300 +
46301 int oom_adj; /* OOM kill score adjustment (bit shift) */
46302 };
46303
46304 @@ -1166,7 +1178,7 @@ struct rcu_node;
46305
46306 struct task_struct {
46307 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
46308 - void *stack;
46309 + struct thread_info *stack;
46310 atomic_t usage;
46311 unsigned int flags; /* per process flags, defined below */
46312 unsigned int ptrace;
46313 @@ -1274,8 +1286,8 @@ struct task_struct {
46314 struct list_head thread_group;
46315
46316 struct completion *vfork_done; /* for vfork() */
46317 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
46318 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
46319 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
46320 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
46321
46322 cputime_t utime, stime, utimescaled, stimescaled;
46323 cputime_t gtime;
46324 @@ -1291,16 +1303,6 @@ struct task_struct {
46325 struct task_cputime cputime_expires;
46326 struct list_head cpu_timers[3];
46327
46328 -/* process credentials */
46329 - const struct cred *real_cred; /* objective and real subjective task
46330 - * credentials (COW) */
46331 - const struct cred *cred; /* effective (overridable) subjective task
46332 - * credentials (COW) */
46333 - struct mutex cred_guard_mutex; /* guard against foreign influences on
46334 - * credential calculations
46335 - * (notably. ptrace) */
46336 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
46337 -
46338 char comm[TASK_COMM_LEN]; /* executable name excluding path
46339 - access with [gs]et_task_comm (which lock
46340 it with task_lock())
46341 @@ -1384,6 +1386,15 @@ struct task_struct {
46342 int softirqs_enabled;
46343 int softirq_context;
46344 #endif
46345 +
46346 +/* process credentials */
46347 + const struct cred *real_cred; /* objective and real subjective task
46348 + * credentials (COW) */
46349 + struct mutex cred_guard_mutex; /* guard against foreign influences on
46350 + * credential calculations
46351 + * (notably. ptrace) */
46352 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
46353 +
46354 #ifdef CONFIG_LOCKDEP
46355 # define MAX_LOCK_DEPTH 48UL
46356 u64 curr_chain_key;
46357 @@ -1404,6 +1415,9 @@ struct task_struct {
46358
46359 struct backing_dev_info *backing_dev_info;
46360
46361 + const struct cred *cred; /* effective (overridable) subjective task
46362 + * credentials (COW) */
46363 +
46364 struct io_context *io_context;
46365
46366 unsigned long ptrace_message;
46367 @@ -1469,6 +1483,20 @@ struct task_struct {
46368 unsigned long default_timer_slack_ns;
46369
46370 struct list_head *scm_work_list;
46371 +
46372 +#ifdef CONFIG_GRKERNSEC
46373 + /* grsecurity */
46374 + struct dentry *gr_chroot_dentry;
46375 + struct acl_subject_label *acl;
46376 + struct acl_role_label *role;
46377 + struct file *exec_file;
46378 + u16 acl_role_id;
46379 + u8 acl_sp_role;
46380 + u8 is_writable;
46381 + u8 brute;
46382 + u8 gr_is_chrooted;
46383 +#endif
46384 +
46385 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
46386 /* Index of current stored address in ret_stack */
46387 int curr_ret_stack;
46388 @@ -1500,6 +1528,52 @@ struct task_struct {
46389 #endif
46390 };
46391
46392 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
46393 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
46394 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
46395 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
46396 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
46397 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
46398 +
46399 +#ifdef CONFIG_PAX_SOFTMODE
46400 +extern unsigned int pax_softmode;
46401 +#endif
46402 +
46403 +extern int pax_check_flags(unsigned long *);
46404 +
46405 +/* if tsk != current then task_lock must be held on it */
46406 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
46407 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
46408 +{
46409 + if (likely(tsk->mm))
46410 + return tsk->mm->pax_flags;
46411 + else
46412 + return 0UL;
46413 +}
46414 +
46415 +/* if tsk != current then task_lock must be held on it */
46416 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
46417 +{
46418 + if (likely(tsk->mm)) {
46419 + tsk->mm->pax_flags = flags;
46420 + return 0;
46421 + }
46422 + return -EINVAL;
46423 +}
46424 +#endif
46425 +
46426 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
46427 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
46428 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
46429 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
46430 +#endif
46431 +
46432 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
46433 +void pax_report_insns(void *pc, void *sp);
46434 +void pax_report_refcount_overflow(struct pt_regs *regs);
46435 +void pax_report_leak_to_user(const void *ptr, unsigned long len);
46436 +void pax_report_overflow_from_user(const void *ptr, unsigned long len);
46437 +
46438 /* Future-safe accessor for struct task_struct's cpus_allowed. */
46439 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
46440
46441 @@ -2101,7 +2175,7 @@ extern void __cleanup_sighand(struct sig
46442 extern void exit_itimers(struct signal_struct *);
46443 extern void flush_itimer_signals(void);
46444
46445 -extern NORET_TYPE void do_group_exit(int);
46446 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
46447
46448 extern void daemonize(const char *, ...);
46449 extern int allow_signal(int);
46450 @@ -2217,8 +2291,8 @@ static inline void unlock_task_sighand(s
46451
46452 #ifndef __HAVE_THREAD_FUNCTIONS
46453
46454 -#define task_thread_info(task) ((struct thread_info *)(task)->stack)
46455 -#define task_stack_page(task) ((task)->stack)
46456 +#define task_thread_info(task) ((task)->stack)
46457 +#define task_stack_page(task) ((void *)(task)->stack)
46458
46459 static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
46460 {
46461 @@ -2233,13 +2307,17 @@ static inline unsigned long *end_of_stac
46462
46463 #endif
46464
46465 -static inline int object_is_on_stack(void *obj)
46466 +static inline int object_starts_on_stack(void *obj)
46467 {
46468 - void *stack = task_stack_page(current);
46469 + const void *stack = task_stack_page(current);
46470
46471 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
46472 }
46473
46474 +#ifdef CONFIG_PAX_USERCOPY
46475 +extern int object_is_on_stack(const void *obj, unsigned long len);
46476 +#endif
46477 +
46478 extern void thread_info_cache_init(void);
46479
46480 #ifdef CONFIG_DEBUG_STACK_USAGE
46481 diff -urNp linux-2.6.35.4/include/linux/screen_info.h linux-2.6.35.4/include/linux/screen_info.h
46482 --- linux-2.6.35.4/include/linux/screen_info.h 2010-08-26 19:47:12.000000000 -0400
46483 +++ linux-2.6.35.4/include/linux/screen_info.h 2010-09-17 20:12:09.000000000 -0400
46484 @@ -43,7 +43,8 @@ struct screen_info {
46485 __u16 pages; /* 0x32 */
46486 __u16 vesa_attributes; /* 0x34 */
46487 __u32 capabilities; /* 0x36 */
46488 - __u8 _reserved[6]; /* 0x3a */
46489 + __u16 vesapm_size; /* 0x3a */
46490 + __u8 _reserved[4]; /* 0x3c */
46491 } __attribute__((packed));
46492
46493 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
46494 diff -urNp linux-2.6.35.4/include/linux/security.h linux-2.6.35.4/include/linux/security.h
46495 --- linux-2.6.35.4/include/linux/security.h 2010-08-26 19:47:12.000000000 -0400
46496 +++ linux-2.6.35.4/include/linux/security.h 2010-09-17 20:12:37.000000000 -0400
46497 @@ -34,6 +34,7 @@
46498 #include <linux/key.h>
46499 #include <linux/xfrm.h>
46500 #include <linux/slab.h>
46501 +#include <linux/grsecurity.h>
46502 #include <net/flow.h>
46503
46504 /* Maximum number of letters for an LSM name string */
46505 diff -urNp linux-2.6.35.4/include/linux/shm.h linux-2.6.35.4/include/linux/shm.h
46506 --- linux-2.6.35.4/include/linux/shm.h 2010-08-26 19:47:12.000000000 -0400
46507 +++ linux-2.6.35.4/include/linux/shm.h 2010-09-17 20:12:37.000000000 -0400
46508 @@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
46509 pid_t shm_cprid;
46510 pid_t shm_lprid;
46511 struct user_struct *mlock_user;
46512 +#ifdef CONFIG_GRKERNSEC
46513 + time_t shm_createtime;
46514 + pid_t shm_lapid;
46515 +#endif
46516 };
46517
46518 /* shm_mode upper byte flags */
46519 diff -urNp linux-2.6.35.4/include/linux/slab.h linux-2.6.35.4/include/linux/slab.h
46520 --- linux-2.6.35.4/include/linux/slab.h 2010-08-26 19:47:12.000000000 -0400
46521 +++ linux-2.6.35.4/include/linux/slab.h 2010-09-17 20:12:09.000000000 -0400
46522 @@ -11,6 +11,7 @@
46523
46524 #include <linux/gfp.h>
46525 #include <linux/types.h>
46526 +#include <linux/err.h>
46527
46528 /*
46529 * Flags to pass to kmem_cache_create().
46530 @@ -87,10 +88,13 @@
46531 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
46532 * Both make kfree a no-op.
46533 */
46534 -#define ZERO_SIZE_PTR ((void *)16)
46535 +#define ZERO_SIZE_PTR \
46536 +({ \
46537 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
46538 + (void *)(-MAX_ERRNO-1L); \
46539 +})
46540
46541 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
46542 - (unsigned long)ZERO_SIZE_PTR)
46543 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
46544
46545 /*
46546 * struct kmem_cache related prototypes
46547 @@ -144,6 +148,7 @@ void * __must_check krealloc(const void
46548 void kfree(const void *);
46549 void kzfree(const void *);
46550 size_t ksize(const void *);
46551 +void check_object_size(const void *ptr, unsigned long n, bool to);
46552
46553 /*
46554 * Allocator specific definitions. These are mainly used to establish optimized
46555 @@ -334,4 +339,37 @@ static inline void *kzalloc_node(size_t
46556
46557 void __init kmem_cache_init_late(void);
46558
46559 +#define kmalloc(x, y) \
46560 +({ \
46561 + void *___retval; \
46562 + intoverflow_t ___x = (intoverflow_t)x; \
46563 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
46564 + ___retval = NULL; \
46565 + else \
46566 + ___retval = kmalloc((size_t)___x, (y)); \
46567 + ___retval; \
46568 +})
46569 +
46570 +#define kmalloc_node(x, y, z) \
46571 +({ \
46572 + void *___retval; \
46573 + intoverflow_t ___x = (intoverflow_t)x; \
46574 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
46575 + ___retval = NULL; \
46576 + else \
46577 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
46578 + ___retval; \
46579 +})
46580 +
46581 +#define kzalloc(x, y) \
46582 +({ \
46583 + void *___retval; \
46584 + intoverflow_t ___x = (intoverflow_t)x; \
46585 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
46586 + ___retval = NULL; \
46587 + else \
46588 + ___retval = kzalloc((size_t)___x, (y)); \
46589 + ___retval; \
46590 +})
46591 +
46592 #endif /* _LINUX_SLAB_H */
46593 diff -urNp linux-2.6.35.4/include/linux/slub_def.h linux-2.6.35.4/include/linux/slub_def.h
46594 --- linux-2.6.35.4/include/linux/slub_def.h 2010-08-26 19:47:12.000000000 -0400
46595 +++ linux-2.6.35.4/include/linux/slub_def.h 2010-09-17 20:12:09.000000000 -0400
46596 @@ -79,7 +79,7 @@ struct kmem_cache {
46597 struct kmem_cache_order_objects max;
46598 struct kmem_cache_order_objects min;
46599 gfp_t allocflags; /* gfp flags to use on each alloc */
46600 - int refcount; /* Refcount for slab cache destroy */
46601 + atomic_t refcount; /* Refcount for slab cache destroy */
46602 void (*ctor)(void *);
46603 int inuse; /* Offset to metadata */
46604 int align; /* Alignment */
46605 diff -urNp linux-2.6.35.4/include/linux/sonet.h linux-2.6.35.4/include/linux/sonet.h
46606 --- linux-2.6.35.4/include/linux/sonet.h 2010-08-26 19:47:12.000000000 -0400
46607 +++ linux-2.6.35.4/include/linux/sonet.h 2010-09-17 20:12:09.000000000 -0400
46608 @@ -61,7 +61,7 @@ struct sonet_stats {
46609 #include <asm/atomic.h>
46610
46611 struct k_sonet_stats {
46612 -#define __HANDLE_ITEM(i) atomic_t i
46613 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
46614 __SONET_ITEMS
46615 #undef __HANDLE_ITEM
46616 };
46617 diff -urNp linux-2.6.35.4/include/linux/suspend.h linux-2.6.35.4/include/linux/suspend.h
46618 --- linux-2.6.35.4/include/linux/suspend.h 2010-08-26 19:47:12.000000000 -0400
46619 +++ linux-2.6.35.4/include/linux/suspend.h 2010-09-17 20:12:09.000000000 -0400
46620 @@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
46621 * which require special recovery actions in that situation.
46622 */
46623 struct platform_suspend_ops {
46624 - int (*valid)(suspend_state_t state);
46625 - int (*begin)(suspend_state_t state);
46626 - int (*prepare)(void);
46627 - int (*prepare_late)(void);
46628 - int (*enter)(suspend_state_t state);
46629 - void (*wake)(void);
46630 - void (*finish)(void);
46631 - void (*end)(void);
46632 - void (*recover)(void);
46633 + int (* const valid)(suspend_state_t state);
46634 + int (* const begin)(suspend_state_t state);
46635 + int (* const prepare)(void);
46636 + int (* const prepare_late)(void);
46637 + int (* const enter)(suspend_state_t state);
46638 + void (* const wake)(void);
46639 + void (* const finish)(void);
46640 + void (* const end)(void);
46641 + void (* const recover)(void);
46642 };
46643
46644 #ifdef CONFIG_SUSPEND
46645 @@ -120,7 +120,7 @@ struct platform_suspend_ops {
46646 * suspend_set_ops - set platform dependent suspend operations
46647 * @ops: The new suspend operations to set.
46648 */
46649 -extern void suspend_set_ops(struct platform_suspend_ops *ops);
46650 +extern void suspend_set_ops(const struct platform_suspend_ops *ops);
46651 extern int suspend_valid_only_mem(suspend_state_t state);
46652
46653 /**
46654 @@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t st
46655 #else /* !CONFIG_SUSPEND */
46656 #define suspend_valid_only_mem NULL
46657
46658 -static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
46659 +static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
46660 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
46661 #endif /* !CONFIG_SUSPEND */
46662
46663 @@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone
46664 * platforms which require special recovery actions in that situation.
46665 */
46666 struct platform_hibernation_ops {
46667 - int (*begin)(void);
46668 - void (*end)(void);
46669 - int (*pre_snapshot)(void);
46670 - void (*finish)(void);
46671 - int (*prepare)(void);
46672 - int (*enter)(void);
46673 - void (*leave)(void);
46674 - int (*pre_restore)(void);
46675 - void (*restore_cleanup)(void);
46676 - void (*recover)(void);
46677 + int (* const begin)(void);
46678 + void (* const end)(void);
46679 + int (* const pre_snapshot)(void);
46680 + void (* const finish)(void);
46681 + int (* const prepare)(void);
46682 + int (* const enter)(void);
46683 + void (* const leave)(void);
46684 + int (* const pre_restore)(void);
46685 + void (* const restore_cleanup)(void);
46686 + void (* const recover)(void);
46687 };
46688
46689 #ifdef CONFIG_HIBERNATION
46690 @@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct
46691 extern void swsusp_unset_page_free(struct page *);
46692 extern unsigned long get_safe_page(gfp_t gfp_mask);
46693
46694 -extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
46695 +extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
46696 extern int hibernate(void);
46697 extern bool system_entering_hibernation(void);
46698 #else /* CONFIG_HIBERNATION */
46699 @@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidd
46700 static inline void swsusp_set_page_free(struct page *p) {}
46701 static inline void swsusp_unset_page_free(struct page *p) {}
46702
46703 -static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
46704 +static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
46705 static inline int hibernate(void) { return -ENOSYS; }
46706 static inline bool system_entering_hibernation(void) { return false; }
46707 #endif /* CONFIG_HIBERNATION */
46708 diff -urNp linux-2.6.35.4/include/linux/sysctl.h linux-2.6.35.4/include/linux/sysctl.h
46709 --- linux-2.6.35.4/include/linux/sysctl.h 2010-08-26 19:47:12.000000000 -0400
46710 +++ linux-2.6.35.4/include/linux/sysctl.h 2010-09-17 20:12:09.000000000 -0400
46711 @@ -155,7 +155,11 @@ enum
46712 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
46713 };
46714
46715 -
46716 +#ifdef CONFIG_PAX_SOFTMODE
46717 +enum {
46718 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
46719 +};
46720 +#endif
46721
46722 /* CTL_VM names: */
46723 enum
46724 diff -urNp linux-2.6.35.4/include/linux/sysfs.h linux-2.6.35.4/include/linux/sysfs.h
46725 --- linux-2.6.35.4/include/linux/sysfs.h 2010-08-26 19:47:12.000000000 -0400
46726 +++ linux-2.6.35.4/include/linux/sysfs.h 2010-09-17 20:12:09.000000000 -0400
46727 @@ -115,8 +115,8 @@ struct bin_attribute {
46728 #define sysfs_bin_attr_init(bin_attr) sysfs_attr_init(&(bin_attr)->attr)
46729
46730 struct sysfs_ops {
46731 - ssize_t (*show)(struct kobject *, struct attribute *,char *);
46732 - ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
46733 + ssize_t (* const show)(struct kobject *, struct attribute *,char *);
46734 + ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
46735 };
46736
46737 struct sysfs_dirent;
46738 diff -urNp linux-2.6.35.4/include/linux/thread_info.h linux-2.6.35.4/include/linux/thread_info.h
46739 --- linux-2.6.35.4/include/linux/thread_info.h 2010-08-26 19:47:12.000000000 -0400
46740 +++ linux-2.6.35.4/include/linux/thread_info.h 2010-09-17 20:12:09.000000000 -0400
46741 @@ -23,7 +23,7 @@ struct restart_block {
46742 };
46743 /* For futex_wait and futex_wait_requeue_pi */
46744 struct {
46745 - u32 *uaddr;
46746 + u32 __user *uaddr;
46747 u32 val;
46748 u32 flags;
46749 u32 bitset;
46750 diff -urNp linux-2.6.35.4/include/linux/tty.h linux-2.6.35.4/include/linux/tty.h
46751 --- linux-2.6.35.4/include/linux/tty.h 2010-08-26 19:47:12.000000000 -0400
46752 +++ linux-2.6.35.4/include/linux/tty.h 2010-09-17 20:12:09.000000000 -0400
46753 @@ -13,6 +13,7 @@
46754 #include <linux/tty_driver.h>
46755 #include <linux/tty_ldisc.h>
46756 #include <linux/mutex.h>
46757 +#include <linux/poll.h>
46758
46759 #include <asm/system.h>
46760
46761 @@ -453,7 +454,6 @@ extern int tty_perform_flush(struct tty_
46762 extern dev_t tty_devnum(struct tty_struct *tty);
46763 extern void proc_clear_tty(struct task_struct *p);
46764 extern struct tty_struct *get_current_tty(void);
46765 -extern void tty_default_fops(struct file_operations *fops);
46766 extern struct tty_struct *alloc_tty_struct(void);
46767 extern void free_tty_struct(struct tty_struct *tty);
46768 extern void initialize_tty_struct(struct tty_struct *tty,
46769 @@ -514,6 +514,18 @@ extern void tty_ldisc_begin(void);
46770 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
46771 extern void tty_ldisc_enable(struct tty_struct *tty);
46772
46773 +/* tty_io.c */
46774 +extern ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
46775 +extern ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
46776 +extern unsigned int tty_poll(struct file *, poll_table *);
46777 +#ifdef CONFIG_COMPAT
46778 +extern long tty_compat_ioctl(struct file *file, unsigned int cmd,
46779 + unsigned long arg);
46780 +#else
46781 +#define tty_compat_ioctl NULL
46782 +#endif
46783 +extern int tty_release(struct inode *, struct file *);
46784 +extern int tty_fasync(int fd, struct file *filp, int on);
46785
46786 /* n_tty.c */
46787 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
46788 diff -urNp linux-2.6.35.4/include/linux/tty_ldisc.h linux-2.6.35.4/include/linux/tty_ldisc.h
46789 --- linux-2.6.35.4/include/linux/tty_ldisc.h 2010-08-26 19:47:12.000000000 -0400
46790 +++ linux-2.6.35.4/include/linux/tty_ldisc.h 2010-09-17 20:12:09.000000000 -0400
46791 @@ -147,7 +147,7 @@ struct tty_ldisc_ops {
46792
46793 struct module *owner;
46794
46795 - int refcount;
46796 + atomic_t refcount;
46797 };
46798
46799 struct tty_ldisc {
46800 diff -urNp linux-2.6.35.4/include/linux/types.h linux-2.6.35.4/include/linux/types.h
46801 --- linux-2.6.35.4/include/linux/types.h 2010-08-26 19:47:12.000000000 -0400
46802 +++ linux-2.6.35.4/include/linux/types.h 2010-09-17 20:12:09.000000000 -0400
46803 @@ -191,10 +191,26 @@ typedef struct {
46804 int counter;
46805 } atomic_t;
46806
46807 +#ifdef CONFIG_PAX_REFCOUNT
46808 +typedef struct {
46809 + int counter;
46810 +} atomic_unchecked_t;
46811 +#else
46812 +typedef atomic_t atomic_unchecked_t;
46813 +#endif
46814 +
46815 #ifdef CONFIG_64BIT
46816 typedef struct {
46817 long counter;
46818 } atomic64_t;
46819 +
46820 +#ifdef CONFIG_PAX_REFCOUNT
46821 +typedef struct {
46822 + long counter;
46823 +} atomic64_unchecked_t;
46824 +#else
46825 +typedef atomic64_t atomic64_unchecked_t;
46826 +#endif
46827 #endif
46828
46829 struct ustat {
46830 diff -urNp linux-2.6.35.4/include/linux/uaccess.h linux-2.6.35.4/include/linux/uaccess.h
46831 --- linux-2.6.35.4/include/linux/uaccess.h 2010-08-26 19:47:12.000000000 -0400
46832 +++ linux-2.6.35.4/include/linux/uaccess.h 2010-09-17 20:12:09.000000000 -0400
46833 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
46834 long ret; \
46835 mm_segment_t old_fs = get_fs(); \
46836 \
46837 - set_fs(KERNEL_DS); \
46838 pagefault_disable(); \
46839 + set_fs(KERNEL_DS); \
46840 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
46841 - pagefault_enable(); \
46842 set_fs(old_fs); \
46843 + pagefault_enable(); \
46844 ret; \
46845 })
46846
46847 @@ -93,8 +93,8 @@ static inline unsigned long __copy_from_
46848 * Safely read from address @src to the buffer at @dst. If a kernel fault
46849 * happens, handle that and return -EFAULT.
46850 */
46851 -extern long probe_kernel_read(void *dst, void *src, size_t size);
46852 -extern long __probe_kernel_read(void *dst, void *src, size_t size);
46853 +extern long probe_kernel_read(void *dst, const void *src, size_t size);
46854 +extern long __probe_kernel_read(void *dst, const void *src, size_t size);
46855
46856 /*
46857 * probe_kernel_write(): safely attempt to write to a location
46858 @@ -105,7 +105,7 @@ extern long __probe_kernel_read(void *ds
46859 * Safely write to address @dst from the buffer at @src. If a kernel fault
46860 * happens, handle that and return -EFAULT.
46861 */
46862 -extern long notrace probe_kernel_write(void *dst, void *src, size_t size);
46863 -extern long notrace __probe_kernel_write(void *dst, void *src, size_t size);
46864 +extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
46865 +extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
46866
46867 #endif /* __LINUX_UACCESS_H__ */
46868 diff -urNp linux-2.6.35.4/include/linux/usb/hcd.h linux-2.6.35.4/include/linux/usb/hcd.h
46869 --- linux-2.6.35.4/include/linux/usb/hcd.h 2010-08-26 19:47:12.000000000 -0400
46870 +++ linux-2.6.35.4/include/linux/usb/hcd.h 2010-09-17 20:12:09.000000000 -0400
46871 @@ -559,7 +559,7 @@ struct usb_mon_operations {
46872 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
46873 };
46874
46875 -extern struct usb_mon_operations *mon_ops;
46876 +extern const struct usb_mon_operations *mon_ops;
46877
46878 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
46879 {
46880 @@ -581,7 +581,7 @@ static inline void usbmon_urb_complete(s
46881 (*mon_ops->urb_complete)(bus, urb, status);
46882 }
46883
46884 -int usb_mon_register(struct usb_mon_operations *ops);
46885 +int usb_mon_register(const struct usb_mon_operations *ops);
46886 void usb_mon_deregister(void);
46887
46888 #else
46889 diff -urNp linux-2.6.35.4/include/linux/vmalloc.h linux-2.6.35.4/include/linux/vmalloc.h
46890 --- linux-2.6.35.4/include/linux/vmalloc.h 2010-08-26 19:47:12.000000000 -0400
46891 +++ linux-2.6.35.4/include/linux/vmalloc.h 2010-09-17 20:12:09.000000000 -0400
46892 @@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
46893 #define VM_MAP 0x00000004 /* vmap()ed pages */
46894 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
46895 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
46896 +
46897 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
46898 +#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
46899 +#endif
46900 +
46901 /* bits [20..32] reserved for arch specific ioremap internals */
46902
46903 /*
46904 @@ -121,4 +126,81 @@ struct vm_struct **pcpu_get_vm_areas(con
46905
46906 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
46907
46908 +#define vmalloc(x) \
46909 +({ \
46910 + void *___retval; \
46911 + intoverflow_t ___x = (intoverflow_t)x; \
46912 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
46913 + ___retval = NULL; \
46914 + else \
46915 + ___retval = vmalloc((unsigned long)___x); \
46916 + ___retval; \
46917 +})
46918 +
46919 +#define __vmalloc(x, y, z) \
46920 +({ \
46921 + void *___retval; \
46922 + intoverflow_t ___x = (intoverflow_t)x; \
46923 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
46924 + ___retval = NULL; \
46925 + else \
46926 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
46927 + ___retval; \
46928 +})
46929 +
46930 +#define vmalloc_user(x) \
46931 +({ \
46932 + void *___retval; \
46933 + intoverflow_t ___x = (intoverflow_t)x; \
46934 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
46935 + ___retval = NULL; \
46936 + else \
46937 + ___retval = vmalloc_user((unsigned long)___x); \
46938 + ___retval; \
46939 +})
46940 +
46941 +#define vmalloc_exec(x) \
46942 +({ \
46943 + void *___retval; \
46944 + intoverflow_t ___x = (intoverflow_t)x; \
46945 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
46946 + ___retval = NULL; \
46947 + else \
46948 + ___retval = vmalloc_exec((unsigned long)___x); \
46949 + ___retval; \
46950 +})
46951 +
46952 +#define vmalloc_node(x, y) \
46953 +({ \
46954 + void *___retval; \
46955 + intoverflow_t ___x = (intoverflow_t)x; \
46956 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
46957 + ___retval = NULL; \
46958 + else \
46959 + ___retval = vmalloc_node((unsigned long)___x, (y));\
46960 + ___retval; \
46961 +})
46962 +
46963 +#define vmalloc_32(x) \
46964 +({ \
46965 + void *___retval; \
46966 + intoverflow_t ___x = (intoverflow_t)x; \
46967 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
46968 + ___retval = NULL; \
46969 + else \
46970 + ___retval = vmalloc_32((unsigned long)___x); \
46971 + ___retval; \
46972 +})
46973 +
46974 +#define vmalloc_32_user(x) \
46975 +({ \
46976 + void *___retval; \
46977 + intoverflow_t ___x = (intoverflow_t)x; \
46978 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
46979 + ___retval = NULL; \
46980 + else \
46981 + ___retval = vmalloc_32_user((unsigned long)___x);\
46982 + ___retval; \
46983 +})
46984 +
46985 #endif /* _LINUX_VMALLOC_H */
46986 diff -urNp linux-2.6.35.4/include/linux/vmstat.h linux-2.6.35.4/include/linux/vmstat.h
46987 --- linux-2.6.35.4/include/linux/vmstat.h 2010-08-26 19:47:12.000000000 -0400
46988 +++ linux-2.6.35.4/include/linux/vmstat.h 2010-09-17 20:12:09.000000000 -0400
46989 @@ -140,18 +140,18 @@ static inline void vm_events_fold_cpu(in
46990 /*
46991 * Zone based page accounting with per cpu differentials.
46992 */
46993 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
46994 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
46995
46996 static inline void zone_page_state_add(long x, struct zone *zone,
46997 enum zone_stat_item item)
46998 {
46999 - atomic_long_add(x, &zone->vm_stat[item]);
47000 - atomic_long_add(x, &vm_stat[item]);
47001 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
47002 + atomic_long_add_unchecked(x, &vm_stat[item]);
47003 }
47004
47005 static inline unsigned long global_page_state(enum zone_stat_item item)
47006 {
47007 - long x = atomic_long_read(&vm_stat[item]);
47008 + long x = atomic_long_read_unchecked(&vm_stat[item]);
47009 #ifdef CONFIG_SMP
47010 if (x < 0)
47011 x = 0;
47012 @@ -162,7 +162,7 @@ static inline unsigned long global_page_
47013 static inline unsigned long zone_page_state(struct zone *zone,
47014 enum zone_stat_item item)
47015 {
47016 - long x = atomic_long_read(&zone->vm_stat[item]);
47017 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
47018 #ifdef CONFIG_SMP
47019 if (x < 0)
47020 x = 0;
47021 @@ -246,8 +246,8 @@ static inline void __mod_zone_page_state
47022
47023 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
47024 {
47025 - atomic_long_inc(&zone->vm_stat[item]);
47026 - atomic_long_inc(&vm_stat[item]);
47027 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
47028 + atomic_long_inc_unchecked(&vm_stat[item]);
47029 }
47030
47031 static inline void __inc_zone_page_state(struct page *page,
47032 @@ -258,8 +258,8 @@ static inline void __inc_zone_page_state
47033
47034 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
47035 {
47036 - atomic_long_dec(&zone->vm_stat[item]);
47037 - atomic_long_dec(&vm_stat[item]);
47038 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
47039 + atomic_long_dec_unchecked(&vm_stat[item]);
47040 }
47041
47042 static inline void __dec_zone_page_state(struct page *page,
47043 diff -urNp linux-2.6.35.4/include/net/irda/ircomm_tty.h linux-2.6.35.4/include/net/irda/ircomm_tty.h
47044 --- linux-2.6.35.4/include/net/irda/ircomm_tty.h 2010-08-26 19:47:12.000000000 -0400
47045 +++ linux-2.6.35.4/include/net/irda/ircomm_tty.h 2010-09-17 20:12:09.000000000 -0400
47046 @@ -105,8 +105,8 @@ struct ircomm_tty_cb {
47047 unsigned short close_delay;
47048 unsigned short closing_wait; /* time to wait before closing */
47049
47050 - int open_count;
47051 - int blocked_open; /* # of blocked opens */
47052 + atomic_t open_count;
47053 + atomic_t blocked_open; /* # of blocked opens */
47054
47055 /* Protect concurent access to :
47056 * o self->open_count
47057 diff -urNp linux-2.6.35.4/include/net/neighbour.h linux-2.6.35.4/include/net/neighbour.h
47058 --- linux-2.6.35.4/include/net/neighbour.h 2010-08-26 19:47:12.000000000 -0400
47059 +++ linux-2.6.35.4/include/net/neighbour.h 2010-09-17 20:12:09.000000000 -0400
47060 @@ -116,12 +116,12 @@ struct neighbour {
47061
47062 struct neigh_ops {
47063 int family;
47064 - void (*solicit)(struct neighbour *, struct sk_buff*);
47065 - void (*error_report)(struct neighbour *, struct sk_buff*);
47066 - int (*output)(struct sk_buff*);
47067 - int (*connected_output)(struct sk_buff*);
47068 - int (*hh_output)(struct sk_buff*);
47069 - int (*queue_xmit)(struct sk_buff*);
47070 + void (* const solicit)(struct neighbour *, struct sk_buff*);
47071 + void (* const error_report)(struct neighbour *, struct sk_buff*);
47072 + int (* const output)(struct sk_buff*);
47073 + int (* const connected_output)(struct sk_buff*);
47074 + int (* const hh_output)(struct sk_buff*);
47075 + int (* const queue_xmit)(struct sk_buff*);
47076 };
47077
47078 struct pneigh_entry {
47079 diff -urNp linux-2.6.35.4/include/net/sctp/sctp.h linux-2.6.35.4/include/net/sctp/sctp.h
47080 --- linux-2.6.35.4/include/net/sctp/sctp.h 2010-08-26 19:47:12.000000000 -0400
47081 +++ linux-2.6.35.4/include/net/sctp/sctp.h 2010-09-17 20:12:09.000000000 -0400
47082 @@ -305,8 +305,8 @@ extern int sctp_debug_flag;
47083
47084 #else /* SCTP_DEBUG */
47085
47086 -#define SCTP_DEBUG_PRINTK(whatever...)
47087 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
47088 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
47089 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
47090 #define SCTP_ENABLE_DEBUG
47091 #define SCTP_DISABLE_DEBUG
47092 #define SCTP_ASSERT(expr, str, func)
47093 diff -urNp linux-2.6.35.4/include/net/tcp.h linux-2.6.35.4/include/net/tcp.h
47094 --- linux-2.6.35.4/include/net/tcp.h 2010-08-26 19:47:12.000000000 -0400
47095 +++ linux-2.6.35.4/include/net/tcp.h 2010-09-17 20:12:09.000000000 -0400
47096 @@ -1404,6 +1404,7 @@ enum tcp_seq_states {
47097 struct tcp_seq_afinfo {
47098 char *name;
47099 sa_family_t family;
47100 + /* cannot be const */
47101 struct file_operations seq_fops;
47102 struct seq_operations seq_ops;
47103 };
47104 diff -urNp linux-2.6.35.4/include/net/udp.h linux-2.6.35.4/include/net/udp.h
47105 --- linux-2.6.35.4/include/net/udp.h 2010-08-26 19:47:12.000000000 -0400
47106 +++ linux-2.6.35.4/include/net/udp.h 2010-09-17 20:12:09.000000000 -0400
47107 @@ -221,6 +221,7 @@ struct udp_seq_afinfo {
47108 char *name;
47109 sa_family_t family;
47110 struct udp_table *udp_table;
47111 + /* cannot be const */
47112 struct file_operations seq_fops;
47113 struct seq_operations seq_ops;
47114 };
47115 diff -urNp linux-2.6.35.4/include/sound/ac97_codec.h linux-2.6.35.4/include/sound/ac97_codec.h
47116 --- linux-2.6.35.4/include/sound/ac97_codec.h 2010-08-26 19:47:12.000000000 -0400
47117 +++ linux-2.6.35.4/include/sound/ac97_codec.h 2010-09-17 20:12:09.000000000 -0400
47118 @@ -419,15 +419,15 @@
47119 struct snd_ac97;
47120
47121 struct snd_ac97_build_ops {
47122 - int (*build_3d) (struct snd_ac97 *ac97);
47123 - int (*build_specific) (struct snd_ac97 *ac97);
47124 - int (*build_spdif) (struct snd_ac97 *ac97);
47125 - int (*build_post_spdif) (struct snd_ac97 *ac97);
47126 + int (* const build_3d) (struct snd_ac97 *ac97);
47127 + int (* const build_specific) (struct snd_ac97 *ac97);
47128 + int (* const build_spdif) (struct snd_ac97 *ac97);
47129 + int (* const build_post_spdif) (struct snd_ac97 *ac97);
47130 #ifdef CONFIG_PM
47131 - void (*suspend) (struct snd_ac97 *ac97);
47132 - void (*resume) (struct snd_ac97 *ac97);
47133 + void (* const suspend) (struct snd_ac97 *ac97);
47134 + void (* const resume) (struct snd_ac97 *ac97);
47135 #endif
47136 - void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
47137 + void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
47138 };
47139
47140 struct snd_ac97_bus_ops {
47141 @@ -477,7 +477,7 @@ struct snd_ac97_template {
47142
47143 struct snd_ac97 {
47144 /* -- lowlevel (hardware) driver specific -- */
47145 - struct snd_ac97_build_ops * build_ops;
47146 + const struct snd_ac97_build_ops * build_ops;
47147 void *private_data;
47148 void (*private_free) (struct snd_ac97 *ac97);
47149 /* --- */
47150 diff -urNp linux-2.6.35.4/include/trace/events/irq.h linux-2.6.35.4/include/trace/events/irq.h
47151 --- linux-2.6.35.4/include/trace/events/irq.h 2010-08-26 19:47:12.000000000 -0400
47152 +++ linux-2.6.35.4/include/trace/events/irq.h 2010-09-17 20:12:09.000000000 -0400
47153 @@ -34,7 +34,7 @@
47154 */
47155 TRACE_EVENT(irq_handler_entry,
47156
47157 - TP_PROTO(int irq, struct irqaction *action),
47158 + TP_PROTO(int irq, const struct irqaction *action),
47159
47160 TP_ARGS(irq, action),
47161
47162 @@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
47163 */
47164 TRACE_EVENT(irq_handler_exit,
47165
47166 - TP_PROTO(int irq, struct irqaction *action, int ret),
47167 + TP_PROTO(int irq, const struct irqaction *action, int ret),
47168
47169 TP_ARGS(irq, action, ret),
47170
47171 @@ -84,7 +84,7 @@ TRACE_EVENT(irq_handler_exit,
47172
47173 DECLARE_EVENT_CLASS(softirq,
47174
47175 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
47176 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
47177
47178 TP_ARGS(h, vec),
47179
47180 @@ -113,7 +113,7 @@ DECLARE_EVENT_CLASS(softirq,
47181 */
47182 DEFINE_EVENT(softirq, softirq_entry,
47183
47184 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
47185 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
47186
47187 TP_ARGS(h, vec)
47188 );
47189 @@ -131,7 +131,7 @@ DEFINE_EVENT(softirq, softirq_entry,
47190 */
47191 DEFINE_EVENT(softirq, softirq_exit,
47192
47193 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
47194 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
47195
47196 TP_ARGS(h, vec)
47197 );
47198 diff -urNp linux-2.6.35.4/include/video/uvesafb.h linux-2.6.35.4/include/video/uvesafb.h
47199 --- linux-2.6.35.4/include/video/uvesafb.h 2010-08-26 19:47:12.000000000 -0400
47200 +++ linux-2.6.35.4/include/video/uvesafb.h 2010-09-17 20:12:09.000000000 -0400
47201 @@ -177,6 +177,7 @@ struct uvesafb_par {
47202 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
47203 u8 pmi_setpal; /* PMI for palette changes */
47204 u16 *pmi_base; /* protected mode interface location */
47205 + u8 *pmi_code; /* protected mode code location */
47206 void *pmi_start;
47207 void *pmi_pal;
47208 u8 *vbe_state_orig; /*
47209 diff -urNp linux-2.6.35.4/init/do_mounts.c linux-2.6.35.4/init/do_mounts.c
47210 --- linux-2.6.35.4/init/do_mounts.c 2010-08-26 19:47:12.000000000 -0400
47211 +++ linux-2.6.35.4/init/do_mounts.c 2010-09-17 20:12:09.000000000 -0400
47212 @@ -217,11 +217,11 @@ static void __init get_fs_names(char *pa
47213
47214 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
47215 {
47216 - int err = sys_mount(name, "/root", fs, flags, data);
47217 + int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
47218 if (err)
47219 return err;
47220
47221 - sys_chdir("/root");
47222 + sys_chdir((__force char __user *)"/root");
47223 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
47224 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
47225 current->fs->pwd.mnt->mnt_sb->s_type->name,
47226 @@ -312,18 +312,18 @@ void __init change_floppy(char *fmt, ...
47227 va_start(args, fmt);
47228 vsprintf(buf, fmt, args);
47229 va_end(args);
47230 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
47231 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
47232 if (fd >= 0) {
47233 sys_ioctl(fd, FDEJECT, 0);
47234 sys_close(fd);
47235 }
47236 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
47237 - fd = sys_open("/dev/console", O_RDWR, 0);
47238 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
47239 if (fd >= 0) {
47240 sys_ioctl(fd, TCGETS, (long)&termios);
47241 termios.c_lflag &= ~ICANON;
47242 sys_ioctl(fd, TCSETSF, (long)&termios);
47243 - sys_read(fd, &c, 1);
47244 + sys_read(fd, (char __user *)&c, 1);
47245 termios.c_lflag |= ICANON;
47246 sys_ioctl(fd, TCSETSF, (long)&termios);
47247 sys_close(fd);
47248 @@ -417,6 +417,6 @@ void __init prepare_namespace(void)
47249 mount_root();
47250 out:
47251 devtmpfs_mount("dev");
47252 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
47253 - sys_chroot(".");
47254 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
47255 + sys_chroot((__force char __user *)".");
47256 }
47257 diff -urNp linux-2.6.35.4/init/do_mounts.h linux-2.6.35.4/init/do_mounts.h
47258 --- linux-2.6.35.4/init/do_mounts.h 2010-08-26 19:47:12.000000000 -0400
47259 +++ linux-2.6.35.4/init/do_mounts.h 2010-09-17 20:12:09.000000000 -0400
47260 @@ -15,15 +15,15 @@ extern int root_mountflags;
47261
47262 static inline int create_dev(char *name, dev_t dev)
47263 {
47264 - sys_unlink(name);
47265 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
47266 + sys_unlink((__force char __user *)name);
47267 + return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
47268 }
47269
47270 #if BITS_PER_LONG == 32
47271 static inline u32 bstat(char *name)
47272 {
47273 struct stat64 stat;
47274 - if (sys_stat64(name, &stat) != 0)
47275 + if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
47276 return 0;
47277 if (!S_ISBLK(stat.st_mode))
47278 return 0;
47279 diff -urNp linux-2.6.35.4/init/do_mounts_initrd.c linux-2.6.35.4/init/do_mounts_initrd.c
47280 --- linux-2.6.35.4/init/do_mounts_initrd.c 2010-08-26 19:47:12.000000000 -0400
47281 +++ linux-2.6.35.4/init/do_mounts_initrd.c 2010-09-17 20:12:09.000000000 -0400
47282 @@ -43,13 +43,13 @@ static void __init handle_initrd(void)
47283 create_dev("/dev/root.old", Root_RAM0);
47284 /* mount initrd on rootfs' /root */
47285 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
47286 - sys_mkdir("/old", 0700);
47287 - root_fd = sys_open("/", 0, 0);
47288 - old_fd = sys_open("/old", 0, 0);
47289 + sys_mkdir((__force const char __user *)"/old", 0700);
47290 + root_fd = sys_open((__force const char __user *)"/", 0, 0);
47291 + old_fd = sys_open((__force const char __user *)"/old", 0, 0);
47292 /* move initrd over / and chdir/chroot in initrd root */
47293 - sys_chdir("/root");
47294 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
47295 - sys_chroot(".");
47296 + sys_chdir((__force const char __user *)"/root");
47297 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
47298 + sys_chroot((__force const char __user *)".");
47299
47300 /*
47301 * In case that a resume from disk is carried out by linuxrc or one of
47302 @@ -66,15 +66,15 @@ static void __init handle_initrd(void)
47303
47304 /* move initrd to rootfs' /old */
47305 sys_fchdir(old_fd);
47306 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
47307 + sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
47308 /* switch root and cwd back to / of rootfs */
47309 sys_fchdir(root_fd);
47310 - sys_chroot(".");
47311 + sys_chroot((__force const char __user *)".");
47312 sys_close(old_fd);
47313 sys_close(root_fd);
47314
47315 if (new_decode_dev(real_root_dev) == Root_RAM0) {
47316 - sys_chdir("/old");
47317 + sys_chdir((__force const char __user *)"/old");
47318 return;
47319 }
47320
47321 @@ -82,17 +82,17 @@ static void __init handle_initrd(void)
47322 mount_root();
47323
47324 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
47325 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
47326 + error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
47327 if (!error)
47328 printk("okay\n");
47329 else {
47330 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
47331 + int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
47332 if (error == -ENOENT)
47333 printk("/initrd does not exist. Ignored.\n");
47334 else
47335 printk("failed\n");
47336 printk(KERN_NOTICE "Unmounting old root\n");
47337 - sys_umount("/old", MNT_DETACH);
47338 + sys_umount((__force char __user *)"/old", MNT_DETACH);
47339 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
47340 if (fd < 0) {
47341 error = fd;
47342 @@ -115,11 +115,11 @@ int __init initrd_load(void)
47343 * mounted in the normal path.
47344 */
47345 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
47346 - sys_unlink("/initrd.image");
47347 + sys_unlink((__force const char __user *)"/initrd.image");
47348 handle_initrd();
47349 return 1;
47350 }
47351 }
47352 - sys_unlink("/initrd.image");
47353 + sys_unlink((__force const char __user *)"/initrd.image");
47354 return 0;
47355 }
47356 diff -urNp linux-2.6.35.4/init/do_mounts_md.c linux-2.6.35.4/init/do_mounts_md.c
47357 --- linux-2.6.35.4/init/do_mounts_md.c 2010-08-26 19:47:12.000000000 -0400
47358 +++ linux-2.6.35.4/init/do_mounts_md.c 2010-09-17 20:12:09.000000000 -0400
47359 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
47360 partitioned ? "_d" : "", minor,
47361 md_setup_args[ent].device_names);
47362
47363 - fd = sys_open(name, 0, 0);
47364 + fd = sys_open((__force char __user *)name, 0, 0);
47365 if (fd < 0) {
47366 printk(KERN_ERR "md: open failed - cannot start "
47367 "array %s\n", name);
47368 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
47369 * array without it
47370 */
47371 sys_close(fd);
47372 - fd = sys_open(name, 0, 0);
47373 + fd = sys_open((__force char __user *)name, 0, 0);
47374 sys_ioctl(fd, BLKRRPART, 0);
47375 }
47376 sys_close(fd);
47377 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
47378
47379 wait_for_device_probe();
47380
47381 - fd = sys_open("/dev/md0", 0, 0);
47382 + fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
47383 if (fd >= 0) {
47384 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
47385 sys_close(fd);
47386 diff -urNp linux-2.6.35.4/init/initramfs.c linux-2.6.35.4/init/initramfs.c
47387 --- linux-2.6.35.4/init/initramfs.c 2010-08-26 19:47:12.000000000 -0400
47388 +++ linux-2.6.35.4/init/initramfs.c 2010-09-17 20:12:09.000000000 -0400
47389 @@ -74,7 +74,7 @@ static void __init free_hash(void)
47390 }
47391 }
47392
47393 -static long __init do_utime(char __user *filename, time_t mtime)
47394 +static long __init do_utime(__force char __user *filename, time_t mtime)
47395 {
47396 struct timespec t[2];
47397
47398 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
47399 struct dir_entry *de, *tmp;
47400 list_for_each_entry_safe(de, tmp, &dir_list, list) {
47401 list_del(&de->list);
47402 - do_utime(de->name, de->mtime);
47403 + do_utime((__force char __user *)de->name, de->mtime);
47404 kfree(de->name);
47405 kfree(de);
47406 }
47407 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
47408 if (nlink >= 2) {
47409 char *old = find_link(major, minor, ino, mode, collected);
47410 if (old)
47411 - return (sys_link(old, collected) < 0) ? -1 : 1;
47412 + return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
47413 }
47414 return 0;
47415 }
47416 @@ -280,11 +280,11 @@ static void __init clean_path(char *path
47417 {
47418 struct stat st;
47419
47420 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
47421 + if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
47422 if (S_ISDIR(st.st_mode))
47423 - sys_rmdir(path);
47424 + sys_rmdir((__force char __user *)path);
47425 else
47426 - sys_unlink(path);
47427 + sys_unlink((__force char __user *)path);
47428 }
47429 }
47430
47431 @@ -305,7 +305,7 @@ static int __init do_name(void)
47432 int openflags = O_WRONLY|O_CREAT;
47433 if (ml != 1)
47434 openflags |= O_TRUNC;
47435 - wfd = sys_open(collected, openflags, mode);
47436 + wfd = sys_open((__force char __user *)collected, openflags, mode);
47437
47438 if (wfd >= 0) {
47439 sys_fchown(wfd, uid, gid);
47440 @@ -317,17 +317,17 @@ static int __init do_name(void)
47441 }
47442 }
47443 } else if (S_ISDIR(mode)) {
47444 - sys_mkdir(collected, mode);
47445 - sys_chown(collected, uid, gid);
47446 - sys_chmod(collected, mode);
47447 + sys_mkdir((__force char __user *)collected, mode);
47448 + sys_chown((__force char __user *)collected, uid, gid);
47449 + sys_chmod((__force char __user *)collected, mode);
47450 dir_add(collected, mtime);
47451 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
47452 S_ISFIFO(mode) || S_ISSOCK(mode)) {
47453 if (maybe_link() == 0) {
47454 - sys_mknod(collected, mode, rdev);
47455 - sys_chown(collected, uid, gid);
47456 - sys_chmod(collected, mode);
47457 - do_utime(collected, mtime);
47458 + sys_mknod((__force char __user *)collected, mode, rdev);
47459 + sys_chown((__force char __user *)collected, uid, gid);
47460 + sys_chmod((__force char __user *)collected, mode);
47461 + do_utime((__force char __user *)collected, mtime);
47462 }
47463 }
47464 return 0;
47465 @@ -336,15 +336,15 @@ static int __init do_name(void)
47466 static int __init do_copy(void)
47467 {
47468 if (count >= body_len) {
47469 - sys_write(wfd, victim, body_len);
47470 + sys_write(wfd, (__force char __user *)victim, body_len);
47471 sys_close(wfd);
47472 - do_utime(vcollected, mtime);
47473 + do_utime((__force char __user *)vcollected, mtime);
47474 kfree(vcollected);
47475 eat(body_len);
47476 state = SkipIt;
47477 return 0;
47478 } else {
47479 - sys_write(wfd, victim, count);
47480 + sys_write(wfd, (__force char __user *)victim, count);
47481 body_len -= count;
47482 eat(count);
47483 return 1;
47484 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
47485 {
47486 collected[N_ALIGN(name_len) + body_len] = '\0';
47487 clean_path(collected, 0);
47488 - sys_symlink(collected + N_ALIGN(name_len), collected);
47489 - sys_lchown(collected, uid, gid);
47490 - do_utime(collected, mtime);
47491 + sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
47492 + sys_lchown((__force char __user *)collected, uid, gid);
47493 + do_utime((__force char __user *)collected, mtime);
47494 state = SkipIt;
47495 next_state = Reset;
47496 return 0;
47497 diff -urNp linux-2.6.35.4/init/Kconfig linux-2.6.35.4/init/Kconfig
47498 --- linux-2.6.35.4/init/Kconfig 2010-08-26 19:47:12.000000000 -0400
47499 +++ linux-2.6.35.4/init/Kconfig 2010-09-17 20:12:09.000000000 -0400
47500 @@ -1063,7 +1063,7 @@ config SLUB_DEBUG
47501
47502 config COMPAT_BRK
47503 bool "Disable heap randomization"
47504 - default y
47505 + default n
47506 help
47507 Randomizing heap placement makes heap exploits harder, but it
47508 also breaks ancient binaries (including anything libc5 based).
47509 diff -urNp linux-2.6.35.4/init/main.c linux-2.6.35.4/init/main.c
47510 --- linux-2.6.35.4/init/main.c 2010-08-26 19:47:12.000000000 -0400
47511 +++ linux-2.6.35.4/init/main.c 2010-09-17 20:12:37.000000000 -0400
47512 @@ -98,6 +98,7 @@ static inline void mark_rodata_ro(void)
47513 #ifdef CONFIG_TC
47514 extern void tc_init(void);
47515 #endif
47516 +extern void grsecurity_init(void);
47517
47518 enum system_states system_state __read_mostly;
47519 EXPORT_SYMBOL(system_state);
47520 @@ -200,6 +201,50 @@ static int __init set_reset_devices(char
47521
47522 __setup("reset_devices", set_reset_devices);
47523
47524 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
47525 +extern void pax_enter_kernel_user(void);
47526 +extern void pax_exit_kernel_user(void);
47527 +extern pgdval_t clone_pgd_mask;
47528 +#endif
47529 +
47530 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
47531 +static int __init setup_pax_nouderef(char *str)
47532 +{
47533 +#ifdef CONFIG_X86_32
47534 + unsigned int cpu;
47535 +
47536 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
47537 + get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_DS].type = 3;
47538 + get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_DS].limit = 0xf;
47539 + }
47540 + asm("mov %0, %%ds" : : "r" (__KERNEL_DS) : "memory");
47541 + asm("mov %0, %%es" : : "r" (__KERNEL_DS) : "memory");
47542 + asm("mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
47543 +#else
47544 + char *p;
47545 + p = (char *)pax_enter_kernel_user;
47546 + *p = 0xc3;
47547 + p = (char *)pax_exit_kernel_user;
47548 + *p = 0xc3;
47549 + clone_pgd_mask = ~(pgdval_t)0UL;
47550 +#endif
47551 +
47552 + return 0;
47553 +}
47554 +early_param("pax_nouderef", setup_pax_nouderef);
47555 +#endif
47556 +
47557 +#ifdef CONFIG_PAX_SOFTMODE
47558 +unsigned int pax_softmode;
47559 +
47560 +static int __init setup_pax_softmode(char *str)
47561 +{
47562 + get_option(&str, &pax_softmode);
47563 + return 1;
47564 +}
47565 +__setup("pax_softmode=", setup_pax_softmode);
47566 +#endif
47567 +
47568 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
47569 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
47570 static const char *panic_later, *panic_param;
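
The pax_softmode= handler above is a standard __setup() hook: the text after the "=" is handed to the handler, get_option() consumes an integer, and returning non-zero marks the option as handled; early_param(), used for pax_nouderef, follows the same pattern but is parsed earlier in boot. A minimal sketch of the same mechanism for a hypothetical parameter (the name and variable below are illustrative, not part of this patch):

#include <linux/init.h>
#include <linux/kernel.h>

static int example_mode;

/* Parses "example_mode=<int>" from the kernel command line. */
static int __init setup_example_mode(char *str)
{
	get_option(&str, &example_mode);	/* consumes an integer, if present */
	return 1;				/* non-zero: option handled here */
}
__setup("example_mode=", setup_example_mode);
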
47571 @@ -725,52 +770,53 @@ int initcall_debug;
47572 core_param(initcall_debug, initcall_debug, bool, 0644);
47573
47574 static char msgbuf[64];
47575 -static struct boot_trace_call call;
47576 -static struct boot_trace_ret ret;
47577 +static struct boot_trace_call trace_call;
47578 +static struct boot_trace_ret trace_ret;
47579
47580 int do_one_initcall(initcall_t fn)
47581 {
47582 int count = preempt_count();
47583 ktime_t calltime, delta, rettime;
47584 + const char *msg1 = "", *msg2 = "";
47585
47586 if (initcall_debug) {
47587 - call.caller = task_pid_nr(current);
47588 - printk("calling %pF @ %i\n", fn, call.caller);
47589 + trace_call.caller = task_pid_nr(current);
47590 + printk("calling %pF @ %i\n", fn, trace_call.caller);
47591 calltime = ktime_get();
47592 - trace_boot_call(&call, fn);
47593 + trace_boot_call(&trace_call, fn);
47594 enable_boot_trace();
47595 }
47596
47597 - ret.result = fn();
47598 + trace_ret.result = fn();
47599
47600 if (initcall_debug) {
47601 disable_boot_trace();
47602 rettime = ktime_get();
47603 delta = ktime_sub(rettime, calltime);
47604 - ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
47605 - trace_boot_ret(&ret, fn);
47606 + trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
47607 + trace_boot_ret(&trace_ret, fn);
47608 printk("initcall %pF returned %d after %Ld usecs\n", fn,
47609 - ret.result, ret.duration);
47610 + trace_ret.result, trace_ret.duration);
47611 }
47612
47613 msgbuf[0] = 0;
47614
47615 - if (ret.result && ret.result != -ENODEV && initcall_debug)
47616 - sprintf(msgbuf, "error code %d ", ret.result);
47617 + if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
47618 + sprintf(msgbuf, "error code %d ", trace_ret.result);
47619
47620 if (preempt_count() != count) {
47621 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
47622 + msg1 = " preemption imbalance";
47623 preempt_count() = count;
47624 }
47625 if (irqs_disabled()) {
47626 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
47627 + msg2 = " disabled interrupts";
47628 local_irq_enable();
47629 }
47630 - if (msgbuf[0]) {
47631 - printk("initcall %pF returned with %s\n", fn, msgbuf);
47632 + if (msgbuf[0] || *msg1 || *msg2) {
47633 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
47634 }
47635
47636 - return ret.result;
47637 + return trace_ret.result;
47638 }
47639
47640
47641 @@ -902,7 +948,7 @@ static int __init kernel_init(void * unu
47642 do_basic_setup();
47643
47644 /* Open the /dev/console on the rootfs, this should never fail */
47645 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
47646 + if (sys_open((__force const char __user *) "/dev/console", O_RDWR, 0) < 0)
47647 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
47648
47649 (void) sys_dup(0);
47650 @@ -915,11 +961,13 @@ static int __init kernel_init(void * unu
47651 if (!ramdisk_execute_command)
47652 ramdisk_execute_command = "/init";
47653
47654 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
47655 + if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
47656 ramdisk_execute_command = NULL;
47657 prepare_namespace();
47658 }
47659
47660 + grsecurity_init();
47661 +
47662 /*
47663 * Ok, we have completed the initial bootup, and
47664 * we're essentially up and running. Get rid of the
47665 diff -urNp linux-2.6.35.4/init/noinitramfs.c linux-2.6.35.4/init/noinitramfs.c
47666 --- linux-2.6.35.4/init/noinitramfs.c 2010-08-26 19:47:12.000000000 -0400
47667 +++ linux-2.6.35.4/init/noinitramfs.c 2010-09-17 20:12:09.000000000 -0400
47668 @@ -29,17 +29,17 @@ static int __init default_rootfs(void)
47669 {
47670 int err;
47671
47672 - err = sys_mkdir("/dev", 0755);
47673 + err = sys_mkdir((const char __user *)"/dev", 0755);
47674 if (err < 0)
47675 goto out;
47676
47677 - err = sys_mknod((const char __user *) "/dev/console",
47678 + err = sys_mknod((__force const char __user *) "/dev/console",
47679 S_IFCHR | S_IRUSR | S_IWUSR,
47680 new_encode_dev(MKDEV(5, 1)));
47681 if (err < 0)
47682 goto out;
47683
47684 - err = sys_mkdir("/root", 0700);
47685 + err = sys_mkdir((const char __user *)"/root", 0700);
47686 if (err < 0)
47687 goto out;
47688
47689 diff -urNp linux-2.6.35.4/ipc/mqueue.c linux-2.6.35.4/ipc/mqueue.c
47690 --- linux-2.6.35.4/ipc/mqueue.c 2010-08-26 19:47:12.000000000 -0400
47691 +++ linux-2.6.35.4/ipc/mqueue.c 2010-09-17 20:12:37.000000000 -0400
47692 @@ -153,6 +153,7 @@ static struct inode *mqueue_get_inode(st
47693 mq_bytes = (mq_msg_tblsz +
47694 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
47695
47696 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
47697 spin_lock(&mq_lock);
47698 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
47699 u->mq_bytes + mq_bytes >
47700 diff -urNp linux-2.6.35.4/ipc/shm.c linux-2.6.35.4/ipc/shm.c
47701 --- linux-2.6.35.4/ipc/shm.c 2010-08-26 19:47:12.000000000 -0400
47702 +++ linux-2.6.35.4/ipc/shm.c 2010-09-17 20:12:37.000000000 -0400
47703 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_name
47704 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
47705 #endif
47706
47707 +#ifdef CONFIG_GRKERNSEC
47708 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
47709 + const time_t shm_createtime, const uid_t cuid,
47710 + const int shmid);
47711 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
47712 + const time_t shm_createtime);
47713 +#endif
47714 +
47715 void shm_init_ns(struct ipc_namespace *ns)
47716 {
47717 ns->shm_ctlmax = SHMMAX;
47718 @@ -395,6 +403,14 @@ static int newseg(struct ipc_namespace *
47719 shp->shm_lprid = 0;
47720 shp->shm_atim = shp->shm_dtim = 0;
47721 shp->shm_ctim = get_seconds();
47722 +#ifdef CONFIG_GRKERNSEC
47723 + {
47724 + struct timespec timeval;
47725 + do_posix_clock_monotonic_gettime(&timeval);
47726 +
47727 + shp->shm_createtime = timeval.tv_sec;
47728 + }
47729 +#endif
47730 shp->shm_segsz = size;
47731 shp->shm_nattch = 0;
47732 shp->shm_file = file;
47733 @@ -877,9 +893,21 @@ long do_shmat(int shmid, char __user *sh
47734 if (err)
47735 goto out_unlock;
47736
47737 +#ifdef CONFIG_GRKERNSEC
47738 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
47739 + shp->shm_perm.cuid, shmid) ||
47740 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
47741 + err = -EACCES;
47742 + goto out_unlock;
47743 + }
47744 +#endif
47745 +
47746 path = shp->shm_file->f_path;
47747 path_get(&path);
47748 shp->shm_nattch++;
47749 +#ifdef CONFIG_GRKERNSEC
47750 + shp->shm_lapid = current->pid;
47751 +#endif
47752 size = i_size_read(path.dentry->d_inode);
47753 shm_unlock(shp);
47754
47755 diff -urNp linux-2.6.35.4/kernel/acct.c linux-2.6.35.4/kernel/acct.c
47756 --- linux-2.6.35.4/kernel/acct.c 2010-08-26 19:47:12.000000000 -0400
47757 +++ linux-2.6.35.4/kernel/acct.c 2010-09-17 20:12:09.000000000 -0400
47758 @@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_a
47759 */
47760 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
47761 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
47762 - file->f_op->write(file, (char *)&ac,
47763 + file->f_op->write(file, (__force char __user *)&ac,
47764 sizeof(acct_t), &file->f_pos);
47765 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
47766 set_fs(fs);
47767 diff -urNp linux-2.6.35.4/kernel/capability.c linux-2.6.35.4/kernel/capability.c
47768 --- linux-2.6.35.4/kernel/capability.c 2010-08-26 19:47:12.000000000 -0400
47769 +++ linux-2.6.35.4/kernel/capability.c 2010-09-17 20:12:37.000000000 -0400
47770 @@ -205,6 +205,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_
47771 * before modification is attempted and the application
47772 * fails.
47773 */
47774 + if (tocopy > ARRAY_SIZE(kdata))
47775 + return -EFAULT;
47776 +
47777 if (copy_to_user(dataptr, kdata, tocopy
47778 * sizeof(struct __user_cap_data_struct))) {
47779 return -EFAULT;
47780 @@ -306,10 +309,21 @@ int capable(int cap)
47781 BUG();
47782 }
47783
47784 - if (security_capable(cap) == 0) {
47785 + if (security_capable(cap) == 0 && gr_is_capable(cap)) {
47786 + current->flags |= PF_SUPERPRIV;
47787 + return 1;
47788 + }
47789 + return 0;
47790 +}
47791 +
47792 +int capable_nolog(int cap)
47793 +{
47794 + if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
47795 current->flags |= PF_SUPERPRIV;
47796 return 1;
47797 }
47798 return 0;
47799 }
47800 +
47801 EXPORT_SYMBOL(capable);
47802 +EXPORT_SYMBOL(capable_nolog);
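
The capable_nolog() variant added above mirrors capable() but consults gr_is_capable_nolog(), so a denied check is not reported through the grsecurity audit path; it appears intended for routine internal probes where a denial is expected and should not flood the log, while user-triggered privilege checks keep the logging capable(). A hypothetical caller, sketched only to show that split (the structure and function names are invented):

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>

struct example_dev {
	int debug_level;
};

/* User-initiated privileged operation: keep the logging variant. */
static int example_set_debug(struct example_dev *dev, int level)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;		/* denial goes through the audit path */
	dev->debug_level = level;
	return 0;
}

/* Routine internal probe where a denial is normal: use the silent variant. */
static bool example_may_use_raw_io(void)
{
	return capable_nolog(CAP_SYS_RAWIO);
}
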
47803 diff -urNp linux-2.6.35.4/kernel/compat.c linux-2.6.35.4/kernel/compat.c
47804 --- linux-2.6.35.4/kernel/compat.c 2010-08-26 19:47:12.000000000 -0400
47805 +++ linux-2.6.35.4/kernel/compat.c 2010-09-17 20:12:37.000000000 -0400
47806 @@ -13,6 +13,7 @@
47807
47808 #include <linux/linkage.h>
47809 #include <linux/compat.h>
47810 +#include <linux/module.h>
47811 #include <linux/errno.h>
47812 #include <linux/time.h>
47813 #include <linux/signal.h>
47814 @@ -1137,3 +1138,24 @@ compat_sys_sysinfo(struct compat_sysinfo
47815
47816 return 0;
47817 }
47818 +
47819 +/*
47820 + * Allocate user-space memory for the duration of a single system call,
47821 + * in order to marshall parameters inside a compat thunk.
47822 + */
47823 +void __user *compat_alloc_user_space(unsigned long len)
47824 +{
47825 + void __user *ptr;
47826 +
47827 + /* If len would occupy more than half of the entire compat space... */
47828 + if (unlikely(len > (((compat_uptr_t)~0) >> 1)))
47829 + return NULL;
47830 +
47831 + ptr = arch_compat_alloc_user_space(len);
47832 +
47833 + if (unlikely(!access_ok(VERIFY_WRITE, ptr, len)))
47834 + return NULL;
47835 +
47836 + return ptr;
47837 +}
47838 +EXPORT_SYMBOL_GPL(compat_alloc_user_space);
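
compat_alloc_user_space() above carves a scratch area out of the calling task's user address space (via arch_compat_alloc_user_space(), typically just below the user stack pointer) so a 32-bit compat entry point can rewrite its arguments into the native 64-bit layout and then call the regular handler, which still expects __user pointers. With this patch the helper also rejects oversized lengths and verifies writability, so callers must handle a NULL return. A sketch of a typical compat thunk; the structures and do_example() are invented for illustration:

#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/types.h>

struct example_args {			/* native (64-bit) layout */
	u64 buf;
	u32 len;
};

struct compat_example_args {		/* what 32-bit userspace passed in */
	compat_uptr_t buf;
	u32 len;
};

long do_example(struct example_args __user *args);	/* native handler */

static long compat_do_example(struct compat_example_args __user *uargs32)
{
	struct compat_example_args a32;
	struct example_args __user *a64;

	if (copy_from_user(&a32, uargs32, sizeof(a32)))
		return -EFAULT;

	a64 = compat_alloc_user_space(sizeof(*a64));
	if (!a64)			/* the patched helper can return NULL */
		return -EFAULT;

	/* widen the 32-bit pointer and copy the rest into the scratch area */
	if (put_user((u64)(unsigned long)compat_ptr(a32.buf), &a64->buf) ||
	    put_user(a32.len, &a64->len))
		return -EFAULT;

	return do_example(a64);
}
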
47839 diff -urNp linux-2.6.35.4/kernel/configs.c linux-2.6.35.4/kernel/configs.c
47840 --- linux-2.6.35.4/kernel/configs.c 2010-08-26 19:47:12.000000000 -0400
47841 +++ linux-2.6.35.4/kernel/configs.c 2010-09-17 20:12:37.000000000 -0400
47842 @@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
47843 struct proc_dir_entry *entry;
47844
47845 /* create the current config file */
47846 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
47847 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
47848 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
47849 + &ikconfig_file_ops);
47850 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47851 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
47852 + &ikconfig_file_ops);
47853 +#endif
47854 +#else
47855 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
47856 &ikconfig_file_ops);
47857 +#endif
47858 +
47859 if (!entry)
47860 return -ENOMEM;
47861
47862 diff -urNp linux-2.6.35.4/kernel/cred.c linux-2.6.35.4/kernel/cred.c
47863 --- linux-2.6.35.4/kernel/cred.c 2010-08-26 19:47:12.000000000 -0400
47864 +++ linux-2.6.35.4/kernel/cred.c 2010-09-17 20:12:37.000000000 -0400
47865 @@ -489,6 +489,8 @@ int commit_creds(struct cred *new)
47866
47867 get_cred(new); /* we will require a ref for the subj creds too */
47868
47869 + gr_set_role_label(task, new->uid, new->gid);
47870 +
47871 /* dumpability changes */
47872 if (old->euid != new->euid ||
47873 old->egid != new->egid ||
47874 diff -urNp linux-2.6.35.4/kernel/debug/debug_core.c linux-2.6.35.4/kernel/debug/debug_core.c
47875 --- linux-2.6.35.4/kernel/debug/debug_core.c 2010-08-26 19:47:12.000000000 -0400
47876 +++ linux-2.6.35.4/kernel/debug/debug_core.c 2010-09-17 20:12:09.000000000 -0400
47877 @@ -71,7 +71,7 @@ int kgdb_io_module_registered;
47878 /* Guard for recursive entry */
47879 static int exception_level;
47880
47881 -struct kgdb_io *dbg_io_ops;
47882 +const struct kgdb_io *dbg_io_ops;
47883 static DEFINE_SPINLOCK(kgdb_registration_lock);
47884
47885 /* kgdb console driver is loaded */
47886 @@ -871,7 +871,7 @@ static void kgdb_initial_breakpoint(void
47887 *
47888 * Register it with the KGDB core.
47889 */
47890 -int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
47891 +int kgdb_register_io_module(const struct kgdb_io *new_dbg_io_ops)
47892 {
47893 int err;
47894
47895 @@ -916,7 +916,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_modul
47896 *
47897 * Unregister it with the KGDB core.
47898 */
47899 -void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops)
47900 +void kgdb_unregister_io_module(const struct kgdb_io *old_dbg_io_ops)
47901 {
47902 BUG_ON(kgdb_connected);
47903
47904 diff -urNp linux-2.6.35.4/kernel/debug/kdb/kdb_main.c linux-2.6.35.4/kernel/debug/kdb/kdb_main.c
47905 --- linux-2.6.35.4/kernel/debug/kdb/kdb_main.c 2010-08-26 19:47:12.000000000 -0400
47906 +++ linux-2.6.35.4/kernel/debug/kdb/kdb_main.c 2010-09-17 20:12:09.000000000 -0400
47907 @@ -1872,7 +1872,7 @@ static int kdb_lsmod(int argc, const cha
47908 list_for_each_entry(mod, kdb_modules, list) {
47909
47910 kdb_printf("%-20s%8u 0x%p ", mod->name,
47911 - mod->core_size, (void *)mod);
47912 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
47913 #ifdef CONFIG_MODULE_UNLOAD
47914 kdb_printf("%4d ", module_refcount(mod));
47915 #endif
47916 @@ -1882,7 +1882,7 @@ static int kdb_lsmod(int argc, const cha
47917 kdb_printf(" (Loading)");
47918 else
47919 kdb_printf(" (Live)");
47920 - kdb_printf(" 0x%p", mod->module_core);
47921 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
47922
47923 #ifdef CONFIG_MODULE_UNLOAD
47924 {
47925 diff -urNp linux-2.6.35.4/kernel/exit.c linux-2.6.35.4/kernel/exit.c
47926 --- linux-2.6.35.4/kernel/exit.c 2010-08-26 19:47:12.000000000 -0400
47927 +++ linux-2.6.35.4/kernel/exit.c 2010-09-17 20:13:49.000000000 -0400
47928 @@ -56,6 +56,10 @@
47929 #include <asm/pgtable.h>
47930 #include <asm/mmu_context.h>
47931
47932 +#ifdef CONFIG_GRKERNSEC
47933 +extern rwlock_t grsec_exec_file_lock;
47934 +#endif
47935 +
47936 static void exit_mm(struct task_struct * tsk);
47937
47938 static void __unhash_process(struct task_struct *p, bool group_dead)
47939 @@ -162,6 +166,8 @@ void release_task(struct task_struct * p
47940 struct task_struct *leader;
47941 int zap_leader;
47942 repeat:
47943 + gr_del_task_from_ip_table(p);
47944 +
47945 tracehook_prepare_release_task(p);
47946 /* don't need to get the RCU readlock here - the process is dead and
47947 * can't be modifying its own credentials. But shut RCU-lockdep up */
47948 @@ -331,11 +337,22 @@ static void reparent_to_kthreadd(void)
47949 {
47950 write_lock_irq(&tasklist_lock);
47951
47952 +#ifdef CONFIG_GRKERNSEC
47953 + write_lock(&grsec_exec_file_lock);
47954 + if (current->exec_file) {
47955 + fput(current->exec_file);
47956 + current->exec_file = NULL;
47957 + }
47958 + write_unlock(&grsec_exec_file_lock);
47959 +#endif
47960 +
47961 ptrace_unlink(current);
47962 /* Reparent to init */
47963 current->real_parent = current->parent = kthreadd_task;
47964 list_move_tail(&current->sibling, &current->real_parent->children);
47965
47966 + gr_set_kernel_label(current);
47967 +
47968 /* Set the exit signal to SIGCHLD so we signal init on exit */
47969 current->exit_signal = SIGCHLD;
47970
47971 @@ -387,7 +404,7 @@ int allow_signal(int sig)
47972 * know it'll be handled, so that they don't get converted to
47973 * SIGKILL or just silently dropped.
47974 */
47975 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
47976 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
47977 recalc_sigpending();
47978 spin_unlock_irq(&current->sighand->siglock);
47979 return 0;
47980 @@ -423,6 +440,17 @@ void daemonize(const char *name, ...)
47981 vsnprintf(current->comm, sizeof(current->comm), name, args);
47982 va_end(args);
47983
47984 +#ifdef CONFIG_GRKERNSEC
47985 + write_lock(&grsec_exec_file_lock);
47986 + if (current->exec_file) {
47987 + fput(current->exec_file);
47988 + current->exec_file = NULL;
47989 + }
47990 + write_unlock(&grsec_exec_file_lock);
47991 +#endif
47992 +
47993 + gr_set_kernel_label(current);
47994 +
47995 /*
47996 * If we were started as result of loading a module, close all of the
47997 * user space pages. We don't need them, and if we didn't close them
47998 @@ -960,6 +988,9 @@ NORET_TYPE void do_exit(long code)
47999 tsk->exit_code = code;
48000 taskstats_exit(tsk, group_dead);
48001
48002 + gr_acl_handle_psacct(tsk, code);
48003 + gr_acl_handle_exit();
48004 +
48005 exit_mm(tsk);
48006
48007 if (group_dead)
48008 diff -urNp linux-2.6.35.4/kernel/fork.c linux-2.6.35.4/kernel/fork.c
48009 --- linux-2.6.35.4/kernel/fork.c 2010-08-26 19:47:12.000000000 -0400
48010 +++ linux-2.6.35.4/kernel/fork.c 2010-09-17 20:12:37.000000000 -0400
48011 @@ -276,7 +276,7 @@ static struct task_struct *dup_task_stru
48012 *stackend = STACK_END_MAGIC; /* for overflow detection */
48013
48014 #ifdef CONFIG_CC_STACKPROTECTOR
48015 - tsk->stack_canary = get_random_int();
48016 + tsk->stack_canary = pax_get_random_long();
48017 #endif
48018
48019 /* One for us, one for whoever does the "release_task()" (usually parent) */
48020 @@ -298,13 +298,78 @@ out:
48021 }
48022
48023 #ifdef CONFIG_MMU
48024 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
48025 +{
48026 + struct vm_area_struct *tmp;
48027 + unsigned long charge;
48028 + struct mempolicy *pol;
48029 + struct file *file;
48030 +
48031 + charge = 0;
48032 + if (mpnt->vm_flags & VM_ACCOUNT) {
48033 + unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
48034 + if (security_vm_enough_memory(len))
48035 + goto fail_nomem;
48036 + charge = len;
48037 + }
48038 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
48039 + if (!tmp)
48040 + goto fail_nomem;
48041 + *tmp = *mpnt;
48042 + tmp->vm_mm = mm;
48043 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
48044 + pol = mpol_dup(vma_policy(mpnt));
48045 + if (IS_ERR(pol))
48046 + goto fail_nomem_policy;
48047 + vma_set_policy(tmp, pol);
48048 + if (anon_vma_fork(tmp, mpnt))
48049 + goto fail_nomem_anon_vma_fork;
48050 + tmp->vm_flags &= ~VM_LOCKED;
48051 + tmp->vm_next = NULL;
48052 + tmp->vm_mirror = NULL;
48053 + file = tmp->vm_file;
48054 + if (file) {
48055 + struct inode *inode = file->f_path.dentry->d_inode;
48056 + struct address_space *mapping = file->f_mapping;
48057 +
48058 + get_file(file);
48059 + if (tmp->vm_flags & VM_DENYWRITE)
48060 + atomic_dec(&inode->i_writecount);
48061 + spin_lock(&mapping->i_mmap_lock);
48062 + if (tmp->vm_flags & VM_SHARED)
48063 + mapping->i_mmap_writable++;
48064 + tmp->vm_truncate_count = mpnt->vm_truncate_count;
48065 + flush_dcache_mmap_lock(mapping);
48066 + /* insert tmp into the share list, just after mpnt */
48067 + vma_prio_tree_add(tmp, mpnt);
48068 + flush_dcache_mmap_unlock(mapping);
48069 + spin_unlock(&mapping->i_mmap_lock);
48070 + }
48071 +
48072 + /*
48073 + * Clear hugetlb-related page reserves for children. This only
48074 + * affects MAP_PRIVATE mappings. Faults generated by the child
48075 + * are not guaranteed to succeed, even if read-only
48076 + */
48077 + if (is_vm_hugetlb_page(tmp))
48078 + reset_vma_resv_huge_pages(tmp);
48079 +
48080 + return tmp;
48081 +
48082 +fail_nomem_anon_vma_fork:
48083 + mpol_put(pol);
48084 +fail_nomem_policy:
48085 + kmem_cache_free(vm_area_cachep, tmp);
48086 +fail_nomem:
48087 + vm_unacct_memory(charge);
48088 + return NULL;
48089 +}
48090 +
48091 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
48092 {
48093 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
48094 struct rb_node **rb_link, *rb_parent;
48095 int retval;
48096 - unsigned long charge;
48097 - struct mempolicy *pol;
48098
48099 down_write(&oldmm->mmap_sem);
48100 flush_cache_dup_mm(oldmm);
48101 @@ -316,8 +381,8 @@ static int dup_mmap(struct mm_struct *mm
48102 mm->locked_vm = 0;
48103 mm->mmap = NULL;
48104 mm->mmap_cache = NULL;
48105 - mm->free_area_cache = oldmm->mmap_base;
48106 - mm->cached_hole_size = ~0UL;
48107 + mm->free_area_cache = oldmm->free_area_cache;
48108 + mm->cached_hole_size = oldmm->cached_hole_size;
48109 mm->map_count = 0;
48110 cpumask_clear(mm_cpumask(mm));
48111 mm->mm_rb = RB_ROOT;
48112 @@ -330,8 +395,6 @@ static int dup_mmap(struct mm_struct *mm
48113
48114 prev = NULL;
48115 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
48116 - struct file *file;
48117 -
48118 if (mpnt->vm_flags & VM_DONTCOPY) {
48119 long pages = vma_pages(mpnt);
48120 mm->total_vm -= pages;
48121 @@ -339,56 +402,13 @@ static int dup_mmap(struct mm_struct *mm
48122 -pages);
48123 continue;
48124 }
48125 - charge = 0;
48126 - if (mpnt->vm_flags & VM_ACCOUNT) {
48127 - unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
48128 - if (security_vm_enough_memory(len))
48129 - goto fail_nomem;
48130 - charge = len;
48131 - }
48132 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
48133 - if (!tmp)
48134 - goto fail_nomem;
48135 - *tmp = *mpnt;
48136 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
48137 - pol = mpol_dup(vma_policy(mpnt));
48138 - retval = PTR_ERR(pol);
48139 - if (IS_ERR(pol))
48140 - goto fail_nomem_policy;
48141 - vma_set_policy(tmp, pol);
48142 - if (anon_vma_fork(tmp, mpnt))
48143 - goto fail_nomem_anon_vma_fork;
48144 - tmp->vm_flags &= ~VM_LOCKED;
48145 - tmp->vm_mm = mm;
48146 - tmp->vm_next = tmp->vm_prev = NULL;
48147 - file = tmp->vm_file;
48148 - if (file) {
48149 - struct inode *inode = file->f_path.dentry->d_inode;
48150 - struct address_space *mapping = file->f_mapping;
48151 -
48152 - get_file(file);
48153 - if (tmp->vm_flags & VM_DENYWRITE)
48154 - atomic_dec(&inode->i_writecount);
48155 - spin_lock(&mapping->i_mmap_lock);
48156 - if (tmp->vm_flags & VM_SHARED)
48157 - mapping->i_mmap_writable++;
48158 - tmp->vm_truncate_count = mpnt->vm_truncate_count;
48159 - flush_dcache_mmap_lock(mapping);
48160 - /* insert tmp into the share list, just after mpnt */
48161 - vma_prio_tree_add(tmp, mpnt);
48162 - flush_dcache_mmap_unlock(mapping);
48163 - spin_unlock(&mapping->i_mmap_lock);
48164 + tmp = dup_vma(mm, mpnt);
48165 + if (!tmp) {
48166 + retval = -ENOMEM;
48167 + goto out;
48168 }
48169
48170 /*
48171 - * Clear hugetlb-related page reserves for children. This only
48172 - * affects MAP_PRIVATE mappings. Faults generated by the child
48173 - * are not guaranteed to succeed, even if read-only
48174 - */
48175 - if (is_vm_hugetlb_page(tmp))
48176 - reset_vma_resv_huge_pages(tmp);
48177 -
48178 - /*
48179 * Link in the new vma and copy the page table entries.
48180 */
48181 *pprev = tmp;
48182 @@ -409,6 +429,31 @@ static int dup_mmap(struct mm_struct *mm
48183 if (retval)
48184 goto out;
48185 }
48186 +
48187 +#ifdef CONFIG_PAX_SEGMEXEC
48188 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
48189 + struct vm_area_struct *mpnt_m;
48190 +
48191 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
48192 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
48193 +
48194 + if (!mpnt->vm_mirror)
48195 + continue;
48196 +
48197 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
48198 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
48199 + mpnt->vm_mirror = mpnt_m;
48200 + } else {
48201 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
48202 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
48203 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
48204 + mpnt->vm_mirror->vm_mirror = mpnt;
48205 + }
48206 + }
48207 + BUG_ON(mpnt_m);
48208 + }
48209 +#endif
48210 +
48211 /* a new mm has just been created */
48212 arch_dup_mmap(oldmm, mm);
48213 retval = 0;
48214 @@ -417,14 +462,6 @@ out:
48215 flush_tlb_mm(oldmm);
48216 up_write(&oldmm->mmap_sem);
48217 return retval;
48218 -fail_nomem_anon_vma_fork:
48219 - mpol_put(pol);
48220 -fail_nomem_policy:
48221 - kmem_cache_free(vm_area_cachep, tmp);
48222 -fail_nomem:
48223 - retval = -ENOMEM;
48224 - vm_unacct_memory(charge);
48225 - goto out;
48226 }
48227
48228 static inline int mm_alloc_pgd(struct mm_struct * mm)
48229 @@ -760,13 +797,14 @@ static int copy_fs(unsigned long clone_f
48230 write_unlock(&fs->lock);
48231 return -EAGAIN;
48232 }
48233 - fs->users++;
48234 + atomic_inc(&fs->users);
48235 write_unlock(&fs->lock);
48236 return 0;
48237 }
48238 tsk->fs = copy_fs_struct(fs);
48239 if (!tsk->fs)
48240 return -ENOMEM;
48241 + gr_set_chroot_entries(tsk, &tsk->fs->root);
48242 return 0;
48243 }
48244
48245 @@ -1019,10 +1057,13 @@ static struct task_struct *copy_process(
48246 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
48247 #endif
48248 retval = -EAGAIN;
48249 +
48250 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
48251 +
48252 if (atomic_read(&p->real_cred->user->processes) >=
48253 task_rlimit(p, RLIMIT_NPROC)) {
48254 - if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
48255 - p->real_cred->user != INIT_USER)
48256 + if (p->real_cred->user != INIT_USER &&
48257 + !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
48258 goto bad_fork_free;
48259 }
48260
48261 @@ -1176,6 +1217,8 @@ static struct task_struct *copy_process(
48262 goto bad_fork_free_pid;
48263 }
48264
48265 + gr_copy_label(p);
48266 +
48267 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
48268 /*
48269 * Clear TID on mm_release()?
48270 @@ -1328,6 +1371,8 @@ bad_fork_cleanup_count:
48271 bad_fork_free:
48272 free_task(p);
48273 fork_out:
48274 + gr_log_forkfail(retval);
48275 +
48276 return ERR_PTR(retval);
48277 }
48278
48279 @@ -1433,6 +1478,8 @@ long do_fork(unsigned long clone_flags,
48280 if (clone_flags & CLONE_PARENT_SETTID)
48281 put_user(nr, parent_tidptr);
48282
48283 + gr_handle_brute_check();
48284 +
48285 if (clone_flags & CLONE_VFORK) {
48286 p->vfork_done = &vfork;
48287 init_completion(&vfork);
48288 @@ -1557,7 +1604,7 @@ static int unshare_fs(unsigned long unsh
48289 return 0;
48290
48291 /* don't need lock here; in the worst case we'll do useless copy */
48292 - if (fs->users == 1)
48293 + if (atomic_read(&fs->users) == 1)
48294 return 0;
48295
48296 *new_fsp = copy_fs_struct(fs);
48297 @@ -1680,7 +1727,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
48298 fs = current->fs;
48299 write_lock(&fs->lock);
48300 current->fs = new_fs;
48301 - if (--fs->users)
48302 + gr_set_chroot_entries(current, &current->fs->root);
48303 + if (atomic_dec_return(&fs->users))
48304 new_fs = NULL;
48305 else
48306 new_fs = fs;
48307 diff -urNp linux-2.6.35.4/kernel/futex.c linux-2.6.35.4/kernel/futex.c
48308 --- linux-2.6.35.4/kernel/futex.c 2010-08-26 19:47:12.000000000 -0400
48309 +++ linux-2.6.35.4/kernel/futex.c 2010-09-17 20:12:37.000000000 -0400
48310 @@ -54,6 +54,7 @@
48311 #include <linux/mount.h>
48312 #include <linux/pagemap.h>
48313 #include <linux/syscalls.h>
48314 +#include <linux/ptrace.h>
48315 #include <linux/signal.h>
48316 #include <linux/module.h>
48317 #include <linux/magic.h>
48318 @@ -221,6 +222,11 @@ get_futex_key(u32 __user *uaddr, int fsh
48319 struct page *page;
48320 int err;
48321
48322 +#ifdef CONFIG_PAX_SEGMEXEC
48323 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
48324 + return -EFAULT;
48325 +#endif
48326 +
48327 /*
48328 * The futex address must be "naturally" aligned.
48329 */
48330 @@ -1843,7 +1849,7 @@ retry:
48331
48332 restart = &current_thread_info()->restart_block;
48333 restart->fn = futex_wait_restart;
48334 - restart->futex.uaddr = (u32 *)uaddr;
48335 + restart->futex.uaddr = uaddr;
48336 restart->futex.val = val;
48337 restart->futex.time = abs_time->tv64;
48338 restart->futex.bitset = bitset;
48339 @@ -2376,7 +2382,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
48340 {
48341 struct robust_list_head __user *head;
48342 unsigned long ret;
48343 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
48344 const struct cred *cred = current_cred(), *pcred;
48345 +#endif
48346
48347 if (!futex_cmpxchg_enabled)
48348 return -ENOSYS;
48349 @@ -2392,11 +2400,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
48350 if (!p)
48351 goto err_unlock;
48352 ret = -EPERM;
48353 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48354 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
48355 + goto err_unlock;
48356 +#else
48357 pcred = __task_cred(p);
48358 if (cred->euid != pcred->euid &&
48359 cred->euid != pcred->uid &&
48360 !capable(CAP_SYS_PTRACE))
48361 goto err_unlock;
48362 +#endif
48363 head = p->robust_list;
48364 rcu_read_unlock();
48365 }
48366 @@ -2458,7 +2471,7 @@ retry:
48367 */
48368 static inline int fetch_robust_entry(struct robust_list __user **entry,
48369 struct robust_list __user * __user *head,
48370 - int *pi)
48371 + unsigned int *pi)
48372 {
48373 unsigned long uentry;
48374
48375 diff -urNp linux-2.6.35.4/kernel/futex_compat.c linux-2.6.35.4/kernel/futex_compat.c
48376 --- linux-2.6.35.4/kernel/futex_compat.c 2010-08-26 19:47:12.000000000 -0400
48377 +++ linux-2.6.35.4/kernel/futex_compat.c 2010-09-17 20:12:37.000000000 -0400
48378 @@ -10,6 +10,7 @@
48379 #include <linux/compat.h>
48380 #include <linux/nsproxy.h>
48381 #include <linux/futex.h>
48382 +#include <linux/ptrace.h>
48383
48384 #include <asm/uaccess.h>
48385
48386 @@ -135,7 +136,10 @@ compat_sys_get_robust_list(int pid, comp
48387 {
48388 struct compat_robust_list_head __user *head;
48389 unsigned long ret;
48390 - const struct cred *cred = current_cred(), *pcred;
48391 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
48392 + const struct cred *cred = current_cred();
48393 + const struct cred *pcred;
48394 +#endif
48395
48396 if (!futex_cmpxchg_enabled)
48397 return -ENOSYS;
48398 @@ -151,11 +155,16 @@ compat_sys_get_robust_list(int pid, comp
48399 if (!p)
48400 goto err_unlock;
48401 ret = -EPERM;
48402 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48403 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
48404 + goto err_unlock;
48405 +#else
48406 pcred = __task_cred(p);
48407 if (cred->euid != pcred->euid &&
48408 cred->euid != pcred->uid &&
48409 !capable(CAP_SYS_PTRACE))
48410 goto err_unlock;
48411 +#endif
48412 head = p->compat_robust_list;
48413 rcu_read_unlock();
48414 }
48415 diff -urNp linux-2.6.35.4/kernel/gcov/base.c linux-2.6.35.4/kernel/gcov/base.c
48416 --- linux-2.6.35.4/kernel/gcov/base.c 2010-08-26 19:47:12.000000000 -0400
48417 +++ linux-2.6.35.4/kernel/gcov/base.c 2010-09-17 20:12:09.000000000 -0400
48418 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
48419 }
48420
48421 #ifdef CONFIG_MODULES
48422 -static inline int within(void *addr, void *start, unsigned long size)
48423 -{
48424 - return ((addr >= start) && (addr < start + size));
48425 -}
48426 -
48427 /* Update list and generate events when modules are unloaded. */
48428 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
48429 void *data)
48430 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
48431 prev = NULL;
48432 /* Remove entries located in module from linked list. */
48433 for (info = gcov_info_head; info; info = info->next) {
48434 - if (within(info, mod->module_core, mod->core_size)) {
48435 + if (within_module_core_rw((unsigned long)info, mod)) {
48436 if (prev)
48437 prev->next = info->next;
48438 else
48439 diff -urNp linux-2.6.35.4/kernel/hrtimer.c linux-2.6.35.4/kernel/hrtimer.c
48440 --- linux-2.6.35.4/kernel/hrtimer.c 2010-08-26 19:47:12.000000000 -0400
48441 +++ linux-2.6.35.4/kernel/hrtimer.c 2010-09-17 20:12:09.000000000 -0400
48442 @@ -1398,7 +1398,7 @@ void hrtimer_peek_ahead_timers(void)
48443 local_irq_restore(flags);
48444 }
48445
48446 -static void run_hrtimer_softirq(struct softirq_action *h)
48447 +static void run_hrtimer_softirq(void)
48448 {
48449 hrtimer_peek_ahead_timers();
48450 }
48451 diff -urNp linux-2.6.35.4/kernel/kallsyms.c linux-2.6.35.4/kernel/kallsyms.c
48452 --- linux-2.6.35.4/kernel/kallsyms.c 2010-08-26 19:47:12.000000000 -0400
48453 +++ linux-2.6.35.4/kernel/kallsyms.c 2010-09-17 20:12:37.000000000 -0400
48454 @@ -11,6 +11,9 @@
48455 * Changed the compression method from stem compression to "table lookup"
48456 * compression (see scripts/kallsyms.c for a more complete description)
48457 */
48458 +#ifdef CONFIG_GRKERNSEC_HIDESYM
48459 +#define __INCLUDED_BY_HIDESYM 1
48460 +#endif
48461 #include <linux/kallsyms.h>
48462 #include <linux/module.h>
48463 #include <linux/init.h>
48464 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_mark
48465
48466 static inline int is_kernel_inittext(unsigned long addr)
48467 {
48468 + if (system_state != SYSTEM_BOOTING)
48469 + return 0;
48470 +
48471 if (addr >= (unsigned long)_sinittext
48472 && addr <= (unsigned long)_einittext)
48473 return 1;
48474 return 0;
48475 }
48476
48477 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
48478 +#ifdef CONFIG_MODULES
48479 +static inline int is_module_text(unsigned long addr)
48480 +{
48481 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
48482 + return 1;
48483 +
48484 + addr = ktla_ktva(addr);
48485 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
48486 +}
48487 +#else
48488 +static inline int is_module_text(unsigned long addr)
48489 +{
48490 + return 0;
48491 +}
48492 +#endif
48493 +#endif
48494 +
48495 static inline int is_kernel_text(unsigned long addr)
48496 {
48497 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
48498 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigne
48499
48500 static inline int is_kernel(unsigned long addr)
48501 {
48502 +
48503 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
48504 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
48505 + return 1;
48506 +
48507 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
48508 +#else
48509 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
48510 +#endif
48511 +
48512 return 1;
48513 return in_gate_area_no_task(addr);
48514 }
48515
48516 static int is_ksym_addr(unsigned long addr)
48517 {
48518 +
48519 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
48520 + if (is_module_text(addr))
48521 + return 0;
48522 +#endif
48523 +
48524 if (all_var)
48525 return is_kernel(addr);
48526
48527 @@ -416,7 +455,6 @@ static unsigned long get_ksymbol_core(st
48528
48529 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
48530 {
48531 - iter->name[0] = '\0';
48532 iter->nameoff = get_symbol_offset(new_pos);
48533 iter->pos = new_pos;
48534 }
48535 @@ -464,6 +502,11 @@ static int s_show(struct seq_file *m, vo
48536 {
48537 struct kallsym_iter *iter = m->private;
48538
48539 +#ifdef CONFIG_GRKERNSEC_HIDESYM
48540 + if (current_uid())
48541 + return 0;
48542 +#endif
48543 +
48544 /* Some debugging symbols have no name. Ignore them. */
48545 if (!iter->name[0])
48546 return 0;
48547 @@ -504,7 +547,7 @@ static int kallsyms_open(struct inode *i
48548 struct kallsym_iter *iter;
48549 int ret;
48550
48551 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
48552 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
48553 if (!iter)
48554 return -ENOMEM;
48555 reset_iter(iter, 0);
48556 diff -urNp linux-2.6.35.4/kernel/kmod.c linux-2.6.35.4/kernel/kmod.c
48557 --- linux-2.6.35.4/kernel/kmod.c 2010-08-26 19:47:12.000000000 -0400
48558 +++ linux-2.6.35.4/kernel/kmod.c 2010-09-17 20:12:37.000000000 -0400
48559 @@ -90,6 +90,18 @@ int __request_module(bool wait, const ch
48560 if (ret)
48561 return ret;
48562
48563 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
48564 + /* we could do a tighter check here, but some distros
48565 + are taking it upon themselves to remove CAP_SYS_MODULE
48566 + from even root-running apps which cause modules to be
48567 + auto-loaded
48568 + */
48569 + if (current_uid()) {
48570 + gr_log_nonroot_mod_load(module_name);
48571 + return -EPERM;
48572 + }
48573 +#endif
48574 +
48575 /* If modprobe needs a service that is in a module, we get a recursive
48576 * loop. Limit the number of running kmod threads to max_threads/2 or
48577 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
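
The MODHARDEN check above rejects module auto-loading before the usermode helper is spawned whenever the triggering task is not root, so request_module() returns -EPERM to such callers and code that relies on auto-loading has to treat that as a soft failure. A hypothetical caller, sketched under that assumption (names invented):

#include <linux/kmod.h>
#include <linux/kernel.h>

/* Try to auto-load a handler module for a protocol number.  Under
 * CONFIG_GRKERNSEC_MODHARDEN this fails with -EPERM when the request
 * originates from a non-root task, so treat failure as non-fatal. */
static int example_load_proto_handler(int proto)
{
	int err = request_module("example-proto-%d", proto);

	if (err)
		pr_debug("example: proto %d handler not loaded (%d)\n",
			 proto, err);
	return err;
}
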
48578 diff -urNp linux-2.6.35.4/kernel/kprobes.c linux-2.6.35.4/kernel/kprobes.c
48579 --- linux-2.6.35.4/kernel/kprobes.c 2010-08-26 19:47:12.000000000 -0400
48580 +++ linux-2.6.35.4/kernel/kprobes.c 2010-09-17 20:12:09.000000000 -0400
48581 @@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_
48582 * kernel image and loaded module images reside. This is required
48583 * so x86_64 can correctly handle the %rip-relative fixups.
48584 */
48585 - kip->insns = module_alloc(PAGE_SIZE);
48586 + kip->insns = module_alloc_exec(PAGE_SIZE);
48587 if (!kip->insns) {
48588 kfree(kip);
48589 return NULL;
48590 @@ -223,7 +223,7 @@ static int __kprobes collect_one_slot(st
48591 */
48592 if (!list_is_singular(&kip->list)) {
48593 list_del(&kip->list);
48594 - module_free(NULL, kip->insns);
48595 + module_free_exec(NULL, kip->insns);
48596 kfree(kip);
48597 }
48598 return 1;
48599 @@ -1709,7 +1709,7 @@ static int __init init_kprobes(void)
48600 {
48601 int i, err = 0;
48602 unsigned long offset = 0, size = 0;
48603 - char *modname, namebuf[128];
48604 + char *modname, namebuf[KSYM_NAME_LEN];
48605 const char *symbol_name;
48606 void *addr;
48607 struct kprobe_blackpoint *kb;
48608 @@ -1835,7 +1835,7 @@ static int __kprobes show_kprobe_addr(st
48609 const char *sym = NULL;
48610 unsigned int i = *(loff_t *) v;
48611 unsigned long offset = 0;
48612 - char *modname, namebuf[128];
48613 + char *modname, namebuf[KSYM_NAME_LEN];
48614
48615 head = &kprobe_table[i];
48616 preempt_disable();
48617 diff -urNp linux-2.6.35.4/kernel/lockdep.c linux-2.6.35.4/kernel/lockdep.c
48618 --- linux-2.6.35.4/kernel/lockdep.c 2010-08-26 19:47:12.000000000 -0400
48619 +++ linux-2.6.35.4/kernel/lockdep.c 2010-09-17 20:12:09.000000000 -0400
48620 @@ -571,6 +571,10 @@ static int static_obj(void *obj)
48621 end = (unsigned long) &_end,
48622 addr = (unsigned long) obj;
48623
48624 +#ifdef CONFIG_PAX_KERNEXEC
48625 + start = ktla_ktva(start);
48626 +#endif
48627 +
48628 /*
48629 * static variable?
48630 */
48631 @@ -696,6 +700,7 @@ register_lock_class(struct lockdep_map *
48632 if (!static_obj(lock->key)) {
48633 debug_locks_off();
48634 printk("INFO: trying to register non-static key.\n");
48635 + printk("lock:%pS key:%pS.\n", lock, lock->key);
48636 printk("the code is fine but needs lockdep annotation.\n");
48637 printk("turning off the locking correctness validator.\n");
48638 dump_stack();
48639 diff -urNp linux-2.6.35.4/kernel/lockdep_proc.c linux-2.6.35.4/kernel/lockdep_proc.c
48640 --- linux-2.6.35.4/kernel/lockdep_proc.c 2010-08-26 19:47:12.000000000 -0400
48641 +++ linux-2.6.35.4/kernel/lockdep_proc.c 2010-09-17 20:12:09.000000000 -0400
48642 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
48643
48644 static void print_name(struct seq_file *m, struct lock_class *class)
48645 {
48646 - char str[128];
48647 + char str[KSYM_NAME_LEN];
48648 const char *name = class->name;
48649
48650 if (!name) {
48651 diff -urNp linux-2.6.35.4/kernel/module.c linux-2.6.35.4/kernel/module.c
48652 --- linux-2.6.35.4/kernel/module.c 2010-08-26 19:47:12.000000000 -0400
48653 +++ linux-2.6.35.4/kernel/module.c 2010-09-17 20:12:37.000000000 -0400
48654 @@ -96,7 +96,8 @@ static BLOCKING_NOTIFIER_HEAD(module_not
48655
48656 /* Bounds of module allocation, for speeding __module_address.
48657 * Protected by module_mutex. */
48658 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
48659 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
48660 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
48661
48662 int register_module_notifier(struct notifier_block * nb)
48663 {
48664 @@ -250,7 +251,7 @@ bool each_symbol(bool (*fn)(const struct
48665 return true;
48666
48667 list_for_each_entry_rcu(mod, &modules, list) {
48668 - struct symsearch arr[] = {
48669 + struct symsearch modarr[] = {
48670 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
48671 NOT_GPL_ONLY, false },
48672 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
48673 @@ -272,7 +273,7 @@ bool each_symbol(bool (*fn)(const struct
48674 #endif
48675 };
48676
48677 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
48678 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
48679 return true;
48680 }
48681 return false;
48682 @@ -383,7 +384,7 @@ static inline void __percpu *mod_percpu(
48683 static int percpu_modalloc(struct module *mod,
48684 unsigned long size, unsigned long align)
48685 {
48686 - if (align > PAGE_SIZE) {
48687 + if (align-1 >= PAGE_SIZE) {
48688 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
48689 mod->name, align, PAGE_SIZE);
48690 align = PAGE_SIZE;
48691 @@ -1562,7 +1563,8 @@ static void free_module(struct module *m
48692 destroy_params(mod->kp, mod->num_kp);
48693
48694 /* This may be NULL, but that's OK */
48695 - module_free(mod, mod->module_init);
48696 + module_free(mod, mod->module_init_rw);
48697 + module_free_exec(mod, mod->module_init_rx);
48698 kfree(mod->args);
48699 percpu_modfree(mod);
48700 #if defined(CONFIG_MODULE_UNLOAD)
48701 @@ -1570,10 +1572,12 @@ static void free_module(struct module *m
48702 free_percpu(mod->refptr);
48703 #endif
48704 /* Free lock-classes: */
48705 - lockdep_free_key_range(mod->module_core, mod->core_size);
48706 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
48707 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
48708
48709 /* Finally, free the core (containing the module structure) */
48710 - module_free(mod, mod->module_core);
48711 + module_free_exec(mod, mod->module_core_rx);
48712 + module_free(mod, mod->module_core_rw);
48713
48714 #ifdef CONFIG_MPU
48715 update_protections(current->mm);
48716 @@ -1670,7 +1674,9 @@ static int simplify_symbols(Elf_Shdr *se
48717 mod);
48718 /* Ok if resolved. */
48719 if (ksym && !IS_ERR(ksym)) {
48720 + pax_open_kernel();
48721 sym[i].st_value = ksym->value;
48722 + pax_close_kernel();
48723 break;
48724 }
48725
48726 @@ -1690,7 +1696,9 @@ static int simplify_symbols(Elf_Shdr *se
48727 secbase = (unsigned long)mod_percpu(mod);
48728 else
48729 secbase = sechdrs[sym[i].st_shndx].sh_addr;
48730 + pax_open_kernel();
48731 sym[i].st_value += secbase;
48732 + pax_close_kernel();
48733 break;
48734 }
48735 }
48736 @@ -1751,11 +1759,12 @@ static void layout_sections(struct modul
48737 || s->sh_entsize != ~0UL
48738 || strstarts(secstrings + s->sh_name, ".init"))
48739 continue;
48740 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
48741 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
48742 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
48743 + else
48744 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
48745 DEBUGP("\t%s\n", secstrings + s->sh_name);
48746 }
48747 - if (m == 0)
48748 - mod->core_text_size = mod->core_size;
48749 }
48750
48751 DEBUGP("Init section allocation order:\n");
48752 @@ -1768,12 +1777,13 @@ static void layout_sections(struct modul
48753 || s->sh_entsize != ~0UL
48754 || !strstarts(secstrings + s->sh_name, ".init"))
48755 continue;
48756 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
48757 - | INIT_OFFSET_MASK);
48758 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
48759 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
48760 + else
48761 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
48762 + s->sh_entsize |= INIT_OFFSET_MASK;
48763 DEBUGP("\t%s\n", secstrings + s->sh_name);
48764 }
48765 - if (m == 0)
48766 - mod->init_text_size = mod->init_size;
48767 }
48768 }
48769
48770 @@ -1877,9 +1887,8 @@ static int is_exported(const char *name,
48771
48772 /* As per nm */
48773 static char elf_type(const Elf_Sym *sym,
48774 - Elf_Shdr *sechdrs,
48775 - const char *secstrings,
48776 - struct module *mod)
48777 + const Elf_Shdr *sechdrs,
48778 + const char *secstrings)
48779 {
48780 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
48781 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
48782 @@ -1954,7 +1963,7 @@ static unsigned long layout_symtab(struc
48783
48784 /* Put symbol section at end of init part of module. */
48785 symsect->sh_flags |= SHF_ALLOC;
48786 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
48787 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
48788 symindex) | INIT_OFFSET_MASK;
48789 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
48790
48791 @@ -1971,19 +1980,19 @@ static unsigned long layout_symtab(struc
48792 }
48793
48794 /* Append room for core symbols at end of core part. */
48795 - symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
48796 - mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
48797 + symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
48798 + mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
48799
48800 /* Put string table section at end of init part of module. */
48801 strsect->sh_flags |= SHF_ALLOC;
48802 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
48803 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
48804 strindex) | INIT_OFFSET_MASK;
48805 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
48806
48807 /* Append room for core symbols' strings at end of core part. */
48808 - *pstroffs = mod->core_size;
48809 + *pstroffs = mod->core_size_rx;
48810 __set_bit(0, strmap);
48811 - mod->core_size += bitmap_weight(strmap, strsect->sh_size);
48812 + mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
48813
48814 return symoffs;
48815 }
48816 @@ -2007,12 +2016,14 @@ static void add_kallsyms(struct module *
48817 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
48818 mod->strtab = (void *)sechdrs[strindex].sh_addr;
48819
48820 + pax_open_kernel();
48821 +
48822 /* Set types up while we still have access to sections. */
48823 for (i = 0; i < mod->num_symtab; i++)
48824 mod->symtab[i].st_info
48825 - = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
48826 + = elf_type(&mod->symtab[i], sechdrs, secstrings);
48827
48828 - mod->core_symtab = dst = mod->module_core + symoffs;
48829 + mod->core_symtab = dst = mod->module_core_rx + symoffs;
48830 src = mod->symtab;
48831 *dst = *src;
48832 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
48833 @@ -2024,10 +2035,12 @@ static void add_kallsyms(struct module *
48834 }
48835 mod->core_num_syms = ndst;
48836
48837 - mod->core_strtab = s = mod->module_core + stroffs;
48838 + mod->core_strtab = s = mod->module_core_rx + stroffs;
48839 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
48840 if (test_bit(i, strmap))
48841 *++s = mod->strtab[i];
48842 +
48843 + pax_close_kernel();
48844 }
48845 #else
48846 static inline unsigned long layout_symtab(struct module *mod,
48847 @@ -2070,17 +2083,33 @@ static void dynamic_debug_remove(struct
48848 ddebug_remove_module(debug->modname);
48849 }
48850
48851 -static void *module_alloc_update_bounds(unsigned long size)
48852 +static void *module_alloc_update_bounds_rw(unsigned long size)
48853 {
48854 void *ret = module_alloc(size);
48855
48856 if (ret) {
48857 mutex_lock(&module_mutex);
48858 /* Update module bounds. */
48859 - if ((unsigned long)ret < module_addr_min)
48860 - module_addr_min = (unsigned long)ret;
48861 - if ((unsigned long)ret + size > module_addr_max)
48862 - module_addr_max = (unsigned long)ret + size;
48863 + if ((unsigned long)ret < module_addr_min_rw)
48864 + module_addr_min_rw = (unsigned long)ret;
48865 + if ((unsigned long)ret + size > module_addr_max_rw)
48866 + module_addr_max_rw = (unsigned long)ret + size;
48867 + mutex_unlock(&module_mutex);
48868 + }
48869 + return ret;
48870 +}
48871 +
48872 +static void *module_alloc_update_bounds_rx(unsigned long size)
48873 +{
48874 + void *ret = module_alloc_exec(size);
48875 +
48876 + if (ret) {
48877 + mutex_lock(&module_mutex);
48878 + /* Update module bounds. */
48879 + if ((unsigned long)ret < module_addr_min_rx)
48880 + module_addr_min_rx = (unsigned long)ret;
48881 + if ((unsigned long)ret + size > module_addr_max_rx)
48882 + module_addr_max_rx = (unsigned long)ret + size;
48883 mutex_unlock(&module_mutex);
48884 }
48885 return ret;
48886 @@ -2284,7 +2313,7 @@ static noinline struct module *load_modu
48887 secstrings, &stroffs, strmap);
48888
48889 /* Do the allocs. */
48890 - ptr = module_alloc_update_bounds(mod->core_size);
48891 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
48892 /*
48893 * The pointer to this block is stored in the module structure
48894 * which is inside the block. Just mark it as not being a
48895 @@ -2295,23 +2324,47 @@ static noinline struct module *load_modu
48896 err = -ENOMEM;
48897 goto free_percpu;
48898 }
48899 - memset(ptr, 0, mod->core_size);
48900 - mod->module_core = ptr;
48901 + memset(ptr, 0, mod->core_size_rw);
48902 + mod->module_core_rw = ptr;
48903
48904 - ptr = module_alloc_update_bounds(mod->init_size);
48905 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
48906 /*
48907 * The pointer to this block is stored in the module structure
48908 * which is inside the block. This block doesn't need to be
48909 * scanned as it contains data and code that will be freed
48910 * after the module is initialized.
48911 */
48912 - kmemleak_ignore(ptr);
48913 - if (!ptr && mod->init_size) {
48914 + kmemleak_not_leak(ptr);
48915 + if (!ptr && mod->init_size_rw) {
48916 + err = -ENOMEM;
48917 + goto free_core_rw;
48918 + }
48919 + memset(ptr, 0, mod->init_size_rw);
48920 + mod->module_init_rw = ptr;
48921 +
48922 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
48923 + kmemleak_not_leak(ptr);
48924 + if (!ptr) {
48925 + err = -ENOMEM;
48926 + goto free_init_rw;
48927 + }
48928 +
48929 + pax_open_kernel();
48930 + memset(ptr, 0, mod->core_size_rx);
48931 + pax_close_kernel();
48932 + mod->module_core_rx = ptr;
48933 +
48934 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
48935 + kmemleak_not_leak(ptr);
48936 + if (!ptr && mod->init_size_rx) {
48937 err = -ENOMEM;
48938 - goto free_core;
48939 + goto free_core_rx;
48940 }
48941 - memset(ptr, 0, mod->init_size);
48942 - mod->module_init = ptr;
48943 +
48944 + pax_open_kernel();
48945 + memset(ptr, 0, mod->init_size_rx);
48946 + pax_close_kernel();
48947 + mod->module_init_rx = ptr;
48948
48949 /* Transfer each section which specifies SHF_ALLOC */
48950 DEBUGP("final section addresses:\n");
48951 @@ -2321,17 +2374,41 @@ static noinline struct module *load_modu
48952 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
48953 continue;
48954
48955 - if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
48956 - dest = mod->module_init
48957 - + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
48958 - else
48959 - dest = mod->module_core + sechdrs[i].sh_entsize;
48960 + if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
48961 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
48962 + dest = mod->module_init_rw
48963 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
48964 + else
48965 + dest = mod->module_init_rx
48966 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
48967 + } else {
48968 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
48969 + dest = mod->module_core_rw + sechdrs[i].sh_entsize;
48970 + else
48971 + dest = mod->module_core_rx + sechdrs[i].sh_entsize;
48972 + }
48973 +
48974 + if (sechdrs[i].sh_type != SHT_NOBITS) {
48975
48976 - if (sechdrs[i].sh_type != SHT_NOBITS)
48977 - memcpy(dest, (void *)sechdrs[i].sh_addr,
48978 - sechdrs[i].sh_size);
48979 +#ifdef CONFIG_PAX_KERNEXEC
48980 + if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
48981 + pax_open_kernel();
48982 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
48983 + pax_close_kernel();
48984 + } else
48985 +#endif
48986 +
48987 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
48988 + }
48989 /* Update sh_addr to point to copy in image. */
48990 - sechdrs[i].sh_addr = (unsigned long)dest;
48991 +
48992 +#ifdef CONFIG_PAX_KERNEXEC
48993 + if (sechdrs[i].sh_flags & SHF_EXECINSTR)
48994 + sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
48995 + else
48996 +#endif
48997 +
48998 + sechdrs[i].sh_addr = (unsigned long)dest;
48999 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
49000 }
49001 /* Module has been moved. */
49002 @@ -2342,7 +2419,7 @@ static noinline struct module *load_modu
49003 mod->refptr = alloc_percpu(struct module_ref);
49004 if (!mod->refptr) {
49005 err = -ENOMEM;
49006 - goto free_init;
49007 + goto free_init_rx;
49008 }
49009 #endif
49010 /* Now we've moved module, initialize linked lists, etc. */
49011 @@ -2452,8 +2529,8 @@ static noinline struct module *load_modu
49012
49013 /* Now do relocations. */
49014 for (i = 1; i < hdr->e_shnum; i++) {
49015 - const char *strtab = (char *)sechdrs[strindex].sh_addr;
49016 unsigned int info = sechdrs[i].sh_info;
49017 + strtab = (char *)sechdrs[strindex].sh_addr;
49018
49019 /* Not a valid relocation section? */
49020 if (info >= hdr->e_shnum)
49021 @@ -2503,12 +2580,12 @@ static noinline struct module *load_modu
49022 * Do it before processing of module parameters, so the module
49023 * can provide parameter accessor functions of its own.
49024 */
49025 - if (mod->module_init)
49026 - flush_icache_range((unsigned long)mod->module_init,
49027 - (unsigned long)mod->module_init
49028 - + mod->init_size);
49029 - flush_icache_range((unsigned long)mod->module_core,
49030 - (unsigned long)mod->module_core + mod->core_size);
49031 + if (mod->module_init_rx)
49032 + flush_icache_range((unsigned long)mod->module_init_rx,
49033 + (unsigned long)mod->module_init_rx
49034 + + mod->init_size_rx);
49035 + flush_icache_range((unsigned long)mod->module_core_rx,
49036 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
49037
49038 set_fs(old_fs);
49039
49040 @@ -2574,12 +2651,16 @@ static noinline struct module *load_modu
49041 free_modinfo(mod);
49042 module_unload_free(mod);
49043 #if defined(CONFIG_MODULE_UNLOAD)
49044 + free_init_rx:
49045 free_percpu(mod->refptr);
49046 - free_init:
49047 #endif
49048 - module_free(mod, mod->module_init);
49049 - free_core:
49050 - module_free(mod, mod->module_core);
49051 + module_free_exec(mod, mod->module_init_rx);
49052 + free_core_rx:
49053 + module_free_exec(mod, mod->module_core_rx);
49054 + free_init_rw:
49055 + module_free(mod, mod->module_init_rw);
49056 + free_core_rw:
49057 + module_free(mod, mod->module_core_rw);
49058 /* mod will be freed with core. Don't access it beyond this line! */
49059 free_percpu:
49060 free_percpu(percpu);
49061 @@ -2669,10 +2750,12 @@ SYSCALL_DEFINE3(init_module, void __user
49062 mod->symtab = mod->core_symtab;
49063 mod->strtab = mod->core_strtab;
49064 #endif
49065 - module_free(mod, mod->module_init);
49066 - mod->module_init = NULL;
49067 - mod->init_size = 0;
49068 - mod->init_text_size = 0;
49069 + module_free(mod, mod->module_init_rw);
49070 + module_free_exec(mod, mod->module_init_rx);
49071 + mod->module_init_rw = NULL;
49072 + mod->module_init_rx = NULL;
49073 + mod->init_size_rw = 0;
49074 + mod->init_size_rx = 0;
49075 mutex_unlock(&module_mutex);
49076
49077 return 0;
49078 @@ -2703,10 +2786,16 @@ static const char *get_ksymbol(struct mo
49079 unsigned long nextval;
49080
49081 /* At worse, next value is at end of module */
49082 - if (within_module_init(addr, mod))
49083 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
49084 + if (within_module_init_rx(addr, mod))
49085 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
49086 + else if (within_module_init_rw(addr, mod))
49087 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
49088 + else if (within_module_core_rx(addr, mod))
49089 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
49090 + else if (within_module_core_rw(addr, mod))
49091 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
49092 else
49093 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
49094 + return NULL;
49095
49096 /* Scan for closest preceeding symbol, and next symbol. (ELF
49097 starts real symbols at 1). */
49098 @@ -2952,7 +3041,7 @@ static int m_show(struct seq_file *m, vo
49099 char buf[8];
49100
49101 seq_printf(m, "%s %u",
49102 - mod->name, mod->init_size + mod->core_size);
49103 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
49104 print_unload_info(m, mod);
49105
49106 /* Informative for users. */
49107 @@ -2961,7 +3050,7 @@ static int m_show(struct seq_file *m, vo
49108 mod->state == MODULE_STATE_COMING ? "Loading":
49109 "Live");
49110 /* Used by oprofile and other similar tools. */
49111 - seq_printf(m, " 0x%p", mod->module_core);
49112 + seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
49113
49114 /* Taints info */
49115 if (mod->taints)
49116 @@ -2997,7 +3086,17 @@ static const struct file_operations proc
49117
49118 static int __init proc_modules_init(void)
49119 {
49120 +#ifndef CONFIG_GRKERNSEC_HIDESYM
49121 +#ifdef CONFIG_GRKERNSEC_PROC_USER
49122 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
49123 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49124 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
49125 +#else
49126 proc_create("modules", 0, NULL, &proc_modules_operations);
49127 +#endif
49128 +#else
49129 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
49130 +#endif
49131 return 0;
49132 }
49133 module_init(proc_modules_init);
49134 @@ -3056,12 +3155,12 @@ struct module *__module_address(unsigned
49135 {
49136 struct module *mod;
49137
49138 - if (addr < module_addr_min || addr > module_addr_max)
49139 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
49140 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
49141 return NULL;
49142
49143 list_for_each_entry_rcu(mod, &modules, list)
49144 - if (within_module_core(addr, mod)
49145 - || within_module_init(addr, mod))
49146 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
49147 return mod;
49148 return NULL;
49149 }
49150 @@ -3095,11 +3194,20 @@ bool is_module_text_address(unsigned lon
49151 */
49152 struct module *__module_text_address(unsigned long addr)
49153 {
49154 - struct module *mod = __module_address(addr);
49155 + struct module *mod;
49156 +
49157 +#ifdef CONFIG_X86_32
49158 + addr = ktla_ktva(addr);
49159 +#endif
49160 +
49161 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
49162 + return NULL;
49163 +
49164 + mod = __module_address(addr);
49165 +
49166 if (mod) {
49167 /* Make sure it's within the text section. */
49168 - if (!within(addr, mod->module_init, mod->init_text_size)
49169 - && !within(addr, mod->module_core, mod->core_text_size))
49170 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
49171 mod = NULL;
49172 }
49173 return mod;
49174 diff -urNp linux-2.6.35.4/kernel/panic.c linux-2.6.35.4/kernel/panic.c
49175 --- linux-2.6.35.4/kernel/panic.c 2010-08-26 19:47:12.000000000 -0400
49176 +++ linux-2.6.35.4/kernel/panic.c 2010-09-17 20:12:09.000000000 -0400
49177 @@ -429,7 +429,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
49178 */
49179 void __stack_chk_fail(void)
49180 {
49181 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
49182 + dump_stack();
49183 + panic("stack-protector: Kernel stack is corrupted in: %pS\n",
49184 __builtin_return_address(0));
49185 }
49186 EXPORT_SYMBOL(__stack_chk_fail);
49187 diff -urNp linux-2.6.35.4/kernel/pid.c linux-2.6.35.4/kernel/pid.c
49188 --- linux-2.6.35.4/kernel/pid.c 2010-08-26 19:47:12.000000000 -0400
49189 +++ linux-2.6.35.4/kernel/pid.c 2010-09-17 20:12:37.000000000 -0400
49190 @@ -33,6 +33,7 @@
49191 #include <linux/rculist.h>
49192 #include <linux/bootmem.h>
49193 #include <linux/hash.h>
49194 +#include <linux/security.h>
49195 #include <linux/pid_namespace.h>
49196 #include <linux/init_task.h>
49197 #include <linux/syscalls.h>
49198 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
49199
49200 int pid_max = PID_MAX_DEFAULT;
49201
49202 -#define RESERVED_PIDS 300
49203 +#define RESERVED_PIDS 500
49204
49205 int pid_max_min = RESERVED_PIDS + 1;
49206 int pid_max_max = PID_MAX_LIMIT;
49207 @@ -382,7 +383,14 @@ EXPORT_SYMBOL(pid_task);
49208 */
49209 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
49210 {
49211 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
49212 + struct task_struct *task;
49213 +
49214 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
49215 +
49216 + if (gr_pid_is_chrooted(task))
49217 + return NULL;
49218 +
49219 + return task;
49220 }
49221
49222 struct task_struct *find_task_by_vpid(pid_t vnr)
49223 diff -urNp linux-2.6.35.4/kernel/posix-cpu-timers.c linux-2.6.35.4/kernel/posix-cpu-timers.c
49224 --- linux-2.6.35.4/kernel/posix-cpu-timers.c 2010-08-26 19:47:12.000000000 -0400
49225 +++ linux-2.6.35.4/kernel/posix-cpu-timers.c 2010-09-17 20:12:37.000000000 -0400
49226 @@ -6,6 +6,7 @@
49227 #include <linux/posix-timers.h>
49228 #include <linux/errno.h>
49229 #include <linux/math64.h>
49230 +#include <linux/security.h>
49231 #include <asm/uaccess.h>
49232 #include <linux/kernel_stat.h>
49233 #include <trace/events/timer.h>
49234 @@ -972,6 +973,7 @@ static void check_thread_timers(struct t
49235 unsigned long hard =
49236 ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);
49237
49238 + gr_learn_resource(tsk, RLIMIT_RTTIME, tsk->rt.timeout * (USEC_PER_SEC/HZ), 1);
49239 if (hard != RLIM_INFINITY &&
49240 tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
49241 /*
49242 @@ -1138,6 +1140,7 @@ static void check_process_timers(struct
49243 unsigned long hard =
49244 ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
49245 cputime_t x;
49246 + gr_learn_resource(tsk, RLIMIT_CPU, psecs, 0);
49247 if (psecs >= hard) {
49248 /*
49249 * At the hard limit, we just die.
49250 diff -urNp linux-2.6.35.4/kernel/power/hibernate.c linux-2.6.35.4/kernel/power/hibernate.c
49251 --- linux-2.6.35.4/kernel/power/hibernate.c 2010-08-26 19:47:12.000000000 -0400
49252 +++ linux-2.6.35.4/kernel/power/hibernate.c 2010-09-17 20:12:09.000000000 -0400
49253 @@ -50,14 +50,14 @@ enum {
49254
49255 static int hibernation_mode = HIBERNATION_SHUTDOWN;
49256
49257 -static struct platform_hibernation_ops *hibernation_ops;
49258 +static const struct platform_hibernation_ops *hibernation_ops;
49259
49260 /**
49261 * hibernation_set_ops - set the global hibernate operations
49262 * @ops: the hibernation operations to use in subsequent hibernation transitions
49263 */
49264
49265 -void hibernation_set_ops(struct platform_hibernation_ops *ops)
49266 +void hibernation_set_ops(const struct platform_hibernation_ops *ops)
49267 {
49268 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
49269 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
49270 diff -urNp linux-2.6.35.4/kernel/power/poweroff.c linux-2.6.35.4/kernel/power/poweroff.c
49271 --- linux-2.6.35.4/kernel/power/poweroff.c 2010-08-26 19:47:12.000000000 -0400
49272 +++ linux-2.6.35.4/kernel/power/poweroff.c 2010-09-17 20:12:09.000000000 -0400
49273 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
49274 .enable_mask = SYSRQ_ENABLE_BOOT,
49275 };
49276
49277 -static int pm_sysrq_init(void)
49278 +static int __init pm_sysrq_init(void)
49279 {
49280 register_sysrq_key('o', &sysrq_poweroff_op);
49281 return 0;
49282 diff -urNp linux-2.6.35.4/kernel/power/process.c linux-2.6.35.4/kernel/power/process.c
49283 --- linux-2.6.35.4/kernel/power/process.c 2010-08-26 19:47:12.000000000 -0400
49284 +++ linux-2.6.35.4/kernel/power/process.c 2010-09-17 20:12:09.000000000 -0400
49285 @@ -38,12 +38,15 @@ static int try_to_freeze_tasks(bool sig_
49286 struct timeval start, end;
49287 u64 elapsed_csecs64;
49288 unsigned int elapsed_csecs;
49289 + bool timedout = false;
49290
49291 do_gettimeofday(&start);
49292
49293 end_time = jiffies + TIMEOUT;
49294 while (true) {
49295 todo = 0;
49296 + if (time_after(jiffies, end_time))
49297 + timedout = true;
49298 read_lock(&tasklist_lock);
49299 do_each_thread(g, p) {
49300 if (frozen(p) || !freezeable(p))
49301 @@ -58,12 +61,16 @@ static int try_to_freeze_tasks(bool sig_
49302 * It is "frozen enough". If the task does wake
49303 * up, it will immediately call try_to_freeze.
49304 */
49305 - if (!task_is_stopped_or_traced(p) &&
49306 - !freezer_should_skip(p))
49307 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
49308 todo++;
49309 + if (timedout) {
49310 + printk(KERN_ERR "Task refusing to freeze:\n");
49311 + sched_show_task(p);
49312 + }
49313 + }
49314 } while_each_thread(g, p);
49315 read_unlock(&tasklist_lock);
49316 - if (!todo || time_after(jiffies, end_time))
49317 + if (!todo || timedout)
49318 break;
49319
49320 /*
49321 diff -urNp linux-2.6.35.4/kernel/power/suspend.c linux-2.6.35.4/kernel/power/suspend.c
49322 --- linux-2.6.35.4/kernel/power/suspend.c 2010-08-26 19:47:12.000000000 -0400
49323 +++ linux-2.6.35.4/kernel/power/suspend.c 2010-09-17 20:12:09.000000000 -0400
49324 @@ -30,13 +30,13 @@ const char *const pm_states[PM_SUSPEND_M
49325 [PM_SUSPEND_MEM] = "mem",
49326 };
49327
49328 -static struct platform_suspend_ops *suspend_ops;
49329 +static const struct platform_suspend_ops *suspend_ops;
49330
49331 /**
49332 * suspend_set_ops - Set the global suspend method table.
49333 * @ops: Pointer to ops structure.
49334 */
49335 -void suspend_set_ops(struct platform_suspend_ops *ops)
49336 +void suspend_set_ops(const struct platform_suspend_ops *ops)
49337 {
49338 mutex_lock(&pm_mutex);
49339 suspend_ops = ops;
49340 diff -urNp linux-2.6.35.4/kernel/printk.c linux-2.6.35.4/kernel/printk.c
49341 --- linux-2.6.35.4/kernel/printk.c 2010-08-26 19:47:12.000000000 -0400
49342 +++ linux-2.6.35.4/kernel/printk.c 2010-09-17 20:12:37.000000000 -0400
49343 @@ -266,6 +266,11 @@ int do_syslog(int type, char __user *buf
49344 char c;
49345 int error = 0;
49346
49347 +#ifdef CONFIG_GRKERNSEC_DMESG
49348 + if (grsec_enable_dmesg && !capable(CAP_SYS_ADMIN))
49349 + return -EPERM;
49350 +#endif
49351 +
49352 error = security_syslog(type, from_file);
49353 if (error)
49354 return error;
49355 diff -urNp linux-2.6.35.4/kernel/ptrace.c linux-2.6.35.4/kernel/ptrace.c
49356 --- linux-2.6.35.4/kernel/ptrace.c 2010-08-26 19:47:12.000000000 -0400
49357 +++ linux-2.6.35.4/kernel/ptrace.c 2010-09-17 20:12:37.000000000 -0400
49358 @@ -140,7 +140,7 @@ int __ptrace_may_access(struct task_stru
49359 cred->gid != tcred->egid ||
49360 cred->gid != tcred->sgid ||
49361 cred->gid != tcred->gid) &&
49362 - !capable(CAP_SYS_PTRACE)) {
49363 + !capable_nolog(CAP_SYS_PTRACE)) {
49364 rcu_read_unlock();
49365 return -EPERM;
49366 }
49367 @@ -148,7 +148,7 @@ int __ptrace_may_access(struct task_stru
49368 smp_rmb();
49369 if (task->mm)
49370 dumpable = get_dumpable(task->mm);
49371 - if (!dumpable && !capable(CAP_SYS_PTRACE))
49372 + if (!dumpable && !capable_nolog(CAP_SYS_PTRACE))
49373 return -EPERM;
49374
49375 return security_ptrace_access_check(task, mode);
49376 @@ -198,7 +198,7 @@ int ptrace_attach(struct task_struct *ta
49377 goto unlock_tasklist;
49378
49379 task->ptrace = PT_PTRACED;
49380 - if (capable(CAP_SYS_PTRACE))
49381 + if (capable_nolog(CAP_SYS_PTRACE))
49382 task->ptrace |= PT_PTRACE_CAP;
49383
49384 __ptrace_link(task, current);
49385 @@ -361,7 +361,7 @@ int ptrace_readdata(struct task_struct *
49386 break;
49387 return -EIO;
49388 }
49389 - if (copy_to_user(dst, buf, retval))
49390 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
49391 return -EFAULT;
49392 copied += retval;
49393 src += retval;
49394 @@ -572,18 +572,18 @@ int ptrace_request(struct task_struct *c
49395 ret = ptrace_setoptions(child, data);
49396 break;
49397 case PTRACE_GETEVENTMSG:
49398 - ret = put_user(child->ptrace_message, (unsigned long __user *) data);
49399 + ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
49400 break;
49401
49402 case PTRACE_GETSIGINFO:
49403 ret = ptrace_getsiginfo(child, &siginfo);
49404 if (!ret)
49405 - ret = copy_siginfo_to_user((siginfo_t __user *) data,
49406 + ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
49407 &siginfo);
49408 break;
49409
49410 case PTRACE_SETSIGINFO:
49411 - if (copy_from_user(&siginfo, (siginfo_t __user *) data,
49412 + if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
49413 sizeof siginfo))
49414 ret = -EFAULT;
49415 else
49416 @@ -703,14 +703,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
49417 goto out;
49418 }
49419
49420 + if (gr_handle_ptrace(child, request)) {
49421 + ret = -EPERM;
49422 + goto out_put_task_struct;
49423 + }
49424 +
49425 if (request == PTRACE_ATTACH) {
49426 ret = ptrace_attach(child);
49427 /*
49428 * Some architectures need to do book-keeping after
49429 * a ptrace attach.
49430 */
49431 - if (!ret)
49432 + if (!ret) {
49433 arch_ptrace_attach(child);
49434 + gr_audit_ptrace(child);
49435 + }
49436 goto out_put_task_struct;
49437 }
49438
49439 @@ -734,7 +741,7 @@ int generic_ptrace_peekdata(struct task_
49440 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
49441 if (copied != sizeof(tmp))
49442 return -EIO;
49443 - return put_user(tmp, (unsigned long __user *)data);
49444 + return put_user(tmp, (__force unsigned long __user *)data);
49445 }
49446
49447 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
49448 diff -urNp linux-2.6.35.4/kernel/rcutree.c linux-2.6.35.4/kernel/rcutree.c
49449 --- linux-2.6.35.4/kernel/rcutree.c 2010-08-26 19:47:12.000000000 -0400
49450 +++ linux-2.6.35.4/kernel/rcutree.c 2010-09-17 20:12:09.000000000 -0400
49451 @@ -1356,7 +1356,7 @@ __rcu_process_callbacks(struct rcu_state
49452 /*
49453 * Do softirq processing for the current CPU.
49454 */
49455 -static void rcu_process_callbacks(struct softirq_action *unused)
49456 +static void rcu_process_callbacks(void)
49457 {
49458 /*
49459 * Memory references from any prior RCU read-side critical sections
49460 diff -urNp linux-2.6.35.4/kernel/resource.c linux-2.6.35.4/kernel/resource.c
49461 --- linux-2.6.35.4/kernel/resource.c 2010-08-26 19:47:12.000000000 -0400
49462 +++ linux-2.6.35.4/kernel/resource.c 2010-09-17 20:12:37.000000000 -0400
49463 @@ -133,8 +133,18 @@ static const struct file_operations proc
49464
49465 static int __init ioresources_init(void)
49466 {
49467 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
49468 +#ifdef CONFIG_GRKERNSEC_PROC_USER
49469 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
49470 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
49471 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
49472 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
49473 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
49474 +#endif
49475 +#else
49476 proc_create("ioports", 0, NULL, &proc_ioports_operations);
49477 proc_create("iomem", 0, NULL, &proc_iomem_operations);
49478 +#endif
49479 return 0;
49480 }
49481 __initcall(ioresources_init);
49482 diff -urNp linux-2.6.35.4/kernel/sched.c linux-2.6.35.4/kernel/sched.c
49483 --- linux-2.6.35.4/kernel/sched.c 2010-08-26 19:47:12.000000000 -0400
49484 +++ linux-2.6.35.4/kernel/sched.c 2010-09-17 20:12:37.000000000 -0400
49485 @@ -4266,6 +4266,8 @@ int can_nice(const struct task_struct *p
49486 /* convert nice value [19,-20] to rlimit style value [1,40] */
49487 int nice_rlim = 20 - nice;
49488
49489 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
49490 +
49491 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
49492 capable(CAP_SYS_NICE));
49493 }
49494 @@ -4299,7 +4301,8 @@ SYSCALL_DEFINE1(nice, int, increment)
49495 if (nice > 19)
49496 nice = 19;
49497
49498 - if (increment < 0 && !can_nice(current, nice))
49499 + if (increment < 0 && (!can_nice(current, nice) ||
49500 + gr_handle_chroot_nice()))
49501 return -EPERM;
49502
49503 retval = security_task_setnice(current, nice);
49504 @@ -4446,6 +4449,7 @@ recheck:
49505 rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
49506 unlock_task_sighand(p, &flags);
49507
49508 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
49509 /* can't set/change the rt policy */
49510 if (policy != p->policy && !rlim_rtprio)
49511 return -EPERM;
49512 diff -urNp linux-2.6.35.4/kernel/sched_fair.c linux-2.6.35.4/kernel/sched_fair.c
49513 --- linux-2.6.35.4/kernel/sched_fair.c 2010-08-26 19:47:12.000000000 -0400
49514 +++ linux-2.6.35.4/kernel/sched_fair.c 2010-09-17 20:12:09.000000000 -0400
49515 @@ -3390,7 +3390,7 @@ out:
49516 * In CONFIG_NO_HZ case, the idle load balance owner will do the
49517 * rebalancing for all the cpus for whom scheduler ticks are stopped.
49518 */
49519 -static void run_rebalance_domains(struct softirq_action *h)
49520 +static void run_rebalance_domains(void)
49521 {
49522 int this_cpu = smp_processor_id();
49523 struct rq *this_rq = cpu_rq(this_cpu);
49524 diff -urNp linux-2.6.35.4/kernel/signal.c linux-2.6.35.4/kernel/signal.c
49525 --- linux-2.6.35.4/kernel/signal.c 2010-08-26 19:47:12.000000000 -0400
49526 +++ linux-2.6.35.4/kernel/signal.c 2010-09-17 20:20:18.000000000 -0400
49527 @@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cache
49528
49529 int print_fatal_signals __read_mostly;
49530
49531 -static void __user *sig_handler(struct task_struct *t, int sig)
49532 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
49533 {
49534 return t->sighand->action[sig - 1].sa.sa_handler;
49535 }
49536
49537 -static int sig_handler_ignored(void __user *handler, int sig)
49538 +static int sig_handler_ignored(__sighandler_t handler, int sig)
49539 {
49540 /* Is it explicitly or implicitly ignored? */
49541 return handler == SIG_IGN ||
49542 @@ -60,7 +60,7 @@ static int sig_handler_ignored(void __us
49543 static int sig_task_ignored(struct task_struct *t, int sig,
49544 int from_ancestor_ns)
49545 {
49546 - void __user *handler;
49547 + __sighandler_t handler;
49548
49549 handler = sig_handler(t, sig);
49550
49551 @@ -243,6 +243,9 @@ __sigqueue_alloc(int sig, struct task_st
49552 atomic_inc(&user->sigpending);
49553 rcu_read_unlock();
49554
49555 + if (!override_rlimit)
49556 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
49557 +
49558 if (override_rlimit ||
49559 atomic_read(&user->sigpending) <=
49560 task_rlimit(t, RLIMIT_SIGPENDING)) {
49561 @@ -367,7 +370,7 @@ flush_signal_handlers(struct task_struct
49562
49563 int unhandled_signal(struct task_struct *tsk, int sig)
49564 {
49565 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
49566 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
49567 if (is_global_init(tsk))
49568 return 1;
49569 if (handler != SIG_IGN && handler != SIG_DFL)
49570 @@ -678,6 +681,9 @@ static int check_kill_permission(int sig
49571 }
49572 }
49573
49574 + if (gr_handle_signal(t, sig))
49575 + return -EPERM;
49576 +
49577 return security_task_kill(t, info, sig, 0);
49578 }
49579
49580 @@ -1025,7 +1031,7 @@ __group_send_sig_info(int sig, struct si
49581 return send_signal(sig, info, p, 1);
49582 }
49583
49584 -static int
49585 +int
49586 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
49587 {
49588 return send_signal(sig, info, t, 0);
49589 @@ -1079,6 +1085,9 @@ force_sig_info(int sig, struct siginfo *
49590 ret = specific_send_sig_info(sig, info, t);
49591 spin_unlock_irqrestore(&t->sighand->siglock, flags);
49592
49593 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
49594 + gr_handle_crash(t, sig);
49595 +
49596 return ret;
49597 }
49598
49599 @@ -1136,8 +1145,11 @@ int group_send_sig_info(int sig, struct
49600 ret = check_kill_permission(sig, info, p);
49601 rcu_read_unlock();
49602
49603 - if (!ret && sig)
49604 + if (!ret && sig) {
49605 ret = do_send_sig_info(sig, info, p, true);
49606 + if (!ret)
49607 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
49608 + }
49609
49610 return ret;
49611 }
49612 diff -urNp linux-2.6.35.4/kernel/smp.c linux-2.6.35.4/kernel/smp.c
49613 --- linux-2.6.35.4/kernel/smp.c 2010-08-26 19:47:12.000000000 -0400
49614 +++ linux-2.6.35.4/kernel/smp.c 2010-09-17 20:12:09.000000000 -0400
49615 @@ -499,22 +499,22 @@ int smp_call_function(void (*func)(void
49616 }
49617 EXPORT_SYMBOL(smp_call_function);
49618
49619 -void ipi_call_lock(void)
49620 +void ipi_call_lock(void) __acquires(call_function.lock)
49621 {
49622 raw_spin_lock(&call_function.lock);
49623 }
49624
49625 -void ipi_call_unlock(void)
49626 +void ipi_call_unlock(void) __releases(call_function.lock)
49627 {
49628 raw_spin_unlock(&call_function.lock);
49629 }
49630
49631 -void ipi_call_lock_irq(void)
49632 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
49633 {
49634 raw_spin_lock_irq(&call_function.lock);
49635 }
49636
49637 -void ipi_call_unlock_irq(void)
49638 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
49639 {
49640 raw_spin_unlock_irq(&call_function.lock);
49641 }
49642 diff -urNp linux-2.6.35.4/kernel/softirq.c linux-2.6.35.4/kernel/softirq.c
49643 --- linux-2.6.35.4/kernel/softirq.c 2010-08-26 19:47:12.000000000 -0400
49644 +++ linux-2.6.35.4/kernel/softirq.c 2010-09-17 20:12:09.000000000 -0400
49645 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
49646
49647 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
49648
49649 -char *softirq_to_name[NR_SOFTIRQS] = {
49650 +const char * const softirq_to_name[NR_SOFTIRQS] = {
49651 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
49652 "TASKLET", "SCHED", "HRTIMER", "RCU"
49653 };
49654 @@ -190,7 +190,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
49655
49656 asmlinkage void __do_softirq(void)
49657 {
49658 - struct softirq_action *h;
49659 + const struct softirq_action *h;
49660 __u32 pending;
49661 int max_restart = MAX_SOFTIRQ_RESTART;
49662 int cpu;
49663 @@ -216,7 +216,7 @@ restart:
49664 kstat_incr_softirqs_this_cpu(h - softirq_vec);
49665
49666 trace_softirq_entry(h, softirq_vec);
49667 - h->action(h);
49668 + h->action();
49669 trace_softirq_exit(h, softirq_vec);
49670 if (unlikely(prev_count != preempt_count())) {
49671 printk(KERN_ERR "huh, entered softirq %td %s %p"
49672 @@ -340,7 +340,7 @@ void raise_softirq(unsigned int nr)
49673 local_irq_restore(flags);
49674 }
49675
49676 -void open_softirq(int nr, void (*action)(struct softirq_action *))
49677 +void open_softirq(int nr, void (*action)(void))
49678 {
49679 softirq_vec[nr].action = action;
49680 }
49681 @@ -396,7 +396,7 @@ void __tasklet_hi_schedule_first(struct
49682
49683 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
49684
49685 -static void tasklet_action(struct softirq_action *a)
49686 +static void tasklet_action(void)
49687 {
49688 struct tasklet_struct *list;
49689
49690 @@ -431,7 +431,7 @@ static void tasklet_action(struct softir
49691 }
49692 }
49693
49694 -static void tasklet_hi_action(struct softirq_action *a)
49695 +static void tasklet_hi_action(void)
49696 {
49697 struct tasklet_struct *list;
49698
49699 diff -urNp linux-2.6.35.4/kernel/sys.c linux-2.6.35.4/kernel/sys.c
49700 --- linux-2.6.35.4/kernel/sys.c 2010-08-26 19:47:12.000000000 -0400
49701 +++ linux-2.6.35.4/kernel/sys.c 2010-09-17 20:28:33.000000000 -0400
49702 @@ -134,6 +134,12 @@ static int set_one_prio(struct task_stru
49703 error = -EACCES;
49704 goto out;
49705 }
49706 +
49707 + if (gr_handle_chroot_setpriority(p, niceval)) {
49708 + error = -EACCES;
49709 + goto out;
49710 + }
49711 +
49712 no_nice = security_task_setnice(p, niceval);
49713 if (no_nice) {
49714 error = no_nice;
49715 @@ -511,6 +517,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
49716 goto error;
49717 }
49718
49719 + if (gr_check_group_change(new->gid, new->egid, -1))
49720 + goto error;
49721 +
49722 if (rgid != (gid_t) -1 ||
49723 (egid != (gid_t) -1 && egid != old->gid))
49724 new->sgid = new->egid;
49725 @@ -540,6 +549,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
49726 old = current_cred();
49727
49728 retval = -EPERM;
49729 +
49730 + if (gr_check_group_change(gid, gid, gid))
49731 + goto error;
49732 +
49733 if (capable(CAP_SETGID))
49734 new->gid = new->egid = new->sgid = new->fsgid = gid;
49735 else if (gid == old->gid || gid == old->sgid)
49736 @@ -620,6 +633,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
49737 goto error;
49738 }
49739
49740 + if (gr_check_user_change(new->uid, new->euid, -1))
49741 + goto error;
49742 +
49743 if (new->uid != old->uid) {
49744 retval = set_user(new);
49745 if (retval < 0)
49746 @@ -664,6 +680,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
49747 old = current_cred();
49748
49749 retval = -EPERM;
49750 +
49751 + if (gr_check_crash_uid(uid))
49752 + goto error;
49753 + if (gr_check_user_change(uid, uid, uid))
49754 + goto error;
49755 +
49756 if (capable(CAP_SETUID)) {
49757 new->suid = new->uid = uid;
49758 if (uid != old->uid) {
49759 @@ -718,6 +740,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
49760 goto error;
49761 }
49762
49763 + if (gr_check_user_change(ruid, euid, -1))
49764 + goto error;
49765 +
49766 if (ruid != (uid_t) -1) {
49767 new->uid = ruid;
49768 if (ruid != old->uid) {
49769 @@ -782,6 +807,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
49770 goto error;
49771 }
49772
49773 + if (gr_check_group_change(rgid, egid, -1))
49774 + goto error;
49775 +
49776 if (rgid != (gid_t) -1)
49777 new->gid = rgid;
49778 if (egid != (gid_t) -1)
49779 @@ -828,6 +856,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
49780 old = current_cred();
49781 old_fsuid = old->fsuid;
49782
49783 + if (gr_check_user_change(-1, -1, uid))
49784 + goto error;
49785 +
49786 if (uid == old->uid || uid == old->euid ||
49787 uid == old->suid || uid == old->fsuid ||
49788 capable(CAP_SETUID)) {
49789 @@ -838,6 +869,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
49790 }
49791 }
49792
49793 +error:
49794 abort_creds(new);
49795 return old_fsuid;
49796
49797 @@ -864,12 +896,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
49798 if (gid == old->gid || gid == old->egid ||
49799 gid == old->sgid || gid == old->fsgid ||
49800 capable(CAP_SETGID)) {
49801 + if (gr_check_group_change(-1, -1, gid))
49802 + goto error;
49803 +
49804 if (gid != old_fsgid) {
49805 new->fsgid = gid;
49806 goto change_okay;
49807 }
49808 }
49809
49810 +error:
49811 abort_creds(new);
49812 return old_fsgid;
49813
49814 @@ -1491,7 +1527,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
49815 error = get_dumpable(me->mm);
49816 break;
49817 case PR_SET_DUMPABLE:
49818 - if (arg2 < 0 || arg2 > 1) {
49819 + if (arg2 > 1) {
49820 error = -EINVAL;
49821 break;
49822 }
49823 diff -urNp linux-2.6.35.4/kernel/sysctl.c linux-2.6.35.4/kernel/sysctl.c
49824 --- linux-2.6.35.4/kernel/sysctl.c 2010-08-26 19:47:12.000000000 -0400
49825 +++ linux-2.6.35.4/kernel/sysctl.c 2010-09-17 20:18:09.000000000 -0400
49826 @@ -78,6 +78,13 @@
49827
49828
49829 #if defined(CONFIG_SYSCTL)
49830 +#include <linux/grsecurity.h>
49831 +#include <linux/grinternal.h>
49832 +
49833 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
49834 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
49835 + const int op);
49836 +extern int gr_handle_chroot_sysctl(const int op);
49837
49838 /* External variables not in a header file. */
49839 extern int sysctl_overcommit_memory;
49840 @@ -185,6 +192,7 @@ static int sysrq_sysctl_handler(ctl_tabl
49841 }
49842
49843 #endif
49844 +extern struct ctl_table grsecurity_table[];
49845
49846 static struct ctl_table root_table[];
49847 static struct ctl_table_root sysctl_table_root;
49848 @@ -217,6 +225,20 @@ extern struct ctl_table epoll_table[];
49849 int sysctl_legacy_va_layout;
49850 #endif
49851
49852 +#ifdef CONFIG_PAX_SOFTMODE
49853 +static ctl_table pax_table[] = {
49854 + {
49855 + .procname = "softmode",
49856 + .data = &pax_softmode,
49857 + .maxlen = sizeof(unsigned int),
49858 + .mode = 0600,
49859 + .proc_handler = &proc_dointvec,
49860 + },
49861 +
49862 + { }
49863 +};
49864 +#endif
49865 +
49866 /* The default sysctl tables: */
49867
49868 static struct ctl_table root_table[] = {
49869 @@ -269,6 +291,22 @@ static int max_extfrag_threshold = 1000;
49870 #endif
49871
49872 static struct ctl_table kern_table[] = {
49873 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
49874 + {
49875 + .procname = "grsecurity",
49876 + .mode = 0500,
49877 + .child = grsecurity_table,
49878 + },
49879 +#endif
49880 +
49881 +#ifdef CONFIG_PAX_SOFTMODE
49882 + {
49883 + .procname = "pax",
49884 + .mode = 0500,
49885 + .child = pax_table,
49886 + },
49887 +#endif
49888 +
49889 {
49890 .procname = "sched_child_runs_first",
49891 .data = &sysctl_sched_child_runs_first,
49892 @@ -1171,6 +1209,13 @@ static struct ctl_table vm_table[] = {
49893 .proc_handler = proc_dointvec_minmax,
49894 .extra1 = &zero,
49895 },
49896 + {
49897 + .procname = "heap_stack_gap",
49898 + .data = &sysctl_heap_stack_gap,
49899 + .maxlen = sizeof(sysctl_heap_stack_gap),
49900 + .mode = 0644,
49901 + .proc_handler = proc_doulongvec_minmax,
49902 + },
49903 #else
49904 {
49905 .procname = "nr_trim_pages",
49906 @@ -1686,6 +1731,16 @@ int sysctl_perm(struct ctl_table_root *r
49907 int error;
49908 int mode;
49909
49910 + if (table->parent != NULL && table->parent->procname != NULL &&
49911 + table->procname != NULL &&
49912 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
49913 + return -EACCES;
49914 + if (gr_handle_chroot_sysctl(op))
49915 + return -EACCES;
49916 + error = gr_handle_sysctl(table, op);
49917 + if (error)
49918 + return error;
49919 +
49920 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
49921 if (error)
49922 return error;
49923 @@ -2201,6 +2256,8 @@ static int proc_put_long(void __user **b
49924 len = strlen(tmp);
49925 if (len > *size)
49926 len = *size;
49927 + if (len > sizeof(tmp))
49928 + len = sizeof(tmp);
49929 if (copy_to_user(*buf, tmp, len))
49930 return -EFAULT;
49931 *size -= len;
49932 diff -urNp linux-2.6.35.4/kernel/taskstats.c linux-2.6.35.4/kernel/taskstats.c
49933 --- linux-2.6.35.4/kernel/taskstats.c 2010-08-26 19:47:12.000000000 -0400
49934 +++ linux-2.6.35.4/kernel/taskstats.c 2010-09-17 20:12:37.000000000 -0400
49935 @@ -27,9 +27,12 @@
49936 #include <linux/cgroup.h>
49937 #include <linux/fs.h>
49938 #include <linux/file.h>
49939 +#include <linux/grsecurity.h>
49940 #include <net/genetlink.h>
49941 #include <asm/atomic.h>
49942
49943 +extern int gr_is_taskstats_denied(int pid);
49944 +
49945 /*
49946 * Maximum length of a cpumask that can be specified in
49947 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
49948 @@ -432,6 +435,9 @@ static int taskstats_user_cmd(struct sk_
49949 size_t size;
49950 cpumask_var_t mask;
49951
49952 + if (gr_is_taskstats_denied(current->pid))
49953 + return -EACCES;
49954 +
49955 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
49956 return -ENOMEM;
49957
49958 diff -urNp linux-2.6.35.4/kernel/time/tick-broadcast.c linux-2.6.35.4/kernel/time/tick-broadcast.c
49959 --- linux-2.6.35.4/kernel/time/tick-broadcast.c 2010-08-26 19:47:12.000000000 -0400
49960 +++ linux-2.6.35.4/kernel/time/tick-broadcast.c 2010-09-17 20:12:09.000000000 -0400
49961 @@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct cl
49962 * then clear the broadcast bit.
49963 */
49964 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
49965 - int cpu = smp_processor_id();
49966 + cpu = smp_processor_id();
49967
49968 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
49969 tick_broadcast_clear_oneshot(cpu);
49970 diff -urNp linux-2.6.35.4/kernel/time/timer_list.c linux-2.6.35.4/kernel/time/timer_list.c
49971 --- linux-2.6.35.4/kernel/time/timer_list.c 2010-08-26 19:47:12.000000000 -0400
49972 +++ linux-2.6.35.4/kernel/time/timer_list.c 2010-09-17 20:12:37.000000000 -0400
49973 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
49974
49975 static void print_name_offset(struct seq_file *m, void *sym)
49976 {
49977 +#ifdef CONFIG_GRKERNSEC_HIDESYM
49978 + SEQ_printf(m, "<%p>", NULL);
49979 +#else
49980 char symname[KSYM_NAME_LEN];
49981
49982 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
49983 SEQ_printf(m, "<%p>", sym);
49984 else
49985 SEQ_printf(m, "%s", symname);
49986 +#endif
49987 }
49988
49989 static void
49990 @@ -112,7 +116,11 @@ next_one:
49991 static void
49992 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
49993 {
49994 +#ifdef CONFIG_GRKERNSEC_HIDESYM
49995 + SEQ_printf(m, " .base: %p\n", NULL);
49996 +#else
49997 SEQ_printf(m, " .base: %p\n", base);
49998 +#endif
49999 SEQ_printf(m, " .index: %d\n",
50000 base->index);
50001 SEQ_printf(m, " .resolution: %Lu nsecs\n",
50002 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs
50003 {
50004 struct proc_dir_entry *pe;
50005
50006 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
50007 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
50008 +#else
50009 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
50010 +#endif
50011 if (!pe)
50012 return -ENOMEM;
50013 return 0;
50014 diff -urNp linux-2.6.35.4/kernel/time/timer_stats.c linux-2.6.35.4/kernel/time/timer_stats.c
50015 --- linux-2.6.35.4/kernel/time/timer_stats.c 2010-08-26 19:47:12.000000000 -0400
50016 +++ linux-2.6.35.4/kernel/time/timer_stats.c 2010-09-17 20:12:37.000000000 -0400
50017 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
50018
50019 static void print_name_offset(struct seq_file *m, unsigned long addr)
50020 {
50021 +#ifdef CONFIG_GRKERNSEC_HIDESYM
50022 + seq_printf(m, "<%p>", NULL);
50023 +#else
50024 char symname[KSYM_NAME_LEN];
50025
50026 if (lookup_symbol_name(addr, symname) < 0)
50027 seq_printf(m, "<%p>", (void *)addr);
50028 else
50029 seq_printf(m, "%s", symname);
50030 +#endif
50031 }
50032
50033 static int tstats_show(struct seq_file *m, void *v)
50034 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(voi
50035 {
50036 struct proc_dir_entry *pe;
50037
50038 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
50039 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
50040 +#else
50041 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
50042 +#endif
50043 if (!pe)
50044 return -ENOMEM;
50045 return 0;
50046 diff -urNp linux-2.6.35.4/kernel/time.c linux-2.6.35.4/kernel/time.c
50047 --- linux-2.6.35.4/kernel/time.c 2010-08-26 19:47:12.000000000 -0400
50048 +++ linux-2.6.35.4/kernel/time.c 2010-09-17 20:12:37.000000000 -0400
50049 @@ -93,6 +93,9 @@ SYSCALL_DEFINE1(stime, time_t __user *,
50050 return err;
50051
50052 do_settimeofday(&tv);
50053 +
50054 + gr_log_timechange();
50055 +
50056 return 0;
50057 }
50058
50059 @@ -200,6 +203,8 @@ SYSCALL_DEFINE2(settimeofday, struct tim
50060 return -EFAULT;
50061 }
50062
50063 + gr_log_timechange();
50064 +
50065 return do_sys_settimeofday(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
50066 }
50067
50068 @@ -238,7 +243,7 @@ EXPORT_SYMBOL(current_fs_time);
50069 * Avoid unnecessary multiplications/divisions in the
50070 * two most common HZ cases:
50071 */
50072 -unsigned int inline jiffies_to_msecs(const unsigned long j)
50073 +inline unsigned int jiffies_to_msecs(const unsigned long j)
50074 {
50075 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
50076 return (MSEC_PER_SEC / HZ) * j;
50077 @@ -254,7 +259,7 @@ unsigned int inline jiffies_to_msecs(con
50078 }
50079 EXPORT_SYMBOL(jiffies_to_msecs);
50080
50081 -unsigned int inline jiffies_to_usecs(const unsigned long j)
50082 +inline unsigned int jiffies_to_usecs(const unsigned long j)
50083 {
50084 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
50085 return (USEC_PER_SEC / HZ) * j;
50086 diff -urNp linux-2.6.35.4/kernel/timer.c linux-2.6.35.4/kernel/timer.c
50087 --- linux-2.6.35.4/kernel/timer.c 2010-08-26 19:47:12.000000000 -0400
50088 +++ linux-2.6.35.4/kernel/timer.c 2010-09-17 20:12:09.000000000 -0400
50089 @@ -1272,7 +1272,7 @@ void update_process_times(int user_tick)
50090 /*
50091 * This function runs timers and the timer-tq in bottom half context.
50092 */
50093 -static void run_timer_softirq(struct softirq_action *h)
50094 +static void run_timer_softirq(void)
50095 {
50096 struct tvec_base *base = __get_cpu_var(tvec_bases);
50097
50098 diff -urNp linux-2.6.35.4/kernel/trace/ftrace.c linux-2.6.35.4/kernel/trace/ftrace.c
50099 --- linux-2.6.35.4/kernel/trace/ftrace.c 2010-08-26 19:47:12.000000000 -0400
50100 +++ linux-2.6.35.4/kernel/trace/ftrace.c 2010-09-17 20:12:09.000000000 -0400
50101 @@ -1101,13 +1101,18 @@ ftrace_code_disable(struct module *mod,
50102
50103 ip = rec->ip;
50104
50105 + ret = ftrace_arch_code_modify_prepare();
50106 + FTRACE_WARN_ON(ret);
50107 + if (ret)
50108 + return 0;
50109 +
50110 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
50111 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
50112 if (ret) {
50113 ftrace_bug(ret, ip);
50114 rec->flags |= FTRACE_FL_FAILED;
50115 - return 0;
50116 }
50117 - return 1;
50118 + return ret ? 0 : 1;
50119 }
50120
50121 /*
50122 diff -urNp linux-2.6.35.4/kernel/trace/ring_buffer.c linux-2.6.35.4/kernel/trace/ring_buffer.c
50123 --- linux-2.6.35.4/kernel/trace/ring_buffer.c 2010-08-26 19:47:12.000000000 -0400
50124 +++ linux-2.6.35.4/kernel/trace/ring_buffer.c 2010-09-17 20:12:09.000000000 -0400
50125 @@ -635,7 +635,7 @@ static struct list_head *rb_list_head(st
50126 * the reader page). But if the next page is a header page,
50127 * its flags will be non zero.
50128 */
50129 -static int inline
50130 +static inline int
50131 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
50132 struct buffer_page *page, struct list_head *list)
50133 {
50134 diff -urNp linux-2.6.35.4/kernel/trace/trace.c linux-2.6.35.4/kernel/trace/trace.c
50135 --- linux-2.6.35.4/kernel/trace/trace.c 2010-08-26 19:47:12.000000000 -0400
50136 +++ linux-2.6.35.4/kernel/trace/trace.c 2010-09-17 20:12:09.000000000 -0400
50137 @@ -3965,10 +3965,9 @@ static const struct file_operations trac
50138 };
50139 #endif
50140
50141 -static struct dentry *d_tracer;
50142 -
50143 struct dentry *tracing_init_dentry(void)
50144 {
50145 + static struct dentry *d_tracer;
50146 static int once;
50147
50148 if (d_tracer)
50149 @@ -3988,10 +3987,9 @@ struct dentry *tracing_init_dentry(void)
50150 return d_tracer;
50151 }
50152
50153 -static struct dentry *d_percpu;
50154 -
50155 struct dentry *tracing_dentry_percpu(void)
50156 {
50157 + static struct dentry *d_percpu;
50158 static int once;
50159 struct dentry *d_tracer;
50160
50161 diff -urNp linux-2.6.35.4/kernel/trace/trace_output.c linux-2.6.35.4/kernel/trace/trace_output.c
50162 --- linux-2.6.35.4/kernel/trace/trace_output.c 2010-08-26 19:47:12.000000000 -0400
50163 +++ linux-2.6.35.4/kernel/trace/trace_output.c 2010-09-17 20:12:09.000000000 -0400
50164 @@ -281,7 +281,7 @@ int trace_seq_path(struct trace_seq *s,
50165
50166 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
50167 if (!IS_ERR(p)) {
50168 - p = mangle_path(s->buffer + s->len, p, "\n");
50169 + p = mangle_path(s->buffer + s->len, p, "\n\\");
50170 if (p) {
50171 s->len = p - s->buffer;
50172 return 1;
50173 diff -urNp linux-2.6.35.4/kernel/trace/trace_stack.c linux-2.6.35.4/kernel/trace/trace_stack.c
50174 --- linux-2.6.35.4/kernel/trace/trace_stack.c 2010-08-26 19:47:12.000000000 -0400
50175 +++ linux-2.6.35.4/kernel/trace/trace_stack.c 2010-09-17 20:12:09.000000000 -0400
50176 @@ -50,7 +50,7 @@ static inline void check_stack(void)
50177 return;
50178
50179 /* we do not handle interrupt stacks yet */
50180 - if (!object_is_on_stack(&this_size))
50181 + if (!object_starts_on_stack(&this_size))
50182 return;
50183
50184 local_irq_save(flags);
50185 diff -urNp linux-2.6.35.4/lib/bug.c linux-2.6.35.4/lib/bug.c
50186 --- linux-2.6.35.4/lib/bug.c 2010-08-26 19:47:12.000000000 -0400
50187 +++ linux-2.6.35.4/lib/bug.c 2010-09-17 20:12:09.000000000 -0400
50188 @@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned l
50189 return BUG_TRAP_TYPE_NONE;
50190
50191 bug = find_bug(bugaddr);
50192 + if (!bug)
50193 + return BUG_TRAP_TYPE_NONE;
50194
50195 printk(KERN_EMERG "------------[ cut here ]------------\n");
50196
50197 diff -urNp linux-2.6.35.4/lib/debugobjects.c linux-2.6.35.4/lib/debugobjects.c
50198 --- linux-2.6.35.4/lib/debugobjects.c 2010-08-26 19:47:12.000000000 -0400
50199 +++ linux-2.6.35.4/lib/debugobjects.c 2010-09-17 20:12:09.000000000 -0400
50200 @@ -281,7 +281,7 @@ static void debug_object_is_on_stack(voi
50201 if (limit > 4)
50202 return;
50203
50204 - is_on_stack = object_is_on_stack(addr);
50205 + is_on_stack = object_starts_on_stack(addr);
50206 if (is_on_stack == onstack)
50207 return;
50208
50209 diff -urNp linux-2.6.35.4/lib/dma-debug.c linux-2.6.35.4/lib/dma-debug.c
50210 --- linux-2.6.35.4/lib/dma-debug.c 2010-08-26 19:47:12.000000000 -0400
50211 +++ linux-2.6.35.4/lib/dma-debug.c 2010-09-17 20:12:09.000000000 -0400
50212 @@ -861,7 +861,7 @@ out:
50213
50214 static void check_for_stack(struct device *dev, void *addr)
50215 {
50216 - if (object_is_on_stack(addr))
50217 + if (object_starts_on_stack(addr))
50218 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
50219 "stack [addr=%p]\n", addr);
50220 }
50221 diff -urNp linux-2.6.35.4/lib/inflate.c linux-2.6.35.4/lib/inflate.c
50222 --- linux-2.6.35.4/lib/inflate.c 2010-08-26 19:47:12.000000000 -0400
50223 +++ linux-2.6.35.4/lib/inflate.c 2010-09-17 20:12:09.000000000 -0400
50224 @@ -267,7 +267,7 @@ static void free(void *where)
50225 malloc_ptr = free_mem_ptr;
50226 }
50227 #else
50228 -#define malloc(a) kmalloc(a, GFP_KERNEL)
50229 +#define malloc(a) kmalloc((a), GFP_KERNEL)
50230 #define free(a) kfree(a)
50231 #endif
50232
50233 diff -urNp linux-2.6.35.4/lib/Kconfig.debug linux-2.6.35.4/lib/Kconfig.debug
50234 --- linux-2.6.35.4/lib/Kconfig.debug 2010-08-26 19:47:12.000000000 -0400
50235 +++ linux-2.6.35.4/lib/Kconfig.debug 2010-09-17 20:12:37.000000000 -0400
50236 @@ -970,7 +970,7 @@ config LATENCYTOP
50237 select STACKTRACE
50238 select SCHEDSTATS
50239 select SCHED_DEBUG
50240 - depends on HAVE_LATENCYTOP_SUPPORT
50241 + depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
50242 help
50243 Enable this option if you want to use the LatencyTOP tool
50244 to find out which userspace is blocking on what kernel operations.
50245 diff -urNp linux-2.6.35.4/lib/parser.c linux-2.6.35.4/lib/parser.c
50246 --- linux-2.6.35.4/lib/parser.c 2010-08-26 19:47:12.000000000 -0400
50247 +++ linux-2.6.35.4/lib/parser.c 2010-09-17 20:12:09.000000000 -0400
50248 @@ -129,7 +129,7 @@ static int match_number(substring_t *s,
50249 char *buf;
50250 int ret;
50251
50252 - buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
50253 + buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
50254 if (!buf)
50255 return -ENOMEM;
50256 memcpy(buf, s->from, s->to - s->from);
50257 diff -urNp linux-2.6.35.4/lib/radix-tree.c linux-2.6.35.4/lib/radix-tree.c
50258 --- linux-2.6.35.4/lib/radix-tree.c 2010-08-26 19:47:12.000000000 -0400
50259 +++ linux-2.6.35.4/lib/radix-tree.c 2010-09-17 20:12:09.000000000 -0400
50260 @@ -80,7 +80,7 @@ struct radix_tree_preload {
50261 int nr;
50262 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
50263 };
50264 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
50265 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
50266
50267 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
50268 {
50269 diff -urNp linux-2.6.35.4/localversion-grsec linux-2.6.35.4/localversion-grsec
50270 --- linux-2.6.35.4/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
50271 +++ linux-2.6.35.4/localversion-grsec 2010-09-17 20:12:37.000000000 -0400
50272 @@ -0,0 +1 @@
50273 +-grsec
50274 diff -urNp linux-2.6.35.4/Makefile linux-2.6.35.4/Makefile
50275 --- linux-2.6.35.4/Makefile 2010-08-26 19:47:12.000000000 -0400
50276 +++ linux-2.6.35.4/Makefile 2010-09-17 20:12:37.000000000 -0400
50277 @@ -230,8 +230,8 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
50278
50279 HOSTCC = gcc
50280 HOSTCXX = g++
50281 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
50282 -HOSTCXXFLAGS = -O2
50283 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
50284 +HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
50285
50286 # Decide whether to build built-in, modular, or both.
50287 # Normally, just do built-in.
50288 @@ -650,7 +650,7 @@ export mod_strip_cmd
50289
50290
50291 ifeq ($(KBUILD_EXTMOD),)
50292 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
50293 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
50294
50295 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
50296 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
50297 diff -urNp linux-2.6.35.4/mm/bootmem.c linux-2.6.35.4/mm/bootmem.c
50298 --- linux-2.6.35.4/mm/bootmem.c 2010-08-26 19:47:12.000000000 -0400
50299 +++ linux-2.6.35.4/mm/bootmem.c 2010-09-17 20:12:09.000000000 -0400
50300 @@ -200,19 +200,30 @@ static void __init __free_pages_memory(u
50301 unsigned long __init free_all_memory_core_early(int nodeid)
50302 {
50303 int i;
50304 - u64 start, end;
50305 + u64 start, end, startrange, endrange;
50306 unsigned long count = 0;
50307 - struct range *range = NULL;
50308 + struct range *range = NULL, rangerange = { 0, 0 };
50309 int nr_range;
50310
50311 nr_range = get_free_all_memory_range(&range, nodeid);
50312 + startrange = __pa(range) >> PAGE_SHIFT;
50313 + endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
50314
50315 for (i = 0; i < nr_range; i++) {
50316 start = range[i].start;
50317 end = range[i].end;
50318 + if (start <= endrange && startrange < end) {
50319 + BUG_ON(rangerange.start | rangerange.end);
50320 + rangerange = range[i];
50321 + continue;
50322 + }
50323 count += end - start;
50324 __free_pages_memory(start, end);
50325 }
50326 + start = rangerange.start;
50327 + end = rangerange.end;
50328 + count += end - start;
50329 + __free_pages_memory(start, end);
50330
50331 return count;
50332 }
50333 diff -urNp linux-2.6.35.4/mm/filemap.c linux-2.6.35.4/mm/filemap.c
50334 --- linux-2.6.35.4/mm/filemap.c 2010-08-26 19:47:12.000000000 -0400
50335 +++ linux-2.6.35.4/mm/filemap.c 2010-09-17 20:12:37.000000000 -0400
50336 @@ -1640,7 +1640,7 @@ int generic_file_mmap(struct file * file
50337 struct address_space *mapping = file->f_mapping;
50338
50339 if (!mapping->a_ops->readpage)
50340 - return -ENOEXEC;
50341 + return -ENODEV;
50342 file_accessed(file);
50343 vma->vm_ops = &generic_file_vm_ops;
50344 vma->vm_flags |= VM_CAN_NONLINEAR;
50345 @@ -2036,6 +2036,7 @@ inline int generic_write_checks(struct f
50346 *pos = i_size_read(inode);
50347
50348 if (limit != RLIM_INFINITY) {
50349 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
50350 if (*pos >= limit) {
50351 send_sig(SIGXFSZ, current, 0);
50352 return -EFBIG;
50353 diff -urNp linux-2.6.35.4/mm/fremap.c linux-2.6.35.4/mm/fremap.c
50354 --- linux-2.6.35.4/mm/fremap.c 2010-08-26 19:47:12.000000000 -0400
50355 +++ linux-2.6.35.4/mm/fremap.c 2010-09-17 20:12:09.000000000 -0400
50356 @@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
50357 retry:
50358 vma = find_vma(mm, start);
50359
50360 +#ifdef CONFIG_PAX_SEGMEXEC
50361 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
50362 + goto out;
50363 +#endif
50364 +
50365 /*
50366 * Make sure the vma is shared, that it supports prefaulting,
50367 * and that the remapped range is valid and fully within
50368 @@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
50369 /*
50370 * drop PG_Mlocked flag for over-mapped range
50371 */
50372 - unsigned int saved_flags = vma->vm_flags;
50373 + unsigned long saved_flags = vma->vm_flags;
50374 munlock_vma_pages_range(vma, start, start + size);
50375 vma->vm_flags = saved_flags;
50376 }
50377 diff -urNp linux-2.6.35.4/mm/highmem.c linux-2.6.35.4/mm/highmem.c
50378 --- linux-2.6.35.4/mm/highmem.c 2010-08-26 19:47:12.000000000 -0400
50379 +++ linux-2.6.35.4/mm/highmem.c 2010-09-17 20:12:09.000000000 -0400
50380 @@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
50381 * So no dangers, even with speculative execution.
50382 */
50383 page = pte_page(pkmap_page_table[i]);
50384 + pax_open_kernel();
50385 pte_clear(&init_mm, (unsigned long)page_address(page),
50386 &pkmap_page_table[i]);
50387 -
50388 + pax_close_kernel();
50389 set_page_address(page, NULL);
50390 need_flush = 1;
50391 }
50392 @@ -177,9 +178,11 @@ start:
50393 }
50394 }
50395 vaddr = PKMAP_ADDR(last_pkmap_nr);
50396 +
50397 + pax_open_kernel();
50398 set_pte_at(&init_mm, vaddr,
50399 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
50400 -
50401 + pax_close_kernel();
50402 pkmap_count[last_pkmap_nr] = 1;
50403 set_page_address(page, (void *)vaddr);
50404
50405 diff -urNp linux-2.6.35.4/mm/hugetlb.c linux-2.6.35.4/mm/hugetlb.c
50406 --- linux-2.6.35.4/mm/hugetlb.c 2010-08-26 19:47:12.000000000 -0400
50407 +++ linux-2.6.35.4/mm/hugetlb.c 2010-09-17 20:12:09.000000000 -0400
50408 @@ -2272,6 +2272,26 @@ static int unmap_ref_private(struct mm_s
50409 return 1;
50410 }
50411
50412 +#ifdef CONFIG_PAX_SEGMEXEC
50413 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
50414 +{
50415 + struct mm_struct *mm = vma->vm_mm;
50416 + struct vm_area_struct *vma_m;
50417 + unsigned long address_m;
50418 + pte_t *ptep_m;
50419 +
50420 + vma_m = pax_find_mirror_vma(vma);
50421 + if (!vma_m)
50422 + return;
50423 +
50424 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
50425 + address_m = address + SEGMEXEC_TASK_SIZE;
50426 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
50427 + get_page(page_m);
50428 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
50429 +}
50430 +#endif
50431 +
50432 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
50433 unsigned long address, pte_t *ptep, pte_t pte,
50434 struct page *pagecache_page)
50435 @@ -2352,6 +2372,11 @@ retry_avoidcopy:
50436 huge_ptep_clear_flush(vma, address, ptep);
50437 set_huge_pte_at(mm, address, ptep,
50438 make_huge_pte(vma, new_page, 1));
50439 +
50440 +#ifdef CONFIG_PAX_SEGMEXEC
50441 + pax_mirror_huge_pte(vma, address, new_page);
50442 +#endif
50443 +
50444 /* Make the old page be freed below */
50445 new_page = old_page;
50446 }
50447 @@ -2483,6 +2508,10 @@ retry:
50448 && (vma->vm_flags & VM_SHARED)));
50449 set_huge_pte_at(mm, address, ptep, new_pte);
50450
50451 +#ifdef CONFIG_PAX_SEGMEXEC
50452 + pax_mirror_huge_pte(vma, address, page);
50453 +#endif
50454 +
50455 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
50456 /* Optimization, do the COW without a second fault */
50457 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
50458 @@ -2511,6 +2540,28 @@ int hugetlb_fault(struct mm_struct *mm,
50459 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
50460 struct hstate *h = hstate_vma(vma);
50461
50462 +#ifdef CONFIG_PAX_SEGMEXEC
50463 + struct vm_area_struct *vma_m;
50464 +
50465 + vma_m = pax_find_mirror_vma(vma);
50466 + if (vma_m) {
50467 + unsigned long address_m;
50468 +
50469 + if (vma->vm_start > vma_m->vm_start) {
50470 + address_m = address;
50471 + address -= SEGMEXEC_TASK_SIZE;
50472 + vma = vma_m;
50473 + h = hstate_vma(vma);
50474 + } else
50475 + address_m = address + SEGMEXEC_TASK_SIZE;
50476 +
50477 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
50478 + return VM_FAULT_OOM;
50479 + address_m &= HPAGE_MASK;
50480 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
50481 + }
50482 +#endif
50483 +
50484 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
50485 if (!ptep)
50486 return VM_FAULT_OOM;
50487 diff -urNp linux-2.6.35.4/mm/Kconfig linux-2.6.35.4/mm/Kconfig
50488 --- linux-2.6.35.4/mm/Kconfig 2010-08-26 19:47:12.000000000 -0400
50489 +++ linux-2.6.35.4/mm/Kconfig 2010-09-17 20:12:37.000000000 -0400
50490 @@ -240,7 +240,7 @@ config KSM
50491 config DEFAULT_MMAP_MIN_ADDR
50492 int "Low address space to protect from user allocation"
50493 depends on MMU
50494 - default 4096
50495 + default 65536
50496 help
50497 This is the portion of low virtual memory which should be protected
50498 from userspace allocation. Keeping a user from writing to low pages
50499 diff -urNp linux-2.6.35.4/mm/maccess.c linux-2.6.35.4/mm/maccess.c
50500 --- linux-2.6.35.4/mm/maccess.c 2010-08-26 19:47:12.000000000 -0400
50501 +++ linux-2.6.35.4/mm/maccess.c 2010-09-17 20:12:09.000000000 -0400
50502 @@ -15,10 +15,10 @@
50503 * happens, handle that and return -EFAULT.
50504 */
50505
50506 -long __weak probe_kernel_read(void *dst, void *src, size_t size)
50507 +long __weak probe_kernel_read(void *dst, const void *src, size_t size)
50508 __attribute__((alias("__probe_kernel_read")));
50509
50510 -long __probe_kernel_read(void *dst, void *src, size_t size)
50511 +long __probe_kernel_read(void *dst, const void *src, size_t size)
50512 {
50513 long ret;
50514 mm_segment_t old_fs = get_fs();
50515 @@ -43,10 +43,10 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
50516 * Safely write to address @dst from the buffer at @src. If a kernel fault
50517 * happens, handle that and return -EFAULT.
50518 */
50519 -long __weak probe_kernel_write(void *dst, void *src, size_t size)
50520 +long __weak probe_kernel_write(void *dst, const void *src, size_t size)
50521 __attribute__((alias("__probe_kernel_write")));
50522
50523 -long __probe_kernel_write(void *dst, void *src, size_t size)
50524 +long __probe_kernel_write(void *dst, const void *src, size_t size)
50525 {
50526 long ret;
50527 mm_segment_t old_fs = get_fs();
50528 diff -urNp linux-2.6.35.4/mm/madvise.c linux-2.6.35.4/mm/madvise.c
50529 --- linux-2.6.35.4/mm/madvise.c 2010-08-26 19:47:12.000000000 -0400
50530 +++ linux-2.6.35.4/mm/madvise.c 2010-09-17 20:12:09.000000000 -0400
50531 @@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_a
50532 pgoff_t pgoff;
50533 unsigned long new_flags = vma->vm_flags;
50534
50535 +#ifdef CONFIG_PAX_SEGMEXEC
50536 + struct vm_area_struct *vma_m;
50537 +#endif
50538 +
50539 switch (behavior) {
50540 case MADV_NORMAL:
50541 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
50542 @@ -104,6 +108,13 @@ success:
50543 /*
50544 * vm_flags is protected by the mmap_sem held in write mode.
50545 */
50546 +
50547 +#ifdef CONFIG_PAX_SEGMEXEC
50548 + vma_m = pax_find_mirror_vma(vma);
50549 + if (vma_m)
50550 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
50551 +#endif
50552 +
50553 vma->vm_flags = new_flags;
50554
50555 out:
50556 @@ -162,6 +173,11 @@ static long madvise_dontneed(struct vm_a
50557 struct vm_area_struct ** prev,
50558 unsigned long start, unsigned long end)
50559 {
50560 +
50561 +#ifdef CONFIG_PAX_SEGMEXEC
50562 + struct vm_area_struct *vma_m;
50563 +#endif
50564 +
50565 *prev = vma;
50566 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
50567 return -EINVAL;
50568 @@ -174,6 +190,21 @@ static long madvise_dontneed(struct vm_a
50569 zap_page_range(vma, start, end - start, &details);
50570 } else
50571 zap_page_range(vma, start, end - start, NULL);
50572 +
50573 +#ifdef CONFIG_PAX_SEGMEXEC
50574 + vma_m = pax_find_mirror_vma(vma);
50575 + if (vma_m) {
50576 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
50577 + struct zap_details details = {
50578 + .nonlinear_vma = vma_m,
50579 + .last_index = ULONG_MAX,
50580 + };
50581 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
50582 + } else
50583 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
50584 + }
50585 +#endif
50586 +
50587 return 0;
50588 }
50589
50590 @@ -366,6 +397,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
50591 if (end < start)
50592 goto out;
50593
50594 +#ifdef CONFIG_PAX_SEGMEXEC
50595 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
50596 + if (end > SEGMEXEC_TASK_SIZE)
50597 + goto out;
50598 + } else
50599 +#endif
50600 +
50601 + if (end > TASK_SIZE)
50602 + goto out;
50603 +
50604 error = 0;
50605 if (end == start)
50606 goto out;
50607 diff -urNp linux-2.6.35.4/mm/memory.c linux-2.6.35.4/mm/memory.c
50608 --- linux-2.6.35.4/mm/memory.c 2010-08-26 19:47:12.000000000 -0400
50609 +++ linux-2.6.35.4/mm/memory.c 2010-09-17 20:12:09.000000000 -0400
50610 @@ -259,8 +259,12 @@ static inline void free_pmd_range(struct
50611 return;
50612
50613 pmd = pmd_offset(pud, start);
50614 +
50615 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
50616 pud_clear(pud);
50617 pmd_free_tlb(tlb, pmd, start);
50618 +#endif
50619 +
50620 }
50621
50622 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
50623 @@ -292,8 +296,12 @@ static inline void free_pud_range(struct
50624 return;
50625
50626 pud = pud_offset(pgd, start);
50627 +
50628 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
50629 pgd_clear(pgd);
50630 pud_free_tlb(tlb, pud, start);
50631 +#endif
50632 +
50633 }
50634
50635 /*
50636 @@ -1363,10 +1371,10 @@ int __get_user_pages(struct task_struct
50637 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
50638 i = 0;
50639
50640 - do {
50641 + while (nr_pages) {
50642 struct vm_area_struct *vma;
50643
50644 - vma = find_extend_vma(mm, start);
50645 + vma = find_vma(mm, start);
50646 if (!vma && in_gate_area(tsk, start)) {
50647 unsigned long pg = start & PAGE_MASK;
50648 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
50649 @@ -1418,7 +1426,7 @@ int __get_user_pages(struct task_struct
50650 continue;
50651 }
50652
50653 - if (!vma ||
50654 + if (!vma || start < vma->vm_start ||
50655 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
50656 !(vm_flags & vma->vm_flags))
50657 return i ? : -EFAULT;
50658 @@ -1493,7 +1501,7 @@ int __get_user_pages(struct task_struct
50659 start += PAGE_SIZE;
50660 nr_pages--;
50661 } while (nr_pages && start < vma->vm_end);
50662 - } while (nr_pages);
50663 + }
50664 return i;
50665 }
50666
50667 @@ -2089,6 +2097,186 @@ static inline void cow_user_page(struct
50668 copy_user_highpage(dst, src, va, vma);
50669 }
50670
50671 +#ifdef CONFIG_PAX_SEGMEXEC
50672 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
50673 +{
50674 + struct mm_struct *mm = vma->vm_mm;
50675 + spinlock_t *ptl;
50676 + pte_t *pte, entry;
50677 +
50678 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
50679 + entry = *pte;
50680 + if (!pte_present(entry)) {
50681 + if (!pte_none(entry)) {
50682 + BUG_ON(pte_file(entry));
50683 + free_swap_and_cache(pte_to_swp_entry(entry));
50684 + pte_clear_not_present_full(mm, address, pte, 0);
50685 + }
50686 + } else {
50687 + struct page *page;
50688 +
50689 + flush_cache_page(vma, address, pte_pfn(entry));
50690 + entry = ptep_clear_flush(vma, address, pte);
50691 + BUG_ON(pte_dirty(entry));
50692 + page = vm_normal_page(vma, address, entry);
50693 + if (page) {
50694 + update_hiwater_rss(mm);
50695 + if (PageAnon(page))
50696 + dec_mm_counter_fast(mm, MM_ANONPAGES);
50697 + else
50698 + dec_mm_counter_fast(mm, MM_FILEPAGES);
50699 + page_remove_rmap(page);
50700 + page_cache_release(page);
50701 + }
50702 + }
50703 + pte_unmap_unlock(pte, ptl);
50704 +}
50705 +
50706 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
50707 + *
50708 + * the ptl of the lower mapped page is held on entry and is not released on exit
50709 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
50710 + */
50711 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
50712 +{
50713 + struct mm_struct *mm = vma->vm_mm;
50714 + unsigned long address_m;
50715 + spinlock_t *ptl_m;
50716 + struct vm_area_struct *vma_m;
50717 + pmd_t *pmd_m;
50718 + pte_t *pte_m, entry_m;
50719 +
50720 + BUG_ON(!page_m || !PageAnon(page_m));
50721 +
50722 + vma_m = pax_find_mirror_vma(vma);
50723 + if (!vma_m)
50724 + return;
50725 +
50726 + BUG_ON(!PageLocked(page_m));
50727 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
50728 + address_m = address + SEGMEXEC_TASK_SIZE;
50729 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
50730 + pte_m = pte_offset_map_nested(pmd_m, address_m);
50731 + ptl_m = pte_lockptr(mm, pmd_m);
50732 + if (ptl != ptl_m) {
50733 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
50734 + if (!pte_none(*pte_m))
50735 + goto out;
50736 + }
50737 +
50738 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
50739 + page_cache_get(page_m);
50740 + page_add_anon_rmap(page_m, vma_m, address_m);
50741 + inc_mm_counter_fast(mm, MM_ANONPAGES);
50742 + set_pte_at(mm, address_m, pte_m, entry_m);
50743 + update_mmu_cache(vma_m, address_m, entry_m);
50744 +out:
50745 + if (ptl != ptl_m)
50746 + spin_unlock(ptl_m);
50747 + pte_unmap_nested(pte_m);
50748 + unlock_page(page_m);
50749 +}
50750 +
50751 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
50752 +{
50753 + struct mm_struct *mm = vma->vm_mm;
50754 + unsigned long address_m;
50755 + spinlock_t *ptl_m;
50756 + struct vm_area_struct *vma_m;
50757 + pmd_t *pmd_m;
50758 + pte_t *pte_m, entry_m;
50759 +
50760 + BUG_ON(!page_m || PageAnon(page_m));
50761 +
50762 + vma_m = pax_find_mirror_vma(vma);
50763 + if (!vma_m)
50764 + return;
50765 +
50766 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
50767 + address_m = address + SEGMEXEC_TASK_SIZE;
50768 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
50769 + pte_m = pte_offset_map_nested(pmd_m, address_m);
50770 + ptl_m = pte_lockptr(mm, pmd_m);
50771 + if (ptl != ptl_m) {
50772 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
50773 + if (!pte_none(*pte_m))
50774 + goto out;
50775 + }
50776 +
50777 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
50778 + page_cache_get(page_m);
50779 + page_add_file_rmap(page_m);
50780 + inc_mm_counter_fast(mm, MM_FILEPAGES);
50781 + set_pte_at(mm, address_m, pte_m, entry_m);
50782 + update_mmu_cache(vma_m, address_m, entry_m);
50783 +out:
50784 + if (ptl != ptl_m)
50785 + spin_unlock(ptl_m);
50786 + pte_unmap_nested(pte_m);
50787 +}
50788 +
50789 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
50790 +{
50791 + struct mm_struct *mm = vma->vm_mm;
50792 + unsigned long address_m;
50793 + spinlock_t *ptl_m;
50794 + struct vm_area_struct *vma_m;
50795 + pmd_t *pmd_m;
50796 + pte_t *pte_m, entry_m;
50797 +
50798 + vma_m = pax_find_mirror_vma(vma);
50799 + if (!vma_m)
50800 + return;
50801 +
50802 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
50803 + address_m = address + SEGMEXEC_TASK_SIZE;
50804 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
50805 + pte_m = pte_offset_map_nested(pmd_m, address_m);
50806 + ptl_m = pte_lockptr(mm, pmd_m);
50807 + if (ptl != ptl_m) {
50808 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
50809 + if (!pte_none(*pte_m))
50810 + goto out;
50811 + }
50812 +
50813 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
50814 + set_pte_at(mm, address_m, pte_m, entry_m);
50815 +out:
50816 + if (ptl != ptl_m)
50817 + spin_unlock(ptl_m);
50818 + pte_unmap_nested(pte_m);
50819 +}
50820 +
50821 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
50822 +{
50823 + struct page *page_m;
50824 + pte_t entry;
50825 +
50826 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
50827 + goto out;
50828 +
50829 + entry = *pte;
50830 + page_m = vm_normal_page(vma, address, entry);
50831 + if (!page_m)
50832 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
50833 + else if (PageAnon(page_m)) {
50834 + if (pax_find_mirror_vma(vma)) {
50835 + pte_unmap_unlock(pte, ptl);
50836 + lock_page(page_m);
50837 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
50838 + if (pte_same(entry, *pte))
50839 + pax_mirror_anon_pte(vma, address, page_m, ptl);
50840 + else
50841 + unlock_page(page_m);
50842 + }
50843 + } else
50844 + pax_mirror_file_pte(vma, address, page_m, ptl);
50845 +
50846 +out:
50847 + pte_unmap_unlock(pte, ptl);
50848 +}
50849 +#endif
50850 +
50851 /*
50852 * This routine handles present pages, when users try to write
50853 * to a shared page. It is done by copying the page to a new address
50854 @@ -2275,6 +2463,12 @@ gotten:
50855 */
50856 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
50857 if (likely(pte_same(*page_table, orig_pte))) {
50858 +
50859 +#ifdef CONFIG_PAX_SEGMEXEC
50860 + if (pax_find_mirror_vma(vma))
50861 + BUG_ON(!trylock_page(new_page));
50862 +#endif
50863 +
50864 if (old_page) {
50865 if (!PageAnon(old_page)) {
50866 dec_mm_counter_fast(mm, MM_FILEPAGES);
50867 @@ -2326,6 +2520,10 @@ gotten:
50868 page_remove_rmap(old_page);
50869 }
50870
50871 +#ifdef CONFIG_PAX_SEGMEXEC
50872 + pax_mirror_anon_pte(vma, address, new_page, ptl);
50873 +#endif
50874 +
50875 /* Free the old page.. */
50876 new_page = old_page;
50877 ret |= VM_FAULT_WRITE;
50878 @@ -2734,6 +2932,11 @@ static int do_swap_page(struct mm_struct
50879 swap_free(entry);
50880 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
50881 try_to_free_swap(page);
50882 +
50883 +#ifdef CONFIG_PAX_SEGMEXEC
50884 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
50885 +#endif
50886 +
50887 unlock_page(page);
50888
50889 if (flags & FAULT_FLAG_WRITE) {
50890 @@ -2745,6 +2948,11 @@ static int do_swap_page(struct mm_struct
50891
50892 /* No need to invalidate - it was non-present before */
50893 update_mmu_cache(vma, address, page_table);
50894 +
50895 +#ifdef CONFIG_PAX_SEGMEXEC
50896 + pax_mirror_anon_pte(vma, address, page, ptl);
50897 +#endif
50898 +
50899 unlock:
50900 pte_unmap_unlock(page_table, ptl);
50901 out:
50902 @@ -2760,33 +2968,6 @@ out_release:
50903 }
50904
50905 /*
50906 - * This is like a special single-page "expand_downwards()",
50907 - * except we must first make sure that 'address-PAGE_SIZE'
50908 - * doesn't hit another vma.
50909 - *
50910 - * The "find_vma()" will do the right thing even if we wrap
50911 - */
50912 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
50913 -{
50914 - address &= PAGE_MASK;
50915 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
50916 - struct vm_area_struct *prev = vma->vm_prev;
50917 -
50918 - /*
50919 - * Is there a mapping abutting this one below?
50920 - *
50921 - * That's only ok if it's the same stack mapping
50922 - * that has gotten split..
50923 - */
50924 - if (prev && prev->vm_end == address)
50925 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
50926 -
50927 - expand_stack(vma, address - PAGE_SIZE);
50928 - }
50929 - return 0;
50930 -}
50931 -
50932 -/*
50933 * We enter with non-exclusive mmap_sem (to exclude vma changes,
50934 * but allow concurrent faults), and pte mapped but not yet locked.
50935 * We return with mmap_sem still held, but pte unmapped and unlocked.
50936 @@ -2795,27 +2976,23 @@ static int do_anonymous_page(struct mm_s
50937 unsigned long address, pte_t *page_table, pmd_t *pmd,
50938 unsigned int flags)
50939 {
50940 - struct page *page;
50941 + struct page *page = NULL;
50942 spinlock_t *ptl;
50943 pte_t entry;
50944
50945 - pte_unmap(page_table);
50946 -
50947 - /* Check if we need to add a guard page to the stack */
50948 - if (check_stack_guard_page(vma, address) < 0)
50949 - return VM_FAULT_SIGBUS;
50950 -
50951 - /* Use the zero-page for reads */
50952 if (!(flags & FAULT_FLAG_WRITE)) {
50953 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
50954 vma->vm_page_prot));
50955 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
50956 + ptl = pte_lockptr(mm, pmd);
50957 + spin_lock(ptl);
50958 if (!pte_none(*page_table))
50959 goto unlock;
50960 goto setpte;
50961 }
50962
50963 /* Allocate our own private page. */
50964 + pte_unmap(page_table);
50965 +
50966 if (unlikely(anon_vma_prepare(vma)))
50967 goto oom;
50968 page = alloc_zeroed_user_highpage_movable(vma, address);
50969 @@ -2834,6 +3011,11 @@ static int do_anonymous_page(struct mm_s
50970 if (!pte_none(*page_table))
50971 goto release;
50972
50973 +#ifdef CONFIG_PAX_SEGMEXEC
50974 + if (pax_find_mirror_vma(vma))
50975 + BUG_ON(!trylock_page(page));
50976 +#endif
50977 +
50978 inc_mm_counter_fast(mm, MM_ANONPAGES);
50979 page_add_new_anon_rmap(page, vma, address);
50980 setpte:
50981 @@ -2841,6 +3023,12 @@ setpte:
50982
50983 /* No need to invalidate - it was non-present before */
50984 update_mmu_cache(vma, address, page_table);
50985 +
50986 +#ifdef CONFIG_PAX_SEGMEXEC
50987 + if (page)
50988 + pax_mirror_anon_pte(vma, address, page, ptl);
50989 +#endif
50990 +
50991 unlock:
50992 pte_unmap_unlock(page_table, ptl);
50993 return 0;
50994 @@ -2983,6 +3171,12 @@ static int __do_fault(struct mm_struct *
50995 */
50996 /* Only go through if we didn't race with anybody else... */
50997 if (likely(pte_same(*page_table, orig_pte))) {
50998 +
50999 +#ifdef CONFIG_PAX_SEGMEXEC
51000 + if (anon && pax_find_mirror_vma(vma))
51001 + BUG_ON(!trylock_page(page));
51002 +#endif
51003 +
51004 flush_icache_page(vma, page);
51005 entry = mk_pte(page, vma->vm_page_prot);
51006 if (flags & FAULT_FLAG_WRITE)
51007 @@ -3002,6 +3196,14 @@ static int __do_fault(struct mm_struct *
51008
51009 /* no need to invalidate: a not-present page won't be cached */
51010 update_mmu_cache(vma, address, page_table);
51011 +
51012 +#ifdef CONFIG_PAX_SEGMEXEC
51013 + if (anon)
51014 + pax_mirror_anon_pte(vma, address, page, ptl);
51015 + else
51016 + pax_mirror_file_pte(vma, address, page, ptl);
51017 +#endif
51018 +
51019 } else {
51020 if (charged)
51021 mem_cgroup_uncharge_page(page);
51022 @@ -3149,6 +3351,12 @@ static inline int handle_pte_fault(struc
51023 if (flags & FAULT_FLAG_WRITE)
51024 flush_tlb_page(vma, address);
51025 }
51026 +
51027 +#ifdef CONFIG_PAX_SEGMEXEC
51028 + pax_mirror_pte(vma, address, pte, pmd, ptl);
51029 + return 0;
51030 +#endif
51031 +
51032 unlock:
51033 pte_unmap_unlock(pte, ptl);
51034 return 0;
51035 @@ -3165,6 +3373,10 @@ int handle_mm_fault(struct mm_struct *mm
51036 pmd_t *pmd;
51037 pte_t *pte;
51038
51039 +#ifdef CONFIG_PAX_SEGMEXEC
51040 + struct vm_area_struct *vma_m;
51041 +#endif
51042 +
51043 __set_current_state(TASK_RUNNING);
51044
51045 count_vm_event(PGFAULT);
51046 @@ -3175,6 +3387,34 @@ int handle_mm_fault(struct mm_struct *mm
51047 if (unlikely(is_vm_hugetlb_page(vma)))
51048 return hugetlb_fault(mm, vma, address, flags);
51049
51050 +#ifdef CONFIG_PAX_SEGMEXEC
51051 + vma_m = pax_find_mirror_vma(vma);
51052 + if (vma_m) {
51053 + unsigned long address_m;
51054 + pgd_t *pgd_m;
51055 + pud_t *pud_m;
51056 + pmd_t *pmd_m;
51057 +
51058 + if (vma->vm_start > vma_m->vm_start) {
51059 + address_m = address;
51060 + address -= SEGMEXEC_TASK_SIZE;
51061 + vma = vma_m;
51062 + } else
51063 + address_m = address + SEGMEXEC_TASK_SIZE;
51064 +
51065 + pgd_m = pgd_offset(mm, address_m);
51066 + pud_m = pud_alloc(mm, pgd_m, address_m);
51067 + if (!pud_m)
51068 + return VM_FAULT_OOM;
51069 + pmd_m = pmd_alloc(mm, pud_m, address_m);
51070 + if (!pmd_m)
51071 + return VM_FAULT_OOM;
51072 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
51073 + return VM_FAULT_OOM;
51074 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
51075 + }
51076 +#endif
51077 +
51078 pgd = pgd_offset(mm, address);
51079 pud = pud_alloc(mm, pgd, address);
51080 if (!pud)
51081 @@ -3272,7 +3512,7 @@ static int __init gate_vma_init(void)
51082 gate_vma.vm_start = FIXADDR_USER_START;
51083 gate_vma.vm_end = FIXADDR_USER_END;
51084 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
51085 - gate_vma.vm_page_prot = __P101;
51086 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
51087 /*
51088 * Make sure the vDSO gets into every core dump.
51089 * Dumping its contents makes post-mortem fully interpretable later
51090 diff -urNp linux-2.6.35.4/mm/memory-failure.c linux-2.6.35.4/mm/memory-failure.c
51091 --- linux-2.6.35.4/mm/memory-failure.c 2010-08-26 19:47:12.000000000 -0400
51092 +++ linux-2.6.35.4/mm/memory-failure.c 2010-09-17 20:12:09.000000000 -0400
51093 @@ -51,7 +51,7 @@ int sysctl_memory_failure_early_kill __r
51094
51095 int sysctl_memory_failure_recovery __read_mostly = 1;
51096
51097 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
51098 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
51099
51100 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
51101
51102 @@ -939,7 +939,7 @@ int __memory_failure(unsigned long pfn,
51103 return 0;
51104 }
51105
51106 - atomic_long_add(1, &mce_bad_pages);
51107 + atomic_long_add_unchecked(1, &mce_bad_pages);
51108
51109 /*
51110 * We need/can do nothing about count=0 pages.
51111 @@ -1003,7 +1003,7 @@ int __memory_failure(unsigned long pfn,
51112 }
51113 if (hwpoison_filter(p)) {
51114 if (TestClearPageHWPoison(p))
51115 - atomic_long_dec(&mce_bad_pages);
51116 + atomic_long_dec_unchecked(&mce_bad_pages);
51117 unlock_page(p);
51118 put_page(p);
51119 return 0;
51120 @@ -1096,7 +1096,7 @@ int unpoison_memory(unsigned long pfn)
51121
51122 if (!get_page_unless_zero(page)) {
51123 if (TestClearPageHWPoison(p))
51124 - atomic_long_dec(&mce_bad_pages);
51125 + atomic_long_dec_unchecked(&mce_bad_pages);
51126 pr_debug("MCE: Software-unpoisoned free page %#lx\n", pfn);
51127 return 0;
51128 }
51129 @@ -1110,7 +1110,7 @@ int unpoison_memory(unsigned long pfn)
51130 */
51131 if (TestClearPageHWPoison(p)) {
51132 pr_debug("MCE: Software-unpoisoned page %#lx\n", pfn);
51133 - atomic_long_dec(&mce_bad_pages);
51134 + atomic_long_dec_unchecked(&mce_bad_pages);
51135 freeit = 1;
51136 }
51137 unlock_page(page);
51138 @@ -1291,7 +1291,7 @@ int soft_offline_page(struct page *page,
51139 return ret;
51140
51141 done:
51142 - atomic_long_add(1, &mce_bad_pages);
51143 + atomic_long_add_unchecked(1, &mce_bad_pages);
51144 SetPageHWPoison(page);
51145 /* keep elevated page count for bad page */
51146 return ret;
51147 diff -urNp linux-2.6.35.4/mm/mempolicy.c linux-2.6.35.4/mm/mempolicy.c
51148 --- linux-2.6.35.4/mm/mempolicy.c 2010-08-26 19:47:12.000000000 -0400
51149 +++ linux-2.6.35.4/mm/mempolicy.c 2010-09-17 20:12:37.000000000 -0400
51150 @@ -642,6 +642,10 @@ static int mbind_range(struct mm_struct
51151 unsigned long vmstart;
51152 unsigned long vmend;
51153
51154 +#ifdef CONFIG_PAX_SEGMEXEC
51155 + struct vm_area_struct *vma_m;
51156 +#endif
51157 +
51158 vma = find_vma_prev(mm, start, &prev);
51159 if (!vma || vma->vm_start > start)
51160 return -EFAULT;
51161 @@ -672,6 +676,16 @@ static int mbind_range(struct mm_struct
51162 err = policy_vma(vma, new_pol);
51163 if (err)
51164 goto out;
51165 +
51166 +#ifdef CONFIG_PAX_SEGMEXEC
51167 + vma_m = pax_find_mirror_vma(vma);
51168 + if (vma_m) {
51169 + err = policy_vma(vma_m, new_pol);
51170 + if (err)
51171 + goto out;
51172 + }
51173 +#endif
51174 +
51175 }
51176
51177 out:
51178 @@ -1098,6 +1112,17 @@ static long do_mbind(unsigned long start
51179
51180 if (end < start)
51181 return -EINVAL;
51182 +
51183 +#ifdef CONFIG_PAX_SEGMEXEC
51184 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
51185 + if (end > SEGMEXEC_TASK_SIZE)
51186 + return -EINVAL;
51187 + } else
51188 +#endif
51189 +
51190 + if (end > TASK_SIZE)
51191 + return -EINVAL;
51192 +
51193 if (end == start)
51194 return 0;
51195
51196 @@ -1303,6 +1328,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
51197 if (!mm)
51198 return -EINVAL;
51199
51200 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51201 + if (mm != current->mm &&
51202 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
51203 + err = -EPERM;
51204 + goto out;
51205 + }
51206 +#endif
51207 +
51208 /*
51209 * Check if this process has the right to modify the specified
51210 * process. The right exists if the process has administrative
51211 @@ -1312,8 +1345,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
51212 rcu_read_lock();
51213 tcred = __task_cred(task);
51214 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
51215 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
51216 - !capable(CAP_SYS_NICE)) {
51217 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
51218 rcu_read_unlock();
51219 err = -EPERM;
51220 goto out;
51221 @@ -2564,7 +2596,7 @@ int show_numa_map(struct seq_file *m, vo
51222
51223 if (file) {
51224 seq_printf(m, " file=");
51225 - seq_path(m, &file->f_path, "\n\t= ");
51226 + seq_path(m, &file->f_path, "\n\t\\= ");
51227 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
51228 seq_printf(m, " heap");
51229 } else if (vma->vm_start <= mm->start_stack &&
51230 diff -urNp linux-2.6.35.4/mm/migrate.c linux-2.6.35.4/mm/migrate.c
51231 --- linux-2.6.35.4/mm/migrate.c 2010-08-26 19:47:12.000000000 -0400
51232 +++ linux-2.6.35.4/mm/migrate.c 2010-09-17 20:12:37.000000000 -0400
51233 @@ -1102,6 +1102,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
51234 if (!mm)
51235 return -EINVAL;
51236
51237 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51238 + if (mm != current->mm &&
51239 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
51240 + err = -EPERM;
51241 + goto out;
51242 + }
51243 +#endif
51244 +
51245 /*
51246 * Check if this process has the right to modify the specified
51247 * process. The right exists if the process has administrative
51248 @@ -1111,8 +1119,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
51249 rcu_read_lock();
51250 tcred = __task_cred(task);
51251 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
51252 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
51253 - !capable(CAP_SYS_NICE)) {
51254 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
51255 rcu_read_unlock();
51256 err = -EPERM;
51257 goto out;
51258 diff -urNp linux-2.6.35.4/mm/mlock.c linux-2.6.35.4/mm/mlock.c
51259 --- linux-2.6.35.4/mm/mlock.c 2010-08-26 19:47:12.000000000 -0400
51260 +++ linux-2.6.35.4/mm/mlock.c 2010-09-17 20:12:37.000000000 -0400
51261 @@ -13,6 +13,7 @@
51262 #include <linux/pagemap.h>
51263 #include <linux/mempolicy.h>
51264 #include <linux/syscalls.h>
51265 +#include <linux/security.h>
51266 #include <linux/sched.h>
51267 #include <linux/module.h>
51268 #include <linux/rmap.h>
51269 @@ -135,19 +136,6 @@ void munlock_vma_page(struct page *page)
51270 }
51271 }
51272
51273 -/* Is the vma a continuation of the stack vma above it? */
51274 -static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
51275 -{
51276 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
51277 -}
51278 -
51279 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
51280 -{
51281 - return (vma->vm_flags & VM_GROWSDOWN) &&
51282 - (vma->vm_start == addr) &&
51283 - !vma_stack_continue(vma->vm_prev, addr);
51284 -}
51285 -
51286 /**
51287 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
51288 * @vma: target vma
51289 @@ -180,12 +168,6 @@ static long __mlock_vma_pages_range(stru
51290 if (vma->vm_flags & VM_WRITE)
51291 gup_flags |= FOLL_WRITE;
51292
51293 - /* We don't try to access the guard page of a stack vma */
51294 - if (stack_guard_page(vma, start)) {
51295 - addr += PAGE_SIZE;
51296 - nr_pages--;
51297 - }
51298 -
51299 while (nr_pages > 0) {
51300 int i;
51301
51302 @@ -451,6 +433,9 @@ static int do_mlock(unsigned long start,
51303 return -EINVAL;
51304 if (end == start)
51305 return 0;
51306 + if (end > TASK_SIZE)
51307 + return -EINVAL;
51308 +
51309 vma = find_vma_prev(current->mm, start, &prev);
51310 if (!vma || vma->vm_start > start)
51311 return -ENOMEM;
51312 @@ -461,6 +446,11 @@ static int do_mlock(unsigned long start,
51313 for (nstart = start ; ; ) {
51314 unsigned int newflags;
51315
51316 +#ifdef CONFIG_PAX_SEGMEXEC
51317 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
51318 + break;
51319 +#endif
51320 +
51321 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
51322
51323 newflags = vma->vm_flags | VM_LOCKED;
51324 @@ -510,6 +500,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
51325 lock_limit >>= PAGE_SHIFT;
51326
51327 /* check against resource limits */
51328 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
51329 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
51330 error = do_mlock(start, len, 1);
51331 up_write(&current->mm->mmap_sem);
51332 @@ -531,17 +522,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
51333 static int do_mlockall(int flags)
51334 {
51335 struct vm_area_struct * vma, * prev = NULL;
51336 - unsigned int def_flags = 0;
51337
51338 if (flags & MCL_FUTURE)
51339 - def_flags = VM_LOCKED;
51340 - current->mm->def_flags = def_flags;
51341 + current->mm->def_flags |= VM_LOCKED;
51342 + else
51343 + current->mm->def_flags &= ~VM_LOCKED;
51344 if (flags == MCL_FUTURE)
51345 goto out;
51346
51347 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
51348 - unsigned int newflags;
51349 + unsigned long newflags;
51350 +
51351 +#ifdef CONFIG_PAX_SEGMEXEC
51352 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
51353 + break;
51354 +#endif
51355
51356 + BUG_ON(vma->vm_end > TASK_SIZE);
51357 newflags = vma->vm_flags | VM_LOCKED;
51358 if (!(flags & MCL_CURRENT))
51359 newflags &= ~VM_LOCKED;
51360 @@ -573,6 +570,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
51361 lock_limit >>= PAGE_SHIFT;
51362
51363 ret = -ENOMEM;
51364 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
51365 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
51366 capable(CAP_IPC_LOCK))
51367 ret = do_mlockall(flags);
51368 diff -urNp linux-2.6.35.4/mm/mmap.c linux-2.6.35.4/mm/mmap.c
51369 --- linux-2.6.35.4/mm/mmap.c 2010-08-26 19:47:12.000000000 -0400
51370 +++ linux-2.6.35.4/mm/mmap.c 2010-09-17 20:12:37.000000000 -0400
51371 @@ -44,6 +44,16 @@
51372 #define arch_rebalance_pgtables(addr, len) (addr)
51373 #endif
51374
51375 +static inline void verify_mm_writelocked(struct mm_struct *mm)
51376 +{
51377 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
51378 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
51379 + up_read(&mm->mmap_sem);
51380 + BUG();
51381 + }
51382 +#endif
51383 +}
51384 +
51385 static void unmap_region(struct mm_struct *mm,
51386 struct vm_area_struct *vma, struct vm_area_struct *prev,
51387 unsigned long start, unsigned long end);
51388 @@ -69,22 +79,32 @@ static void unmap_region(struct mm_struc
51389 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
51390 *
51391 */
51392 -pgprot_t protection_map[16] = {
51393 +pgprot_t protection_map[16] __read_only = {
51394 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
51395 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
51396 };
51397
51398 pgprot_t vm_get_page_prot(unsigned long vm_flags)
51399 {
51400 - return __pgprot(pgprot_val(protection_map[vm_flags &
51401 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
51402 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
51403 pgprot_val(arch_vm_get_page_prot(vm_flags)));
51404 +
51405 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
51406 + if (!(__supported_pte_mask & _PAGE_NX) &&
51407 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
51408 + (vm_flags & (VM_READ | VM_WRITE)))
51409 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
51410 +#endif
51411 +
51412 + return prot;
51413 }
51414 EXPORT_SYMBOL(vm_get_page_prot);
51415
51416 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
51417 int sysctl_overcommit_ratio = 50; /* default is 50% */
51418 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
51419 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
51420 struct percpu_counter vm_committed_as;
51421
51422 /*
51423 @@ -230,6 +250,7 @@ static struct vm_area_struct *remove_vma
51424 struct vm_area_struct *next = vma->vm_next;
51425
51426 might_sleep();
51427 + BUG_ON(vma->vm_mirror);
51428 if (vma->vm_ops && vma->vm_ops->close)
51429 vma->vm_ops->close(vma);
51430 if (vma->vm_file) {
51431 @@ -266,6 +287,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
51432 * not page aligned -Ram Gupta
51433 */
51434 rlim = rlimit(RLIMIT_DATA);
51435 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
51436 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
51437 (mm->end_data - mm->start_data) > rlim)
51438 goto out;
51439 @@ -695,6 +717,12 @@ static int
51440 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
51441 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
51442 {
51443 +
51444 +#ifdef CONFIG_PAX_SEGMEXEC
51445 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
51446 + return 0;
51447 +#endif
51448 +
51449 if (is_mergeable_vma(vma, file, vm_flags) &&
51450 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
51451 if (vma->vm_pgoff == vm_pgoff)
51452 @@ -714,6 +742,12 @@ static int
51453 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
51454 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
51455 {
51456 +
51457 +#ifdef CONFIG_PAX_SEGMEXEC
51458 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
51459 + return 0;
51460 +#endif
51461 +
51462 if (is_mergeable_vma(vma, file, vm_flags) &&
51463 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
51464 pgoff_t vm_pglen;
51465 @@ -756,13 +790,20 @@ can_vma_merge_after(struct vm_area_struc
51466 struct vm_area_struct *vma_merge(struct mm_struct *mm,
51467 struct vm_area_struct *prev, unsigned long addr,
51468 unsigned long end, unsigned long vm_flags,
51469 - struct anon_vma *anon_vma, struct file *file,
51470 + struct anon_vma *anon_vma, struct file *file,
51471 pgoff_t pgoff, struct mempolicy *policy)
51472 {
51473 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
51474 struct vm_area_struct *area, *next;
51475 int err;
51476
51477 +#ifdef CONFIG_PAX_SEGMEXEC
51478 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
51479 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
51480 +
51481 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
51482 +#endif
51483 +
51484 /*
51485 * We later require that vma->vm_flags == vm_flags,
51486 * so this tests vma->vm_flags & VM_SPECIAL, too.
51487 @@ -778,6 +819,15 @@ struct vm_area_struct *vma_merge(struct
51488 if (next && next->vm_end == end) /* cases 6, 7, 8 */
51489 next = next->vm_next;
51490
51491 +#ifdef CONFIG_PAX_SEGMEXEC
51492 + if (prev)
51493 + prev_m = pax_find_mirror_vma(prev);
51494 + if (area)
51495 + area_m = pax_find_mirror_vma(area);
51496 + if (next)
51497 + next_m = pax_find_mirror_vma(next);
51498 +#endif
51499 +
51500 /*
51501 * Can it merge with the predecessor?
51502 */
51503 @@ -797,9 +847,24 @@ struct vm_area_struct *vma_merge(struct
51504 /* cases 1, 6 */
51505 err = vma_adjust(prev, prev->vm_start,
51506 next->vm_end, prev->vm_pgoff, NULL);
51507 - } else /* cases 2, 5, 7 */
51508 +
51509 +#ifdef CONFIG_PAX_SEGMEXEC
51510 + if (!err && prev_m)
51511 + err = vma_adjust(prev_m, prev_m->vm_start,
51512 + next_m->vm_end, prev_m->vm_pgoff, NULL);
51513 +#endif
51514 +
51515 + } else { /* cases 2, 5, 7 */
51516 err = vma_adjust(prev, prev->vm_start,
51517 end, prev->vm_pgoff, NULL);
51518 +
51519 +#ifdef CONFIG_PAX_SEGMEXEC
51520 + if (!err && prev_m)
51521 + err = vma_adjust(prev_m, prev_m->vm_start,
51522 + end_m, prev_m->vm_pgoff, NULL);
51523 +#endif
51524 +
51525 + }
51526 if (err)
51527 return NULL;
51528 return prev;
51529 @@ -812,12 +877,27 @@ struct vm_area_struct *vma_merge(struct
51530 mpol_equal(policy, vma_policy(next)) &&
51531 can_vma_merge_before(next, vm_flags,
51532 anon_vma, file, pgoff+pglen)) {
51533 - if (prev && addr < prev->vm_end) /* case 4 */
51534 + if (prev && addr < prev->vm_end) { /* case 4 */
51535 err = vma_adjust(prev, prev->vm_start,
51536 addr, prev->vm_pgoff, NULL);
51537 - else /* cases 3, 8 */
51538 +
51539 +#ifdef CONFIG_PAX_SEGMEXEC
51540 + if (!err && prev_m)
51541 + err = vma_adjust(prev_m, prev_m->vm_start,
51542 + addr_m, prev_m->vm_pgoff, NULL);
51543 +#endif
51544 +
51545 + } else { /* cases 3, 8 */
51546 err = vma_adjust(area, addr, next->vm_end,
51547 next->vm_pgoff - pglen, NULL);
51548 +
51549 +#ifdef CONFIG_PAX_SEGMEXEC
51550 + if (!err && area_m)
51551 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
51552 + next_m->vm_pgoff - pglen, NULL);
51553 +#endif
51554 +
51555 + }
51556 if (err)
51557 return NULL;
51558 return area;
51559 @@ -932,14 +1012,11 @@ none:
51560 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
51561 struct file *file, long pages)
51562 {
51563 - const unsigned long stack_flags
51564 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
51565 -
51566 if (file) {
51567 mm->shared_vm += pages;
51568 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
51569 mm->exec_vm += pages;
51570 - } else if (flags & stack_flags)
51571 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
51572 mm->stack_vm += pages;
51573 if (flags & (VM_RESERVED|VM_IO))
51574 mm->reserved_vm += pages;
51575 @@ -966,7 +1043,7 @@ unsigned long do_mmap_pgoff(struct file
51576 * (the exception is when the underlying filesystem is noexec
51577 * mounted, in which case we dont add PROT_EXEC.)
51578 */
51579 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
51580 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
51581 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
51582 prot |= PROT_EXEC;
51583
51584 @@ -992,7 +1069,7 @@ unsigned long do_mmap_pgoff(struct file
51585 /* Obtain the address to map to. we verify (or select) it and ensure
51586 * that it represents a valid section of the address space.
51587 */
51588 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
51589 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
51590 if (addr & ~PAGE_MASK)
51591 return addr;
51592
51593 @@ -1003,6 +1080,28 @@ unsigned long do_mmap_pgoff(struct file
51594 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
51595 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
51596
51597 +#ifdef CONFIG_PAX_MPROTECT
51598 + if (mm->pax_flags & MF_PAX_MPROTECT) {
51599 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
51600 +
51601 +#ifdef CONFIG_PAX_EMUPLT
51602 + vm_flags &= ~VM_EXEC;
51603 +#else
51604 + return -EPERM;
51605 +#endif
51606 +
51607 + if (!(vm_flags & VM_EXEC))
51608 + vm_flags &= ~VM_MAYEXEC;
51609 + else
51610 + vm_flags &= ~VM_MAYWRITE;
51611 + }
51612 +#endif
51613 +
51614 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
51615 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
51616 + vm_flags &= ~VM_PAGEEXEC;
51617 +#endif
51618 +
51619 if (flags & MAP_LOCKED)
51620 if (!can_do_mlock())
51621 return -EPERM;
51622 @@ -1014,6 +1113,7 @@ unsigned long do_mmap_pgoff(struct file
51623 locked += mm->locked_vm;
51624 lock_limit = rlimit(RLIMIT_MEMLOCK);
51625 lock_limit >>= PAGE_SHIFT;
51626 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
51627 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
51628 return -EAGAIN;
51629 }
51630 @@ -1084,6 +1184,9 @@ unsigned long do_mmap_pgoff(struct file
51631 if (error)
51632 return error;
51633
51634 + if (!gr_acl_handle_mmap(file, prot))
51635 + return -EACCES;
51636 +
51637 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
51638 }
51639 EXPORT_SYMBOL(do_mmap_pgoff);
51640 @@ -1160,10 +1263,10 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_ar
51641 */
51642 int vma_wants_writenotify(struct vm_area_struct *vma)
51643 {
51644 - unsigned int vm_flags = vma->vm_flags;
51645 + unsigned long vm_flags = vma->vm_flags;
51646
51647 /* If it was private or non-writable, the write bit is already clear */
51648 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
51649 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
51650 return 0;
51651
51652 /* The backer wishes to know when pages are first written to? */
51653 @@ -1212,14 +1315,24 @@ unsigned long mmap_region(struct file *f
51654 unsigned long charged = 0;
51655 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
51656
51657 +#ifdef CONFIG_PAX_SEGMEXEC
51658 + struct vm_area_struct *vma_m = NULL;
51659 +#endif
51660 +
51661 + /*
51662 + * mm->mmap_sem is required to protect against another thread
51663 + * changing the mappings in case we sleep.
51664 + */
51665 + verify_mm_writelocked(mm);
51666 +
51667 /* Clear old maps */
51668 error = -ENOMEM;
51669 -munmap_back:
51670 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
51671 if (vma && vma->vm_start < addr + len) {
51672 if (do_munmap(mm, addr, len))
51673 return -ENOMEM;
51674 - goto munmap_back;
51675 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
51676 + BUG_ON(vma && vma->vm_start < addr + len);
51677 }
51678
51679 /* Check against address space limit. */
51680 @@ -1268,6 +1381,16 @@ munmap_back:
51681 goto unacct_error;
51682 }
51683
51684 +#ifdef CONFIG_PAX_SEGMEXEC
51685 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
51686 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
51687 + if (!vma_m) {
51688 + error = -ENOMEM;
51689 + goto free_vma;
51690 + }
51691 + }
51692 +#endif
51693 +
51694 vma->vm_mm = mm;
51695 vma->vm_start = addr;
51696 vma->vm_end = addr + len;
51697 @@ -1291,6 +1414,19 @@ munmap_back:
51698 error = file->f_op->mmap(file, vma);
51699 if (error)
51700 goto unmap_and_free_vma;
51701 +
51702 +#ifdef CONFIG_PAX_SEGMEXEC
51703 + if (vma_m && (vm_flags & VM_EXECUTABLE))
51704 + added_exe_file_vma(mm);
51705 +#endif
51706 +
51707 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
51708 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
51709 + vma->vm_flags |= VM_PAGEEXEC;
51710 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
51711 + }
51712 +#endif
51713 +
51714 if (vm_flags & VM_EXECUTABLE)
51715 added_exe_file_vma(mm);
51716
51717 @@ -1326,6 +1462,11 @@ munmap_back:
51718 vma_link(mm, vma, prev, rb_link, rb_parent);
51719 file = vma->vm_file;
51720
51721 +#ifdef CONFIG_PAX_SEGMEXEC
51722 + if (vma_m)
51723 + BUG_ON(pax_mirror_vma(vma_m, vma));
51724 +#endif
51725 +
51726 /* Once vma denies write, undo our temporary denial count */
51727 if (correct_wcount)
51728 atomic_inc(&inode->i_writecount);
51729 @@ -1334,6 +1475,7 @@ out:
51730
51731 mm->total_vm += len >> PAGE_SHIFT;
51732 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
51733 + track_exec_limit(mm, addr, addr + len, vm_flags);
51734 if (vm_flags & VM_LOCKED) {
51735 if (!mlock_vma_pages_range(vma, addr, addr + len))
51736 mm->locked_vm += (len >> PAGE_SHIFT);
51737 @@ -1351,6 +1493,12 @@ unmap_and_free_vma:
51738 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
51739 charged = 0;
51740 free_vma:
51741 +
51742 +#ifdef CONFIG_PAX_SEGMEXEC
51743 + if (vma_m)
51744 + kmem_cache_free(vm_area_cachep, vma_m);
51745 +#endif
51746 +
51747 kmem_cache_free(vm_area_cachep, vma);
51748 unacct_error:
51749 if (charged)
51750 @@ -1358,6 +1506,33 @@ unacct_error:
51751 return error;
51752 }
51753
51754 +bool check_heap_stack_gap(struct vm_area_struct *vma, unsigned long addr, unsigned long len)
51755 +{
51756 + if (!vma) {
51757 +#ifdef CONFIG_STACK_GROWSUP
51758 + if (addr > sysctl_heap_stack_gap)
51759 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
51760 + else
51761 + vma = find_vma(current->mm, 0);
51762 + if (vma && (vma->vm_flags & VM_GROWSUP))
51763 + return false;
51764 +#endif
51765 + return true;
51766 + }
51767 +
51768 + if (addr + len > vma->vm_start)
51769 + return false;
51770 +
51771 + if (vma->vm_flags & VM_GROWSDOWN)
51772 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
51773 +#ifdef CONFIG_STACK_GROWSUP
51774 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
51775 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
51776 +#endif
51777 +
51778 + return true;
51779 +}
51780 +
51781 /* Get an address range which is currently unmapped.
51782 * For shmat() with addr=0.
51783 *
51784 @@ -1384,18 +1559,23 @@ arch_get_unmapped_area(struct file *filp
51785 if (flags & MAP_FIXED)
51786 return addr;
51787
51788 +#ifdef CONFIG_PAX_RANDMMAP
51789 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
51790 +#endif
51791 +
51792 if (addr) {
51793 addr = PAGE_ALIGN(addr);
51794 - vma = find_vma(mm, addr);
51795 - if (TASK_SIZE - len >= addr &&
51796 - (!vma || addr + len <= vma->vm_start))
51797 - return addr;
51798 + if (TASK_SIZE - len >= addr) {
51799 + vma = find_vma(mm, addr);
51800 + if (check_heap_stack_gap(vma, addr, len))
51801 + return addr;
51802 + }
51803 }
51804 if (len > mm->cached_hole_size) {
51805 - start_addr = addr = mm->free_area_cache;
51806 + start_addr = addr = mm->free_area_cache;
51807 } else {
51808 - start_addr = addr = TASK_UNMAPPED_BASE;
51809 - mm->cached_hole_size = 0;
51810 + start_addr = addr = mm->mmap_base;
51811 + mm->cached_hole_size = 0;
51812 }
51813
51814 full_search:
51815 @@ -1406,34 +1586,40 @@ full_search:
51816 * Start a new search - just in case we missed
51817 * some holes.
51818 */
51819 - if (start_addr != TASK_UNMAPPED_BASE) {
51820 - addr = TASK_UNMAPPED_BASE;
51821 - start_addr = addr;
51822 + if (start_addr != mm->mmap_base) {
51823 + start_addr = addr = mm->mmap_base;
51824 mm->cached_hole_size = 0;
51825 goto full_search;
51826 }
51827 return -ENOMEM;
51828 }
51829 - if (!vma || addr + len <= vma->vm_start) {
51830 - /*
51831 - * Remember the place where we stopped the search:
51832 - */
51833 - mm->free_area_cache = addr + len;
51834 - return addr;
51835 - }
51836 + if (check_heap_stack_gap(vma, addr, len))
51837 + break;
51838 if (addr + mm->cached_hole_size < vma->vm_start)
51839 mm->cached_hole_size = vma->vm_start - addr;
51840 addr = vma->vm_end;
51841 }
51842 +
51843 + /*
51844 + * Remember the place where we stopped the search:
51845 + */
51846 + mm->free_area_cache = addr + len;
51847 + return addr;
51848 }
51849 #endif
51850
51851 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
51852 {
51853 +
51854 +#ifdef CONFIG_PAX_SEGMEXEC
51855 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
51856 + return;
51857 +#endif
51858 +
51859 /*
51860 * Is this a new hole at the lowest possible address?
51861 */
51862 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
51863 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
51864 mm->free_area_cache = addr;
51865 mm->cached_hole_size = ~0UL;
51866 }
51867 @@ -1451,7 +1637,7 @@ arch_get_unmapped_area_topdown(struct fi
51868 {
51869 struct vm_area_struct *vma;
51870 struct mm_struct *mm = current->mm;
51871 - unsigned long addr = addr0;
51872 + unsigned long base = mm->mmap_base, addr = addr0;
51873
51874 /* requested length too big for entire address space */
51875 if (len > TASK_SIZE)
51876 @@ -1460,13 +1646,18 @@ arch_get_unmapped_area_topdown(struct fi
51877 if (flags & MAP_FIXED)
51878 return addr;
51879
51880 +#ifdef CONFIG_PAX_RANDMMAP
51881 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
51882 +#endif
51883 +
51884 /* requesting a specific address */
51885 if (addr) {
51886 addr = PAGE_ALIGN(addr);
51887 - vma = find_vma(mm, addr);
51888 - if (TASK_SIZE - len >= addr &&
51889 - (!vma || addr + len <= vma->vm_start))
51890 - return addr;
51891 + if (TASK_SIZE - len >= addr) {
51892 + vma = find_vma(mm, addr);
51893 + if (check_heap_stack_gap(vma, addr, len))
51894 + return addr;
51895 + }
51896 }
51897
51898 /* check if free_area_cache is useful for us */
51899 @@ -1481,7 +1672,7 @@ arch_get_unmapped_area_topdown(struct fi
51900 /* make sure it can fit in the remaining address space */
51901 if (addr > len) {
51902 vma = find_vma(mm, addr-len);
51903 - if (!vma || addr <= vma->vm_start)
51904 + if (check_heap_stack_gap(vma, addr - len, len))
51905 /* remember the address as a hint for next time */
51906 return (mm->free_area_cache = addr-len);
51907 }
51908 @@ -1498,7 +1689,7 @@ arch_get_unmapped_area_topdown(struct fi
51909 * return with success:
51910 */
51911 vma = find_vma(mm, addr);
51912 - if (!vma || addr+len <= vma->vm_start)
51913 + if (check_heap_stack_gap(vma, addr, len))
51914 /* remember the address as a hint for next time */
51915 return (mm->free_area_cache = addr);
51916
51917 @@ -1517,13 +1708,21 @@ bottomup:
51918 * can happen with large stack limits and large mmap()
51919 * allocations.
51920 */
51921 + mm->mmap_base = TASK_UNMAPPED_BASE;
51922 +
51923 +#ifdef CONFIG_PAX_RANDMMAP
51924 + if (mm->pax_flags & MF_PAX_RANDMMAP)
51925 + mm->mmap_base += mm->delta_mmap;
51926 +#endif
51927 +
51928 + mm->free_area_cache = mm->mmap_base;
51929 mm->cached_hole_size = ~0UL;
51930 - mm->free_area_cache = TASK_UNMAPPED_BASE;
51931 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
51932 /*
51933 * Restore the topdown base:
51934 */
51935 - mm->free_area_cache = mm->mmap_base;
51936 + mm->mmap_base = base;
51937 + mm->free_area_cache = base;
51938 mm->cached_hole_size = ~0UL;
51939
51940 return addr;
51941 @@ -1532,6 +1731,12 @@ bottomup:
51942
51943 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
51944 {
51945 +
51946 +#ifdef CONFIG_PAX_SEGMEXEC
51947 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
51948 + return;
51949 +#endif
51950 +
51951 /*
51952 * Is this a new hole at the highest possible address?
51953 */
51954 @@ -1539,8 +1744,10 @@ void arch_unmap_area_topdown(struct mm_s
51955 mm->free_area_cache = addr;
51956
51957 /* dont allow allocations above current base */
51958 - if (mm->free_area_cache > mm->mmap_base)
51959 + if (mm->free_area_cache > mm->mmap_base) {
51960 mm->free_area_cache = mm->mmap_base;
51961 + mm->cached_hole_size = ~0UL;
51962 + }
51963 }
51964
51965 unsigned long
51966 @@ -1648,6 +1855,34 @@ out:
51967 return prev ? prev->vm_next : vma;
51968 }
51969
51970 +#ifdef CONFIG_PAX_SEGMEXEC
51971 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
51972 +{
51973 + struct vm_area_struct *vma_m;
51974 +
51975 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
51976 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
51977 + BUG_ON(vma->vm_mirror);
51978 + return NULL;
51979 + }
51980 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
51981 + vma_m = vma->vm_mirror;
51982 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
51983 + BUG_ON(vma->vm_file != vma_m->vm_file);
51984 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
51985 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
51986 + if (vma->anon_vma != vma_m->anon_vma) {
51987 + struct anon_vma_chain *avc, *avc_m;
51988 +
51989 + avc = list_entry(vma->anon_vma_chain.prev, struct anon_vma_chain, same_vma);
51990 + avc_m = list_entry(vma_m->anon_vma_chain.prev, struct anon_vma_chain, same_vma);
51991 + BUG_ON(avc->anon_vma != avc_m->anon_vma);
51992 + }
51993 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
51994 + return vma_m;
51995 +}
51996 +#endif
51997 +
51998 /*
51999 * Verify that the stack growth is acceptable and
52000 * update accounting. This is shared with both the
52001 @@ -1664,6 +1899,7 @@ static int acct_stack_growth(struct vm_a
52002 return -ENOMEM;
52003
52004 /* Stack limit test */
52005 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
52006 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
52007 return -ENOMEM;
52008
52009 @@ -1674,6 +1910,7 @@ static int acct_stack_growth(struct vm_a
52010 locked = mm->locked_vm + grow;
52011 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
52012 limit >>= PAGE_SHIFT;
52013 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
52014 if (locked > limit && !capable(CAP_IPC_LOCK))
52015 return -ENOMEM;
52016 }
52017 @@ -1709,35 +1946,42 @@ static
52018 #endif
52019 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
52020 {
52021 - int error;
52022 + int error, locknext;
52023
52024 if (!(vma->vm_flags & VM_GROWSUP))
52025 return -EFAULT;
52026
52027 + /* Also guard against wrapping around to address 0. */
52028 + if (address < PAGE_ALIGN(address+1))
52029 + address = PAGE_ALIGN(address+1);
52030 + else
52031 + return -ENOMEM;
52032 +
52033 /*
52034 * We must make sure the anon_vma is allocated
52035 * so that the anon_vma locking is not a noop.
52036 */
52037 if (unlikely(anon_vma_prepare(vma)))
52038 return -ENOMEM;
52039 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
52040 + if (locknext && anon_vma_prepare(vma->vm_next))
52041 + return -ENOMEM;
52042 anon_vma_lock(vma);
52043 + if (locknext)
52044 + anon_vma_lock(vma->vm_next);
52045
52046 /*
52047 * vma->vm_start/vm_end cannot change under us because the caller
52048 * is required to hold the mmap_sem in read mode. We need the
52049 - * anon_vma lock to serialize against concurrent expand_stacks.
52050 - * Also guard against wrapping around to address 0.
52051 + * anon_vma locks to serialize against concurrent expand_stacks
52052 + * and expand_upwards.
52053 */
52054 - if (address < PAGE_ALIGN(address+4))
52055 - address = PAGE_ALIGN(address+4);
52056 - else {
52057 - anon_vma_unlock(vma);
52058 - return -ENOMEM;
52059 - }
52060 error = 0;
52061
52062 /* Somebody else might have raced and expanded it already */
52063 - if (address > vma->vm_end) {
52064 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
52065 + error = -ENOMEM;
52066 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
52067 unsigned long size, grow;
52068
52069 size = address - vma->vm_start;
52070 @@ -1747,6 +1991,8 @@ int expand_upwards(struct vm_area_struct
52071 if (!error)
52072 vma->vm_end = address;
52073 }
52074 + if (locknext)
52075 + anon_vma_unlock(vma->vm_next);
52076 anon_vma_unlock(vma);
52077 return error;
52078 }
52079 @@ -1758,7 +2004,8 @@ int expand_upwards(struct vm_area_struct
52080 static int expand_downwards(struct vm_area_struct *vma,
52081 unsigned long address)
52082 {
52083 - int error;
52084 + int error, lockprev = 0;
52085 + struct vm_area_struct *prev;
52086
52087 /*
52088 * We must make sure the anon_vma is allocated
52089 @@ -1772,6 +2019,15 @@ static int expand_downwards(struct vm_ar
52090 if (error)
52091 return error;
52092
52093 + prev = vma->vm_prev;
52094 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
52095 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
52096 +#endif
52097 + if (lockprev && anon_vma_prepare(prev))
52098 + return -ENOMEM;
52099 + if (lockprev)
52100 + anon_vma_lock(prev);
52101 +
52102 anon_vma_lock(vma);
52103
52104 /*
52105 @@ -1781,9 +2037,17 @@ static int expand_downwards(struct vm_ar
52106 */
52107
52108 /* Somebody else might have raced and expanded it already */
52109 - if (address < vma->vm_start) {
52110 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
52111 + error = -ENOMEM;
52112 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
52113 unsigned long size, grow;
52114
52115 +#ifdef CONFIG_PAX_SEGMEXEC
52116 + struct vm_area_struct *vma_m;
52117 +
52118 + vma_m = pax_find_mirror_vma(vma);
52119 +#endif
52120 +
52121 size = vma->vm_end - address;
52122 grow = (vma->vm_start - address) >> PAGE_SHIFT;
52123
52124 @@ -1791,9 +2055,20 @@ static int expand_downwards(struct vm_ar
52125 if (!error) {
52126 vma->vm_start = address;
52127 vma->vm_pgoff -= grow;
52128 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
52129 +
52130 +#ifdef CONFIG_PAX_SEGMEXEC
52131 + if (vma_m) {
52132 + vma_m->vm_start -= grow << PAGE_SHIFT;
52133 + vma_m->vm_pgoff -= grow;
52134 + }
52135 +#endif
52136 +
52137 }
52138 }
52139 anon_vma_unlock(vma);
52140 + if (lockprev)
52141 + anon_vma_unlock(prev);
52142 return error;
52143 }
52144
52145 @@ -1867,6 +2142,13 @@ static void remove_vma_list(struct mm_st
52146 do {
52147 long nrpages = vma_pages(vma);
52148
52149 +#ifdef CONFIG_PAX_SEGMEXEC
52150 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
52151 + vma = remove_vma(vma);
52152 + continue;
52153 + }
52154 +#endif
52155 +
52156 mm->total_vm -= nrpages;
52157 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
52158 vma = remove_vma(vma);
52159 @@ -1912,6 +2194,16 @@ detach_vmas_to_be_unmapped(struct mm_str
52160 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
52161 vma->vm_prev = NULL;
52162 do {
52163 +
52164 +#ifdef CONFIG_PAX_SEGMEXEC
52165 + if (vma->vm_mirror) {
52166 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
52167 + vma->vm_mirror->vm_mirror = NULL;
52168 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
52169 + vma->vm_mirror = NULL;
52170 + }
52171 +#endif
52172 +
52173 rb_erase(&vma->vm_rb, &mm->mm_rb);
52174 mm->map_count--;
52175 tail_vma = vma;
52176 @@ -1940,14 +2232,33 @@ static int __split_vma(struct mm_struct
52177 struct vm_area_struct *new;
52178 int err = -ENOMEM;
52179
52180 +#ifdef CONFIG_PAX_SEGMEXEC
52181 + struct vm_area_struct *vma_m, *new_m = NULL;
52182 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
52183 +#endif
52184 +
52185 if (is_vm_hugetlb_page(vma) && (addr &
52186 ~(huge_page_mask(hstate_vma(vma)))))
52187 return -EINVAL;
52188
52189 +#ifdef CONFIG_PAX_SEGMEXEC
52190 + vma_m = pax_find_mirror_vma(vma);
52191 +#endif
52192 +
52193 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
52194 if (!new)
52195 goto out_err;
52196
52197 +#ifdef CONFIG_PAX_SEGMEXEC
52198 + if (vma_m) {
52199 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
52200 + if (!new_m) {
52201 + kmem_cache_free(vm_area_cachep, new);
52202 + goto out_err;
52203 + }
52204 + }
52205 +#endif
52206 +
52207 /* most fields are the same, copy all, and then fixup */
52208 *new = *vma;
52209
52210 @@ -1960,6 +2271,22 @@ static int __split_vma(struct mm_struct
52211 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
52212 }
52213
52214 +#ifdef CONFIG_PAX_SEGMEXEC
52215 + if (vma_m) {
52216 + *new_m = *vma_m;
52217 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
52218 + new_m->vm_mirror = new;
52219 + new->vm_mirror = new_m;
52220 +
52221 + if (new_below)
52222 + new_m->vm_end = addr_m;
52223 + else {
52224 + new_m->vm_start = addr_m;
52225 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
52226 + }
52227 + }
52228 +#endif
52229 +
52230 pol = mpol_dup(vma_policy(vma));
52231 if (IS_ERR(pol)) {
52232 err = PTR_ERR(pol);
52233 @@ -1985,6 +2312,42 @@ static int __split_vma(struct mm_struct
52234 else
52235 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
52236
52237 +#ifdef CONFIG_PAX_SEGMEXEC
52238 + if (!err && vma_m) {
52239 + if (anon_vma_clone(new_m, vma_m))
52240 + goto out_free_mpol;
52241 +
52242 + mpol_get(pol);
52243 + vma_set_policy(new_m, pol);
52244 +
52245 + if (new_m->vm_file) {
52246 + get_file(new_m->vm_file);
52247 + if (vma_m->vm_flags & VM_EXECUTABLE)
52248 + added_exe_file_vma(mm);
52249 + }
52250 +
52251 + if (new_m->vm_ops && new_m->vm_ops->open)
52252 + new_m->vm_ops->open(new_m);
52253 +
52254 + if (new_below)
52255 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
52256 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
52257 + else
52258 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
52259 +
52260 + if (err) {
52261 + if (new_m->vm_ops && new_m->vm_ops->close)
52262 + new_m->vm_ops->close(new_m);
52263 + if (new_m->vm_file) {
52264 + if (vma_m->vm_flags & VM_EXECUTABLE)
52265 + removed_exe_file_vma(mm);
52266 + fput(new_m->vm_file);
52267 + }
52268 + mpol_put(pol);
52269 + }
52270 + }
52271 +#endif
52272 +
52273 /* Success. */
52274 if (!err)
52275 return 0;
52276 @@ -2000,6 +2363,15 @@ static int __split_vma(struct mm_struct
52277 out_free_mpol:
52278 mpol_put(pol);
52279 out_free_vma:
52280 +
52281 +#ifdef CONFIG_PAX_SEGMEXEC
52282 + if (new_m) {
52283 + unlink_anon_vmas(new_m);
52284 + kmem_cache_free(vm_area_cachep, new_m);
52285 + }
52286 +#endif
52287 +
52288 + unlink_anon_vmas(new);
52289 kmem_cache_free(vm_area_cachep, new);
52290 out_err:
52291 return err;
52292 @@ -2012,6 +2384,15 @@ static int __split_vma(struct mm_struct
52293 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
52294 unsigned long addr, int new_below)
52295 {
52296 +
52297 +#ifdef CONFIG_PAX_SEGMEXEC
52298 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
52299 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
52300 + if (mm->map_count >= sysctl_max_map_count-1)
52301 + return -ENOMEM;
52302 + } else
52303 +#endif
52304 +
52305 if (mm->map_count >= sysctl_max_map_count)
52306 return -ENOMEM;
52307
52308 @@ -2023,11 +2404,30 @@ int split_vma(struct mm_struct *mm, stru
52309 * work. This now handles partial unmappings.
52310 * Jeremy Fitzhardinge <jeremy@goop.org>
52311 */
52312 +#ifdef CONFIG_PAX_SEGMEXEC
52313 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
52314 +{
52315 + int ret = __do_munmap(mm, start, len);
52316 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
52317 + return ret;
52318 +
52319 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
52320 +}
52321 +
52322 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
52323 +#else
52324 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
52325 +#endif
52326 {
52327 unsigned long end;
52328 struct vm_area_struct *vma, *prev, *last;
52329
52330 + /*
52331 + * mm->mmap_sem is required to protect against another thread
52332 + * changing the mappings in case we sleep.
52333 + */
52334 + verify_mm_writelocked(mm);
52335 +
52336 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
52337 return -EINVAL;
52338
52339 @@ -2101,6 +2501,8 @@ int do_munmap(struct mm_struct *mm, unsi
52340 /* Fix up all other VM information */
52341 remove_vma_list(mm, vma);
52342
52343 + track_exec_limit(mm, start, end, 0UL);
52344 +
52345 return 0;
52346 }
52347
52348 @@ -2113,22 +2515,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
52349
52350 profile_munmap(addr);
52351
52352 +#ifdef CONFIG_PAX_SEGMEXEC
52353 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
52354 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
52355 + return -EINVAL;
52356 +#endif
52357 +
52358 down_write(&mm->mmap_sem);
52359 ret = do_munmap(mm, addr, len);
52360 up_write(&mm->mmap_sem);
52361 return ret;
52362 }
52363
52364 -static inline void verify_mm_writelocked(struct mm_struct *mm)
52365 -{
52366 -#ifdef CONFIG_DEBUG_VM
52367 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
52368 - WARN_ON(1);
52369 - up_read(&mm->mmap_sem);
52370 - }
52371 -#endif
52372 -}
52373 -
52374 /*
52375 * this is really a simplified "do_mmap". it only handles
52376 * anonymous maps. eventually we may be able to do some
52377 @@ -2142,6 +2540,7 @@ unsigned long do_brk(unsigned long addr,
52378 struct rb_node ** rb_link, * rb_parent;
52379 pgoff_t pgoff = addr >> PAGE_SHIFT;
52380 int error;
52381 + unsigned long charged;
52382
52383 len = PAGE_ALIGN(len);
52384 if (!len)
52385 @@ -2153,16 +2552,30 @@ unsigned long do_brk(unsigned long addr,
52386
52387 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
52388
52389 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
52390 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
52391 + flags &= ~VM_EXEC;
52392 +
52393 +#ifdef CONFIG_PAX_MPROTECT
52394 + if (mm->pax_flags & MF_PAX_MPROTECT)
52395 + flags &= ~VM_MAYEXEC;
52396 +#endif
52397 +
52398 + }
52399 +#endif
52400 +
52401 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
52402 if (error & ~PAGE_MASK)
52403 return error;
52404
52405 + charged = len >> PAGE_SHIFT;
52406 +
52407 /*
52408 * mlock MCL_FUTURE?
52409 */
52410 if (mm->def_flags & VM_LOCKED) {
52411 unsigned long locked, lock_limit;
52412 - locked = len >> PAGE_SHIFT;
52413 + locked = charged;
52414 locked += mm->locked_vm;
52415 lock_limit = rlimit(RLIMIT_MEMLOCK);
52416 lock_limit >>= PAGE_SHIFT;
52417 @@ -2179,22 +2592,22 @@ unsigned long do_brk(unsigned long addr,
52418 /*
52419 * Clear old maps. this also does some error checking for us
52420 */
52421 - munmap_back:
52422 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
52423 if (vma && vma->vm_start < addr + len) {
52424 if (do_munmap(mm, addr, len))
52425 return -ENOMEM;
52426 - goto munmap_back;
52427 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
52428 + BUG_ON(vma && vma->vm_start < addr + len);
52429 }
52430
52431 /* Check against address space limits *after* clearing old maps... */
52432 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
52433 + if (!may_expand_vm(mm, charged))
52434 return -ENOMEM;
52435
52436 if (mm->map_count > sysctl_max_map_count)
52437 return -ENOMEM;
52438
52439 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
52440 + if (security_vm_enough_memory(charged))
52441 return -ENOMEM;
52442
52443 /* Can we just expand an old private anonymous mapping? */
52444 @@ -2208,7 +2621,7 @@ unsigned long do_brk(unsigned long addr,
52445 */
52446 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
52447 if (!vma) {
52448 - vm_unacct_memory(len >> PAGE_SHIFT);
52449 + vm_unacct_memory(charged);
52450 return -ENOMEM;
52451 }
52452
52453 @@ -2221,11 +2634,12 @@ unsigned long do_brk(unsigned long addr,
52454 vma->vm_page_prot = vm_get_page_prot(flags);
52455 vma_link(mm, vma, prev, rb_link, rb_parent);
52456 out:
52457 - mm->total_vm += len >> PAGE_SHIFT;
52458 + mm->total_vm += charged;
52459 if (flags & VM_LOCKED) {
52460 if (!mlock_vma_pages_range(vma, addr, addr + len))
52461 - mm->locked_vm += (len >> PAGE_SHIFT);
52462 + mm->locked_vm += charged;
52463 }
52464 + track_exec_limit(mm, addr, addr + len, flags);
52465 return addr;
52466 }
52467
52468 @@ -2272,8 +2686,10 @@ void exit_mmap(struct mm_struct *mm)
52469 * Walk the list again, actually closing and freeing it,
52470 * with preemption enabled, without holding any MM locks.
52471 */
52472 - while (vma)
52473 + while (vma) {
52474 + vma->vm_mirror = NULL;
52475 vma = remove_vma(vma);
52476 + }
52477
52478 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
52479 }
52480 @@ -2287,6 +2703,10 @@ int insert_vm_struct(struct mm_struct *
52481 struct vm_area_struct * __vma, * prev;
52482 struct rb_node ** rb_link, * rb_parent;
52483
52484 +#ifdef CONFIG_PAX_SEGMEXEC
52485 + struct vm_area_struct *vma_m = NULL;
52486 +#endif
52487 +
52488 /*
52489 * The vm_pgoff of a purely anonymous vma should be irrelevant
52490 * until its first write fault, when page's anon_vma and index
52491 @@ -2309,7 +2729,22 @@ int insert_vm_struct(struct mm_struct *
52492 if ((vma->vm_flags & VM_ACCOUNT) &&
52493 security_vm_enough_memory_mm(mm, vma_pages(vma)))
52494 return -ENOMEM;
52495 +
52496 +#ifdef CONFIG_PAX_SEGMEXEC
52497 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
52498 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
52499 + if (!vma_m)
52500 + return -ENOMEM;
52501 + }
52502 +#endif
52503 +
52504 vma_link(mm, vma, prev, rb_link, rb_parent);
52505 +
52506 +#ifdef CONFIG_PAX_SEGMEXEC
52507 + if (vma_m)
52508 + BUG_ON(pax_mirror_vma(vma_m, vma));
52509 +#endif
52510 +
52511 return 0;
52512 }
52513
52514 @@ -2327,6 +2762,8 @@ struct vm_area_struct *copy_vma(struct v
52515 struct rb_node **rb_link, *rb_parent;
52516 struct mempolicy *pol;
52517
52518 + BUG_ON(vma->vm_mirror);
52519 +
52520 /*
52521 * If anonymous vma has not yet been faulted, update new pgoff
52522 * to match new location, to increase its chance of merging.
52523 @@ -2376,6 +2813,39 @@ struct vm_area_struct *copy_vma(struct v
52524 kmem_cache_free(vm_area_cachep, new_vma);
52525 return NULL;
52526 }
52527 +
52528 +#ifdef CONFIG_PAX_SEGMEXEC
52529 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
52530 +{
52531 + struct vm_area_struct *prev_m;
52532 + struct rb_node **rb_link_m, *rb_parent_m;
52533 + struct mempolicy *pol_m;
52534 +
52535 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
52536 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
52537 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
52538 + *vma_m = *vma;
52539 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
52540 + if (anon_vma_clone(vma_m, vma))
52541 + return -ENOMEM;
52542 + pol_m = vma_policy(vma_m);
52543 + mpol_get(pol_m);
52544 + vma_set_policy(vma_m, pol_m);
52545 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
52546 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
52547 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
52548 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
52549 + if (vma_m->vm_file)
52550 + get_file(vma_m->vm_file);
52551 + if (vma_m->vm_ops && vma_m->vm_ops->open)
52552 + vma_m->vm_ops->open(vma_m);
52553 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
52554 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
52555 + vma_m->vm_mirror = vma;
52556 + vma->vm_mirror = vma_m;
52557 + return 0;
52558 +}
52559 +#endif
52560
52561 /*
52562 * Return true if the calling process may expand its vm space by the passed
52563 @@ -2387,7 +2857,7 @@ int may_expand_vm(struct mm_struct *mm,
52564 unsigned long lim;
52565
52566 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
52567 -
52568 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
52569 if (cur + npages > lim)
52570 return 0;
52571 return 1;
52572 @@ -2457,6 +2927,17 @@ int install_special_mapping(struct mm_st
52573 vma->vm_start = addr;
52574 vma->vm_end = addr + len;
52575
52576 +#ifdef CONFIG_PAX_MPROTECT
52577 + if (mm->pax_flags & MF_PAX_MPROTECT) {
52578 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
52579 + return -EPERM;
52580 + if (!(vm_flags & VM_EXEC))
52581 + vm_flags &= ~VM_MAYEXEC;
52582 + else
52583 + vm_flags &= ~VM_MAYWRITE;
52584 + }
52585 +#endif
52586 +
52587 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
52588 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
52589
52590 diff -urNp linux-2.6.35.4/mm/mprotect.c linux-2.6.35.4/mm/mprotect.c
52591 --- linux-2.6.35.4/mm/mprotect.c 2010-08-26 19:47:12.000000000 -0400
52592 +++ linux-2.6.35.4/mm/mprotect.c 2010-09-17 20:12:37.000000000 -0400
52593 @@ -23,10 +23,16 @@
52594 #include <linux/mmu_notifier.h>
52595 #include <linux/migrate.h>
52596 #include <linux/perf_event.h>
52597 +
52598 +#ifdef CONFIG_PAX_MPROTECT
52599 +#include <linux/elf.h>
52600 +#endif
52601 +
52602 #include <asm/uaccess.h>
52603 #include <asm/pgtable.h>
52604 #include <asm/cacheflush.h>
52605 #include <asm/tlbflush.h>
52606 +#include <asm/mmu_context.h>
52607
52608 #ifndef pgprot_modify
52609 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
52610 @@ -131,6 +137,48 @@ static void change_protection(struct vm_
52611 flush_tlb_range(vma, start, end);
52612 }
52613
52614 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
52615 +/* called while holding the mmap semaphore for writing, except for stack expansion */
52616 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
52617 +{
52618 + unsigned long oldlimit, newlimit = 0UL;
52619 +
52620 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
52621 + return;
52622 +
52623 + spin_lock(&mm->page_table_lock);
52624 + oldlimit = mm->context.user_cs_limit;
52625 + if ((prot & VM_EXEC) && oldlimit < end)
52626 + /* USER_CS limit moved up */
52627 + newlimit = end;
52628 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
52629 + /* USER_CS limit moved down */
52630 + newlimit = start;
52631 +
52632 + if (newlimit) {
52633 + mm->context.user_cs_limit = newlimit;
52634 +
52635 +#ifdef CONFIG_SMP
52636 + wmb();
52637 + cpus_clear(mm->context.cpu_user_cs_mask);
52638 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
52639 +#endif
52640 +
52641 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
52642 + }
52643 + spin_unlock(&mm->page_table_lock);
52644 + if (newlimit == end) {
52645 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
52646 +
52647 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
52648 + if (is_vm_hugetlb_page(vma))
52649 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
52650 + else
52651 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
52652 + }
52653 +}
52654 +#endif
52655 +
52656 int
52657 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
52658 unsigned long start, unsigned long end, unsigned long newflags)
52659 @@ -143,11 +191,29 @@ mprotect_fixup(struct vm_area_struct *vm
52660 int error;
52661 int dirty_accountable = 0;
52662
52663 +#ifdef CONFIG_PAX_SEGMEXEC
52664 + struct vm_area_struct *vma_m = NULL;
52665 + unsigned long start_m, end_m;
52666 +
52667 + start_m = start + SEGMEXEC_TASK_SIZE;
52668 + end_m = end + SEGMEXEC_TASK_SIZE;
52669 +#endif
52670 +
52671 if (newflags == oldflags) {
52672 *pprev = vma;
52673 return 0;
52674 }
52675
52676 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
52677 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
52678 +
52679 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
52680 + return -ENOMEM;
52681 +
52682 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
52683 + return -ENOMEM;
52684 + }
52685 +
52686 /*
52687 * If we make a private mapping writable we increase our commit;
52688 * but (without finer accounting) cannot reduce our commit if we
52689 @@ -164,6 +230,42 @@ mprotect_fixup(struct vm_area_struct *vm
52690 }
52691 }
52692
52693 +#ifdef CONFIG_PAX_SEGMEXEC
52694 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
52695 + if (start != vma->vm_start) {
52696 + error = split_vma(mm, vma, start, 1);
52697 + if (error)
52698 + goto fail;
52699 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
52700 + *pprev = (*pprev)->vm_next;
52701 + }
52702 +
52703 + if (end != vma->vm_end) {
52704 + error = split_vma(mm, vma, end, 0);
52705 + if (error)
52706 + goto fail;
52707 + }
52708 +
52709 + if (pax_find_mirror_vma(vma)) {
52710 + error = __do_munmap(mm, start_m, end_m - start_m);
52711 + if (error)
52712 + goto fail;
52713 + } else {
52714 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
52715 + if (!vma_m) {
52716 + error = -ENOMEM;
52717 + goto fail;
52718 + }
52719 + vma->vm_flags = newflags;
52720 + error = pax_mirror_vma(vma_m, vma);
52721 + if (error) {
52722 + vma->vm_flags = oldflags;
52723 + goto fail;
52724 + }
52725 + }
52726 + }
52727 +#endif
52728 +
52729 /*
52730 * First try to merge with previous and/or next vma.
52731 */
52732 @@ -194,9 +296,21 @@ success:
52733 * vm_flags and vm_page_prot are protected by the mmap_sem
52734 * held in write mode.
52735 */
52736 +
52737 +#ifdef CONFIG_PAX_SEGMEXEC
52738 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
52739 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
52740 +#endif
52741 +
52742 vma->vm_flags = newflags;
52743 +
52744 +#ifdef CONFIG_PAX_MPROTECT
52745 + if (mm->binfmt && mm->binfmt->handle_mprotect)
52746 + mm->binfmt->handle_mprotect(vma, newflags);
52747 +#endif
52748 +
52749 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
52750 - vm_get_page_prot(newflags));
52751 + vm_get_page_prot(vma->vm_flags));
52752
52753 if (vma_wants_writenotify(vma)) {
52754 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
52755 @@ -237,6 +351,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
52756 end = start + len;
52757 if (end <= start)
52758 return -ENOMEM;
52759 +
52760 +#ifdef CONFIG_PAX_SEGMEXEC
52761 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
52762 + if (end > SEGMEXEC_TASK_SIZE)
52763 + return -EINVAL;
52764 + } else
52765 +#endif
52766 +
52767 + if (end > TASK_SIZE)
52768 + return -EINVAL;
52769 +
52770 if (!arch_validate_prot(prot))
52771 return -EINVAL;
52772
52773 @@ -244,7 +369,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
52774 /*
52775 * Does the application expect PROT_READ to imply PROT_EXEC:
52776 */
52777 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
52778 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
52779 prot |= PROT_EXEC;
52780
52781 vm_flags = calc_vm_prot_bits(prot);
52782 @@ -276,6 +401,16 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
52783 if (start > vma->vm_start)
52784 prev = vma;
52785
52786 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
52787 + error = -EACCES;
52788 + goto out;
52789 + }
52790 +
52791 +#ifdef CONFIG_PAX_MPROTECT
52792 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
52793 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
52794 +#endif
52795 +
52796 for (nstart = start ; ; ) {
52797 unsigned long newflags;
52798
52799 @@ -300,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
52800 if (error)
52801 goto out;
52802 perf_event_mmap(vma);
52803 +
52804 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
52805 +
52806 nstart = tmp;
52807
52808 if (nstart < prev->vm_end)
52809 diff -urNp linux-2.6.35.4/mm/mremap.c linux-2.6.35.4/mm/mremap.c
52810 --- linux-2.6.35.4/mm/mremap.c 2010-08-26 19:47:12.000000000 -0400
52811 +++ linux-2.6.35.4/mm/mremap.c 2010-09-17 20:12:09.000000000 -0400
52812 @@ -113,6 +113,12 @@ static void move_ptes(struct vm_area_str
52813 continue;
52814 pte = ptep_clear_flush(vma, old_addr, old_pte);
52815 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
52816 +
52817 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
52818 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
52819 + pte = pte_exprotect(pte);
52820 +#endif
52821 +
52822 set_pte_at(mm, new_addr, new_pte, pte);
52823 }
52824
52825 @@ -272,6 +278,11 @@ static struct vm_area_struct *vma_to_res
52826 if (is_vm_hugetlb_page(vma))
52827 goto Einval;
52828
52829 +#ifdef CONFIG_PAX_SEGMEXEC
52830 + if (pax_find_mirror_vma(vma))
52831 + goto Einval;
52832 +#endif
52833 +
52834 /* We can't remap across vm area boundaries */
52835 if (old_len > vma->vm_end - addr)
52836 goto Efault;
52837 @@ -321,20 +332,23 @@ static unsigned long mremap_to(unsigned
52838 unsigned long ret = -EINVAL;
52839 unsigned long charged = 0;
52840 unsigned long map_flags;
52841 + unsigned long pax_task_size = TASK_SIZE;
52842
52843 if (new_addr & ~PAGE_MASK)
52844 goto out;
52845
52846 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
52847 +#ifdef CONFIG_PAX_SEGMEXEC
52848 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
52849 + pax_task_size = SEGMEXEC_TASK_SIZE;
52850 +#endif
52851 +
52852 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
52853 goto out;
52854
52855 /* Check if the location we're moving into overlaps the
52856 * old location at all, and fail if it does.
52857 */
52858 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
52859 - goto out;
52860 -
52861 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
52862 + if (addr + old_len > new_addr && new_addr + new_len > addr)
52863 goto out;
52864
52865 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
52866 @@ -406,6 +420,7 @@ unsigned long do_mremap(unsigned long ad
52867 struct vm_area_struct *vma;
52868 unsigned long ret = -EINVAL;
52869 unsigned long charged = 0;
52870 + unsigned long pax_task_size = TASK_SIZE;
52871
52872 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
52873 goto out;
52874 @@ -424,6 +439,15 @@ unsigned long do_mremap(unsigned long ad
52875 if (!new_len)
52876 goto out;
52877
52878 +#ifdef CONFIG_PAX_SEGMEXEC
52879 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
52880 + pax_task_size = SEGMEXEC_TASK_SIZE;
52881 +#endif
52882 +
52883 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
52884 + old_len > pax_task_size || addr > pax_task_size-old_len)
52885 + goto out;
52886 +
52887 if (flags & MREMAP_FIXED) {
52888 if (flags & MREMAP_MAYMOVE)
52889 ret = mremap_to(addr, old_len, new_addr, new_len);
52890 @@ -473,6 +497,7 @@ unsigned long do_mremap(unsigned long ad
52891 addr + new_len);
52892 }
52893 ret = addr;
52894 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
52895 goto out;
52896 }
52897 }
52898 @@ -499,7 +524,13 @@ unsigned long do_mremap(unsigned long ad
52899 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
52900 if (ret)
52901 goto out;
52902 +
52903 + map_flags = vma->vm_flags;
52904 ret = move_vma(vma, addr, old_len, new_len, new_addr);
52905 + if (!(ret & ~PAGE_MASK)) {
52906 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
52907 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
52908 + }
52909 }
52910 out:
52911 if (ret & ~PAGE_MASK)
52912 diff -urNp linux-2.6.35.4/mm/nommu.c linux-2.6.35.4/mm/nommu.c
52913 --- linux-2.6.35.4/mm/nommu.c 2010-08-26 19:47:12.000000000 -0400
52914 +++ linux-2.6.35.4/mm/nommu.c 2010-09-17 20:12:09.000000000 -0400
52915 @@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMI
52916 int sysctl_overcommit_ratio = 50; /* default is 50% */
52917 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
52918 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
52919 -int heap_stack_gap = 0;
52920
52921 atomic_long_t mmap_pages_allocated;
52922
52923 @@ -762,15 +761,6 @@ struct vm_area_struct *find_vma(struct m
52924 EXPORT_SYMBOL(find_vma);
52925
52926 /*
52927 - * find a VMA
52928 - * - we don't extend stack VMAs under NOMMU conditions
52929 - */
52930 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
52931 -{
52932 - return find_vma(mm, addr);
52933 -}
52934 -
52935 -/*
52936 * expand a stack to a given address
52937 * - not supported under NOMMU conditions
52938 */
52939 @@ -1491,6 +1481,7 @@ int split_vma(struct mm_struct *mm, stru
52940
52941 /* most fields are the same, copy all, and then fixup */
52942 *new = *vma;
52943 + INIT_LIST_HEAD(&new->anon_vma_chain);
52944 *region = *vma->vm_region;
52945 new->vm_region = region;
52946
52947 diff -urNp linux-2.6.35.4/mm/page_alloc.c linux-2.6.35.4/mm/page_alloc.c
52948 --- linux-2.6.35.4/mm/page_alloc.c 2010-08-26 19:47:12.000000000 -0400
52949 +++ linux-2.6.35.4/mm/page_alloc.c 2010-09-17 20:12:09.000000000 -0400
52950 @@ -641,6 +641,10 @@ static bool free_pages_prepare(struct pa
52951 int i;
52952 int bad = 0;
52953
52954 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
52955 + unsigned long index = 1UL << order;
52956 +#endif
52957 +
52958 trace_mm_page_free_direct(page, order);
52959 kmemcheck_free_shadow(page, order);
52960
52961 @@ -659,6 +663,12 @@ static bool free_pages_prepare(struct pa
52962 debug_check_no_obj_freed(page_address(page),
52963 PAGE_SIZE << order);
52964 }
52965 +
52966 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
52967 + for (; index; --index)
52968 + sanitize_highpage(page + index - 1);
52969 +#endif
52970 +
52971 arch_free_page(page, order);
52972 kernel_map_pages(page, 1 << order, 0);
52973
52974 @@ -773,8 +783,10 @@ static int prep_new_page(struct page *pa
52975 arch_alloc_page(page, order);
52976 kernel_map_pages(page, 1 << order, 1);
52977
52978 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
52979 if (gfp_flags & __GFP_ZERO)
52980 prep_zero_page(page, order, gfp_flags);
52981 +#endif
52982
52983 if (order && (gfp_flags & __GFP_COMP))
52984 prep_compound_page(page, order);
52985 @@ -3973,7 +3985,7 @@ static void __init setup_usemap(struct p
52986 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
52987 }
52988 #else
52989 -static void inline setup_usemap(struct pglist_data *pgdat,
52990 +static inline void setup_usemap(struct pglist_data *pgdat,
52991 struct zone *zone, unsigned long zonesize) {}
52992 #endif /* CONFIG_SPARSEMEM */
52993
52994 diff -urNp linux-2.6.35.4/mm/percpu.c linux-2.6.35.4/mm/percpu.c
52995 --- linux-2.6.35.4/mm/percpu.c 2010-08-26 19:47:12.000000000 -0400
52996 +++ linux-2.6.35.4/mm/percpu.c 2010-09-17 20:12:09.000000000 -0400
52997 @@ -115,7 +115,7 @@ static unsigned int pcpu_first_unit_cpu
52998 static unsigned int pcpu_last_unit_cpu __read_mostly;
52999
53000 /* the address of the first chunk which starts with the kernel static area */
53001 -void *pcpu_base_addr __read_mostly;
53002 +void *pcpu_base_addr __read_only;
53003 EXPORT_SYMBOL_GPL(pcpu_base_addr);
53004
53005 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
53006 diff -urNp linux-2.6.35.4/mm/rmap.c linux-2.6.35.4/mm/rmap.c
53007 --- linux-2.6.35.4/mm/rmap.c 2010-08-26 19:47:12.000000000 -0400
53008 +++ linux-2.6.35.4/mm/rmap.c 2010-09-17 20:12:09.000000000 -0400
53009 @@ -116,6 +116,10 @@ int anon_vma_prepare(struct vm_area_stru
53010 struct anon_vma *anon_vma = vma->anon_vma;
53011 struct anon_vma_chain *avc;
53012
53013 +#ifdef CONFIG_PAX_SEGMEXEC
53014 + struct anon_vma_chain *avc_m = NULL;
53015 +#endif
53016 +
53017 might_sleep();
53018 if (unlikely(!anon_vma)) {
53019 struct mm_struct *mm = vma->vm_mm;
53020 @@ -125,6 +129,12 @@ int anon_vma_prepare(struct vm_area_stru
53021 if (!avc)
53022 goto out_enomem;
53023
53024 +#ifdef CONFIG_PAX_SEGMEXEC
53025 + avc_m = anon_vma_chain_alloc();
53026 + if (!avc_m)
53027 + goto out_enomem_free_avc;
53028 +#endif
53029 +
53030 anon_vma = find_mergeable_anon_vma(vma);
53031 allocated = NULL;
53032 if (!anon_vma) {
53033 @@ -138,6 +148,21 @@ int anon_vma_prepare(struct vm_area_stru
53034 /* page_table_lock to protect against threads */
53035 spin_lock(&mm->page_table_lock);
53036 if (likely(!vma->anon_vma)) {
53037 +
53038 +#ifdef CONFIG_PAX_SEGMEXEC
53039 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
53040 +
53041 + if (vma_m) {
53042 + BUG_ON(vma_m->anon_vma);
53043 + vma_m->anon_vma = anon_vma;
53044 + avc_m->anon_vma = anon_vma;
53045 + avc_m->vma = vma;
53046 + list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
53047 + list_add(&avc_m->same_anon_vma, &anon_vma->head);
53048 + avc_m = NULL;
53049 + }
53050 +#endif
53051 +
53052 vma->anon_vma = anon_vma;
53053 avc->anon_vma = anon_vma;
53054 avc->vma = vma;
53055 @@ -151,12 +176,24 @@ int anon_vma_prepare(struct vm_area_stru
53056
53057 if (unlikely(allocated))
53058 anon_vma_free(allocated);
53059 +
53060 +#ifdef CONFIG_PAX_SEGMEXEC
53061 + if (unlikely(avc_m))
53062 + anon_vma_chain_free(avc_m);
53063 +#endif
53064 +
53065 if (unlikely(avc))
53066 anon_vma_chain_free(avc);
53067 }
53068 return 0;
53069
53070 out_enomem_free_avc:
53071 +
53072 +#ifdef CONFIG_PAX_SEGMEXEC
53073 + if (avc_m)
53074 + anon_vma_chain_free(avc_m);
53075 +#endif
53076 +
53077 anon_vma_chain_free(avc);
53078 out_enomem:
53079 return -ENOMEM;
53080 @@ -179,7 +216,7 @@ static void anon_vma_chain_link(struct v
53081 * Attach the anon_vmas from src to dst.
53082 * Returns 0 on success, -ENOMEM on failure.
53083 */
53084 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
53085 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
53086 {
53087 struct anon_vma_chain *avc, *pavc;
53088
53089 @@ -201,7 +238,7 @@ int anon_vma_clone(struct vm_area_struct
53090 * the corresponding VMA in the parent process is attached to.
53091 * Returns 0 on success, non-zero on failure.
53092 */
53093 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
53094 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
53095 {
53096 struct anon_vma_chain *avc;
53097 struct anon_vma *anon_vma;
53098 diff -urNp linux-2.6.35.4/mm/shmem.c linux-2.6.35.4/mm/shmem.c
53099 --- linux-2.6.35.4/mm/shmem.c 2010-08-26 19:47:12.000000000 -0400
53100 +++ linux-2.6.35.4/mm/shmem.c 2010-09-17 20:12:37.000000000 -0400
53101 @@ -30,7 +30,7 @@
53102 #include <linux/module.h>
53103 #include <linux/swap.h>
53104
53105 -static struct vfsmount *shm_mnt;
53106 +struct vfsmount *shm_mnt;
53107
53108 #ifdef CONFIG_SHMEM
53109 /*
53110 diff -urNp linux-2.6.35.4/mm/slab.c linux-2.6.35.4/mm/slab.c
53111 --- linux-2.6.35.4/mm/slab.c 2010-08-26 19:47:12.000000000 -0400
53112 +++ linux-2.6.35.4/mm/slab.c 2010-09-17 20:12:37.000000000 -0400
53113 @@ -285,7 +285,7 @@ struct kmem_list3 {
53114 * Need this for bootstrapping a per node allocator.
53115 */
53116 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
53117 -struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
53118 +struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
53119 #define CACHE_CACHE 0
53120 #define SIZE_AC MAX_NUMNODES
53121 #define SIZE_L3 (2 * MAX_NUMNODES)
53122 @@ -535,7 +535,7 @@ static inline void *index_to_obj(struct
53123 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
53124 */
53125 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
53126 - const struct slab *slab, void *obj)
53127 + const struct slab *slab, const void *obj)
53128 {
53129 u32 offset = (obj - slab->s_mem);
53130 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
53131 @@ -561,14 +561,14 @@ struct cache_names {
53132 static struct cache_names __initdata cache_names[] = {
53133 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
53134 #include <linux/kmalloc_sizes.h>
53135 - {NULL,}
53136 + {NULL, NULL}
53137 #undef CACHE
53138 };
53139
53140 static struct arraycache_init initarray_cache __initdata =
53141 - { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
53142 + { {0, BOOT_CPUCACHE_ENTRIES, 1, 0}, {NULL} };
53143 static struct arraycache_init initarray_generic =
53144 - { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
53145 + { {0, BOOT_CPUCACHE_ENTRIES, 1, 0}, {NULL} };
53146
53147 /* internal cache of cache description objs */
53148 static struct kmem_cache cache_cache = {
53149 @@ -4558,15 +4558,66 @@ static const struct file_operations proc
53150
53151 static int __init slab_proc_init(void)
53152 {
53153 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
53154 + mode_t gr_mode = S_IRUGO;
53155 +
53156 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
53157 + gr_mode = S_IRUSR;
53158 +#endif
53159 +
53160 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
53161 #ifdef CONFIG_DEBUG_SLAB_LEAK
53162 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
53163 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
53164 #endif
53165 return 0;
53166 }
53167 module_init(slab_proc_init);
53168 #endif
53169
53170 +void check_object_size(const void *ptr, unsigned long n, bool to)
53171 +{
53172 +
53173 +#ifdef CONFIG_PAX_USERCOPY
53174 + struct kmem_cache *cachep;
53175 + struct slab *slabp;
53176 + struct page *page;
53177 + unsigned int objnr;
53178 + unsigned long offset;
53179 +
53180 + if (!n)
53181 + return;
53182 +
53183 + if (ZERO_OR_NULL_PTR(ptr))
53184 + goto report;
53185 +
53186 + if (!virt_addr_valid(ptr))
53187 + return;
53188 +
53189 + page = virt_to_head_page(ptr);
53190 +
53191 + if (!PageSlab(page)) {
53192 + if (object_is_on_stack(ptr, n) == -1)
53193 + goto report;
53194 + return;
53195 + }
53196 +
53197 + cachep = page_get_cache(page);
53198 + slabp = page_get_slab(page);
53199 + objnr = obj_to_index(cachep, slabp, ptr);
53200 + BUG_ON(objnr >= cachep->num);
53201 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
53202 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
53203 + return;
53204 +
53205 +report:
53206 + if (to)
53207 + pax_report_leak_to_user(ptr, n);
53208 + else
53209 + pax_report_overflow_from_user(ptr, n);
53210 +#endif
53211 +
53212 +}
53213 +EXPORT_SYMBOL(check_object_size);
53214 +
53215 /**
53216 * ksize - get the actual amount of memory allocated for a given object
53217 * @objp: Pointer to the object
53218 diff -urNp linux-2.6.35.4/mm/slob.c linux-2.6.35.4/mm/slob.c
53219 --- linux-2.6.35.4/mm/slob.c 2010-08-26 19:47:12.000000000 -0400
53220 +++ linux-2.6.35.4/mm/slob.c 2010-09-17 20:12:09.000000000 -0400
53221 @@ -29,7 +29,7 @@
53222 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
53223 * alloc_pages() directly, allocating compound pages so the page order
53224 * does not have to be separately tracked, and also stores the exact
53225 - * allocation size in page->private so that it can be used to accurately
53226 + * allocation size in slob_page->size so that it can be used to accurately
53227 * provide ksize(). These objects are detected in kfree() because slob_page()
53228 * is false for them.
53229 *
53230 @@ -58,6 +58,7 @@
53231 */
53232
53233 #include <linux/kernel.h>
53234 +#include <linux/sched.h>
53235 #include <linux/slab.h>
53236 #include <linux/mm.h>
53237 #include <linux/swap.h> /* struct reclaim_state */
53238 @@ -100,7 +101,8 @@ struct slob_page {
53239 unsigned long flags; /* mandatory */
53240 atomic_t _count; /* mandatory */
53241 slobidx_t units; /* free units left in page */
53242 - unsigned long pad[2];
53243 + unsigned long pad[1];
53244 + unsigned long size; /* size when >=PAGE_SIZE */
53245 slob_t *free; /* first free slob_t in page */
53246 struct list_head list; /* linked list of free pages */
53247 };
53248 @@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
53249 */
53250 static inline int is_slob_page(struct slob_page *sp)
53251 {
53252 - return PageSlab((struct page *)sp);
53253 + return PageSlab((struct page *)sp) && !sp->size;
53254 }
53255
53256 static inline void set_slob_page(struct slob_page *sp)
53257 @@ -148,7 +150,7 @@ static inline void clear_slob_page(struc
53258
53259 static inline struct slob_page *slob_page(const void *addr)
53260 {
53261 - return (struct slob_page *)virt_to_page(addr);
53262 + return (struct slob_page *)virt_to_head_page(addr);
53263 }
53264
53265 /*
53266 @@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_
53267 /*
53268 * Return the size of a slob block.
53269 */
53270 -static slobidx_t slob_units(slob_t *s)
53271 +static slobidx_t slob_units(const slob_t *s)
53272 {
53273 if (s->units > 0)
53274 return s->units;
53275 @@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
53276 /*
53277 * Return the next free slob block pointer after this one.
53278 */
53279 -static slob_t *slob_next(slob_t *s)
53280 +static slob_t *slob_next(const slob_t *s)
53281 {
53282 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
53283 slobidx_t next;
53284 @@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
53285 /*
53286 * Returns true if s is the last free block in its page.
53287 */
53288 -static int slob_last(slob_t *s)
53289 +static int slob_last(const slob_t *s)
53290 {
53291 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
53292 }
53293 @@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, i
53294 if (!page)
53295 return NULL;
53296
53297 + set_slob_page(page);
53298 return page_address(page);
53299 }
53300
53301 @@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp
53302 if (!b)
53303 return NULL;
53304 sp = slob_page(b);
53305 - set_slob_page(sp);
53306
53307 spin_lock_irqsave(&slob_lock, flags);
53308 sp->units = SLOB_UNITS(PAGE_SIZE);
53309 sp->free = b;
53310 + sp->size = 0;
53311 INIT_LIST_HEAD(&sp->list);
53312 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
53313 set_slob_page_free(sp, slob_list);
53314 @@ -467,10 +470,9 @@ out:
53315 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
53316 */
53317
53318 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
53319 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
53320 {
53321 - unsigned int *m;
53322 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
53323 + slob_t *m;
53324 void *ret;
53325
53326 lockdep_trace_alloc(gfp);
53327 @@ -483,7 +485,10 @@ void *__kmalloc_node(size_t size, gfp_t
53328
53329 if (!m)
53330 return NULL;
53331 - *m = size;
53332 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
53333 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
53334 + m[0].units = size;
53335 + m[1].units = align;
53336 ret = (void *)m + align;
53337
53338 trace_kmalloc_node(_RET_IP_, ret,
53339 @@ -493,9 +498,9 @@ void *__kmalloc_node(size_t size, gfp_t
53340
53341 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
53342 if (ret) {
53343 - struct page *page;
53344 - page = virt_to_page(ret);
53345 - page->private = size;
53346 + struct slob_page *sp;
53347 + sp = slob_page(ret);
53348 + sp->size = size;
53349 }
53350
53351 trace_kmalloc_node(_RET_IP_, ret,
53352 @@ -505,6 +510,13 @@ void *__kmalloc_node(size_t size, gfp_t
53353 kmemleak_alloc(ret, size, 1, gfp);
53354 return ret;
53355 }
53356 +
53357 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
53358 +{
53359 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
53360 +
53361 + return __kmalloc_node_align(size, gfp, node, align);
53362 +}
53363 EXPORT_SYMBOL(__kmalloc_node);
53364
53365 void kfree(const void *block)
53366 @@ -520,13 +532,84 @@ void kfree(const void *block)
53367 sp = slob_page(block);
53368 if (is_slob_page(sp)) {
53369 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
53370 - unsigned int *m = (unsigned int *)(block - align);
53371 - slob_free(m, *m + align);
53372 - } else
53373 + slob_t *m = (slob_t *)(block - align);
53374 + slob_free(m, m[0].units + align);
53375 + } else {
53376 + clear_slob_page(sp);
53377 + free_slob_page(sp);
53378 + sp->size = 0;
53379 put_page(&sp->page);
53380 + }
53381 }
53382 EXPORT_SYMBOL(kfree);
53383
53384 +void check_object_size(const void *ptr, unsigned long n, bool to)
53385 +{
53386 +
53387 +#ifdef CONFIG_PAX_USERCOPY
53388 + struct slob_page *sp;
53389 + const slob_t *free;
53390 + const void *base;
53391 +
53392 + if (!n)
53393 + return;
53394 +
53395 + if (ZERO_OR_NULL_PTR(ptr))
53396 + goto report;
53397 +
53398 + if (!virt_addr_valid(ptr))
53399 + return;
53400 +
53401 + sp = slob_page(ptr);
53402 +	if (!PageSlab((struct page *)sp)) {
53403 + if (object_is_on_stack(ptr, n) == -1)
53404 + goto report;
53405 + return;
53406 + }
53407 +
53408 + if (sp->size) {
53409 + base = page_address(&sp->page);
53410 + if (base <= ptr && n <= sp->size - (ptr - base))
53411 + return;
53412 + goto report;
53413 + }
53414 +
53415 +	/* walk the free list and the allocated chunks between free blocks to find the chunk containing ptr */
53416 + base = (void *)((unsigned long)ptr & PAGE_MASK);
53417 + free = sp->free;
53418 +
53419 + while (!slob_last(free) && (void *)free <= ptr) {
53420 + base = free + slob_units(free);
53421 + free = slob_next(free);
53422 + }
53423 +
53424 + while (base < (void *)free) {
53425 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
53426 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
53427 + int offset;
53428 +
53429 + if (ptr < base + align)
53430 + goto report;
53431 +
53432 + offset = ptr - base - align;
53433 + if (offset < m) {
53434 + if (n <= m - offset)
53435 + return;
53436 + goto report;
53437 + }
53438 + base += size;
53439 + }
53440 +
53441 +report:
53442 + if (to)
53443 + pax_report_leak_to_user(ptr, n);
53444 + else
53445 + pax_report_overflow_from_user(ptr, n);
53446 +#endif
53447 +
53448 +}
53449 +EXPORT_SYMBOL(check_object_size);
53450 +
53451 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
53452 size_t ksize(const void *block)
53453 {
53454 @@ -539,10 +622,10 @@ size_t ksize(const void *block)
53455 sp = slob_page(block);
53456 if (is_slob_page(sp)) {
53457 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
53458 - unsigned int *m = (unsigned int *)(block - align);
53459 - return SLOB_UNITS(*m) * SLOB_UNIT;
53460 + slob_t *m = (slob_t *)(block - align);
53461 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
53462 } else
53463 - return sp->page.private;
53464 + return sp->size;
53465 }
53466 EXPORT_SYMBOL(ksize);
53467
53468 @@ -597,17 +680,25 @@ void *kmem_cache_alloc_node(struct kmem_
53469 {
53470 void *b;
53471
53472 +#ifdef CONFIG_PAX_USERCOPY
53473 + b = __kmalloc_node_align(c->size, flags, node, c->align);
53474 +#else
53475 if (c->size < PAGE_SIZE) {
53476 b = slob_alloc(c->size, flags, c->align, node);
53477 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
53478 SLOB_UNITS(c->size) * SLOB_UNIT,
53479 flags, node);
53480 } else {
53481 + struct slob_page *sp;
53482 +
53483 b = slob_new_pages(flags, get_order(c->size), node);
53484 + sp = slob_page(b);
53485 + sp->size = c->size;
53486 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
53487 PAGE_SIZE << get_order(c->size),
53488 flags, node);
53489 }
53490 +#endif
53491
53492 if (c->ctor)
53493 c->ctor(b);
53494 @@ -619,10 +710,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
53495
53496 static void __kmem_cache_free(void *b, int size)
53497 {
53498 - if (size < PAGE_SIZE)
53499 + struct slob_page *sp = slob_page(b);
53500 +
53501 + if (is_slob_page(sp))
53502 slob_free(b, size);
53503 - else
53504 + else {
53505 + clear_slob_page(sp);
53506 + free_slob_page(sp);
53507 + sp->size = 0;
53508 slob_free_pages(b, get_order(size));
53509 + }
53510 }
53511
53512 static void kmem_rcu_free(struct rcu_head *head)
53513 @@ -635,15 +732,24 @@ static void kmem_rcu_free(struct rcu_hea
53514
53515 void kmem_cache_free(struct kmem_cache *c, void *b)
53516 {
53517 + int size = c->size;
53518 +
53519 +#ifdef CONFIG_PAX_USERCOPY
53520 + if (size + c->align < PAGE_SIZE) {
53521 + size += c->align;
53522 + b -= c->align;
53523 + }
53524 +#endif
53525 +
53526 kmemleak_free_recursive(b, c->flags);
53527 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
53528 struct slob_rcu *slob_rcu;
53529 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
53530 + slob_rcu = b + (size - sizeof(struct slob_rcu));
53531 INIT_RCU_HEAD(&slob_rcu->head);
53532 - slob_rcu->size = c->size;
53533 + slob_rcu->size = size;
53534 call_rcu(&slob_rcu->head, kmem_rcu_free);
53535 } else {
53536 - __kmem_cache_free(b, c->size);
53537 + __kmem_cache_free(b, size);
53538 }
53539
53540 trace_kmem_cache_free(_RET_IP_, b);
53541 diff -urNp linux-2.6.35.4/mm/slub.c linux-2.6.35.4/mm/slub.c
53542 --- linux-2.6.35.4/mm/slub.c 2010-08-26 19:47:12.000000000 -0400
53543 +++ linux-2.6.35.4/mm/slub.c 2010-09-17 20:12:37.000000000 -0400
53544 @@ -1873,6 +1873,8 @@ void kmem_cache_free(struct kmem_cache *
53545
53546 page = virt_to_head_page(x);
53547
53548 + BUG_ON(!PageSlab(page));
53549 +
53550 slab_free(s, page, x, _RET_IP_);
53551
53552 trace_kmem_cache_free(_RET_IP_, x);
53553 @@ -1917,7 +1919,7 @@ static int slub_min_objects;
53554 * Merge control. If this is set then no merging of slab caches will occur.
53555 * (Could be removed. This was introduced to pacify the merge skeptics.)
53556 */
53557 -static int slub_nomerge;
53558 +static int slub_nomerge = 1;
53559
53560 /*
53561 * Calculate the order of allocation given an slab object size.
53562 @@ -2344,7 +2346,7 @@ static int kmem_cache_open(struct kmem_c
53563 * list to avoid pounding the page allocator excessively.
53564 */
53565 set_min_partial(s, ilog2(s->size));
53566 - s->refcount = 1;
53567 + atomic_set(&s->refcount, 1);
53568 #ifdef CONFIG_NUMA
53569 s->remote_node_defrag_ratio = 1000;
53570 #endif
53571 @@ -2487,8 +2489,7 @@ static inline int kmem_cache_close(struc
53572 void kmem_cache_destroy(struct kmem_cache *s)
53573 {
53574 down_write(&slub_lock);
53575 - s->refcount--;
53576 - if (!s->refcount) {
53577 + if (atomic_dec_and_test(&s->refcount)) {
53578 list_del(&s->list);
53579 up_write(&slub_lock);
53580 if (kmem_cache_close(s)) {
53581 @@ -2780,6 +2781,46 @@ void *__kmalloc_node(size_t size, gfp_t
53582 EXPORT_SYMBOL(__kmalloc_node);
53583 #endif
53584
53585 +void check_object_size(const void *ptr, unsigned long n, bool to)
53586 +{
53587 +
53588 +#ifdef CONFIG_PAX_USERCOPY
53589 + struct page *page;
53590 + struct kmem_cache *s;
53591 + unsigned long offset;
53592 +
53593 + if (!n)
53594 + return;
53595 +
53596 + if (ZERO_OR_NULL_PTR(ptr))
53597 + goto report;
53598 +
53599 + if (!virt_addr_valid(ptr))
53600 + return;
53601 +
53602 + page = get_object_page(ptr);
53603 +
53604 + if (!page) {
53605 + if (object_is_on_stack(ptr, n) == -1)
53606 + goto report;
53607 + return;
53608 + }
53609 +
53610 + s = page->slab;
53611 + offset = (ptr - page_address(page)) % s->size;
53612 + if (offset <= s->objsize && n <= s->objsize - offset)
53613 + return;
53614 +
53615 +report:
53616 + if (to)
53617 + pax_report_leak_to_user(ptr, n);
53618 + else
53619 + pax_report_overflow_from_user(ptr, n);
53620 +#endif
53621 +
53622 +}
53623 +EXPORT_SYMBOL(check_object_size);
53624 +
53625 size_t ksize(const void *object)
53626 {
53627 struct page *page;
53628 @@ -3049,7 +3090,7 @@ void __init kmem_cache_init(void)
53629 */
53630 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
53631 sizeof(struct kmem_cache_node), GFP_NOWAIT);
53632 - kmalloc_caches[0].refcount = -1;
53633 + atomic_set(&kmalloc_caches[0].refcount, -1);
53634 caches++;
53635
53636 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
53637 @@ -3158,7 +3199,7 @@ static int slab_unmergeable(struct kmem_
53638 /*
53639 * We may have set a slab to be unmergeable during bootstrap.
53640 */
53641 - if (s->refcount < 0)
53642 + if (atomic_read(&s->refcount) < 0)
53643 return 1;
53644
53645 return 0;
53646 @@ -3216,7 +3257,7 @@ struct kmem_cache *kmem_cache_create(con
53647 down_write(&slub_lock);
53648 s = find_mergeable(size, align, flags, name, ctor);
53649 if (s) {
53650 - s->refcount++;
53651 + atomic_inc(&s->refcount);
53652 /*
53653 * Adjust the object sizes so that we clear
53654 * the complete object on kzalloc.
53655 @@ -3227,7 +3268,7 @@ struct kmem_cache *kmem_cache_create(con
53656
53657 if (sysfs_slab_alias(s, name)) {
53658 down_write(&slub_lock);
53659 - s->refcount--;
53660 + atomic_dec(&s->refcount);
53661 up_write(&slub_lock);
53662 goto err;
53663 }
53664 @@ -3953,7 +3994,7 @@ SLAB_ATTR_RO(ctor);
53665
53666 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
53667 {
53668 - return sprintf(buf, "%d\n", s->refcount - 1);
53669 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
53670 }
53671 SLAB_ATTR_RO(aliases);
53672
53673 @@ -4674,7 +4715,13 @@ static const struct file_operations proc
53674
53675 static int __init slab_proc_init(void)
53676 {
53677 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
53678 + mode_t gr_mode = S_IRUGO;
53679 +
53680 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
53681 + gr_mode = S_IRUSR;
53682 +#endif
53683 +
53684 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
53685 return 0;
53686 }
53687 module_init(slab_proc_init);
53688 diff -urNp linux-2.6.35.4/mm/util.c linux-2.6.35.4/mm/util.c
53689 --- linux-2.6.35.4/mm/util.c 2010-08-26 19:47:12.000000000 -0400
53690 +++ linux-2.6.35.4/mm/util.c 2010-09-17 20:12:09.000000000 -0400
53691 @@ -245,6 +245,12 @@ EXPORT_SYMBOL(strndup_user);
53692 void arch_pick_mmap_layout(struct mm_struct *mm)
53693 {
53694 mm->mmap_base = TASK_UNMAPPED_BASE;
53695 +
53696 +#ifdef CONFIG_PAX_RANDMMAP
53697 + if (mm->pax_flags & MF_PAX_RANDMMAP)
53698 + mm->mmap_base += mm->delta_mmap;
53699 +#endif
53700 +
53701 mm->get_unmapped_area = arch_get_unmapped_area;
53702 mm->unmap_area = arch_unmap_area;
53703 }
53704 diff -urNp linux-2.6.35.4/mm/vmalloc.c linux-2.6.35.4/mm/vmalloc.c
53705 --- linux-2.6.35.4/mm/vmalloc.c 2010-08-26 19:47:12.000000000 -0400
53706 +++ linux-2.6.35.4/mm/vmalloc.c 2010-09-17 20:12:09.000000000 -0400
53707 @@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd,
53708
53709 pte = pte_offset_kernel(pmd, addr);
53710 do {
53711 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
53712 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
53713 +
53714 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
53715 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
53716 + BUG_ON(!pte_exec(*pte));
53717 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
53718 + continue;
53719 + }
53720 +#endif
53721 +
53722 + {
53723 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
53724 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
53725 + }
53726 } while (pte++, addr += PAGE_SIZE, addr != end);
53727 }
53728
53729 @@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, un
53730 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
53731 {
53732 pte_t *pte;
53733 + int ret = -ENOMEM;
53734
53735 /*
53736 * nr is a running index into the array which helps higher level
53737 @@ -101,17 +113,30 @@ static int vmap_pte_range(pmd_t *pmd, un
53738 pte = pte_alloc_kernel(pmd, addr);
53739 if (!pte)
53740 return -ENOMEM;
53741 +
53742 + pax_open_kernel();
53743 do {
53744 struct page *page = pages[*nr];
53745
53746 - if (WARN_ON(!pte_none(*pte)))
53747 - return -EBUSY;
53748 - if (WARN_ON(!page))
53749 - return -ENOMEM;
53750 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
53751 + if (pgprot_val(prot) & _PAGE_NX)
53752 +#endif
53753 +
53754 + if (WARN_ON(!pte_none(*pte))) {
53755 + ret = -EBUSY;
53756 + goto out;
53757 + }
53758 + if (WARN_ON(!page)) {
53759 + ret = -ENOMEM;
53760 + goto out;
53761 + }
53762 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
53763 (*nr)++;
53764 } while (pte++, addr += PAGE_SIZE, addr != end);
53765 - return 0;
53766 + ret = 0;
53767 +out:
53768 + pax_close_kernel();
53769 + return ret;
53770 }
53771
53772 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
53773 @@ -192,11 +217,20 @@ int is_vmalloc_or_module_addr(const void
53774 * and fall back on vmalloc() if that fails. Others
53775 * just put it in the vmalloc space.
53776 */
53777 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
53778 +#ifdef CONFIG_MODULES
53779 +#ifdef MODULES_VADDR
53780 unsigned long addr = (unsigned long)x;
53781 if (addr >= MODULES_VADDR && addr < MODULES_END)
53782 return 1;
53783 #endif
53784 +
53785 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
53786 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
53787 + return 1;
53788 +#endif
53789 +
53790 +#endif
53791 +
53792 return is_vmalloc_addr(x);
53793 }
53794
53795 @@ -217,8 +251,14 @@ struct page *vmalloc_to_page(const void
53796
53797 if (!pgd_none(*pgd)) {
53798 pud_t *pud = pud_offset(pgd, addr);
53799 +#ifdef CONFIG_X86
53800 + if (!pud_large(*pud))
53801 +#endif
53802 if (!pud_none(*pud)) {
53803 pmd_t *pmd = pmd_offset(pud, addr);
53804 +#ifdef CONFIG_X86
53805 + if (!pmd_large(*pmd))
53806 +#endif
53807 if (!pmd_none(*pmd)) {
53808 pte_t *ptep, pte;
53809
53810 @@ -292,13 +332,13 @@ static void __insert_vmap_area(struct vm
53811 struct rb_node *tmp;
53812
53813 while (*p) {
53814 - struct vmap_area *tmp;
53815 + struct vmap_area *varea;
53816
53817 parent = *p;
53818 - tmp = rb_entry(parent, struct vmap_area, rb_node);
53819 - if (va->va_start < tmp->va_end)
53820 + varea = rb_entry(parent, struct vmap_area, rb_node);
53821 + if (va->va_start < varea->va_end)
53822 p = &(*p)->rb_left;
53823 - else if (va->va_end > tmp->va_start)
53824 + else if (va->va_end > varea->va_start)
53825 p = &(*p)->rb_right;
53826 else
53827 BUG();
53828 @@ -1224,6 +1264,16 @@ static struct vm_struct *__get_vm_area_n
53829 struct vm_struct *area;
53830
53831 BUG_ON(in_interrupt());
53832 +
53833 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
53834 + if (flags & VM_KERNEXEC) {
53835 + if (start != VMALLOC_START || end != VMALLOC_END)
53836 + return NULL;
53837 + start = (unsigned long)MODULES_EXEC_VADDR;
53838 + end = (unsigned long)MODULES_EXEC_END;
53839 + }
53840 +#endif
53841 +
53842 if (flags & VM_IOREMAP) {
53843 int bit = fls(size);
53844
53845 @@ -1449,6 +1499,11 @@ void *vmap(struct page **pages, unsigned
53846 if (count > totalram_pages)
53847 return NULL;
53848
53849 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
53850 + if (!(pgprot_val(prot) & _PAGE_NX))
53851 + flags |= VM_KERNEXEC;
53852 +#endif
53853 +
53854 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
53855 __builtin_return_address(0));
53856 if (!area)
53857 @@ -1558,6 +1613,13 @@ static void *__vmalloc_node(unsigned lon
53858 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
53859 return NULL;
53860
53861 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
53862 + if (!(pgprot_val(prot) & _PAGE_NX))
53863 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
53864 + node, gfp_mask, caller);
53865 + else
53866 +#endif
53867 +
53868 area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
53869 VMALLOC_END, node, gfp_mask, caller);
53870
53871 @@ -1576,6 +1638,7 @@ static void *__vmalloc_node(unsigned lon
53872 return addr;
53873 }
53874
53875 +#undef __vmalloc
53876 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
53877 {
53878 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
53879 @@ -1592,6 +1655,7 @@ EXPORT_SYMBOL(__vmalloc);
53880 * For tight control over page level allocator and protection flags
53881 * use __vmalloc() instead.
53882 */
53883 +#undef vmalloc
53884 void *vmalloc(unsigned long size)
53885 {
53886 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
53887 @@ -1606,6 +1670,7 @@ EXPORT_SYMBOL(vmalloc);
53888 * The resulting memory area is zeroed so it can be mapped to userspace
53889 * without leaking data.
53890 */
53891 +#undef vmalloc_user
53892 void *vmalloc_user(unsigned long size)
53893 {
53894 struct vm_struct *area;
53895 @@ -1633,6 +1698,7 @@ EXPORT_SYMBOL(vmalloc_user);
53896 * For tight control over page level allocator and protection flags
53897 * use __vmalloc() instead.
53898 */
53899 +#undef vmalloc_node
53900 void *vmalloc_node(unsigned long size, int node)
53901 {
53902 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
53903 @@ -1655,10 +1721,10 @@ EXPORT_SYMBOL(vmalloc_node);
53904 * For tight control over page level allocator and protection flags
53905 * use __vmalloc() instead.
53906 */
53907 -
53908 +#undef vmalloc_exec
53909 void *vmalloc_exec(unsigned long size)
53910 {
53911 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
53912 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
53913 -1, __builtin_return_address(0));
53914 }
53915
53916 @@ -1677,6 +1743,7 @@ void *vmalloc_exec(unsigned long size)
53917 * Allocate enough 32bit PA addressable pages to cover @size from the
53918 * page level allocator and map them into contiguous kernel virtual space.
53919 */
53920 +#undef vmalloc_32
53921 void *vmalloc_32(unsigned long size)
53922 {
53923 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
53924 @@ -1691,6 +1758,7 @@ EXPORT_SYMBOL(vmalloc_32);
53925 * The resulting memory area is 32bit addressable and zeroed so it can be
53926 * mapped to userspace without leaking data.
53927 */
53928 +#undef vmalloc_32_user
53929 void *vmalloc_32_user(unsigned long size)
53930 {
53931 struct vm_struct *area;
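[annotation] The mm/vmalloc.c changes above are the KERNEXEC plumbing: kernel page-table updates are bracketed by pax_open_kernel()/pax_close_kernel(), and any vmalloc request that asks for executable memory (protection without _PAGE_NX) is redirected from the general VMALLOC range into the dedicated MODULES_EXEC region, so the rest of the vmalloc space can stay non-executable. The range-selection part reduces to something like the sketch below; the addresses are invented and the real constants are architecture-specific:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative layout only - not the real per-architecture constants. */
#define VMALLOC_START_      0xf0000000UL
#define VMALLOC_END_        0xff000000UL
#define MODULES_EXEC_VADDR_ 0xe0000000UL
#define MODULES_EXEC_END_   0xe8000000UL

struct range { unsigned long start, end; };

/* Pick where an allocation may live: executable mappings are confined to
 * the modules region, everything else stays in the ordinary vmalloc range. */
static struct range pick_vmalloc_range(bool executable)
{
    if (executable)
        return (struct range){ MODULES_EXEC_VADDR_, MODULES_EXEC_END_ };
    return (struct range){ VMALLOC_START_, VMALLOC_END_ };
}

int main(void)
{
    struct range data = pick_vmalloc_range(false);
    struct range code = pick_vmalloc_range(true);

    printf("data mappings: %#lx-%#lx\n", data.start, data.end);
    printf("exec mappings: %#lx-%#lx\n", code.start, code.end);
    return 0;
}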
53932 diff -urNp linux-2.6.35.4/mm/vmstat.c linux-2.6.35.4/mm/vmstat.c
53933 --- linux-2.6.35.4/mm/vmstat.c 2010-08-26 19:47:12.000000000 -0400
53934 +++ linux-2.6.35.4/mm/vmstat.c 2010-09-17 20:12:37.000000000 -0400
53935 @@ -76,7 +76,7 @@ void vm_events_fold_cpu(int cpu)
53936 *
53937 * vm_stat contains the global counters
53938 */
53939 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
53940 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
53941 EXPORT_SYMBOL(vm_stat);
53942
53943 #ifdef CONFIG_SMP
53944 @@ -315,7 +315,7 @@ void refresh_cpu_vm_stats(int cpu)
53945 v = p->vm_stat_diff[i];
53946 p->vm_stat_diff[i] = 0;
53947 local_irq_restore(flags);
53948 - atomic_long_add(v, &zone->vm_stat[i]);
53949 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
53950 global_diff[i] += v;
53951 #ifdef CONFIG_NUMA
53952 /* 3 seconds idle till flush */
53953 @@ -353,7 +353,7 @@ void refresh_cpu_vm_stats(int cpu)
53954
53955 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
53956 if (global_diff[i])
53957 - atomic_long_add(global_diff[i], &vm_stat[i]);
53958 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
53959 }
53960
53961 #endif
53962 @@ -1038,10 +1038,16 @@ static int __init setup_vmstat(void)
53963 start_cpu_timer(cpu);
53964 #endif
53965 #ifdef CONFIG_PROC_FS
53966 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
53967 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
53968 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
53969 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
53970 + {
53971 + mode_t gr_mode = S_IRUGO;
53972 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
53973 + gr_mode = S_IRUSR;
53974 +#endif
53975 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
53976 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
53977 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
53978 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
53979 + }
53980 #endif
53981 return 0;
53982 }
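[annotation] The vmstat hunk above repeats the GRKERNSEC_PROC_ADD pattern already seen in slab_proc_init(): the /proc entries stay world-readable by default but become root-only when the option is enabled. A compressed userspace sketch of that mode selection (the kernel's S_IRUGO is reconstructed here since userspace headers do not define it):

#include <stdio.h>
#include <sys/stat.h>

#ifndef S_IRUGO
#define S_IRUGO (S_IRUSR | S_IRGRP | S_IROTH)   /* 0444, as in the kernel */
#endif

/* Pick the permission bits for a /proc entry the way the patch does:
 * world-readable normally, root-only when GRKERNSEC_PROC_ADD is set. */
static mode_t proc_entry_mode(void)
{
#ifdef CONFIG_GRKERNSEC_PROC_ADD
    return S_IRUSR;     /* 0400 */
#else
    return S_IRUGO;     /* 0444 */
#endif
}

int main(void)
{
    printf("proc entry mode: %04o\n", (unsigned)proc_entry_mode());
    return 0;
}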
53983 diff -urNp linux-2.6.35.4/net/8021q/vlan.c linux-2.6.35.4/net/8021q/vlan.c
53984 --- linux-2.6.35.4/net/8021q/vlan.c 2010-08-26 19:47:12.000000000 -0400
53985 +++ linux-2.6.35.4/net/8021q/vlan.c 2010-09-17 20:12:09.000000000 -0400
53986 @@ -618,8 +618,7 @@ static int vlan_ioctl_handler(struct net
53987 err = -EPERM;
53988 if (!capable(CAP_NET_ADMIN))
53989 break;
53990 - if ((args.u.name_type >= 0) &&
53991 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
53992 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
53993 struct vlan_net *vn;
53994
53995 vn = net_generic(net, vlan_net_id);
53996 diff -urNp linux-2.6.35.4/net/atm/atm_misc.c linux-2.6.35.4/net/atm/atm_misc.c
53997 --- linux-2.6.35.4/net/atm/atm_misc.c 2010-08-26 19:47:12.000000000 -0400
53998 +++ linux-2.6.35.4/net/atm/atm_misc.c 2010-09-17 20:12:09.000000000 -0400
53999 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int
54000 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
54001 return 1;
54002 atm_return(vcc, truesize);
54003 - atomic_inc(&vcc->stats->rx_drop);
54004 + atomic_inc_unchecked(&vcc->stats->rx_drop);
54005 return 0;
54006 }
54007 EXPORT_SYMBOL(atm_charge);
54008 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct
54009 }
54010 }
54011 atm_return(vcc, guess);
54012 - atomic_inc(&vcc->stats->rx_drop);
54013 + atomic_inc_unchecked(&vcc->stats->rx_drop);
54014 return NULL;
54015 }
54016 EXPORT_SYMBOL(atm_alloc_charge);
54017 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
54018
54019 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
54020 {
54021 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
54022 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
54023 __SONET_ITEMS
54024 #undef __HANDLE_ITEM
54025 }
54026 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
54027
54028 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
54029 {
54030 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
54031 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
54032 __SONET_ITEMS
54033 #undef __HANDLE_ITEM
54034 }
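[annotation] The ATM statistics hunks above, like the vm_stat ones before them, switch pure counters to the patch's *_unchecked atomic variants: under PaX's refcount hardening the ordinary atomic ops trap on signed overflow, so counters that may legitimately wrap are moved to the unchecked flavour. The split can be mimicked roughly as below; this is only a conceptual stand-in (the operations are not actually atomic, and the abort() stands in for the PaX overflow trap):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* "Checked" add: refuses signed overflow, as a hardened refcount would. */
static void add_checked(int *v, int delta)
{
    if ((delta > 0 && *v > INT_MAX - delta) ||
        (delta < 0 && *v < INT_MIN - delta)) {
        fprintf(stderr, "refcount overflow detected\n");
        abort();
    }
    *v += delta;
}

/* "Unchecked" add: plain wrap-around, acceptable for statistics counters. */
static void add_unchecked(int *v, int delta)
{
    unsigned int u = (unsigned int)*v + (unsigned int)delta;
    *v = (int)u;
}

int main(void)
{
    int rx_drop = INT_MAX;
    add_unchecked(&rx_drop, 1);              /* a counter is allowed to wrap */
    printf("rx_drop after wrap: %d\n", rx_drop);

    int refcount = 1;
    add_checked(&refcount, 1);               /* normal case, no trap */
    printf("refcount: %d\n", refcount);
    return 0;
}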
54035 diff -urNp linux-2.6.35.4/net/atm/proc.c linux-2.6.35.4/net/atm/proc.c
54036 --- linux-2.6.35.4/net/atm/proc.c 2010-08-26 19:47:12.000000000 -0400
54037 +++ linux-2.6.35.4/net/atm/proc.c 2010-09-17 20:12:37.000000000 -0400
54038 @@ -44,9 +44,9 @@ static void add_stats(struct seq_file *s
54039 const struct k_atm_aal_stats *stats)
54040 {
54041 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
54042 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
54043 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
54044 - atomic_read(&stats->rx_drop));
54045 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
54046 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
54047 + atomic_read_unchecked(&stats->rx_drop));
54048 }
54049
54050 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
54051 @@ -190,7 +190,12 @@ static void vcc_info(struct seq_file *se
54052 {
54053 struct sock *sk = sk_atm(vcc);
54054
54055 +#ifdef CONFIG_GRKERNSEC_HIDESYM
54056 + seq_printf(seq, "%p ", NULL);
54057 +#else
54058 seq_printf(seq, "%p ", vcc);
54059 +#endif
54060 +
54061 if (!vcc->dev)
54062 seq_printf(seq, "Unassigned ");
54063 else
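[annotation] The net/atm/proc.c hunk above is the GRKERNSEC_HIDESYM idiom repeated throughout this patch: wherever a kernel object address would be printed with %p into /proc or another seq_file, the pointer is replaced by NULL so unprivileged readers cannot harvest kernel addresses. In miniature, with printf standing in for seq_printf and an invented struct:

#include <stdio.h>

struct vcc { int id; };   /* illustrative only */

static void vcc_info(const struct vcc *vcc)
{
#ifdef CONFIG_GRKERNSEC_HIDESYM
    printf("%p ", (void *)NULL);   /* hide the object's address */
#else
    printf("%p ", (void *)vcc);
#endif
    printf("id=%d\n", vcc->id);
}

int main(void)
{
    struct vcc v = { .id = 7 };
    vcc_info(&v);
    return 0;
}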
54064 diff -urNp linux-2.6.35.4/net/atm/resources.c linux-2.6.35.4/net/atm/resources.c
54065 --- linux-2.6.35.4/net/atm/resources.c 2010-08-26 19:47:12.000000000 -0400
54066 +++ linux-2.6.35.4/net/atm/resources.c 2010-09-17 20:12:09.000000000 -0400
54067 @@ -159,7 +159,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
54068 static void copy_aal_stats(struct k_atm_aal_stats *from,
54069 struct atm_aal_stats *to)
54070 {
54071 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
54072 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
54073 __AAL_STAT_ITEMS
54074 #undef __HANDLE_ITEM
54075 }
54076 @@ -167,7 +167,7 @@ static void copy_aal_stats(struct k_atm_
54077 static void subtract_aal_stats(struct k_atm_aal_stats *from,
54078 struct atm_aal_stats *to)
54079 {
54080 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
54081 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
54082 __AAL_STAT_ITEMS
54083 #undef __HANDLE_ITEM
54084 }
54085 diff -urNp linux-2.6.35.4/net/bridge/br_stp_if.c linux-2.6.35.4/net/bridge/br_stp_if.c
54086 --- linux-2.6.35.4/net/bridge/br_stp_if.c 2010-08-26 19:47:12.000000000 -0400
54087 +++ linux-2.6.35.4/net/bridge/br_stp_if.c 2010-09-17 20:12:09.000000000 -0400
54088 @@ -145,7 +145,7 @@ static void br_stp_stop(struct net_bridg
54089 char *envp[] = { NULL };
54090
54091 if (br->stp_enabled == BR_USER_STP) {
54092 - r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
54093 + r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
54094 br_info(br, "userspace STP stopped, return code %d\n", r);
54095
54096 /* To start timers on any ports left in blocking */
54097 diff -urNp linux-2.6.35.4/net/bridge/netfilter/ebtables.c linux-2.6.35.4/net/bridge/netfilter/ebtables.c
54098 --- linux-2.6.35.4/net/bridge/netfilter/ebtables.c 2010-08-26 19:47:12.000000000 -0400
54099 +++ linux-2.6.35.4/net/bridge/netfilter/ebtables.c 2010-09-17 20:12:09.000000000 -0400
54100 @@ -1501,7 +1501,7 @@ static int do_ebt_get_ctl(struct sock *s
54101 tmp.valid_hooks = t->table->valid_hooks;
54102 }
54103 mutex_unlock(&ebt_mutex);
54104 - if (copy_to_user(user, &tmp, *len) != 0){
54105 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
54106 BUGPRINT("c2u Didn't work\n");
54107 ret = -EFAULT;
54108 break;
54109 diff -urNp linux-2.6.35.4/net/core/dev.c linux-2.6.35.4/net/core/dev.c
54110 --- linux-2.6.35.4/net/core/dev.c 2010-08-26 19:47:12.000000000 -0400
54111 +++ linux-2.6.35.4/net/core/dev.c 2010-09-17 20:12:09.000000000 -0400
54112 @@ -2541,7 +2541,7 @@ int netif_rx_ni(struct sk_buff *skb)
54113 }
54114 EXPORT_SYMBOL(netif_rx_ni);
54115
54116 -static void net_tx_action(struct softirq_action *h)
54117 +static void net_tx_action(void)
54118 {
54119 struct softnet_data *sd = &__get_cpu_var(softnet_data);
54120
54121 @@ -3474,7 +3474,7 @@ void netif_napi_del(struct napi_struct *
54122 }
54123 EXPORT_SYMBOL(netif_napi_del);
54124
54125 -static void net_rx_action(struct softirq_action *h)
54126 +static void net_rx_action(void)
54127 {
54128 struct softnet_data *sd = &__get_cpu_var(softnet_data);
54129 unsigned long time_limit = jiffies + 2;
54130 diff -urNp linux-2.6.35.4/net/core/net-sysfs.c linux-2.6.35.4/net/core/net-sysfs.c
54131 --- linux-2.6.35.4/net/core/net-sysfs.c 2010-08-26 19:47:12.000000000 -0400
54132 +++ linux-2.6.35.4/net/core/net-sysfs.c 2010-09-17 20:12:09.000000000 -0400
54133 @@ -511,7 +511,7 @@ static ssize_t rx_queue_attr_store(struc
54134 return attribute->store(queue, attribute, buf, count);
54135 }
54136
54137 -static struct sysfs_ops rx_queue_sysfs_ops = {
54138 +static const struct sysfs_ops rx_queue_sysfs_ops = {
54139 .show = rx_queue_attr_show,
54140 .store = rx_queue_attr_store,
54141 };
54142 diff -urNp linux-2.6.35.4/net/core/sock.c linux-2.6.35.4/net/core/sock.c
54143 --- linux-2.6.35.4/net/core/sock.c 2010-08-26 19:47:12.000000000 -0400
54144 +++ linux-2.6.35.4/net/core/sock.c 2010-09-17 20:12:09.000000000 -0400
54145 @@ -915,7 +915,7 @@ int sock_getsockopt(struct socket *sock,
54146 return -ENOTCONN;
54147 if (lv < len)
54148 return -EINVAL;
54149 - if (copy_to_user(optval, address, len))
54150 + if (len > sizeof(address) || copy_to_user(optval, address, len))
54151 return -EFAULT;
54152 goto lenout;
54153 }
54154 @@ -948,7 +948,7 @@ int sock_getsockopt(struct socket *sock,
54155
54156 if (len > lv)
54157 len = lv;
54158 - if (copy_to_user(optval, &v, len))
54159 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
54160 return -EFAULT;
54161 lenout:
54162 if (put_user(len, optlen))
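[annotation] The sock_getsockopt hunks add a "len > sizeof(...)" guard in front of copy_to_user, the same defensive clamp applied in ebtables, the DECnet sysctls and tcp_probe elsewhere in this patch: even if a length derived from user input exceeds the kernel-side buffer, the copy is refused instead of over-reading adjacent memory. A userspace analogue with memcpy standing in for copy_to_user:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Copy at most src_size bytes of a kernel-side value to the caller.
 * Mirrors "if (len > sizeof(v) || copy_to_user(optval, &v, len))". */
static int copy_value_out(void *dst, const void *src, size_t src_size, size_t len)
{
    if (len > src_size)
        return -EFAULT;        /* refuse rather than over-read src */
    memcpy(dst, src, len);
    return 0;
}

int main(void)
{
    int v = 42;
    char out[64];

    printf("len=4:   %d\n", copy_value_out(out, &v, sizeof(v), 4));
    printf("len=400: %d\n", copy_value_out(out, &v, sizeof(v), 400));   /* -EFAULT */
    return 0;
}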
54163 diff -urNp linux-2.6.35.4/net/dccp/ccids/ccid3.c linux-2.6.35.4/net/dccp/ccids/ccid3.c
54164 --- linux-2.6.35.4/net/dccp/ccids/ccid3.c 2010-08-26 19:47:12.000000000 -0400
54165 +++ linux-2.6.35.4/net/dccp/ccids/ccid3.c 2010-09-17 20:12:09.000000000 -0400
54166 @@ -41,7 +41,7 @@
54167 static int ccid3_debug;
54168 #define ccid3_pr_debug(format, a...) DCCP_PR_DEBUG(ccid3_debug, format, ##a)
54169 #else
54170 -#define ccid3_pr_debug(format, a...)
54171 +#define ccid3_pr_debug(format, a...) do {} while (0)
54172 #endif
54173
54174 /*
54175 diff -urNp linux-2.6.35.4/net/dccp/dccp.h linux-2.6.35.4/net/dccp/dccp.h
54176 --- linux-2.6.35.4/net/dccp/dccp.h 2010-08-26 19:47:12.000000000 -0400
54177 +++ linux-2.6.35.4/net/dccp/dccp.h 2010-09-17 20:12:09.000000000 -0400
54178 @@ -44,9 +44,9 @@ extern int dccp_debug;
54179 #define dccp_pr_debug_cat(format, a...) DCCP_PRINTK(dccp_debug, format, ##a)
54180 #define dccp_debug(fmt, a...) dccp_pr_debug_cat(KERN_DEBUG fmt, ##a)
54181 #else
54182 -#define dccp_pr_debug(format, a...)
54183 -#define dccp_pr_debug_cat(format, a...)
54184 -#define dccp_debug(format, a...)
54185 +#define dccp_pr_debug(format, a...) do {} while (0)
54186 +#define dccp_pr_debug_cat(format, a...) do {} while (0)
54187 +#define dccp_debug(format, a...) do {} while (0)
54188 #endif
54189
54190 extern struct inet_hashinfo dccp_hashinfo;
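[annotation] Replacing the empty DCCP debug macros with "do {} while (0)" is a robustness fix rather than a hardening one: the call site is always exactly one statement that needs its trailing semicolon, so the no-debug build parses the same way as the debug build and never collapses to a bare ";" that draws empty-body warnings. A small demonstration (build with gcc -Wall -Wextra to see the warning on the old form only):

#include <stdio.h>

/* Disabled debug macro, old style: expands to nothing at all. */
#define dccp_pr_debug_old(fmt, ...)

/* Disabled debug macro, patched style: still a complete statement. */
#define dccp_pr_debug(fmt, ...) do {} while (0)

int main(void)
{
    int err = 1;

    /* Old macro: this becomes "if (err) ;" - an empty if body that
     * gcc -Wextra flags and that parses unlike the debug-enabled build. */
    if (err)
        dccp_pr_debug_old("old-style, empty expansion\n");

    /* Patched macro: the body is a real (empty) statement, so the
     * construct parses identically whether debugging is on or off. */
    if (err)
        dccp_pr_debug("patched, do/while(0) expansion\n");
    else
        printf("no error\n");

    return 0;
}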
54191 diff -urNp linux-2.6.35.4/net/decnet/sysctl_net_decnet.c linux-2.6.35.4/net/decnet/sysctl_net_decnet.c
54192 --- linux-2.6.35.4/net/decnet/sysctl_net_decnet.c 2010-08-26 19:47:12.000000000 -0400
54193 +++ linux-2.6.35.4/net/decnet/sysctl_net_decnet.c 2010-09-17 20:12:37.000000000 -0400
54194 @@ -173,7 +173,7 @@ static int dn_node_address_handler(ctl_t
54195
54196 if (len > *lenp) len = *lenp;
54197
54198 - if (copy_to_user(buffer, addr, len))
54199 + if (len > sizeof(addr) || copy_to_user(buffer, addr, len))
54200 return -EFAULT;
54201
54202 *lenp = len;
54203 @@ -236,7 +236,7 @@ static int dn_def_dev_handler(ctl_table
54204
54205 if (len > *lenp) len = *lenp;
54206
54207 - if (copy_to_user(buffer, devname, len))
54208 + if (len > sizeof(devname) || copy_to_user(buffer, devname, len))
54209 return -EFAULT;
54210
54211 *lenp = len;
54212 diff -urNp linux-2.6.35.4/net/ipv4/inet_hashtables.c linux-2.6.35.4/net/ipv4/inet_hashtables.c
54213 --- linux-2.6.35.4/net/ipv4/inet_hashtables.c 2010-08-26 19:47:12.000000000 -0400
54214 +++ linux-2.6.35.4/net/ipv4/inet_hashtables.c 2010-09-17 20:12:37.000000000 -0400
54215 @@ -18,11 +18,14 @@
54216 #include <linux/sched.h>
54217 #include <linux/slab.h>
54218 #include <linux/wait.h>
54219 +#include <linux/security.h>
54220
54221 #include <net/inet_connection_sock.h>
54222 #include <net/inet_hashtables.h>
54223 #include <net/ip.h>
54224
54225 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
54226 +
54227 /*
54228 * Allocate and initialize a new local port bind bucket.
54229 * The bindhash mutex for snum's hash chain must be held here.
54230 @@ -508,6 +511,8 @@ ok:
54231 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
54232 spin_unlock(&head->lock);
54233
54234 + gr_update_task_in_ip_table(current, inet_sk(sk));
54235 +
54236 if (tw) {
54237 inet_twsk_deschedule(tw, death_row);
54238 while (twrefcnt) {
54239 diff -urNp linux-2.6.35.4/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-2.6.35.4/net/ipv4/netfilter/nf_nat_snmp_basic.c
54240 --- linux-2.6.35.4/net/ipv4/netfilter/nf_nat_snmp_basic.c 2010-08-26 19:47:12.000000000 -0400
54241 +++ linux-2.6.35.4/net/ipv4/netfilter/nf_nat_snmp_basic.c 2010-09-17 20:12:09.000000000 -0400
54242 @@ -398,7 +398,7 @@ static unsigned char asn1_octets_decode(
54243
54244 *len = 0;
54245
54246 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
54247 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
54248 if (*octets == NULL) {
54249 if (net_ratelimit())
54250 pr_notice("OOM in bsalg (%d)\n", __LINE__);
54251 diff -urNp linux-2.6.35.4/net/ipv4/tcp_ipv4.c linux-2.6.35.4/net/ipv4/tcp_ipv4.c
54252 --- linux-2.6.35.4/net/ipv4/tcp_ipv4.c 2010-08-26 19:47:12.000000000 -0400
54253 +++ linux-2.6.35.4/net/ipv4/tcp_ipv4.c 2010-09-17 20:12:37.000000000 -0400
54254 @@ -85,6 +85,9 @@
54255 int sysctl_tcp_tw_reuse __read_mostly;
54256 int sysctl_tcp_low_latency __read_mostly;
54257
54258 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
54259 +extern int grsec_enable_blackhole;
54260 +#endif
54261
54262 #ifdef CONFIG_TCP_MD5SIG
54263 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
54264 @@ -1593,6 +1596,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
54265 return 0;
54266
54267 reset:
54268 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
54269 + if (!grsec_enable_blackhole)
54270 +#endif
54271 tcp_v4_send_reset(rsk, skb);
54272 discard:
54273 kfree_skb(skb);
54274 @@ -1654,12 +1660,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
54275 TCP_SKB_CB(skb)->sacked = 0;
54276
54277 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
54278 - if (!sk)
54279 + if (!sk) {
54280 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
54281 + ret = 1;
54282 +#endif
54283 goto no_tcp_socket;
54284 -
54285 + }
54286 process:
54287 - if (sk->sk_state == TCP_TIME_WAIT)
54288 + if (sk->sk_state == TCP_TIME_WAIT) {
54289 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
54290 + ret = 2;
54291 +#endif
54292 goto do_time_wait;
54293 + }
54294
54295 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
54296 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
54297 @@ -1709,6 +1722,10 @@ no_tcp_socket:
54298 bad_packet:
54299 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
54300 } else {
54301 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
54302 + if (!grsec_enable_blackhole || (ret == 1 &&
54303 + (skb->dev->flags & IFF_LOOPBACK)))
54304 +#endif
54305 tcp_v4_send_reset(NULL, skb);
54306 }
54307
54308 @@ -2316,7 +2333,11 @@ static void get_openreq4(struct sock *sk
54309 0, /* non standard timer */
54310 0, /* open_requests have no inode */
54311 atomic_read(&sk->sk_refcnt),
54312 +#ifdef CONFIG_GRKERNSEC_HIDESYM
54313 + NULL,
54314 +#else
54315 req,
54316 +#endif
54317 len);
54318 }
54319
54320 @@ -2366,7 +2387,12 @@ static void get_tcp4_sock(struct sock *s
54321 sock_i_uid(sk),
54322 icsk->icsk_probes_out,
54323 sock_i_ino(sk),
54324 - atomic_read(&sk->sk_refcnt), sk,
54325 + atomic_read(&sk->sk_refcnt),
54326 +#ifdef CONFIG_GRKERNSEC_HIDESYM
54327 + NULL,
54328 +#else
54329 + sk,
54330 +#endif
54331 jiffies_to_clock_t(icsk->icsk_rto),
54332 jiffies_to_clock_t(icsk->icsk_ack.ato),
54333 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
54334 @@ -2394,7 +2420,13 @@ static void get_timewait4_sock(struct in
54335 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
54336 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
54337 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
54338 - atomic_read(&tw->tw_refcnt), tw, len);
54339 + atomic_read(&tw->tw_refcnt),
54340 +#ifdef CONFIG_GRKERNSEC_HIDESYM
54341 + NULL,
54342 +#else
54343 + tw,
54344 +#endif
54345 + len);
54346 }
54347
54348 #define TMPSZ 150
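[annotation] The tcp_ipv4.c hunks above implement GRKERNSEC_BLACKHOLE: when the knob is enabled, segments that match no socket are dropped silently instead of being answered with a RST (the UDP and IPv6 hunks later do the same for ICMP port-unreachable), which makes port scans slower and less informative, while the loopback exception keeps local diagnostics working. A stripped-down model of the decision, omitting the ret-code bookkeeping from the real code:

#include <stdbool.h>
#include <stdio.h>

static int grsec_enable_blackhole = 1;   /* sysctl-style knob, illustrative */

/* Decide whether to answer an unexpected TCP segment with a RST:
 * stay silent when blackholing is on, unless the packet came in on loopback. */
static bool should_send_reset(bool from_loopback)
{
    if (grsec_enable_blackhole && !from_loopback)
        return false;   /* drop silently - the scanner learns nothing */
    return true;
}

int main(void)
{
    printf("external probe -> send RST? %s\n",
           should_send_reset(false) ? "yes" : "no");
    printf("loopback probe -> send RST? %s\n",
           should_send_reset(true) ? "yes" : "no");
    return 0;
}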
54349 diff -urNp linux-2.6.35.4/net/ipv4/tcp_minisocks.c linux-2.6.35.4/net/ipv4/tcp_minisocks.c
54350 --- linux-2.6.35.4/net/ipv4/tcp_minisocks.c 2010-08-26 19:47:12.000000000 -0400
54351 +++ linux-2.6.35.4/net/ipv4/tcp_minisocks.c 2010-09-17 20:12:37.000000000 -0400
54352 @@ -27,6 +27,10 @@
54353 #include <net/inet_common.h>
54354 #include <net/xfrm.h>
54355
54356 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
54357 +extern int grsec_enable_blackhole;
54358 +#endif
54359 +
54360 int sysctl_tcp_syncookies __read_mostly = 1;
54361 EXPORT_SYMBOL(sysctl_tcp_syncookies);
54362
54363 @@ -700,6 +704,10 @@ listen_overflow:
54364
54365 embryonic_reset:
54366 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
54367 +
54368 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
54369 + if (!grsec_enable_blackhole)
54370 +#endif
54371 if (!(flg & TCP_FLAG_RST))
54372 req->rsk_ops->send_reset(sk, skb);
54373
54374 diff -urNp linux-2.6.35.4/net/ipv4/tcp_probe.c linux-2.6.35.4/net/ipv4/tcp_probe.c
54375 --- linux-2.6.35.4/net/ipv4/tcp_probe.c 2010-08-26 19:47:12.000000000 -0400
54376 +++ linux-2.6.35.4/net/ipv4/tcp_probe.c 2010-09-17 20:12:37.000000000 -0400
54377 @@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file
54378 if (cnt + width >= len)
54379 break;
54380
54381 - if (copy_to_user(buf + cnt, tbuf, width))
54382 + if (width > sizeof(tbuf) || copy_to_user(buf + cnt, tbuf, width))
54383 return -EFAULT;
54384 cnt += width;
54385 }
54386 diff -urNp linux-2.6.35.4/net/ipv4/tcp_timer.c linux-2.6.35.4/net/ipv4/tcp_timer.c
54387 --- linux-2.6.35.4/net/ipv4/tcp_timer.c 2010-08-26 19:47:12.000000000 -0400
54388 +++ linux-2.6.35.4/net/ipv4/tcp_timer.c 2010-09-17 20:12:37.000000000 -0400
54389 @@ -22,6 +22,10 @@
54390 #include <linux/gfp.h>
54391 #include <net/tcp.h>
54392
54393 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
54394 +extern int grsec_lastack_retries;
54395 +#endif
54396 +
54397 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
54398 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
54399 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
54400 @@ -195,6 +199,13 @@ static int tcp_write_timeout(struct sock
54401 }
54402 }
54403
54404 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
54405 + if ((sk->sk_state == TCP_LAST_ACK) &&
54406 + (grsec_lastack_retries > 0) &&
54407 + (grsec_lastack_retries < retry_until))
54408 + retry_until = grsec_lastack_retries;
54409 +#endif
54410 +
54411 if (retransmits_timed_out(sk, retry_until)) {
54412 /* Has it gone just too far? */
54413 tcp_write_err(sk);
54414 diff -urNp linux-2.6.35.4/net/ipv4/udp.c linux-2.6.35.4/net/ipv4/udp.c
54415 --- linux-2.6.35.4/net/ipv4/udp.c 2010-08-26 19:47:12.000000000 -0400
54416 +++ linux-2.6.35.4/net/ipv4/udp.c 2010-09-17 20:12:37.000000000 -0400
54417 @@ -86,6 +86,7 @@
54418 #include <linux/types.h>
54419 #include <linux/fcntl.h>
54420 #include <linux/module.h>
54421 +#include <linux/security.h>
54422 #include <linux/socket.h>
54423 #include <linux/sockios.h>
54424 #include <linux/igmp.h>
54425 @@ -107,6 +108,10 @@
54426 #include <net/xfrm.h>
54427 #include "udp_impl.h"
54428
54429 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
54430 +extern int grsec_enable_blackhole;
54431 +#endif
54432 +
54433 struct udp_table udp_table __read_mostly;
54434 EXPORT_SYMBOL(udp_table);
54435
54436 @@ -564,6 +569,9 @@ found:
54437 return s;
54438 }
54439
54440 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
54441 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
54442 +
54443 /*
54444 * This routine is called by the ICMP module when it gets some
54445 * sort of error condition. If err < 0 then the socket should
54446 @@ -832,9 +840,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
54447 dport = usin->sin_port;
54448 if (dport == 0)
54449 return -EINVAL;
54450 +
54451 + err = gr_search_udp_sendmsg(sk, usin);
54452 + if (err)
54453 + return err;
54454 } else {
54455 if (sk->sk_state != TCP_ESTABLISHED)
54456 return -EDESTADDRREQ;
54457 +
54458 + err = gr_search_udp_sendmsg(sk, NULL);
54459 + if (err)
54460 + return err;
54461 +
54462 daddr = inet->inet_daddr;
54463 dport = inet->inet_dport;
54464 /* Open fast path for connected socket.
54465 @@ -1141,6 +1158,10 @@ try_again:
54466 if (!skb)
54467 goto out;
54468
54469 + err = gr_search_udp_recvmsg(sk, skb);
54470 + if (err)
54471 + goto out_free;
54472 +
54473 ulen = skb->len - sizeof(struct udphdr);
54474 if (len > ulen)
54475 len = ulen;
54476 @@ -1582,6 +1603,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
54477 goto csum_error;
54478
54479 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
54480 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
54481 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
54482 +#endif
54483 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
54484
54485 /*
54486 @@ -2007,7 +2031,12 @@ static void udp4_format_sock(struct sock
54487 sk_wmem_alloc_get(sp),
54488 sk_rmem_alloc_get(sp),
54489 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
54490 - atomic_read(&sp->sk_refcnt), sp,
54491 + atomic_read(&sp->sk_refcnt),
54492 +#ifdef CONFIG_GRKERNSEC_HIDESYM
54493 + NULL,
54494 +#else
54495 + sp,
54496 +#endif
54497 atomic_read(&sp->sk_drops), len);
54498 }
54499
54500 diff -urNp linux-2.6.35.4/net/ipv6/exthdrs.c linux-2.6.35.4/net/ipv6/exthdrs.c
54501 --- linux-2.6.35.4/net/ipv6/exthdrs.c 2010-08-26 19:47:12.000000000 -0400
54502 +++ linux-2.6.35.4/net/ipv6/exthdrs.c 2010-09-17 20:12:09.000000000 -0400
54503 @@ -636,7 +636,7 @@ static struct tlvtype_proc tlvprochopopt
54504 .type = IPV6_TLV_JUMBO,
54505 .func = ipv6_hop_jumbo,
54506 },
54507 - { -1, }
54508 + { -1, NULL }
54509 };
54510
54511 int ipv6_parse_hopopts(struct sk_buff *skb)
54512 diff -urNp linux-2.6.35.4/net/ipv6/raw.c linux-2.6.35.4/net/ipv6/raw.c
54513 --- linux-2.6.35.4/net/ipv6/raw.c 2010-08-26 19:47:12.000000000 -0400
54514 +++ linux-2.6.35.4/net/ipv6/raw.c 2010-09-17 20:12:09.000000000 -0400
54515 @@ -601,7 +601,7 @@ out:
54516 return err;
54517 }
54518
54519 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
54520 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
54521 struct flowi *fl, struct rt6_info *rt,
54522 unsigned int flags)
54523 {
54524 diff -urNp linux-2.6.35.4/net/ipv6/tcp_ipv6.c linux-2.6.35.4/net/ipv6/tcp_ipv6.c
54525 --- linux-2.6.35.4/net/ipv6/tcp_ipv6.c 2010-08-26 19:47:12.000000000 -0400
54526 +++ linux-2.6.35.4/net/ipv6/tcp_ipv6.c 2010-09-17 20:23:25.000000000 -0400
54527 @@ -92,6 +92,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
54528 }
54529 #endif
54530
54531 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
54532 +extern int grsec_enable_blackhole;
54533 +#endif
54534 +
54535 static void tcp_v6_hash(struct sock *sk)
54536 {
54537 if (sk->sk_state != TCP_CLOSE) {
54538 @@ -1641,6 +1645,9 @@ static int tcp_v6_do_rcv(struct sock *sk
54539 return 0;
54540
54541 reset:
54542 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
54543 + if (!grsec_enable_blackhole)
54544 +#endif
54545 tcp_v6_send_reset(sk, skb);
54546 discard:
54547 if (opt_skb)
54548 @@ -1720,12 +1727,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
54549 TCP_SKB_CB(skb)->sacked = 0;
54550
54551 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
54552 - if (!sk)
54553 + if (!sk) {
54554 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
54555 + ret = 1;
54556 +#endif
54557 goto no_tcp_socket;
54558 + }
54559
54560 process:
54561 - if (sk->sk_state == TCP_TIME_WAIT)
54562 + if (sk->sk_state == TCP_TIME_WAIT) {
54563 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
54564 + ret = 2;
54565 +#endif
54566 goto do_time_wait;
54567 + }
54568
54569 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
54570 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
54571 @@ -1773,6 +1788,10 @@ no_tcp_socket:
54572 bad_packet:
54573 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
54574 } else {
54575 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
54576 + if (!grsec_enable_blackhole || (ret == 1 &&
54577 + (skb->dev->flags & IFF_LOOPBACK)))
54578 +#endif
54579 tcp_v6_send_reset(NULL, skb);
54580 }
54581
54582 diff -urNp linux-2.6.35.4/net/ipv6/udp.c linux-2.6.35.4/net/ipv6/udp.c
54583 --- linux-2.6.35.4/net/ipv6/udp.c 2010-08-26 19:47:12.000000000 -0400
54584 +++ linux-2.6.35.4/net/ipv6/udp.c 2010-09-17 20:12:37.000000000 -0400
54585 @@ -50,6 +50,10 @@
54586 #include <linux/seq_file.h>
54587 #include "udp_impl.h"
54588
54589 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
54590 +extern int grsec_enable_blackhole;
54591 +#endif
54592 +
54593 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
54594 {
54595 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
54596 @@ -756,6 +760,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
54597 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
54598 proto == IPPROTO_UDPLITE);
54599
54600 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
54601 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
54602 +#endif
54603 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
54604
54605 kfree_skb(skb);
54606 diff -urNp linux-2.6.35.4/net/irda/ircomm/ircomm_tty.c linux-2.6.35.4/net/irda/ircomm/ircomm_tty.c
54607 --- linux-2.6.35.4/net/irda/ircomm/ircomm_tty.c 2010-08-26 19:47:12.000000000 -0400
54608 +++ linux-2.6.35.4/net/irda/ircomm/ircomm_tty.c 2010-09-17 20:12:09.000000000 -0400
54609 @@ -281,16 +281,16 @@ static int ircomm_tty_block_til_ready(st
54610 add_wait_queue(&self->open_wait, &wait);
54611
54612 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
54613 - __FILE__,__LINE__, tty->driver->name, self->open_count );
54614 + __FILE__,__LINE__, tty->driver->name, atomic_read(&self->open_count) );
54615
54616 /* As far as I can see, we protect open_count - Jean II */
54617 spin_lock_irqsave(&self->spinlock, flags);
54618 if (!tty_hung_up_p(filp)) {
54619 extra_count = 1;
54620 - self->open_count--;
54621 + atomic_dec(&self->open_count);
54622 }
54623 spin_unlock_irqrestore(&self->spinlock, flags);
54624 - self->blocked_open++;
54625 + atomic_inc(&self->blocked_open);
54626
54627 while (1) {
54628 if (tty->termios->c_cflag & CBAUD) {
54629 @@ -330,7 +330,7 @@ static int ircomm_tty_block_til_ready(st
54630 }
54631
54632 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
54633 - __FILE__,__LINE__, tty->driver->name, self->open_count );
54634 + __FILE__,__LINE__, tty->driver->name, atomic_read(&self->open_count) );
54635
54636 schedule();
54637 }
54638 @@ -341,13 +341,13 @@ static int ircomm_tty_block_til_ready(st
54639 if (extra_count) {
54640 /* ++ is not atomic, so this should be protected - Jean II */
54641 spin_lock_irqsave(&self->spinlock, flags);
54642 - self->open_count++;
54643 + atomic_inc(&self->open_count);
54644 spin_unlock_irqrestore(&self->spinlock, flags);
54645 }
54646 - self->blocked_open--;
54647 + atomic_dec(&self->blocked_open);
54648
54649 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
54650 - __FILE__,__LINE__, tty->driver->name, self->open_count);
54651 + __FILE__,__LINE__, tty->driver->name, atomic_read(&self->open_count));
54652
54653 if (!retval)
54654 self->flags |= ASYNC_NORMAL_ACTIVE;
54655 @@ -416,14 +416,14 @@ static int ircomm_tty_open(struct tty_st
54656 }
54657 /* ++ is not atomic, so this should be protected - Jean II */
54658 spin_lock_irqsave(&self->spinlock, flags);
54659 - self->open_count++;
54660 + atomic_inc(&self->open_count);
54661
54662 tty->driver_data = self;
54663 self->tty = tty;
54664 spin_unlock_irqrestore(&self->spinlock, flags);
54665
54666 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
54667 - self->line, self->open_count);
54668 + self->line, atomic_read(&self->open_count));
54669
54670 /* Not really used by us, but lets do it anyway */
54671 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
54672 @@ -509,7 +509,7 @@ static void ircomm_tty_close(struct tty_
54673 return;
54674 }
54675
54676 - if ((tty->count == 1) && (self->open_count != 1)) {
54677 + if ((tty->count == 1) && (atomic_read(&self->open_count) != 1)) {
54678 /*
54679 * Uh, oh. tty->count is 1, which means that the tty
54680 * structure will be freed. state->count should always
54681 @@ -519,16 +519,16 @@ static void ircomm_tty_close(struct tty_
54682 */
54683 IRDA_DEBUG(0, "%s(), bad serial port count; "
54684 "tty->count is 1, state->count is %d\n", __func__ ,
54685 - self->open_count);
54686 - self->open_count = 1;
54687 + atomic_read(&self->open_count));
54688 + atomic_set(&self->open_count, 1);
54689 }
54690
54691 - if (--self->open_count < 0) {
54692 + if (atomic_dec_return(&self->open_count) < 0) {
54693 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
54694 - __func__, self->line, self->open_count);
54695 - self->open_count = 0;
54696 + __func__, self->line, atomic_read(&self->open_count));
54697 + atomic_set(&self->open_count, 0);
54698 }
54699 - if (self->open_count) {
54700 + if (atomic_read(&self->open_count)) {
54701 spin_unlock_irqrestore(&self->spinlock, flags);
54702
54703 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
54704 @@ -560,7 +560,7 @@ static void ircomm_tty_close(struct tty_
54705 tty->closing = 0;
54706 self->tty = NULL;
54707
54708 - if (self->blocked_open) {
54709 + if (atomic_read(&self->blocked_open)) {
54710 if (self->close_delay)
54711 schedule_timeout_interruptible(self->close_delay);
54712 wake_up_interruptible(&self->open_wait);
54713 @@ -1012,7 +1012,7 @@ static void ircomm_tty_hangup(struct tty
54714 spin_lock_irqsave(&self->spinlock, flags);
54715 self->flags &= ~ASYNC_NORMAL_ACTIVE;
54716 self->tty = NULL;
54717 - self->open_count = 0;
54718 + atomic_set(&self->open_count, 0);
54719 spin_unlock_irqrestore(&self->spinlock, flags);
54720
54721 wake_up_interruptible(&self->open_wait);
54722 @@ -1364,7 +1364,7 @@ static void ircomm_tty_line_info(struct
54723 seq_putc(m, '\n');
54724
54725 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
54726 - seq_printf(m, "Open count: %d\n", self->open_count);
54727 + seq_printf(m, "Open count: %d\n", atomic_read(&self->open_count));
54728 seq_printf(m, "Max data size: %d\n", self->max_data_size);
54729 seq_printf(m, "Max header size: %d\n", self->max_header_size);
54730
54731 diff -urNp linux-2.6.35.4/net/key/af_key.c linux-2.6.35.4/net/key/af_key.c
54732 --- linux-2.6.35.4/net/key/af_key.c 2010-08-26 19:47:12.000000000 -0400
54733 +++ linux-2.6.35.4/net/key/af_key.c 2010-09-17 20:12:37.000000000 -0400
54734 @@ -3644,7 +3644,11 @@ static int pfkey_seq_show(struct seq_fil
54735 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
54736 else
54737 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
54738 +#ifdef CONFIG_GRKERNSEC_HIDESYM
54739 + NULL,
54740 +#else
54741 s,
54742 +#endif
54743 atomic_read(&s->sk_refcnt),
54744 sk_rmem_alloc_get(s),
54745 sk_wmem_alloc_get(s),
54746 diff -urNp linux-2.6.35.4/net/mac80211/ieee80211_i.h linux-2.6.35.4/net/mac80211/ieee80211_i.h
54747 --- linux-2.6.35.4/net/mac80211/ieee80211_i.h 2010-08-26 19:47:12.000000000 -0400
54748 +++ linux-2.6.35.4/net/mac80211/ieee80211_i.h 2010-09-17 20:12:09.000000000 -0400
54749 @@ -649,7 +649,7 @@ struct ieee80211_local {
54750 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
54751 spinlock_t queue_stop_reason_lock;
54752
54753 - int open_count;
54754 + atomic_t open_count;
54755 int monitors, cooked_mntrs;
54756 /* number of interfaces with corresponding FIF_ flags */
54757 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
54758 diff -urNp linux-2.6.35.4/net/mac80211/iface.c linux-2.6.35.4/net/mac80211/iface.c
54759 --- linux-2.6.35.4/net/mac80211/iface.c 2010-08-26 19:47:12.000000000 -0400
54760 +++ linux-2.6.35.4/net/mac80211/iface.c 2010-09-17 20:12:09.000000000 -0400
54761 @@ -183,7 +183,7 @@ static int ieee80211_open(struct net_dev
54762 break;
54763 }
54764
54765 - if (local->open_count == 0) {
54766 + if (atomic_read(&local->open_count) == 0) {
54767 res = drv_start(local);
54768 if (res)
54769 goto err_del_bss;
54770 @@ -215,7 +215,7 @@ static int ieee80211_open(struct net_dev
54771 * Validate the MAC address for this device.
54772 */
54773 if (!is_valid_ether_addr(dev->dev_addr)) {
54774 - if (!local->open_count)
54775 + if (!atomic_read(&local->open_count))
54776 drv_stop(local);
54777 return -EADDRNOTAVAIL;
54778 }
54779 @@ -308,7 +308,7 @@ static int ieee80211_open(struct net_dev
54780
54781 hw_reconf_flags |= __ieee80211_recalc_idle(local);
54782
54783 - local->open_count++;
54784 + atomic_inc(&local->open_count);
54785 if (hw_reconf_flags) {
54786 ieee80211_hw_config(local, hw_reconf_flags);
54787 /*
54788 @@ -336,7 +336,7 @@ static int ieee80211_open(struct net_dev
54789 err_del_interface:
54790 drv_remove_interface(local, &sdata->vif);
54791 err_stop:
54792 - if (!local->open_count)
54793 + if (!atomic_read(&local->open_count))
54794 drv_stop(local);
54795 err_del_bss:
54796 sdata->bss = NULL;
54797 @@ -439,7 +439,7 @@ static int ieee80211_stop(struct net_dev
54798 WARN_ON(!list_empty(&sdata->u.ap.vlans));
54799 }
54800
54801 - local->open_count--;
54802 + atomic_dec(&local->open_count);
54803
54804 switch (sdata->vif.type) {
54805 case NL80211_IFTYPE_AP_VLAN:
54806 @@ -542,7 +542,7 @@ static int ieee80211_stop(struct net_dev
54807
54808 ieee80211_recalc_ps(local, -1);
54809
54810 - if (local->open_count == 0) {
54811 + if (atomic_read(&local->open_count) == 0) {
54812 ieee80211_clear_tx_pending(local);
54813 ieee80211_stop_device(local);
54814
54815 diff -urNp linux-2.6.35.4/net/mac80211/main.c linux-2.6.35.4/net/mac80211/main.c
54816 --- linux-2.6.35.4/net/mac80211/main.c 2010-08-26 19:47:12.000000000 -0400
54817 +++ linux-2.6.35.4/net/mac80211/main.c 2010-09-17 20:12:09.000000000 -0400
54818 @@ -148,7 +148,7 @@ int ieee80211_hw_config(struct ieee80211
54819 local->hw.conf.power_level = power;
54820 }
54821
54822 - if (changed && local->open_count) {
54823 + if (changed && atomic_read(&local->open_count)) {
54824 ret = drv_config(local, changed);
54825 /*
54826 * Goal:
54827 diff -urNp linux-2.6.35.4/net/mac80211/pm.c linux-2.6.35.4/net/mac80211/pm.c
54828 --- linux-2.6.35.4/net/mac80211/pm.c 2010-08-26 19:47:12.000000000 -0400
54829 +++ linux-2.6.35.4/net/mac80211/pm.c 2010-09-17 20:12:09.000000000 -0400
54830 @@ -101,7 +101,7 @@ int __ieee80211_suspend(struct ieee80211
54831 }
54832
54833 /* stop hardware - this must stop RX */
54834 - if (local->open_count)
54835 + if (atomic_read(&local->open_count))
54836 ieee80211_stop_device(local);
54837
54838 local->suspended = true;
54839 diff -urNp linux-2.6.35.4/net/mac80211/rate.c linux-2.6.35.4/net/mac80211/rate.c
54840 --- linux-2.6.35.4/net/mac80211/rate.c 2010-08-26 19:47:12.000000000 -0400
54841 +++ linux-2.6.35.4/net/mac80211/rate.c 2010-09-17 20:12:09.000000000 -0400
54842 @@ -355,7 +355,7 @@ int ieee80211_init_rate_ctrl_alg(struct
54843
54844 ASSERT_RTNL();
54845
54846 - if (local->open_count)
54847 + if (atomic_read(&local->open_count))
54848 return -EBUSY;
54849
54850 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
54851 diff -urNp linux-2.6.35.4/net/mac80211/rc80211_pid_debugfs.c linux-2.6.35.4/net/mac80211/rc80211_pid_debugfs.c
54852 --- linux-2.6.35.4/net/mac80211/rc80211_pid_debugfs.c 2010-08-26 19:47:12.000000000 -0400
54853 +++ linux-2.6.35.4/net/mac80211/rc80211_pid_debugfs.c 2010-09-17 20:12:09.000000000 -0400
54854 @@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_r
54855
54856 spin_unlock_irqrestore(&events->lock, status);
54857
54858 - if (copy_to_user(buf, pb, p))
54859 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
54860 return -EFAULT;
54861
54862 return p;
54863 diff -urNp linux-2.6.35.4/net/mac80211/tx.c linux-2.6.35.4/net/mac80211/tx.c
54864 --- linux-2.6.35.4/net/mac80211/tx.c 2010-08-26 19:47:12.000000000 -0400
54865 +++ linux-2.6.35.4/net/mac80211/tx.c 2010-09-17 20:12:09.000000000 -0400
54866 @@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct
54867 return cpu_to_le16(dur);
54868 }
54869
54870 -static int inline is_ieee80211_device(struct ieee80211_local *local,
54871 +static inline int is_ieee80211_device(struct ieee80211_local *local,
54872 struct net_device *dev)
54873 {
54874 return local == wdev_priv(dev->ieee80211_ptr);
54875 diff -urNp linux-2.6.35.4/net/mac80211/util.c linux-2.6.35.4/net/mac80211/util.c
54876 --- linux-2.6.35.4/net/mac80211/util.c 2010-08-26 19:47:12.000000000 -0400
54877 +++ linux-2.6.35.4/net/mac80211/util.c 2010-09-17 20:12:09.000000000 -0400
54878 @@ -1097,7 +1097,7 @@ int ieee80211_reconfig(struct ieee80211_
54879 local->resuming = true;
54880
54881 /* restart hardware */
54882 - if (local->open_count) {
54883 + if (atomic_read(&local->open_count)) {
54884 /*
54885 * Upon resume hardware can sometimes be goofy due to
54886 * various platform / driver / bus issues, so restarting
54887 diff -urNp linux-2.6.35.4/net/netlink/af_netlink.c linux-2.6.35.4/net/netlink/af_netlink.c
54888 --- linux-2.6.35.4/net/netlink/af_netlink.c 2010-08-26 19:47:12.000000000 -0400
54889 +++ linux-2.6.35.4/net/netlink/af_netlink.c 2010-09-17 20:12:37.000000000 -0400
54890 @@ -2001,13 +2001,21 @@ static int netlink_seq_show(struct seq_f
54891 struct netlink_sock *nlk = nlk_sk(s);
54892
54893 seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d %-8lu\n",
54894 +#ifdef CONFIG_GRKERNSEC_HIDESYM
54895 + NULL,
54896 +#else
54897 s,
54898 +#endif
54899 s->sk_protocol,
54900 nlk->pid,
54901 nlk->groups ? (u32)nlk->groups[0] : 0,
54902 sk_rmem_alloc_get(s),
54903 sk_wmem_alloc_get(s),
54904 +#ifdef CONFIG_GRKERNSEC_HIDESYM
54905 + NULL,
54906 +#else
54907 nlk->cb,
54908 +#endif
54909 atomic_read(&s->sk_refcnt),
54910 atomic_read(&s->sk_drops),
54911 sock_i_ino(s)
54912 diff -urNp linux-2.6.35.4/net/packet/af_packet.c linux-2.6.35.4/net/packet/af_packet.c
54913 --- linux-2.6.35.4/net/packet/af_packet.c 2010-08-26 19:47:12.000000000 -0400
54914 +++ linux-2.6.35.4/net/packet/af_packet.c 2010-09-17 20:12:37.000000000 -0400
54915 @@ -2093,7 +2093,7 @@ static int packet_getsockopt(struct sock
54916 case PACKET_HDRLEN:
54917 if (len > sizeof(int))
54918 len = sizeof(int);
54919 - if (copy_from_user(&val, optval, len))
54920 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
54921 return -EFAULT;
54922 switch (val) {
54923 case TPACKET_V1:
54924 @@ -2125,7 +2125,7 @@ static int packet_getsockopt(struct sock
54925
54926 if (put_user(len, optlen))
54927 return -EFAULT;
54928 - if (copy_to_user(optval, data, len))
54929 + if (len > sizeof(st) || copy_to_user(optval, data, len))
54930 return -EFAULT;
54931 return 0;
54932 }
54933 @@ -2604,7 +2604,11 @@ static int packet_seq_show(struct seq_fi
54934
54935 seq_printf(seq,
54936 "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
54937 +#ifdef CONFIG_GRKERNSEC_HIDESYM
54938 + NULL,
54939 +#else
54940 s,
54941 +#endif
54942 atomic_read(&s->sk_refcnt),
54943 s->sk_type,
54944 ntohs(po->num),
54945 diff -urNp linux-2.6.35.4/net/sctp/socket.c linux-2.6.35.4/net/sctp/socket.c
54946 --- linux-2.6.35.4/net/sctp/socket.c 2010-08-26 19:47:12.000000000 -0400
54947 +++ linux-2.6.35.4/net/sctp/socket.c 2010-09-17 20:12:09.000000000 -0400
54948 @@ -1483,7 +1483,7 @@ SCTP_STATIC int sctp_sendmsg(struct kioc
54949 struct sctp_sndrcvinfo *sinfo;
54950 struct sctp_initmsg *sinit;
54951 sctp_assoc_t associd = 0;
54952 - sctp_cmsgs_t cmsgs = { NULL };
54953 + sctp_cmsgs_t cmsgs = { NULL, NULL };
54954 int err;
54955 sctp_scope_t scope;
54956 long timeo;
54957 @@ -4387,7 +4387,7 @@ static int sctp_getsockopt_peer_addrs(st
54958 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
54959 if (space_left < addrlen)
54960 return -ENOMEM;
54961 - if (copy_to_user(to, &temp, addrlen))
54962 + if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
54963 return -EFAULT;
54964 to += addrlen;
54965 cnt++;
54966 diff -urNp linux-2.6.35.4/net/socket.c linux-2.6.35.4/net/socket.c
54967 --- linux-2.6.35.4/net/socket.c 2010-08-26 19:47:12.000000000 -0400
54968 +++ linux-2.6.35.4/net/socket.c 2010-09-17 20:12:37.000000000 -0400
54969 @@ -88,6 +88,7 @@
54970 #include <linux/nsproxy.h>
54971 #include <linux/magic.h>
54972 #include <linux/slab.h>
54973 +#include <linux/in.h>
54974
54975 #include <asm/uaccess.h>
54976 #include <asm/unistd.h>
54977 @@ -105,6 +106,8 @@
54978 #include <linux/sockios.h>
54979 #include <linux/atalk.h>
54980
54981 +#include <linux/grsock.h>
54982 +
54983 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
54984 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
54985 unsigned long nr_segs, loff_t pos);
54986 @@ -322,7 +325,7 @@ static int sockfs_get_sb(struct file_sys
54987 mnt);
54988 }
54989
54990 -static struct vfsmount *sock_mnt __read_mostly;
54991 +struct vfsmount *sock_mnt __read_mostly;
54992
54993 static struct file_system_type sock_fs_type = {
54994 .name = "sockfs",
54995 @@ -1291,6 +1294,16 @@ SYSCALL_DEFINE3(socket, int, family, int
54996 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
54997 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
54998
54999 + if(!gr_search_socket(family, type, protocol)) {
55000 + retval = -EACCES;
55001 + goto out;
55002 + }
55003 +
55004 + if (gr_handle_sock_all(family, type, protocol)) {
55005 + retval = -EACCES;
55006 + goto out;
55007 + }
55008 +
55009 retval = sock_create(family, type, protocol, &sock);
55010 if (retval < 0)
55011 goto out;
55012 @@ -1403,6 +1416,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
55013 if (sock) {
55014 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
55015 if (err >= 0) {
55016 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
55017 + err = -EACCES;
55018 + goto error;
55019 + }
55020 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
55021 + if (err)
55022 + goto error;
55023 +
55024 err = security_socket_bind(sock,
55025 (struct sockaddr *)&address,
55026 addrlen);
55027 @@ -1411,6 +1432,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
55028 (struct sockaddr *)
55029 &address, addrlen);
55030 }
55031 +error:
55032 fput_light(sock->file, fput_needed);
55033 }
55034 return err;
55035 @@ -1434,10 +1456,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, ba
55036 if ((unsigned)backlog > somaxconn)
55037 backlog = somaxconn;
55038
55039 + if (gr_handle_sock_server_other(sock->sk)) {
55040 + err = -EPERM;
55041 + goto error;
55042 + }
55043 +
55044 + err = gr_search_listen(sock);
55045 + if (err)
55046 + goto error;
55047 +
55048 err = security_socket_listen(sock, backlog);
55049 if (!err)
55050 err = sock->ops->listen(sock, backlog);
55051
55052 +error:
55053 fput_light(sock->file, fput_needed);
55054 }
55055 return err;
55056 @@ -1480,6 +1512,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
55057 newsock->type = sock->type;
55058 newsock->ops = sock->ops;
55059
55060 + if (gr_handle_sock_server_other(sock->sk)) {
55061 + err = -EPERM;
55062 + sock_release(newsock);
55063 + goto out_put;
55064 + }
55065 +
55066 + err = gr_search_accept(sock);
55067 + if (err) {
55068 + sock_release(newsock);
55069 + goto out_put;
55070 + }
55071 +
55072 /*
55073 * We don't need try_module_get here, as the listening socket (sock)
55074 * has the protocol module (sock->ops->owner) held.
55075 @@ -1518,6 +1562,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
55076 fd_install(newfd, newfile);
55077 err = newfd;
55078
55079 + gr_attach_curr_ip(newsock->sk);
55080 +
55081 out_put:
55082 fput_light(sock->file, fput_needed);
55083 out:
55084 @@ -1550,6 +1596,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct
55085 int, addrlen)
55086 {
55087 struct socket *sock;
55088 + struct sockaddr *sck;
55089 struct sockaddr_storage address;
55090 int err, fput_needed;
55091
55092 @@ -1560,6 +1607,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct
55093 if (err < 0)
55094 goto out_put;
55095
55096 + sck = (struct sockaddr *)&address;
55097 +
55098 + if (gr_handle_sock_client(sck)) {
55099 + err = -EACCES;
55100 + goto out_put;
55101 + }
55102 +
55103 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
55104 + if (err)
55105 + goto out_put;
55106 +
55107 err =
55108 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
55109 if (err)
55110 diff -urNp linux-2.6.35.4/net/sunrpc/sched.c linux-2.6.35.4/net/sunrpc/sched.c
55111 --- linux-2.6.35.4/net/sunrpc/sched.c 2010-08-26 19:47:12.000000000 -0400
55112 +++ linux-2.6.35.4/net/sunrpc/sched.c 2010-09-17 20:12:09.000000000 -0400
55113 @@ -234,9 +234,9 @@ static int rpc_wait_bit_killable(void *w
55114 #ifdef RPC_DEBUG
55115 static void rpc_task_set_debuginfo(struct rpc_task *task)
55116 {
55117 - static atomic_t rpc_pid;
55118 + static atomic_unchecked_t rpc_pid;
55119
55120 - task->tk_pid = atomic_inc_return(&rpc_pid);
55121 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
55122 }
55123 #else
55124 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
55125 diff -urNp linux-2.6.35.4/net/sunrpc/xprtrdma/svc_rdma.c linux-2.6.35.4/net/sunrpc/xprtrdma/svc_rdma.c
55126 --- linux-2.6.35.4/net/sunrpc/xprtrdma/svc_rdma.c 2010-08-26 19:47:12.000000000 -0400
55127 +++ linux-2.6.35.4/net/sunrpc/xprtrdma/svc_rdma.c 2010-09-17 20:12:37.000000000 -0400
55128 @@ -106,7 +106,7 @@ static int read_reset_stat(ctl_table *ta
55129 len -= *ppos;
55130 if (len > *lenp)
55131 len = *lenp;
55132 - if (len && copy_to_user(buffer, str_buf, len))
55133 + if (len > sizeof(str_buf) || (len && copy_to_user(buffer, str_buf, len)))
55134 return -EFAULT;
55135 *lenp = len;
55136 *ppos += len;
55137 diff -urNp linux-2.6.35.4/net/sysctl_net.c linux-2.6.35.4/net/sysctl_net.c
55138 --- linux-2.6.35.4/net/sysctl_net.c 2010-08-26 19:47:12.000000000 -0400
55139 +++ linux-2.6.35.4/net/sysctl_net.c 2010-09-17 20:12:37.000000000 -0400
55140 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ct
55141 struct ctl_table *table)
55142 {
55143 /* Allow network administrator to have same access as root. */
55144 - if (capable(CAP_NET_ADMIN)) {
55145 + if (capable_nolog(CAP_NET_ADMIN)) {
55146 int mode = (table->mode >> 6) & 7;
55147 return (mode << 6) | (mode << 3) | mode;
55148 }
55149 diff -urNp linux-2.6.35.4/net/tipc/socket.c linux-2.6.35.4/net/tipc/socket.c
55150 --- linux-2.6.35.4/net/tipc/socket.c 2010-08-26 19:47:12.000000000 -0400
55151 +++ linux-2.6.35.4/net/tipc/socket.c 2010-09-17 20:12:09.000000000 -0400
55152 @@ -1451,8 +1451,9 @@ static int connect(struct socket *sock,
55153 } else {
55154 if (res == 0)
55155 res = -ETIMEDOUT;
55156 - else
55157 - ; /* leave "res" unchanged */
55158 + else {
55159 + /* leave "res" unchanged */
55160 + }
55161 sock->state = SS_DISCONNECTING;
55162 }
55163
55164 diff -urNp linux-2.6.35.4/net/unix/af_unix.c linux-2.6.35.4/net/unix/af_unix.c
55165 --- linux-2.6.35.4/net/unix/af_unix.c 2010-08-26 19:47:12.000000000 -0400
55166 +++ linux-2.6.35.4/net/unix/af_unix.c 2010-09-17 20:12:37.000000000 -0400
55167 @@ -736,6 +736,12 @@ static struct sock *unix_find_other(stru
55168 err = -ECONNREFUSED;
55169 if (!S_ISSOCK(inode->i_mode))
55170 goto put_fail;
55171 +
55172 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
55173 + err = -EACCES;
55174 + goto put_fail;
55175 + }
55176 +
55177 u = unix_find_socket_byinode(net, inode);
55178 if (!u)
55179 goto put_fail;
55180 @@ -756,6 +762,13 @@ static struct sock *unix_find_other(stru
55181 if (u) {
55182 struct dentry *dentry;
55183 dentry = unix_sk(u)->dentry;
55184 +
55185 + if (!gr_handle_chroot_unix(u->sk_peercred.pid)) {
55186 + err = -EPERM;
55187 + sock_put(u);
55188 + goto fail;
55189 + }
55190 +
55191 if (dentry)
55192 touch_atime(unix_sk(u)->mnt, dentry);
55193 } else
55194 @@ -841,11 +854,18 @@ static int unix_bind(struct socket *sock
55195 err = security_path_mknod(&nd.path, dentry, mode, 0);
55196 if (err)
55197 goto out_mknod_drop_write;
55198 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
55199 + err = -EACCES;
55200 + goto out_mknod_drop_write;
55201 + }
55202 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
55203 out_mknod_drop_write:
55204 mnt_drop_write(nd.path.mnt);
55205 if (err)
55206 goto out_mknod_dput;
55207 +
55208 + gr_handle_create(dentry, nd.path.mnt);
55209 +
55210 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
55211 dput(nd.path.dentry);
55212 nd.path.dentry = dentry;
55213 @@ -863,6 +883,10 @@ out_mknod_drop_write:
55214 goto out_unlock;
55215 }
55216
55217 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
55218 + sk->sk_peercred.pid = current->pid;
55219 +#endif
55220 +
55221 list = &unix_socket_table[addr->hash];
55222 } else {
55223 list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
55224 @@ -2161,7 +2185,11 @@ static int unix_seq_show(struct seq_file
55225 unix_state_lock(s);
55226
55227 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
55228 +#ifdef CONFIG_GRKERNSEC_HIDESYM
55229 + NULL,
55230 +#else
55231 s,
55232 +#endif
55233 atomic_read(&s->sk_refcnt),
55234 0,
55235 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
55236 diff -urNp linux-2.6.35.4/net/wireless/reg.c linux-2.6.35.4/net/wireless/reg.c
55237 --- linux-2.6.35.4/net/wireless/reg.c 2010-08-26 19:47:12.000000000 -0400
55238 +++ linux-2.6.35.4/net/wireless/reg.c 2010-09-17 20:12:09.000000000 -0400
55239 @@ -50,7 +50,7 @@
55240 printk(KERN_DEBUG format , ## args); \
55241 } while (0)
55242 #else
55243 -#define REG_DBG_PRINT(args...)
55244 +#define REG_DBG_PRINT(args...) do {} while (0)
55245 #endif
55246
55247 /* Receipt of information from last regulatory request */
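
The REG_DBG_PRINT change above replaces an empty macro body with do {} while (0), the kernel's standard way of compiling a debug statement out. A small stand-alone sketch of the difference, assuming a hypothetical REG_DBG macro in place of the real one; with the empty form, "if (x) REG_DBG(...);" leaves an empty statement behind, which -Wempty-body flags and which no longer requires a terminating semicolon, whereas the do/while form stays a single real statement in every context:

#include <stdio.h>

#ifdef DEBUG
#define REG_DBG(fmt, ...)	printf(fmt, ##__VA_ARGS__)
#else
/* Compiled-out form: still one complete statement, still needs ';'. */
#define REG_DBG(fmt, ...)	do {} while (0)
#endif

int main(void)
{
	int found = 0;

	if (found)
		REG_DBG("regulatory hint found\n");	/* safe with or without braces */
	else
		printf("no hint\n");

	return 0;
}
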
55248 diff -urNp linux-2.6.35.4/net/wireless/wext-core.c linux-2.6.35.4/net/wireless/wext-core.c
55249 --- linux-2.6.35.4/net/wireless/wext-core.c 2010-08-26 19:47:12.000000000 -0400
55250 +++ linux-2.6.35.4/net/wireless/wext-core.c 2010-09-17 20:12:09.000000000 -0400
55251 @@ -744,8 +744,7 @@ static int ioctl_standard_iw_point(struc
55252 */
55253
55254 /* Support for very large requests */
55255 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
55256 - (user_length > descr->max_tokens)) {
55257 + if (user_length > descr->max_tokens) {
55258 /* Allow userspace to GET more than max so
55259 * we can support any size GET requests.
55260 * There is still a limit : -ENOMEM.
55261 diff -urNp linux-2.6.35.4/net/xfrm/xfrm_policy.c linux-2.6.35.4/net/xfrm/xfrm_policy.c
55262 --- linux-2.6.35.4/net/xfrm/xfrm_policy.c 2010-08-26 19:47:12.000000000 -0400
55263 +++ linux-2.6.35.4/net/xfrm/xfrm_policy.c 2010-09-17 20:12:09.000000000 -0400
55264 @@ -1502,7 +1502,7 @@ free_dst:
55265 goto out;
55266 }
55267
55268 -static int inline
55269 +static inline int
55270 xfrm_dst_alloc_copy(void **target, void *src, int size)
55271 {
55272 if (!*target) {
55273 @@ -1514,7 +1514,7 @@ xfrm_dst_alloc_copy(void **target, void
55274 return 0;
55275 }
55276
55277 -static int inline
55278 +static inline int
55279 xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
55280 {
55281 #ifdef CONFIG_XFRM_SUB_POLICY
55282 @@ -1526,7 +1526,7 @@ xfrm_dst_update_parent(struct dst_entry
55283 #endif
55284 }
55285
55286 -static int inline
55287 +static inline int
55288 xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
55289 {
55290 #ifdef CONFIG_XFRM_SUB_POLICY
55291 diff -urNp linux-2.6.35.4/scripts/basic/fixdep.c linux-2.6.35.4/scripts/basic/fixdep.c
55292 --- linux-2.6.35.4/scripts/basic/fixdep.c 2010-08-26 19:47:12.000000000 -0400
55293 +++ linux-2.6.35.4/scripts/basic/fixdep.c 2010-09-17 20:12:09.000000000 -0400
55294 @@ -222,9 +222,9 @@ static void use_config(char *m, int slen
55295
55296 static void parse_config_file(char *map, size_t len)
55297 {
55298 - int *end = (int *) (map + len);
55299 + unsigned int *end = (unsigned int *) (map + len);
55300 /* start at +1, so that p can never be < map */
55301 - int *m = (int *) map + 1;
55302 + unsigned int *m = (unsigned int *) map + 1;
55303 char *p, *q;
55304
55305 for (; m < end; m++) {
55306 @@ -371,7 +371,7 @@ static void print_deps(void)
55307 static void traps(void)
55308 {
55309 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
55310 - int *p = (int *)test;
55311 + unsigned int *p = (unsigned int *)test;
55312
55313 if (*p != INT_CONF) {
55314 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
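
fixdep's traps() sanity check reinterprets the four bytes "CONF" as one integer and compares it against a compile-time constant; the patch only switches the pointer types to unsigned int so the word-at-a-time scan in parse_config_file() never performs signed arithmetic. A stand-alone illustration of the reinterpretation (the constant it is compared against in fixdep is not reproduced here):

#include <stdio.h>
#include <string.h>

/* Reinterpret the bytes 'C','O','N','F' as a 32-bit word, the way
 * fixdep's traps() does to verify sizeof(int) == 4 and the expected
 * byte order before scanning .config data word by word. */
int main(void)
{
	static const char test[4] = { 'C', 'O', 'N', 'F' };
	unsigned int word;

	memcpy(&word, test, sizeof(word));

	/* Little-endian hosts print 0x464e4f43, big-endian 0x434f4e46. */
	printf("\"CONF\" as a word: %#x\n", word);
	return 0;
}
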
55315 diff -urNp linux-2.6.35.4/scripts/kallsyms.c linux-2.6.35.4/scripts/kallsyms.c
55316 --- linux-2.6.35.4/scripts/kallsyms.c 2010-08-26 19:47:12.000000000 -0400
55317 +++ linux-2.6.35.4/scripts/kallsyms.c 2010-09-17 20:12:09.000000000 -0400
55318 @@ -43,10 +43,10 @@ struct text_range {
55319
55320 static unsigned long long _text;
55321 static struct text_range text_ranges[] = {
55322 - { "_stext", "_etext" },
55323 - { "_sinittext", "_einittext" },
55324 - { "_stext_l1", "_etext_l1" }, /* Blackfin on-chip L1 inst SRAM */
55325 - { "_stext_l2", "_etext_l2" }, /* Blackfin on-chip L2 SRAM */
55326 + { "_stext", "_etext", 0, 0 },
55327 + { "_sinittext", "_einittext", 0, 0 },
55328 + { "_stext_l1", "_etext_l1", 0, 0 }, /* Blackfin on-chip L1 inst SRAM */
55329 + { "_stext_l2", "_etext_l2", 0, 0 }, /* Blackfin on-chip L2 SRAM */
55330 };
55331 #define text_range_text (&text_ranges[0])
55332 #define text_range_inittext (&text_ranges[1])
55333 diff -urNp linux-2.6.35.4/scripts/mod/file2alias.c linux-2.6.35.4/scripts/mod/file2alias.c
55334 --- linux-2.6.35.4/scripts/mod/file2alias.c 2010-08-26 19:47:12.000000000 -0400
55335 +++ linux-2.6.35.4/scripts/mod/file2alias.c 2010-09-17 20:12:09.000000000 -0400
55336 @@ -72,7 +72,7 @@ static void device_id_check(const char *
55337 unsigned long size, unsigned long id_size,
55338 void *symval)
55339 {
55340 - int i;
55341 + unsigned int i;
55342
55343 if (size % id_size || size < id_size) {
55344 if (cross_build != 0)
55345 @@ -102,7 +102,7 @@ static void device_id_check(const char *
55346 /* USB is special because the bcdDevice can be matched against a numeric range */
55347 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
55348 static void do_usb_entry(struct usb_device_id *id,
55349 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
55350 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
55351 unsigned char range_lo, unsigned char range_hi,
55352 unsigned char max, struct module *mod)
55353 {
55354 @@ -437,7 +437,7 @@ static void do_pnp_device_entry(void *sy
55355 for (i = 0; i < count; i++) {
55356 const char *id = (char *)devs[i].id;
55357 char acpi_id[sizeof(devs[0].id)];
55358 - int j;
55359 + unsigned int j;
55360
55361 buf_printf(&mod->dev_table_buf,
55362 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
55363 @@ -467,7 +467,7 @@ static void do_pnp_card_entries(void *sy
55364
55365 for (j = 0; j < PNP_MAX_DEVICES; j++) {
55366 const char *id = (char *)card->devs[j].id;
55367 - int i2, j2;
55368 + unsigned int i2, j2;
55369 int dup = 0;
55370
55371 if (!id[0])
55372 @@ -493,7 +493,7 @@ static void do_pnp_card_entries(void *sy
55373 /* add an individual alias for every device entry */
55374 if (!dup) {
55375 char acpi_id[sizeof(card->devs[0].id)];
55376 - int k;
55377 + unsigned int k;
55378
55379 buf_printf(&mod->dev_table_buf,
55380 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
55381 @@ -768,7 +768,7 @@ static void dmi_ascii_filter(char *d, co
55382 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
55383 char *alias)
55384 {
55385 - int i, j;
55386 + unsigned int i, j;
55387
55388 sprintf(alias, "dmi*");
55389
55390 diff -urNp linux-2.6.35.4/scripts/mod/modpost.c linux-2.6.35.4/scripts/mod/modpost.c
55391 --- linux-2.6.35.4/scripts/mod/modpost.c 2010-08-26 19:47:12.000000000 -0400
55392 +++ linux-2.6.35.4/scripts/mod/modpost.c 2010-09-17 20:12:09.000000000 -0400
55393 @@ -846,6 +846,7 @@ enum mismatch {
55394 ANY_INIT_TO_ANY_EXIT,
55395 ANY_EXIT_TO_ANY_INIT,
55396 EXPORT_TO_INIT_EXIT,
55397 + DATA_TO_TEXT
55398 };
55399
55400 struct sectioncheck {
55401 @@ -954,6 +955,12 @@ const struct sectioncheck sectioncheck[]
55402 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
55403 .mismatch = EXPORT_TO_INIT_EXIT,
55404 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
55405 +},
55406 +/* Do not reference code from writable data */
55407 +{
55408 + .fromsec = { DATA_SECTIONS, NULL },
55409 + .tosec = { TEXT_SECTIONS, NULL },
55410 + .mismatch = DATA_TO_TEXT
55411 }
55412 };
55413
55414 @@ -1060,10 +1067,10 @@ static Elf_Sym *find_elf_symbol(struct e
55415 continue;
55416 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
55417 continue;
55418 - if (sym->st_value == addr)
55419 - return sym;
55420 /* Find a symbol nearby - addr are maybe negative */
55421 d = sym->st_value - addr;
55422 + if (d == 0)
55423 + return sym;
55424 if (d < 0)
55425 d = addr - sym->st_value;
55426 if (d < distance) {
55427 @@ -1306,6 +1313,14 @@ static void report_sec_mismatch(const ch
55428 "or drop the export.\n",
55429 tosym, sec2annotation(tosec), sec2annotation(tosec), tosym);
55430 break;
55431 + case DATA_TO_TEXT:
55432 +/*
55433 + fprintf(stderr,
55434 + "The variable %s references\n"
55435 + "the %s %s%s%s\n",
55436 + fromsym, to, sec2annotation(tosec), tosym, to_p);
55437 +*/
55438 + break;
55439 }
55440 fprintf(stderr, "\n");
55441 }
55442 @@ -1629,7 +1644,7 @@ void __attribute__((format(printf, 2, 3)
55443 va_end(ap);
55444 }
55445
55446 -void buf_write(struct buffer *buf, const char *s, int len)
55447 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
55448 {
55449 if (buf->size - buf->pos < len) {
55450 buf->size += len + SZ;
55451 @@ -1841,7 +1856,7 @@ static void write_if_changed(struct buff
55452 if (fstat(fileno(file), &st) < 0)
55453 goto close_write;
55454
55455 - if (st.st_size != b->pos)
55456 + if (st.st_size != (off_t)b->pos)
55457 goto close_write;
55458
55459 tmp = NOFAIL(malloc(b->pos));
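
The find_elf_symbol() change above folds the exact-address match into the distance search: instead of a separate equality test before the loop body, the computed distance is returned immediately when it is zero. A small stand-alone sketch of the same nearest-match loop, using a plain address array instead of an ELF symbol table (names are illustrative):

#include <stdio.h>
#include <stdint.h>

/* Return the index of the entry in syms[] closest to addr, returning
 * immediately on an exact match (d == 0), as in the reworked
 * find_elf_symbol() loop. */
static int find_nearest(const uint64_t *syms, int n, uint64_t addr)
{
	uint64_t distance = UINT64_MAX;
	int best = -1;

	for (int i = 0; i < n; i++) {
		int64_t d = (int64_t)(syms[i] - addr);

		if (d == 0)
			return i;		/* exact hit */
		if (d < 0)
			d = -d;			/* addresses may lie on either side */
		if ((uint64_t)d < distance) {
			distance = (uint64_t)d;
			best = i;
		}
	}
	return best;
}

int main(void)
{
	uint64_t syms[] = { 0x1000, 0x1040, 0x2000 };

	printf("%d\n", find_nearest(syms, 3, 0x1040));	/* 1, exact match */
	printf("%d\n", find_nearest(syms, 3, 0x1050));	/* 1, nearest     */
	return 0;
}
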
55460 diff -urNp linux-2.6.35.4/scripts/mod/modpost.h linux-2.6.35.4/scripts/mod/modpost.h
55461 --- linux-2.6.35.4/scripts/mod/modpost.h 2010-08-26 19:47:12.000000000 -0400
55462 +++ linux-2.6.35.4/scripts/mod/modpost.h 2010-09-17 20:12:09.000000000 -0400
55463 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *e
55464
55465 struct buffer {
55466 char *p;
55467 - int pos;
55468 - int size;
55469 + unsigned int pos;
55470 + unsigned int size;
55471 };
55472
55473 void __attribute__((format(printf, 2, 3)))
55474 buf_printf(struct buffer *buf, const char *fmt, ...);
55475
55476 void
55477 -buf_write(struct buffer *buf, const char *s, int len);
55478 +buf_write(struct buffer *buf, const char *s, unsigned int len);
55479
55480 struct module {
55481 struct module *next;
55482 diff -urNp linux-2.6.35.4/scripts/mod/sumversion.c linux-2.6.35.4/scripts/mod/sumversion.c
55483 --- linux-2.6.35.4/scripts/mod/sumversion.c 2010-08-26 19:47:12.000000000 -0400
55484 +++ linux-2.6.35.4/scripts/mod/sumversion.c 2010-09-17 20:12:09.000000000 -0400
55485 @@ -455,7 +455,7 @@ static void write_version(const char *fi
55486 goto out;
55487 }
55488
55489 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
55490 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
55491 warn("writing sum in %s failed: %s\n",
55492 filename, strerror(errno));
55493 goto out;
55494 diff -urNp linux-2.6.35.4/scripts/pnmtologo.c linux-2.6.35.4/scripts/pnmtologo.c
55495 --- linux-2.6.35.4/scripts/pnmtologo.c 2010-08-26 19:47:12.000000000 -0400
55496 +++ linux-2.6.35.4/scripts/pnmtologo.c 2010-09-17 20:12:09.000000000 -0400
55497 @@ -237,14 +237,14 @@ static void write_header(void)
55498 fprintf(out, " * Linux logo %s\n", logoname);
55499 fputs(" */\n\n", out);
55500 fputs("#include <linux/linux_logo.h>\n\n", out);
55501 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
55502 + fprintf(out, "static unsigned char %s_data[] = {\n",
55503 logoname);
55504 }
55505
55506 static void write_footer(void)
55507 {
55508 fputs("\n};\n\n", out);
55509 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
55510 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
55511 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
55512 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
55513 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
55514 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
55515 fputs("\n};\n\n", out);
55516
55517 /* write logo clut */
55518 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
55519 + fprintf(out, "static unsigned char %s_clut[] = {\n",
55520 logoname);
55521 write_hex_cnt = 0;
55522 for (i = 0; i < logo_clutsize; i++) {
55523 diff -urNp linux-2.6.35.4/security/commoncap.c linux-2.6.35.4/security/commoncap.c
55524 --- linux-2.6.35.4/security/commoncap.c 2010-08-26 19:47:12.000000000 -0400
55525 +++ linux-2.6.35.4/security/commoncap.c 2010-09-17 20:12:37.000000000 -0400
55526 @@ -28,6 +28,7 @@
55527 #include <linux/prctl.h>
55528 #include <linux/securebits.h>
55529 #include <linux/syslog.h>
55530 +#include <net/sock.h>
55531
55532 /*
55533 * If a non-root user executes a setuid-root binary in
55534 @@ -51,9 +52,11 @@ static void warn_setuid_and_fcaps_mixed(
55535 }
55536 }
55537
55538 +extern kernel_cap_t gr_cap_rtnetlink(struct sock *sk);
55539 +
55540 int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
55541 {
55542 - NETLINK_CB(skb).eff_cap = current_cap();
55543 + NETLINK_CB(skb).eff_cap = gr_cap_rtnetlink(sk);
55544 return 0;
55545 }
55546
55547 diff -urNp linux-2.6.35.4/security/integrity/ima/ima_api.c linux-2.6.35.4/security/integrity/ima/ima_api.c
55548 --- linux-2.6.35.4/security/integrity/ima/ima_api.c 2010-08-26 19:47:12.000000000 -0400
55549 +++ linux-2.6.35.4/security/integrity/ima/ima_api.c 2010-09-17 20:12:09.000000000 -0400
55550 @@ -75,7 +75,7 @@ void ima_add_violation(struct inode *ino
55551 int result;
55552
55553 /* can overflow, only indicator */
55554 - atomic_long_inc(&ima_htable.violations);
55555 + atomic_long_inc_unchecked(&ima_htable.violations);
55556
55557 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
55558 if (!entry) {
55559 diff -urNp linux-2.6.35.4/security/integrity/ima/ima_fs.c linux-2.6.35.4/security/integrity/ima/ima_fs.c
55560 --- linux-2.6.35.4/security/integrity/ima/ima_fs.c 2010-08-26 19:47:12.000000000 -0400
55561 +++ linux-2.6.35.4/security/integrity/ima/ima_fs.c 2010-09-17 20:12:09.000000000 -0400
55562 @@ -28,12 +28,12 @@
55563 static int valid_policy = 1;
55564 #define TMPBUFLEN 12
55565 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
55566 - loff_t *ppos, atomic_long_t *val)
55567 + loff_t *ppos, atomic_long_unchecked_t *val)
55568 {
55569 char tmpbuf[TMPBUFLEN];
55570 ssize_t len;
55571
55572 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
55573 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
55574 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
55575 }
55576
55577 diff -urNp linux-2.6.35.4/security/integrity/ima/ima.h linux-2.6.35.4/security/integrity/ima/ima.h
55578 --- linux-2.6.35.4/security/integrity/ima/ima.h 2010-08-26 19:47:12.000000000 -0400
55579 +++ linux-2.6.35.4/security/integrity/ima/ima.h 2010-09-17 20:12:09.000000000 -0400
55580 @@ -83,8 +83,8 @@ void ima_add_violation(struct inode *ino
55581 extern spinlock_t ima_queue_lock;
55582
55583 struct ima_h_table {
55584 - atomic_long_t len; /* number of stored measurements in the list */
55585 - atomic_long_t violations;
55586 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
55587 + atomic_long_unchecked_t violations;
55588 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
55589 };
55590 extern struct ima_h_table ima_htable;
55591 diff -urNp linux-2.6.35.4/security/integrity/ima/ima_queue.c linux-2.6.35.4/security/integrity/ima/ima_queue.c
55592 --- linux-2.6.35.4/security/integrity/ima/ima_queue.c 2010-08-26 19:47:12.000000000 -0400
55593 +++ linux-2.6.35.4/security/integrity/ima/ima_queue.c 2010-09-17 20:12:09.000000000 -0400
55594 @@ -79,7 +79,7 @@ static int ima_add_digest_entry(struct i
55595 INIT_LIST_HEAD(&qe->later);
55596 list_add_tail_rcu(&qe->later, &ima_measurements);
55597
55598 - atomic_long_inc(&ima_htable.len);
55599 + atomic_long_inc_unchecked(&ima_htable.len);
55600 key = ima_hash_key(entry->digest);
55601 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
55602 return 0;
55603 diff -urNp linux-2.6.35.4/security/Kconfig linux-2.6.35.4/security/Kconfig
55604 --- linux-2.6.35.4/security/Kconfig 2010-08-26 19:47:12.000000000 -0400
55605 +++ linux-2.6.35.4/security/Kconfig 2010-09-17 20:12:37.000000000 -0400
55606 @@ -4,6 +4,505 @@
55607
55608 menu "Security options"
55609
55610 +source grsecurity/Kconfig
55611 +
55612 +menu "PaX"
55613 +
55614 + config PAX_PER_CPU_PGD
55615 + bool
55616 +
55617 + config TASK_SIZE_MAX_SHIFT
55618 + int
55619 + depends on X86_64
55620 + default 47 if !PAX_PER_CPU_PGD
55621 + default 42 if PAX_PER_CPU_PGD
55622 +
55623 + config PAX_ENABLE_PAE
55624 + bool
55625 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
55626 +
55627 +config PAX
55628 + bool "Enable various PaX features"
55629 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
55630 + help
55631 + This allows you to enable various PaX features. PaX adds
55632 + intrusion prevention mechanisms to the kernel that reduce
55633 + the risks posed by exploitable memory corruption bugs.
55634 +
55635 +menu "PaX Control"
55636 + depends on PAX
55637 +
55638 +config PAX_SOFTMODE
55639 + bool 'Support soft mode'
55640 + select PAX_PT_PAX_FLAGS
55641 + help
55642 + Enabling this option will allow you to run PaX in soft mode, that
55643 + is, PaX features will not be enforced by default, only on executables
55644 + marked explicitly. You must also enable PT_PAX_FLAGS support as it
55645 + is the only way to mark executables for soft mode use.
55646 +
55647 + Soft mode can be activated by using the "pax_softmode=1" kernel command
55648 + line option on boot. Furthermore you can control various PaX features
55649 + at runtime via the entries in /proc/sys/kernel/pax.
55650 +
55651 +config PAX_EI_PAX
55652 + bool 'Use legacy ELF header marking'
55653 + help
55654 + Enabling this option will allow you to control PaX features on
55655 + a per executable basis via the 'chpax' utility available at
55656 + http://pax.grsecurity.net/. The control flags will be read from
55657 + an otherwise reserved part of the ELF header. This marking has
55658 + numerous drawbacks (no support for soft-mode, toolchain does not
55659 + know about the non-standard use of the ELF header) therefore it
55660 + has been deprecated in favour of PT_PAX_FLAGS support.
55661 +
55662 + If you have applications not marked by the PT_PAX_FLAGS ELF
55663 + program header then you MUST enable this option otherwise they
55664 + will not get any protection.
55665 +
55666 + Note that if you enable PT_PAX_FLAGS marking support as well,
55667 + the PT_PAX_FLAG marks will override the legacy EI_PAX marks.
55668 +
55669 +config PAX_PT_PAX_FLAGS
55670 + bool 'Use ELF program header marking'
55671 + help
55672 + Enabling this option will allow you to control PaX features on
55673 + a per executable basis via the 'paxctl' utility available at
55674 + http://pax.grsecurity.net/. The control flags will be read from
55675 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
55676 + has the benefits of supporting both soft mode and being fully
55677 + integrated into the toolchain (the binutils patch is available
55678 + from http://pax.grsecurity.net).
55679 +
55680 + If you have applications not marked by the PT_PAX_FLAGS ELF
55681 + program header then you MUST enable the EI_PAX marking support
55682 + otherwise they will not get any protection.
55683 +
55684 + Note that if you enable the legacy EI_PAX marking support as well,
55685 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
55686 +
55687 +choice
55688 + prompt 'MAC system integration'
55689 + default PAX_HAVE_ACL_FLAGS
55690 + help
55691 + Mandatory Access Control systems have the option of controlling
55692 + PaX flags on a per executable basis, choose the method supported
55693 + by your particular system.
55694 +
55695 + - "none": if your MAC system does not interact with PaX,
55696 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
55697 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
55698 +
55699 + NOTE: this option is for developers/integrators only.
55700 +
55701 + config PAX_NO_ACL_FLAGS
55702 + bool 'none'
55703 +
55704 + config PAX_HAVE_ACL_FLAGS
55705 + bool 'direct'
55706 +
55707 + config PAX_HOOK_ACL_FLAGS
55708 + bool 'hook'
55709 +endchoice
55710 +
55711 +endmenu
55712 +
55713 +menu "Non-executable pages"
55714 + depends on PAX
55715 +
55716 +config PAX_NOEXEC
55717 + bool "Enforce non-executable pages"
55718 + depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86)
55719 + help
55720 + By design some architectures do not allow for protecting memory
55721 + pages against execution or even if they do, Linux does not make
55722 + use of this feature. In practice this means that if a page is
55723 + readable (such as the stack or heap) it is also executable.
55724 +
55725 + There is a well known exploit technique that makes use of this
55726 + fact and a common programming mistake where an attacker can
55727 + introduce code of his choice somewhere in the attacked program's
55728 + memory (typically the stack or the heap) and then execute it.
55729 +
55730 + If the attacked program was running with different (typically
55731 + higher) privileges than that of the attacker, then he can elevate
55732 + his own privilege level (e.g. get a root shell, write to files for
55733 + which he does not have write access to, etc).
55734 +
55735 + Enabling this option will let you choose from various features
55736 + that prevent the injection and execution of 'foreign' code in
55737 + a program.
55738 +
55739 + This will also break programs that rely on the old behaviour and
55740 + expect that dynamically allocated memory via the malloc() family
55741 + of functions is executable (which it is not). Notable examples
55742 + are the XFree86 4.x server, the java runtime and wine.
55743 +
55744 +config PAX_PAGEEXEC
55745 + bool "Paging based non-executable pages"
55746 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
55747 + select S390_SWITCH_AMODE if S390
55748 + select S390_EXEC_PROTECT if S390
55749 + help
55750 + This implementation is based on the paging feature of the CPU.
55751 + On i386 without hardware non-executable bit support there is a
55752 + variable but usually low performance impact, however on Intel's
55753 + P4 core based CPUs it is very high so you should not enable this
55754 + for kernels meant to be used on such CPUs.
55755 +
55756 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
55757 + with hardware non-executable bit support there is no performance
55758 + impact, on ppc the impact is negligible.
55759 +
55760 + Note that several architectures require various emulations due to
55761 + badly designed userland ABIs, this will cause a performance impact
55762 + but will disappear as soon as userland is fixed. For example, ppc
55763 + userland MUST have been built with secure-plt by a recent toolchain.
55764 +
55765 +config PAX_SEGMEXEC
55766 + bool "Segmentation based non-executable pages"
55767 + depends on PAX_NOEXEC && X86_32
55768 + help
55769 + This implementation is based on the segmentation feature of the
55770 + CPU and has a very small performance impact, however applications
55771 + will be limited to a 1.5 GB address space instead of the normal
55772 + 3 GB.
55773 +
55774 +config PAX_EMUTRAMP
55775 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
55776 + default y if PARISC
55777 + help
55778 + There are some programs and libraries that for one reason or
55779 + another attempt to execute special small code snippets from
55780 + non-executable memory pages. Most notable examples are the
55781 + signal handler return code generated by the kernel itself and
55782 + the GCC trampolines.
55783 +
55784 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
55785 + such programs will no longer work under your kernel.
55786 +
55787 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
55788 + utilities to enable trampoline emulation for the affected programs
55789 + yet still have the protection provided by the non-executable pages.
55790 +
55791 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
55792 + your system will not even boot.
55793 +
55794 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
55795 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
55796 + for the affected files.
55797 +
55798 + NOTE: enabling this feature *may* open up a loophole in the
55799 + protection provided by non-executable pages that an attacker
55800 + could abuse. Therefore the best solution is to not have any
55801 + files on your system that would require this option. This can
55802 + be achieved by not using libc5 (which relies on the kernel
55803 + signal handler return code) and not using or rewriting programs
55804 + that make use of the nested function implementation of GCC.
55805 + Skilled users can just fix GCC itself so that it implements
55806 + nested function calls in a way that does not interfere with PaX.
55807 +
55808 +config PAX_EMUSIGRT
55809 + bool "Automatically emulate sigreturn trampolines"
55810 + depends on PAX_EMUTRAMP && PARISC
55811 + default y
55812 + help
55813 + Enabling this option will have the kernel automatically detect
55814 + and emulate signal return trampolines executing on the stack
55815 + that would otherwise lead to task termination.
55816 +
55817 + This solution is intended as a temporary one for users with
55818 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
55819 + Modula-3 runtime, etc) or executables linked to such, basically
55820 + everything that does not specify its own SA_RESTORER function in
55821 + normal executable memory like glibc 2.1+ does.
55822 +
55823 + On parisc you MUST enable this option, otherwise your system will
55824 + not even boot.
55825 +
55826 + NOTE: this feature cannot be disabled on a per executable basis
55827 + and since it *does* open up a loophole in the protection provided
55828 + by non-executable pages, the best solution is to not have any
55829 + files on your system that would require this option.
55830 +
55831 +config PAX_MPROTECT
55832 + bool "Restrict mprotect()"
55833 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
55834 + help
55835 + Enabling this option will prevent programs from
55836 + - changing the executable status of memory pages that were
55837 + not originally created as executable,
55838 + - making read-only executable pages writable again,
55839 + - creating executable pages from anonymous memory,
55840 + - making read-only-after-relocations (RELRO) data pages writable again.
55841 +
55842 + You should say Y here to complete the protection provided by
55843 + the enforcement of non-executable pages.
55844 +
55845 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
55846 + this feature on a per file basis.
55847 +
55848 +config PAX_ELFRELOCS
55849 + bool "Allow ELF text relocations (read help)"
55850 + depends on PAX_MPROTECT
55851 + default n
55852 + help
55853 + Non-executable pages and mprotect() restrictions are effective
55854 + in preventing the introduction of new executable code into an
55855 + attacked task's address space. There remain only two venues
55856 + for this kind of attack: if the attacker can execute already
55857 + existing code in the attacked task then he can either have it
55858 + create and mmap() a file containing his code or have it mmap()
55859 + an already existing ELF library that does not have position
55860 + independent code in it and use mprotect() on it to make it
55861 + writable and copy his code there. While protecting against
55862 + the former approach is beyond PaX, the latter can be prevented
55863 + by having only PIC ELF libraries on one's system (which do not
55864 + need to relocate their code). If you are sure this is your case,
55865 + as is the case with all modern Linux distributions, then leave
55866 + this option disabled. You should say 'n' here.
55867 +
55868 +config PAX_ETEXECRELOCS
55869 + bool "Allow ELF ET_EXEC text relocations"
55870 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
55871 + select PAX_ELFRELOCS
55872 + default y
55873 + help
55874 + On some architectures there are incorrectly created applications
55875 + that require text relocations and would not work without enabling
55876 + this option. If you are an alpha, ia64 or parisc user, you should
55877 + enable this option and disable it once you have made sure that
55878 + none of your applications need it.
55879 +
55880 +config PAX_EMUPLT
55881 + bool "Automatically emulate ELF PLT"
55882 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
55883 + default y
55884 + help
55885 + Enabling this option will have the kernel automatically detect
55886 + and emulate the Procedure Linkage Table entries in ELF files.
55887 + On some architectures such entries are in writable memory, and
55888 + become non-executable leading to task termination. Therefore
55889 + it is mandatory that you enable this option on alpha, parisc,
55890 + sparc and sparc64, otherwise your system would not even boot.
55891 +
55892 + NOTE: this feature *does* open up a loophole in the protection
55893 + provided by the non-executable pages, therefore the proper
55894 + solution is to modify the toolchain to produce a PLT that does
55895 + not need to be writable.
55896 +
55897 +config PAX_DLRESOLVE
55898 + bool 'Emulate old glibc resolver stub'
55899 + depends on PAX_EMUPLT && SPARC
55900 + default n
55901 + help
55902 + This option is needed if userland has an old glibc (before 2.4)
55903 + that puts a 'save' instruction into the runtime generated resolver
55904 + stub that needs special emulation.
55905 +
55906 +config PAX_KERNEXEC
55907 + bool "Enforce non-executable kernel pages"
55908 + depends on PAX_NOEXEC && (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
55909 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
55910 + help
55911 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
55912 + that is, enabling this option will make it harder to inject
55913 + and execute 'foreign' code in kernel memory itself.
55914 +
55915 +config PAX_KERNEXEC_MODULE_TEXT
55916 + int "Minimum amount of memory reserved for module code"
55917 + default "4"
55918 + depends on PAX_KERNEXEC && X86_32 && MODULES
55919 + help
55920 + Due to implementation details the kernel must reserve a fixed
55921 + amount of memory for module code at compile time that cannot be
55922 + changed at runtime. Here you can specify the minimum amount
55923 + in MB that will be reserved. Due to the same implementation
55924 + details this size will always be rounded up to the next 2/4 MB
55925 + boundary (depends on PAE) so the actually available memory for
55926 + module code will usually be more than this minimum.
55927 +
55928 + The default 4 MB should be enough for most users but if you have
55929 + an excessive number of modules (e.g., most distribution configs
55930 + compile many drivers as modules) or use huge modules such as
55931 + nvidia's kernel driver, you will need to adjust this amount.
55932 + A good rule of thumb is to look at your currently loaded kernel
55933 + modules and add up their sizes.
55934 +
55935 +endmenu
55936 +
55937 +menu "Address Space Layout Randomization"
55938 + depends on PAX
55939 +
55940 +config PAX_ASLR
55941 + bool "Address Space Layout Randomization"
55942 + depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
55943 + help
55944 + Many if not most exploit techniques rely on the knowledge of
55945 + certain addresses in the attacked program. The following options
55946 + will allow the kernel to apply a certain amount of randomization
55947 + to specific parts of the program thereby forcing an attacker to
55948 + guess them in most cases. Any failed guess will most likely crash
55949 + the attacked program which allows the kernel to detect such attempts
55950 + and react on them. PaX itself provides no reaction mechanisms,
55951 + instead it is strongly encouraged that you make use of Nergal's
55952 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
55953 + (http://www.grsecurity.net/) built-in crash detection features or
55954 + develop one yourself.
55955 +
55956 + By saying Y here you can choose to randomize the following areas:
55957 + - top of the task's kernel stack
55958 + - top of the task's userland stack
55959 + - base address for mmap() requests that do not specify one
55960 + (this includes all libraries)
55961 + - base address of the main executable
55962 +
55963 + It is strongly recommended to say Y here as address space layout
55964 + randomization has negligible impact on performance yet it provides
55965 + a very effective protection.
55966 +
55967 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
55968 + this feature on a per file basis.
55969 +
55970 +config PAX_RANDKSTACK
55971 + bool "Randomize kernel stack base"
55972 + depends on PAX_ASLR && X86_TSC && X86_32
55973 + help
55974 + By saying Y here the kernel will randomize every task's kernel
55975 + stack on every system call. This will not only force an attacker
55976 + to guess it but also prevent him from making use of possible
55977 + leaked information about it.
55978 +
55979 + Since the kernel stack is a rather scarce resource, randomization
55980 + may cause unexpected stack overflows, therefore you should very
55981 + carefully test your system. Note that once enabled in the kernel
55982 + configuration, this feature cannot be disabled on a per file basis.
55983 +
55984 +config PAX_RANDUSTACK
55985 + bool "Randomize user stack base"
55986 + depends on PAX_ASLR
55987 + help
55988 + By saying Y here the kernel will randomize every task's userland
55989 + stack. The randomization is done in two steps where the second
55990 + one may apply a big amount of shift to the top of the stack and
55991 + cause problems for programs that want to use lots of memory (more
55992 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
55993 + For this reason the second step can be controlled by 'chpax' or
55994 + 'paxctl' on a per file basis.
55995 +
55996 +config PAX_RANDMMAP
55997 + bool "Randomize mmap() base"
55998 + depends on PAX_ASLR
55999 + help
56000 + By saying Y here the kernel will use a randomized base address for
56001 + mmap() requests that do not specify one themselves. As a result
56002 + all dynamically loaded libraries will appear at random addresses
56003 + and therefore be harder to exploit by a technique where an attacker
56004 + attempts to execute library code for his purposes (e.g. spawn a
56005 + shell from an exploited program that is running at an elevated
56006 + privilege level).
56007 +
56008 + Furthermore, if a program is relinked as a dynamic ELF file, its
56009 + base address will be randomized as well, completing the full
56010 + randomization of the address space layout. Attacking such programs
56011 + becomes a guess game. You can find an example of doing this at
56012 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
56013 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
56014 +
56015 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
56016 + feature on a per file basis.
56017 +
56018 +endmenu
56019 +
56020 +menu "Miscellaneous hardening features"
56021 +
56022 +config PAX_MEMORY_SANITIZE
56023 + bool "Sanitize all freed memory"
56024 + help
56025 + By saying Y here the kernel will erase memory pages as soon as they
56026 + are freed. This in turn reduces the lifetime of data stored in the
56027 + pages, making it less likely that sensitive information such as
56028 + passwords, cryptographic secrets, etc stay in memory for too long.
56029 +
56030 + This is especially useful for programs whose runtime is short, long
56031 + lived processes and the kernel itself benefit from this as long as
56032 + they operate on whole memory pages and ensure timely freeing of pages
56033 + that may hold sensitive information.
56034 +
56035 + The tradeoff is performance impact, on a single CPU system kernel
56036 + compilation sees a 3% slowdown, other systems and workloads may vary
56037 + and you are advised to test this feature on your expected workload
56038 + before deploying it.
56039 +
56040 + Note that this feature does not protect data stored in live pages,
56041 + e.g., process memory swapped to disk may stay there for a long time.
56042 +
56043 +config PAX_MEMORY_UDEREF
56044 + bool "Prevent invalid userland pointer dereference"
56045 + depends on X86 && !UML_X86 && !XEN
56046 + select PAX_PER_CPU_PGD if X86_64
56047 + help
56048 + By saying Y here the kernel will be prevented from dereferencing
56049 + userland pointers in contexts where the kernel expects only kernel
56050 + pointers. This is both a useful runtime debugging feature and a
56051 + security measure that prevents exploiting a class of kernel bugs.
56052 +
56053 + The tradeoff is that some virtualization solutions may experience
56054 + a huge slowdown and therefore you should not enable this feature
56055 + for kernels meant to run in such environments. Whether a given VM
56056 + solution is affected or not is best determined by simply trying it
56057 + out, the performance impact will be obvious right on boot as this
56058 + mechanism engages from very early on. A good rule of thumb is that
56059 + VMs running on CPUs without hardware virtualization support (i.e.,
56060 + the majority of IA-32 CPUs) will likely experience the slowdown.
56061 +
56062 +config PAX_REFCOUNT
56063 + bool "Prevent various kernel object reference counter overflows"
56064 + depends on GRKERNSEC && (X86 || SPARC64)
56065 + help
56066 + By saying Y here the kernel will detect and prevent overflowing
56067 + various (but not all) kinds of object reference counters. Such
56068 + overflows can normally occur due to bugs only and are often, if
56069 + not always, exploitable.
56070 +
56071 + The tradeoff is that data structures protected by an overflowed
56072 + refcount will never be freed and therefore will leak memory. Note
56073 + that this leak also happens even without this protection but in
56074 + that case the overflow can eventually trigger the freeing of the
56075 + data structure while it is still being used elsewhere, resulting
56076 + in the exploitable situation that this feature prevents.
56077 +
56078 + Since this has a negligible performance impact, you should enable
56079 + this feature.
56080 +
56081 +config PAX_USERCOPY
56082 + bool "Bounds check heap object copies between kernel and userland"
56083 + depends on X86 || PPC || SPARC
56084 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
56085 + help
56086 + By saying Y here the kernel will enforce the size of heap objects
56087 + when they are copied in either direction between the kernel and
56088 + userland, even if only a part of the heap object is copied.
56089 +
56090 + Specifically, this checking prevents information leaking from the
56091 + kernel heap during kernel to userland copies (if the kernel heap
56092 + object is otherwise fully initialized) and prevents kernel heap
56093 + overflows during userland to kernel copies.
56094 +
56095 + Note that the current implementation provides the strictest checks
56096 + for the SLUB allocator.
56097 +
56098 + If frame pointers are enabled on x86, this option will also
56099 + restrict copies into and out of the kernel stack to local variables
56100 + within a single frame.
56101 +
56102 + Since this has a negligible performance impact, you should enable
56103 + this feature.
56104 +
56105 +endmenu
56106 +
56107 +endmenu
56108 +
56109 config KEYS
56110 bool "Enable access key retention support"
56111 help
56112 @@ -124,7 +623,7 @@ config INTEL_TXT
56113 config LSM_MMAP_MIN_ADDR
56114 int "Low address space for LSM to protect from user allocation"
56115 depends on SECURITY && SECURITY_SELINUX
56116 - default 65536
56117 + default 32768
56118 help
56119 This is the portion of low virtual memory which should be protected
56120 from userspace allocation. Keeping a user from writing to low pages
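
The PAX_REFCOUNT help text above describes reference counter overflows turning into premature frees. A minimal user-space illustration of the underlying wrap-around, with a plain counter standing in for a kernel refcount and the starting value chosen so the wrap is visible without 2^31 iterations (the negative display assumes the usual two's-complement conversion):

#include <stdio.h>
#include <limits.h>

/* A counter that is only ever incremented eventually wraps past
 * INT_MAX; a later "put" that tests for zero can then free an object
 * that is still referenced elsewhere.  PAX_REFCOUNT traps the
 * overflow instead of letting the counter wrap. */
int main(void)
{
	unsigned int count = INT_MAX - 1;

	for (int i = 0; i < 3; i++) {
		count++;			/* a "get" on the object */
		printf("refcount = %d\n", (int)count);
	}
	return 0;
}
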
56121 diff -urNp linux-2.6.35.4/security/min_addr.c linux-2.6.35.4/security/min_addr.c
56122 --- linux-2.6.35.4/security/min_addr.c 2010-08-26 19:47:12.000000000 -0400
56123 +++ linux-2.6.35.4/security/min_addr.c 2010-09-17 20:12:37.000000000 -0400
56124 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG
56125 */
56126 static void update_mmap_min_addr(void)
56127 {
56128 +#ifndef SPARC
56129 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
56130 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
56131 mmap_min_addr = dac_mmap_min_addr;
56132 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
56133 #else
56134 mmap_min_addr = dac_mmap_min_addr;
56135 #endif
56136 +#endif
56137 }
56138
56139 /*
56140 diff -urNp linux-2.6.35.4/security/security.c linux-2.6.35.4/security/security.c
56141 --- linux-2.6.35.4/security/security.c 2010-08-26 19:47:12.000000000 -0400
56142 +++ linux-2.6.35.4/security/security.c 2010-09-17 20:12:37.000000000 -0400
56143 @@ -25,8 +25,8 @@ static __initdata char chosen_lsm[SECURI
56144 /* things that live in capability.c */
56145 extern void __init security_fixup_ops(struct security_operations *ops);
56146
56147 -static struct security_operations *security_ops;
56148 -static struct security_operations default_security_ops = {
56149 +static struct security_operations *security_ops __read_only;
56150 +static struct security_operations default_security_ops __read_only = {
56151 .name = "default",
56152 };
56153
56154 @@ -67,7 +67,9 @@ int __init security_init(void)
56155
56156 void reset_security_ops(void)
56157 {
56158 + pax_open_kernel();
56159 security_ops = &default_security_ops;
56160 + pax_close_kernel();
56161 }
56162
56163 /* Save user chosen LSM */
56164 diff -urNp linux-2.6.35.4/security/selinux/hooks.c linux-2.6.35.4/security/selinux/hooks.c
56165 --- linux-2.6.35.4/security/selinux/hooks.c 2010-08-26 19:47:12.000000000 -0400
56166 +++ linux-2.6.35.4/security/selinux/hooks.c 2010-09-17 20:12:37.000000000 -0400
56167 @@ -93,7 +93,6 @@
56168 #define NUM_SEL_MNT_OPTS 5
56169
56170 extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm);
56171 -extern struct security_operations *security_ops;
56172
56173 /* SECMARK reference count */
56174 atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
56175 @@ -5428,7 +5427,7 @@ static int selinux_key_getsecurity(struc
56176
56177 #endif
56178
56179 -static struct security_operations selinux_ops = {
56180 +static struct security_operations selinux_ops __read_only = {
56181 .name = "selinux",
56182
56183 .ptrace_access_check = selinux_ptrace_access_check,
56184 diff -urNp linux-2.6.35.4/security/smack/smack_lsm.c linux-2.6.35.4/security/smack/smack_lsm.c
56185 --- linux-2.6.35.4/security/smack/smack_lsm.c 2010-08-26 19:47:12.000000000 -0400
56186 +++ linux-2.6.35.4/security/smack/smack_lsm.c 2010-09-17 20:12:09.000000000 -0400
56187 @@ -3064,7 +3064,7 @@ static int smack_inode_getsecctx(struct
56188 return 0;
56189 }
56190
56191 -struct security_operations smack_ops = {
56192 +struct security_operations smack_ops __read_only = {
56193 .name = "smack",
56194
56195 .ptrace_access_check = smack_ptrace_access_check,
56196 diff -urNp linux-2.6.35.4/security/tomoyo/tomoyo.c linux-2.6.35.4/security/tomoyo/tomoyo.c
56197 --- linux-2.6.35.4/security/tomoyo/tomoyo.c 2010-08-26 19:47:12.000000000 -0400
56198 +++ linux-2.6.35.4/security/tomoyo/tomoyo.c 2010-09-17 20:12:09.000000000 -0400
56199 @@ -235,7 +235,7 @@ static int tomoyo_sb_pivotroot(struct pa
56200 * tomoyo_security_ops is a "struct security_operations" which is used for
56201 * registering TOMOYO.
56202 */
56203 -static struct security_operations tomoyo_security_ops = {
56204 +static struct security_operations tomoyo_security_ops __read_only = {
56205 .name = "tomoyo",
56206 .cred_alloc_blank = tomoyo_cred_alloc_blank,
56207 .cred_prepare = tomoyo_cred_prepare,
56208 diff -urNp linux-2.6.35.4/sound/aoa/codecs/onyx.c linux-2.6.35.4/sound/aoa/codecs/onyx.c
56209 --- linux-2.6.35.4/sound/aoa/codecs/onyx.c 2010-08-26 19:47:12.000000000 -0400
56210 +++ linux-2.6.35.4/sound/aoa/codecs/onyx.c 2010-09-17 20:12:09.000000000 -0400
56211 @@ -54,7 +54,7 @@ struct onyx {
56212 spdif_locked:1,
56213 analog_locked:1,
56214 original_mute:2;
56215 - int open_count;
56216 + atomic_t open_count;
56217 struct codec_info *codec_info;
56218
56219 /* mutex serializes concurrent access to the device
56220 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_i
56221 struct onyx *onyx = cii->codec_data;
56222
56223 mutex_lock(&onyx->mutex);
56224 - onyx->open_count++;
56225 + atomic_inc(&onyx->open_count);
56226 mutex_unlock(&onyx->mutex);
56227
56228 return 0;
56229 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_
56230 struct onyx *onyx = cii->codec_data;
56231
56232 mutex_lock(&onyx->mutex);
56233 - onyx->open_count--;
56234 - if (!onyx->open_count)
56235 + if (atomic_dec_and_test(&onyx->open_count))
56236 onyx->spdif_locked = onyx->analog_locked = 0;
56237 mutex_unlock(&onyx->mutex);
56238
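
The onyx hunks convert the codec's open_count from a plain int into an atomic_t manipulated with atomic_inc()/atomic_dec_and_test(). A small user-space sketch of the same dec-and-test pattern using C11 atomics as a stand-in for the kernel API (function names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int open_count = 0;

static void codec_open(void)
{
	atomic_fetch_add(&open_count, 1);		/* like atomic_inc() */
}

/* Returns true when this close dropped the last reference, mirroring
 * atomic_dec_and_test() in the hunk above. */
static bool codec_close(void)
{
	return atomic_fetch_sub(&open_count, 1) == 1;
}

int main(void)
{
	codec_open();
	codec_open();

	printf("last close? %d\n", codec_close());	/* 0: one user left    */
	printf("last close? %d\n", codec_close());	/* 1: unlock the codec */
	return 0;
}
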
56239 diff -urNp linux-2.6.35.4/sound/core/oss/pcm_oss.c linux-2.6.35.4/sound/core/oss/pcm_oss.c
56240 --- linux-2.6.35.4/sound/core/oss/pcm_oss.c 2010-08-26 19:47:12.000000000 -0400
56241 +++ linux-2.6.35.4/sound/core/oss/pcm_oss.c 2010-09-17 20:12:09.000000000 -0400
56242 @@ -2966,8 +2966,8 @@ static void snd_pcm_oss_proc_done(struct
56243 }
56244 }
56245 #else /* !CONFIG_SND_VERBOSE_PROCFS */
56246 -#define snd_pcm_oss_proc_init(pcm)
56247 -#define snd_pcm_oss_proc_done(pcm)
56248 +#define snd_pcm_oss_proc_init(pcm) do {} while (0)
56249 +#define snd_pcm_oss_proc_done(pcm) do {} while (0)
56250 #endif /* CONFIG_SND_VERBOSE_PROCFS */
56251
56252 /*
56253 diff -urNp linux-2.6.35.4/sound/core/seq/seq_lock.h linux-2.6.35.4/sound/core/seq/seq_lock.h
56254 --- linux-2.6.35.4/sound/core/seq/seq_lock.h 2010-08-26 19:47:12.000000000 -0400
56255 +++ linux-2.6.35.4/sound/core/seq/seq_lock.h 2010-09-17 20:12:09.000000000 -0400
56256 @@ -23,10 +23,10 @@ void snd_use_lock_sync_helper(snd_use_lo
56257 #else /* SMP || CONFIG_SND_DEBUG */
56258
56259 typedef spinlock_t snd_use_lock_t; /* dummy */
56260 -#define snd_use_lock_init(lockp) /**/
56261 -#define snd_use_lock_use(lockp) /**/
56262 -#define snd_use_lock_free(lockp) /**/
56263 -#define snd_use_lock_sync(lockp) /**/
56264 +#define snd_use_lock_init(lockp) do {} while (0)
56265 +#define snd_use_lock_use(lockp) do {} while (0)
56266 +#define snd_use_lock_free(lockp) do {} while (0)
56267 +#define snd_use_lock_sync(lockp) do {} while (0)
56268
56269 #endif /* SMP || CONFIG_SND_DEBUG */
56270
56271 diff -urNp linux-2.6.35.4/sound/drivers/mts64.c linux-2.6.35.4/sound/drivers/mts64.c
56272 --- linux-2.6.35.4/sound/drivers/mts64.c 2010-08-26 19:47:12.000000000 -0400
56273 +++ linux-2.6.35.4/sound/drivers/mts64.c 2010-09-17 20:12:09.000000000 -0400
56274 @@ -66,7 +66,7 @@ struct mts64 {
56275 struct pardevice *pardev;
56276 int pardev_claimed;
56277
56278 - int open_count;
56279 + atomic_t open_count;
56280 int current_midi_output_port;
56281 int current_midi_input_port;
56282 u8 mode[MTS64_NUM_INPUT_PORTS];
56283 @@ -696,7 +696,7 @@ static int snd_mts64_rawmidi_open(struct
56284 {
56285 struct mts64 *mts = substream->rmidi->private_data;
56286
56287 - if (mts->open_count == 0) {
56288 + if (atomic_read(&mts->open_count) == 0) {
56289 /* We don't need a spinlock here, because this is just called
56290 if the device has not been opened before.
56291 So there aren't any IRQs from the device */
56292 @@ -704,7 +704,7 @@ static int snd_mts64_rawmidi_open(struct
56293
56294 msleep(50);
56295 }
56296 - ++(mts->open_count);
56297 + atomic_inc(&mts->open_count);
56298
56299 return 0;
56300 }
56301 @@ -714,8 +714,7 @@ static int snd_mts64_rawmidi_close(struc
56302 struct mts64 *mts = substream->rmidi->private_data;
56303 unsigned long flags;
56304
56305 - --(mts->open_count);
56306 - if (mts->open_count == 0) {
56307 + if (atomic_dec_return(&mts->open_count) == 0) {
56308 /* We need the spinlock_irqsave here because we can still
56309 have IRQs at this point */
56310 spin_lock_irqsave(&mts->lock, flags);
56311 @@ -724,8 +723,8 @@ static int snd_mts64_rawmidi_close(struc
56312
56313 msleep(500);
56314
56315 - } else if (mts->open_count < 0)
56316 - mts->open_count = 0;
56317 + } else if (atomic_read(&mts->open_count) < 0)
56318 + atomic_set(&mts->open_count, 0);
56319
56320 return 0;
56321 }
56322 diff -urNp linux-2.6.35.4/sound/drivers/portman2x4.c linux-2.6.35.4/sound/drivers/portman2x4.c
56323 --- linux-2.6.35.4/sound/drivers/portman2x4.c 2010-08-26 19:47:12.000000000 -0400
56324 +++ linux-2.6.35.4/sound/drivers/portman2x4.c 2010-09-17 20:12:09.000000000 -0400
56325 @@ -84,7 +84,7 @@ struct portman {
56326 struct pardevice *pardev;
56327 int pardev_claimed;
56328
56329 - int open_count;
56330 + atomic_t open_count;
56331 int mode[PORTMAN_NUM_INPUT_PORTS];
56332 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
56333 };
56334 diff -urNp linux-2.6.35.4/sound/oss/sb_audio.c linux-2.6.35.4/sound/oss/sb_audio.c
56335 --- linux-2.6.35.4/sound/oss/sb_audio.c 2010-08-26 19:47:12.000000000 -0400
56336 +++ linux-2.6.35.4/sound/oss/sb_audio.c 2010-09-17 20:12:09.000000000 -0400
56337 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
56338 buf16 = (signed short *)(localbuf + localoffs);
56339 while (c)
56340 {
56341 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
56342 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
56343 if (copy_from_user(lbuf8,
56344 userbuf+useroffs + p,
56345 locallen))
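
The sb_audio change casts c to unsigned before the comparison, so a negative byte count is treated as a huge value and clamped to LBUFCOPYSIZE instead of being passed on as a copy length. A stand-alone sketch of the clamping behaviour (the driver's real LBUFCOPYSIZE value is not shown in the hunk; 1024 below is only a placeholder):

#include <stdio.h>

#define LBUFCOPYSIZE 1024	/* placeholder; the driver defines its own size */

/* With the signed comparison a negative c slips through unchanged and
 * later becomes an enormous size_t; with the unsigned comparison it is
 * clamped to the buffer size. */
static int clamp_signed(int c)
{
	return c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c;
}

static int clamp_unsigned(int c)
{
	return (unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c;
}

int main(void)
{
	printf("signed:   %d\n", clamp_signed(-4));	/* -4: not clamped   */
	printf("unsigned: %d\n", clamp_unsigned(-4));	/* 1024: clamped     */
	printf("unsigned: %d\n", clamp_unsigned(200));	/* 200: unchanged    */
	return 0;
}
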
56346 diff -urNp linux-2.6.35.4/sound/pci/ac97/ac97_codec.c linux-2.6.35.4/sound/pci/ac97/ac97_codec.c
56347 --- linux-2.6.35.4/sound/pci/ac97/ac97_codec.c 2010-08-26 19:47:12.000000000 -0400
56348 +++ linux-2.6.35.4/sound/pci/ac97/ac97_codec.c 2010-09-17 20:12:09.000000000 -0400
56349 @@ -1962,7 +1962,7 @@ static int snd_ac97_dev_disconnect(struc
56350 }
56351
56352 /* build_ops to do nothing */
56353 -static struct snd_ac97_build_ops null_build_ops;
56354 +static const struct snd_ac97_build_ops null_build_ops;
56355
56356 #ifdef CONFIG_SND_AC97_POWER_SAVE
56357 static void do_update_power(struct work_struct *work)
56358 diff -urNp linux-2.6.35.4/sound/pci/ac97/ac97_patch.c linux-2.6.35.4/sound/pci/ac97/ac97_patch.c
56359 --- linux-2.6.35.4/sound/pci/ac97/ac97_patch.c 2010-08-26 19:47:12.000000000 -0400
56360 +++ linux-2.6.35.4/sound/pci/ac97/ac97_patch.c 2010-09-17 20:12:09.000000000 -0400
56361 @@ -371,7 +371,7 @@ static int patch_yamaha_ymf743_build_spd
56362 return 0;
56363 }
56364
56365 -static struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
56366 +static const struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
56367 .build_spdif = patch_yamaha_ymf743_build_spdif,
56368 .build_3d = patch_yamaha_ymf7x3_3d,
56369 };
56370 @@ -455,7 +455,7 @@ static int patch_yamaha_ymf753_post_spdi
56371 return 0;
56372 }
56373
56374 -static struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
56375 +static const struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
56376 .build_3d = patch_yamaha_ymf7x3_3d,
56377 .build_post_spdif = patch_yamaha_ymf753_post_spdif
56378 };
56379 @@ -502,7 +502,7 @@ static int patch_wolfson_wm9703_specific
56380 return 0;
56381 }
56382
56383 -static struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
56384 +static const struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
56385 .build_specific = patch_wolfson_wm9703_specific,
56386 };
56387
56388 @@ -533,7 +533,7 @@ static int patch_wolfson_wm9704_specific
56389 return 0;
56390 }
56391
56392 -static struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
56393 +static const struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
56394 .build_specific = patch_wolfson_wm9704_specific,
56395 };
56396
56397 @@ -677,7 +677,7 @@ static int patch_wolfson_wm9711_specific
56398 return 0;
56399 }
56400
56401 -static struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
56402 +static const struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
56403 .build_specific = patch_wolfson_wm9711_specific,
56404 };
56405
56406 @@ -871,7 +871,7 @@ static void patch_wolfson_wm9713_resume
56407 }
56408 #endif
56409
56410 -static struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
56411 +static const struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
56412 .build_specific = patch_wolfson_wm9713_specific,
56413 .build_3d = patch_wolfson_wm9713_3d,
56414 #ifdef CONFIG_PM
56415 @@ -976,7 +976,7 @@ static int patch_sigmatel_stac97xx_speci
56416 return 0;
56417 }
56418
56419 -static struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
56420 +static const struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
56421 .build_3d = patch_sigmatel_stac9700_3d,
56422 .build_specific = patch_sigmatel_stac97xx_specific
56423 };
56424 @@ -1023,7 +1023,7 @@ static int patch_sigmatel_stac9708_speci
56425 return patch_sigmatel_stac97xx_specific(ac97);
56426 }
56427
56428 -static struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
56429 +static const struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
56430 .build_3d = patch_sigmatel_stac9708_3d,
56431 .build_specific = patch_sigmatel_stac9708_specific
56432 };
56433 @@ -1252,7 +1252,7 @@ static int patch_sigmatel_stac9758_speci
56434 return 0;
56435 }
56436
56437 -static struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
56438 +static const struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
56439 .build_3d = patch_sigmatel_stac9700_3d,
56440 .build_specific = patch_sigmatel_stac9758_specific
56441 };
56442 @@ -1327,7 +1327,7 @@ static int patch_cirrus_build_spdif(stru
56443 return 0;
56444 }
56445
56446 -static struct snd_ac97_build_ops patch_cirrus_ops = {
56447 +static const struct snd_ac97_build_ops patch_cirrus_ops = {
56448 .build_spdif = patch_cirrus_build_spdif
56449 };
56450
56451 @@ -1384,7 +1384,7 @@ static int patch_conexant_build_spdif(st
56452 return 0;
56453 }
56454
56455 -static struct snd_ac97_build_ops patch_conexant_ops = {
56456 +static const struct snd_ac97_build_ops patch_conexant_ops = {
56457 .build_spdif = patch_conexant_build_spdif
56458 };
56459
56460 @@ -1486,7 +1486,7 @@ static const struct snd_ac97_res_table a
56461 { AC97_VIDEO, 0x9f1f },
56462 { AC97_AUX, 0x9f1f },
56463 { AC97_PCM, 0x9f1f },
56464 - { } /* terminator */
56465 + { 0, 0 } /* terminator */
56466 };
56467
56468 static int patch_ad1819(struct snd_ac97 * ac97)
56469 @@ -1560,7 +1560,7 @@ static void patch_ad1881_chained(struct
56470 }
56471 }
56472
56473 -static struct snd_ac97_build_ops patch_ad1881_build_ops = {
56474 +static const struct snd_ac97_build_ops patch_ad1881_build_ops = {
56475 #ifdef CONFIG_PM
56476 .resume = ad18xx_resume
56477 #endif
56478 @@ -1647,7 +1647,7 @@ static int patch_ad1885_specific(struct
56479 return 0;
56480 }
56481
56482 -static struct snd_ac97_build_ops patch_ad1885_build_ops = {
56483 +static const struct snd_ac97_build_ops patch_ad1885_build_ops = {
56484 .build_specific = &patch_ad1885_specific,
56485 #ifdef CONFIG_PM
56486 .resume = ad18xx_resume
56487 @@ -1674,7 +1674,7 @@ static int patch_ad1886_specific(struct
56488 return 0;
56489 }
56490
56491 -static struct snd_ac97_build_ops patch_ad1886_build_ops = {
56492 +static const struct snd_ac97_build_ops patch_ad1886_build_ops = {
56493 .build_specific = &patch_ad1886_specific,
56494 #ifdef CONFIG_PM
56495 .resume = ad18xx_resume
56496 @@ -1881,7 +1881,7 @@ static int patch_ad1981a_specific(struct
56497 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
56498 }
56499
56500 -static struct snd_ac97_build_ops patch_ad1981a_build_ops = {
56501 +static const struct snd_ac97_build_ops patch_ad1981a_build_ops = {
56502 .build_post_spdif = patch_ad198x_post_spdif,
56503 .build_specific = patch_ad1981a_specific,
56504 #ifdef CONFIG_PM
56505 @@ -1936,7 +1936,7 @@ static int patch_ad1981b_specific(struct
56506 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
56507 }
56508
56509 -static struct snd_ac97_build_ops patch_ad1981b_build_ops = {
56510 +static const struct snd_ac97_build_ops patch_ad1981b_build_ops = {
56511 .build_post_spdif = patch_ad198x_post_spdif,
56512 .build_specific = patch_ad1981b_specific,
56513 #ifdef CONFIG_PM
56514 @@ -2075,7 +2075,7 @@ static int patch_ad1888_specific(struct
56515 return patch_build_controls(ac97, snd_ac97_ad1888_controls, ARRAY_SIZE(snd_ac97_ad1888_controls));
56516 }
56517
56518 -static struct snd_ac97_build_ops patch_ad1888_build_ops = {
56519 +static const struct snd_ac97_build_ops patch_ad1888_build_ops = {
56520 .build_post_spdif = patch_ad198x_post_spdif,
56521 .build_specific = patch_ad1888_specific,
56522 #ifdef CONFIG_PM
56523 @@ -2124,7 +2124,7 @@ static int patch_ad1980_specific(struct
56524 return patch_build_controls(ac97, &snd_ac97_ad198x_2cmic, 1);
56525 }
56526
56527 -static struct snd_ac97_build_ops patch_ad1980_build_ops = {
56528 +static const struct snd_ac97_build_ops patch_ad1980_build_ops = {
56529 .build_post_spdif = patch_ad198x_post_spdif,
56530 .build_specific = patch_ad1980_specific,
56531 #ifdef CONFIG_PM
56532 @@ -2239,7 +2239,7 @@ static int patch_ad1985_specific(struct
56533 ARRAY_SIZE(snd_ac97_ad1985_controls));
56534 }
56535
56536 -static struct snd_ac97_build_ops patch_ad1985_build_ops = {
56537 +static const struct snd_ac97_build_ops patch_ad1985_build_ops = {
56538 .build_post_spdif = patch_ad198x_post_spdif,
56539 .build_specific = patch_ad1985_specific,
56540 #ifdef CONFIG_PM
56541 @@ -2531,7 +2531,7 @@ static int patch_ad1986_specific(struct
56542 ARRAY_SIZE(snd_ac97_ad1985_controls));
56543 }
56544
56545 -static struct snd_ac97_build_ops patch_ad1986_build_ops = {
56546 +static const struct snd_ac97_build_ops patch_ad1986_build_ops = {
56547 .build_post_spdif = patch_ad198x_post_spdif,
56548 .build_specific = patch_ad1986_specific,
56549 #ifdef CONFIG_PM
56550 @@ -2636,7 +2636,7 @@ static int patch_alc650_specific(struct
56551 return 0;
56552 }
56553
56554 -static struct snd_ac97_build_ops patch_alc650_ops = {
56555 +static const struct snd_ac97_build_ops patch_alc650_ops = {
56556 .build_specific = patch_alc650_specific,
56557 .update_jacks = alc650_update_jacks
56558 };
56559 @@ -2788,7 +2788,7 @@ static int patch_alc655_specific(struct
56560 return 0;
56561 }
56562
56563 -static struct snd_ac97_build_ops patch_alc655_ops = {
56564 +static const struct snd_ac97_build_ops patch_alc655_ops = {
56565 .build_specific = patch_alc655_specific,
56566 .update_jacks = alc655_update_jacks
56567 };
56568 @@ -2900,7 +2900,7 @@ static int patch_alc850_specific(struct
56569 return 0;
56570 }
56571
56572 -static struct snd_ac97_build_ops patch_alc850_ops = {
56573 +static const struct snd_ac97_build_ops patch_alc850_ops = {
56574 .build_specific = patch_alc850_specific,
56575 .update_jacks = alc850_update_jacks
56576 };
56577 @@ -2962,7 +2962,7 @@ static int patch_cm9738_specific(struct
56578 return patch_build_controls(ac97, snd_ac97_cm9738_controls, ARRAY_SIZE(snd_ac97_cm9738_controls));
56579 }
56580
56581 -static struct snd_ac97_build_ops patch_cm9738_ops = {
56582 +static const struct snd_ac97_build_ops patch_cm9738_ops = {
56583 .build_specific = patch_cm9738_specific,
56584 .update_jacks = cm9738_update_jacks
56585 };
56586 @@ -3053,7 +3053,7 @@ static int patch_cm9739_post_spdif(struc
56587 return patch_build_controls(ac97, snd_ac97_cm9739_controls_spdif, ARRAY_SIZE(snd_ac97_cm9739_controls_spdif));
56588 }
56589
56590 -static struct snd_ac97_build_ops patch_cm9739_ops = {
56591 +static const struct snd_ac97_build_ops patch_cm9739_ops = {
56592 .build_specific = patch_cm9739_specific,
56593 .build_post_spdif = patch_cm9739_post_spdif,
56594 .update_jacks = cm9739_update_jacks
56595 @@ -3227,7 +3227,7 @@ static int patch_cm9761_specific(struct
56596 return patch_build_controls(ac97, snd_ac97_cm9761_controls, ARRAY_SIZE(snd_ac97_cm9761_controls));
56597 }
56598
56599 -static struct snd_ac97_build_ops patch_cm9761_ops = {
56600 +static const struct snd_ac97_build_ops patch_cm9761_ops = {
56601 .build_specific = patch_cm9761_specific,
56602 .build_post_spdif = patch_cm9761_post_spdif,
56603 .update_jacks = cm9761_update_jacks
56604 @@ -3323,7 +3323,7 @@ static int patch_cm9780_specific(struct
56605 return patch_build_controls(ac97, cm9780_controls, ARRAY_SIZE(cm9780_controls));
56606 }
56607
56608 -static struct snd_ac97_build_ops patch_cm9780_ops = {
56609 +static const struct snd_ac97_build_ops patch_cm9780_ops = {
56610 .build_specific = patch_cm9780_specific,
56611 .build_post_spdif = patch_cm9761_post_spdif /* identical with CM9761 */
56612 };
56613 @@ -3443,7 +3443,7 @@ static int patch_vt1616_specific(struct
56614 return 0;
56615 }
56616
56617 -static struct snd_ac97_build_ops patch_vt1616_ops = {
56618 +static const struct snd_ac97_build_ops patch_vt1616_ops = {
56619 .build_specific = patch_vt1616_specific
56620 };
56621
56622 @@ -3797,7 +3797,7 @@ static int patch_it2646_specific(struct
56623 return 0;
56624 }
56625
56626 -static struct snd_ac97_build_ops patch_it2646_ops = {
56627 +static const struct snd_ac97_build_ops patch_it2646_ops = {
56628 .build_specific = patch_it2646_specific,
56629 .update_jacks = it2646_update_jacks
56630 };
56631 @@ -3831,7 +3831,7 @@ static int patch_si3036_specific(struct
56632 return 0;
56633 }
56634
56635 -static struct snd_ac97_build_ops patch_si3036_ops = {
56636 +static const struct snd_ac97_build_ops patch_si3036_ops = {
56637 .build_specific = patch_si3036_specific,
56638 };
56639
56640 @@ -3864,7 +3864,7 @@ static struct snd_ac97_res_table lm4550_
56641 { AC97_AUX, 0x1f1f },
56642 { AC97_PCM, 0x1f1f },
56643 { AC97_REC_GAIN, 0x0f0f },
56644 - { } /* terminator */
56645 + { 0, 0 } /* terminator */
56646 };
56647
56648 static int patch_lm4550(struct snd_ac97 *ac97)
56649 @@ -3898,7 +3898,7 @@ static int patch_ucb1400_specific(struct
56650 return 0;
56651 }
56652
56653 -static struct snd_ac97_build_ops patch_ucb1400_ops = {
56654 +static const struct snd_ac97_build_ops patch_ucb1400_ops = {
56655 .build_specific = patch_ucb1400_specific,
56656 };
56657
56658 diff -urNp linux-2.6.35.4/sound/pci/ens1370.c linux-2.6.35.4/sound/pci/ens1370.c
56659 --- linux-2.6.35.4/sound/pci/ens1370.c 2010-08-26 19:47:12.000000000 -0400
56660 +++ linux-2.6.35.4/sound/pci/ens1370.c 2010-09-17 20:12:09.000000000 -0400
56661 @@ -452,7 +452,7 @@ static DEFINE_PCI_DEVICE_TABLE(snd_audio
56662 { PCI_VDEVICE(ENSONIQ, 0x5880), 0, }, /* ES1373 - CT5880 */
56663 { PCI_VDEVICE(ECTIVA, 0x8938), 0, }, /* Ectiva EV1938 */
56664 #endif
56665 - { 0, }
56666 + { 0, 0, 0, 0, 0, 0, 0 }
56667 };
56668
56669 MODULE_DEVICE_TABLE(pci, snd_audiopci_ids);
56670 diff -urNp linux-2.6.35.4/sound/pci/hda/patch_hdmi.c linux-2.6.35.4/sound/pci/hda/patch_hdmi.c
56671 --- linux-2.6.35.4/sound/pci/hda/patch_hdmi.c 2010-08-26 19:47:12.000000000 -0400
56672 +++ linux-2.6.35.4/sound/pci/hda/patch_hdmi.c 2010-09-17 20:12:09.000000000 -0400
56673 @@ -670,10 +670,10 @@ static void hdmi_non_intrinsic_event(str
56674 cp_ready);
56675
56676 /* TODO */
56677 - if (cp_state)
56678 - ;
56679 - if (cp_ready)
56680 - ;
56681 + if (cp_state) {
56682 + }
56683 + if (cp_ready) {
56684 + }
56685 }
56686
56687
56688 diff -urNp linux-2.6.35.4/sound/pci/intel8x0.c linux-2.6.35.4/sound/pci/intel8x0.c
56689 --- linux-2.6.35.4/sound/pci/intel8x0.c 2010-08-26 19:47:12.000000000 -0400
56690 +++ linux-2.6.35.4/sound/pci/intel8x0.c 2010-09-17 20:12:09.000000000 -0400
56691 @@ -444,7 +444,7 @@ static DEFINE_PCI_DEVICE_TABLE(snd_intel
56692 { PCI_VDEVICE(AMD, 0x746d), DEVICE_INTEL }, /* AMD8111 */
56693 { PCI_VDEVICE(AMD, 0x7445), DEVICE_INTEL }, /* AMD768 */
56694 { PCI_VDEVICE(AL, 0x5455), DEVICE_ALI }, /* Ali5455 */
56695 - { 0, }
56696 + { 0, 0, 0, 0, 0, 0, 0 }
56697 };
56698
56699 MODULE_DEVICE_TABLE(pci, snd_intel8x0_ids);
56700 @@ -2135,7 +2135,7 @@ static struct ac97_quirk ac97_quirks[] _
56701 .type = AC97_TUNE_HP_ONLY
56702 },
56703 #endif
56704 - { } /* terminator */
56705 + { 0, 0, 0, 0, NULL, 0 } /* terminator */
56706 };
56707
56708 static int __devinit snd_intel8x0_mixer(struct intel8x0 *chip, int ac97_clock,
56709 diff -urNp linux-2.6.35.4/sound/pci/intel8x0m.c linux-2.6.35.4/sound/pci/intel8x0m.c
56710 --- linux-2.6.35.4/sound/pci/intel8x0m.c 2010-08-26 19:47:12.000000000 -0400
56711 +++ linux-2.6.35.4/sound/pci/intel8x0m.c 2010-09-17 20:12:09.000000000 -0400
56712 @@ -239,7 +239,7 @@ static DEFINE_PCI_DEVICE_TABLE(snd_intel
56713 { PCI_VDEVICE(AMD, 0x746d), DEVICE_INTEL }, /* AMD8111 */
56714 { PCI_VDEVICE(AL, 0x5455), DEVICE_ALI }, /* Ali5455 */
56715 #endif
56716 - { 0, }
56717 + { 0, 0, 0, 0, 0, 0, 0 }
56718 };
56719
56720 MODULE_DEVICE_TABLE(pci, snd_intel8x0m_ids);
56721 @@ -1264,7 +1264,7 @@ static struct shortname_table {
56722 { 0x5455, "ALi M5455" },
56723 { 0x746d, "AMD AMD8111" },
56724 #endif
56725 - { 0 },
56726 + { 0, NULL },
56727 };
56728
56729 static int __devinit snd_intel8x0m_probe(struct pci_dev *pci,
56730 diff -urNp linux-2.6.35.4/usr/gen_init_cpio.c linux-2.6.35.4/usr/gen_init_cpio.c
56731 --- linux-2.6.35.4/usr/gen_init_cpio.c 2010-08-26 19:47:12.000000000 -0400
56732 +++ linux-2.6.35.4/usr/gen_init_cpio.c 2010-09-17 20:12:09.000000000 -0400
56733 @@ -299,7 +299,7 @@ static int cpio_mkfile(const char *name,
56734 int retval;
56735 int rc = -1;
56736 int namesize;
56737 - int i;
56738 + unsigned int i;
56739
56740 mode |= S_IFREG;
56741
56742 @@ -386,9 +386,10 @@ static char *cpio_replace_env(char *new_
56743 *env_var = *expanded = '\0';
56744 strncat(env_var, start + 2, end - start - 2);
56745 strncat(expanded, new_location, start - new_location);
56746 - strncat(expanded, getenv(env_var), PATH_MAX);
56747 - strncat(expanded, end + 1, PATH_MAX);
56748 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
56749 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
56750 strncpy(new_location, expanded, PATH_MAX);
56751 + new_location[PATH_MAX] = 0;
56752 } else
56753 break;
56754 }
56755 diff -urNp linux-2.6.35.4/virt/kvm/kvm_main.c linux-2.6.35.4/virt/kvm/kvm_main.c
56756 --- linux-2.6.35.4/virt/kvm/kvm_main.c 2010-08-26 19:47:12.000000000 -0400
56757 +++ linux-2.6.35.4/virt/kvm/kvm_main.c 2010-09-17 20:12:09.000000000 -0400
56758 @@ -1284,6 +1284,7 @@ static int kvm_vcpu_release(struct inode
56759 return 0;
56760 }
56761
56762 +/* cannot be const */
56763 static struct file_operations kvm_vcpu_fops = {
56764 .release = kvm_vcpu_release,
56765 .unlocked_ioctl = kvm_vcpu_ioctl,
56766 @@ -1738,6 +1739,7 @@ static int kvm_vm_mmap(struct file *file
56767 return 0;
56768 }
56769
56770 +/* cannot be const */
56771 static struct file_operations kvm_vm_fops = {
56772 .release = kvm_vm_release,
56773 .unlocked_ioctl = kvm_vm_ioctl,
56774 @@ -1835,6 +1837,7 @@ out:
56775 return r;
56776 }
56777
56778 +/* cannot be const */
56779 static struct file_operations kvm_chardev_ops = {
56780 .unlocked_ioctl = kvm_dev_ioctl,
56781 .compat_ioctl = kvm_dev_ioctl,
56782 @@ -1844,6 +1847,9 @@ static struct miscdevice kvm_dev = {
56783 KVM_MINOR,
56784 "kvm",
56785 &kvm_chardev_ops,
56786 + {NULL, NULL},
56787 + NULL,
56788 + NULL
56789 };
56790
56791 static void hardware_enable(void *junk)
56792 @@ -2178,7 +2184,7 @@ static void kvm_sched_out(struct preempt
56793 kvm_arch_vcpu_put(vcpu);
56794 }
56795
56796 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
56797 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
56798 struct module *module)
56799 {
56800 int r;